diff --git a/.vscode/settings.json b/.vscode/settings.json index e2c03af64..88dbc0bf8 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -35,6 +35,10 @@ }, ],*/ + "[rust]": { + "editor.defaultFormatter": "rust-lang.rust-analyzer", + "editor.formatOnSave": true + }, "rust-analyzer.server.extraEnv": { // this allows us to use unstable features (as is true for the Docker builds), without rust-analyzer showing an error on the "#![feature(XXX)]" line //"RUSTC_BOOTSTRAP": "1", // not needed anymore diff --git a/Packages/app-server/src/db/_general.rs b/Packages/app-server/src/db/_general.rs index 791af8164..0f0dd265d 100644 --- a/Packages/app-server/src/db/_general.rs +++ b/Packages/app-server/src/db/_general.rs @@ -1,55 +1,55 @@ -use rust_shared::anyhow::{Context, Error, anyhow, ensure}; -use rust_shared::async_graphql::{Object, Schema, Subscription, ID, async_stream, OutputType, scalar, EmptySubscription, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt, StreamExt, Future}; +use futures_util::{stream, Future, Stream, StreamExt, TryFutureExt}; +use rust_shared::anyhow::{anyhow, ensure, Context, Error}; +use rust_shared::async_graphql::{async_stream, scalar, EmptySubscription, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; +use rust_shared::db_constants::SYSTEM_USER_ID; use rust_shared::indoc::indoc; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::Client; use rust_shared::utils::general_::extensions::ToOwnedV; -use rust_shared::{async_graphql, serde_json, GQLError, SubError, to_sub_err}; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Client}; -use tracing::{info, error, warn}; +use rust_shared::{async_graphql, serde_json, to_sub_err, GQLError, SubError}; use std::env; use std::path::Path; -use std::{time::Duration, pin::Pin, task::Poll}; +use std::{pin::Pin, task::Poll, time::Duration}; +use tracing::{error, info, warn}; use crate::utils::db::agql_ext::gql_request_storage::GQLRequestStorage; -use super::commands::add_term::{AddTermResult}; +use super::commands::add_term::AddTermResult; use super::commands::refresh_lq_data::refresh_lq_data; use super::general::sign_in_::jwt_utils::{get_user_jwt_data_from_gql_ctx, resolve_and_verify_jwt_string}; /// Wrapper around `ensure!` macro, which makes it easily usable in functions that return `Result`. pub fn ensure_gql(passed: bool, error_message: impl AsRef) -> Result<(), Error> { - ensure!(passed, "{}", error_message.as_ref()); - Ok(()) + ensure!(passed, "{}", error_message.as_ref()); + Ok(()) } pub fn trusted_operator_passkey_is_correct(passkey: String, log_message_if_wrong: bool) -> bool { - let Ok(stored_passkey) = env::var("TRUSTED_OPERATOR_PASSKEY") else { - error!(indoc!{r#" + let Ok(stored_passkey) = env::var("TRUSTED_OPERATOR_PASSKEY") else { + error!(indoc! {r#" The debate-map-trusted-operator secret/passkey does not exist, or is invalid! This endpoint cannot be used until an admin fixes/creates that secret. K8s path for secret: namespace "default", name "debate-map-trusted-provider", field "passkey", value any utf8 string "#}); - return false; - }; - let result = passkey == stored_passkey; - if !result && log_message_if_wrong { - error!("Trusted-operator passkey is incorrect! 
Submitted:{}", passkey); - } - return result; + return false; + }; + let result = passkey == stored_passkey; + if !result && log_message_if_wrong { + error!("Trusted-operator passkey is incorrect! Submitted:{}", passkey); + } + return result; } pub fn ensure_trusted_operator_passkey_is_correct(passkey: String, log_message_if_wrong: bool) -> Result<(), Error> { - if !trusted_operator_passkey_is_correct(passkey, log_message_if_wrong) { - return Err(anyhow!("Trusted-operator passkey is incorrect!")); - } - Ok(()) + if !trusted_operator_passkey_is_correct(passkey, log_message_if_wrong) { + return Err(anyhow!("Trusted-operator passkey is incorrect!")); + } + Ok(()) } -wrap_slow_macros!{ +wrap_slow_macros! { // queries // ========== @@ -58,13 +58,13 @@ wrap_slow_macros!{ pub struct QueryShard_General; #[Object] impl QueryShard_General { - // useful for testing monitor-tool's logs page - async fn print_empty_log(&self) -> &str { - info!("print_empty_log called"); - warn!("Test2"); - error!("Test3"); - "" - } + // useful for testing monitor-tool's logs page + async fn print_empty_log(&self) -> &str { + info!("print_empty_log called"); + warn!("Test2"); + error!("Test3"); + "" + } } // mutations @@ -72,14 +72,14 @@ impl QueryShard_General { #[derive(Default)] pub struct MutationShard_General; #[Object] impl MutationShard_General { - async fn refreshLQData(&self, ctx: &async_graphql::Context<'_>, payload: JSONValue) -> Result { - let result = refresh_lq_data(ctx, payload).await?; - Ok(result) - } + async fn refreshLQData(&self, ctx: &async_graphql::Context<'_>, payload: JSONValue) -> Result { + let result = refresh_lq_data(ctx, payload).await?; + Ok(result) + } } #[derive(SimpleObject)] pub struct GenericMutation_Result { - pub message: String, + pub message: String, } // subscriptions @@ -92,67 +92,67 @@ pub struct LinkPreserverInput { #[derive(SimpleObject)] struct LinkPreserverResult { - alive: bool, - // probably move effects like this (unrelated to link-preserving) into a separate subscription eventually - pageRefreshRequested: bool, + alive: bool, + // probably move effects like this (unrelated to link-preserving) into a separate subscription eventually + pageRefreshRequested: bool, } #[derive(SimpleObject)] struct PingResult { - pong: String, + pong: String, } #[derive(Default)] pub struct SubscriptionShard_General; #[Subscription] impl SubscriptionShard_General { - /// This endpoint serves two purposes: - /// * Keeps cloudflare from terminating the websocket for inactivity, in cases where >100s pass without data changing or the user navigating anywhere. - /// * Keeps the frontend from closing the websocket, in cases where the client is not watching any data. (eg. on homepage when not signed-in) - async fn linkPreserver(&self, _ctx: &async_graphql::Context<'_>, input: LinkPreserverInput) -> impl Stream> { - let base_stream = async_stream::stream! { - let LinkPreserverInput { updateInterval } = input; - if (updateInterval < 10000) { Err(SubError::new(format!("Update-interval cannot be lower than 10000ms.")))?; } - - let mut refresh_requested_last_iteration = Path::new("./refreshPageForAllUsers_enabled").exists(); - loop { - // create the listed file in the app-server pod (eg. 
using Lens), if you've made an update that you need all clients to refresh for - let refresh_requested_new = Path::new("./refreshPageForAllUsers_enabled").exists(); - let refresh_just_requested = refresh_requested_new && !refresh_requested_last_iteration; - let result = LinkPreserverResult { - alive: true, - pageRefreshRequested: refresh_just_requested, - }; - refresh_requested_last_iteration = refresh_requested_new; - - yield Ok(result); - rust_shared::tokio::time::sleep(Duration::from_millis(updateInterval)).await; - } - }; - base_stream - } - - // for testing (eg. in gql-playground) [temporarily also used by frontend as a websocket keep-alive -- inferior to above since doesn't work in the no-data-watched case] - #[graphql(name = "_ping")] - async fn _ping(&self, _ctx: &async_graphql::Context<'_>) -> impl Stream { - let pong = "pong".to_owned(); - stream::once(async move { PingResult { - pong, - } }) - } - - // for debugging only, so hide from gql api introspection - #[graphql(visible = false)] - async fn checkUser<'a>(&self, ctx: &'a async_graphql::Context<'a>) -> impl Stream> + 'a { - let base_stream = async_stream::stream! { - let jwt_data = get_user_jwt_data_from_gql_ctx(ctx).await.map_err(to_sub_err)?; - yield Ok(CheckUserResult { userID: jwt_data.id }); - }; - base_stream - } + /// This endpoint serves two purposes: + /// * Keeps cloudflare from terminating the websocket for inactivity, in cases where >100s pass without data changing or the user navigating anywhere. + /// * Keeps the frontend from closing the websocket, in cases where the client is not watching any data. (eg. on homepage when not signed-in) + async fn linkPreserver(&self, _ctx: &async_graphql::Context<'_>, input: LinkPreserverInput) -> impl Stream> { + let base_stream = async_stream::stream! { + let LinkPreserverInput { updateInterval } = input; + if (updateInterval < 10000) { Err(SubError::new(format!("Update-interval cannot be lower than 10000ms.")))?; } + + let mut refresh_requested_last_iteration = Path::new("./refreshPageForAllUsers_enabled").exists(); + loop { + // create the listed file in the app-server pod (eg. using Lens), if you've made an update that you need all clients to refresh for + let refresh_requested_new = Path::new("./refreshPageForAllUsers_enabled").exists(); + let refresh_just_requested = refresh_requested_new && !refresh_requested_last_iteration; + let result = LinkPreserverResult { + alive: true, + pageRefreshRequested: refresh_just_requested, + }; + refresh_requested_last_iteration = refresh_requested_new; + + yield Ok(result); + rust_shared::tokio::time::sleep(Duration::from_millis(updateInterval)).await; + } + }; + base_stream + } + + // for testing (eg. in gql-playground) [temporarily also used by frontend as a websocket keep-alive -- inferior to above since doesn't work in the no-data-watched case] + #[graphql(name = "_ping")] + async fn _ping(&self, _ctx: &async_graphql::Context<'_>) -> impl Stream { + let pong = "pong".to_owned(); + stream::once(async move { PingResult { + pong, + } }) + } + + // for debugging only, so hide from gql api introspection + #[graphql(visible = false)] + async fn checkUser<'a>(&self, ctx: &'a async_graphql::Context<'a>) -> impl Stream> + 'a { + let base_stream = async_stream::stream! 
{ + let jwt_data = get_user_jwt_data_from_gql_ctx(ctx).await.map_err(to_sub_err)?; + yield Ok(CheckUserResult { userID: jwt_data.id }); + }; + base_stream + } } #[derive(SimpleObject, Debug)] struct CheckUserResult { - userID: String, + userID: String, } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/_shared/access_policy_target.rs b/Packages/app-server/src/db/_shared/access_policy_target.rs index c105f7f45..b6d7c7730 100644 --- a/Packages/app-server/src/db/_shared/access_policy_target.rs +++ b/Packages/app-server/src/db/_shared/access_policy_target.rs @@ -1,69 +1,79 @@ -use rust_shared::{async_graphql::{self, ScalarType, InputValueResult, InputValueError, Scalar, Value}, utils::general_::extensions::ToOwnedV, serde_json}; -use serde::{Serialize, Serializer, Deserialize, Deserializer}; +use rust_shared::{ + async_graphql::{self, InputValueError, InputValueResult, Scalar, ScalarType, Value}, + serde_json, + utils::general_::extensions::ToOwnedV, +}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use crate::db::access_policies_::_permission_set::APTable; pub struct AccessPolicyTarget { - pub policy_id: String, - pub ap_table: APTable, + pub policy_id: String, + pub ap_table: APTable, } impl AccessPolicyTarget { - pub fn new(access_policy: String, table: APTable) -> Self { - Self { policy_id: access_policy, ap_table: table.o() } - } + pub fn new(access_policy: String, table: APTable) -> Self { + Self { policy_id: access_policy, ap_table: table.o() } + } } impl Clone for AccessPolicyTarget { - fn clone(&self) -> Self { - AccessPolicyTarget::new(self.policy_id.o(), self.ap_table) - } + fn clone(&self) -> Self { + AccessPolicyTarget::new(self.policy_id.o(), self.ap_table) + } } impl Serialize for AccessPolicyTarget { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - //serializer.serialize_str(self.inner.to_string().as_str()) - let str_val = format!("{}:{:?}", self.policy_id, self.ap_table); - str_val.serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + //serializer.serialize_str(self.inner.to_string().as_str()) + let str_val = format!("{}:{:?}", self.policy_id, self.ap_table); + str_val.serialize(serializer) + } } impl<'de> Deserialize<'de> for AccessPolicyTarget { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { - let str_val = String::deserialize(deserializer)?; - let (policy_id, table_name) = str_val.split_once(":").ok_or_else(|| serde::de::Error::custom("AccessPolicyTarget must be in the format `access_policy_id:policy_subfield`"))?; + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let str_val = String::deserialize(deserializer)?; + let (policy_id, table_name) = str_val.split_once(":").ok_or_else(|| serde::de::Error::custom("AccessPolicyTarget must be in the format `access_policy_id:policy_subfield`"))?; - // ensure that policy_id substring is a valid UUID/slugid - // todo: probably change `policy_id` field to custom type that enforces this for itself - if policy_id.len() != 22 { - return Err(serde::de::Error::custom(format!("The policy-id within the access-policy-target must be a valid slugid; for example, its length must be 22 characters. 
Actual length:{}", policy_id.len()))); - } - let table: APTable = serde_json::from_value(serde_json::Value::String(table_name.o())).map_err(|e| serde::de::Error::custom(format!("Failed to parse valid table-name from access-policy-target:{}", e)))?; - - Ok(AccessPolicyTarget::new(policy_id.o(), table)) - } + // ensure that policy_id substring is a valid UUID/slugid + // todo: probably change `policy_id` field to custom type that enforces this for itself + if policy_id.len() != 22 { + return Err(serde::de::Error::custom(format!("The policy-id within the access-policy-target must be a valid slugid; for example, its length must be 22 characters. Actual length:{}", policy_id.len()))); + } + let table: APTable = serde_json::from_value(serde_json::Value::String(table_name.o())).map_err(|e| serde::de::Error::custom(format!("Failed to parse valid table-name from access-policy-target:{}", e)))?; + + Ok(AccessPolicyTarget::new(policy_id.o(), table)) + } } #[Scalar] impl ScalarType for AccessPolicyTarget { - fn parse(value: Value) -> InputValueResult { - match value { - Value::String(str_val) => { - //Ok(serde_json::from_str(&str_val).map_err(|e| InputValueError::custom(e))?) - let (policy_id, table_name) = str_val.split_once(":").ok_or_else(|| InputValueError::custom("AccessPolicyTarget must be in the format `access_policy_id:policy_subfield`"))?; + fn parse(value: Value) -> InputValueResult { + match value { + Value::String(str_val) => { + //Ok(serde_json::from_str(&str_val).map_err(|e| InputValueError::custom(e))?) + let (policy_id, table_name) = str_val.split_once(":").ok_or_else(|| InputValueError::custom("AccessPolicyTarget must be in the format `access_policy_id:policy_subfield`"))?; + + // ensure that policy_id substring is a valid UUID/slugid + // todo: probably change `policy_id` field to custom type that enforces this for itself + if policy_id.len() != 22 { + return Err(InputValueError::custom(format!("The policy-id within the access-policy-target must be a valid slugid; for example, its length must be 22 characters. Actual length:{}", policy_id.len()))); + } + let table: APTable = serde_json::from_value(serde_json::Value::String(table_name.o())).map_err(|e| InputValueError::custom(format!("Failed to parse valid table-name from access-policy-target:{}", e)))?; - // ensure that policy_id substring is a valid UUID/slugid - // todo: probably change `policy_id` field to custom type that enforces this for itself - if policy_id.len() != 22 { - return Err(InputValueError::custom(format!("The policy-id within the access-policy-target must be a valid slugid; for example, its length must be 22 characters. 
Actual length:{}", policy_id.len()))); - } - let table: APTable = serde_json::from_value(serde_json::Value::String(table_name.o())).map_err(|e| InputValueError::custom(format!("Failed to parse valid table-name from access-policy-target:{}", e)))?; - - Ok(AccessPolicyTarget::new(policy_id.o(), table)) - }, - _ => Err(InputValueError::custom("AccessPolicyTarget must be a string")), - } - } - fn to_value(&self) -> Value { - //Value::String(serde_json::to_string(&self).unwrap()) - let str_val = format!("{}:{:?}", self.policy_id, self.ap_table); - Value::String(str_val) - } -} \ No newline at end of file + Ok(AccessPolicyTarget::new(policy_id.o(), table)) + }, + _ => Err(InputValueError::custom("AccessPolicyTarget must be a string")), + } + } + fn to_value(&self) -> Value { + //Value::String(serde_json::to_string(&self).unwrap()) + let str_val = format!("{}:{:?}", self.policy_id, self.ap_table); + Value::String(str_val) + } +} diff --git a/Packages/app-server/src/db/_shared/attachments.rs b/Packages/app-server/src/db/_shared/attachments.rs index 76c7be6f7..20508a100 100644 --- a/Packages/app-server/src/db/_shared/attachments.rs +++ b/Packages/app-server/src/db/_shared/attachments.rs @@ -1,12 +1,12 @@ -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; use rust_shared::async_graphql; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; +use rust_shared::rust_macros::wrap_slow_macros; use rust_shared::utils::type_aliases::JSONValue; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use super::attachments_::source_chain::SourceChain; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] #[graphql(input_name = "TermAttachmentInput")] @@ -18,15 +18,15 @@ pub struct TermAttachment { #[graphql(input_name = "AttachmentInput")] pub struct Attachment { pub expandedByDefault: Option, - pub extras: Option, - + pub extras: Option, + // components - pub equation: Option, - pub references: Option, - pub quote: Option, - pub media: Option, - //pub media: Option, - pub description: Option, + pub equation: Option, + pub references: Option, + pub quote: Option, + pub media: Option, + //pub media: Option, + pub description: Option, } // todo: have Attachment struct use these directly (delayed, since means a change in the graphql api) @@ -63,4 +63,4 @@ pub struct DescriptionAttachment { pub text: String, } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/_shared/attachments_/source_chain.rs b/Packages/app-server/src/db/_shared/attachments_/source_chain.rs index 51c2201d8..d5e1a694a 100644 --- a/Packages/app-server/src/db/_shared/attachments_/source_chain.rs +++ b/Packages/app-server/src/db/_shared/attachments_/source_chain.rs @@ -1,7 +1,12 @@ -use rust_shared::{async_graphql::{self, Enum, SimpleObject, InputObject}, rust_macros::wrap_slow_macros, utils::{type_aliases::JSONValue, general_::serde::JSONValueV}, anyhow::Error}; -use serde::{Serialize, Deserialize}; +use rust_shared::{ + anyhow::Error, + async_graphql::{self, Enum, InputObject, SimpleObject}, + rust_macros::wrap_slow_macros, + utils::{general_::serde::JSONValueV, type_aliases::JSONValue}, +}; +use serde::{Deserialize, Serialize}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] #[graphql(input_name = "SourceChainInput")] @@ -33,49 +38,48 @@ pub struct Source { pub time_min: Option, pub time_max: Option, pub link: Option, - + pub hypothesisAnnotationId: Option, // todo: remove this (use extras container instead) - pub extras: Option, // used for, eg. external-ids from claim-miner and hypothesis + pub extras: Option, // used for, eg. external-ids from claim-miner and hypothesis } } pub fn source_chains_from_old_json_data(data: Option<&JSONValue>) -> Result, Error> { - match data { - Some(data) => { - let mut result: Vec = vec![]; - for source_chain_data in data.try_as_array()? { - result.push(source_chain_from_old_json_data(source_chain_data)?); - } - Ok(result) - }, - None => Ok(vec![]), - } + match data { + Some(data) => { + let mut result: Vec = vec![]; + for source_chain_data in data.try_as_array()? { + result.push(source_chain_from_old_json_data(source_chain_data)?); + } + Ok(result) + }, + None => Ok(vec![]), + } } +#[rustfmt::skip] pub fn source_chain_from_old_json_data(data: &JSONValue) -> Result { - let sources: Vec = data.try_get("sources")?.try_as_array()?.iter().map(|source| { - let source_type = match source["type"].as_i64().unwrap() { - 10 => SourceType::speech, - 20 => SourceType::text, - 30 => SourceType::image, - 40 => SourceType::video, - 50 => SourceType::webpage, - _ => panic!("Invalid source type"), - }; - Source { - r#type: source_type, - name: source.get("name").map(|a| a.as_string()).unwrap_or(None), - author: source.get("author").map(|a| a.as_string()).unwrap_or(None), - location: source.get("location").map(|a| a.as_string()).unwrap_or(None), - time_min: source.get("time_min").map(|a| a.as_f64()).unwrap_or(None), - time_max: source.get("time_max").map(|a| a.as_f64()).unwrap_or(None), - link: source.get("link").map(|a| a.as_string()).unwrap_or(None), - hypothesisAnnotationId: None, - extras: None, - } - }).collect(); - Ok(SourceChain { - sources, - }) -} \ No newline at end of file + let sources: Vec = data.try_get("sources")?.try_as_array()?.iter().map(|source| { + let source_type = match source["type"].as_i64().unwrap() { + 10 => SourceType::speech, + 20 => SourceType::text, + 30 => SourceType::image, + 40 => SourceType::video, + 50 => SourceType::webpage, + _ => panic!("Invalid source type"), + }; + Source { + r#type: source_type, + name: source.get("name").map(|a| a.as_string()).unwrap_or(None), + author: source.get("author").map(|a| a.as_string()).unwrap_or(None), + location: source.get("location").map(|a| a.as_string()).unwrap_or(None), + time_min: source.get("time_min").map(|a| a.as_f64()).unwrap_or(None), + time_max: source.get("time_max").map(|a| a.as_f64()).unwrap_or(None), + link: source.get("link").map(|a| a.as_string()).unwrap_or(None), + hypothesisAnnotationId: None, + extras: None, + } + }).collect(); + Ok(SourceChain { sources }) +} diff --git a/Packages/app-server/src/db/_shared/common_errors.rs b/Packages/app-server/src/db/_shared/common_errors.rs index 255d220ad..a3b515d1d 100644 --- a/Packages/app-server/src/db/_shared/common_errors.rs +++ b/Packages/app-server/src/db/_shared/common_errors.rs @@ -1,8 +1,8 @@ use rust_shared::anyhow::{anyhow, Error}; pub fn err_should_be_populated(field_path: &str) -> Error { - anyhow!(r#"The `{}` field should be populated at this point."#, field_path) + anyhow!(r#"The `{}` field should be populated at this point."#, field_path) } pub fn err_should_be_null(field_path: &str) -> Error { - anyhow!(r#"The `{}` field should 
be null at this point."#, field_path) -} \ No newline at end of file + anyhow!(r#"The `{}` field should be null at this point."#, field_path) +} diff --git a/Packages/app-server/src/db/_shared/path_finder.rs b/Packages/app-server/src/db/_shared/path_finder.rs index ffa3a553b..654fc2015 100644 --- a/Packages/app-server/src/db/_shared/path_finder.rs +++ b/Packages/app-server/src/db/_shared/path_finder.rs @@ -1,54 +1,56 @@ use futures_util::Future; -use rust_shared::utils::{type_aliases::JSONValue, general_::func_types::AsyncFn_Args3}; use rust_shared::anyhow::Error; +use rust_shared::utils::{general_::func_types::AsyncFn_Args3, type_aliases::JSONValue}; -use crate::{db::{nodes::get_node, node_links::get_node_links}, utils::db::accessors::AccessorContext}; +use crate::{ + db::{node_links::get_node_links, nodes::get_node}, + utils::db::accessors::AccessorContext, +}; pub async fn id_is_of_node_that_is_root_of_map(ctx: &AccessorContext<'_>, id: &str, _extra_data: Option<&JSONValue>) -> Result { Ok(get_node(ctx, id).await?.rootNodeForMap.is_some()) } pub async fn search_up_from_node_for_node_matching_x( - ctx: &AccessorContext<'_>, - start_node_id: &str, - //x_match_func: fn(&str, Option<&JSONValue>) -> bool, - x_match_func: impl for<'a> AsyncFn_Args3< - Result, - &'a AccessorContext<'a>, - &'a str, - Option<&'a JSONValue> - >, - x_match_func_data: Option<&JSONValue>, - node_ids_to_ignore: Vec + ctx: &AccessorContext<'_>, + start_node_id: &str, + //x_match_func: fn(&str, Option<&JSONValue>) -> bool, + x_match_func: impl for<'a> AsyncFn_Args3, &'a AccessorContext<'a>, &'a str, Option<&'a JSONValue>>, + x_match_func_data: Option<&JSONValue>, + node_ids_to_ignore: Vec, ) -> Result, Error> { - let start_node = get_node(ctx, start_node_id).await?; + let start_node = get_node(ctx, start_node_id).await?; - #[derive(Debug)] - struct Head { - id: String, - path: Vec, - } - let mut current_layer_heads: Vec = vec![Head { id: start_node.id.to_string(), path: vec![start_node.id.to_string()] }]; - while current_layer_heads.len() > 0 { - // first, check if any current-layer-head nodes are the root-node (if so, return right away, as we found a shortest path) - for layer_head in ¤t_layer_heads { - if x_match_func(&ctx, &layer_head.id, x_match_func_data).await? { - return Ok(Some(layer_head.path.join("/"))); - } - } + #[derive(Debug)] + struct Head { + id: String, + path: Vec, + } + let mut current_layer_heads: Vec = vec![Head { id: start_node.id.to_string(), path: vec![start_node.id.to_string()] }]; + while current_layer_heads.len() > 0 { + // first, check if any current-layer-head nodes are the root-node (if so, return right away, as we found a shortest path) + for layer_head in ¤t_layer_heads { + if x_match_func(&ctx, &layer_head.id, x_match_func_data).await? 
{ + return Ok(Some(layer_head.path.join("/"))); + } + } - // else, find new-layer-heads for next search loop - let mut new_layer_heads = vec![]; - for layer_head in ¤t_layer_heads { - let node = get_node(ctx, &layer_head.id).await?; - let parent_links = get_node_links(ctx, None, Some(node.id.as_str())).await?; - for parent_id in parent_links.iter().map(|a| a.parent.clone()) { - if layer_head.path.contains(&parent_id) { continue; } // parent-id is already part of path; ignore, so we don't cause infinite-loop - if node_ids_to_ignore.contains(&parent_id) { continue; } - new_layer_heads.push(Head { id: parent_id.clone(), path: vec![parent_id.clone()].into_iter().chain(layer_head.path.clone().into_iter()).collect() }); - } - } - current_layer_heads = new_layer_heads; - } - Ok(None) -} \ No newline at end of file + // else, find new-layer-heads for next search loop + let mut new_layer_heads = vec![]; + for layer_head in ¤t_layer_heads { + let node = get_node(ctx, &layer_head.id).await?; + let parent_links = get_node_links(ctx, None, Some(node.id.as_str())).await?; + for parent_id in parent_links.iter().map(|a| a.parent.clone()) { + if layer_head.path.contains(&parent_id) { + continue; + } // parent-id is already part of path; ignore, so we don't cause infinite-loop + if node_ids_to_ignore.contains(&parent_id) { + continue; + } + new_layer_heads.push(Head { id: parent_id.clone(), path: vec![parent_id.clone()].into_iter().chain(layer_head.path.clone().into_iter()).collect() }); + } + } + current_layer_heads = new_layer_heads; + } + Ok(None) +} diff --git a/Packages/app-server/src/db/_shared/table_permissions.rs b/Packages/app-server/src/db/_shared/table_permissions.rs index 72ffeed65..579321e6d 100644 --- a/Packages/app-server/src/db/_shared/table_permissions.rs +++ b/Packages/app-server/src/db/_shared/table_permissions.rs @@ -1,3 +1,5 @@ +#![cfg_attr(rustfmt, rustfmt_skip)] + use rust_shared::{utils::{auth::jwt_utils_base::UserJWTData, general_::extensions::ToOwnedV}, anyhow::{bail, anyhow}, anyhow::Error}; use tracing::info; diff --git a/Packages/app-server/src/db/access_policies.rs b/Packages/app-server/src/db/access_policies.rs index ac6ad9b94..abd3d6ddd 100644 --- a/Packages/app-server/src/db/access_policies.rs +++ b/Packages/app-server/src/db/access_policies.rs @@ -1,36 +1,40 @@ use std::panic; -use rust_shared::anyhow::{Error, anyhow}; -use rust_shared::indexmap::IndexMap; -use rust_shared::serde_json::json; +use futures_util::{stream, Stream, TryFutureExt}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{async_graphql, SubError, serde, serde_json, GQLError}; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::indexmap::IndexMap; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::tokio_postgres::{Client}; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::Client; use rust_shared::tokio_postgres::Row; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{async_graphql, serde, serde_json, GQLError, SubError}; -use crate::utils::db::accessors::{get_db_entry, get_db_entries, 
AccessorContext}; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::{QueryFilter, FilterInput}}}; +use crate::utils::db::accessors::{get_db_entries, get_db_entry, AccessorContext}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; +use crate::utils::db::{ + filter::{FilterInput, QueryFilter}, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::access_policies_::_access_policy::AccessPolicy; use super::commands::_command::CanOmit; +#[rustfmt::skip] pub async fn get_access_policy(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "accessPolicies", &Some(json!({ "id": {"equalTo": id} }))).await } pub async fn get_access_policies(ctx: &AccessorContext<'_>, creator_id: Option) -> Result, Error> { - let mut filter_map = serde_json::Map::new(); - if let Some(creator_id) = creator_id { - filter_map.insert("creator".to_owned(), json!({"equalTo": creator_id})); - } - get_db_entries(ctx, "accessPolicies", &Some(JSONValue::Object(filter_map))).await + let mut filter_map = serde_json::Map::new(); + if let Some(creator_id) = creator_id { + filter_map.insert("creator".to_owned(), json!({"equalTo": creator_id})); + } + get_db_entries(ctx, "accessPolicies", &Some(JSONValue::Object(filter_map))).await } pub async fn get_system_access_policy(ctx: &AccessorContext<'_>, name: &str) -> Result { @@ -40,13 +44,13 @@ pub async fn get_system_access_policy(ctx: &AccessorContext<'_>, name: &str) -> Ok(matching_policy) } -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Clone)] pub struct GQLSet_AccessPolicy { pub nodes: Vec } #[Object] impl GQLSet_AccessPolicy { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_AccessPolicy { - fn from(entries: Vec) -> GQLSet_AccessPolicy { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_AccessPolicy { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_AccessPolicy; @@ -61,12 +65,12 @@ impl GQLSet for GQLSet_AccessPolicy { #[derive(Default)] pub struct SubscriptionShard_AccessPolicy; #[Subscription] impl SubscriptionShard_AccessPolicy { - async fn accessPolicies<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "accessPolicies", filter).await - } - async fn accessPolicy<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "accessPolicies", id).await - } + async fn accessPolicies<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "accessPolicies", filter).await + } + async fn accessPolicy<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "accessPolicies", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/access_policies_/_access_policy.rs b/Packages/app-server/src/db/access_policies_/_access_policy.rs index 7543266ce..c90f651b6 100644 --- a/Packages/app-server/src/db/access_policies_/_access_policy.rs +++ b/Packages/app-server/src/db/access_policies_/_access_policy.rs @@ -1,79 +1,82 @@ use std::collections::HashMap; use std::panic; -use rust_shared::anyhow::{Error, anyhow}; -use rust_shared::indexmap::IndexMap; -use rust_shared::serde_json::json; +use futures_util::{stream, Stream, TryFutureExt}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{async_graphql, SubError, serde, serde_json}; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::indexmap::IndexMap; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::tokio_postgres::{Client}; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::Client; use rust_shared::tokio_postgres::Row; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{async_graphql, serde, serde_json, SubError}; -use crate::utils::db::accessors::{get_db_entry, get_db_entries, AccessorContext}; +use crate::utils::db::accessors::{get_db_entries, get_db_entry, AccessorContext}; use crate::utils::db::agql_ext::gql_utils::IndexMapAGQL; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::{QueryFilter, FilterInput}}}; +use crate::utils::db::{ + filter::{FilterInput, QueryFilter}, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use 
super::super::commands::_command::CanOmit; -use super::_permission_set::{PermissionSet, PermissionSetForType, APTable, APAction}; +use super::_permission_set::{APAction, APTable, PermissionSet, PermissionSetForType}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct AccessPolicy { - pub id: ID, - pub creator: String, - pub createdAt: i64, - pub name: String, - pub permissions: PermissionSet, - #[graphql(name = "permissions_userExtends")] - pub permissions_userExtends: IndexMapAGQL, + pub id: ID, + pub creator: String, + pub createdAt: i64, + pub name: String, + pub permissions: PermissionSet, + #[graphql(name = "permissions_userExtends")] + pub permissions_userExtends: IndexMapAGQL, } impl AccessPolicy { - pub fn permission_extends_for_user_and_table(&self, user_id: Option<&str>, table: APTable) -> Option { - let user_id = match user_id { - None => return None, - Some(user) => user, - }; - let permission_set_for_user = match self.permissions_userExtends.get(user_id) { - Some(val) => val, - None => return None, - }; - let permission_set_for_type = permission_set_for_user.for_table(table); - Some(permission_set_for_type) - } + pub fn permission_extends_for_user_and_table(&self, user_id: Option<&str>, table: APTable) -> Option { + let user_id = match user_id { + None => return None, + Some(user) => user, + }; + let permission_set_for_user = match self.permissions_userExtends.get(user_id) { + Some(val) => val, + None => return None, + }; + let permission_set_for_type = permission_set_for_user.for_table(table); + Some(permission_set_for_type) + } } impl From for AccessPolicy { - fn from(row: Row) -> Self { - Self { - id: ID::from(&row.get::<_, String>("id")), - creator: row.get("creator"), - createdAt: row.get("createdAt"), - name: row.get("name"), - permissions: serde_json::from_value(row.get("permissions")).unwrap(), - permissions_userExtends: serde_json::from_value(row.get("permissions_userExtends")).unwrap(), - } - } + fn from(row: Row) -> Self { + Self { + id: ID::from(&row.get::<_, String>("id")), + creator: row.get("creator"), + createdAt: row.get("createdAt"), + name: row.get("name"), + permissions: serde_json::from_value(row.get("permissions")).unwrap(), + permissions_userExtends: serde_json::from_value(row.get("permissions_userExtends")).unwrap(), + } + } } #[derive(InputObject, Clone, Serialize, Deserialize)] pub struct AccessPolicyInput { - pub name: String, - pub permissions: PermissionSet, - #[graphql(name = "permissions_userExtends")] - pub permissions_userExtends: IndexMapAGQL, + pub name: String, + pub permissions: PermissionSet, + #[graphql(name = "permissions_userExtends")] + pub permissions_userExtends: IndexMapAGQL, } #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] pub struct AccessPolicyUpdates { - pub name: CanOmit, - pub permissions: CanOmit, - #[graphql(name = "permissions_userExtends")] - pub permissions_userExtends: CanOmit>, + pub name: CanOmit, + pub permissions: CanOmit, + #[graphql(name = "permissions_userExtends")] + pub permissions_userExtends: CanOmit>, } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/access_policies_/_permission_set.rs b/Packages/app-server/src/db/access_policies_/_permission_set.rs index 83545179c..746ba8bcb 100644 --- a/Packages/app-server/src/db/access_policies_/_permission_set.rs +++ b/Packages/app-server/src/db/access_policies_/_permission_set.rs @@ -1,92 +1,95 @@ use std::panic; -use rust_shared::anyhow::{Error, anyhow}; -use 
rust_shared::indexmap::IndexMap; -use rust_shared::serde_json::json; +use futures_util::{stream, Stream, TryFutureExt}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{async_graphql, SubError, serde, serde_json}; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::indexmap::IndexMap; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::tokio_postgres::{Client}; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::Client; use rust_shared::tokio_postgres::Row; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{async_graphql, serde, serde_json, SubError}; -use crate::utils::db::accessors::{get_db_entry, get_db_entries, AccessorContext}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::{QueryFilter, FilterInput}}}; +use crate::utils::db::accessors::{get_db_entries, get_db_entry, AccessorContext}; +use crate::utils::db::{ + filter::{FilterInput, QueryFilter}, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::super::commands::_command::CanOmit; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] #[graphql(input_name = "PermissionSetInput")] pub struct PermissionSet { - pub terms: PermissionSetForType, - pub medias: PermissionSetForType, - pub maps: PermissionSetForType, - pub nodes: PermissionSetForType, - // most node-related rows use their node's access-policy as their own; node-ratings is an exception, because individual entries can be kept hidden without disrupting collaboration significantly - pub nodeRatings: PermissionSetForType, - pub others: PermissionSetForType, + pub terms: PermissionSetForType, + pub medias: PermissionSetForType, + pub maps: PermissionSetForType, + pub nodes: PermissionSetForType, + // most node-related rows use their node's access-policy as their own; node-ratings is an exception, because individual entries can be kept hidden without disrupting collaboration significantly + pub nodeRatings: PermissionSetForType, + pub others: PermissionSetForType, } impl PermissionSet { - pub fn for_table(&self, table: APTable) -> PermissionSetForType { - match table { - APTable::terms => self.terms.clone(), - APTable::medias => self.medias.clone(), - APTable::maps => self.maps.clone(), - APTable::nodes => self.nodes.clone(), - APTable::nodeRatings => self.nodeRatings.clone(), - APTable::others => self.others.clone(), - } - } + pub fn for_table(&self, table: APTable) -> PermissionSetForType { + match table { + APTable::terms => self.terms.clone(), + APTable::medias => self.medias.clone(), + APTable::maps => self.maps.clone(), + APTable::nodes => self.nodes.clone(), + APTable::nodeRatings => self.nodeRatings.clone(), + APTable::others => self.others.clone(), + } + } } #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] #[graphql(input_name = "PermissionSetForTypeInput")] pub struct PermissionSetForType { - pub 
access: bool, // true = anyone, false = no-one - pub modify: PermitCriteria, - pub delete: PermitCriteria, + pub access: bool, // true = anyone, false = no-one + pub modify: PermitCriteria, + pub delete: PermitCriteria, - // for nodes only - // ========== + // for nodes only + // ========== - // todo: probably replace with more fluid system (eg. where users can always "add children", but where governed maps can easily set a lens entry that hides unapproved children by default) - pub addChild: Option, - pub addPhrasing: Option, - pub vote: Option, + // todo: probably replace with more fluid system (eg. where users can always "add children", but where governed maps can easily set a lens entry that hides unapproved children by default) + pub addChild: Option, + pub addPhrasing: Option, + pub vote: Option, } impl PermissionSetForType { - pub fn as_bool(&self, action: APAction) -> bool { - match action { - APAction::access => self.access, - APAction::modify => self.modify.minApprovals != -1, - APAction::delete => self.delete.minApprovals != -1, - APAction::addChild => self.addChild.as_ref().map(|a| a.minApprovals).unwrap_or(-1) != -1, - APAction::addPhrasing => self.addPhrasing.as_ref().map(|a| a.minApprovals).unwrap_or(-1) != -1, - APAction::vote => self.vote.as_ref().map(|a| a.minApprovals).unwrap_or(-1) != -1, - } - } - /*pub fn as_criteria(&self, action: APAction) -> Result { - match action { - APAction::Access => Ok(PermitCriteria { minApprovals: 0, minApprovalPercent: 0 }), - APAction::Modify => Ok(self.modify.clone()), - APAction::Delete => Ok(self.delete.clone()), - APAction::Vote => Ok(self.vote.clone()), - APAction::AddPhrasing => Ok(self.addPhrasing.clone()), - //APAction::AddChild => Ok(self.addChild.clone()), - } - }*/ + pub fn as_bool(&self, action: APAction) -> bool { + match action { + APAction::access => self.access, + APAction::modify => self.modify.minApprovals != -1, + APAction::delete => self.delete.minApprovals != -1, + APAction::addChild => self.addChild.as_ref().map(|a| a.minApprovals).unwrap_or(-1) != -1, + APAction::addPhrasing => self.addPhrasing.as_ref().map(|a| a.minApprovals).unwrap_or(-1) != -1, + APAction::vote => self.vote.as_ref().map(|a| a.minApprovals).unwrap_or(-1) != -1, + } + } + /*pub fn as_criteria(&self, action: APAction) -> Result { + match action { + APAction::Access => Ok(PermitCriteria { minApprovals: 0, minApprovalPercent: 0 }), + APAction::Modify => Ok(self.modify.clone()), + APAction::Delete => Ok(self.delete.clone()), + APAction::Vote => Ok(self.vote.clone()), + APAction::AddPhrasing => Ok(self.addPhrasing.clone()), + //APAction::AddChild => Ok(self.addChild.clone()), + } + }*/ } #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] #[graphql(input_name = "PermitCriteriaInput")] pub struct PermitCriteria { - pub minApprovals: i64, // 0 = anyone, -1 = no-one - pub minApprovalPercent: i64, // 0 = anyone, -1 = no-one + pub minApprovals: i64, // 0 = anyone, -1 = no-one + pub minApprovalPercent: i64, // 0 = anyone, -1 = no-one } // helper types (used only in this crate, ie. 
not exposed to graphql) @@ -94,46 +97,46 @@ pub struct PermitCriteria { /*#[derive(Debug, Clone, Copy, Deserialize)] pub enum Table { - AccessPolicies, - Shares, - GlobalData, - Users, - - Feedback_Proposals, - Feedback_UserInfos, - - Maps, - Medias, - Nodes, - Terms, - - MapNodeEdits, - NodeLinks, - NodePhrasings, - NodeRatings, - NodeRevisions, - NodeTags, - UserHiddens, - CommandRuns, + AccessPolicies, + Shares, + GlobalData, + Users, + + Feedback_Proposals, + Feedback_UserInfos, + + Maps, + Medias, + Nodes, + Terms, + + MapNodeEdits, + NodeLinks, + NodePhrasings, + NodeRatings, + NodeRevisions, + NodeTags, + UserHiddens, + CommandRuns, }*/ #[derive(Debug, Clone, Copy, Deserialize)] pub enum APTable { - maps, - medias, - terms, - nodes, - nodeRatings, - others, + maps, + medias, + terms, + nodes, + nodeRatings, + others, } #[derive(Debug, Clone, Copy)] pub enum APAction { - access, - modify, - delete, - addChild, - addPhrasing, - vote, + access, + modify, + delete, + addChild, + addPhrasing, + vote, } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/command_runs.rs b/Packages/app-server/src/db/command_runs.rs index c8a760b11..9b262db27 100644 --- a/Packages/app-server/src/db/command_runs.rs +++ b/Packages/app-server/src/db/command_runs.rs @@ -1,13 +1,16 @@ -use rust_shared::{SubError, serde, serde_json, async_graphql, GQLError}; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject}; -use futures_util::{Stream, stream, TryFutureExt}; -use rust_shared::rust_macros::{wrap_slow_macros, wrap_serde_macros, Deserialize_Stub, Serialize_Stub}; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::tokio_postgres::{Row, Client}; +use futures_util::{stream, Stream, TryFutureExt}; +use rust_shared::async_graphql::{Context, Object, OutputType, Schema, SimpleObject, Subscription, ID}; +use rust_shared::rust_macros::{wrap_serde_macros, wrap_slow_macros, Deserialize_Stub, Serialize_Stub}; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::{async_graphql, serde, serde_json, GQLError, SubError}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; -use crate::utils::db::{generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, filter::{QueryFilter, FilterInput}}; +use crate::utils::db::{ + filter::{FilterInput, QueryFilter}, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::_shared::access_policy_target::AccessPolicyTarget; @@ -23,17 +26,17 @@ pub struct Test1 {} /*#[derive(Serialize, Deserialize)] pub struct Test2 {}*/ /*impl serde::Serialize for Test2 { - fn serialize<__S>(&self, __serializer: __S) -> serde::__private::Result<__S::Ok, __S::Error> where __S: serde::Serializer { - Err(serde::ser::Error::custom("This is a placeholder generated by the Serialize_Stub macro, for quick resolution during cargo-check. 
You should not be seeing this.")) - } + fn serialize<__S>(&self, __serializer: __S) -> serde::__private::Result<__S::Ok, __S::Error> where __S: serde::Serializer { + Err(serde::ser::Error::custom("This is a placeholder generated by the Serialize_Stub macro, for quick resolution during cargo-check. You should not be seeing this.")) + } } impl <'de> serde::Deserialize<'de> for Test2 { - fn deserialize<__D>(__deserializer:__D) -> serde::__private::Result where __D: serde::Deserializer<'de> { - Err(serde::de::Error::custom("This is a placeholder generated by the Deserialize_Stub macro, for quick resolution during cargo-check. You should not be seeing this.")) - } + fn deserialize<__D>(__deserializer:__D) -> serde::__private::Result where __D: serde::Deserializer<'de> { + Err(serde::de::Error::custom("This is a placeholder generated by the Deserialize_Stub macro, for quick resolution during cargo-check. You should not be seeing this.")) + } }*/ -wrap_slow_macros!{ +wrap_slow_macros! { /*cached_expand!{ const ce_args: &str = r##" @@ -43,24 +46,24 @@ excludeLinesWith = "#[graphql(name" /*#[derive(SimpleObject, Deserialize)] pub struct RLSTargets { - pub nodes: Vec, + pub nodes: Vec, }*/ #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct CommandRun { - pub id: ID, - pub actor: String, + pub id: ID, + pub actor: String, pub runTime: i64, - #[graphql(name = "public_base")] - pub public_base: bool, - pub commandName: String, - pub commandInput: serde_json::Value, - pub commandResult: serde_json::Value, - - #[graphql(name = "c_involvedNodes")] - pub c_involvedNodes: Vec, - #[graphql(name = "c_accessPolicyTargets")] - pub c_accessPolicyTargets: Vec, + #[graphql(name = "public_base")] + pub public_base: bool, + pub commandName: String, + pub commandInput: serde_json::Value, + pub commandResult: serde_json::Value, + + #[graphql(name = "c_involvedNodes")] + pub c_involvedNodes: Vec, + #[graphql(name = "c_accessPolicyTargets")] + pub c_accessPolicyTargets: Vec, } impl From for CommandRun { fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } @@ -69,8 +72,8 @@ impl From for CommandRun { #[derive(Clone)] pub struct GQLSet_CommandRun { pub nodes: Vec } #[Object] impl GQLSet_CommandRun { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_CommandRun { - fn from(entries: Vec) -> GQLSet_CommandRun { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_CommandRun { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_CommandRun; @@ -85,12 +88,12 @@ impl GQLSet for GQLSet_CommandRun { #[derive(Default)] pub struct SubscriptionShard_CommandRun; #[Subscription] impl SubscriptionShard_CommandRun { - async fn commandRuns<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "commandRuns", filter).await - } - async fn commandRun<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "commandRuns", id).await - } + async fn commandRuns<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "commandRuns", filter).await + } + async fn commandRun<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "commandRuns", id).await + } } -} \ No newline at end of file +} diff --git 
a/Packages/app-server/src/db/commands/_command.rs b/Packages/app-server/src/db/commands/_command.rs index 45b90d016..453114058 100644 --- a/Packages/app-server/src/db/commands/_command.rs +++ b/Packages/app-server/src/db/commands/_command.rs @@ -1,30 +1,42 @@ -use std::iter::{once, empty}; +use std::iter::{empty, once}; -use rust_shared::async_graphql::{MaybeUndefined, self}; +use deadpool_postgres::{Pool, Transaction}; +use futures_util::{Future, TryStreamExt}; +use rust_shared::anyhow::{anyhow, Context, Error}; +use rust_shared::async_graphql::{self, MaybeUndefined}; use rust_shared::indoc::indoc; use rust_shared::itertools::{chain, Itertools}; +use rust_shared::serde::Serialize; +use rust_shared::serde_json::json; use rust_shared::utils::general_::extensions::IteratorV; use rust_shared::utils::type_aliases::{JSONValue, RowData}; use rust_shared::{bytes, serde_json}; -use rust_shared::serde::Serialize; -use futures_util::{TryStreamExt, Future}; -use rust_shared::serde_json::json; -use rust_shared::{tokio_postgres, tokio_postgres::{Row, types::ToSql}}; -use rust_shared::anyhow::{anyhow, Error, Context}; -use deadpool_postgres::{Transaction, Pool}; +use rust_shared::{ + tokio_postgres, + tokio_postgres::{types::ToSql, Row}, +}; use crate::db::users::User; -use crate::utils::db::sql_param::{SQLParamBoxed}; -use crate::utils::{db::{sql_fragment::{SQLFragment, SF}, filter::{json_value_to_guessed_sql_value_param_fragment}, accessors::AccessorContext, sql_ident::SQLIdent, sql_param::{SQLParam, CustomPGSerializer}}, general::{general::{match_cond_to_iter}}}; +use crate::utils::db::sql_param::SQLParamBoxed; +use crate::utils::{ + db::{ + accessors::AccessorContext, + filter::json_value_to_guessed_sql_value_param_fragment, + sql_fragment::{SQLFragment, SF}, + sql_ident::SQLIdent, + sql_param::{CustomPGSerializer, SQLParam}, + }, + general::general::match_cond_to_iter, +}; /*pub struct UserInfo { - pub id: String, + pub id: String, } #[async_trait(?Send)] pub trait Command { - async fn Validate(&self, ctx: &AccessorContext<'_>) -> Result; - fn Commit(&self, ctx: &AccessorContext<'_>) -> Result<(), Error>; + async fn Validate(&self, ctx: &AccessorContext<'_>) -> Result; + fn Commit(&self, ctx: &AccessorContext<'_>) -> Result<(), Error>; }*/ // command helpers @@ -33,36 +45,37 @@ pub trait Command { //pub fn db_set, T2: Serialize>(ctx: &AccessorContext<'_>, path: &[T], value: T2) {} /*pub async fn set_db_entry_by_id(ctx: &AccessorContext<'_>, table_name: String, id: String, new_row: RowData) -> Result, Error> { - set_db_entry_by_filter(ctx, table_name, json!({ - "id": {"equalTo": id} - }), new_row).await + set_db_entry_by_filter(ctx, table_name, json!({ + "id": {"equalTo": id} + }), new_row).await } pub async fn set_db_entry_by_filter(ctx: &AccessorContext<'_>, table_name: String, filter_json: FilterInput, new_row: RowData) -> Result, Error> { - let filter = QueryFilter::from_filter_input_opt(&Some(filter_json))?; - let filters_sql = filter.get_sql_for_application().with_context(|| format!("Got error while getting sql for filter:{filter:?}"))?; - //let filters_sql_str = filters_sql.to_string(); // workaround for difficulty implementing Clone for SQLFragment () - let where_sql = SF::merge(vec![ - SF::lit(" WHERE "), - filters_sql - ]); - [...] 
+ let filter = QueryFilter::from_filter_input_opt(&Some(filter_json))?; + let filters_sql = filter.get_sql_for_application().with_context(|| format!("Got error while getting sql for filter:{filter:?}"))?; + //let filters_sql_str = filters_sql.to_string(); // workaround for difficulty implementing Clone for SQLFragment () + let where_sql = SF::merge(vec![ + SF::lit(" WHERE "), + filters_sql + ]); + [...] }*/ pub fn to_row_data(data: impl Serialize) -> Result { - let as_json = serde_json::to_value(data)?; - let as_map = as_json.as_object().ok_or(anyhow!("The passed data did not serialize to a json object/map!"))?; - Ok(as_map.to_owned()) + let as_json = serde_json::to_value(data)?; + let as_map = as_json.as_object().ok_or(anyhow!("The passed data did not serialize to a json object/map!"))?; + Ok(as_map.to_owned()) } pub async fn insert_db_entry_by_id_for_struct(ctx: &AccessorContext<'_>, table_name: String, id: String, new_row_struct: T) -> Result, Error> { - let struct_as_row_data = to_row_data(new_row_struct)?; - set_db_entry_by_id(ctx, table_name, id, struct_as_row_data, false).await + let struct_as_row_data = to_row_data(new_row_struct)?; + set_db_entry_by_id(ctx, table_name, id, struct_as_row_data, false).await } pub async fn upsert_db_entry_by_id_for_struct(ctx: &AccessorContext<'_>, table_name: String, id: String, new_row_struct: T) -> Result, Error> { - let struct_as_row_data = to_row_data(new_row_struct)?; - set_db_entry_by_id(ctx, table_name, id, struct_as_row_data, true).await + let struct_as_row_data = to_row_data(new_row_struct)?; + set_db_entry_by_id(ctx, table_name, id, struct_as_row_data, true).await } +#[rustfmt::skip] pub async fn set_db_entry_by_id(ctx: &AccessorContext<'_>, table_name: String, id: String, new_row: RowData, allow_update: bool) -> Result, Error> { // todo: maybe remove this (it's not really necessary to pass the id in separately from the row-data) let id_from_row_data = new_row.get("id").ok_or(anyhow!("No \"id\" field in entry!"))? @@ -139,8 +152,9 @@ pub async fn set_db_entry_by_id(ctx: &AccessorContext<'_>, table_name: String, i } pub async fn delete_db_entry_by_id(ctx: &AccessorContext<'_>, table_name: String, id: String) -> Result, Error> { - Ok(delete_db_entry_by_field_value(ctx, table_name, "id".to_owned(), Box::new(id)).await?) + Ok(delete_db_entry_by_field_value(ctx, table_name, "id".to_owned(), Box::new(id)).await?) } +#[rustfmt::skip] pub async fn delete_db_entry_by_field_value(ctx: &AccessorContext<'_>, table_name: String, field_name: String, field_value: SQLParamBoxed) -> Result, Error> { let mut final_query = SF::new("DELETE FROM $I WHERE $I = $V RETURNING *", vec![ SQLIdent::new_boxed(table_name.clone())?, @@ -162,8 +176,9 @@ pub async fn delete_db_entry_by_field_value(ctx: &AccessorContext<'_>, table_nam #[derive(Debug)] pub struct ToSqlWrapper { - pub data: Box, + pub data: Box, } +#[rustfmt::skip] impl ToSql for ToSqlWrapper { fn accepts(_ty: &tokio_postgres::types::Type) -> bool where Self: Sized { panic!("Call to_sql_checked instead."); @@ -182,11 +197,11 @@ impl ToSql for ToSqlWrapper { /// This should be used only in cases where more robust approaches would be painful to implement, since it reduces type-safety of the field a bit. (ie. field might now be "set", but left as something invalid) /// * `func_that_will_provide_value`: Indicates which function will end up setting the value. 
(used to provide greater clarity) pub fn tbd(func_that_will_provide_value: &str) -> String { - format!("") + format!("") } pub fn gql_placeholder() -> String { - "Do not request this field; it's here transiently merely to satisfy graphql (see: https://github.com/graphql/graphql-spec/issues/568). Instead, request the hidden \"__typename\" field, as that will always exist.".to_owned() + "Do not request this field; it's here transiently merely to satisfy graphql (see: https://github.com/graphql/graphql-spec/issues/568). Instead, request the hidden \"__typename\" field, as that will always exist.".to_owned() } /*pub type FieldInit = T; @@ -218,15 +233,15 @@ pub type CanNullOrOmit = MaybeUndefined; // ========== /*pub fn init_field_omittable(val_in_init: CanOmit, default_val: T) -> T { - match val_in_init { - None => default_val, - Some(val) => val, - } + match val_in_init { + None => default_val, + Some(val) => val, + } }*/ pub fn init_field_of_extras(val_in_init: CanOmit, start_val: JSONValue, locked_subfields: Vec<&str>) -> Result { - // atm, the checks work the same in both cases - update_field_of_extras(val_in_init, start_val, locked_subfields) + // atm, the checks work the same in both cases + update_field_of_extras(val_in_init, start_val, locked_subfields) } // update helpers @@ -234,46 +249,46 @@ pub fn init_field_of_extras(val_in_init: CanOmit, start_val: JSONValu /// Always use this to update non-nullable fields within "update_xxx" command-functions. (exception: a struct's `extras` field, which should use `update_field_of_extras`) pub fn update_field(val_in_updates: CanOmit, old_val: T) -> T { - match val_in_updates { - None => old_val, - Some(val) => val, - } + match val_in_updates { + None => old_val, + Some(val) => val, + } } /// Always use this to update nullable fields within "update_xxx" command-functions. (exception: a struct's `extras` field, which should use `update_field_of_extras`) pub fn update_field_nullable(val_in_updates: CanNullOrOmit, old_val: Option) -> Option { - match val_in_updates { - MaybeUndefined::Undefined => old_val, - MaybeUndefined::Null => None, - MaybeUndefined::Value(val) => Some(val), - } + match val_in_updates { + MaybeUndefined::Undefined => old_val, + MaybeUndefined::Null => None, + MaybeUndefined::Value(val) => Some(val), + } } /// Variant of `update_field` for use with the `extras` field of db-structs, allowing easy updating of its data through the standard `update_x` commands, while preserving locked subfields. pub fn update_field_of_extras(val_in_updates: CanOmit, old_val: JSONValue, locked_subfields: Vec<&str>) -> Result { - let mut result = match val_in_updates { - None => old_val.clone(), - Some(val) => val, - }; - - let old_val_map = old_val.as_object().ok_or(anyhow!("The old-value for the \"extras\" field was not a json map/object!"))?; - let result_map = result.as_object_mut().ok_or(anyhow!("The final value for the \"extras\" field was somehow not a json map/object!"))?; - for key in locked_subfields { - let subfield_old_val = old_val_map.get(key).clone(); - let subfield_new_val = result_map.get(key).clone(); - - // throw error if user is trying to update the locked subfield - if format!("{:?}", subfield_old_val) != format!("{:?}", subfield_new_val) { - return Err(anyhow!("The `extras->{key}` jsonb-subfield cannot be updated from this generic update command; look for a command that deals with updating it specifically. 
@oldVal:{subfield_old_val:?} @newVal:{subfield_new_val:?}")); - } - - // in case the stringification above fails to catch a change (eg. flawed Debug implementation), make certain that it doesn't go through, by always resetting the subfield to its old value - match old_val.get(key) { - None => result_map.remove(key), - Some(val) => result_map.insert(key.to_owned(), val.clone()), - }; - } - - Ok(result) + let mut result = match val_in_updates { + None => old_val.clone(), + Some(val) => val, + }; + + let old_val_map = old_val.as_object().ok_or(anyhow!("The old-value for the \"extras\" field was not a json map/object!"))?; + let result_map = result.as_object_mut().ok_or(anyhow!("The final value for the \"extras\" field was somehow not a json map/object!"))?; + for key in locked_subfields { + let subfield_old_val = old_val_map.get(key).clone(); + let subfield_new_val = result_map.get(key).clone(); + + // throw error if user is trying to update the locked subfield + if format!("{:?}", subfield_old_val) != format!("{:?}", subfield_new_val) { + return Err(anyhow!("The `extras->{key}` jsonb-subfield cannot be updated from this generic update command; look for a command that deals with updating it specifically. @oldVal:{subfield_old_val:?} @newVal:{subfield_new_val:?}")); + } + + // in case the stringification above fails to catch a change (eg. flawed Debug implementation), make certain that it doesn't go through, by always resetting the subfield to its old value + match old_val.get(key) { + None => result_map.remove(key), + Some(val) => result_map.insert(key.to_owned(), val.clone()), + }; + } + + Ok(result) } // others @@ -282,27 +297,27 @@ pub fn update_field_of_extras(val_in_updates: CanOmit, old_val: JSONV // Usage example: `command_boilerplate!(gql_ctx, input, only_validate, delete_map);` // Note: I've tried creating two variants of this (a pair of pre and post macros, and a regular function); if wanted for reference, view git history. macro_rules! command_boilerplate { - ($gql_ctx:ident, $input:ident, $only_validate:ident, $command_impl_func:ident) => { - let mut anchor = $crate::utils::general::data_anchor::DataAnchorFor1::empty(); // holds pg-client + ($gql_ctx:ident, $input:ident, $only_validate:ident, $command_impl_func:ident) => { + let mut anchor = $crate::utils::general::data_anchor::DataAnchorFor1::empty(); // holds pg-client let ctx = $crate::utils::db::accessors::AccessorContext::new_write_advanced(&mut anchor, $gql_ctx, false, $only_validate).await?; let actor = $crate::db::general::sign_in_::jwt_utils::get_user_info_from_gql_ctx($gql_ctx, &ctx).await?; - let input_json = serde_json::to_string(&$input)?; + let input_json = serde_json::to_string(&$input)?; let result = $command_impl_func(&ctx, &actor, true, $input, Default::default()).await?; if $only_validate.unwrap_or(false) { - // before rolling back, ensure that none of the constraints are violated at this point (we must check manually, since commit is never called) - $crate::utils::db::accessors::trigger_deferred_constraints(&ctx.tx).await?; - - // the transaction would be rolled-back automatically after this blocks ends, but let's call rollback() explicitly just to be clear/certain - ctx.tx.rollback().await?; - tracing::info!("Command completed a \"validation only\" run without hitting errors. @Result:{:?} @Input:{} ", result, input_json); - } else { - ctx.tx.commit().await?; - tracing::info!("Command executed. 
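// A minimal usage sketch for the update helpers defined above, using an illustrative
// OldRow type and field names that are NOT from this codebase (assumption: the helpers
// keep their shown signatures, update_field(Option<T>, T) -> T and
// update_field_nullable(MaybeUndefined<T>, Option<T>) -> Option<T>).
#[cfg(test)]
mod update_helpers_usage_sketch {
	use rust_shared::async_graphql::MaybeUndefined;
	use super::{update_field, update_field_nullable};

	// hypothetical row shape, for illustration only
	struct OldRow { name: String, note: Option<String> }

	#[test]
	fn merge_updates_into_old_row() {
		let old = OldRow { name: "old name".to_owned(), note: Some("old note".to_owned()) };
		// omitted -> keep the old value; Null -> clear the nullable field; Value -> overwrite
		let new_name = update_field(None, old.name); // stays "old name"
		let new_note = update_field_nullable(MaybeUndefined::Null, old.note); // becomes None
		assert_eq!(new_name, "old name");
		assert_eq!(new_note, None);
	}
}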
@Result:{:?} @Input:{}", result, input_json); - } + // before rolling back, ensure that none of the constraints are violated at this point (we must check manually, since commit is never called) + $crate::utils::db::accessors::trigger_deferred_constraints(&ctx.tx).await?; + + // the transaction would be rolled-back automatically after this blocks ends, but let's call rollback() explicitly just to be clear/certain + ctx.tx.rollback().await?; + tracing::info!("Command completed a \"validation only\" run without hitting errors. @Result:{:?} @Input:{} ", result, input_json); + } else { + ctx.tx.commit().await?; + tracing::info!("Command executed. @Result:{:?} @Input:{}", result, input_json); + } return Ok(result); - } + }; } pub(crate) use command_boilerplate; @@ -310,14 +325,14 @@ pub type NoExtras = bool; // todo: probably change command-params from having `actor: &User, is_root: bool` to having `cmd_ctx: CommandContext<'_>` /*pub struct CommandContext<'a> { - actor: &'a User, - is_root: bool, + actor: &'a User, + is_root: bool, } impl<'a> CommandContext<'a> { - pub fn child(&self) -> Self { - Self { - actor: self.actor, - is_root: false, - } - } -}*/ \ No newline at end of file + pub fn child(&self) -> Self { + Self { + actor: self.actor, + is_root: false, + } + } +}*/ diff --git a/Packages/app-server/src/db/commands/_shared/add_node.rs b/Packages/app-server/src/db/commands/_shared/add_node.rs index b1c96914e..9be6afad2 100644 --- a/Packages/app-server/src/db/commands/_shared/add_node.rs +++ b/Packages/app-server/src/db/commands/_shared/add_node.rs @@ -14,13 +14,13 @@ use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::_shared::common_errors::err_should_be_null; -use crate::db::commands::_command::{command_boilerplate, tbd, upsert_db_entry_by_id_for_struct, update_field_of_extras, init_field_of_extras}; +use crate::db::commands::_command::{command_boilerplate, init_field_of_extras, tbd, update_field_of_extras, upsert_db_entry_by_id_for_struct}; use crate::db::commands::_shared::increment_edit_counts::increment_edit_counts_if_valid; use crate::db::commands::add_node_revision::{self, add_node_revision, AddNodeRevisionExtras, AddNodeRevisionInput, AddNodeRevisionResult}; use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::map_node_edits::{ChangeType, MapNodeEdit}; use crate::db::node_revisions::{NodeRevision, NodeRevisionInput}; -use crate::db::nodes_::_node::{Node, NodeInput, node_extras_locked_subfields}; +use crate::db::nodes_::_node::{node_extras_locked_subfields, Node, NodeInput}; use crate::db::nodes_::_node_type::NodeType; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; @@ -29,55 +29,50 @@ use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; #[derive(/*SimpleObject,*/ Debug)] pub struct AddNodeResult { - pub nodeID: String, - pub revisionID: String, + pub nodeID: String, + pub revisionID: String, } /*#[derive(Default)] pub struct AddNodeExtras { - pub id_override: Option, + pub id_override: Option, }*/ // in the future, this will very likely be a command present in the graphql api; until the use-case comes up though, we'll keep it as just a function pub async fn add_node(ctx: &AccessorContext<'_>, actor: &User, node_: NodeInput, node_id_override: Option, mut revision: NodeRevisionInput) -> Result { - let revision_id = new_uuid_v4_as_b64(); - let node = Node { - // set by server - id: ID(node_id_override.unwrap_or(new_uuid_v4_as_b64())), - creator: 
actor.id.to_string(), - createdAt: time_since_epoch_ms_i64(), - // pass-through - accessPolicy: node_.accessPolicy, - r#type: node_.r#type, - rootNodeForMap: node_.rootNodeForMap, - c_currentRevision: revision_id.clone(), - multiPremiseArgument: node_.multiPremiseArgument, - argumentType: node_.argumentType, - extras: init_field_of_extras(node_.extras, json!({}), node_extras_locked_subfields())?, // "extras" fields use special handling - }; + let revision_id = new_uuid_v4_as_b64(); + let node = Node { + // set by server + id: ID(node_id_override.unwrap_or(new_uuid_v4_as_b64())), + creator: actor.id.to_string(), + createdAt: time_since_epoch_ms_i64(), + // pass-through + accessPolicy: node_.accessPolicy, + r#type: node_.r#type, + rootNodeForMap: node_.rootNodeForMap, + c_currentRevision: revision_id.clone(), + multiPremiseArgument: node_.multiPremiseArgument, + argumentType: node_.argumentType, + extras: init_field_of_extras(node_.extras, json!({}), node_extras_locked_subfields())?, // "extras" fields use special handling + }; - // validate the node, then add it to db - validate_node(&node)?; - upsert_db_entry_by_id_for_struct(&ctx, "nodes".to_owned(), node.id.to_string(), node.clone()).await?; + // validate the node, then add it to db + validate_node(&node)?; + upsert_db_entry_by_id_for_struct(&ctx, "nodes".to_owned(), node.id.to_string(), node.clone()).await?; - // add node-revision to db - ensure!(revision.node.is_none(), err_should_be_null("revision.node").to_string()); - revision.node = Some(node.id.to_string()); - let add_rev_result = add_node_revision( - ctx, actor, false, - AddNodeRevisionInput { mapID: None, revision }, - AddNodeRevisionExtras { id_override: Some(revision_id.clone()) } - ).await?; - ensure!(add_rev_result.id == revision_id, "The revision-id returned by add_node_revision didn't match the revision-id-override supplied to it!"); + // add node-revision to db + ensure!(revision.node.is_none(), err_should_be_null("revision.node").to_string()); + revision.node = Some(node.id.to_string()); + let add_rev_result = add_node_revision(ctx, actor, false, AddNodeRevisionInput { mapID: None, revision }, AddNodeRevisionExtras { id_override: Some(revision_id.clone()) }).await?; + ensure!(add_rev_result.id == revision_id, "The revision-id returned by add_node_revision didn't match the revision-id-override supplied to it!"); - Ok(AddNodeResult { - nodeID: node.id.to_string(), - revisionID: revision_id.to_string(), - }) + Ok(AddNodeResult { nodeID: node.id.to_string(), revisionID: revision_id.to_string() }) } pub fn validate_node(node: &Node) -> Result<(), Error> { - if node.multiPremiseArgument.is_some() { ensure!(node.r#type == NodeType::argument); } + if node.multiPremiseArgument.is_some() { + ensure!(node.r#type == NodeType::argument); + } - Ok(()) -} \ No newline at end of file + Ok(()) +} diff --git a/Packages/app-server/src/db/commands/_shared/increment_edit_counts.rs b/Packages/app-server/src/db/commands/_shared/increment_edit_counts.rs index 77575c45f..67a909b6a 100644 --- a/Packages/app-server/src/db/commands/_shared/increment_edit_counts.rs +++ b/Packages/app-server/src/db/commands/_shared/increment_edit_counts.rs @@ -1,40 +1,39 @@ use rust_shared::{anyhow::Error, utils::time::time_since_epoch_ms_i64}; -use crate::{utils::db::accessors::AccessorContext, db::{maps::{get_map, Map}, commands::_command::upsert_db_entry_by_id_for_struct, users::{User, get_user}}}; +use crate::{ + db::{ + commands::_command::upsert_db_entry_by_id_for_struct, + maps::{get_map, Map}, + 
users::{get_user, User}, + }, + utils::db::accessors::AccessorContext, +}; pub async fn increment_edit_counts_if_valid(ctx: &AccessorContext<'_>, user: Option<&User>, map_id: Option, is_root: bool) -> Result<(), Error> { - if !is_root { - return Ok(()); - } - if let Some(user) = user { - increment_user_edits(ctx, user.id.as_str()).await?; - } - if let Some(map_id) = map_id { - increment_map_edits(ctx, &map_id).await?; - } - Ok(()) + if !is_root { + return Ok(()); + } + if let Some(user) = user { + increment_user_edits(ctx, user.id.as_str()).await?; + } + if let Some(map_id) = map_id { + increment_map_edits(ctx, &map_id).await?; + } + Ok(()) } pub async fn increment_user_edits(ctx: &AccessorContext<'_>, user_id: &str) -> Result<(), Error> { - let old_data = get_user(ctx, &user_id).await?; - let new_data = User { - edits: old_data.edits + 1, - lastEditAt: Some(time_since_epoch_ms_i64()), - ..old_data - }; + let old_data = get_user(ctx, &user_id).await?; + let new_data = User { edits: old_data.edits + 1, lastEditAt: Some(time_since_epoch_ms_i64()), ..old_data }; - upsert_db_entry_by_id_for_struct(&ctx, "users".to_owned(), user_id.to_string(), new_data).await?; + upsert_db_entry_by_id_for_struct(&ctx, "users".to_owned(), user_id.to_string(), new_data).await?; - Ok(()) + Ok(()) } pub async fn increment_map_edits(ctx: &AccessorContext<'_>, map_id: &str) -> Result<(), Error> { - let old_data = get_map(ctx, &map_id).await?; - let new_data = Map { - edits: old_data.edits + 1, - editedAt: Some(time_since_epoch_ms_i64()), - ..old_data - }; + let old_data = get_map(ctx, &map_id).await?; + let new_data = Map { edits: old_data.edits + 1, editedAt: Some(time_since_epoch_ms_i64()), ..old_data }; - upsert_db_entry_by_id_for_struct(&ctx, "maps".to_owned(), map_id.to_string(), new_data).await?; + upsert_db_entry_by_id_for_struct(&ctx, "maps".to_owned(), map_id.to_string(), new_data).await?; - Ok(()) -} \ No newline at end of file + Ok(()) +} diff --git a/Packages/app-server/src/db/commands/_shared/jsonb_utils.rs b/Packages/app-server/src/db/commands/_shared/jsonb_utils.rs index 055a2451d..dec2bdd1f 100644 --- a/Packages/app-server/src/db/commands/_shared/jsonb_utils.rs +++ b/Packages/app-server/src/db/commands/_shared/jsonb_utils.rs @@ -1,127 +1,143 @@ use std::iter::empty; -use deadpool_postgres::{Transaction, tokio_postgres::types::ToSql}; -use rust_shared::{utils::{type_aliases::JSONValue, general_::extensions::IteratorV}, itertools::{chain, Itertools}, anyhow::{anyhow, Error, Context}, serde_json}; +use deadpool_postgres::{tokio_postgres::types::ToSql, Transaction}; +use rust_shared::{ + anyhow::{anyhow, Context, Error}, + itertools::{chain, Itertools}, + serde_json, + utils::{general_::extensions::IteratorV, type_aliases::JSONValue}, +}; -use crate::{utils::{db::{sql_fragment::{SF, SQLFragment}, sql_param::{SQLParamBoxed, CustomPGSerializer}, sql_ident::SQLIdent}, general::{general::match_cond_to_iter}}, db::commands::_command::ToSqlWrapper}; +use crate::{ + db::commands::_command::ToSqlWrapper, + utils::{ + db::{ + sql_fragment::{SQLFragment, SF}, + sql_ident::SQLIdent, + sql_param::{CustomPGSerializer, SQLParamBoxed}, + }, + general::general::match_cond_to_iter, + }, +}; pub async fn jsonb_set(tx: &Transaction<'_>, table: &str, id: &str, field: &str, jsonb_path: Vec, value: Option) -> Result<(), Error> { - let mut sql_fragment = jsonb_set_to_sql_fragment(table, id, field, jsonb_path, value)?; + let mut sql_fragment = jsonb_set_to_sql_fragment(table, id, field, jsonb_path, value)?; - let (sql_text, 
params) = sql_fragment.into_query_args()?; + let (sql_text, params) = sql_fragment.into_query_args()?; - let debug_info_str = format!("@sqlText:{}\n@params:{:?}", &sql_text, ¶ms); - // wrap params into boxes, then refs, to satisfy ToSql constraint generically; not ideal, but best approach known atm; see: https://github.com/sfackler/rust-postgres/issues/712 - let params_wrapped: Vec = params.into_iter().map(|a| ToSqlWrapper { data: a }).collect(); - let params_as_refs: Vec<&(dyn ToSql + Sync)> = params_wrapped.iter().map(|x| x as &(dyn ToSql + Sync)).collect(); + let debug_info_str = format!("@sqlText:{}\n@params:{:?}", &sql_text, ¶ms); + // wrap params into boxes, then refs, to satisfy ToSql constraint generically; not ideal, but best approach known atm; see: https://github.com/sfackler/rust-postgres/issues/712 + let params_wrapped: Vec = params.into_iter().map(|a| ToSqlWrapper { data: a }).collect(); + let params_as_refs: Vec<&(dyn ToSql + Sync)> = params_wrapped.iter().map(|x| x as &(dyn ToSql + Sync)).collect(); - tx.execute_raw(&sql_text, params_as_refs).await - //.map_err(|err| anyhow!("Got error while running query, for call to jsonb-set. @error:{}\n{}", err.to_string(), &debug_info_str))?; - .with_context(|| anyhow!("Got error while running query, for call to jsonb-set. {}", &debug_info_str))?; + tx.execute_raw(&sql_text, params_as_refs) + .await + //.map_err(|err| anyhow!("Got error while running query, for call to jsonb-set. @error:{}\n{}", err.to_string(), &debug_info_str))?; + .with_context(|| anyhow!("Got error while running query, for call to jsonb-set. {}", &debug_info_str))?; - Ok(()) + Ok(()) } +#[rustfmt::skip] pub fn jsonb_set_to_sql_fragment(table: &str, id: &str, field: &str, jsonb_path: Vec, jsonb_value: Option) -> Result { - Ok(match jsonb_value { - Some(jsonb_value) => { - // the value for this code-path will always be within a JSONB cell, so just json-serialize it - //let value_serialized_as_json_string = serde_json::to_string(&value)?; - - // approach for safely setting the value of a "deeply nested" in-jsonb field (see here: https://stackoverflow.com/a/69534368) - SF::merge_lines(vec![ - SF::new("UPDATE $I SET $I =", vec![ - SQLIdent::new_boxed(table.to_owned())?, - SQLIdent::new_boxed(field.to_owned())?, - ]), - SF::merge_lines( - jsonb_path.iter().enumerate().map(|(i, subfield)| -> Result { - let prior_path_segments = jsonb_path.iter().take(i).cloned().collect_vec(); - Ok(SF::merge(vec![ - SF::new("jsonb_set(COALESCE($I", vec![SQLIdent::new_boxed(field.to_owned())?]), - SF::merge( - prior_path_segments.iter().map(|subfield| { - SF::merge(vec![ - //SF::lit("->'"), - SF::lit("->"), - SF::value(subfield.to_owned()), - //SF::lit("'"), - ]) - }).collect_vec() - ), - //SF::new(", '{}'), '{$I}',", vec![SQLIdent::new_boxed(subfield.to_owned())?]), - SF::new(", '{}'), array[$V],", vec![Box::new(subfield.to_owned())]), - ])) - }).try_collect2::>()? 
- ), - //SF::value(Box::new(value_serialized_as_json_string)), - //SF::value(CustomPGSerializer::new("::jsonb".to_owned(), value_serialized_as_json_string)), - //SF::value(CustomPGSerializer::new("::jsonb".to_owned(), jsonb_value.clone())), - SF::value(jsonb_value), - SF::merge( - jsonb_path.iter().map(|_| SF::lit(")")).collect_vec() - ), - SF::new("WHERE id = $V", vec![Box::new(id.to_owned())]), - ]) - }, - None => { - /*let jsonb_path_quoted = jsonb_path.iter().map(|a| format!("'{a}'")).collect_vec(); - let query = format!("UPDATE \"{table}\" SET \"{field}\" = \"{field}\" #- "); - tx.query(&query, &[]).await?;*/ + Ok(match jsonb_value { + Some(jsonb_value) => { + // the value for this code-path will always be within a JSONB cell, so just json-serialize it + //let value_serialized_as_json_string = serde_json::to_string(&value)?; + + // approach for safely setting the value of a "deeply nested" in-jsonb field (see here: https://stackoverflow.com/a/69534368) + SF::merge_lines(vec![ + SF::new("UPDATE $I SET $I =", vec![ + SQLIdent::new_boxed(table.to_owned())?, + SQLIdent::new_boxed(field.to_owned())?, + ]), + SF::merge_lines( + jsonb_path.iter().enumerate().map(|(i, subfield)| -> Result { + let prior_path_segments = jsonb_path.iter().take(i).cloned().collect_vec(); + Ok(SF::merge(vec![ + SF::new("jsonb_set(COALESCE($I", vec![SQLIdent::new_boxed(field.to_owned())?]), + SF::merge( + prior_path_segments.iter().map(|subfield| { + SF::merge(vec![ + //SF::lit("->'"), + SF::lit("->"), + SF::value(subfield.to_owned()), + //SF::lit("'"), + ]) + }).collect_vec() + ), + //SF::new(", '{}'), '{$I}',", vec![SQLIdent::new_boxed(subfield.to_owned())?]), + SF::new(", '{}'), array[$V],", vec![Box::new(subfield.to_owned())]), + ])) + }).try_collect2::>()? + ), + //SF::value(Box::new(value_serialized_as_json_string)), + //SF::value(CustomPGSerializer::new("::jsonb".to_owned(), value_serialized_as_json_string)), + //SF::value(CustomPGSerializer::new("::jsonb".to_owned(), jsonb_value.clone())), + SF::value(jsonb_value), + SF::merge( + jsonb_path.iter().map(|_| SF::lit(")")).collect_vec() + ), + SF::new("WHERE id = $V", vec![Box::new(id.to_owned())]), + ]) + }, + None => { + /*let jsonb_path_quoted = jsonb_path.iter().map(|a| format!("'{a}'")).collect_vec(); + let query = format!("UPDATE \"{table}\" SET \"{field}\" = \"{field}\" #- "); + tx.query(&query, &[]).await?;*/ - //let mut final_query = SF::new(format!("UPDATE $I SET $I = $I #- array[{jsonb_path_quoted}] WHERE id = $V"), vec![ - SF::merge(vec![ - SF::new("UPDATE $I SET $I = $I #- array[", vec![ - SQLIdent::new_boxed(table.to_owned())?, - SQLIdent::new_boxed(field.to_owned())?, - SQLIdent::new_boxed(field.to_owned())?, - ]), - SF::merge( - /*jsonb_path.iter().enumerate().map(|(i, subfield)| -> Result { - Ok(SF::merge(chain!( - match_cond_to_iter(i > 0, SF::lit(", ").once(), empty()), - //Some(SF::ident(SQLIdent::new(subfield.to_owned())?)), - Some(SF::value(subfield.to_owned())), - ).collect_vec())) - }).try_collect2::>()?,*/ - jsonb_path.iter().enumerate().map(|(i, subfield)| { - SF::merge(vec![ - if i > 0 { SF::lit(", ") } else { SF::lit("") }, - SF::value(subfield.to_owned()), - ]) - }).collect_vec(), - ), - SF::new("] WHERE id = $V", vec![Box::new(id.to_owned())]), - ]) - }, - }) + //let mut final_query = SF::new(format!("UPDATE $I SET $I = $I #- array[{jsonb_path_quoted}] WHERE id = $V"), vec![ + SF::merge(vec![ + SF::new("UPDATE $I SET $I = $I #- array[", vec![ + SQLIdent::new_boxed(table.to_owned())?, + SQLIdent::new_boxed(field.to_owned())?, + 
SQLIdent::new_boxed(field.to_owned())?, + ]), + SF::merge( + /*jsonb_path.iter().enumerate().map(|(i, subfield)| -> Result { + Ok(SF::merge(chain!( + match_cond_to_iter(i > 0, SF::lit(", ").once(), empty()), + //Some(SF::ident(SQLIdent::new(subfield.to_owned())?)), + Some(SF::value(subfield.to_owned())), + ).collect_vec())) + }).try_collect2::>()?,*/ + jsonb_path.iter().enumerate().map(|(i, subfield)| { + SF::merge(vec![ + if i > 0 { SF::lit(", ") } else { SF::lit("") }, + SF::value(subfield.to_owned()), + ]) + }).collect_vec(), + ), + SF::new("] WHERE id = $V", vec![Box::new(id.to_owned())]), + ]) + }, + }) } #[cfg(test)] mod tests { - use rust_shared::{serde_json::json, indoc::indoc}; + use rust_shared::{indoc::indoc, serde_json::json}; - use crate::db::commands::_shared::jsonb_utils::jsonb_set_to_sql_fragment; + use crate::db::commands::_shared::jsonb_utils::jsonb_set_to_sql_fragment; - // run in PowerShell using: `cargo test jsonb_set -- --nocapture` - #[test] - fn jsonb_set() { - let mut sql = jsonb_set_to_sql_fragment( - "myTable", "myRowID", "myField", - vec!["depth1".to_owned(), "depth2".to_owned(), "depth3".to_owned(), "depth4".to_owned()], - Some(json!("newValue")), - ).unwrap(); - // meant to match with example shown here: https://stackoverflow.com/a/69534368 - /*assert_eq!(sql.sql_text, indoc!(r#" - UPDATE $I SET $I = - jsonb_set(COALESCE($I, '{}'), array[$V], - jsonb_set(COALESCE($I->'depth1', '{}'), array[$V], - jsonb_set(COALESCE($I->'depth1'->'depth2', '{}'), array[$V], - jsonb_set(COALESCE($I->'depth1'->'depth2'->'depth3', '{}'), array[$V], - $V - )))) WHERE $I = $V - "#));*/ - assert_eq!(sql.sql_text, indoc!(r#" + // run in PowerShell using: `cargo test jsonb_set -- --nocapture` + #[test] + fn jsonb_set() { + let mut sql = jsonb_set_to_sql_fragment("myTable", "myRowID", "myField", vec!["depth1".to_owned(), "depth2".to_owned(), "depth3".to_owned(), "depth4".to_owned()], Some(json!("newValue"))).unwrap(); + // meant to match with example shown here: https://stackoverflow.com/a/69534368 + /*assert_eq!(sql.sql_text, indoc!(r#" + UPDATE $I SET $I = + jsonb_set(COALESCE($I, '{}'), array[$V], + jsonb_set(COALESCE($I->'depth1', '{}'), array[$V], + jsonb_set(COALESCE($I->'depth1'->'depth2', '{}'), array[$V], + jsonb_set(COALESCE($I->'depth1'->'depth2'->'depth3', '{}'), array[$V], + $V + )))) WHERE $I = $V + "#));*/ + assert_eq!( + sql.sql_text, + indoc!( + r#" UPDATE $I SET $I = jsonb_set(COALESCE($I, '{}'), array[$V], jsonb_set(COALESCE($I->$V, '{}'), array[$V], @@ -130,9 +146,15 @@ mod tests { $V )))) WHERE id = $V - "#).trim_end()); - let (sql_text_final, params) = sql.into_query_args().unwrap(); - assert_eq!(sql_text_final, indoc!(r#" + "# + ) + .trim_end() + ); + let (sql_text_final, params) = sql.into_query_args().unwrap(); + assert_eq!( + sql_text_final, + indoc!( + r#" UPDATE "myTable" SET "myField" = jsonb_set(COALESCE("myField", '{}'), array[$1::text], jsonb_set(COALESCE("myField"->$2::text, '{}'), array[$3::text], @@ -141,12 +163,15 @@ mod tests { $11::jsonb )))) WHERE id = $12::text - "#).trim_end()); - assert_eq!(params.len(), 12); - } + "# + ) + .trim_end() + ); + assert_eq!(params.len(), 12); + } - // run in PowerShell using: `cargo test jsonb_delete -- --nocapture` - /*#[test] - fn jsonb_delete() { - }*/ -} \ No newline at end of file + // run in PowerShell using: `cargo test jsonb_delete -- --nocapture` + /*#[test] + fn jsonb_delete() { + }*/ +} diff --git a/Packages/app-server/src/db/commands/_shared/rating_processor.rs 
b/Packages/app-server/src/db/commands/_shared/rating_processor.rs index 93277fecb..0d00fdd36 100644 --- a/Packages/app-server/src/db/commands/_shared/rating_processor.rs +++ b/Packages/app-server/src/db/commands/_shared/rating_processor.rs @@ -1,87 +1,96 @@ use std::collections::HashSet; -use rust_shared::{anyhow::{anyhow, Error, bail}, db_constants::SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME, utils::{time::time_since_epoch_ms_i64, db::uuid::new_uuid_v4_as_b64_id}, itertools::Itertools}; +use rust_shared::{ + anyhow::{anyhow, bail, Error}, + db_constants::SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME, + itertools::Itertools, + utils::{db::uuid::new_uuid_v4_as_b64_id, time::time_since_epoch_ms_i64}, +}; -use crate::{db::{node_ratings::{NodeRating, get_node_rating_by_user, get_rating_average, get_node_ratings, get_node_ratings_base, get_node_rating_by_user_base, get_rating_average_base}, nodes_::_node::{get_node_form, Node, ArgumentType}, node_links::ClaimForm, access_policies::get_system_access_policy, node_ratings_::_node_rating_type::NodeRatingType, _shared::access_policy_target::AccessPolicyTarget}, utils::db::accessors::AccessorContext}; +use crate::{ + db::{ + _shared::access_policy_target::AccessPolicyTarget, + access_policies::get_system_access_policy, + node_links::ClaimForm, + node_ratings::{get_node_rating_by_user, get_node_rating_by_user_base, get_node_ratings, get_node_ratings_base, get_rating_average, get_rating_average_base, NodeRating}, + node_ratings_::_node_rating_type::NodeRatingType, + nodes_::_node::{get_node_form, ArgumentType, Node}, + }, + utils::db::accessors::AccessorContext, +}; // sync:js pub async fn get_argument_impact_pseudo_rating(ctx: &AccessorContext<'_>, argument: &Node, premises: &[Node], user_id: &str, use_average_for_missing: bool) -> Result { - if premises.len() == 0 { return Err(anyhow!("No premises provided.")); } - - let mut premise_probabilities = vec![]; - for premise in premises { - let rating_value = match get_node_rating_by_user_base(ctx, premise.id.as_str(), NodeRatingType::truth, user_id).await { - Ok(rating) => rating.value, - Err(_) => match use_average_for_missing { - true => get_rating_average_base(ctx, premise.id.as_str(), NodeRatingType::truth, None).await?, - false => bail!("Premise node #{} has no truth rating by user {}.", premise.id.as_str(), user_id), - }, - }; - - let form = get_node_form(ctx, &premise.id, &argument.id).await?; - let probability = match form { - ClaimForm::negation => 1.0 - (rating_value / 100.0), - _ => rating_value / 100.0, - }; - premise_probabilities.push(probability); - } - - let combined_truth_of_premises: f64 = match argument.argumentType { - Some(ArgumentType::all) => premise_probabilities.iter().fold(1.0, |total, current| total * current), - Some(ArgumentType::anyTwo) => { - let strongest = premise_probabilities.iter().fold(0f64, |max, current| max.max(*current)); - let second_strongest = if premise_probabilities.len() > 1 { - premise_probabilities - .iter() - .fold(0f64, |max, current| { - if *current != strongest { - max.max(*current) - } else { - max - } - }) - } else { - 0f64 - }; - strongest * second_strongest - } - Some(ArgumentType::any) => premise_probabilities.iter().fold(0f64, |max, current| max.max(*current)), - None => return Err(anyhow!("Argument node's `argumentType` field was set to null!")), - }; - - let relevance: f64 = match get_node_rating_by_user_base(ctx, argument.id.as_str(), NodeRatingType::relevance, user_id).await { - Ok(rating) => rating.value, - Err(_) => match use_average_for_missing { - true 
=> get_rating_average_base(ctx, &argument.id, NodeRatingType::relevance, None).await?, - false => bail!("Argument node #{} has no relevance rating by user {}.", argument.id.as_str(), user_id), - }, - }; - + if premises.len() == 0 { + return Err(anyhow!("No premises provided.")); + } + + let mut premise_probabilities = vec![]; + for premise in premises { + let rating_value = match get_node_rating_by_user_base(ctx, premise.id.as_str(), NodeRatingType::truth, user_id).await { + Ok(rating) => rating.value, + Err(_) => match use_average_for_missing { + true => get_rating_average_base(ctx, premise.id.as_str(), NodeRatingType::truth, None).await?, + false => bail!("Premise node #{} has no truth rating by user {}.", premise.id.as_str(), user_id), + }, + }; + + let form = get_node_form(ctx, &premise.id, &argument.id).await?; + let probability = match form { + ClaimForm::negation => 1.0 - (rating_value / 100.0), + _ => rating_value / 100.0, + }; + premise_probabilities.push(probability); + } + + let combined_truth_of_premises: f64 = match argument.argumentType { + Some(ArgumentType::all) => premise_probabilities.iter().fold(1.0, |total, current| total * current), + Some(ArgumentType::anyTwo) => { + let strongest = premise_probabilities.iter().fold(0f64, |max, current| max.max(*current)); + let second_strongest = if premise_probabilities.len() > 1 { + premise_probabilities.iter().fold(0f64, |max, current| if *current != strongest { max.max(*current) } else { max }) + } else { + 0f64 + }; + strongest * second_strongest + }, + Some(ArgumentType::any) => premise_probabilities.iter().fold(0f64, |max, current| max.max(*current)), + None => return Err(anyhow!("Argument node's `argumentType` field was set to null!")), + }; + + let relevance: f64 = match get_node_rating_by_user_base(ctx, argument.id.as_str(), NodeRatingType::relevance, user_id).await { + Ok(rating) => rating.value, + Err(_) => match use_average_for_missing { + true => get_rating_average_base(ctx, &argument.id, NodeRatingType::relevance, None).await?, + false => bail!("Argument node #{} has no relevance rating by user {}.", argument.id.as_str(), user_id), + }, + }; + // let strengthForType = adjustment.Distance(50) / 50; let result: f64 = combined_truth_of_premises * (relevance / 100f64); - let access_policy = get_system_access_policy(ctx, SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME).await?; + let access_policy = get_system_access_policy(ctx, SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME).await?; Ok(NodeRating { - id: new_uuid_v4_as_b64_id(), - accessPolicy: access_policy.id.to_string(), + id: new_uuid_v4_as_b64_id(), + accessPolicy: access_policy.id.to_string(), node: argument.id.to_string(), r#type: NodeRatingType::impact, creator: user_id.to_owned(), createdAt: time_since_epoch_ms_i64(), value: float_to_percent(result), - c_accessPolicyTargets: vec![], // auto-set by db + c_accessPolicyTargets: vec![], // auto-set by db }) } fn float_to_percent(f: f64) -> f64 { - let result = f * 100.0; - result.round() + let result = f * 100.0; + result.round() } /*pub struct RatingFilter { - pub node_id: String, - pub rating_type: NodeRatingType, - pub user_ids: Option>, + pub node_id: String, + pub rating_type: NodeRatingType, + pub user_ids: Option>, } pub fn rating_list_after_removes_and_adds(base_list: &[NodeRating], ratings_to_remove: Option<&Vec>, ratings_to_add: Option<&Vec>, ratings_to_add_filter: &RatingFilter) -> Vec { let mut result = base_list.to_vec(); @@ -89,29 +98,26 @@ pub fn rating_list_after_removes_and_adds(base_list: &[NodeRating], ratings_to_r 
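// A worked example of the impact pseudo-rating arithmetic above, assuming a hypothetical
// "all"-type argument with premise truth ratings of 80 and 60 and a relevance rating of 50
// (values are illustrative, not taken from any real map):
//   combined_truth_of_premises = 0.80 * 0.60 = 0.48
//   impact = 0.48 * (50 / 100) = 0.24  ->  float_to_percent(0.24) = 24.0
#[cfg(test)]
mod impact_pseudo_rating_sketch {
	#[test]
	fn worked_example() {
		let premise_probabilities = [0.80_f64, 0.60];
		// ArgumentType::all multiplies the premise probabilities together
		let combined_truth_of_premises: f64 = premise_probabilities.iter().product();
		let relevance = 50.0_f64;
		let impact = combined_truth_of_premises * (relevance / 100.0);
		// float_to_percent scales to 0..100 and rounds
		assert_eq!((impact * 100.0).round(), 24.0);
	}
}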
result.retain(|a| !ratings_to_remove.iter().any(|b| b == a.id.as_str())); } if let Some(ratings_to_add) = ratings_to_add { - let ratings_to_add_filtered = ratings_to_add.iter().filter(|a| { - a.node == ratings_to_add_filter.node_id - && a.r#type == ratings_to_add_filter.rating_type - && (ratings_to_add_filter.user_ids.is_none() || ratings_to_add_filter.user_ids.unwrap().iter().any(|b| b == &a.creator)) - }).map(|a| a.clone()).collect_vec(); + let ratings_to_add_filtered = ratings_to_add.iter().filter(|a| { + a.node == ratings_to_add_filter.node_id + && a.r#type == ratings_to_add_filter.rating_type + && (ratings_to_add_filter.user_ids.is_none() || ratings_to_add_filter.user_ids.unwrap().iter().any(|b| b == &a.creator)) + }).map(|a| a.clone()).collect_vec(); result.extend(ratings_to_add_filtered); } result }*/ // sync:js[loosely] -pub async fn get_argument_impact_pseudo_ratings( - ctx: &AccessorContext<'_>, - argument: &Node, premises: &Vec, user_ids: Option<&Vec>, use_average_for_missing: bool, -) -> Result, Error> { +pub async fn get_argument_impact_pseudo_ratings(ctx: &AccessorContext<'_>, argument: &Node, premises: &Vec, user_ids: Option<&Vec>, use_average_for_missing: bool) -> Result, Error> { let mut result = vec![]; - + let argument_relevance_ratings = get_node_ratings_base(ctx, &argument.id, Some(NodeRatingType::relevance), user_ids).await?; - + let mut users_who_rated_arg_and_premises = argument_relevance_ratings.iter().map(|a| a.creator.clone()).collect::>(); for premise in premises { let premise_truth_ratings = get_node_ratings_base(ctx, &premise.id, Some(NodeRatingType::truth), user_ids).await?; - + let users_who_rated_premise = premise_truth_ratings.iter().map(|a| a.creator.clone()).collect::>(); for user_id in users_who_rated_arg_and_premises.clone() { if !users_who_rated_premise.contains(&user_id) { @@ -119,12 +125,12 @@ pub async fn get_argument_impact_pseudo_ratings( } } } - + for user_id in &users_who_rated_arg_and_premises { if let Ok(impact_rating) = get_argument_impact_pseudo_rating(ctx, &argument, &premises, user_id, use_average_for_missing).await { result.push(impact_rating); } } - + Ok(result) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/_shared/record_command_run.rs b/Packages/app-server/src/db/commands/_shared/record_command_run.rs index 0203cd332..4c97f25ef 100644 --- a/Packages/app-server/src/db/commands/_shared/record_command_run.rs +++ b/Packages/app-server/src/db/commands/_shared/record_command_run.rs @@ -1,75 +1,91 @@ -use rust_shared::{anyhow::{Error, anyhow}, utils::{general_::extensions::ToOwnedV, type_aliases::JSONValue, time::time_since_epoch_ms_i64, db::uuid::new_uuid_v4_as_b64}, itertools::Itertools, async_graphql::ID}; +use rust_shared::{ + anyhow::{anyhow, Error}, + async_graphql::ID, + itertools::Itertools, + utils::{db::uuid::new_uuid_v4_as_b64, general_::extensions::ToOwnedV, time::time_since_epoch_ms_i64, type_aliases::JSONValue}, +}; use tracing::error; -use crate::{utils::{db::{accessors::AccessorContext, queries::get_entries_in_collection, filter::QueryFilter}, general::data_anchor::DataAnchorFor1}, db::{maps::{get_map, Map}, commands::_command::upsert_db_entry_by_id_for_struct, user_hiddens::get_user_hidden, users::User, command_runs::CommandRun}}; +use crate::{ + db::{ + command_runs::CommandRun, + commands::_command::upsert_db_entry_by_id_for_struct, + maps::{get_map, Map}, + user_hiddens::get_user_hidden, + users::User, + }, + utils::{ + db::{accessors::AccessorContext, filter::QueryFilter, 
queries::get_entries_in_collection}, + general::data_anchor::DataAnchorFor1, + }, +}; -pub async fn record_command_run_if_root( - ctx: &AccessorContext<'_>, actor: &User, is_root: bool, - command_name: String, command_input: JSONValue, command_result: JSONValue, - involved_nodes: Vec, -) -> Result<(), Error> { - if !is_root { return Ok(()); } - record_command_run(ctx, actor, command_name, command_input, command_result, involved_nodes).await +pub async fn record_command_run_if_root(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, command_name: String, command_input: JSONValue, command_result: JSONValue, involved_nodes: Vec) -> Result<(), Error> { + if !is_root { + return Ok(()); + } + record_command_run(ctx, actor, command_name, command_input, command_result, involved_nodes).await } -pub async fn record_command_run( - ctx: &AccessorContext<'_>, actor: &User, - command_name: String, command_input: JSONValue, command_result: JSONValue, - involved_nodes: Vec, -) -> Result<(), Error> { - let actor_hidden = get_user_hidden(ctx, actor.id.as_str()).await?; - let make_public_base = actor_hidden.addToStream; +pub async fn record_command_run(ctx: &AccessorContext<'_>, actor: &User, command_name: String, command_input: JSONValue, command_result: JSONValue, involved_nodes: Vec) -> Result<(), Error> { + let actor_hidden = get_user_hidden(ctx, actor.id.as_str()).await?; + let make_public_base = actor_hidden.addToStream; - ctx.with_rls_disabled(|| async { - trim_old_command_runs(ctx).await - }, Some("Failed to perform trimming of old command-runs.")).await?; + ctx.with_rls_disabled(|| async { trim_old_command_runs(ctx).await }, Some("Failed to perform trimming of old command-runs.")).await?; - let id = new_uuid_v4_as_b64(); - let command_run = CommandRun { - // set by this func - id: ID(id), - actor: actor.id.to_string(), - runTime: time_since_epoch_ms_i64(), - public_base: make_public_base, - // pass-through - commandName: command_name, - commandInput: command_input, - commandResult: command_result, - c_involvedNodes: involved_nodes, - c_accessPolicyTargets: vec![], // auto-set by db - }; - upsert_db_entry_by_id_for_struct(&ctx, "commandRuns".to_owned(), command_run.id.to_string(), command_run).await?; + let id = new_uuid_v4_as_b64(); + let command_run = CommandRun { + // set by this func + id: ID(id), + actor: actor.id.to_string(), + runTime: time_since_epoch_ms_i64(), + public_base: make_public_base, + // pass-through + commandName: command_name, + commandInput: command_input, + commandResult: command_result, + c_involvedNodes: involved_nodes, + c_accessPolicyTargets: vec![], // auto-set by db + }; + upsert_db_entry_by_id_for_struct(&ctx, "commandRuns".to_owned(), command_run.id.to_string(), command_run).await?; - Ok(()) + Ok(()) } /// Helper function to keep command-runs collection from growing beyond X entries. 
(this implementation isn't great, but better than nothing for now) async fn trim_old_command_runs(ctx: &AccessorContext<'_>) -> Result<(), Error> { - // this is an alternative way to have an admin-access transaction temporarily (commented since more complex) - /*let gql_ctx = ctx.gql_ctx.unwrap(); - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx_admin = AccessorContext::new_write(&mut anchor, gql_ctx, false).await?;*/ + // this is an alternative way to have an admin-access transaction temporarily (commented since more complex) + /*let gql_ctx = ctx.gql_ctx.unwrap(); + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx_admin = AccessorContext::new_write(&mut anchor, gql_ctx, false).await?;*/ - let (_, command_runs) = get_entries_in_collection::(ctx, "commandRuns".o(), &QueryFilter::empty(), None).await?; - let command_runs_to_remove = command_runs.into_iter().enumerate() - // sort by run-time, descending (so that latest ones are first) - .sorted_by_cached_key(|a| -a.1.runTime) - .filter(|(index, commandRun)| { - // keep the most recent 100 entries - if index < &100 { return false; } + let (_, command_runs) = get_entries_in_collection::(ctx, "commandRuns".o(), &QueryFilter::empty(), None).await?; + let command_runs_to_remove = command_runs + .into_iter() + .enumerate() + // sort by run-time, descending (so that latest ones are first) + .sorted_by_cached_key(|a| -a.1.runTime) + .filter(|(index, commandRun)| { + // keep the most recent 100 entries + if index < &100 { + return false; + } - // keep entries created in the last 3 days (so long as the total count is less than 1000) - let timeSinceRun = time_since_epoch_ms_i64() - commandRun.runTime; - if timeSinceRun < 3 * 24 * 60 * 60 * 1000 && index < &1000 { return false; } + // keep entries created in the last 3 days (so long as the total count is less than 1000) + let timeSinceRun = time_since_epoch_ms_i64() - commandRun.runTime; + if timeSinceRun < 3 * 24 * 60 * 60 * 1000 && index < &1000 { + return false; + } - // delete the rest - return true; - }) - // for now, limit command-runs-to-remove to the oldest 10 entries (else server can be overwhelmed and crash; exact diagnosis unknown, but happened for case of 227-at-once) - .rev().take(10) - .collect_vec(); - - let command_run_ids_to_remove = command_runs_to_remove.iter().map(|commandRun| commandRun.1.id.to_string()).collect_vec(); - ctx.tx.execute(r#"DELETE FROM "commandRuns" WHERE id = ANY($1)"#, &[&command_run_ids_to_remove]).await?; - Ok(()) -} \ No newline at end of file + // delete the rest + return true; + }) + // for now, limit command-runs-to-remove to the oldest 10 entries (else server can be overwhelmed and crash; exact diagnosis unknown, but happened for case of 227-at-once) + .rev() + .take(10) + .collect_vec(); + + let command_run_ids_to_remove = command_runs_to_remove.iter().map(|commandRun| commandRun.1.id.to_string()).collect_vec(); + ctx.tx.execute(r#"DELETE FROM "commandRuns" WHERE id = ANY($1)"#, &[&command_run_ids_to_remove]).await?; + Ok(()) +} diff --git a/Packages/app-server/src/db/commands/_shared/update_node_rating_summaries.rs b/Packages/app-server/src/db/commands/_shared/update_node_rating_summaries.rs index 264ad71d3..ca9af3722 100644 --- a/Packages/app-server/src/db/commands/_shared/update_node_rating_summaries.rs +++ b/Packages/app-server/src/db/commands/_shared/update_node_rating_summaries.rs @@ -1,35 +1,36 @@ use std::collections::HashMap; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; +use 
rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; +use rust_shared::db_constants::SYSTEM_USER_ID; use rust_shared::itertools::Itertools; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; -use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; use rust_shared::utils::general::{average, enum_to_string}; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_ratings_::_node_rating_type::{rating_value_is_in_range, get_rating_type_info, NodeRatingType}; -use crate::db::node_ratings::{NodeRatingInput, get_node_ratings, get_node_ratings_base}; -use crate::db::nodes::{get_node, get_node_parents, get_node_children}; -use crate::db::nodes_::_node::{RatingSummary}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_ratings::{get_node_ratings, get_node_ratings_base, NodeRatingInput}; +use crate::db::node_ratings_::_node_rating_type::{get_rating_type_info, rating_value_is_in_range, NodeRatingType}; +use crate::db::nodes::{get_node, get_node_children, get_node_parents}; +use crate::db::nodes_::_node::RatingSummary; use crate::db::nodes_::_node_type::NodeType; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::jsonb_utils::jsonb_set; use super::rating_processor::get_argument_impact_pseudo_ratings; +#[rustfmt::skip] pub async fn update_node_rating_summaries(ctx: &AccessorContext<'_>, _actor: &User, node_id: String, rating_type: NodeRatingType) -> Result<(), Error> { ctx.with_rls_disabled(|| async { let rating_type_info = get_rating_type_info(rating_type); @@ -95,4 +96,4 @@ pub async fn update_node_rating_summaries(ctx: &AccessorContext<'_>, _actor: &Us }, Some("Failed to update node-rating-summaries for node.")).await?; Ok(()) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/_temp/clone_map_special.rs b/Packages/app-server/src/db/commands/_temp/clone_map_special.rs index 37f9a3ad4..bef9b3f1b 100644 --- a/Packages/app-server/src/db/commands/_temp/clone_map_special.rs +++ b/Packages/app-server/src/db/commands/_temp/clone_map_special.rs @@ -1,24 +1,24 @@ -use std::fmt::{Formatter, Display}; +use std::fmt::{Display, Formatter}; use futures_util::{pin_mut, Stream, StreamExt}; -use rust_shared::async_graphql::{ID, 
SimpleObject, InputObject, Subscription, async_stream}; +use rust_shared::anyhow::{anyhow, ensure, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{async_stream, InputObject, SimpleObject, Subscription, ID}; +use rust_shared::db_constants::SYSTEM_USER_ID; use rust_shared::indexmap::map; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; -use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; use rust_shared::utils::general_::extensions::ToOwnedV; -use rust_shared::{anyhow, async_graphql, flume, serde_json, to_sub_err, GQLError, SubError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::{FReceiver, FSender, JSONValue}; -use rust_shared::anyhow::{anyhow, ensure, Context, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, flume, serde_json, to_sub_err, GQLError, SubError}; use tracing::{info, warn}; use crate::db::commands::_command::command_boilerplate; use crate::db::commands::add_child_node::AddChildNodeExtras; use crate::db::commands::add_map::{add_map, AddMapInput}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::maps::{get_map, MapInput}; use crate::db::node_links::{get_node_links, ChildGroup, NodeLinkInput}; use crate::db::node_phrasings::NodePhrasing_Embedded; @@ -28,239 +28,241 @@ use crate::db::nodes_::_node::{Node, NodeInput}; use crate::db::nodes_::_node_type::NodeType; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use crate::utils::general::order_key::OrderKey; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, tbd}; +use super::super::_command::{tbd, upsert_db_entry_by_id_for_struct, NoExtras}; use super::super::_shared::add_node::add_node; use super::super::_shared::increment_edit_counts::increment_edit_counts_if_valid; use super::super::add_child_node::{add_child_node, AddChildNodeInput}; //wrap_slow_macros!{ -#[derive(Default)] pub struct SubscriptionShard_CloneMapSpecial; -#[Subscription] impl SubscriptionShard_CloneMapSpecial { - async fn clone_map_special<'a>(&self, gql_ctx: &'a async_graphql::Context<'a>, input: CloneMapSpecialInput, only_validate: Option) -> impl Stream> + 'a { - let base_stream = async_stream::stream! { - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_write_advanced(&mut anchor, gql_ctx, false, Some(false)).await.map_err(to_sub_err)?; - let actor = get_user_info_from_gql_ctx(gql_ctx, &ctx).await.map_err(to_sub_err)?; - let input_json = serde_json::to_string(&input).map_err(to_sub_err)?; +#[derive(Default)] +pub struct SubscriptionShard_CloneMapSpecial; +#[Subscription] +impl SubscriptionShard_CloneMapSpecial { + async fn clone_map_special<'a>(&self, gql_ctx: &'a async_graphql::Context<'a>, input: CloneMapSpecialInput, only_validate: Option) -> impl Stream> + 'a { + let base_stream = async_stream::stream! 
{ + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_write_advanced(&mut anchor, gql_ctx, false, Some(false)).await.map_err(to_sub_err)?; + let actor = get_user_info_from_gql_ctx(gql_ctx, &ctx).await.map_err(to_sub_err)?; + let input_json = serde_json::to_string(&input).map_err(to_sub_err)?; - let mut nodes_cloned = 0; - let mut nodes_warned: Vec = vec![]; - let (nodes_warned_s1, nodes_warned_r1): (FSender, FReceiver) = flume::unbounded(); - let mut last_result: Result = Err(anyhow!("No results yet")).map_err(to_sub_err); - - // use block here, to elucidate any code mistakes that might keep a reference from within "stream" alive longer than it should - { - let stream = clone_map_special(&ctx, &actor, false, input, Default::default(), nodes_warned_s1); - pin_mut!(stream); // needed for iteration - while let Some(subresult) = stream.next().await { - // first, collect any buffered warnings into the nodes_warned list - while let Ok(node_id) = nodes_warned_r1.try_recv() { - if !nodes_warned.contains(&node_id) { - nodes_warned.push(node_id); - } - } + let mut nodes_cloned = 0; + let mut nodes_warned: Vec = vec![]; + let (nodes_warned_s1, nodes_warned_r1): (FSender, FReceiver) = flume::unbounded(); + let mut last_result: Result = Err(anyhow!("No results yet")).map_err(to_sub_err); - // then process the next "main stream" result (ie. of another node having been cloned) - match subresult { - Err(e) => { - last_result = Err(e).map_err(to_sub_err); - //yield last_result.clone(); - last_result.clone()?; // use this syntax, to halt if error is hit - }, - Ok((new_map_id, _node_id)) => { - nodes_cloned += 1; - last_result = Ok(CloneMapSpecialResult { newMapID: new_map_id.clone(), nodesCloned: nodes_cloned, nodesWarned: nodes_warned.clone(), doneAt: None }); + // use block here, to elucidate any code mistakes that might keep a reference from within "stream" alive longer than it should + { + let stream = clone_map_special(&ctx, &actor, false, input, Default::default(), nodes_warned_s1); + pin_mut!(stream); // needed for iteration + while let Some(subresult) = stream.next().await { + // first, collect any buffered warnings into the nodes_warned list + while let Ok(node_id) = nodes_warned_r1.try_recv() { + if !nodes_warned.contains(&node_id) { + nodes_warned.push(node_id); + } + } - // only update every X nodes (else eg. gql-playground UI can become unresponsive) - let interval = match nodes_cloned { - x if x < 10 => 1, - x if x < 100 => 10, - x if x < 1000 => 100, - x if x < 10000 => 500, - _ => 1000, - }; - if nodes_cloned % interval == 0 { - yield last_result.clone(); - } - }, - } - } - } - - // sync from: _command.rs - if only_validate.unwrap_or(false) { - // before rolling back, ensure that none of the constraints are violated at this point (we must check manually, since commit is never called) - crate::utils::db::accessors::trigger_deferred_constraints(&ctx.tx).await.map_err(to_sub_err)?; - - // the transaction would be rolled-back automatically after this blocks ends, but let's call rollback() explicitly just to be clear/certain - ctx.tx.rollback().await.map_err(to_sub_err)?; - tracing::info!("Command completed a \"validation only\" run without hitting errors. @NodesCloned:{:?} @Input:{} ", last_result, input_json); - } else { - ctx.tx.commit().await.map_err(to_sub_err)?; - tracing::info!("Command executed. @Result:{:?} @Input:{}", last_result, input_json); - } + // then process the next "main stream" result (ie. 
of another node having been cloned) + match subresult { + Err(e) => { + last_result = Err(e).map_err(to_sub_err); + //yield last_result.clone(); + last_result.clone()?; // use this syntax, to halt if error is hit + }, + Ok((new_map_id, _node_id)) => { + nodes_cloned += 1; + last_result = Ok(CloneMapSpecialResult { newMapID: new_map_id.clone(), nodesCloned: nodes_cloned, nodesWarned: nodes_warned.clone(), doneAt: None }); - if let Ok(inner) = last_result { - yield Ok(CloneMapSpecialResult { - doneAt: Some(time_since_epoch_ms_i64()), - ..inner - }); - } - }; - base_stream - } + // only update every X nodes (else eg. gql-playground UI can become unresponsive) + let interval = match nodes_cloned { + x if x < 10 => 1, + x if x < 100 => 10, + x if x < 1000 => 100, + x if x < 10000 => 500, + _ => 1000, + }; + if nodes_cloned % interval == 0 { + yield last_result.clone(); + } + }, + } + } + } + + // sync from: _command.rs + if only_validate.unwrap_or(false) { + // before rolling back, ensure that none of the constraints are violated at this point (we must check manually, since commit is never called) + crate::utils::db::accessors::trigger_deferred_constraints(&ctx.tx).await.map_err(to_sub_err)?; + + // the transaction would be rolled-back automatically after this blocks ends, but let's call rollback() explicitly just to be clear/certain + ctx.tx.rollback().await.map_err(to_sub_err)?; + tracing::info!("Command completed a \"validation only\" run without hitting errors. @NodesCloned:{:?} @Input:{} ", last_result, input_json); + } else { + ctx.tx.commit().await.map_err(to_sub_err)?; + tracing::info!("Command executed. @Result:{:?} @Input:{}", last_result, input_json); + } + + if let Ok(inner) = last_result { + yield Ok(CloneMapSpecialResult { + doneAt: Some(time_since_epoch_ms_i64()), + ..inner + }); + } + }; + base_stream + } } #[derive(InputObject, Serialize, Deserialize)] pub struct CloneMapSpecialInput { - pub mapID: String, + pub mapID: String, } #[derive(SimpleObject, Debug, Clone)] pub struct CloneMapSpecialResult { - pub newMapID: String, - pub nodesCloned: i32, - pub nodesWarned: Vec, - pub doneAt: Option, + pub newMapID: String, + pub nodesCloned: i32, + pub nodesWarned: Vec, + pub doneAt: Option, } //} pub fn clone_map_special<'a>(ctx: &'a AccessorContext<'_>, actor: &'a User, _is_root: bool, input: CloneMapSpecialInput, _extras: NoExtras, nodes_warned_s1: FSender) -> impl Stream> + 'a { - let base_stream = async_stream::stream!{ - let CloneMapSpecialInput { mapID } = input; + let base_stream = async_stream::stream! 
{ + let CloneMapSpecialInput { mapID } = input; - let map = get_map(ctx, &mapID).await?; - let new_map = MapInput { - name: format!("Clone of {}", map.name), - accessPolicy: map.accessPolicy, - note: map.note.clone(), - noteInline: map.noteInline, - defaultExpandDepth: map.defaultExpandDepth, - nodeAccessPolicy: map.nodeAccessPolicy.clone(), - featured: map.featured, - editors: map.editors.clone(), - extras: json!({}), - }; - let new_map_input = AddMapInput {map: new_map}; - let new_map_result = add_map(ctx, actor, false, new_map_input, Default::default()).await?; + let map = get_map(ctx, &mapID).await?; + let new_map = MapInput { + name: format!("Clone of {}", map.name), + accessPolicy: map.accessPolicy, + note: map.note.clone(), + noteInline: map.noteInline, + defaultExpandDepth: map.defaultExpandDepth, + nodeAccessPolicy: map.nodeAccessPolicy.clone(), + featured: map.featured, + editors: map.editors.clone(), + extras: json!({}), + }; + let new_map_input = AddMapInput {map: new_map}; + let new_map_result = add_map(ctx, actor, false, new_map_input, Default::default()).await?; - let root_node = get_node(ctx, &map.rootNode).await?; - let new_root_node = get_node(ctx, &new_map_result.root_node_id).await?; + let root_node = get_node(ctx, &map.rootNode).await?; + let new_root_node = get_node(ctx, &new_map_result.root_node_id).await?; - let stream = clone_node_tree_special(ctx, actor, map.id.as_str(), root_node, new_root_node, nodes_warned_s1); - for await result in stream { - match result { - Err(e) => Err(e)?, // use this syntax, to halt if error is hit - Ok(node_id) => yield Ok((new_map_result.id.clone(), node_id)), - } - } - }; - base_stream + let stream = clone_node_tree_special(ctx, actor, map.id.as_str(), root_node, new_root_node, nodes_warned_s1); + for await result in stream { + match result { + Err(e) => Err(e)?, // use this syntax, to halt if error is hit + Ok(node_id) => yield Ok((new_map_result.id.clone(), node_id)), + } + } + }; + base_stream } pub fn clone_node_tree_special<'a>(ctx: &'a AccessorContext<'_>, actor: &'a User, map_id: &'a str, old_node: Node, new_node: Node, nodes_warned_s1: FSender) -> impl Stream> + 'a { - let base_stream = async_stream::stream!{ - //let rev = get_node_revision(ctx, node.c_currentRevision.as_str()).await?; + let base_stream = async_stream::stream! { + //let rev = get_node_revision(ctx, node.c_currentRevision.as_str()).await?; - let links = get_node_links(ctx, Some(old_node.id.as_str()), None).await?; - for link in links { - let child = get_node(ctx, link.child.as_str()).await?; - let child_rev = get_node_revision(ctx, child.c_currentRevision.as_str()).await?; + let links = get_node_links(ctx, Some(old_node.id.as_str()), None).await?; + for link in links { + let child = get_node(ctx, link.child.as_str()).await?; + let child_rev = get_node_revision(ctx, child.c_currentRevision.as_str()).await?; - // if child is an argument, try to "skip over it" during construction of map-clone (ie. 
to remove the sl-unwanted "intermediary node") - if child.r#type == NodeType::argument { - let grandchild_links = get_node_links(ctx, Some(child.id.as_str()), None).await?; - let has_attachments = child_rev.attachments.len() > 0; - // ensure that argument has no title, in any of the text_XXX fields - let get_if_non_empty = |s: &Option| { - match s { - Some(s) => match s { - s if s.is_empty() => None, - _ => Some(s.to_owned()), - }, - None => None, - } - }; - let first_non_empty_title = - get_if_non_empty(&Some(child_rev.phrasing.text_base.clone())) - .or(get_if_non_empty(&child_rev.phrasing.text_negation.clone())) - .or(get_if_non_empty(&child_rev.phrasing.text_question.clone())) - .or(get_if_non_empty(&child_rev.phrasing.text_narrative.clone())); - /*if let Some(title) = first_non_empty_title { - warn!("Argument node #{} has a non-empty title. If this is a dry-run, it's recommended to investigate these entries before proceeding. @title:\n\t{}", child.id.as_str(), title); - }*/ + // if child is an argument, try to "skip over it" during construction of map-clone (ie. to remove the sl-unwanted "intermediary node") + if child.r#type == NodeType::argument { + let grandchild_links = get_node_links(ctx, Some(child.id.as_str()), None).await?; + let has_attachments = child_rev.attachments.len() > 0; + // ensure that argument has no title, in any of the text_XXX fields + let get_if_non_empty = |s: &Option| { + match s { + Some(s) => match s { + s if s.is_empty() => None, + _ => Some(s.to_owned()), + }, + None => None, + } + }; + let first_non_empty_title = + get_if_non_empty(&Some(child_rev.phrasing.text_base.clone())) + .or(get_if_non_empty(&child_rev.phrasing.text_negation.clone())) + .or(get_if_non_empty(&child_rev.phrasing.text_question.clone())) + .or(get_if_non_empty(&child_rev.phrasing.text_narrative.clone())); + /*if let Some(title) = first_non_empty_title { + warn!("Argument node #{} has a non-empty title. If this is a dry-run, it's recommended to investigate these entries before proceeding. @title:\n\t{}", child.id.as_str(), title); + }*/ - // only "get rid of the intermediary argument" when it's "safe to do so", which requires that: (all of...) - // 1) The arg has only 0-1 children. - // 2) The arg has no attachments. - // 3) The arg has only null/empty titles. - let safe_to_omit = grandchild_links.len() <= 1 && !has_attachments && first_non_empty_title.is_none(); + // only "get rid of the intermediary argument" when it's "safe to do so", which requires that: (all of...) + // 1) The arg has only 0-1 children. + // 2) The arg has no attachments. + // 3) The arg has only null/empty titles. 
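Aside (illustrative only, not part of the patch): the three "safe to omit" conditions listed above reduce to a small pure predicate. The sketch below uses hypothetical names (safe_to_omit, first_non_empty) and plain strings in place of the real NodeRevision phrasing fields.

// Minimal sketch of the "safe to omit intermediary argument" rule.
// Names and types are illustrative; the real command works on the codebase's
// Node/NodeRevision structs and a flume warning channel.
fn first_non_empty(titles: &[Option<String>]) -> Option<String> {
    // flatten skips None entries; find keeps the first title with actual text
    titles.iter().flatten().find(|s| !s.is_empty()).cloned()
}

fn safe_to_omit(child_count: usize, has_attachments: bool, titles: &[Option<String>]) -> bool {
    // 1) at most one child, 2) no attachments, 3) only null/empty titles
    child_count <= 1 && !has_attachments && first_non_empty(titles).is_none()
}

fn main() {
    let empty_titles = vec![Some(String::new()), None, Some(String::new()), None];
    assert!(safe_to_omit(1, false, &empty_titles));
    assert!(!safe_to_omit(2, false, &empty_titles)); // too many children
    assert!(!safe_to_omit(0, true, &empty_titles)); // has attachments
    assert!(!safe_to_omit(0, false, &[Some("claim text".to_owned())])); // non-empty title
    println!("safe_to_omit checks passed");
}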
+ let safe_to_omit = grandchild_links.len() <= 1 && !has_attachments && first_non_empty_title.is_none(); - // add warnings (visible to graphql caller) for arguments that aren't safe to omit (since we *want* all arguments to be omittable, if possible) - if !safe_to_omit { - nodes_warned_s1.send(format!("{} @childCount:{} @hasAttach:{} @hasNonEmptyTitle:{}", child.id.as_str(), grandchild_links.len(), has_attachments, first_non_empty_title.is_some()))?; - } - - if safe_to_omit { - // rather than add this argument itself, skip over it and add its children directly instead - for grandchild_link in grandchild_links { - let grandchild = get_node(ctx, &grandchild_link.child).await?; - let grandchild_rev = get_node_revision(ctx, grandchild.c_currentRevision.as_str()).await?; - let add_child_input = AddChildNodeInput { - mapID: Some(map_id.o()), - parentID: new_node.id.as_str().o(), - node: grandchild.clone().into_input(true), - revision: grandchild_rev.into_input(false), - link: NodeLinkInput { - parent: None, - child: None, - group: match new_node.r#type { - NodeType::category => ChildGroup::generic, - _ => ChildGroup::freeform, - }, - orderKey: link.orderKey.clone(), // use [old_node -> child] link's order-key, since that ordering is more meaningful - form: grandchild_link.form, - seriesAnchor: grandchild_link.seriesAnchor, - seriesEnd: grandchild_link.seriesEnd, - polarity: grandchild_link.polarity, - } - }; - let add_node_result = add_child_node(ctx, actor, false, add_child_input, AddChildNodeExtras { avoid_recording_command_run: true }).await?; - yield Ok(add_node_result.nodeID.clone()); - let new_node = get_node(ctx, &add_node_result.nodeID).await?; + // add warnings (visible to graphql caller) for arguments that aren't safe to omit (since we *want* all arguments to be omittable, if possible) + if !safe_to_omit { + nodes_warned_s1.send(format!("{} @childCount:{} @hasAttach:{} @hasNonEmptyTitle:{}", child.id.as_str(), grandchild_links.len(), has_attachments, first_non_empty_title.is_some()))?; + } - //Box::pin(clone_node_tree_special(ctx, actor, map_id, grandchild, new_node)).await?; - for await result in Box::pin(clone_node_tree_special(ctx, actor, map_id, grandchild, new_node, nodes_warned_s1.clone())) { - yield Ok(result?); // use this syntax, to halt if error is hit - } - } - // we've done special processing for this argument node, so skip the generic processing below - continue; - } - } + if safe_to_omit { + // rather than add this argument itself, skip over it and add its children directly instead + for grandchild_link in grandchild_links { + let grandchild = get_node(ctx, &grandchild_link.child).await?; + let grandchild_rev = get_node_revision(ctx, grandchild.c_currentRevision.as_str()).await?; + let add_child_input = AddChildNodeInput { + mapID: Some(map_id.o()), + parentID: new_node.id.as_str().o(), + node: grandchild.clone().into_input(true), + revision: grandchild_rev.into_input(false), + link: NodeLinkInput { + parent: None, + child: None, + group: match new_node.r#type { + NodeType::category => ChildGroup::generic, + _ => ChildGroup::freeform, + }, + orderKey: link.orderKey.clone(), // use [old_node -> child] link's order-key, since that ordering is more meaningful + form: grandchild_link.form, + seriesAnchor: grandchild_link.seriesAnchor, + seriesEnd: grandchild_link.seriesEnd, + polarity: grandchild_link.polarity, + } + }; + let add_node_result = add_child_node(ctx, actor, false, add_child_input, AddChildNodeExtras { avoid_recording_command_run: true }).await?; + yield 
Ok(add_node_result.nodeID.clone()); + let new_node = get_node(ctx, &add_node_result.nodeID).await?; - let add_child_input = AddChildNodeInput { - mapID: Some(map_id.o()), - parentID: new_node.id.as_str().o(), - node: child.clone().into_input(true), - revision: child_rev.into_input(false), - link: link.into_input(false), - }; - let add_child_result = add_child_node(ctx, actor, false, add_child_input, AddChildNodeExtras { avoid_recording_command_run: true }).await?; - yield Ok(add_child_result.nodeID.clone()); - let new_child = get_node(ctx, &add_child_result.nodeID).await?; + //Box::pin(clone_node_tree_special(ctx, actor, map_id, grandchild, new_node)).await?; + for await result in Box::pin(clone_node_tree_special(ctx, actor, map_id, grandchild, new_node, nodes_warned_s1.clone())) { + yield Ok(result?); // use this syntax, to halt if error is hit + } + } + // we've done special processing for this argument node, so skip the generic processing below + continue; + } + } - //Box::pin(clone_node_tree_special(ctx, actor, map_id, child, new_child)).await?; - for await result in Box::pin(clone_node_tree_special(ctx, actor, map_id, child, new_child, nodes_warned_s1.clone())) { - yield Ok(result?); // use this syntax, to halt if error is hit - } - } - }; - base_stream -} \ No newline at end of file + let add_child_input = AddChildNodeInput { + mapID: Some(map_id.o()), + parentID: new_node.id.as_str().o(), + node: child.clone().into_input(true), + revision: child_rev.into_input(false), + link: link.into_input(false), + }; + let add_child_result = add_child_node(ctx, actor, false, add_child_input, AddChildNodeExtras { avoid_recording_command_run: true }).await?; + yield Ok(add_child_result.nodeID.clone()); + let new_child = get_node(ctx, &add_child_result.nodeID).await?; + + //Box::pin(clone_node_tree_special(ctx, actor, map_id, child, new_child)).await?; + for await result in Box::pin(clone_node_tree_special(ctx, actor, map_id, child, new_child, nodes_warned_s1.clone())) { + yield Ok(result?); // use this syntax, to halt if error is hit + } + } + }; + base_stream +} diff --git a/Packages/app-server/src/db/commands/add_access_policy.rs b/Packages/app-server/src/db/commands/add_access_policy.rs index c9824a937..10b76c82d 100644 --- a/Packages/app-server/src/db/commands/add_access_policy.rs +++ b/Packages/app-server/src/db/commands/add_access_policy.rs @@ -1,33 +1,33 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; +use rust_shared::db_constants::SYSTEM_USER_ID; use rust_shared::indexmap::IndexMap; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; -use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; -use crate::db::access_policies_::_access_policy::{AccessPolicyInput, AccessPolicy}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, 
get_user_info_from_gql_ctx}; +use crate::db::access_policies_::_access_policy::{AccessPolicy, AccessPolicyInput}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::terms::{Term, TermInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, command_boilerplate, NoExtras}; +use super::_command::{command_boilerplate, upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_AddAccessPolicy; #[Object] impl MutationShard_AddAccessPolicy { async fn add_access_policy(&self, gql_ctx: &async_graphql::Context<'_>, input: AddAccessPolicyInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, add_access_policy); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -44,7 +44,7 @@ pub struct AddAccessPolicyResult { pub async fn add_access_policy(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: AddAccessPolicyInput, _extras: NoExtras) -> Result { let AddAccessPolicyInput { policy: policy_ } = input; - + let policy = AccessPolicy { // set by server id: ID(new_uuid_v4_as_b64()), @@ -59,4 +59,4 @@ pub async fn add_access_policy(ctx: &AccessorContext<'_>, actor: &User, _is_root upsert_db_entry_by_id_for_struct(&ctx, "accessPolicies".to_owned(), policy.id.to_string(), policy.clone()).await?; Ok(AddAccessPolicyResult { id: policy.id.to_string() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/add_argument_and_claim.rs b/Packages/app-server/src/db/commands/add_argument_and_claim.rs index 264627ceb..045dc8761 100644 --- a/Packages/app-server/src/db/commands/add_argument_and_claim.rs +++ b/Packages/app-server/src/db/commands/add_argument_and_claim.rs @@ -1,97 +1,85 @@ -use std::fmt::{Formatter, Display}; +use std::fmt::{Display, Formatter}; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; use rust_shared::utils::general_::extensions::ToOwnedV; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, Context}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::commands::_command::command_boilerplate; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::node_links::NodeLinkInput; use crate::db::node_phrasings::NodePhrasing_Embedded; use 
crate::db::node_revisions::{NodeRevision, NodeRevisionInput}; -use crate::db::nodes_::_node::{NodeInput}; +use crate::db::nodes_::_node::NodeInput; use crate::db::nodes_::_node_type::NodeType; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, tbd}; +use super::_command::{tbd, upsert_db_entry_by_id_for_struct, NoExtras}; use super::_shared::add_node::add_node; use super::_shared::increment_edit_counts::increment_edit_counts_if_valid; use super::add_child_node::{add_child_node, AddChildNodeInput}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_AddArgumentAndClaim; #[Object] impl MutationShard_AddArgumentAndClaim { - async fn add_argument_and_claim(&self, gql_ctx: &async_graphql::Context<'_>, input: AddArgumentAndClaimInput, only_validate: Option) -> Result { - command_boilerplate!(gql_ctx, input, only_validate, add_argument_and_claim); - } + async fn add_argument_and_claim(&self, gql_ctx: &async_graphql::Context<'_>, input: AddArgumentAndClaimInput, only_validate: Option) -> Result { + command_boilerplate!(gql_ctx, input, only_validate, add_argument_and_claim); + } } #[derive(InputObject, Serialize, Deserialize)] pub struct AddArgumentAndClaimInput { - pub mapID: Option, - pub argumentParentID: String, - pub argumentNode: NodeInput, - pub argumentRevision: NodeRevisionInput, - pub argumentLink: NodeLinkInput, - pub claimNode: NodeInput, - pub claimRevision: NodeRevisionInput, - pub claimLink: NodeLinkInput, + pub mapID: Option, + pub argumentParentID: String, + pub argumentNode: NodeInput, + pub argumentRevision: NodeRevisionInput, + pub argumentLink: NodeLinkInput, + pub claimNode: NodeInput, + pub claimRevision: NodeRevisionInput, + pub claimLink: NodeLinkInput, } #[derive(SimpleObject, Debug)] pub struct AddArgumentAndClaimResult { - pub argumentNodeID: String, - pub argumentRevisionID: String, - pub argumentLinkID: String, - pub claimNodeID: String, - pub claimRevisionID: String, - pub claimLinkID: String, - pub doneAt: i64, + pub argumentNodeID: String, + pub argumentRevisionID: String, + pub argumentLinkID: String, + pub claimNodeID: String, + pub claimRevisionID: String, + pub claimLinkID: String, + pub doneAt: i64, } } pub async fn add_argument_and_claim(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, input: AddArgumentAndClaimInput, _extras: NoExtras) -> Result { - let AddArgumentAndClaimInput { mapID, argumentParentID, argumentNode, argumentRevision, argumentLink, claimNode, claimRevision, claimLink } = input; + let AddArgumentAndClaimInput { mapID, argumentParentID, argumentNode, argumentRevision, argumentLink, claimNode, claimRevision, claimLink } = input; - let add_argument_result = add_child_node(ctx, actor, false, AddChildNodeInput { - mapID: None, - parentID: argumentParentID.clone(), - node: argumentNode.clone(), - revision: argumentRevision.clone(), - link: argumentLink.clone(), - }, Default::default()).await?; + let add_argument_result = add_child_node(ctx, actor, false, AddChildNodeInput { mapID: None, parentID: argumentParentID.clone(), node: argumentNode.clone(), revision: argumentRevision.clone(), link: argumentLink.clone() }, Default::default()).await?; - let add_claim_result = add_child_node(ctx, actor, false, AddChildNodeInput { - mapID: None, - parentID: 
add_argument_result.nodeID.clone(), - node: claimNode, - revision: claimRevision, - link: claimLink, - }, Default::default()).await?; + let add_claim_result = add_child_node(ctx, actor, false, AddChildNodeInput { mapID: None, parentID: add_argument_result.nodeID.clone(), node: claimNode, revision: claimRevision, link: claimLink }, Default::default()).await?; - increment_edit_counts_if_valid(&ctx, Some(actor), mapID, is_root).await?; + increment_edit_counts_if_valid(&ctx, Some(actor), mapID, is_root).await?; - Ok(AddArgumentAndClaimResult { - argumentNodeID: add_argument_result.nodeID, - argumentRevisionID: add_argument_result.revisionID, - argumentLinkID: add_argument_result.linkID, - claimNodeID: add_claim_result.nodeID, - claimRevisionID: add_claim_result.revisionID, - claimLinkID: add_claim_result.linkID, - doneAt: time_since_epoch_ms_i64(), - }) -} \ No newline at end of file + Ok(AddArgumentAndClaimResult { + argumentNodeID: add_argument_result.nodeID, + argumentRevisionID: add_argument_result.revisionID, + argumentLinkID: add_argument_result.linkID, + claimNodeID: add_claim_result.nodeID, + claimRevisionID: add_claim_result.revisionID, + claimLinkID: add_claim_result.linkID, + doneAt: time_since_epoch_ms_i64(), + }) +} diff --git a/Packages/app-server/src/db/commands/add_child_node.rs b/Packages/app-server/src/db/commands/add_child_node.rs index e16404c97..bfd1baaec 100644 --- a/Packages/app-server/src/db/commands/add_child_node.rs +++ b/Packages/app-server/src/db/commands/add_child_node.rs @@ -1,17 +1,17 @@ -use std::fmt::{Formatter, Display}; +use std::fmt::{Display, Formatter}; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, ensure, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; use rust_shared::utils::general_::extensions::ToOwnedV; use rust_shared::utils::general_::serde::to_json_value_for_borrowed_obj; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, Context, ensure}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::_shared::common_errors::err_should_be_null; @@ -19,45 +19,45 @@ use crate::db::commands::_command::command_boilerplate; use crate::db::commands::_shared::increment_edit_counts::increment_edit_counts_if_valid; use crate::db::commands::_shared::record_command_run::record_command_run; use crate::db::commands::add_node_link::{add_node_link, AddNodeLinkInput}; -use crate::db::general::permission_helpers::{assert_user_can_add_phrasing, assert_user_can_add_child}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_links::{NodeLinkInput, NodeLink}; +use crate::db::general::permission_helpers::{assert_user_can_add_child, assert_user_can_add_phrasing}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, 
resolve_jwt_to_user_info}; +use crate::db::node_links::{NodeLink, NodeLinkInput}; use crate::db::node_phrasings::NodePhrasing_Embedded; use crate::db::node_revisions::{NodeRevision, NodeRevisionInput}; use crate::db::nodes::get_node; -use crate::db::nodes_::_node::{NodeInput}; +use crate::db::nodes_::_node::NodeInput; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, tbd}; +use super::_command::{tbd, upsert_db_entry_by_id_for_struct, NoExtras}; use super::_shared::add_node::add_node; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_AddChildNode; #[Object] impl MutationShard_AddChildNode { - async fn add_child_node(&self, gql_ctx: &async_graphql::Context<'_>, input: AddChildNodeInput, only_validate: Option) -> Result { - command_boilerplate!(gql_ctx, input, only_validate, add_child_node); - } + async fn add_child_node(&self, gql_ctx: &async_graphql::Context<'_>, input: AddChildNodeInput, only_validate: Option) -> Result { + command_boilerplate!(gql_ctx, input, only_validate, add_child_node); + } } #[derive(InputObject, Deserialize, Serialize, Clone)] pub struct AddChildNodeInput { - pub mapID: Option, - pub parentID: String, - pub node: NodeInput, - pub revision: NodeRevisionInput, - pub link: NodeLinkInput, + pub mapID: Option, + pub parentID: String, + pub node: NodeInput, + pub revision: NodeRevisionInput, + pub link: NodeLinkInput, } #[derive(SimpleObject, Debug, Serialize)] pub struct AddChildNodeResult { - pub nodeID: String, - pub revisionID: String, - pub linkID: String, - pub doneAt: i64, + pub nodeID: String, + pub revisionID: String, + pub linkID: String, + pub doneAt: i64, } } @@ -68,40 +68,31 @@ pub struct AddChildNodeExtras { } pub async fn add_child_node(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, input: AddChildNodeInput, extras: AddChildNodeExtras) -> Result { - let AddChildNodeInput { mapID, parentID, node: node_, revision: revision_, link: link_ } = input.clone(); - - let parent = get_node(ctx, &parentID).await?; - assert_user_can_add_child(ctx, actor, &parent).await?; // defensive - - let node_id = new_uuid_v4_as_b64(); - ensure!(link_.parent.is_none() && link_.child.is_none(), err_should_be_null("[input.link.parent and input.link.child]").to_string()); - let link = NodeLinkInput { - // set by server - parent: Some(parentID.clone()), - child: Some(node_id.clone()), - // pass-through - ..link_ - }; - - let add_node_result = add_node(ctx, actor, node_, Some(node_id.clone()), revision_).await?; - ensure!(add_node_result.nodeID == node_id, "The node-id returned by add_node didn't match the node-id-override supplied to it!"); - - let add_node_link_result = add_node_link(ctx, actor, false, AddNodeLinkInput { link }, Default::default()).await?; - - increment_edit_counts_if_valid(&ctx, Some(actor), mapID, is_root).await?; - - let result = AddChildNodeResult { - nodeID: add_node_result.nodeID, - revisionID: add_node_result.revisionID, - linkID: add_node_link_result.id, - doneAt: time_since_epoch_ms_i64(), - }; - if !extras.avoid_recording_command_run { - record_command_run( - ctx, actor, - "addChildNode".to_owned(), to_json_value_for_borrowed_obj(&input)?, to_json_value_for_borrowed_obj(&result)?, - vec![input.parentID, result.nodeID.clone()], - ).await?; - } - Ok(result) 
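Aside (illustrative only, not part of the patch): add_child_node above requires the client to leave link.parent and link.child null, then fills both server-side, pairing the freshly generated child id with the given parent id. A minimal standalone sketch of that guard, with simplified stand-in types:

// Sketch of the "server fills in link endpoints" pattern; field and type
// names are simplified stand-ins for NodeLinkInput.
#[derive(Debug, Clone, Default)]
struct LinkInput {
    parent: Option<String>,
    child: Option<String>,
    order_key: String,
}

fn fill_link_endpoints(client_link: LinkInput, parent_id: &str, new_child_id: &str) -> Result<LinkInput, String> {
    // The client may not pick endpoints; both must arrive as None.
    if client_link.parent.is_some() || client_link.child.is_some() {
        return Err("input.link.parent and input.link.child should be null".to_owned());
    }
    Ok(LinkInput {
        parent: Some(parent_id.to_owned()),
        child: Some(new_child_id.to_owned()),
        ..client_link // pass the rest through unchanged (e.g. order_key)
    })
}

fn main() {
    let from_client = LinkInput { order_key: "a0".to_owned(), ..Default::default() };
    let link = fill_link_endpoints(from_client, "parent123", "child456").unwrap();
    assert_eq!(link.parent.as_deref(), Some("parent123"));
    assert_eq!(link.child.as_deref(), Some("child456"));
    println!("{link:?}");
}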
-} \ No newline at end of file + let AddChildNodeInput { mapID, parentID, node: node_, revision: revision_, link: link_ } = input.clone(); + + let parent = get_node(ctx, &parentID).await?; + assert_user_can_add_child(ctx, actor, &parent).await?; // defensive + + let node_id = new_uuid_v4_as_b64(); + ensure!(link_.parent.is_none() && link_.child.is_none(), err_should_be_null("[input.link.parent and input.link.child]").to_string()); + let link = NodeLinkInput { + // set by server + parent: Some(parentID.clone()), + child: Some(node_id.clone()), + // pass-through + ..link_ + }; + + let add_node_result = add_node(ctx, actor, node_, Some(node_id.clone()), revision_).await?; + ensure!(add_node_result.nodeID == node_id, "The node-id returned by add_node didn't match the node-id-override supplied to it!"); + + let add_node_link_result = add_node_link(ctx, actor, false, AddNodeLinkInput { link }, Default::default()).await?; + + increment_edit_counts_if_valid(&ctx, Some(actor), mapID, is_root).await?; + + let result = AddChildNodeResult { nodeID: add_node_result.nodeID, revisionID: add_node_result.revisionID, linkID: add_node_link_result.id, doneAt: time_since_epoch_ms_i64() }; + if !extras.avoid_recording_command_run { + record_command_run(ctx, actor, "addChildNode".to_owned(), to_json_value_for_borrowed_obj(&input)?, to_json_value_for_borrowed_obj(&result)?, vec![input.parentID, result.nodeID.clone()]).await?; + } + Ok(result) +} diff --git a/Packages/app-server/src/db/commands/add_map.rs b/Packages/app-server/src/db/commands/add_map.rs index fe9490461..95a291e77 100644 --- a/Packages/app-server/src/db/commands/add_map.rs +++ b/Packages/app-server/src/db/commands/add_map.rs @@ -1,42 +1,42 @@ -use std::fmt::{Formatter, Display}; +use std::fmt::{Display, Formatter}; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; use rust_shared::utils::general_::extensions::ToOwnedV; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, Context}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::commands::_command::command_boilerplate; use crate::db::commands::_shared::increment_edit_counts::increment_edit_counts_if_valid; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::map_node_edits::{ChangeType, MapNodeEdit}; -use crate::db::maps::{MapInput, Map}; +use crate::db::maps::{Map, MapInput}; use crate::db::node_phrasings::NodePhrasing_Embedded; use crate::db::node_revisions::{NodeRevision, NodeRevisionInput}; -use crate::db::nodes_::_node::{NodeInput}; +use crate::db::nodes_::_node::NodeInput; use crate::db::nodes_::_node_type::NodeType; use 
crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, tbd, insert_db_entry_by_id_for_struct, CanOmit}; +use super::_command::{insert_db_entry_by_id_for_struct, tbd, upsert_db_entry_by_id_for_struct, CanOmit, NoExtras}; use super::_shared::add_node::add_node; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_AddMap; #[Object] impl MutationShard_AddMap { async fn add_map(&self, gql_ctx: &async_graphql::Context<'_>, input: AddMapInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, add_map); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -54,7 +54,7 @@ pub struct AddMapResult { pub async fn add_map(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: AddMapInput, _extras: NoExtras) -> Result { let AddMapInput { map: map_ } = input; - + let root_node_id = new_uuid_v4_as_b64(); let map = Map { // set by server @@ -86,22 +86,12 @@ pub async fn add_map(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, in extras: CanOmit::None, }; let new_root_node_rev = NodeRevisionInput { - phrasing: NodePhrasing_Embedded { - text_base: "Root".o(), - text_negation: None, - text_question: None, - text_narrative: None, - note: None, - terms: vec![], - }, + phrasing: NodePhrasing_Embedded { text_base: "Root".o(), text_negation: None, text_question: None, text_narrative: None, note: None, terms: vec![] }, node: None, displayDetails: None, attachments: vec![], }; add_node(ctx, actor, new_root_node, Some(root_node_id.clone()), new_root_node_rev).await?; - Ok(AddMapResult { - id: map.id.to_string(), - root_node_id, - }) -} \ No newline at end of file + Ok(AddMapResult { id: map.id.to_string(), root_node_id }) +} diff --git a/Packages/app-server/src/db/commands/add_media.rs b/Packages/app-server/src/db/commands/add_media.rs index d3fa932d7..e37454359 100644 --- a/Packages/app-server/src/db/commands/add_media.rs +++ b/Packages/app-server/src/db/commands/add_media.rs @@ -1,32 +1,32 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::commands::_command::command_boilerplate; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::medias::{Media, MediaInput}; use crate::db::users::User; use 
crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_AddMedia; #[Object] impl MutationShard_AddMedia { async fn add_media(&self, gql_ctx: &async_graphql::Context<'_>, input: AddMediaInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, add_media); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -43,7 +43,7 @@ pub struct AddMediaResult { pub async fn add_media(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: AddMediaInput, _extras: NoExtras) -> Result { let AddMediaInput { media: media_ } = input; - + let media = Media { // set by server id: ID(new_uuid_v4_as_b64()), @@ -58,8 +58,10 @@ pub async fn add_media(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, }; //assert_user_is_mod(&user_info)?; - if !actor.permissionGroups.r#mod { Err(anyhow!("Only moderators can add media currently. (till review/approval system is implemented)"))? } + if !actor.permissionGroups.r#mod { + Err(anyhow!("Only moderators can add media currently. (till review/approval system is implemented)"))? + } upsert_db_entry_by_id_for_struct(&ctx, "medias".to_owned(), media.id.to_string(), media.clone()).await?; Ok(AddMediaResult { id: media.id.to_string() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/add_node_link.rs b/Packages/app-server/src/db/commands/add_node_link.rs index 8753ac4f4..260105b53 100644 --- a/Packages/app-server/src/db/commands/add_node_link.rs +++ b/Packages/app-server/src/db/commands/add_node_link.rs @@ -1,43 +1,43 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; +use rust_shared::anyhow::{anyhow, bail, ensure, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; +use rust_shared::db_constants::{GLOBAL_ROOT_NODE_ID, SYSTEM_USER_ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; -use rust_shared::db_constants::{SYSTEM_USER_ID, GLOBAL_ROOT_NODE_ID}; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, ensure, bail, Context}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::_shared::access_policy_target::AccessPolicyTarget; use crate::db::_shared::common_errors::err_should_be_populated; -use crate::db::_shared::table_permissions::{does_policy_allow_x, CanVote, CanAddChild}; +use crate::db::_shared::table_permissions::{does_policy_allow_x, CanAddChild, CanVote}; use crate::db::access_policies::get_access_policy; use crate::db::access_policies_::_permission_set::{APAction, APTable}; use crate::db::commands::_command::command_boilerplate; use crate::db::general::permission_helpers::assert_user_can_add_child; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, 
get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::node_links::{get_node_links, ChildGroup, NodeLink, NodeLinkInput, Polarity}; use crate::db::node_links_::node_link_validity::assert_new_link_is_valid; use crate::db::nodes::get_node; -use crate::db::nodes_::_node::{Node}; +use crate::db::nodes_::_node::Node; use crate::db::nodes_::_node_type::{get_node_type_info, NodeType}; -use crate::db::users::{User, PermissionGroups, get_user}; +use crate::db::users::{get_user, PermissionGroups, User}; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_AddNodeLink; #[Object] impl MutationShard_AddNodeLink { /// This is a low-level function; for many use-cases, the higher-level `linkNode` command is preferred. async fn add_node_link(&self, gql_ctx: &async_graphql::Context<'_>, input: AddNodeLinkInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, add_node_link); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -54,7 +54,7 @@ pub struct AddNodeLinkResult { pub async fn add_node_link(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: AddNodeLinkInput, _extras: NoExtras) -> Result { let AddNodeLinkInput { link: link_ } = input; - + let parent_id = link_.parent.ok_or(err_should_be_populated("link.parent"))?; let child_id = link_.child.ok_or(err_should_be_populated("link.child"))?; let parent = get_node(&ctx, &parent_id).await?; @@ -88,8 +88,8 @@ pub async fn add_node_link(ctx: &AccessorContext<'_>, actor: &User, _is_root: bo assert_new_link_is_valid(ctx, &parent_id, &link.child, link.c_childType, link.group, link.polarity, Some(actor)).await?; } - + upsert_db_entry_by_id_for_struct(&ctx, "nodeLinks".to_owned(), link.id.to_string(), link.clone()).await?; Ok(AddNodeLinkResult { id: link.id.to_string() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/add_node_phrasing.rs b/Packages/app-server/src/db/commands/add_node_phrasing.rs index e87f3a0a1..8acf2c5b8 100644 --- a/Packages/app-server/src/db/commands/add_node_phrasing.rs +++ b/Packages/app-server/src/db/commands/add_node_phrasing.rs @@ -1,35 +1,35 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::_shared::access_policy_target::AccessPolicyTarget; 
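Aside (illustrative only, not part of the patch): the lower-level add_node_link command takes the opposite stance and insists that link.parent and link.child are already populated (via err_should_be_populated). A generic sketch of such a "required field" guard; the helper name and error type here are hypothetical:

use std::fmt;

// Hypothetical helper mirroring the "field should be populated" checks above.
#[derive(Debug)]
struct FieldError(String);
impl fmt::Display for FieldError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "field {} should be populated", self.0)
    }
}
impl std::error::Error for FieldError {}

fn require<T>(value: Option<T>, field: &str) -> Result<T, FieldError> {
    value.ok_or_else(|| FieldError(field.to_owned()))
}

fn main() -> Result<(), FieldError> {
    let parent: Option<String> = Some("nodeA".to_owned());
    let child: Option<String> = None;
    let parent_id = require(parent, "link.parent")?;
    println!("parent ok: {parent_id}");
    // This one fails, mirroring how the command rejects a half-filled link.
    match require(child, "link.child") {
        Ok(_) => unreachable!(),
        Err(e) => println!("rejected as expected: {e}"),
    }
    Ok(())
}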
use crate::db::commands::_command::command_boilerplate; use crate::db::general::permission_helpers::assert_user_can_add_phrasing; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_phrasings::{NodePhrasingInput, NodePhrasing}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_phrasings::{NodePhrasing, NodePhrasingInput}; use crate::db::nodes::get_node; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_AddNodePhrasing; #[Object] impl MutationShard_AddNodePhrasing { async fn add_node_phrasing(&self, gql_ctx: &async_graphql::Context<'_>, input: AddNodePhrasingInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, add_node_phrasing); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -49,7 +49,7 @@ pub async fn add_node_phrasing(ctx: &AccessorContext<'_>, actor: &User, _is_root let node = get_node(&ctx, &phrasing_.node).await?; assert_user_can_add_phrasing(ctx, actor, &node).await?; - + let phrasing = NodePhrasing { // set by server id: ID(new_uuid_v4_as_b64()), @@ -71,4 +71,4 @@ pub async fn add_node_phrasing(ctx: &AccessorContext<'_>, actor: &User, _is_root upsert_db_entry_by_id_for_struct(&ctx, "nodePhrasings".to_owned(), phrasing.id.to_string(), phrasing.clone()).await?; Ok(AddNodePhrasingResult { id: phrasing.id.to_string() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/add_node_revision.rs b/Packages/app-server/src/db/commands/add_node_revision.rs index 19a89a394..5842744a1 100644 --- a/Packages/app-server/src/db/commands/add_node_revision.rs +++ b/Packages/app-server/src/db/commands/add_node_revision.rs @@ -1,44 +1,44 @@ -use std::fmt::{Formatter, Display}; +use std::fmt::{Display, Formatter}; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; use rust_shared::utils::general_::extensions::ToOwnedV; use rust_shared::utils::general_::serde::to_json_value_for_borrowed_obj; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, Context}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::_shared::common_errors::err_should_be_populated; -use crate::db::access_policies_::_permission_set::{APTable, APAction}; +use crate::db::access_policies_::_permission_set::{APAction, APTable}; use 
crate::db::commands::_command::command_boilerplate; use crate::db::commands::_shared::increment_edit_counts::increment_edit_counts_if_valid; use crate::db::general::permission_helpers::assert_user_can_modify; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::map_node_edits::{ChangeType, MapNodeEdit}; use crate::db::maps::get_map; -use crate::db::node_revisions::{NodeRevisionInput, NodeRevision}; +use crate::db::node_revisions::{NodeRevision, NodeRevisionInput}; use crate::db::nodes::get_node; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; use super::_shared::record_command_run::{record_command_run, record_command_run_if_root}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_AddNodeRevision; #[Object] impl MutationShard_AddNodeRevision { async fn add_node_revision(&self, gql_ctx: &async_graphql::Context<'_>, input: AddNodeRevisionInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, add_node_revision); - } + } } #[derive(InputObject, Deserialize, Serialize, Clone)] @@ -62,7 +62,7 @@ pub struct AddNodeRevisionExtras { pub async fn add_node_revision(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, input: AddNodeRevisionInput, extras: AddNodeRevisionExtras) -> Result { let AddNodeRevisionInput { mapID, revision: revision_ } = input.clone(); - + let node_id = revision_.node.ok_or(err_should_be_populated("revision.node"))?; let node = get_node(ctx, &node_id).await?; //assert_user_can_do_x_for_commands(ctx, actor, APAction::Modify, ActionTarget::for_node(APTable::Nodes, node.accessPolicy.o())).await?; @@ -95,9 +95,14 @@ pub async fn add_node_revision(ctx: &AccessorContext<'_>, actor: &User, is_root: // delete old node-edits (ie. 
older than last 100) for this map, in mapNodeEdits, to avoid table getting too large // (we also limit the number of rows removed to 30, to avoid the possibility of hundreds of rows being removed all at once -- which caused a crash in the past) - ctx.tx.execute(r#"DELETE FROM "mapNodeEdits" WHERE id IN ( + ctx.tx + .execute( + r#"DELETE FROM "mapNodeEdits" WHERE id IN ( SELECT id FROM "mapNodeEdits" WHERE map = $1 ORDER BY time DESC OFFSET 100 LIMIT 30 - )"#, &[&map_id]).await?; + )"#, + &[&map_id], + ) + .await?; // add new node-edit entry let edit = MapNodeEdit { @@ -115,10 +120,6 @@ pub async fn add_node_revision(ctx: &AccessorContext<'_>, actor: &User, is_root: let result = AddNodeRevisionResult { id: revision.id.to_string() }; //if extras.is_child_command != Some(true) { - record_command_run_if_root( - ctx, actor, is_root, - "addNodeRevision".to_owned(), to_json_value_for_borrowed_obj(&input)?, to_json_value_for_borrowed_obj(&result)?, - vec![input.revision.node.ok_or(err_should_be_populated("input.revision.node"))?], - ).await?; + record_command_run_if_root(ctx, actor, is_root, "addNodeRevision".to_owned(), to_json_value_for_borrowed_obj(&input)?, to_json_value_for_borrowed_obj(&result)?, vec![input.revision.node.ok_or(err_should_be_populated("input.revision.node"))?]).await?; Ok(result) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/add_node_tag.rs b/Packages/app-server/src/db/commands/add_node_tag.rs index d023a0156..731494c3d 100644 --- a/Packages/app-server/src/db/commands/add_node_tag.rs +++ b/Packages/app-server/src/db/commands/add_node_tag.rs @@ -1,31 +1,31 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::node_tags::{NodeTag, NodeTagInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, command_boilerplate, NoExtras}; +use super::_command::{command_boilerplate, upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_AddNodeTag; #[Object] impl MutationShard_AddNodeTag { async fn add_node_tag(&self, gql_ctx: &async_graphql::Context<'_>, input: AddNodeTagInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, add_node_tag); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -42,7 +42,7 @@ pub struct AddNodeTagResult { pub async fn add_node_tag(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: AddNodeTagInput, _extras: NoExtras) -> Result { let AddNodeTagInput { tag: tag_ } = input; - + let tag = NodeTag { // set by server id: ID(new_uuid_v4_as_b64()), @@ -62,4 +62,4 @@ pub async fn add_node_tag(ctx: &AccessorContext<'_>, actor: &User, _is_root: boo upsert_db_entry_by_id_for_struct(&ctx, "nodeTags".to_owned(), tag.id.to_string(), tag.clone()).await?; Ok(AddNodeTagResult { id: tag.id.to_string() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/add_share.rs b/Packages/app-server/src/db/commands/add_share.rs index 350149350..df5189996 100644 --- a/Packages/app-server/src/db/commands/add_share.rs +++ b/Packages/app-server/src/db/commands/add_share.rs @@ -1,31 +1,31 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::shares::{Share, ShareInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, command_boilerplate}; +use super::_command::{command_boilerplate, upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_AddShare; #[Object] impl MutationShard_AddShare { async fn add_share(&self, gql_ctx: &async_graphql::Context<'_>, input: AddShareInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, add_share); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -42,7 +42,7 @@ pub struct AddShareResult { pub async fn add_share(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: AddShareInput, _extras: NoExtras) -> Result { let AddShareInput { share: share_ } = input; - + let share = Share { // set by server id: ID(new_uuid_v4_as_b64()), @@ -58,4 +58,4 @@ pub async fn add_share(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, upsert_db_entry_by_id_for_struct(&ctx, "shares".to_owned(), share.id.to_string(), share.clone()).await?; Ok(AddShareResult { id: share.id.to_string() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/add_term.rs b/Packages/app-server/src/db/commands/add_term.rs index 006eadda9..2d25151f5 100644 --- a/Packages/app-server/src/db/commands/add_term.rs +++ b/Packages/app-server/src/db/commands/add_term.rs @@ -1,32 +1,32 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::commands::_command::command_boilerplate; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::terms::{Term, TermInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_AddTerm; #[Object] impl MutationShard_AddTerm { async fn add_term(&self, gql_ctx: &async_graphql::Context<'_>, input: AddTermInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, add_term); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -43,7 +43,7 @@ pub struct AddTermResult { pub async fn add_term(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: AddTermInput, _extras: NoExtras) -> Result { let AddTermInput { term: term_ } = input; - + let term = Term { // set by server id: ID(new_uuid_v4_as_b64()), @@ -63,4 +63,4 @@ pub async fn add_term(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, i upsert_db_entry_by_id_for_struct(&ctx, "terms".to_owned(), term.id.to_string(), term.clone()).await?; Ok(AddTermResult { id: term.id.to_string() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/add_timeline.rs b/Packages/app-server/src/db/commands/add_timeline.rs index 12feef89e..a68620a72 100644 --- a/Packages/app-server/src/db/commands/add_timeline.rs +++ b/Packages/app-server/src/db/commands/add_timeline.rs @@ -1,32 +1,32 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::commands::_command::command_boilerplate; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::timelines::{Timeline, TimelineInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_AddTimeline; #[Object] impl MutationShard_AddTimeline { async fn add_timeline(&self, gql_ctx: &async_graphql::Context<'_>, input: AddTimelineInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, add_timeline); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -43,7 +43,7 @@ pub struct AddTimelineResult { pub async fn add_timeline(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: AddTimelineInput, _extras: NoExtras) -> Result { let AddTimelineInput { timeline: timeline_ } = input; - + let timeline = Timeline { // set by server id: ID(new_uuid_v4_as_b64()), @@ -61,4 +61,4 @@ pub async fn add_timeline(ctx: &AccessorContext<'_>, actor: &User, _is_root: boo upsert_db_entry_by_id_for_struct(&ctx, "timelines".to_owned(), timeline.id.to_string(), timeline.clone()).await?; Ok(AddTimelineResult { id: timeline.id.to_string() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/add_timeline_step.rs b/Packages/app-server/src/db/commands/add_timeline_step.rs index cc247a636..84441565d 100644 --- a/Packages/app-server/src/db/commands/add_timeline_step.rs +++ b/Packages/app-server/src/db/commands/add_timeline_step.rs @@ -1,32 +1,32 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::commands::_command::command_boilerplate; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::timeline_steps::{TimelineStep, TimelineStepInput, timeline_step_extras_locked_subfields}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::timeline_steps::{timeline_step_extras_locked_subfields, TimelineStep, TimelineStepInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, update_field_of_extras, init_field_of_extras}; +use super::_command::{init_field_of_extras, update_field_of_extras, upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_AddTimelineStep; #[Object] impl MutationShard_AddTimelineStep { async fn add_timeline_step(&self, gql_ctx: &async_graphql::Context<'_>, input: AddTimelineStepInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, add_timeline_step); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -43,7 +43,7 @@ pub struct AddTimelineStepResult { pub async fn add_timeline_step(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: AddTimelineStepInput, _extras: NoExtras) -> Result { let AddTimelineStepInput { step: step_ } = input; - + let timeline_step = TimelineStep { // set by server id: ID(new_uuid_v4_as_b64()), @@ -57,11 +57,11 @@ pub async fn add_timeline_step(ctx: &AccessorContext<'_>, actor: &User, _is_root timeFromLastStep: step_.timeFromLastStep, timeUntilNextStep: step_.timeUntilNextStep, message: step_.message, - extras: init_field_of_extras(step_.extras, json!({}), timeline_step_extras_locked_subfields())?, // "extras" fields use special handling - c_accessPolicyTargets: vec![], // auto-set by db + extras: init_field_of_extras(step_.extras, json!({}), timeline_step_extras_locked_subfields())?, // "extras" fields use special handling + c_accessPolicyTargets: vec![], // auto-set by db }; upsert_db_entry_by_id_for_struct(&ctx, "timelineSteps".to_owned(), timeline_step.id.to_string(), timeline_step.clone()).await?; Ok(AddTimelineStepResult { id: timeline_step.id.to_string() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/clone_subtree.rs b/Packages/app-server/src/db/commands/clone_subtree.rs index ca969cd76..5d22fb389 100644 --- a/Packages/app-server/src/db/commands/clone_subtree.rs +++ b/Packages/app-server/src/db/commands/clone_subtree.rs @@ -1,48 +1,48 @@ -use rust_shared::indexmap::IndexMap; -use rust_shared::itertools::Itertools; -use jsonschema::JSONSchema; +use deadpool_postgres::{Client, Pool, Transaction}; +use futures_util::{stream, Future, Stream, StreamExt, TryFutureExt, TryStreamExt}; use jsonschema::output::BasicOutput; +use jsonschema::JSONSchema; use lazy_static::lazy_static; -use rust_shared::{async_graphql, serde, serde_json}; -use rust_shared::utils::type_aliases::JSONValue; use rust_shared::anyhow::{anyhow, Context, Error}; -use rust_shared::async_graphql::{Object, Schema, Subscription, ID, async_stream, OutputType, scalar, EmptySubscription, SimpleObject}; -use deadpool_postgres::{Pool, Client, Transaction}; -use futures_util::{Stream, stream, TryFutureExt, StreamExt, Future, TryStreamExt}; +use rust_shared::async_graphql::{async_stream, scalar, EmptySubscription, Object, OutputType, Schema, SimpleObject, Subscription, ID}; +use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::indexmap::IndexMap; +use rust_shared::itertools::Itertools; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::utils::time::{time_since_epoch_ms, time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; use rust_shared::tokio::sync::RwLock; -use rust_shared::tokio_postgres::Row; use rust_shared::tokio_postgres::types::ToSql; -use rust_shared::db_constants::{SYSTEM_USER_ID}; -use tracing::info; +use rust_shared::tokio_postgres::Row; +use rust_shared::utils::time::{time_since_epoch_ms, time_since_epoch_ms_i64}; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{async_graphql, serde, serde_json}; use 
std::collections::HashSet; use std::path::Path; use std::rc::Rc; use std::sync::Arc; -use std::{time::Duration, pin::Pin, task::Poll}; +use std::{pin::Pin, task::Poll, time::Duration}; +use tracing::info; use crate::db::_general::GenericMutation_Result; use crate::db::access_policies_::_access_policy::AccessPolicy; use crate::db::general::subtree_collector::get_node_subtree; use crate::db::medias::Media; -use crate::db::node_links::{NodeLink, get_node_links}; +use crate::db::node_links::{get_node_links, NodeLink}; use crate::db::node_phrasings::NodePhrasing; use crate::db::node_revisions::NodeRevision; use crate::db::node_tags::{NodeTag, TagComp_CloneHistory}; use crate::db::nodes_::_node::Node; use crate::db::terms::Term; use crate::utils::db::accessors::AccessorContext; -use crate::utils::db::filter::{QueryFilter, FilterInput}; -use rust_shared::utils::type_aliases::RowData; +use crate::utils::db::filter::{FilterInput, QueryFilter}; +use crate::utils::db::generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}; use crate::utils::db::sql_fragment::SQLFragment; use crate::utils::db::transactions::{start_read_transaction, start_write_transaction}; +use crate::utils::general::data_anchor::{DataAnchor, DataAnchorFor1}; +use crate::utils::type_aliases::PGClientObject; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1, DataAnchor}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}}}; -use crate::utils::type_aliases::{PGClientObject}; +use rust_shared::utils::type_aliases::RowData; use super::_command::{set_db_entry_by_id, upsert_db_entry_by_id_for_struct}; @@ -50,189 +50,181 @@ use super::_command::{set_db_entry_by_id, upsert_db_entry_by_id_for_struct}; #[derive(Serialize, Deserialize, Debug)] //#[serde(crate = "rust_shared::serde")] pub struct CloneSubtreePayload { - parentNodeID: String, - rootNodeID: String, - maxDepth: usize, + parentNodeID: String, + rootNodeID: String, + maxDepth: usize, } // todo: maybe remove the json-schema-based validation (Rust's stronger type guarantees arguably make it not worth the effort) lazy_static! 
{ - static ref CLONE_SUBTREE_PAYLOAD_SCHEMA_JSON: JSONValue = json!({ - "properties": { - "parentNodeID": {"type": "string"}, - "rootNodeID": {"type": "string"}, - "maxDepth": {"type": "number"}, - }, - "required": ["parentNodeID", "rootNodeID", "maxDepth"], - }); - static ref CLONE_SUBTREE_PAYLOAD_SCHEMA_JSON_COMPILED: JSONSchema = JSONSchema::compile(&CLONE_SUBTREE_PAYLOAD_SCHEMA_JSON).expect("A valid schema"); + static ref CLONE_SUBTREE_PAYLOAD_SCHEMA_JSON: JSONValue = json!({ + "properties": { + "parentNodeID": {"type": "string"}, + "rootNodeID": {"type": "string"}, + "maxDepth": {"type": "number"}, + }, + "required": ["parentNodeID", "rootNodeID", "maxDepth"], + }); + static ref CLONE_SUBTREE_PAYLOAD_SCHEMA_JSON_COMPILED: JSONSchema = JSONSchema::compile(&CLONE_SUBTREE_PAYLOAD_SCHEMA_JSON).expect("A valid schema"); } pub async fn clone_subtree(gql_ctx: &async_graphql::Context<'_>, payload_raw: JSONValue) -> Result { - let output: BasicOutput = CLONE_SUBTREE_PAYLOAD_SCHEMA_JSON_COMPILED.apply(&payload_raw).basic(); - if !output.is_valid() { - let output_json = serde_json::to_value(output)?; - return Err(anyhow!(output_json)); - } - let payload: CloneSubtreePayload = serde_json::from_value(payload_raw)?; - - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_write(&mut anchor, gql_ctx, false).await?; - - // probably temp: helper for logging - let log = |text: &str| { - info!("CloneSubtreeLog: {text}"); - //msg_sender.send(GeneralMessage::MigrateLogMessageAdded(text.to_owned())).unwrap(); - }; - - log("part 0"); - let subtree = get_node_subtree(&ctx, payload.rootNodeID.clone(), payload.maxDepth).await?; - // these don't need cloning (since they don't "reference back"): terms, medias - - log("part 0.5"); - let ids = subtree.get_all_ids(); - let mut id_replacements: IndexMap = IndexMap::new(); - for id in &ids { - id_replacements.insert(id.clone(), new_uuid_v4_as_b64()); - } - let get_new_id_str = |old_id: &String| id_replacements.get(old_id).unwrap().to_owned(); - let get_new_id = |old_id: &ID| ID(get_new_id_str(&old_id.to_string())); - - log("part 0.75"); + let output: BasicOutput = CLONE_SUBTREE_PAYLOAD_SCHEMA_JSON_COMPILED.apply(&payload_raw).basic(); + if !output.is_valid() { + let output_json = serde_json::to_value(output)?; + return Err(anyhow!(output_json)); + } + let payload: CloneSubtreePayload = serde_json::from_value(payload_raw)?; + + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_write(&mut anchor, gql_ctx, false).await?; + + // probably temp: helper for logging + let log = |text: &str| { + info!("CloneSubtreeLog: {text}"); + //msg_sender.send(GeneralMessage::MigrateLogMessageAdded(text.to_owned())).unwrap(); + }; + + log("part 0"); + let subtree = get_node_subtree(&ctx, payload.rootNodeID.clone(), payload.maxDepth).await?; + // these don't need cloning (since they don't "reference back"): terms, medias + + log("part 0.5"); + let ids = subtree.get_all_ids(); + let mut id_replacements: IndexMap = IndexMap::new(); + for id in &ids { + id_replacements.insert(id.clone(), new_uuid_v4_as_b64()); + } + let get_new_id_str = |old_id: &String| id_replacements.get(old_id).unwrap().to_owned(); + let get_new_id = |old_id: &ID| ID(get_new_id_str(&old_id.to_string())); + + log("part 0.75"); // defer database's checking of foreign-key constraints until the end of the transaction (else would error) - ctx.tx.execute("SET CONSTRAINTS ALL DEFERRED;", &[]).await?; - - // todo: make-so these new entries all 
have their "creator" field updated to the actual user that's doing the cloning - //let actor_id = SYSTEM_USER_ID.to_owned(); - let actor_id = || SYSTEM_USER_ID.to_owned(); - - log("part 1"); - // first, add a new link from the old-node's parent to the new-node (which we've generated an id for, and are about to construct) - let old_root_links = get_node_links(&ctx, Some(payload.parentNodeID.as_str()), Some(payload.rootNodeID.as_str())).await?; - let old_root_link = old_root_links.get(0).ok_or(anyhow!("No child-link found between provided root-node \"{}\" and parent \"{}\".", payload.rootNodeID, payload.parentNodeID))?; - let new_root_link = NodeLink { - id: ID(new_uuid_v4_as_b64()), - creator: actor_id(), - createdAt: time_since_epoch_ms_i64(), - //child: id_replacements.get(&payload.rootNodeID).ok_or(anyhow!("Generation of new id for clone of root-node failed somehow."))?.to_owned(), - child: get_new_id_str(&payload.rootNodeID), - ..old_root_link.clone() - }; - log("part 1.5"); - upsert_db_entry_by_id_for_struct(&ctx, "nodeLinks".to_owned(), new_root_link.id.to_string(), new_root_link).await?; - - log("part 2"); - //let mut nodes_needing_clone_history_tag: HashSet = HashSet::new(); - let mut nodes_still_needing_clone_history_tag: Vec = vec![]; - for node_old in subtree.nodes { - //nodes_needing_clone_history_tag.insert(node_old.id.to_string()); - nodes_still_needing_clone_history_tag.push(node_old.id.to_string()); - let node = Node { - id: get_new_id(&node_old.id), - creator: actor_id(), - createdAt: time_since_epoch_ms_i64(), - c_currentRevision: get_new_id_str(&node_old.c_currentRevision), - ..node_old.clone() - }; - upsert_db_entry_by_id_for_struct(&ctx, "nodes".to_owned(), node.id.to_string(), node).await?; - } - log("part 3"); - for rev_old in subtree.nodeRevisions { - let rev = NodeRevision { - id: get_new_id(&rev_old.id), - creator: actor_id(), - createdAt: time_since_epoch_ms_i64(), - node: get_new_id_str(&rev_old.node), - ..rev_old.clone() - }; - /*for attachment in &rev.attachments { - if let Some(media) = attachment.media { - } - }*/ - upsert_db_entry_by_id_for_struct(&ctx, "nodeRevisions".to_owned(), rev.id.to_string(), rev).await?; - } - log("part 4"); - for phrasing_old in subtree.nodePhrasings { - let phrasing = NodePhrasing { - id: get_new_id(&phrasing_old.id), - creator: actor_id(), - createdAt: time_since_epoch_ms_i64(), - node: get_new_id_str(&phrasing_old.node), - ..phrasing_old.clone() - }; - upsert_db_entry_by_id_for_struct(&ctx, "nodePhrasings".to_owned(), phrasing.id.to_string(), phrasing).await?; - } - log("part 5"); - for link_old in subtree.nodeLinks { - let link = NodeLink { - id: get_new_id(&link_old.id), - creator: actor_id(), - createdAt: time_since_epoch_ms_i64(), - parent: get_new_id_str(&link_old.parent), - child: get_new_id_str(&link_old.child), - ..link_old.clone() - }; - upsert_db_entry_by_id_for_struct(&ctx, "nodeLinks".to_owned(), link.id.to_string(), link).await?; - } - log("part 6"); - for tag_old in subtree.nodeTags { - let mut tag = tag_old.clone(); - tag.id = get_new_id(&tag.id); - tag.creator = actor_id(); - tag.createdAt = time_since_epoch_ms_i64(); - - // todo: recreate the CalculateNodeIDsForTag function, so we don't need manual updating of the "nodes" field - - // for now, only transfer tags in the "basics" group like labels, and special-cases like clone-history tags (see MaybeCloneAndRetargetNodeTag func) - if let Some(labels) = tag.labels.as_mut() { - let old_nodeX_id = labels.nodeX.clone(); - labels.nodeX = get_new_id_str(&labels.nodeX); 
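The payload validation near the top of clone_subtree relies on the jsonschema crate: the schema is compiled once (via lazy_static) and applied with the "basic" output format, whose serialized form doubles as the returned error. A standalone sketch of the same check, using only the calls already present in the command (the function name validate_clone_payload is illustrative):

use jsonschema::{output::BasicOutput, JSONSchema};
use rust_shared::serde_json::{self, json, Value};

fn validate_clone_payload(payload: &Value) -> Result<(), Value> {
	// same schema shape as CLONE_SUBTREE_PAYLOAD_SCHEMA_JSON
	let schema = json!({
		"properties": {
			"parentNodeID": {"type": "string"},
			"rootNodeID": {"type": "string"},
			"maxDepth": {"type": "number"},
		},
		"required": ["parentNodeID", "rootNodeID", "maxDepth"],
	});
	let compiled = JSONSchema::compile(&schema).expect("A valid schema");
	let output: BasicOutput = compiled.apply(payload).basic();
	if output.is_valid() {
		Ok(())
	} else {
		// the serialized BasicOutput lists each failing keyword/path; that is what gets surfaced as the error
		Err(serde_json::to_value(output).expect("BasicOutput is serializable"))
	}
}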
- tag.nodes = tag.nodes.into_iter().map(|node_id| if node_id == old_nodeX_id { labels.nodeX.clone() } else { node_id }).collect_vec(); - } - // clone-history tags are a special case: clone+extend them if-and-only-if the result/final-entry is the old-node (preserving history without creating confusion) - if let Some(clone_history) = tag.cloneHistory.as_mut() { - let last_node_in_history = clone_history.cloneChain.last(); - if let Some(last_node_in_history) = last_node_in_history && ids.contains(last_node_in_history) { - let last_node_in_history_clone = last_node_in_history.clone(); - //let new_node_id = id_replacements.get(last_node_in_history).unwrap().to_owned(); - let new_node_id = get_new_id_str(last_node_in_history); - clone_history.cloneChain.push(new_node_id.clone()); - tag.nodes.push(new_node_id); - //nodes_needing_clone_history_tag.remove(&last_node_in_history); - nodes_still_needing_clone_history_tag.retain(|a| *a != last_node_in_history_clone); - } else { - // this tag marks the old-node merely as the source for a clone, which shouldn't transfer to new node, so skip this tag (ie. don't clone it) - continue; - } - } - - upsert_db_entry_by_id_for_struct(&ctx, "nodeTags".to_owned(), tag.id.to_string(), tag).await?; - } - log("part 6.5"); - for old_node_id in nodes_still_needing_clone_history_tag { - let tag = NodeTag { - id: ID(new_uuid_v4_as_b64()), - creator: actor_id(), - createdAt: time_since_epoch_ms_i64(), - nodes: vec![old_node_id.clone(), get_new_id_str(&old_node_id)], - cloneHistory: Some(TagComp_CloneHistory { - cloneChain: vec![old_node_id.clone(), get_new_id_str(&old_node_id)], - }), - labels: None, - mirrorChildrenFromXToY: None, - mutuallyExclusiveGroup: None, - restrictMirroringOfX: None, - xIsExtendedByY: None, - c_accessPolicyTargets: vec![], // auto-set by db - }; - upsert_db_entry_by_id_for_struct(&ctx, "nodeTags".to_owned(), tag.id.to_string(), tag).await?; - //nodes_needing_clone_history_tag.retain(|a| *a != old_node_id); - } - //nodes_still_needing_clone_history_tag.clear(); // commented; not needed, since contents already moved out of vector - - log("Committing transaction..."); - ctx.tx.commit().await?; - log("Clone-subtree command complete!"); - Ok(GenericMutation_Result { - message: "Command completed successfully.".to_owned(), - }) -} \ No newline at end of file + ctx.tx.execute("SET CONSTRAINTS ALL DEFERRED;", &[]).await?; + + // todo: make-so these new entries all have their "creator" field updated to the actual user that's doing the cloning + //let actor_id = SYSTEM_USER_ID.to_owned(); + let actor_id = || SYSTEM_USER_ID.to_owned(); + + log("part 1"); + // first, add a new link from the old-node's parent to the new-node (which we've generated an id for, and are about to construct) + let old_root_links = get_node_links(&ctx, Some(payload.parentNodeID.as_str()), Some(payload.rootNodeID.as_str())).await?; + let old_root_link = old_root_links.get(0).ok_or(anyhow!("No child-link found between provided root-node \"{}\" and parent \"{}\".", payload.rootNodeID, payload.parentNodeID))?; + let new_root_link = NodeLink { + id: ID(new_uuid_v4_as_b64()), + creator: actor_id(), + createdAt: time_since_epoch_ms_i64(), + //child: id_replacements.get(&payload.rootNodeID).ok_or(anyhow!("Generation of new id for clone of root-node failed somehow."))?.to_owned(), + child: get_new_id_str(&payload.rootNodeID), + ..old_root_link.clone() + }; + log("part 1.5"); + upsert_db_entry_by_id_for_struct(&ctx, "nodeLinks".to_owned(), new_root_link.id.to_string(), new_root_link).await?; 
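Before writing anything, clone_subtree generates a fresh id for every entity in the subtree, so each later pass (nodes, revisions, phrasings, links, tags) only rewrites cross-references through lookups; and because rows referencing those fresh ids are inserted in an arbitrary order inside the transaction, foreign-key checking is deferred to commit with SET CONSTRAINTS ALL DEFERRED. A small sketch of the remapping step (build_id_replacements is an illustrative name, not a function from this repo):

use rust_shared::indexmap::IndexMap;
use rust_shared::utils::db::uuid::new_uuid_v4_as_b64;

// One fresh id per old id; later passes rewrite references purely via lookups into this map.
fn build_id_replacements(old_ids: &[String]) -> IndexMap<String, String> {
	old_ids.iter().map(|old_id| (old_id.clone(), new_uuid_v4_as_b64())).collect()
}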
+ + log("part 2"); + //let mut nodes_needing_clone_history_tag: HashSet = HashSet::new(); + let mut nodes_still_needing_clone_history_tag: Vec = vec![]; + for node_old in subtree.nodes { + //nodes_needing_clone_history_tag.insert(node_old.id.to_string()); + nodes_still_needing_clone_history_tag.push(node_old.id.to_string()); + let node = Node { + id: get_new_id(&node_old.id), + creator: actor_id(), + createdAt: time_since_epoch_ms_i64(), + c_currentRevision: get_new_id_str(&node_old.c_currentRevision), + ..node_old.clone() + }; + upsert_db_entry_by_id_for_struct(&ctx, "nodes".to_owned(), node.id.to_string(), node).await?; + } + log("part 3"); + for rev_old in subtree.nodeRevisions { + let rev = NodeRevision { id: get_new_id(&rev_old.id), creator: actor_id(), createdAt: time_since_epoch_ms_i64(), node: get_new_id_str(&rev_old.node), ..rev_old.clone() }; + /*for attachment in &rev.attachments { + if let Some(media) = attachment.media { + } + }*/ + upsert_db_entry_by_id_for_struct(&ctx, "nodeRevisions".to_owned(), rev.id.to_string(), rev).await?; + } + log("part 4"); + for phrasing_old in subtree.nodePhrasings { + let phrasing = NodePhrasing { + id: get_new_id(&phrasing_old.id), + creator: actor_id(), + createdAt: time_since_epoch_ms_i64(), + node: get_new_id_str(&phrasing_old.node), + ..phrasing_old.clone() + }; + upsert_db_entry_by_id_for_struct(&ctx, "nodePhrasings".to_owned(), phrasing.id.to_string(), phrasing).await?; + } + log("part 5"); + for link_old in subtree.nodeLinks { + let link = NodeLink { + id: get_new_id(&link_old.id), + creator: actor_id(), + createdAt: time_since_epoch_ms_i64(), + parent: get_new_id_str(&link_old.parent), + child: get_new_id_str(&link_old.child), + ..link_old.clone() + }; + upsert_db_entry_by_id_for_struct(&ctx, "nodeLinks".to_owned(), link.id.to_string(), link).await?; + } + log("part 6"); + for tag_old in subtree.nodeTags { + let mut tag = tag_old.clone(); + tag.id = get_new_id(&tag.id); + tag.creator = actor_id(); + tag.createdAt = time_since_epoch_ms_i64(); + + // todo: recreate the CalculateNodeIDsForTag function, so we don't need manual updating of the "nodes" field + + // for now, only transfer tags in the "basics" group like labels, and special-cases like clone-history tags (see MaybeCloneAndRetargetNodeTag func) + if let Some(labels) = tag.labels.as_mut() { + let old_nodeX_id = labels.nodeX.clone(); + labels.nodeX = get_new_id_str(&labels.nodeX); + tag.nodes = tag.nodes.into_iter().map(|node_id| if node_id == old_nodeX_id { labels.nodeX.clone() } else { node_id }).collect_vec(); + } + // clone-history tags are a special case: clone+extend them if-and-only-if the result/final-entry is the old-node (preserving history without creating confusion) + if let Some(clone_history) = tag.cloneHistory.as_mut() { + let last_node_in_history = clone_history.cloneChain.last(); + if let Some(last_node_in_history) = last_node_in_history + && ids.contains(last_node_in_history) + { + let last_node_in_history_clone = last_node_in_history.clone(); + //let new_node_id = id_replacements.get(last_node_in_history).unwrap().to_owned(); + let new_node_id = get_new_id_str(last_node_in_history); + clone_history.cloneChain.push(new_node_id.clone()); + tag.nodes.push(new_node_id); + //nodes_needing_clone_history_tag.remove(&last_node_in_history); + nodes_still_needing_clone_history_tag.retain(|a| *a != last_node_in_history_clone); + } else { + // this tag marks the old-node merely as the source for a clone, which shouldn't transfer to new node, so skip this tag (ie. 
don't clone it) + continue; + } + } + + upsert_db_entry_by_id_for_struct(&ctx, "nodeTags".to_owned(), tag.id.to_string(), tag).await?; + } + log("part 6.5"); + for old_node_id in nodes_still_needing_clone_history_tag { + let tag = NodeTag { + id: ID(new_uuid_v4_as_b64()), + creator: actor_id(), + createdAt: time_since_epoch_ms_i64(), + nodes: vec![old_node_id.clone(), get_new_id_str(&old_node_id)], + cloneHistory: Some(TagComp_CloneHistory { cloneChain: vec![old_node_id.clone(), get_new_id_str(&old_node_id)] }), + labels: None, + mirrorChildrenFromXToY: None, + mutuallyExclusiveGroup: None, + restrictMirroringOfX: None, + xIsExtendedByY: None, + c_accessPolicyTargets: vec![], // auto-set by db + }; + upsert_db_entry_by_id_for_struct(&ctx, "nodeTags".to_owned(), tag.id.to_string(), tag).await?; + //nodes_needing_clone_history_tag.retain(|a| *a != old_node_id); + } + //nodes_still_needing_clone_history_tag.clear(); // commented; not needed, since contents already moved out of vector + + log("Committing transaction..."); + ctx.tx.commit().await?; + log("Clone-subtree command complete!"); + Ok(GenericMutation_Result { message: "Command completed successfully.".to_owned() }) +} diff --git a/Packages/app-server/src/db/commands/delete_access_policy.rs b/Packages/app-server/src/db/commands/delete_access_policy.rs index 446243f2f..c42292dad 100644 --- a/Packages/app-server/src/db/commands/delete_access_policy.rs +++ b/Packages/app-server/src/db/commands/delete_access_policy.rs @@ -1,37 +1,37 @@ use deadpool_postgres::tokio_postgres::GenericClient; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; use crate::db::access_policies_::_permission_set::APAction; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; -use crate::db::general::permission_helpers::{assert_user_can_delete}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; +use crate::db::general::permission_helpers::assert_user_can_delete; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::general::subtree_collector::params; use crate::db::user_hiddens::get_user_hiddens; use crate::db::users::User; -use crate::utils::db::accessors::{AccessorContext, get_db_entries}; +use crate::utils::db::accessors::{get_db_entries, AccessorContext}; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use 
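The nodeTags handling in clone_subtree treats cloneHistory tags specially: a tag is carried over (and its chain extended with the new node id) only when the last entry of its cloneChain is one of the nodes being cloned; otherwise the tag merely records that the old node was the source of some earlier clone, and it is skipped. A condensed sketch of that decision (extend_clone_chain is an illustrative helper, not code from this repo):

use std::collections::HashSet;

/// Returns true when the chain was extended (tag should be kept), false when the tag should be skipped.
fn extend_clone_chain(clone_chain: &mut Vec<String>, cloned_ids: &HashSet<String>, new_id_for: impl Fn(&str) -> String) -> bool {
	let new_id = match clone_chain.last() {
		Some(last) if cloned_ids.contains(last) => new_id_for(last.as_str()),
		_ => return false, // old node was merely the *source* of a clone; don't carry the tag over
	};
	clone_chain.push(new_id);
	true
}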
crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_DeleteAccessPolicy; #[Object] impl MutationShard_DeleteAccessPolicy { async fn delete_access_policy(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteAccessPolicyInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_access_policy); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -48,7 +48,7 @@ pub struct DeleteAccessPolicyResult { pub async fn delete_access_policy(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: DeleteAccessPolicyInput, _extras: NoExtras) -> Result { let DeleteAccessPolicyInput { id } = input; - + let old_data = get_access_policy(&ctx, &id).await?; //let rls_data = old_data.get_rls_data(); assert_user_can_delete(&ctx, &actor, &old_data).await?; @@ -62,4 +62,4 @@ pub async fn delete_access_policy(ctx: &AccessorContext<'_>, actor: &User, _is_r ctx.tx.query_raw(r#"UPDATE "userHiddens" as t1 SET "lastAccessPolicy" = NULL WHERE t1."lastAccessPolicy" = $1"#, params(&[&id])).await?; Ok(DeleteAccessPolicyResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_argument.rs b/Packages/app-server/src/db/commands/delete_argument.rs index 33ebb18d5..5f6c4cfd1 100644 --- a/Packages/app-server/src/db/commands/delete_argument.rs +++ b/Packages/app-server/src/db/commands/delete_argument.rs @@ -1,37 +1,37 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; use crate::db::general::permission_helpers::assert_user_can_delete; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::node_links::get_node_links; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; use super::_shared::increment_edit_counts::increment_edit_counts_if_valid; use super::delete_node::{delete_node, DeleteNodeInput}; -use 
super::delete_node_link::{DeleteNodeLinkInput, delete_node_link}; +use super::delete_node_link::{delete_node_link, DeleteNodeLinkInput}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_DeleteArgument; #[Object] impl MutationShard_DeleteArgument { async fn delete_argument(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteArgumentInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_argument); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -52,7 +52,7 @@ pub struct DeleteArgumentResult { // todo: eventually remove (or rework) this command, since unused pub async fn delete_argument(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, input: DeleteArgumentInput, _extras: NoExtras) -> Result { let DeleteArgumentInput { mapID, argumentID, claimID, deleteClaim } = input; - + if deleteClaim { delete_node(ctx, actor, false, DeleteNodeInput { mapID: None, nodeID: claimID }, Default::default()).await?; } else { @@ -68,4 +68,4 @@ pub async fn delete_argument(ctx: &AccessorContext<'_>, actor: &User, is_root: b increment_edit_counts_if_valid(&ctx, Some(actor), mapID, is_root).await?; Ok(DeleteArgumentResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_map.rs b/Packages/app-server/src/db/commands/delete_map.rs index 5dc2c7852..e167eb786 100644 --- a/Packages/app-server/src/db/commands/delete_map.rs +++ b/Packages/app-server/src/db/commands/delete_map.rs @@ -1,36 +1,36 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError, to_anyhow}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, to_anyhow, GQLError}; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; -use crate::db::commands::_shared::increment_edit_counts::{increment_map_edits, increment_edit_counts_if_valid}; -use crate::db::commands::delete_node::{DeleteNodeInput, delete_node}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; +use crate::db::commands::_shared::increment_edit_counts::{increment_edit_counts_if_valid, increment_map_edits}; +use crate::db::commands::delete_node::{delete_node, DeleteNodeInput}; use crate::db::general::permission_helpers::{assert_user_can_delete, is_user_creator_or_mod}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::maps::{Map, get_map}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::maps::{get_map, Map}; use crate::db::users::User; use 
crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; use super::delete_node::DeleteNodeExtras; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_DeleteMap; #[Object] impl MutationShard_DeleteMap { async fn delete_map(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteMapInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_map); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -47,7 +47,7 @@ pub struct DeleteMapResult { pub async fn delete_map(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: DeleteMapInput, _extras: NoExtras) -> Result { let DeleteMapInput { id } = input; - + let old_data = get_map(ctx, &id).await?; assert_user_can_delete(ctx, actor, &old_data).await?; @@ -57,4 +57,4 @@ pub async fn delete_map(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, delete_db_entry_by_id(ctx, "maps".to_owned(), id.to_string()).await?; Ok(DeleteMapResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_media.rs b/Packages/app-server/src/db/commands/delete_media.rs index fe63a2c70..14d4d1fd2 100644 --- a/Packages/app-server/src/db/commands/delete_media.rs +++ b/Packages/app-server/src/db/commands/delete_media.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; use crate::db::general::permission_helpers::assert_user_can_delete; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::medias::{Media, MediaInput, get_media}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::medias::{get_media, Media, MediaInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_DeleteMedia; #[Object] impl MutationShard_DeleteMedia { async fn delete_media(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteMediaInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_media); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -45,11 +45,11 @@ pub struct DeleteMediaResult { pub async fn delete_media(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: DeleteMediaInput, _extras: NoExtras) -> Result { let DeleteMediaInput { id } = input; - + let old_data = get_media(&ctx, &id).await?; assert_user_can_delete(&ctx, &actor, &old_data).await?; delete_db_entry_by_id(&ctx, "medias".to_owned(), id.to_string()).await?; Ok(DeleteMediaResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_node.rs b/Packages/app-server/src/db/commands/delete_node.rs index 765191eb4..70fe3b814 100644 --- a/Packages/app-server/src/db/commands/delete_node.rs +++ b/Packages/app-server/src/db/commands/delete_node.rs @@ -1,36 +1,36 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; -use tracing::{info, error}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; +use tracing::{error, info}; use crate::db::access_policies::get_access_policy; use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder}; -use crate::db::commands::_shared::increment_edit_counts::{increment_map_edits, increment_edit_counts_if_valid}; +use crate::db::commands::_shared::increment_edit_counts::{increment_edit_counts_if_valid, increment_map_edits}; use crate::db::general::permission_helpers::{assert_user_can_delete, is_user_creator_or_mod}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::node_links::get_node_links; -use crate::db::nodes::{get_node, is_root_node, assert_user_can_delete_node}; +use crate::db::nodes::{assert_user_can_delete_node, get_node, is_root_node}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, command_boilerplate}; +use super::_command::{command_boilerplate, upsert_db_entry_by_id_for_struct}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_DeleteNode; #[Object] impl MutationShard_DeleteNode { async fn delete_node(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteNodeInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_node); - } + } } @@ -56,32 +56,36 @@ pub struct DeleteNodeExtras { pub async fn delete_node(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, input: DeleteNodeInput, extras: DeleteNodeExtras) -> Result { let DeleteNodeInput { mapID, nodeID } = input; - + let old_data = get_node(&ctx, &nodeID).await?; assert_user_can_delete_node(&ctx, &actor, &old_data, extras.as_part_of_map_delete, vec![], vec![]).await?; - ctx.with_rls_disabled(|| async { - // first delete the rows in other tables that reference this node - // (will likely need to update this later, when completing permission system; have to decide how to handle deletion of node, when other users created linked phrasings, ratings, etc.) - // (first step will probably be adding a "soft delete" system, in place-of/addition-to the hard-delete behavior that currently occurs) - ctx.tx.execute(r#"DELETE FROM "nodePhrasings" WHERE node = $1"#, &[&nodeID]).await?; - ctx.tx.execute(r#"DELETE FROM "nodeRatings" WHERE node = $1"#, &[&nodeID]).await?; - ctx.tx.execute(r#"DELETE FROM "nodeLinks" WHERE parent = $1 OR child = $1"#, &[&nodeID]).await?; - ctx.tx.execute(r#"DELETE FROM "nodeLinks" WHERE parent = $1 OR child = $1"#, &[&nodeID]).await?; - ctx.tx.execute(r#"DELETE FROM "mapNodeEdits" WHERE node = $1"#, &[&nodeID]).await?; - ctx.tx.execute(r#"DELETE FROM "nodeRevisions" WHERE node = $1"#, &[&nodeID]).await?; - // todo: maybe change approach: rather than deleting associated command-runs, we leave them up, but just restrict access to admins only from this point forward - ctx.tx.execute(r#"DELETE FROM "commandRuns" WHERE $1 = ANY("c_involvedNodes")"#, &[&nodeID]).await?; - - // todo: for any tag where this node is a member, update it to remove this node's id from the `nodes` array (and possibly other fields too) - // todo: delete any tags for which this node is the only associated node - - Ok(()) - }, Some("Failed to delete data associated with node.")).await?; + ctx.with_rls_disabled( + || async { + // first delete the rows in other tables that reference this node + // (will likely need to update this later, when completing permission system; have to decide how to handle deletion of node, when other users created linked phrasings, ratings, etc.) 
+ // (first step will probably be adding a "soft delete" system, in place-of/addition-to the hard-delete behavior that currently occurs) + ctx.tx.execute(r#"DELETE FROM "nodePhrasings" WHERE node = $1"#, &[&nodeID]).await?; + ctx.tx.execute(r#"DELETE FROM "nodeRatings" WHERE node = $1"#, &[&nodeID]).await?; + ctx.tx.execute(r#"DELETE FROM "nodeLinks" WHERE parent = $1 OR child = $1"#, &[&nodeID]).await?; + ctx.tx.execute(r#"DELETE FROM "nodeLinks" WHERE parent = $1 OR child = $1"#, &[&nodeID]).await?; + ctx.tx.execute(r#"DELETE FROM "mapNodeEdits" WHERE node = $1"#, &[&nodeID]).await?; + ctx.tx.execute(r#"DELETE FROM "nodeRevisions" WHERE node = $1"#, &[&nodeID]).await?; + // todo: maybe change approach: rather than deleting associated command-runs, we leave them up, but just restrict access to admins only from this point forward + ctx.tx.execute(r#"DELETE FROM "commandRuns" WHERE $1 = ANY("c_involvedNodes")"#, &[&nodeID]).await?; + + // todo: for any tag where this node is a member, update it to remove this node's id from the `nodes` array (and possibly other fields too) + // todo: delete any tags for which this node is the only associated node + + Ok(()) + }, + Some("Failed to delete data associated with node."), + ) + .await?; delete_db_entry_by_id(&ctx, "nodes".to_owned(), nodeID.to_string()).await?; increment_edit_counts_if_valid(&ctx, Some(actor), mapID, is_root).await?; Ok(DeleteNodeResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_node_link.rs b/Packages/app-server/src/db/commands/delete_node_link.rs index 48296c274..81287ac5c 100644 --- a/Packages/app-server/src/db/commands/delete_node_link.rs +++ b/Packages/app-server/src/db/commands/delete_node_link.rs @@ -1,37 +1,37 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, ensure, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, ensure}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; use crate::db::commands::_shared::increment_edit_counts::increment_edit_counts_if_valid; use crate::db::general::permission_helpers::{assert_user_can_delete, is_user_creator_or_mod}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_links::{get_node_links, get_node_link}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_links::{get_node_link, get_node_links}; use crate::db::nodes::{get_node, 
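delete_node removes the rows that reference the node (with row-level security disabled for the duration) before deleting the node row itself and bumping the map's edit count. Condensed from the statements in the closure above, with $1 being the node id, the cleanup touches these tables in order:

// Dependent rows first, then the node itself (via delete_db_entry_by_id on "nodes").
const NODE_CLEANUP_SQL: &[&str] = &[
	r#"DELETE FROM "nodePhrasings" WHERE node = $1"#,
	r#"DELETE FROM "nodeRatings" WHERE node = $1"#,
	r#"DELETE FROM "nodeLinks" WHERE parent = $1 OR child = $1"#,
	r#"DELETE FROM "mapNodeEdits" WHERE node = $1"#,
	r#"DELETE FROM "nodeRevisions" WHERE node = $1"#,
	r#"DELETE FROM "commandRuns" WHERE $1 = ANY("c_involvedNodes")"#,
];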
is_root_node}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; use super::delete_node::{delete_node, DeleteNodeInput}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_DeleteNodeLink; #[Object] impl MutationShard_DeleteNodeLink { async fn delete_node_link(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteNodeLinkInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_node_link); - } + } } #[derive(InputObject, Serialize, Deserialize, Clone)] @@ -54,7 +54,7 @@ pub struct DeleteNodeLinkExtras { pub async fn delete_node_link(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, input: DeleteNodeLinkInput, _extras: NoExtras) -> Result { let DeleteNodeLinkInput { mapID, id } = input; - + /*let child_number_of_parents = get_node_links(ctx, None, Some(&childID)).await?.len(); let parent_to_child_links = get_node_links(ctx, Some(&parentID), Some(&childID)).await?; ensure!(parent_to_child_links.len() == 1, "There should be 1 and only 1 link between parent and child. Link count:{}", parent_to_child_links.len());*/ @@ -82,4 +82,4 @@ pub async fn delete_node_link(ctx: &AccessorContext<'_>, actor: &User, is_root: increment_edit_counts_if_valid(&ctx, Some(actor), mapID, is_root).await?; Ok(DeleteNodeLinkResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_node_phrasing.rs b/Packages/app-server/src/db/commands/delete_node_phrasing.rs index 34ddc0579..175de63eb 100644 --- a/Packages/app-server/src/db/commands/delete_node_phrasing.rs +++ b/Packages/app-server/src/db/commands/delete_node_phrasing.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; -use crate::db::general::permission_helpers::{assert_user_can_delete}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_phrasings::{NodePhrasing, NodePhrasingInput, get_node_phrasing}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; +use crate::db::general::permission_helpers::assert_user_can_delete; +use 
crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_phrasings::{get_node_phrasing, NodePhrasing, NodePhrasingInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_DeleteNodePhrasing; #[Object] impl MutationShard_DeleteNodePhrasing { async fn delete_node_phrasing(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteNodePhrasingInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_node_phrasing); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -45,11 +45,11 @@ pub struct DeleteNodePhrasingResult { pub async fn delete_node_phrasing(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: DeleteNodePhrasingInput, _extras: NoExtras) -> Result { let DeleteNodePhrasingInput { id } = input; - + let old_data = get_node_phrasing(&ctx, &id).await?; assert_user_can_delete(&ctx, &actor, &old_data).await?; delete_db_entry_by_id(&ctx, "nodePhrasings".to_owned(), id.to_string()).await?; Ok(DeleteNodePhrasingResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_node_rating.rs b/Packages/app-server/src/db/commands/delete_node_rating.rs index 71c19ab9c..8c78e6a71 100644 --- a/Packages/app-server/src/db/commands/delete_node_rating.rs +++ b/Packages/app-server/src/db/commands/delete_node_rating.rs @@ -1,35 +1,35 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; -use crate::db::general::permission_helpers::{assert_user_can_delete}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_ratings::{NodeRating, NodeRatingInput, get_node_rating}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; +use crate::db::general::permission_helpers::assert_user_can_delete; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_ratings::{get_node_rating, NodeRating, NodeRatingInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; 
+use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; use super::_shared::update_node_rating_summaries::update_node_rating_summaries; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_DeleteNodeRating; #[Object] impl MutationShard_DeleteNodeRating { async fn delete_node_rating(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteNodeRatingInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_node_rating); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -46,7 +46,7 @@ pub struct DeleteNodeRatingResult { pub async fn delete_node_rating(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: DeleteNodeRatingInput, _extras: NoExtras) -> Result { let DeleteNodeRatingInput { id } = input; - + let old_data = get_node_rating(&ctx, &id).await?; assert_user_can_delete(&ctx, &actor, &old_data).await?; @@ -55,4 +55,4 @@ pub async fn delete_node_rating(ctx: &AccessorContext<'_>, actor: &User, _is_roo update_node_rating_summaries(ctx, actor, old_data.node, old_data.r#type).await?; Ok(DeleteNodeRatingResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_node_revision.rs b/Packages/app-server/src/db/commands/delete_node_revision.rs index b93aadd59..34a796cc5 100644 --- a/Packages/app-server/src/db/commands/delete_node_revision.rs +++ b/Packages/app-server/src/db/commands/delete_node_revision.rs @@ -1,35 +1,35 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, bail, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, bail}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; -use crate::db::general::permission_helpers::{assert_user_can_delete}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_revisions::{NodeRevision, NodeRevisionInput, get_node_revision}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; +use crate::db::general::permission_helpers::assert_user_can_delete; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_revisions::{get_node_revision, NodeRevision, NodeRevisionInput}; use crate::db::nodes::get_node; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use 
crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_DeleteNodeRevision; #[Object] impl MutationShard_DeleteNodeRevision { async fn delete_node_revision(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteNodeRevisionInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_node_revision); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -46,11 +46,11 @@ pub struct DeleteNodeRevisionResult { pub async fn delete_node_revision(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: DeleteNodeRevisionInput, _extras: NoExtras) -> Result { let DeleteNodeRevisionInput { id } = input; - + let rev = get_node_revision(&ctx, &id).await?; assert_user_can_delete(&ctx, &actor, &rev).await?; delete_db_entry_by_id(&ctx, "nodeRevisions".to_owned(), id.to_string()).await?; Ok(DeleteNodeRevisionResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_node_tag.rs b/Packages/app-server/src/db/commands/delete_node_tag.rs index 29bee99c9..ceb7daa49 100644 --- a/Packages/app-server/src/db/commands/delete_node_tag.rs +++ b/Packages/app-server/src/db/commands/delete_node_tag.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; -use crate::db::general::permission_helpers::{assert_user_can_delete}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_tags::{NodeTag, NodeTagInput, get_node_tag}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; +use crate::db::general::permission_helpers::assert_user_can_delete; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_tags::{get_node_tag, NodeTag, NodeTagInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_DeleteNodeTag; #[Object] impl MutationShard_DeleteNodeTag { async fn delete_node_tag(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteNodeTagInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_node_tag); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -45,11 +45,11 @@ pub struct DeleteNodeTagResult { pub async fn delete_node_tag(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: DeleteNodeTagInput, _extras: NoExtras) -> Result { let DeleteNodeTagInput { id } = input; - + let old_data = get_node_tag(&ctx, &id).await?; assert_user_can_delete(&ctx, &actor, &old_data).await?; delete_db_entry_by_id(&ctx, "nodeTags".to_owned(), id.to_string()).await?; Ok(DeleteNodeTagResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_share.rs b/Packages/app-server/src/db/commands/delete_share.rs index dfc0be077..bd38f3666 100644 --- a/Packages/app-server/src/db/commands/delete_share.rs +++ b/Packages/app-server/src/db/commands/delete_share.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; -use crate::db::general::permission_helpers::{assert_user_can_delete}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::shares::{Share, ShareInput, get_share}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; +use crate::db::general::permission_helpers::assert_user_can_delete; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::shares::{get_share, Share, ShareInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_DeleteShare; #[Object] impl MutationShard_DeleteShare { async fn delete_share(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteShareInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_share); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -45,11 +45,11 @@ pub struct DeleteShareResult { pub async fn delete_share(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: DeleteShareInput, _extras: NoExtras) -> Result { let DeleteShareInput { id } = input; - + let old_data = get_share(&ctx, &id).await?; assert_user_can_delete(&ctx, &actor, &old_data).await?; delete_db_entry_by_id(&ctx, "shares".to_owned(), id.to_string()).await?; Ok(DeleteShareResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_term.rs b/Packages/app-server/src/db/commands/delete_term.rs index 54e2fa06a..44c4d876b 100644 --- a/Packages/app-server/src/db/commands/delete_term.rs +++ b/Packages/app-server/src/db/commands/delete_term.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; use crate::db::general::permission_helpers::assert_user_can_delete; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::terms::{Term, TermInput, get_term}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::terms::{get_term, Term, TermInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_DeleteTerm; #[Object] impl MutationShard_DeleteTerm { async fn delete_term(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteTermInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_term); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -45,11 +45,11 @@ pub struct DeleteTermResult { pub async fn delete_term(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: DeleteTermInput, _extras: NoExtras) -> Result { let DeleteTermInput { id } = input; - + let old_data = get_term(&ctx, &id).await?; assert_user_can_delete(&ctx, &actor, &old_data).await?; delete_db_entry_by_id(&ctx, "terms".to_owned(), id.to_string()).await?; Ok(DeleteTermResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_timeline.rs b/Packages/app-server/src/db/commands/delete_timeline.rs index f3b53978d..594be97d1 100644 --- a/Packages/app-server/src/db/commands/delete_timeline.rs +++ b/Packages/app-server/src/db/commands/delete_timeline.rs @@ -1,35 +1,35 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, bail, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, bail}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; use crate::db::general::permission_helpers::assert_user_can_delete; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::timeline_steps::get_timeline_steps; -use crate::db::timelines::{Timeline, TimelineInput, get_timeline}; +use crate::db::timelines::{get_timeline, Timeline, TimelineInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_DeleteTimeline; #[Object] impl MutationShard_DeleteTimeline { async fn delete_timeline(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteTimelineInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_timeline); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -46,7 +46,7 @@ pub struct DeleteTimelineResult { pub async fn delete_timeline(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: DeleteTimelineInput, _extras: NoExtras) -> Result { let DeleteTimelineInput { id } = input; - + let old_data = get_timeline(&ctx, &id).await?; assert_user_can_delete(&ctx, &actor, &old_data).await?; @@ -58,4 +58,4 @@ pub async fn delete_timeline(ctx: &AccessorContext<'_>, actor: &User, _is_root: delete_db_entry_by_id(&ctx, "timelines".to_owned(), id.to_string()).await?; Ok(DeleteTimelineResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/delete_timeline_step.rs b/Packages/app-server/src/db/commands/delete_timeline_step.rs index 9483e22a4..0c351cc0d 100644 --- a/Packages/app-server/src/db/commands/delete_timeline_step.rs +++ b/Packages/app-server/src/db/commands/delete_timeline_step.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder}; use crate::db::general::permission_helpers::assert_user_can_delete; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::timeline_steps::{TimelineStep, TimelineStepInput, get_timeline_step}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::timeline_steps::{get_timeline_step, TimelineStep, TimelineStepInput}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_DeleteTimelineStep; #[Object] impl MutationShard_DeleteTimelineStep { async fn delete_timeline_step(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteTimelineStepInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, delete_timeline_step); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -45,11 +45,11 @@ pub struct DeleteTimelineStepResult { pub async fn delete_timeline_step(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: DeleteTimelineStepInput, _extras: NoExtras) -> Result { let DeleteTimelineStepInput { id } = input; - + let old_data = get_timeline_step(&ctx, &id).await?; assert_user_can_delete(&ctx, &actor, &old_data).await?; delete_db_entry_by_id(&ctx, "timelineSteps".to_owned(), id.to_string()).await?; Ok(DeleteTimelineStepResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/import_firestore_dump.rs b/Packages/app-server/src/db/commands/import_firestore_dump.rs index 478eefdf0..538d45cc3 100644 --- a/Packages/app-server/src/db/commands/import_firestore_dump.rs +++ b/Packages/app-server/src/db/commands/import_firestore_dump.rs @@ -2,62 +2,62 @@ use std::collections::HashMap; use std::fmt::Debug; use std::fs; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; +use rust_shared::anyhow::{anyhow, bail, ensure, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; +use rust_shared::db_constants::{GLOBAL_MAP_ID, GLOBAL_ROOT_NODE_ID, SYSTEM_POLICY_PRIVATE_GOVERNED_NAME, SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME, SYSTEM_USER_EMAIL, SYSTEM_USER_ID}; use rust_shared::indexmap::IndexMap; use rust_shared::itertools::Itertools; use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::value::Index; use rust_shared::serde_json::{json, Value}; -use rust_shared::db_constants::{SYSTEM_USER_ID, GLOBAL_ROOT_NODE_ID, SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME, SYSTEM_USER_EMAIL, GLOBAL_MAP_ID, SYSTEM_POLICY_PRIVATE_GOVERNED_NAME}; use rust_shared::utils::general_::extensions::ToOwnedV; use rust_shared::utils::general_::serde::JSONValueV; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, ensure, bail, Context}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::{info, warn}; use crate::db::_shared::access_policy_target::AccessPolicyTarget; -use crate::db::_shared::attachments::{TermAttachment, Attachment, EquationAttachment, MediaAttachment, QuoteAttachment, ReferencesAttachment}; +use crate::db::_shared::attachments::{Attachment, EquationAttachment, MediaAttachment, QuoteAttachment, ReferencesAttachment, TermAttachment}; use crate::db::_shared::attachments_::source_chain::{source_chain_from_old_json_data, source_chains_from_old_json_data}; use crate::db::_shared::common_errors::err_should_be_populated; -use crate::db::_shared::table_permissions::{does_policy_allow_x, CanVote, CanAddChild}; +use crate::db::_shared::table_permissions::{does_policy_allow_x, CanAddChild, CanVote}; use crate::db::access_policies::{get_access_policy, 
get_system_access_policy}; use crate::db::access_policies_::_permission_set::{APAction, APTable}; use crate::db::commands::_command::{command_boilerplate, upsert_db_entry_by_id_for_struct}; use crate::db::general::permission_helpers::{assert_user_can_add_child, is_user_admin}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::maps::Map; use crate::db::medias::{Media, MediaType}; -use crate::db::node_links::{NodeLink, NodeLinkInput, get_node_links, ChildGroup, Polarity, ClaimForm}; +use crate::db::node_links::{get_node_links, ChildGroup, ClaimForm, NodeLink, NodeLinkInput, Polarity}; use crate::db::node_phrasings::{NodePhrasing, NodePhrasingType, NodePhrasing_Embedded}; use crate::db::node_ratings::NodeRating; use crate::db::node_ratings_::_node_rating_type::NodeRatingType; -use crate::db::node_revisions::{NodeRevision, ChildOrdering}; +use crate::db::node_revisions::{ChildOrdering, NodeRevision}; use crate::db::node_tags::NodeTag; use crate::db::nodes::get_node; -use crate::db::nodes_::_node::{Node, ArgumentType}; +use crate::db::nodes_::_node::{ArgumentType, Node}; use crate::db::nodes_::_node_type::{get_node_type_info, NodeType}; use crate::db::shares::{Share, ShareType}; use crate::db::terms::{Term, TermType}; -use crate::db::user_hiddens::{UserHidden, get_user_hiddens}; -use crate::db::users::{User, PermissionGroups, get_user, get_users}; +use crate::db::user_hiddens::{get_user_hiddens, UserHidden}; +use crate::db::users::{get_user, get_users, PermissionGroups, User}; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use crate::utils::general::order_key::OrderKey; use rust_shared::utils::db::uuid::{new_uuid_v4_as_b64, new_uuid_v4_as_b64_id}; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{insert_db_entry_by_id_for_struct, NoExtras, gql_placeholder}; +use super::_command::{gql_placeholder, insert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_ImportFirestoreDump; #[Object] impl MutationShard_ImportFirestoreDump { async fn import_firestore_dump(&self, gql_ctx: &async_graphql::Context<'_>, input: ImportFirestoreDumpInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, import_firestore_dump); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -76,29 +76,27 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ let ImportFirestoreDumpInput { placeholder: _placeholder } = input; ensure!(is_user_admin(actor), "Must be admin to call this endpoint."); // defer database's checking of foreign-key constraints until the end of the transaction (else would error) - ctx.tx.execute("SET CONSTRAINTS ALL DEFERRED;", &[]).await?; + ctx.tx.execute("SET CONSTRAINTS ALL DEFERRED;", &[]).await?; // just disable rls for whole command ctx.disable_rls().await?; let log = |text: &str| { info!("ImportFSLog: {text}"); - }; + }; log("Starting import of firestore dump..."); let default_policy_id = get_system_access_policy(ctx, SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME).await?.id.to_string(); let private_policy_id = get_system_access_policy(ctx, SYSTEM_POLICY_PRIVATE_GOVERNED_NAME).await?.id.to_string(); //let root: JSONValue = serde_json::from_str(include_str!("../../../@Temp_FirestoreImport.json"))?; - let data_str = fs::read_to_string("@Temp_FirestoreImport.json")? - .replace("GLOBAL_MAP_00000000001", "GLOBAL_MAP_00000000002") - .replace("GLOBAL_ROOT_0000000001", "GLOBAL_ROOT_0000000002"); + let data_str = fs::read_to_string("@Temp_FirestoreImport.json")?.replace("GLOBAL_MAP_00000000001", "GLOBAL_MAP_00000000002").replace("GLOBAL_ROOT_0000000001", "GLOBAL_ROOT_0000000002"); let root: JSONValue = serde_json::from_str(&data_str)?; let collections = root.try_get("__collections__")?.try_get("versions")?.try_get("v12-prod")?.try_get("__collections__")?; - + // id-replacements // commented; rather than using id-replacements for the two things that need changing (global-map-id and global-root-node-id), just use replace-all in json-str (less error-prone overall) - /*let mut id_replacements: IndexMap = IndexMap::new(); - let mut get_new_id_str = |old_id: &String| { + /*let mut id_replacements: IndexMap = IndexMap::new(); + let mut get_new_id_str = |old_id: &String| { if !id_replacements.contains_key(old_id) { id_replacements.insert(old_id.to_owned(), new_uuid_v4_as_b64()); } @@ -110,8 +108,8 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ } return old_id.o(); };*/ - //let mut get_new_id = |old_id: &ID| ID(get_new_id_str(&old_id.to_string())); - //let mut get_new_id = |old_id: &String| ID(get_new_id_str(old_id)); + //let mut get_new_id = |old_id: &ID| ID(get_new_id_str(&old_id.to_string())); + //let mut get_new_id = |old_id: &String| ID(get_new_id_str(old_id)); log("Processing users..."); // users @@ -153,7 +151,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ } subresult }; - let get_existing_user_structs_for_email = |email: &str| -> Option<(User, UserHidden)> { + let get_existing_user_structs_for_email = |email: &str| -> Option<(User, UserHidden)> { let user_hidden = existing_user_hiddens.iter().find(|a| a.email == email); match user_hidden { None => return None, @@ -163,7 +161,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ }, } }; - let final_user_id = |importing_user_id: &String| -> Result { + let final_user_id = |importing_user_id: 
&String| -> Result { //let email = importing_user_hiddens.get(importing_user_id).ok_or(anyhow!("Could not find importing-user-hidden for id:{}", importing_user_id))?.email.as_str(); let importing_user_hidden = match importing_user_hiddens.get(importing_user_id) { // there are some users where the user-hiddens entry is missing, so matching with existing users by email is impossible @@ -172,7 +170,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ None => return Ok(SYSTEM_USER_ID.o()), Some(a) => a, }; - + let email = importing_user_hidden.email.as_str(); let existing_user_structs_for_email = get_existing_user_structs_for_email(email); match existing_user_structs_for_email { @@ -228,7 +226,9 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ //let current_rev = node_revs.iter().find(|a| a.id.as_str() == ¤t_rev_id).unwrap(); let mut root_node_for_map = val.get("rootNodeForMap").map(|a| a.as_string()).unwrap_or(None); - if let Some(root_node_for_map_val) = &root_node_for_map && !importing_maps.contains_key(root_node_for_map_val) { + if let Some(root_node_for_map_val) = &root_node_for_map + && !importing_maps.contains_key(root_node_for_map_val) + { warn!("Encountered node whose rootNodeForMap references a map that no longer exists. @node_id:{} @map_id:{}", old_id, root_node_for_map_val); root_node_for_map = None; } @@ -238,7 +238,14 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ creator: final_user_id(&val.try_get("creator")?.try_as_string()?)?, createdAt: val.try_get("createdAt")?.try_as_i64()?, accessPolicy: default_policy_id.o(), - r#type: match val.try_get("type")?.try_as_i64()? { 10 => NodeType::category, 20 => NodeType::package, 30 => NodeType::multiChoiceQuestion, 40 => NodeType::claim, 50 => NodeType::argument, _ => bail!("Invalid node type") }, + r#type: match val.try_get("type")?.try_as_i64()? { + 10 => NodeType::category, + 20 => NodeType::package, + 30 => NodeType::multiChoiceQuestion, + 40 => NodeType::claim, + 50 => NodeType::argument, + _ => bail!("Invalid node type"), + }, rootNodeForMap: root_node_for_map, c_currentRevision: current_rev_id, multiPremiseArgument: val.get("multiPremiseArgument").map(|a| a.as_bool()).unwrap_or(None), @@ -247,7 +254,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ Some(15) => Some(ArgumentType::anyTwo), Some(20) => Some(ArgumentType::all), None => None, - _ => bail!("Invalid argument type") + _ => bail!("Invalid argument type"), }, extras: json!({}), }; @@ -255,7 +262,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ importing_nodes.insert(old_id.o(), entry); } - + log("Importing medias..."); for (old_id, val) in collections.try_get("medias")?.try_as_object()? { let entry = Media { @@ -264,13 +271,17 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ createdAt: val.try_get("createdAt")?.try_as_i64()?, accessPolicy: default_policy_id.o(), name: val.try_get("name")?.try_as_string()?, - r#type: match val.try_get("type")?.try_as_i64()? { 10 => MediaType::image, 20 => MediaType::video, _ => bail!("Invalid media type") }, + r#type: match val.try_get("type")?.try_as_i64()? 
{ + 10 => MediaType::image, + 20 => MediaType::video, + _ => bail!("Invalid media type"), + }, url: val.try_get("url")?.try_as_string()?, description: val.try_get("description")?.try_as_string()?, }; insert_db_entry_by_id_for_struct(ctx, "medias".o(), entry.id.to_string(), entry).await?; } - + log("Importing nodePhrasings..."); for (old_id, val) in collections.try_get("nodePhrasings")?.try_as_object()? { let entry = NodePhrasing { @@ -278,10 +289,14 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ creator: final_user_id(&val.try_get("creator")?.try_as_string()?)?, createdAt: val.try_get("createdAt")?.try_as_i64()?, node: val.try_get("node")?.try_as_string()?, - r#type: match val.try_get("type")?.try_as_i64()? { 10 => NodePhrasingType::technical, 20 => NodePhrasingType::standard, _ => bail!("Invalid phrasing type") }, + r#type: match val.try_get("type")?.try_as_i64()? { + 10 => NodePhrasingType::technical, + 20 => NodePhrasingType::standard, + _ => bail!("Invalid phrasing type"), + }, text_base: val.try_get("text")?.try_as_string()?, - text_negation: None, //val.get("text_negation").map(|a| a.as_string()).unwrap_or(None), - text_question: None, //val.get("text_question").map(|a| a.as_string()).unwrap_or(None), + text_negation: None, //val.get("text_negation").map(|a| a.as_string()).unwrap_or(None), + text_question: None, //val.get("text_question").map(|a| a.as_string()).unwrap_or(None), text_narrative: None, //val.get("text_narrative").map(|a| a.as_string()).unwrap_or(None), note: val.get("description").map(|a| a.as_string()).unwrap_or(None), terms: vec![], @@ -295,7 +310,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ insert_db_entry_by_id_for_struct(ctx, "nodePhrasings".o(), entry.id.to_string(), entry).await?; } - + log("Importing nodeRatings..."); for (old_id, val) in collections.try_get("nodeRatings")?.try_as_object()? { let entry = NodeRating { @@ -310,7 +325,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ "truth" => NodeRatingType::truth, "relevance" => NodeRatingType::relevance, "impact" => NodeRatingType::impact, - _ => bail!("Invalid rating type") + _ => bail!("Invalid rating type"), }, value: val.try_get("value")?.try_as_f64()?, c_accessPolicyTargets: vec![], @@ -339,7 +354,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ cloneHistory: None, c_accessPolicyTargets: vec![], }; - + // exclude node-tags whose associated nodes are all non-existent if entry.nodes.iter().all(|a| !importing_nodes.contains_key(a)) { warn!("Encountered node-tag whose associated nodes all no longer exist. 
@tag_id:{} Skipping...", entry.id.to_string()); @@ -358,9 +373,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ let mut child_ids_ordered = children_map.keys().cloned().collect_vec(); let children_order_vec: Option> = parent_raw_data.get("childrenOrder").and_then(|a| Some(serde_json::from_value(a.clone()).unwrap())).unwrap_or(None); if let Some(children_order) = children_order_vec { - child_ids_ordered.sort_by_cached_key(|id| { - children_order.iter().position(|a| a == id).unwrap_or(1000) - }); + child_ids_ordered.sort_by_cached_key(|id| children_order.iter().position(|a| a == id).unwrap_or(1000)); } let mut child_order_keys: HashMap = HashMap::new(); for (i, child_id) in child_ids_ordered.iter().enumerate() { @@ -395,7 +408,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ Some(20) => Some(ClaimForm::negation), Some(30) => Some(ClaimForm::question), None => None, - _ => bail!("Invalid form") + _ => bail!("Invalid form"), }, seriesAnchor: link_info.get("seriesEnd").map(|a| a.as_bool()).unwrap_or(None), seriesEnd: None, @@ -403,7 +416,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ Some(10) => Some(Polarity::supporting), Some(20) => Some(Polarity::opposing), None => None, - _ => bail!("Invalid polarity") + _ => bail!("Invalid polarity"), }, c_parentType: parent.r#type.clone(), c_childType: child.r#type.clone(), @@ -417,7 +430,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ insert_db_entry_by_id_for_struct(ctx, "nodeLinks".o(), link.id.to_string(), link).await?; } } - + log("Importing node-revisions..."); for (old_id, val) in collections.try_get("nodeRevisions")?.try_as_object()? { let node_id = val.try_get("node")?.try_as_string()?; @@ -432,8 +445,11 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ //let is_latest_revision = collections.try_get("nodes")?.try_as_object()?.values().any(|a| a.get("currentRevision").and_then(|a| a.as_string()) == Some(old_id.o())); let is_latest_revision = node.c_currentRevision == old_id.o(); - if !is_latest_revision { continue; } + if !is_latest_revision { + continue; + } + #[rustfmt::skip] let entry = NodeRevision { id: ID(old_id.o()), creator: final_user_id(&val.try_get("creator")?.try_as_string()?)?, @@ -508,7 +524,14 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ name: val.try_get("name")?.try_as_string()?, forms: val.try_get("forms")?.try_as_array()?.into_iter().map(|a| a.try_as_string().unwrap()).collect_vec(), disambiguation: val.get("disambiguation").map(|a| a.as_string()).unwrap_or(None), - r#type: match val.try_get("type")?.try_as_i64()? { 10 => TermType::commonNoun, 20 => TermType::properNoun, 30 => TermType::adjective, 40 => TermType::verb, 50 => TermType::adverb, _ => bail!("Invalid term type") }, + r#type: match val.try_get("type")?.try_as_i64()? 
{ + 10 => TermType::commonNoun, + 20 => TermType::properNoun, + 30 => TermType::adjective, + 40 => TermType::verb, + 50 => TermType::adverb, + _ => bail!("Invalid term type"), + }, definition: val.try_get("definition")?.try_as_string()?, note: val.get("note").map(|a| a.as_string()).unwrap_or(None), attachments: vec![], @@ -524,7 +547,7 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ None => { warn!("Importing-user-hidden data not found for user: {} (references replaced with refs to system-user-id) @edits:{}", old_id, importing_user.edits); continue; - } + }, }; let email = importing_user_hidden.email.as_str(); @@ -550,4 +573,4 @@ pub async fn import_firestore_dump(ctx: &AccessorContext<'_>, actor: &User, _is_ log("Done!"); Ok(ImportFirestoreDumpResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/link_node.rs b/Packages/app-server/src/db/commands/link_node.rs index 6ddff80db..871c7d24f 100644 --- a/Packages/app-server/src/db/commands/link_node.rs +++ b/Packages/app-server/src/db/commands/link_node.rs @@ -1,51 +1,51 @@ -use std::fmt::{Formatter, Display}; +use std::fmt::{Display, Formatter}; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, ensure, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; use rust_shared::utils::general_::extensions::ToOwnedV; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError, to_anyhow}; -use rust_shared::async_graphql::{Object}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, Context, ensure}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, to_anyhow, GQLError}; use tracing::info; -use crate::db::_shared::path_finder::{search_up_from_node_for_node_matching_x, id_is_of_node_that_is_root_of_map}; +use crate::db::_shared::path_finder::{id_is_of_node_that_is_root_of_map, search_up_from_node_for_node_matching_x}; use crate::db::commands::_command::{command_boilerplate, CanOmit}; -use crate::db::commands::add_node_link::{AddNodeLinkInput, add_node_link}; +use crate::db::commands::add_node_link::{add_node_link, AddNodeLinkInput}; use crate::db::commands::delete_node::{delete_node, DeleteNodeInput}; use crate::db::commands::delete_node_link::{self, delete_node_link, DeleteNodeLinkInput}; use crate::db::general::permission_helpers::assert_user_can_add_child; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_links::{NodeLinkInput, ClaimForm, ChildGroup, Polarity, get_node_links, get_first_link_under_parent, get_highest_order_key_under_parent}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_links::{get_first_link_under_parent, get_highest_order_key_under_parent, get_node_links, ChildGroup, ClaimForm, NodeLinkInput, Polarity}; use crate::db::node_links_::node_link_validity::assert_link_is_valid; use 
crate::db::node_phrasings::NodePhrasing_Embedded; use crate::db::node_revisions::{NodeRevision, NodeRevisionInput}; use crate::db::nodes::get_node; -use crate::db::nodes_::_node::{NodeInput, ArgumentType}; +use crate::db::nodes_::_node::{ArgumentType, NodeInput}; use crate::db::nodes_::_node_type::NodeType; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use crate::utils::general::order_key::OrderKey; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, tbd}; +use super::_command::{tbd, upsert_db_entry_by_id_for_struct, NoExtras}; use super::_shared::add_node::add_node; use super::_shared::increment_edit_counts::increment_edit_counts_if_valid; use super::add_child_node::{add_child_node, AddChildNodeInput}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_LinkNode; #[Object] impl MutationShard_LinkNode { /// This is a higher-level wrapper around `addNodeLink`, which handles unlinking from old parent (if requested), etc. async fn link_node(&self, gql_ctx: &async_graphql::Context<'_>, input: LinkNodeInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, link_node); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -77,7 +77,7 @@ pub async fn link_node(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, i let LinkNodeInput { mapID, oldParentID, newParentID, nodeID, childGroup, newForm, newPolarity, unlinkFromOldParent, deleteEmptyArgumentWrapper } = input; let unlink_from_old_parent = unlinkFromOldParent.unwrap_or(false); let delete_empty_argument_wrapper = deleteEmptyArgumentWrapper.unwrap_or(false); - + let node_data = get_node(ctx, &nodeID).await?; //let old_parent = oldParentID.map_or(async { None }, |a| get_node(ctx, &a)).await?; let old_parent = if let Some(oldParentID) = &oldParentID { Some(get_node(ctx, oldParentID).await?) } else { None }; @@ -88,7 +88,7 @@ pub async fn link_node(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, i get_highest_order_key_under_parent(ctx, Some(&newParentID)).await?.next()? }; - assert_user_can_add_child(ctx, actor, &new_parent).await?; // defensive + assert_user_can_add_child(ctx, actor, &new_parent).await?; // defensive let pasting_premise_as_relevance_arg = node_data.r#type == NodeType::claim && childGroup == ChildGroup::relevance; ensure!(oldParentID.as_ref() != Some(&newParentID) || pasting_premise_as_relevance_arg, "Old-parent-id and new-parent-id cannot be the same! 
(unless changing between truth-arg and relevance-arg)"); @@ -107,8 +107,7 @@ pub async fn link_node(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, i let add_arg_wrapper_result = match wrapper_arg_needed { false => None, true => { - ensure!(childGroup == ChildGroup::relevance || childGroup == ChildGroup::truth, - "Claim is being linked under parent that requires a wrapper-argument, but the specified child-group ({childGroup:?}) is incompatible with that."); + ensure!(childGroup == ChildGroup::relevance || childGroup == ChildGroup::truth, "Claim is being linked under parent that requires a wrapper-argument, but the specified child-group ({childGroup:?}) is incompatible with that."); let new_polarity = newPolarity.unwrap_or(Polarity::supporting); // if new-polarity isn't supplied, just default to Supporting (this can happen if a claim is copied from search-results) let argument_wrapper = NodeInput { @@ -121,10 +120,7 @@ pub async fn link_node(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, i //extras: json!({}), extras: CanOmit::None, }; - let argument_wrapper_revision = NodeRevisionInput { - phrasing: NodePhrasing_Embedded { text_base: "".o(), ..Default::default() }, - ..Default::default() - }; + let argument_wrapper_revision = NodeRevisionInput { phrasing: NodePhrasing_Embedded { text_base: "".o(), ..Default::default() }, ..Default::default() }; let argument_wrapper_link = NodeLinkInput { group: childGroup, orderKey: order_key_for_outer_node.clone(), @@ -147,6 +143,7 @@ pub async fn link_node(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, i NodeType::claim => newPolarity, // note: this case *should* only be happening for clients that are in "sl mode" (since debate-map standard doesn't want these claim->claim truth links) _ => None, }; + #[rustfmt::skip] add_node_link(ctx, actor, false, AddNodeLinkInput { //mapID, link: NodeLinkInput { @@ -173,12 +170,10 @@ pub async fn link_node(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, i delete_node(ctx, actor, false, DeleteNodeInput { mapID: None, nodeID: old_parent.id.to_string() }, Default::default()).await?; } } - + increment_edit_counts_if_valid(&ctx, Some(actor), mapID, is_root).await?; - Ok(LinkNodeResult { - argumentWrapperID: add_arg_wrapper_result.map(|a| a.nodeID), - }) + Ok(LinkNodeResult { argumentWrapperID: add_arg_wrapper_result.map(|a| a.nodeID) }) } pub fn is_wrapper_arg_needed_for_transfer(parent_type: NodeType, parent_child_group: ChildGroup, transfer_node_type: NodeType, transfer_node_child_group: Option) -> bool { @@ -186,4 +181,4 @@ pub fn is_wrapper_arg_needed_for_transfer(parent_type: NodeType, parent_child_gr let wrapper_arg_would_be_valid_in_parent = assert_link_is_valid(parent_type, NodeType::argument, parent_child_group, None).is_ok(); let transfer_node_can_be_placed_in_wrapper_arg = transfer_node_type == NodeType::claim && (transfer_node_child_group.is_none() || transfer_node_child_group == Some(ChildGroup::generic)); !transfer_node_is_valid_already && wrapper_arg_would_be_valid_in_parent && transfer_node_can_be_placed_in_wrapper_arg -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/refresh_lq_data.rs b/Packages/app-server/src/db/commands/refresh_lq_data.rs index 361448d9f..f6449b6a5 100644 --- a/Packages/app-server/src/db/commands/refresh_lq_data.rs +++ b/Packages/app-server/src/db/commands/refresh_lq_data.rs @@ -1,51 +1,52 @@ +use deadpool_postgres::Pool; +use jsonschema::{output::BasicOutput, JSONSchema}; +use lazy_static::lazy_static; use 
rust_shared::anyhow::{anyhow, Error}; -use jsonschema::{JSONSchema, output::BasicOutput}; -use rust_shared::{async_graphql, serde, serde_json}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; -use lazy_static::lazy_static; -use deadpool_postgres::Pool; use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{async_graphql, serde, serde_json}; -use crate::store::storage::{AppStateArc, get_app_state_from_gql_ctx}; -use crate::{db::{_general::GenericMutation_Result, nodes::get_node}, store::live_queries::LQStorageArc}; +use crate::store::storage::{get_app_state_from_gql_ctx, AppStateArc}; +use crate::{ + db::{_general::GenericMutation_Result, nodes::get_node}, + store::live_queries::LQStorageArc, +}; #[derive(Serialize, Deserialize, Debug)] //#[serde(crate = "rust_shared::serde")] pub struct RefreshLQDataPayload { collection: String, - entryID: String, + entryID: String, } lazy_static! { - static ref REFRESH_LQ_DATA_PAYLOAD_SCHEMA_JSON: JSONValue = json!({ - "properties": { - "collection": {"type": "string"}, - "entryID": {"type": "string"}, - }, - "required": ["collection", "entryID"], - }); - static ref REFRESH_LQ_DATA_PAYLOAD_SCHEMA_JSON_COMPILED: JSONSchema = JSONSchema::compile(&REFRESH_LQ_DATA_PAYLOAD_SCHEMA_JSON).expect("A valid schema"); + static ref REFRESH_LQ_DATA_PAYLOAD_SCHEMA_JSON: JSONValue = json!({ + "properties": { + "collection": {"type": "string"}, + "entryID": {"type": "string"}, + }, + "required": ["collection", "entryID"], + }); + static ref REFRESH_LQ_DATA_PAYLOAD_SCHEMA_JSON_COMPILED: JSONSchema = JSONSchema::compile(&REFRESH_LQ_DATA_PAYLOAD_SCHEMA_JSON).expect("A valid schema"); } pub async fn refresh_lq_data(ctx: &async_graphql::Context<'_>, payload_raw: JSONValue) -> Result { - let output: BasicOutput = REFRESH_LQ_DATA_PAYLOAD_SCHEMA_JSON_COMPILED.apply(&payload_raw).basic(); - if !output.is_valid() { - let output_json = serde_json::to_value(output).expect("Failed to serialize output"); - return Err(anyhow!(output_json)); - } - let payload: RefreshLQDataPayload = serde_json::from_value(payload_raw)?; - /*let filter = json!({ - "id": {"equalTo": payload.entryID} - });*/ + let output: BasicOutput = REFRESH_LQ_DATA_PAYLOAD_SCHEMA_JSON_COMPILED.apply(&payload_raw).basic(); + if !output.is_valid() { + let output_json = serde_json::to_value(output).expect("Failed to serialize output"); + return Err(anyhow!(output_json)); + } + let payload: RefreshLQDataPayload = serde_json::from_value(payload_raw)?; + /*let filter = json!({ + "id": {"equalTo": payload.entryID} + });*/ - let lq_storage = { - let ctx2 = ctx; // move ctx, so we know this block is the last usage of it - let app_state = get_app_state_from_gql_ctx(ctx2); - app_state.live_queries.clone() - }; + let lq_storage = { + let ctx2 = ctx; // move ctx, so we know this block is the last usage of it + let app_state = get_app_state_from_gql_ctx(ctx2); + app_state.live_queries.clone() + }; - lq_storage.refresh_lq_data(payload.collection, payload.entryID).await?; - - Ok(GenericMutation_Result { - message: "Command completed successfully.".to_owned(), - }) -} \ No newline at end of file + lq_storage.refresh_lq_data(payload.collection, payload.entryID).await?; + + Ok(GenericMutation_Result { message: "Command completed successfully.".to_owned() }) +} diff --git a/Packages/app-server/src/db/commands/run_command_batch.rs b/Packages/app-server/src/db/commands/run_command_batch.rs index 702071176..0a3e2336a 100644 --- 
a/Packages/app-server/src/db/commands/run_command_batch.rs +++ b/Packages/app-server/src/db/commands/run_command_batch.rs @@ -1,19 +1,19 @@ -use std::fmt::{Formatter, Display}; +use std::fmt::{Display, Formatter}; use futures_util::Stream; use futures_util::StreamExt; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject, Subscription, async_stream}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, bail, ensure, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{async_stream, InputObject, SimpleObject, Subscription, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; use rust_shared::utils::general_::extensions::ToOwnedV; use rust_shared::utils::general_::serde::to_json_value_for_borrowed_obj; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError, to_anyhow, SubError, to_sub_err}; -use rust_shared::async_graphql::{Object}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, Context, ensure, bail}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, to_anyhow, to_sub_err, GQLError, SubError}; use tracing::info; use crate::db::commands::_command::command_boilerplate; @@ -24,126 +24,126 @@ use crate::db::commands::delete_node::delete_node; use crate::db::commands::delete_node::DeleteNodeExtras; use crate::db::commands::delete_node_link::delete_node_link; use crate::db::commands::update_node::update_node; -use crate::db::general::permission_helpers::{assert_user_can_add_phrasing, assert_user_can_add_child}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_links::{NodeLinkInput, NodeLink}; +use crate::db::general::permission_helpers::{assert_user_can_add_child, assert_user_can_add_phrasing}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_links::{NodeLink, NodeLinkInput}; use crate::db::node_phrasings::NodePhrasing_Embedded; use crate::db::node_revisions::{NodeRevision, NodeRevisionInput}; use crate::db::nodes::get_node; -use crate::db::nodes_::_node::{NodeInput}; +use crate::db::nodes_::_node::NodeInput; use crate::db::users::User; -use crate::utils::db::accessors::{AccessorContext, trigger_deferred_constraints}; +use crate::utils::db::accessors::{trigger_deferred_constraints, AccessorContext}; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, tbd}; +use super::_command::{tbd, upsert_db_entry_by_id_for_struct, NoExtras}; use super::_shared::add_node::add_node; -use super::add_child_node::{AddChildNodeInput, add_child_node, AddChildNodeExtras}; +use super::add_child_node::{add_child_node, AddChildNodeExtras, AddChildNodeInput}; use super::delete_node::DeleteNodeInput; use super::delete_node_link::DeleteNodeLinkInput; use super::update_node::UpdateNodeInput; -wrap_slow_macros!{ +wrap_slow_macros! 
{ /*#[derive(Default)] pub struct MutationShard_RunCommandBatch; #[Object] impl MutationShard_RunCommandBatch { - async fn run_command_batch(&self, gql_ctx: &async_graphql::Context<'_>, input: RunCommandBatchInput, only_validate: Option) -> Result { - command_boilerplate!(gql_ctx, input, only_validate, run_command_batch); - } + async fn run_command_batch(&self, gql_ctx: &async_graphql::Context<'_>, input: RunCommandBatchInput, only_validate: Option) -> Result { + command_boilerplate!(gql_ctx, input, only_validate, run_command_batch); + } - // todo: probably add another version of run-command-batch that works in "streaming mode", ie. using a graphql "subscription" to let the caller know how the batch's execution is progressing + // todo: probably add another version of run-command-batch that works in "streaming mode", ie. using a graphql "subscription" to let the caller know how the batch's execution is progressing }*/ #[derive(Default)] pub struct SubscriptionShard_RunCommandBatch; #[Subscription] impl SubscriptionShard_RunCommandBatch { - async fn run_command_batch<'a>(&self, gql_ctx: &'a async_graphql::Context<'a>, input: RunCommandBatchInput) -> impl Stream> + 'a { - async_stream::stream! { - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_write_advanced(&mut anchor, gql_ctx, false, Some(false)).await.map_err(to_sub_err)?; - let actor = get_user_info_from_gql_ctx(gql_ctx, &ctx).await.map_err(to_sub_err)?; - - let mut last_subcommand_results: Vec = Vec::new(); - { - let mut stream = Box::pin(run_command_batch(&ctx, &actor, true, input.clone(), NoExtras::default()).await); - while let Some(batch_result_so_far) = stream.next().await { - let batch_result_so_far = batch_result_so_far?; - last_subcommand_results = batch_result_so_far.results.clone(); - yield Ok(batch_result_so_far); - } - } - - ctx.tx.commit().await.map_err(to_sub_err)?; - tracing::info!("Command-batch execution completed. @CommandCount:{}", input.commands.len()); - yield Ok(RunCommandBatchResult { results: last_subcommand_results, committed: true }); - } - } + async fn run_command_batch<'a>(&self, gql_ctx: &'a async_graphql::Context<'a>, input: RunCommandBatchInput) -> impl Stream> + 'a { + async_stream::stream! { + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_write_advanced(&mut anchor, gql_ctx, false, Some(false)).await.map_err(to_sub_err)?; + let actor = get_user_info_from_gql_ctx(gql_ctx, &ctx).await.map_err(to_sub_err)?; + + let mut last_subcommand_results: Vec = Vec::new(); + { + let mut stream = Box::pin(run_command_batch(&ctx, &actor, true, input.clone(), NoExtras::default()).await); + while let Some(batch_result_so_far) = stream.next().await { + let batch_result_so_far = batch_result_so_far?; + last_subcommand_results = batch_result_so_far.results.clone(); + yield Ok(batch_result_so_far); + } + } + + ctx.tx.commit().await.map_err(to_sub_err)?; + tracing::info!("Command-batch execution completed. @CommandCount:{}", input.commands.len()); + yield Ok(RunCommandBatchResult { results: last_subcommand_results, committed: true }); + } + } } pub async fn run_command_batch<'a>(ctx: &'a AccessorContext<'_>, actor: &'a User, _is_root: bool, input: RunCommandBatchInput, _extras: NoExtras) -> impl Stream> + 'a { - async_stream::stream! 
{ - let RunCommandBatchInput { commands } = input; - - let mut command_results: Vec = Vec::new(); - for (index, command) in commands.iter().enumerate() { - if let Some(command_input) = &command.addChildNode { - let mut command_input_final = command_input.clone(); - - // allow add-child-node commands to set some of their fields based on the results of prior commands in the batch (eg. for importing a tree of nodes) - if let Some(parent_source_index) = command.setParentNodeToResultOfCommandAtIndex { - let earlier_command = commands.get(parent_source_index).ok_or_else(|| anyhow!("Command #{} referred to a command index that doesn't exist.", index)).map_err(to_sub_err)?; - let earlier_command_result = command_results.get(parent_source_index as usize).ok_or_else(|| anyhow!("Command #{} referred to a command-result index that doesn't yet exist.", index)).map_err(to_sub_err)?; - if earlier_command.addChildNode.is_some() { - let earlier_command_result_node_id = earlier_command_result.get("nodeID").and_then(|a| a.as_str()).ok_or_else(|| anyhow!("Add-child-node command's result (index #{}) lacks a nodeID field!", index)).map_err(to_sub_err)?; - command_input_final.parentID = earlier_command_result_node_id.to_owned(); - } else { - Err(anyhow!("Command #{} referred to a command index that doesn't have a recognized subfield.", index)).map_err(to_sub_err)?; - } - } - - let result = add_child_node(&ctx, &actor, false, command_input_final, AddChildNodeExtras { avoid_recording_command_run: true }).await.map_err(to_sub_err)?; - command_results.push(serde_json::to_value(result).map_err(to_sub_err)?); - } else if let Some(command_input) = &command.updateNode { - let command_input_final = command_input.clone(); - let result = update_node(&ctx, &actor, false, command_input_final, NoExtras::default()).await.map_err(to_sub_err)?; - command_results.push(serde_json::to_value(result).map_err(to_sub_err)?); - } else if let Some(command_input) = &command.deleteNode { - let command_input_final = command_input.clone(); - let result = delete_node(&ctx, &actor, false, command_input_final, DeleteNodeExtras { as_part_of_map_delete: false }).await.map_err(to_sub_err)?; - command_results.push(serde_json::to_value(result).map_err(to_sub_err)?); - } else if let Some(command_input) = &command.deleteNodeLink { - let command_input_final = command_input.clone(); - let result = delete_node_link(&ctx, &actor, false, command_input_final, NoExtras::default()).await.map_err(to_sub_err)?; - command_results.push(serde_json::to_value(result).map_err(to_sub_err)?); - } else { - Err(anyhow!("Command #{} had no recognized command subfield.", index)).map_err(to_sub_err)?; - } - - // after each command completion, send the results obtained so far to the client - yield Ok(RunCommandBatchResult { results: command_results.clone(), committed: false }); - } - } + async_stream::stream! { + let RunCommandBatchInput { commands } = input; + + let mut command_results: Vec = Vec::new(); + for (index, command) in commands.iter().enumerate() { + if let Some(command_input) = &command.addChildNode { + let mut command_input_final = command_input.clone(); + + // allow add-child-node commands to set some of their fields based on the results of prior commands in the batch (eg. 
for importing a tree of nodes) + if let Some(parent_source_index) = command.setParentNodeToResultOfCommandAtIndex { + let earlier_command = commands.get(parent_source_index).ok_or_else(|| anyhow!("Command #{} referred to a command index that doesn't exist.", index)).map_err(to_sub_err)?; + let earlier_command_result = command_results.get(parent_source_index as usize).ok_or_else(|| anyhow!("Command #{} referred to a command-result index that doesn't yet exist.", index)).map_err(to_sub_err)?; + if earlier_command.addChildNode.is_some() { + let earlier_command_result_node_id = earlier_command_result.get("nodeID").and_then(|a| a.as_str()).ok_or_else(|| anyhow!("Add-child-node command's result (index #{}) lacks a nodeID field!", index)).map_err(to_sub_err)?; + command_input_final.parentID = earlier_command_result_node_id.to_owned(); + } else { + Err(anyhow!("Command #{} referred to a command index that doesn't have a recognized subfield.", index)).map_err(to_sub_err)?; + } + } + + let result = add_child_node(&ctx, &actor, false, command_input_final, AddChildNodeExtras { avoid_recording_command_run: true }).await.map_err(to_sub_err)?; + command_results.push(serde_json::to_value(result).map_err(to_sub_err)?); + } else if let Some(command_input) = &command.updateNode { + let command_input_final = command_input.clone(); + let result = update_node(&ctx, &actor, false, command_input_final, NoExtras::default()).await.map_err(to_sub_err)?; + command_results.push(serde_json::to_value(result).map_err(to_sub_err)?); + } else if let Some(command_input) = &command.deleteNode { + let command_input_final = command_input.clone(); + let result = delete_node(&ctx, &actor, false, command_input_final, DeleteNodeExtras { as_part_of_map_delete: false }).await.map_err(to_sub_err)?; + command_results.push(serde_json::to_value(result).map_err(to_sub_err)?); + } else if let Some(command_input) = &command.deleteNodeLink { + let command_input_final = command_input.clone(); + let result = delete_node_link(&ctx, &actor, false, command_input_final, NoExtras::default()).await.map_err(to_sub_err)?; + command_results.push(serde_json::to_value(result).map_err(to_sub_err)?); + } else { + Err(anyhow!("Command #{} had no recognized command subfield.", index)).map_err(to_sub_err)?; + } + + // after each command completion, send the results obtained so far to the client + yield Ok(RunCommandBatchResult { results: command_results.clone(), committed: false }); + } + } } #[derive(InputObject, Deserialize, Serialize, Clone)] pub struct RunCommandBatchInput { - pub commands: Vec, + pub commands: Vec, } #[derive(InputObject, Deserialize, Serialize, Clone, Default)] pub struct CommandEntry { - pub addChildNode: Option, - pub updateNode: Option, - pub deleteNode: Option, - pub deleteNodeLink: Option, + pub addChildNode: Option, + pub updateNode: Option, + pub deleteNode: Option, + pub deleteNodeLink: Option, - // extras - pub setParentNodeToResultOfCommandAtIndex: Option, // used by: addChildNode + // extras + pub setParentNodeToResultOfCommandAtIndex: Option, // used by: addChildNode } #[derive(SimpleObject, Debug, Serialize)] pub struct RunCommandBatchResult { - pub results: Vec, - pub committed: bool, + pub results: Vec, + pub committed: bool, } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/set_node_is_multi_premise_argument.rs b/Packages/app-server/src/db/commands/set_node_is_multi_premise_argument.rs index e42611ade..db5321cb0 100644 --- 
a/Packages/app-server/src/db/commands/set_node_is_multi_premise_argument.rs +++ b/Packages/app-server/src/db/commands/set_node_is_multi_premise_argument.rs @@ -1,36 +1,36 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, ensure, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::utils::general_::extensions::{ToOwnedV}; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::general_::extensions::ToOwnedV; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, ensure}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::commands::_shared::update_node_rating_summaries::update_node_rating_summaries; use crate::db::commands::delete_node_rating::{delete_node_rating, DeleteNodeRatingInput}; use crate::db::general::permission_helpers::assert_user_can_modify; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::nodes::get_node; use crate::db::nodes_::_node::Node; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, command_boilerplate, NoExtras, update_field_nullable, gql_placeholder}; +use super::_command::{command_boilerplate, gql_placeholder, update_field_nullable, upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_SetNodeIsMultiPremiseArgument; #[Object] impl MutationShard_SetNodeIsMultiPremiseArgument { async fn set_node_is_multi_premise_argument(&self, gql_ctx: &async_graphql::Context<'_>, input: SetNodeIsMultiPremiseArgumentInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, set_node_is_multi_premise_argument); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -49,15 +49,12 @@ pub struct SetNodeIsMultiPremiseArgumentResult { // todo: eventually remove this command, since unused pub async fn set_node_is_multi_premise_argument(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: SetNodeIsMultiPremiseArgumentInput, _extras: NoExtras) -> Result { let SetNodeIsMultiPremiseArgumentInput { id, multiPremiseArgument } = input; - + let old_data = get_node(&ctx, &id).await?; assert_user_can_modify(&ctx, &actor, &old_data).await?; - let new_data = Node { - multiPremiseArgument, - ..old_data - }; + let new_data = Node { multiPremiseArgument, ..old_data }; upsert_db_entry_by_id_for_struct(&ctx, "nodes".to_owned(), id.to_string(), new_data).await?; Ok(SetNodeIsMultiPremiseArgumentResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/set_node_rating.rs b/Packages/app-server/src/db/commands/set_node_rating.rs index 0e45a29e5..90ef946fa 100644 --- a/Packages/app-server/src/db/commands/set_node_rating.rs +++ b/Packages/app-server/src/db/commands/set_node_rating.rs @@ -1,37 +1,37 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, ensure, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::utils::general_::extensions::{ToOwnedV}; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::general_::extensions::ToOwnedV; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, ensure}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::commands::_shared::update_node_rating_summaries::update_node_rating_summaries; use crate::db::commands::delete_node_rating::{delete_node_rating, DeleteNodeRatingInput}; use crate::db::general::permission_helpers::assert_user_can_vote; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_ratings::{NodeRating, NodeRatingInput, get_node_ratings}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_ratings::{get_node_ratings, NodeRating, NodeRatingInput}; use crate::db::node_ratings_::_node_rating_type::NodeRatingType; use crate::db::nodes::get_node; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use 
crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, command_boilerplate, NoExtras}; +use super::_command::{command_boilerplate, upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_SetNodeRating; #[Object] impl MutationShard_SetNodeRating { async fn set_node_rating(&self, gql_ctx: &async_graphql::Context<'_>, input: SetNodeRatingInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, set_node_rating); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -48,7 +48,7 @@ pub struct SetNodeRatingResult { pub async fn set_node_rating(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: SetNodeRatingInput, _extras: NoExtras) -> Result { let SetNodeRatingInput { rating: rating_ } = input; - + ensure!(rating_.r#type != NodeRatingType::impact, "Cannot set impact rating directly."); let node = get_node(ctx, &rating_.node).await?; assert_user_can_vote(ctx, actor, &node).await?; @@ -76,4 +76,4 @@ pub async fn set_node_rating(ctx: &AccessorContext<'_>, actor: &User, _is_root: update_node_rating_summaries(ctx, actor, rating.node, rating.r#type).await?; Ok(SetNodeRatingResult { id: rating.id.to_string() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/set_user_follow_data.rs b/Packages/app-server/src/db/commands/set_user_follow_data.rs index 099671835..c385acfa0 100644 --- a/Packages/app-server/src/db/commands/set_user_follow_data.rs +++ b/Packages/app-server/src/db/commands/set_user_follow_data.rs @@ -1,36 +1,36 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; use crate::db::general::permission_helpers::{assert_user_can_delete, assert_user_can_modify}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::terms::{Term, TermInput, get_term, TermUpdates}; -use crate::db::user_hiddens::{UserFollow, get_user_hidden}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::terms::{get_term, Term, TermInput, TermUpdates}; +use crate::db::user_hiddens::{get_user_hidden, UserFollow}; use crate::db::users::User; use 
crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; use super::_shared::jsonb_utils::jsonb_set; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_SetUserFollowData; #[Object] impl MutationShard_SetUserFollowData { async fn set_user_follow_data(&self, gql_ctx: &async_graphql::Context<'_>, input: SetUserFollowDataInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, set_user_follow_data); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -51,9 +51,9 @@ pub async fn set_user_follow_data(ctx: &AccessorContext<'_>, actor: &User, _is_r let actor_hidden_data = get_user_hidden(ctx, &actor.id).await?; assert_user_can_modify(ctx, actor, &actor_hidden_data).await?; - + let user_follow_as_json_value = if let Some(userFollow) = userFollow { Some(serde_json::to_value(userFollow)?) } else { None }; jsonb_set(&ctx.tx, "userHiddens", &actor.id, "extras", vec!["userFollows".to_owned(), targetUser], user_follow_as_json_value).await?; Ok(SetUserFollowDataResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/transfer_nodes.rs b/Packages/app-server/src/db/commands/transfer_nodes.rs index b5830e0fb..d9933fba1 100644 --- a/Packages/app-server/src/db/commands/transfer_nodes.rs +++ b/Packages/app-server/src/db/commands/transfer_nodes.rs @@ -1,37 +1,37 @@ -use std::fmt::{Formatter, Display}; +use std::fmt::{Display, Formatter}; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject, Enum}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, bail, ensure, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{Enum, InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; use rust_shared::utils::general_::extensions::ToOwnedV; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError, to_anyhow}; -use rust_shared::async_graphql::{Object}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, Context, ensure, bail}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, to_anyhow, GQLError}; use tracing::info; use crate::db::_shared::path_finder::search_up_from_node_for_node_matching_x; use crate::db::commands::_command::command_boilerplate; -use crate::db::commands::add_node_link::{AddNodeLinkInput, add_node_link}; +use crate::db::commands::add_node_link::{add_node_link, AddNodeLinkInput}; use crate::db::commands::delete_node::{delete_node, DeleteNodeInput}; use crate::db::commands::delete_node_link::{self, delete_node_link, DeleteNodeLinkInput}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_links::{NodeLinkInput, ClaimForm, ChildGroup, Polarity, get_node_links, 
get_first_link_under_parent, get_highest_order_key_under_parent}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_links::{get_first_link_under_parent, get_highest_order_key_under_parent, get_node_links, ChildGroup, ClaimForm, NodeLinkInput, Polarity}; use crate::db::node_phrasings::NodePhrasing_Embedded; use crate::db::node_revisions::{NodeRevision, NodeRevisionInput}; use crate::db::nodes::get_node; -use crate::db::nodes_::_node::{NodeInput, ArgumentType}; +use crate::db::nodes_::_node::{ArgumentType, NodeInput}; use crate::db::nodes_::_node_type::NodeType; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use crate::utils::general::order_key::OrderKey; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, tbd, gql_placeholder}; +use super::_command::{gql_placeholder, tbd, upsert_db_entry_by_id_for_struct, NoExtras}; use super::_shared::add_node::add_node; use super::_shared::increment_edit_counts::increment_edit_counts_if_valid; use super::add_child_node::{add_child_node, AddChildNodeInput}; @@ -39,14 +39,14 @@ use super::transfer_nodes_::transfer_using_clone::TransferResult_Clone; use super::transfer_nodes_::transfer_using_shim::TransferResult_Shim; use super::transfer_nodes_::{transfer_using_clone::transfer_using_clone, transfer_using_shim::transfer_using_shim}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_TransferNodes; #[Object] impl MutationShard_TransferNodes { /// This is a higher-level wrapper around `addNodeLink`, which handles unlinking from old parent (if requested), etc. 
	async fn transfer_nodes(&self, gql_ctx: &async_graphql::Context<'_>, input: TransferNodesInput, only_validate: Option<bool>) -> Result<TransferNodesResult, GQLError> {
		command_boilerplate!(gql_ctx, input, only_validate, transfer_nodes);
-	}
+	}
}
#[derive(InputObject, Serialize, Deserialize)]
@@ -60,11 +60,11 @@ pub struct NodeInfoForTransfer {
	pub nodeID: Option<String>, // can be null, if transfer is of type "shim"
	pub oldParentID: Option<String>,
	pub transferType: TransferType,
-	#[graphql(name = "clone_newType")]
+	#[graphql(name = "clone_newType")]
	pub clone_newType: NodeType,
-	#[graphql(name = "clone_keepChildren")]
+	#[graphql(name = "clone_keepChildren")]
	pub clone_keepChildren: bool,
-	#[graphql(name = "clone_keepTags")]
+	#[graphql(name = "clone_keepTags")]
	pub clone_keepTags: NodeTagCloneType,
	pub newParentID: Option<String>,
@@ -98,34 +98,34 @@ pub struct TransferNodesResult {
}
pub enum TransferResult {
-	Ignore,
-	Clone(TransferResult_Clone),
-	Shim(TransferResult_Shim),
+	Ignore,
+	Clone(TransferResult_Clone),
+	Shim(TransferResult_Shim),
}
pub async fn transfer_nodes(ctx: &AccessorContext<'_>, actor: &User, is_root: bool, input: TransferNodesInput, _extras: NoExtras) -> Result<TransferNodesResult, Error> {
	let TransferNodesInput { mapID, nodes } = input;
-	let mut transfer_results: Vec<TransferResult> = vec![];
-
+	let mut transfer_results: Vec<TransferResult> = vec![];
+
	for (i, transfer) in nodes.iter().enumerate() {
-		//let _prev_transfer = nodes.get(i - 1);
-		//let prev_transfer_result = i.checked_sub(1).and_then(|i| transfer_results.get(i));
-		let prev_transfer_result = if i == 0 { None } else { transfer_results.get(i - 1) };
-
-		let node_id = transfer.nodeID.as_ref().ok_or(anyhow!("For any transfer type, nodeID must be specified."))?;
-
-		let result = match transfer.transferType {
-			TransferType::ignore => TransferResult::Ignore,
-			TransferType::r#move => bail!("Not yet implemented."),
-			TransferType::link => bail!("Not yet implemented."),
-			TransferType::clone => transfer_using_clone(ctx, actor, transfer, prev_transfer_result, node_id).await?,
-			TransferType::shim => transfer_using_shim(ctx, actor, transfer, prev_transfer_result, node_id).await?,
-		};
-		transfer_results.push(result);
-	}
-
+		//let _prev_transfer = nodes.get(i - 1);
+		//let prev_transfer_result = i.checked_sub(1).and_then(|i| transfer_results.get(i));
+		let prev_transfer_result = if i == 0 { None } else { transfer_results.get(i - 1) };
+
+		let node_id = transfer.nodeID.as_ref().ok_or(anyhow!("For any transfer type, nodeID must be specified."))?;
+
+		let result = match transfer.transferType {
+			TransferType::ignore => TransferResult::Ignore,
+			TransferType::r#move => bail!("Not yet implemented."),
+			TransferType::link => bail!("Not yet implemented."),
+			TransferType::clone => transfer_using_clone(ctx, actor, transfer, prev_transfer_result, node_id).await?,
+			TransferType::shim => transfer_using_shim(ctx, actor, transfer, prev_transfer_result, node_id).await?,
+		};
+		transfer_results.push(result);
+	}
+
	increment_edit_counts_if_valid(&ctx, Some(actor), mapID, is_root).await?;
	Ok(TransferNodesResult { __: gql_placeholder() })
-}
\ No newline at end of file
+}
diff --git a/Packages/app-server/src/db/commands/transfer_nodes_/transfer_using_clone.rs b/Packages/app-server/src/db/commands/transfer_nodes_/transfer_using_clone.rs
index 6daf44579..ae969bb61 100644
--- a/Packages/app-server/src/db/commands/transfer_nodes_/transfer_using_clone.rs
+++ b/Packages/app-server/src/db/commands/transfer_nodes_/transfer_using_clone.rs
@@ -1,204 +1,198 @@
-use std::fmt::{Formatter, Display};
+use std::fmt::{Display, Formatter};
+use 
rust_shared::anyhow::{anyhow, bail, ensure, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{Enum, InputObject, SimpleObject, ID}; +use rust_shared::db_constants::SYSTEM_USER_ID; use rust_shared::indexmap::IndexSet; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject, Enum}; use rust_shared::itertools::Itertools; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; -use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; use rust_shared::utils::general_::extensions::ToOwnedV; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError, to_anyhow}; -use rust_shared::async_graphql::{Object}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, Context, ensure, bail}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, to_anyhow, GQLError}; use tracing::info; use crate::db::_shared::path_finder::search_up_from_node_for_node_matching_x; use crate::db::commands::_command::{command_boilerplate, CanOmit}; -use crate::db::commands::add_node_link::{AddNodeLinkInput, add_node_link}; +use crate::db::commands::add_node_link::{add_node_link, AddNodeLinkInput}; use crate::db::commands::add_node_tag::{add_node_tag, AddNodeTagInput}; use crate::db::commands::delete_node::{delete_node, DeleteNodeInput}; use crate::db::commands::delete_node_link::{self, delete_node_link, DeleteNodeLinkInput}; use crate::db::commands::link_node::{link_node, LinkNodeInput}; -use crate::db::commands::transfer_nodes::{NodeInfoForTransfer, TransferResult, NodeTagCloneType}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_links::{NodeLinkInput, ClaimForm, ChildGroup, Polarity, get_node_links, get_first_link_under_parent, get_highest_order_key_under_parent, NodeLink}; +use crate::db::commands::transfer_nodes::{NodeInfoForTransfer, NodeTagCloneType, TransferResult}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_links::{get_first_link_under_parent, get_highest_order_key_under_parent, get_node_links, ChildGroup, ClaimForm, NodeLink, NodeLinkInput, Polarity}; use crate::db::node_links_::node_link_validity::assert_link_is_valid; use crate::db::node_phrasings::NodePhrasing_Embedded; -use crate::db::node_revisions::{NodeRevision, NodeRevisionInput, get_node_revision}; -use crate::db::node_tags::{NodeTag, get_node_tags, NodeTagInput, TagComp_CloneHistory}; +use crate::db::node_revisions::{get_node_revision, NodeRevision, NodeRevisionInput}; +use crate::db::node_tags::{get_node_tags, NodeTag, NodeTagInput, TagComp_CloneHistory}; use crate::db::nodes::get_node; -use crate::db::nodes_::_node::{NodeInput, ArgumentType, Node}; -use crate::db::nodes_::_node_type::{NodeType, get_node_type_info}; +use crate::db::nodes_::_node::{ArgumentType, Node, NodeInput}; +use crate::db::nodes_::_node_type::{get_node_type_info, NodeType}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use crate::utils::general::order_key::OrderKey; 
-use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, tbd, gql_placeholder}; +use super::super::_command::{gql_placeholder, tbd, upsert_db_entry_by_id_for_struct, NoExtras}; use super::super::_shared::add_node::add_node; use super::super::_shared::increment_edit_counts::increment_edit_counts_if_valid; use super::super::add_child_node::{add_child_node, AddChildNodeInput}; use super::transfer_using_shim::TransferResult_Shim; pub struct TransferResult_Clone { - pub new_node_id: String, + pub new_node_id: String, } pub async fn transfer_using_clone(ctx: &AccessorContext<'_>, actor: &User, transfer: &NodeInfoForTransfer, prev_transfer_result: Option<&TransferResult>, node_id: &str) -> Result { - let node = get_node(&ctx, node_id).await?; - - let old_parent_id = transfer.oldParentID.clone().ok_or(anyhow!("Transfer-using-clone requires oldParentID to be specified."))?; // maybe relax this requirement later - let new_parent_id = if let Some(new_parent_id) = &transfer.newParentID { - let _new_parent = get_node(ctx, new_parent_id).await.with_context(|| "New-parent-id specifies a node that doesn't exist!")?; - new_parent_id.o() - } else { - match prev_transfer_result { - Some(TransferResult::Shim(TransferResult_Shim { new_argument_wrapper_id })) => new_argument_wrapper_id.clone(), - _ => bail!("Could not determine the new-parent-id for this transfer-using-clone operation!"), - } - }; - - let new_node = NodeInput { - r#type: transfer.clone_newType, //.unwrap_or(node.r#type), - accessPolicy: transfer.newAccessPolicyID.clone().unwrap_or(node.accessPolicy), - rootNodeForMap: node.rootNodeForMap, - multiPremiseArgument: node.multiPremiseArgument, - argumentType: node.argumentType, - //extras: json!({}), - extras: CanOmit::None, - }; - - let rev = get_node_revision(ctx, &node.c_currentRevision).await?; - let new_rev = NodeRevisionInput { - node: None, - phrasing: rev.phrasing, - displayDetails: rev.displayDetails, - attachments: rev.attachments, - }; - - let link = get_first_link_under_parent(ctx, node_id, &old_parent_id).await?; - let order_key_for_new_node = get_highest_order_key_under_parent(ctx, Some(&new_parent_id)).await?.next()?; - let new_link = NodeLinkInput { - group: transfer.childGroup, - orderKey: order_key_for_new_node, - parent: None, - child: None, - form: link.form, - seriesAnchor: link.seriesAnchor, - seriesEnd: link.seriesEnd, - polarity: if new_node.r#type == NodeType::argument { - Some(transfer.argumentPolarity.unwrap_or(Polarity::supporting)) - } else { - link.polarity - }, - }; - - let add_child_node_input = AddChildNodeInput { - mapID: None, - parentID: new_parent_id, - node: new_node.clone(), - revision: new_rev, - link: new_link, - }; - let add_child_node_result = add_child_node(ctx, actor, false, add_child_node_input, Default::default()).await?; - let new_node_id = add_child_node_result.nodeID; - - if transfer.clone_keepChildren { - let old_child_links = get_node_links(&ctx, Some(node_id), None).await?; - for link in &old_child_links { - // hard-coded exception here: if old-node-type is category (with claim children), and new-node-type is claim, then have children claims be wrapped into argument nodes - if node.r#type == NodeType::category && new_node.r#type == NodeType::claim && link.c_childType == NodeType::claim { - link_node(&ctx, actor, false, LinkNodeInput { - mapID: None, - oldParentID: Some(link.parent.clone()), - newParentID: new_node_id.clone(), - childGroup: ChildGroup::truth, - nodeID: 
link.child.clone(), - newForm: link.form.clone(), - newPolarity: link.polarity.clone(), - deleteEmptyArgumentWrapper: Some(false), - unlinkFromOldParent: Some(false), - }, Default::default()).await?; - continue; - } - - let new_link = NodeLinkInput { - parent: Some(new_node_id.clone()), - child: Some(link.child.clone()), - // if we're changing the node's type, check for child-links it has that are invalid (eg. wrong child-group), and try to change them to be valid - group: if new_node.r#type != node.r#type && assert_link_is_valid(new_node.r#type, link.c_childType, link.group, link.polarity).is_ok() { - let first_valid_group_for_child_type = get_node_type_info(new_node.r#type).childGroup_childTypes.iter().find(|a| a.1.contains(&link.c_childType)); - match first_valid_group_for_child_type { - None => bail!("Cannot clone node while both changing type and keeping children, because there are children whose type ({:?}) cannot be placed into any of the new node's child-groups.", link.c_childType), - Some((group, _)) => group.clone(), - } - } else { - link.group - }, - orderKey: link.orderKey.o(), - form: link.form, - seriesAnchor: link.seriesAnchor, - seriesEnd: link.seriesEnd, - polarity: link.polarity, - }; - add_node_link(&ctx, actor, false, AddNodeLinkInput { link: new_link }, Default::default()).await?; - } - } - - let tags = get_node_tags(&ctx, node_id).await?; - for tag in &tags { - let new_tag = maybe_clone_and_retarget_node_tag(tag, transfer.clone_keepTags, node_id, &new_node_id); - if let Some(new_tag) = new_tag { - add_node_tag(&ctx, actor, false, AddNodeTagInput { tag: new_tag.to_input() }, Default::default()).await?; - } - } - - let tags_showing_clone_history_for_old_node = tags.iter().filter(|tag| { - if let Some(history) = &tag.cloneHistory && history.cloneChain.last() == Some(&node_id.o()) { - true - } else { - false - } - }).collect_vec(); - // if there was no clone-history tag we could extend to record this clone action, create a brand new clone-history tag for it - if tags_showing_clone_history_for_old_node.len() == 0 { - let new_nodes = vec![node_id.o(), new_node_id.o()]; - let new_clone_history = TagComp_CloneHistory { cloneChain: new_nodes.clone() }; - add_node_tag(&ctx, actor, false, AddNodeTagInput { - tag: NodeTagInput { - nodes: new_nodes, - cloneHistory: Some(new_clone_history), - ..Default::default() - } - }, Default::default()).await?; - } - - Ok(TransferResult::Clone(TransferResult_Clone { - new_node_id, - })) + let node = get_node(&ctx, node_id).await?; + + let old_parent_id = transfer.oldParentID.clone().ok_or(anyhow!("Transfer-using-clone requires oldParentID to be specified."))?; // maybe relax this requirement later + let new_parent_id = if let Some(new_parent_id) = &transfer.newParentID { + let _new_parent = get_node(ctx, new_parent_id).await.with_context(|| "New-parent-id specifies a node that doesn't exist!")?; + new_parent_id.o() + } else { + match prev_transfer_result { + Some(TransferResult::Shim(TransferResult_Shim { new_argument_wrapper_id })) => new_argument_wrapper_id.clone(), + _ => bail!("Could not determine the new-parent-id for this transfer-using-clone operation!"), + } + }; + + let new_node = NodeInput { + r#type: transfer.clone_newType, //.unwrap_or(node.r#type), + accessPolicy: transfer.newAccessPolicyID.clone().unwrap_or(node.accessPolicy), + rootNodeForMap: node.rootNodeForMap, + multiPremiseArgument: node.multiPremiseArgument, + argumentType: node.argumentType, + //extras: json!({}), + extras: CanOmit::None, + }; + + let rev = 
get_node_revision(ctx, &node.c_currentRevision).await?; + let new_rev = NodeRevisionInput { node: None, phrasing: rev.phrasing, displayDetails: rev.displayDetails, attachments: rev.attachments }; + + let link = get_first_link_under_parent(ctx, node_id, &old_parent_id).await?; + let order_key_for_new_node = get_highest_order_key_under_parent(ctx, Some(&new_parent_id)).await?.next()?; + let new_link = NodeLinkInput { + group: transfer.childGroup, + orderKey: order_key_for_new_node, + parent: None, + child: None, + form: link.form, + seriesAnchor: link.seriesAnchor, + seriesEnd: link.seriesEnd, + polarity: if new_node.r#type == NodeType::argument { + Some(transfer.argumentPolarity.unwrap_or(Polarity::supporting)) + } else { + link.polarity + }, + }; + + let add_child_node_input = AddChildNodeInput { mapID: None, parentID: new_parent_id, node: new_node.clone(), revision: new_rev, link: new_link }; + let add_child_node_result = add_child_node(ctx, actor, false, add_child_node_input, Default::default()).await?; + let new_node_id = add_child_node_result.nodeID; + + if transfer.clone_keepChildren { + let old_child_links = get_node_links(&ctx, Some(node_id), None).await?; + for link in &old_child_links { + // hard-coded exception here: if old-node-type is category (with claim children), and new-node-type is claim, then have children claims be wrapped into argument nodes + if node.r#type == NodeType::category && new_node.r#type == NodeType::claim && link.c_childType == NodeType::claim { + link_node( + &ctx, + actor, + false, + LinkNodeInput { + mapID: None, + oldParentID: Some(link.parent.clone()), + newParentID: new_node_id.clone(), + childGroup: ChildGroup::truth, + nodeID: link.child.clone(), + newForm: link.form.clone(), + newPolarity: link.polarity.clone(), + deleteEmptyArgumentWrapper: Some(false), + unlinkFromOldParent: Some(false), + }, + Default::default(), + ) + .await?; + continue; + } + + let new_link = NodeLinkInput { + parent: Some(new_node_id.clone()), + child: Some(link.child.clone()), + // if we're changing the node's type, check for child-links it has that are invalid (eg. 
wrong child-group), and try to change them to be valid + group: if new_node.r#type != node.r#type && assert_link_is_valid(new_node.r#type, link.c_childType, link.group, link.polarity).is_ok() { + let first_valid_group_for_child_type = get_node_type_info(new_node.r#type).childGroup_childTypes.iter().find(|a| a.1.contains(&link.c_childType)); + match first_valid_group_for_child_type { + None => bail!("Cannot clone node while both changing type and keeping children, because there are children whose type ({:?}) cannot be placed into any of the new node's child-groups.", link.c_childType), + Some((group, _)) => group.clone(), + } + } else { + link.group + }, + orderKey: link.orderKey.o(), + form: link.form, + seriesAnchor: link.seriesAnchor, + seriesEnd: link.seriesEnd, + polarity: link.polarity, + }; + add_node_link(&ctx, actor, false, AddNodeLinkInput { link: new_link }, Default::default()).await?; + } + } + + let tags = get_node_tags(&ctx, node_id).await?; + for tag in &tags { + let new_tag = maybe_clone_and_retarget_node_tag(tag, transfer.clone_keepTags, node_id, &new_node_id); + if let Some(new_tag) = new_tag { + add_node_tag(&ctx, actor, false, AddNodeTagInput { tag: new_tag.to_input() }, Default::default()).await?; + } + } + + let tags_showing_clone_history_for_old_node = tags + .iter() + .filter(|tag| { + if let Some(history) = &tag.cloneHistory + && history.cloneChain.last() == Some(&node_id.o()) + { + true + } else { + false + } + }) + .collect_vec(); + // if there was no clone-history tag we could extend to record this clone action, create a brand new clone-history tag for it + if tags_showing_clone_history_for_old_node.len() == 0 { + let new_nodes = vec![node_id.o(), new_node_id.o()]; + let new_clone_history = TagComp_CloneHistory { cloneChain: new_nodes.clone() }; + add_node_tag(&ctx, actor, false, AddNodeTagInput { tag: NodeTagInput { nodes: new_nodes, cloneHistory: Some(new_clone_history), ..Default::default() } }, Default::default()).await?; + } + + Ok(TransferResult::Clone(TransferResult_Clone { new_node_id })) } +#[rustfmt::skip] fn maybe_clone_and_retarget_node_tag(tag: &NodeTag, clone_type: NodeTagCloneType, old_node_id: &str, new_node_id: &str) -> Option { - let tag_clone_level = match clone_type { - NodeTagCloneType::minimal => 0, - NodeTagCloneType::basics => 1, - }; - - let mut new_tag = tag.clone(); - if let Some(labels) = new_tag.labels.as_mut() && tag_clone_level >= 1 { - labels.nodeX = new_node_id.o(); - } - // clone-history tags are a special case: clone+extend them if-and-only-if the result/final-entry is the old-node (preserving history without creating confusion) - else if let Some(history) = new_tag.cloneHistory.as_mut() && history.cloneChain.last() == Some(&old_node_id.o()) { - history.cloneChain.push(new_node_id.o()); - } else { - return None; - } - - new_tag.nodes = new_tag.calculate_new_nodes_list(); - Some(new_tag) -} \ No newline at end of file + let tag_clone_level = match clone_type { + NodeTagCloneType::minimal => 0, + NodeTagCloneType::basics => 1, + }; + + let mut new_tag = tag.clone(); + if let Some(labels) = new_tag.labels.as_mut() && tag_clone_level >= 1 { + labels.nodeX = new_node_id.o(); + } + // clone-history tags are a special case: clone+extend them if-and-only-if the result/final-entry is the old-node (preserving history without creating confusion) + else if let Some(history) = new_tag.cloneHistory.as_mut() && history.cloneChain.last() == Some(&old_node_id.o()) { + history.cloneChain.push(new_node_id.o()); + } else { + return None; + } + + 
new_tag.nodes = new_tag.calculate_new_nodes_list(); + Some(new_tag) +} diff --git a/Packages/app-server/src/db/commands/transfer_nodes_/transfer_using_shim.rs b/Packages/app-server/src/db/commands/transfer_nodes_/transfer_using_shim.rs index 3aad53362..2e4942704 100644 --- a/Packages/app-server/src/db/commands/transfer_nodes_/transfer_using_shim.rs +++ b/Packages/app-server/src/db/commands/transfer_nodes_/transfer_using_shim.rs @@ -1,82 +1,86 @@ -use std::fmt::{Formatter, Display}; +use std::fmt::{Display, Formatter}; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject, Enum}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, bail, ensure, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{Enum, InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; use rust_shared::utils::general_::extensions::ToOwnedV; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError, to_anyhow}; -use rust_shared::async_graphql::{Object}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, Context, ensure, bail}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, to_anyhow, GQLError}; use tracing::info; use crate::db::_shared::path_finder::search_up_from_node_for_node_matching_x; use crate::db::commands::_command::{command_boilerplate, CanOmit}; -use crate::db::commands::add_node_link::{AddNodeLinkInput, add_node_link}; +use crate::db::commands::add_node_link::{add_node_link, AddNodeLinkInput}; use crate::db::commands::delete_node::{delete_node, DeleteNodeInput}; use crate::db::commands::delete_node_link::{self, delete_node_link, DeleteNodeLinkInput}; use crate::db::commands::transfer_nodes::{NodeInfoForTransfer, TransferResult}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_links::{NodeLinkInput, ClaimForm, ChildGroup, Polarity, get_node_links, get_first_link_under_parent, get_highest_order_key_under_parent}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_links::{get_first_link_under_parent, get_highest_order_key_under_parent, get_node_links, ChildGroup, ClaimForm, NodeLinkInput, Polarity}; use crate::db::node_phrasings::NodePhrasing_Embedded; use crate::db::node_revisions::{NodeRevision, NodeRevisionInput}; use crate::db::nodes::get_node; -use crate::db::nodes_::_node::{NodeInput, ArgumentType}; +use crate::db::nodes_::_node::{ArgumentType, NodeInput}; use crate::db::nodes_::_node_type::NodeType; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use crate::utils::general::order_key::OrderKey; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, tbd, gql_placeholder}; +use super::super::_command::{gql_placeholder, tbd, upsert_db_entry_by_id_for_struct, NoExtras}; use super::super::_shared::add_node::add_node; use 
super::super::_shared::increment_edit_counts::increment_edit_counts_if_valid; use super::super::add_child_node::{add_child_node, AddChildNodeInput}; pub struct TransferResult_Shim { - pub new_argument_wrapper_id: String, + pub new_argument_wrapper_id: String, } pub async fn transfer_using_shim(ctx: &AccessorContext<'_>, actor: &User, transfer: &NodeInfoForTransfer, _prev_transfer_result: Option<&TransferResult>, node_id: &str) -> Result { - let node = get_node(&ctx, node_id).await?; - let new_parent_id = transfer.newParentID.clone().ok_or(anyhow!("For transfer of type \"shim\", newParentID must be specified."))?; - let order_key_for_new_node = get_highest_order_key_under_parent(&ctx, Some(&new_parent_id)).await?.next()?; + let node = get_node(&ctx, node_id).await?; + let new_parent_id = transfer.newParentID.clone().ok_or(anyhow!("For transfer of type \"shim\", newParentID must be specified."))?; + let order_key_for_new_node = get_highest_order_key_under_parent(&ctx, Some(&new_parent_id)).await?.next()?; - let argument_wrapper = NodeInput { - accessPolicy: transfer.newAccessPolicyID.clone().unwrap_or(node.accessPolicy), - r#type: NodeType::argument, - rootNodeForMap: None, - multiPremiseArgument: None, - argumentType: Some(ArgumentType::all), - //extras: json!({}), - extras: CanOmit::None, - }; - let argument_wrapper_revision = NodeRevisionInput { - phrasing: NodePhrasing_Embedded { text_base: "".o(), ..Default::default() }, - // defaults - node: None, displayDetails: None, attachments: vec![], - }; + let argument_wrapper = NodeInput { + accessPolicy: transfer.newAccessPolicyID.clone().unwrap_or(node.accessPolicy), + r#type: NodeType::argument, + rootNodeForMap: None, + multiPremiseArgument: None, + argumentType: Some(ArgumentType::all), + //extras: json!({}), + extras: CanOmit::None, + }; + let argument_wrapper_revision = NodeRevisionInput { + phrasing: NodePhrasing_Embedded { text_base: "".o(), ..Default::default() }, + // defaults + node: None, + displayDetails: None, + attachments: vec![], + }; - let add_child_node_input = AddChildNodeInput { - mapID: None, - parentID: new_parent_id, - node: argument_wrapper, - revision: argument_wrapper_revision, - link: NodeLinkInput { - group: transfer.childGroup, - orderKey: order_key_for_new_node, - polarity: Some(transfer.argumentPolarity.unwrap_or(Polarity::supporting)), - // defaults - parent: None, child: None, form: None, seriesAnchor: None, seriesEnd: None, - }, - }; - let result = add_child_node(&ctx, actor, false, add_child_node_input, Default::default()).await?; + let add_child_node_input = AddChildNodeInput { + mapID: None, + parentID: new_parent_id, + node: argument_wrapper, + revision: argument_wrapper_revision, + link: NodeLinkInput { + group: transfer.childGroup, + orderKey: order_key_for_new_node, + polarity: Some(transfer.argumentPolarity.unwrap_or(Polarity::supporting)), + // defaults + parent: None, + child: None, + form: None, + seriesAnchor: None, + seriesEnd: None, + }, + }; + let result = add_child_node(&ctx, actor, false, add_child_node_input, Default::default()).await?; - Ok(TransferResult::Shim(TransferResult_Shim { - new_argument_wrapper_id: result.nodeID, - })) -} \ No newline at end of file + Ok(TransferResult::Shim(TransferResult_Shim { new_argument_wrapper_id: result.nodeID })) +} diff --git a/Packages/app-server/src/db/commands/update_access_policy.rs b/Packages/app-server/src/db/commands/update_access_policy.rs index 809fbd3a3..f3b6b85e2 100644 --- a/Packages/app-server/src/db/commands/update_access_policy.rs +++ 
b/Packages/app-server/src/db/commands/update_access_policy.rs @@ -1,35 +1,35 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::access_policies_::_access_policy::{AccessPolicyUpdates, AccessPolicy}; +use crate::db::access_policies_::_access_policy::{AccessPolicy, AccessPolicyUpdates}; use crate::db::access_policies_::_permission_set::APAction; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; -use crate::db::general::permission_helpers::{assert_user_can_modify}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; +use crate::db::general::permission_helpers::assert_user_can_modify; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct MutationShard_UpdateAccessPolicy; #[Object] impl MutationShard_UpdateAccessPolicy { async fn update_access_policy(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateAccessPolicyInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, update_access_policy); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -47,7 +47,7 @@ pub struct UpdateAccessPolicyResult { pub async fn update_access_policy(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateAccessPolicyInput, _extras: NoExtras) -> Result { let UpdateAccessPolicyInput { id, updates } = input; - + let old_data = get_access_policy(&ctx, &id).await?; //assert_user_can_modify_simple(&actor, &old_data.creator)?; //assert_user_can_do_x_for_commands(ctx, &actor, APAction::Modify, ActionTarget::for_access_policy(old_data.creator)).await?; @@ -62,4 +62,4 @@ pub async fn update_access_policy(ctx: &AccessorContext<'_>, actor: &User, _is_r upsert_db_entry_by_id_for_struct(&ctx, "accessPolicies".to_owned(), id.to_string(), new_data).await?; Ok(UpdateAccessPolicyResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/update_map.rs b/Packages/app-server/src/db/commands/update_map.rs index 5157b13fb..0ca9ea080 100644 --- a/Packages/app-server/src/db/commands/update_map.rs +++ b/Packages/app-server/src/db/commands/update_map.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, ensure, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, ensure}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; use crate::db::general::permission_helpers::{assert_user_can_delete, assert_user_can_modify, is_user_creator_or_mod, is_user_mod}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::maps::{Map, MapInput, get_map, MapUpdates}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::maps::{get_map, Map, MapInput, MapUpdates}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, 
NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_UpdateMap; #[Object] impl MutationShard_UpdateMap { async fn update_map(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateMapInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, update_map); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -46,12 +46,14 @@ pub struct UpdateMapResult { pub async fn update_map(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateMapInput, _extras: NoExtras) -> Result { let UpdateMapInput { id, updates } = input; - + let old_data = get_map(&ctx, &id).await?; assert_user_can_modify(&ctx, &actor, &old_data).await?; // when trying to modify certain fields, extra permissions are required - if !updates.featured.is_undefined() { ensure!(is_user_mod(actor), "Only mods can set whether a map is featured.") } + if !updates.featured.is_undefined() { + ensure!(is_user_mod(actor), "Only mods can set whether a map is featured.") + } let new_data = Map { accessPolicy: update_field(updates.accessPolicy, old_data.accessPolicy), @@ -72,4 +74,4 @@ pub async fn update_map(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, upsert_db_entry_by_id_for_struct(&ctx, "maps".to_owned(), id.to_string(), new_data).await?; Ok(UpdateMapResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/update_media.rs b/Packages/app-server/src/db/commands/update_media.rs index 763532402..0a9ed4b90 100644 --- a/Packages/app-server/src/db/commands/update_media.rs +++ b/Packages/app-server/src/db/commands/update_media.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; use crate::db::general::permission_helpers::{assert_user_can_delete, assert_user_can_modify}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::medias::{Media, MediaInput, get_media, MediaUpdates}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::medias::{get_media, Media, MediaInput, MediaUpdates}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use 
crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_UpdateMedia; #[Object] impl MutationShard_UpdateMedia { async fn update_media(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateMediaInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, update_media); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -46,7 +46,7 @@ pub struct UpdateMediaResult { pub async fn update_media(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateMediaInput, _extras: NoExtras) -> Result { let UpdateMediaInput { id, updates } = input; - + let old_data = get_media(&ctx, &id).await?; assert_user_can_modify(&ctx, &actor, &old_data).await?; let new_data = Media { @@ -61,4 +61,4 @@ pub async fn update_media(ctx: &AccessorContext<'_>, actor: &User, _is_root: boo upsert_db_entry_by_id_for_struct(&ctx, "medias".to_owned(), id.to_string(), new_data).await?; Ok(UpdateMediaResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/update_node.rs b/Packages/app-server/src/db/commands/update_node.rs index 7ed66ca71..a7bb5db9b 100644 --- a/Packages/app-server/src/db/commands/update_node.rs +++ b/Packages/app-server/src/db/commands/update_node.rs @@ -1,36 +1,36 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; use crate::db::access_policies_::_permission_set::APAction; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; use crate::db::general::permission_helpers::{assert_user_can_delete, assert_user_can_modify}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::nodes::{get_node}; -use crate::db::nodes_::_node::{Node, NodeUpdates, node_extras_locked_subfields}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::nodes::get_node; +use crate::db::nodes_::_node::{node_extras_locked_subfields, Node, NodeUpdates}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use 
crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, update_field_of_extras}; +use super::_command::{update_field_of_extras, upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_UpdateNode; #[Object] impl MutationShard_UpdateNode { async fn update_node(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateNodeInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, update_node); - } + } } #[derive(InputObject, Serialize, Deserialize, Clone)] @@ -48,7 +48,7 @@ pub struct UpdateNodeResult { pub async fn update_node(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateNodeInput, _extras: NoExtras) -> Result { let UpdateNodeInput { id, updates } = input; - + let old_data = get_node(&ctx, &id).await?; //assert_user_can_do_x_for_commands(&ctx, &actor, APAction::Modify, ActionTarget::for_node(old_data.creator.o(), old_data.accessPolicy.o())).await?; assert_user_can_modify(&ctx, &actor, &old_data).await?; @@ -63,4 +63,4 @@ pub async fn update_node(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool upsert_db_entry_by_id_for_struct(&ctx, "nodes".to_owned(), id.to_string(), new_data).await?; Ok(UpdateNodeResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/update_node_link.rs b/Packages/app-server/src/db/commands/update_node_link.rs index f386c82a7..8aa1b26ff 100644 --- a/Packages/app-server/src/db/commands/update_node_link.rs +++ b/Packages/app-server/src/db/commands/update_node_link.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; use crate::db::general::permission_helpers::{assert_user_can_delete, assert_user_can_modify}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_links::{NodeLink, get_node_link, NodeLinkUpdates}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_links::{get_node_link, NodeLink, 
NodeLinkUpdates}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_UpdateNodeLink; #[Object] impl MutationShard_UpdateNodeLink { async fn update_node_link(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateNodeLinkInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, update_node_link); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -46,7 +46,7 @@ pub struct UpdateNodeLinkResult { pub async fn update_node_link(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateNodeLinkInput, _extras: NoExtras) -> Result { let UpdateNodeLinkInput { id, updates } = input; - + let old_data = get_node_link(&ctx, &id).await?; assert_user_can_modify(&ctx, &actor, &old_data).await?; let new_data = NodeLink { @@ -59,4 +59,4 @@ pub async fn update_node_link(ctx: &AccessorContext<'_>, actor: &User, _is_root: upsert_db_entry_by_id_for_struct(&ctx, "nodeLinks".to_owned(), id.to_string(), new_data).await?; Ok(UpdateNodeLinkResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/update_node_phrasing.rs b/Packages/app-server/src/db/commands/update_node_phrasing.rs index 92f29adf5..087b03a4e 100644 --- a/Packages/app-server/src/db/commands/update_node_phrasing.rs +++ b/Packages/app-server/src/db/commands/update_node_phrasing.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; use crate::db::general::permission_helpers::{assert_user_can_delete, assert_user_can_modify}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_phrasings::{NodePhrasing, NodePhrasingInput, get_node_phrasing, NodePhrasingUpdates}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_phrasings::{get_node_phrasing, NodePhrasing, NodePhrasingInput, NodePhrasingUpdates}; use 
crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_UpdateNodePhrasing; #[Object] impl MutationShard_UpdateNodePhrasing { async fn update_node_phrasing(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateNodePhrasingInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, update_node_phrasing); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -46,7 +46,7 @@ pub struct UpdateNodePhrasingResult { pub async fn update_node_phrasing(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateNodePhrasingInput, _extras: NoExtras) -> Result { let UpdateNodePhrasingInput { id, updates } = input; - + let old_data = get_node_phrasing(&ctx, &id).await?; assert_user_can_modify(&ctx, &actor, &old_data).await?; let new_data = NodePhrasing { @@ -64,4 +64,4 @@ pub async fn update_node_phrasing(ctx: &AccessorContext<'_>, actor: &User, _is_r upsert_db_entry_by_id_for_struct(&ctx, "nodePhrasings".to_owned(), id.to_string(), new_data).await?; Ok(UpdateNodePhrasingResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/update_node_tag.rs b/Packages/app-server/src/db/commands/update_node_tag.rs index 834ef608b..7568249ac 100644 --- a/Packages/app-server/src/db/commands/update_node_tag.rs +++ b/Packages/app-server/src/db/commands/update_node_tag.rs @@ -1,35 +1,35 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; use crate::db::general::permission_helpers::{assert_user_can_delete, assert_user_can_modify}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_tags::{NodeTag, NodeTagInput, get_node_tag, NodeTagUpdates}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_tags::{get_node_tag, NodeTag, NodeTagInput, NodeTagUpdates}; use crate::db::nodes::get_node; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; 
-use crate::utils::general::data_anchor::{DataAnchorFor1};
-use super::_command::{upsert_db_entry_by_id_for_struct, command_boilerplate, NoExtras};
+use super::_command::{command_boilerplate, upsert_db_entry_by_id_for_struct, NoExtras};
-wrap_slow_macros!{
+wrap_slow_macros! {
#[derive(Default)] pub struct MutationShard_UpdateNodeTag;
#[Object] impl MutationShard_UpdateNodeTag {
async fn update_node_tag(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateNodeTagInput, only_validate: Option) -> Result {
command_boilerplate!(gql_ctx, input, only_validate, update_node_tag);
- }
+ }
}
#[derive(InputObject, Serialize, Deserialize)]
@@ -47,9 +47,10 @@ pub struct UpdateNodeTagResult {
pub async fn update_node_tag(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateNodeTagInput, _extras: NoExtras) -> Result {
let UpdateNodeTagInput { id, updates } = input;
-
+
let old_data = get_node_tag(&ctx, &id).await?;
- assert_user_can_modify(&ctx, &actor, &old_data).await?; // this maybe checks less than is ideal, but it's okay for now
+ // this maybe checks less than is ideal, but it's okay for now
+ assert_user_can_modify(&ctx, &actor, &old_data).await?;
/*for node_id in old_data.nodes {
let node = get_node(&ctx, &node_id).await?;
assert_user_can_modify(&ctx, &actor, node.creator, node.access_policy).await?;
@@ -68,4 +69,4 @@ pub async fn update_node_tag(ctx: &AccessorContext<'_>, actor: &User, _is_root:
upsert_db_entry_by_id_for_struct(&ctx, "nodeTags".to_owned(), id.to_string(), new_data).await?;
Ok(UpdateNodeTagResult { __: gql_placeholder() })
-}
\ No newline at end of file
+}
diff --git a/Packages/app-server/src/db/commands/update_share.rs b/Packages/app-server/src/db/commands/update_share.rs
index a4daa593a..2f68aa7d4 100644
--- a/Packages/app-server/src/db/commands/update_share.rs
+++ b/Packages/app-server/src/db/commands/update_share.rs
@@ -1,34 +1,34 @@
-use rust_shared::async_graphql::{ID, SimpleObject, InputObject};
-use rust_shared::rust_macros::wrap_slow_macros;
-use rust_shared::serde_json::{Value, json};
+use rust_shared::anyhow::{anyhow, Error};
+use rust_shared::async_graphql::Object;
+use rust_shared::async_graphql::{InputObject, SimpleObject, ID};
use rust_shared::db_constants::SYSTEM_USER_ID;
-use rust_shared::{async_graphql, serde_json, anyhow, GQLError};
-use rust_shared::async_graphql::{Object};
+use rust_shared::rust_macros::wrap_slow_macros;
+use rust_shared::serde::{Deserialize, Serialize};
+use rust_shared::serde_json::{json, Value};
+use rust_shared::utils::time::time_since_epoch_ms_i64;
use rust_shared::utils::type_aliases::JSONValue;
-use rust_shared::anyhow::{anyhow, Error};
-use rust_shared::utils::time::{time_since_epoch_ms_i64};
-use rust_shared::serde::{Serialize, Deserialize};
+use rust_shared::{anyhow, async_graphql, serde_json, GQLError};
use tracing::info;
use crate::db::access_policies::get_access_policy;
-use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate};
+use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable};
use crate::db::general::permission_helpers::{assert_user_can_delete, assert_user_can_modify};
-use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx};
-use crate::db::shares::{Share, ShareInput, get_share, ShareUpdates};
+use
crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::shares::{get_share, Share, ShareInput, ShareUpdates}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_UpdateShare; #[Object] impl MutationShard_UpdateShare { async fn update_share(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateShareInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, update_share); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -46,7 +46,7 @@ pub struct UpdateShareResult { pub async fn update_share(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateShareInput, _extras: NoExtras) -> Result { let UpdateShareInput { id, updates } = input; - + let old_data = get_share(&ctx, &id).await?; assert_user_can_modify(&ctx, &actor, &old_data).await?; let new_data = Share { @@ -59,4 +59,4 @@ pub async fn update_share(ctx: &AccessorContext<'_>, actor: &User, _is_root: boo upsert_db_entry_by_id_for_struct(&ctx, "shares".to_owned(), id.to_string(), new_data).await?; Ok(UpdateShareResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/update_term.rs b/Packages/app-server/src/db/commands/update_term.rs index 4c5d04e0f..6095c66de 100644 --- a/Packages/app-server/src/db/commands/update_term.rs +++ b/Packages/app-server/src/db/commands/update_term.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; use crate::db::general::permission_helpers::{assert_user_can_delete, assert_user_can_modify}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::terms::{Term, TermInput, get_term, TermUpdates}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::terms::{get_term, Term, TermInput, TermUpdates}; use 
crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_UpdateTerm; #[Object] impl MutationShard_UpdateTerm { async fn update_term(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateTermInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, update_term); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -46,7 +46,7 @@ pub struct UpdateTermResult { pub async fn update_term(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateTermInput, _extras: NoExtras) -> Result { let UpdateTermInput { id, updates } = input; - + let old_data = get_term(&ctx, &id).await?; assert_user_can_modify(&ctx, &actor, &old_data).await?; let new_data = Term { @@ -64,4 +64,4 @@ pub async fn update_term(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool upsert_db_entry_by_id_for_struct(&ctx, "terms".to_owned(), id.to_string(), new_data).await?; Ok(UpdateTermResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/update_timeline.rs b/Packages/app-server/src/db/commands/update_timeline.rs index e5d50aede..45fa19a19 100644 --- a/Packages/app-server/src/db/commands/update_timeline.rs +++ b/Packages/app-server/src/db/commands/update_timeline.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; use crate::db::general::permission_helpers::{assert_user_can_delete, assert_user_can_modify}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::timelines::{Timeline, TimelineInput, get_timeline, TimelineUpdates}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::timelines::{get_timeline, Timeline, TimelineInput, TimelineUpdates}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use 
rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_UpdateTimeline; #[Object] impl MutationShard_UpdateTimeline { async fn update_timeline(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateTimelineInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, update_timeline); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -46,7 +46,7 @@ pub struct UpdateTimelineResult { pub async fn update_timeline(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateTimelineInput, _extras: NoExtras) -> Result { let UpdateTimelineInput { id, updates } = input; - + let old_data = get_timeline(&ctx, &id).await?; assert_user_can_modify(&ctx, &actor, &old_data).await?; let new_data = Timeline { @@ -61,4 +61,4 @@ pub async fn update_timeline(ctx: &AccessorContext<'_>, actor: &User, _is_root: upsert_db_entry_by_id_for_struct(&ctx, "timelines".to_owned(), id.to_string(), new_data).await?; Ok(UpdateTimelineResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/update_timeline_step.rs b/Packages/app-server/src/db/commands/update_timeline_step.rs index dc6ce39ea..243160988 100644 --- a/Packages/app-server/src/db/commands/update_timeline_step.rs +++ b/Packages/app-server/src/db/commands/update_timeline_step.rs @@ -1,34 +1,34 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; use crate::db::general::permission_helpers::{assert_user_can_delete, assert_user_can_modify}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::timeline_steps::{TimelineStep, TimelineStepInput, get_timeline_step, TimelineStepUpdates, timeline_step_extras_locked_subfields}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::timeline_steps::{get_timeline_step, timeline_step_extras_locked_subfields, TimelineStep, TimelineStepInput, TimelineStepUpdates}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use 
crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, update_field_of_extras}; +use super::_command::{update_field_of_extras, upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_UpdateTimelineStep; #[Object] impl MutationShard_UpdateTimelineStep { async fn update_timeline_step(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateTimelineStepInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, update_timeline_step); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -46,7 +46,7 @@ pub struct UpdateTimelineStepResult { pub async fn update_timeline_step(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateTimelineStepInput, _extras: NoExtras) -> Result { let UpdateTimelineStepInput { id, updates } = input; - + let old_data = get_timeline_step(&ctx, &id).await?; assert_user_can_modify(&ctx, &actor, &old_data).await?; let new_data = TimelineStep { @@ -63,4 +63,4 @@ pub async fn update_timeline_step(ctx: &AccessorContext<'_>, actor: &User, _is_r upsert_db_entry_by_id_for_struct(&ctx, "timelineSteps".to_owned(), id.to_string(), new_data).await?; Ok(UpdateTimelineStepResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/update_user.rs b/Packages/app-server/src/db/commands/update_user.rs index ed7c65411..a6473a28a 100644 --- a/Packages/app-server/src/db/commands/update_user.rs +++ b/Packages/app-server/src/db/commands/update_user.rs @@ -1,33 +1,33 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, ensure, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, ensure}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; use crate::db::general::permission_helpers::{assert_user_can_delete, assert_user_can_modify, is_user_admin}; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::users::{User, get_user, UserUpdates}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::users::{get_user, User, UserUpdates}; use 
crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_UpdateUser; #[Object] impl MutationShard_UpdateUser { async fn update_user(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateUserInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, update_user); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -45,7 +45,7 @@ pub struct UpdateUserResult { pub async fn update_user(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateUserInput, _extras: NoExtras) -> Result { let UpdateUserInput { id, updates } = input; - + let old_data = get_user(&ctx, &id).await?; assert_user_can_modify(ctx, actor, &old_data).await?; @@ -56,7 +56,7 @@ pub async fn update_user(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool if let Some(new_permission_groups) = updates.permissionGroups.clone() { let admin = actor.permissionGroups.admin; ensure!(admin, "Only admins can modify the permission-groups of a user."); - + let changing_own_admin_state = id == actor.id.to_string() && new_permission_groups.admin != old_data.permissionGroups.admin; ensure!(!changing_own_admin_state, "Even an admin cannot change their own account's admin-state. (to prevent accidental, permanent self-demotion)"); } @@ -70,4 +70,4 @@ pub async fn update_user(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool upsert_db_entry_by_id_for_struct(&ctx, "users".to_owned(), id.to_string(), new_data).await?; Ok(UpdateUserResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/commands/update_user_hidden.rs b/Packages/app-server/src/db/commands/update_user_hidden.rs index b451a9e1b..46a0dcb8c 100644 --- a/Packages/app-server/src/db/commands/update_user_hidden.rs +++ b/Packages/app-server/src/db/commands/update_user_hidden.rs @@ -1,33 +1,33 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, ensure, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, ensure}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::access_policies::get_access_policy; -use crate::db::commands::_command::{delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable, command_boilerplate}; +use crate::db::commands::_command::{command_boilerplate, delete_db_entry_by_id, gql_placeholder, set_db_entry_by_id, update_field, update_field_nullable}; use crate::db::general::permission_helpers::assert_user_can_modify; 
-use crate::db::user_hiddens::{UserHidden, get_user_hidden, UserHiddenUpdates, user_hidden_extras_locked_subfields}; +use crate::db::user_hiddens::{get_user_hidden, user_hidden_extras_locked_subfields, UserHidden, UserHiddenUpdates}; use crate::db::users::User; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; -use super::_command::{upsert_db_entry_by_id_for_struct, NoExtras, update_field_of_extras}; +use super::_command::{update_field_of_extras, upsert_db_entry_by_id_for_struct, NoExtras}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct MutationShard_UpdateUserHidden; #[Object] impl MutationShard_UpdateUserHidden { async fn update_user_hidden(&self, gql_ctx: &async_graphql::Context<'_>, input: UpdateUserHiddenInput, only_validate: Option) -> Result { command_boilerplate!(gql_ctx, input, only_validate, update_user_hidden); - } + } } #[derive(InputObject, Serialize, Deserialize)] @@ -45,7 +45,7 @@ pub struct UpdateUserHiddenResult { pub async fn update_user_hidden(ctx: &AccessorContext<'_>, actor: &User, _is_root: bool, input: UpdateUserHiddenInput, _extras: NoExtras) -> Result { let UpdateUserHiddenInput { id, updates } = input; - + let old_data = get_user_hidden(&ctx, &id).await?; assert_user_can_modify(ctx, actor, &old_data).await?; @@ -63,4 +63,4 @@ pub async fn update_user_hidden(ctx: &AccessorContext<'_>, actor: &User, _is_roo upsert_db_entry_by_id_for_struct(&ctx, "userHiddens".to_owned(), id.to_string(), new_data).await?; Ok(UpdateUserHiddenResult { __: gql_placeholder() }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/feedback_proposals.rs b/Packages/app-server/src/db/feedback_proposals.rs index 2aa50a008..808b12cf7 100644 --- a/Packages/app-server/src/db/feedback_proposals.rs +++ b/Packages/app-server/src/db/feedback_proposals.rs @@ -1,35 +1,43 @@ -use rust_shared::{serde, async_graphql, async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject}, GQLError}; -use futures_util::{Stream, stream, TryFutureExt}; +use futures_util::{stream, Stream, TryFutureExt}; use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::tokio_postgres::{Client, Row}; use rust_shared::SubError; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::tokio_postgres::{Row, Client}; +use rust_shared::{ + async_graphql, + async_graphql::{Context, Object, OutputType, Schema, SimpleObject, Subscription, ID}, + serde, GQLError, +}; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput, pg_row_to_json::postgres_row_to_struct}}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; +use crate::utils::db::{ + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, + pg_row_to_json::postgres_row_to_struct, +}; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(SimpleObject, Clone, Serialize, Deserialize)] //#[serde(crate = "rust_shared::serde")] pub struct Proposal { - pub id: ID, - pub r#type: String, - pub title: String, - pub text: String, - pub creator: String, + pub id: ID, + pub r#type: String, + pub title: String, + pub text: String, + pub creator: String, pub createdAt: i64, pub editedAt: Option, pub completedAt: Option, } impl From for Proposal { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(Clone)] pub struct GQLSet_Proposal { pub nodes: Vec } #[Object] impl GQLSet_Proposal { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_Proposal { - fn from(entries: Vec) -> GQLSet_Proposal { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_Proposal { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_Proposal; @@ -44,14 +52,14 @@ impl GQLSet for GQLSet_Proposal { #[derive(Default)] pub struct SubscriptionShard_Proposal; #[Subscription] impl SubscriptionShard_Proposal { - #[graphql(name = "feedback_proposals")] - async fn feedback_proposals<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "feedback_proposals", filter).await - } - #[graphql(name = "feedback_proposal")] - async fn feedback_proposal<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "feedback_proposals", id).await - } + #[graphql(name = "feedback_proposals")] + async fn feedback_proposals<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "feedback_proposals", filter).await + } + #[graphql(name = "feedback_proposal")] + async fn feedback_proposal<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "feedback_proposals", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/feedback_user_infos.rs b/Packages/app-server/src/db/feedback_user_infos.rs index 026e00450..3281af1b7 100644 --- a/Packages/app-server/src/db/feedback_user_infos.rs +++ b/Packages/app-server/src/db/feedback_user_infos.rs @@ -1,16 +1,19 @@ -use rust_shared::{SubError, GQLError}; +use futures_util::{stream, Stream, TryFutureExt}; use rust_shared::async_graphql; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{Context, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::{GQLError, SubError}; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput}}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; +use crate::utils::db::{ + 
filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; -wrap_slow_macros!{ +wrap_slow_macros! { /*cached_expand!{ const ce_args: &str = r##" @@ -20,14 +23,14 @@ excludeLinesWith = "#[graphql(name" #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct UserInfo { - pub id: ID, - pub proposalsOrder: Vec, + pub id: ID, + pub proposalsOrder: Vec, } impl From for UserInfo { fn from(row: Row) -> Self { Self { - id: ID::from(&row.get::<_, String>("id")), - proposalsOrder: row.get("proposalsOrder"), + id: ID::from(&row.get::<_, String>("id")), + proposalsOrder: row.get("proposalsOrder"), } } } @@ -35,8 +38,8 @@ impl From for UserInfo { #[derive(Clone)] pub struct GQLSet_UserInfo { pub nodes: Vec } #[Object] impl GQLSet_UserInfo { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_UserInfo { - fn from(entries: Vec) -> GQLSet_UserInfo { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_UserInfo { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_UserInfo; @@ -51,14 +54,14 @@ impl GQLSet for GQLSet_UserInfo { #[derive(Default)] pub struct SubscriptionShard_UserInfo; #[Subscription] impl SubscriptionShard_UserInfo { - #[graphql(name = "feedback_userInfos")] - async fn feedback_userInfos<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "feedback_userInfos", filter).await - } - #[graphql(name = "feedback_userInfo")] - async fn feedback_userInfo<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "feedback_userInfos", id).await - } + #[graphql(name = "feedback_userInfos")] + async fn feedback_userInfos<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "feedback_userInfos", filter).await + } + #[graphql(name = "feedback_userInfo")] + async fn feedback_userInfo<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "feedback_userInfos", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/general/backups.rs b/Packages/app-server/src/db/general/backups.rs index d0fd7b161..216b3c9c8 100644 --- a/Packages/app-server/src/db/general/backups.rs +++ b/Packages/app-server/src/db/general/backups.rs @@ -1,43 +1,62 @@ use std::process::Command; -use rust_shared::{axum::{self, response::{self, IntoResponse}, extract::Extension}, tower_http, utils::{general::k8s_env, _k8s::{exec_command_in_another_pod, get_k8s_pod_basic_infos}, general_::extensions::ToOwnedV}, anyhow::{bail, ensure}, itertools::Itertools}; -use rust_shared::hyper::{Request, Method}; -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; +use rust_shared::anyhow::{anyhow, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; use rust_shared::db_constants::SYSTEM_USER_ID; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; -use rust_shared::anyhow::{anyhow, Error, Context}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::hyper::{Method, Request}; +use 
rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; +use rust_shared::{ + anyhow::{bail, ensure}, + axum::{ + self, + extract::Extension, + response::{self, IntoResponse}, + }, + itertools::Itertools, + tower_http, + utils::{ + _k8s::{exec_command_in_another_pod, get_k8s_pod_basic_infos}, + general::k8s_env, + general_::extensions::ToOwnedV, + }, +}; use tracing::{info, warn}; -use crate::db::users::User; -use crate::{utils::{general::data_anchor::DataAnchorFor1, db::accessors::AccessorContext}, gql::get_gql_data_from_http_request, db::general::sign_in_::jwt_utils::resolve_jwt_to_user_info, store::storage::AppStateArc}; -use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx}; +use crate::db::general::sign_in_::jwt_utils::get_user_info_from_gql_ctx; use crate::db::map_node_edits::{ChangeType, MapNodeEdit}; -use crate::db::node_revisions::{NodeRevisionInput, NodeRevision}; +use crate::db::node_revisions::{NodeRevision, NodeRevisionInput}; +use crate::db::users::User; +use crate::{ + db::general::sign_in_::jwt_utils::resolve_jwt_to_user_info, + gql::get_gql_data_from_http_request, + store::storage::AppStateArc, + utils::{db::accessors::AccessorContext, general::data_anchor::DataAnchorFor1}, +}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default)] pub struct QueryShard_General_Backups; #[Object] impl QueryShard_General_Backups { - #[graphql(name = "getDBDump")] + #[graphql(name = "getDBDump")] async fn get_db_dump(&self, gql_ctx: &async_graphql::Context<'_>, /*input: GetDBDumpInput*/) -> Result { - // query boilerplate (similar to start of output of `command_boilerplate!`, but no such macro exists for queries atm) + // query boilerplate (similar to start of output of `command_boilerplate!`, but no such macro exists for queries atm) let mut anchor = DataAnchorFor1::empty(); // holds pg-client let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; let actor = get_user_info_from_gql_ctx(&gql_ctx, &ctx).await?; let pgdump_sql = try_get_db_dump(&actor).await?; - ctx.tx.commit().await?; - tracing::info!("PG-dump executed, and returned to caller. @actor:{} ({}) @pgdump_sql_len:{}", actor.id.to_string(), actor.displayName, pgdump_sql.len()); - + ctx.tx.commit().await?; + tracing::info!("PG-dump executed, and returned to caller. @actor:{} ({}) @pgdump_sql_len:{}", actor.id.to_string(), actor.displayName, pgdump_sql.len()); + return Ok(GetDBDumpResult { - pgdump_sql, - }); - } + pgdump_sql, + }); + } } /*#[derive(InputObject, Serialize, Deserialize)] @@ -52,26 +71,22 @@ pub struct GetDBDumpResult { } pub async fn try_get_db_dump(actor: &User) -> Result { - ensure!(actor.permissionGroups.admin, "Only admins can access this endpoint."); - - let target_pod = get_k8s_pod_basic_infos("postgres-operator", true).await.context("Failed to retrieve basic-info of the k8s pods.")? 
- .into_iter().find(|a| a.name.starts_with("debate-map-instance1")).map(|a| a.name).ok_or_else(|| anyhow!("Could not find debate-map-instance1-XXX pod."))?;
- let container = "database"; // pod's list of containers: postgres-startup nss-wrapper-init database replication-cert-copy pgbackrest pgbackrest-config
+ ensure!(actor.permissionGroups.admin, "Only admins can access this endpoint.");
+
+ let target_pod =
+ get_k8s_pod_basic_infos("postgres-operator", true).await.context("Failed to retrieve basic-info of the k8s pods.")?.into_iter().find(|a| a.name.starts_with("debate-map-instance1")).map(|a| a.name).ok_or_else(|| anyhow!("Could not find debate-map-instance1-XXX pod."))?;
+ let container = "database"; // pod's list of containers: postgres-startup nss-wrapper-init database replication-cert-copy pgbackrest pgbackrest-config
- // raw command string: pg_dump -U postgres debate-map
- let pgdump_output = exec_command_in_another_pod("postgres-operator", &target_pod, Some(container), "pg_dump", vec![
- "-E".o(), "UTF-8".o(),
- "-U".o(), "postgres".o(),
- "debate-map".o()
- ], true).await.context("Failed to run pg_dump command in PG pod.")?;
+ // raw command string: pg_dump -U postgres debate-map
+ let pgdump_output = exec_command_in_another_pod("postgres-operator", &target_pod, Some(container), "pg_dump", vec!["-E".o(), "UTF-8".o(), "-U".o(), "postgres".o(), "debate-map".o()], true).await.context("Failed to run pg_dump command in PG pod.")?;
- // Above, we request utf-8 encoding; however, some chars in prod-cluster's db-dump still fail to parse as utf-8!
- // So, we pass `true` above to allow lossy utf-8 conversion, and then we log a warning if any chars failed to convert.
- let chars = pgdump_output.chars().collect_vec();
- let failed_conversion_chars = chars.iter().filter(|c| **c == char::REPLACEMENT_CHARACTER).count();
- if failed_conversion_chars > 0 {
- warn!("During retrieval of pg-dump, {} chars failed to convert to utf-8; they were replaced with \"{}\". @pgdump_output_len:{}", failed_conversion_chars, char::REPLACEMENT_CHARACTER, pgdump_output.len());
- }
+ // Above, we request utf-8 encoding; however, some chars in prod-cluster's db-dump still fail to parse as utf-8!
+ // So, we pass `true` above to allow lossy utf-8 conversion, and then we log a warning if any chars failed to convert.
+ let chars = pgdump_output.chars().collect_vec();
+ let failed_conversion_chars = chars.iter().filter(|c| **c == char::REPLACEMENT_CHARACTER).count();
+ if failed_conversion_chars > 0 {
+ warn!("During retrieval of pg-dump, {} chars failed to convert to utf-8; they were replaced with \"{}\". @pgdump_output_len:{}", failed_conversion_chars, char::REPLACEMENT_CHARACTER, pgdump_output.len());
+ }
- Ok(pgdump_output)
-}
\ No newline at end of file
+ Ok(pgdump_output)
+}
diff --git a/Packages/app-server/src/db/general/permission_helpers.rs b/Packages/app-server/src/db/general/permission_helpers.rs
index db6982603..e0f45c7f9 100644
--- a/Packages/app-server/src/db/general/permission_helpers.rs
+++ b/Packages/app-server/src/db/general/permission_helpers.rs
@@ -1,52 +1,79 @@
use std::collections::HashMap;
-use rust_shared::{anyhow::{anyhow, bail}, utils::auth::jwt_utils_base::UserJWTData};
+use rust_shared::{
+ anyhow::{anyhow, bail},
+ utils::auth::jwt_utils_base::UserJWTData,
+};
-use crate::{utils::db::{accessors::AccessorContext}, db::{users::{User, get_user}, access_policies::{get_access_policy}, access_policies_::{_permission_set::{APTable, APAction}, _access_policy::AccessPolicy}, _shared::{access_policy_target::AccessPolicyTarget, table_permissions::{CanModify, CanDelete, CanVote, CanAddPhrasing, CanAddChild}}, nodes_::_node::Node}, links::db_live_cache::{get_admin_user_ids_cached, get_access_policy_cached}};
+use crate::{
+ db::{
+ _shared::{
+ access_policy_target::AccessPolicyTarget,
+ table_permissions::{CanAddChild, CanAddPhrasing, CanDelete, CanModify, CanVote},
+ },
+ access_policies::get_access_policy,
+ access_policies_::{
+ _access_policy::AccessPolicy,
+ _permission_set::{APAction, APTable},
+ },
+ nodes_::_node::Node,
+ users::{get_user, User},
+ },
+ links::db_live_cache::{get_access_policy_cached, get_admin_user_ids_cached},
+ utils::db::accessors::AccessorContext,
+};
use rust_shared::anyhow::Error;
-pub fn is_user_mod(user: &User) -> bool { user.permissionGroups.r#mod }
-pub fn is_user_admin(user: &User) -> bool { user.permissionGroups.admin }
+pub fn is_user_mod(user: &User) -> bool {
+ user.permissionGroups.r#mod
+}
+pub fn is_user_admin(user: &User) -> bool {
+ user.permissionGroups.admin
+}
/// If user is the creator, also requires that they (still) have basic permissions.
pub fn is_user_creator_or_mod(user: &User, target_creator: &str) -> bool {
- if user.id == target_creator && user.permissionGroups.basic { return true; }
- if user.permissionGroups.r#mod { return true; }
- false
+ if user.id == target_creator && user.permissionGroups.basic {
+ return true;
+ }
+ if user.permissionGroups.r#mod {
+ return true;
+ }
+ false
}
/*pub fn assert_user_is_mod(user_info: &User) -> Result<(), Error> {
- if actor.permissionGroups.r#mod { return Ok(()); }
- Err(anyhow!("This action requires moderator permissions."))
+ if actor.permissionGroups.r#mod { return Ok(()); }
+ Err(anyhow!("This action requires moderator permissions."))
}*/
pub async fn assert_user_can_modify(ctx: &AccessorContext<'_>, actor: &User, target: &impl CanModify) -> Result<(), Error> {
- match target.can_modify(ctx, actor).await? {
- true => Ok(()),
- false => Err(anyhow!("You do not have permission to modify this entry.")),
- }
+ match target.can_modify(ctx, actor).await? {
+ true => Ok(()),
+ false => Err(anyhow!("You do not have permission to modify this entry.")),
+ }
}
pub async fn assert_user_can_delete(ctx: &AccessorContext<'_>, actor: &User, target: &impl CanDelete) -> Result<(), Error> {
- match target.can_delete(ctx, actor).await?
{ + true => Ok(()), + false => Err(anyhow!("You do not have permission to delete this entry.")), + } } pub async fn assert_user_can_add_child(ctx: &AccessorContext<'_>, actor: &User, target: &impl CanAddChild) -> Result<(), Error> { - match target.can_add_child(ctx, actor).await? { - true => Ok(()), - false => Err(anyhow!("You do not have permission to add a child to this entry.")), - } + match target.can_add_child(ctx, actor).await? { + true => Ok(()), + false => Err(anyhow!("You do not have permission to add a child to this entry.")), + } } pub async fn assert_user_can_add_phrasing(ctx: &AccessorContext<'_>, actor: &User, target: &impl CanAddPhrasing) -> Result<(), Error> { - match target.can_add_phrasing(ctx, actor).await? { - true => Ok(()), - false => Err(anyhow!("You do not have permission to add a phrasing to this entry.")), - } + match target.can_add_phrasing(ctx, actor).await? { + true => Ok(()), + false => Err(anyhow!("You do not have permission to add a phrasing to this entry.")), + } } pub async fn assert_user_can_vote(ctx: &AccessorContext<'_>, actor: &User, target: &impl CanVote) -> Result<(), Error> { - match target.can_vote(ctx, actor).await? { - true => Ok(()), - false => Err(anyhow!("You do not have permission to vote on this entry.")), - } -} \ No newline at end of file + match target.can_vote(ctx, actor).await? { + true => Ok(()), + false => Err(anyhow!("You do not have permission to vote on this entry.")), + } +} diff --git a/Packages/app-server/src/db/general/search.rs b/Packages/app-server/src/db/general/search.rs index 8fceabc57..c6d655ac8 100644 --- a/Packages/app-server/src/db/general/search.rs +++ b/Packages/app-server/src/db/general/search.rs @@ -1,14 +1,14 @@ -use jsonschema::JSONSchema; +use deadpool_postgres::{Client, Pool, Transaction}; +use futures_util::{stream, Future, Stream, StreamExt, TryFutureExt, TryStreamExt}; use jsonschema::output::BasicOutput; +use jsonschema::JSONSchema; use lazy_static::lazy_static; use rust_shared::anyhow::{anyhow, Context, Error}; -use rust_shared::async_graphql::{Object, Schema, Subscription, ID, async_stream, OutputType, scalar, EmptySubscription, SimpleObject, InputObject, self, Enum}; -use deadpool_postgres::{Pool, Client, Transaction}; -use futures_util::{Stream, stream, TryFutureExt, StreamExt, Future, TryStreamExt}; +use rust_shared::async_graphql::{self, async_stream, scalar, EmptySubscription, Enum, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::once_cell::sync::Lazy; use rust_shared::regex::Regex; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; use rust_shared::tokio::sync::{RwLock, Semaphore}; use rust_shared::tokio_postgres::{IsolationLevel, Row}; @@ -16,12 +16,12 @@ use rust_shared::utils::general_::extensions::IteratorV; use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; use rust_shared::{serde, GQLError}; -use tracing::info; use std::collections::HashSet; use std::path::Path; use std::rc::Rc; use std::sync::Arc; -use std::{time::Duration, pin::Pin, task::Poll}; +use std::{pin::Pin, task::Poll, time::Duration}; +use tracing::info; use crate::db::_general::GenericMutation_Result; use crate::db::commands::clone_subtree::clone_subtree; @@ -32,52 +32,52 @@ use crate::db::node_phrasings::NodePhrasing; use crate::db::node_tags::NodeTag; use crate::db::terms::Term; use 
crate::store::storage::get_app_state_from_gql_ctx; -use crate::utils::db::filter::{QueryFilter, FilterInput}; +use crate::utils::db::accessors::AccessorContext; +use crate::utils::db::filter::{FilterInput, QueryFilter}; +use crate::utils::db::generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; use crate::utils::db::sql_fragment::SQLFragment; use crate::utils::db::transactions::start_read_transaction; -use crate::utils::general::data_anchor::{DataAnchorFor1, DataAnchor}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}}}; -use crate::utils::type_aliases::{PGClientObject}; -use crate::utils::db::accessors::{AccessorContext}; +use crate::utils::general::data_anchor::{DataAnchor, DataAnchorFor1}; +use crate::utils::type_aliases::PGClientObject; -use super::subtree_collector::{get_node_subtree, params, get_node_subtree2}; +use super::subtree_collector::{get_node_subtree, get_node_subtree2, params}; -wrap_slow_macros!{ +wrap_slow_macros! { // queries // ========== #[derive(InputObject, Serialize, Deserialize)] pub struct SearchGloballyInput { - query: String, - search_limit: usize, - search_offset: Option, - alt_phrasing_rank_factor: Option, - quote_rank_factor: Option, + query: String, + search_limit: usize, + search_offset: Option, + alt_phrasing_rank_factor: Option, + quote_rank_factor: Option, } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct SearchGloballyResult { - node_id: String, - rank: f64, - r#type: String, - found_text: String, - node_text: String, + node_id: String, + rank: f64, + r#type: String, + found_text: String, + node_text: String, } impl From for SearchGloballyResult { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct SearchSubtreeResult { - node_id: String, - rank: f64, - r#type: String, - found_text: String, - node_text: String, + node_id: String, + rank: f64, + r#type: String, + found_text: String, + node_text: String, } impl From for SearchSubtreeResult { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] @@ -87,113 +87,113 @@ pub enum ExternalIdType { } #[derive(InputObject, Serialize, Deserialize)] pub struct SearchForExternalIdsInput { - id_type: ExternalIdType, - ids: Vec + id_type: ExternalIdType, + ids: Vec } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct SearchForExternalIdsResult { - found_ids: Vec, + found_ids: Vec, } #[derive(Default)] pub struct QueryShard_General_Search; #[Object] impl QueryShard_General_Search { - async fn search_globally(&self, gql_ctx: &async_graphql::Context<'_>, input: SearchGloballyInput) -> Result, GQLError> { - let start = time_since_epoch_ms_i64(); - let SearchGloballyInput { query, search_limit, search_offset, alt_phrasing_rank_factor, quote_rank_factor } = input; - let search_limit_i32 = search_limit as i32; - let search_offset_i32 = search_offset.unwrap_or(0) as i32; - let alt_phrasing_rank_factor_f64 = alt_phrasing_rank_factor.unwrap_or(0.95) as f64; - let quote_rank_factor_f64 = quote_rank_factor.unwrap_or(0.9) as f64; - - let rows = { - // use semaphore, 
so that only X threads can be executing search queries (in `search_globally` or `search_subtree`) at the same time - info!("Test1:{}", time_since_epoch_ms_i64() - start); - let _permit = SEMAPHORE__SEARCH_EXECUTION.acquire().await.unwrap(); - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - info!("Test2:{}", time_since_epoch_ms_i64() - start); - //let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; - let ctx = AccessorContext::new_read_base(&mut anchor, Some(gql_ctx), &get_app_state_from_gql_ctx(gql_ctx).db_pool, try_get_user_jwt_data_from_gql_ctx(gql_ctx).await?, false, IsolationLevel::ReadCommitted).await?; - //let ctx = AccessorContext::new_read_base(&mut anchor, Some(gql_ctx), &get_app_state_from_gql_ctx(gql_ctx).db_pool, try_get_user_jwt_data_from_gql_ctx(gql_ctx).await?, true, IsolationLevel::ReadCommitted).await?; - info!("Test3:{}", time_since_epoch_ms_i64() - start); - let rows_test = ctx.tx.query_raw(r#"SELECT * from global_search($1, $2, $3, $4, $5)"#, params(&[ - &query, &search_limit_i32, &search_offset_i32, &alt_phrasing_rank_factor_f64, &quote_rank_factor_f64, - ])).await; - info!("Test3.5:{}", time_since_epoch_ms_i64() - start); - let rows: Vec<Row> = rows_test?.try_collect().await?; - info!("Test4:{}", time_since_epoch_ms_i64() - start); - rows - }; - - let search_results: Vec<SearchGloballyResult> = rows.into_iter().map(|a| a.into()).collect(); - info!("Test5:{}", time_since_epoch_ms_i64() - start); - Ok(search_results) - } - - async fn search_subtree( - &self, gql_ctx: &async_graphql::Context<'_>, - root_node_id: String, max_depth: Option<usize>, query: String, - search_limit: usize, search_offset: Option<usize>, - alt_phrasing_rank_factor: Option<f64>, quote_rank_factor: Option<f64>, - ) -> Result<Vec<SearchSubtreeResult>, GQLError> { - let max_depth_i32 = max_depth.unwrap_or(10000) as i32; - let search_limit_i32 = search_limit as i32; - let search_offset_i32 = search_offset.unwrap_or(0) as i32; - let alt_phrasing_rank_factor_f64 = alt_phrasing_rank_factor.unwrap_or(0.95) as f64; - let quote_rank_factor_f64 = quote_rank_factor.unwrap_or(0.9) as f64; - - let rows = { - // use semaphore, so that only X threads can be executing search queries (in `search_globally` or `search_subtree`) at the same time - let _permit = SEMAPHORE__SEARCH_EXECUTION.acquire().await.unwrap(); - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; - let rows: Vec<Row> = ctx.tx.query_raw(r#"SELECT * from local_search($1, $2, $3, $4, $5, $6, $7)"#, params(&[ - &root_node_id, &query, &search_limit_i32, &search_offset_i32, &max_depth_i32, &quote_rank_factor_f64, &alt_phrasing_rank_factor_f64 - ])).await?.try_collect().await?; - rows - }; - - let search_results: Vec<SearchSubtreeResult> = rows.into_iter().map(|a| a.into()).collect(); - Ok(search_results) - } - - // Commented; Henceforth, I plan to consider acronyms/abbreviations as "normal" words, ie. only its first letter is capitalized, because: - // 1 [abstract]) This is arguably more consistent/unambiguous. Some examples: - // * Example1) Does the snake-cased "some_xyz_field" convert to camel-case as "someXyzField" or "someXYZField"? With new casing system, this is algorithmically clear -- versus the old approach, which requires human input. - // * Example2) Does the pascal-case "APDFFile" convert to camel-case as "aPDFFile" or "apdfFile"? (admittedly an extreme edge-case of the first "word" being a single letter) - // 2 [practical]) This removes the need to do these casing-overrides for async-graphql.
- // For now, we'll say it only necessarily applies to Rust code (since the JS code is filled with the other casing choice), but the JS code may ultimately switch fully as well. - //#[graphql(name = "searchForExternalIDs")] - async fn search_for_external_ids(&self, gql_ctx: &async_graphql::Context<'_>, input: SearchForExternalIdsInput) -> Result<SearchForExternalIdsResult, GQLError> { - let SearchForExternalIdsInput { id_type, ids: ids_unsafe } = input; - let id_field = match id_type { - ExternalIdType::claimMiner => "claimMinerId", - ExternalIdType::hypothesisAnnotation => "hypothesisAnnotationId", - }; - static REGEX_FOR_VALID_ID_CHARS: Lazy<Regex> = Lazy::new(|| Regex::new(r"^[\w\-_\+/=:@\|%]+$").unwrap()); - // throw error if any ids don't match the regex (can be important, since "search_for_external_ids" sql-function currently uses the ids for a concat->jsonb operation) - let ids = ids_unsafe.into_iter().map(|id| match REGEX_FOR_VALID_ID_CHARS.is_match(&id) { - true => Ok(id), - false => Err(anyhow!("Invalid id: {}", id)), - }).try_collect2::<Vec<_>>()?; - - let rows = { - //let _permit = SEMAPHORE__SEARCH_EXECUTION.acquire().await.unwrap(); // semaphore not needed, since query fast enough - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - // For this query, bypass rls-checks. It appears safe, and brings major speed-gains (presumably since can use index): with bypass-rls=false, takes ~3000ms; with bypass-rls=true, takes <100ms - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, true).await?; - let rows: Vec<Row> = ctx.tx.query_raw(r#"SELECT * from search_for_external_ids($1, $2)"#, params(&[ - &id_field, &ids, - ])).await?.try_collect().await?; - rows - }; - - let result = SearchForExternalIdsResult { - found_ids: rows.into_iter().map(|a| a.get(0)).collect(), - }; - Ok(result) - } + async fn search_globally(&self, gql_ctx: &async_graphql::Context<'_>, input: SearchGloballyInput) -> Result<Vec<SearchGloballyResult>, GQLError> { + let start = time_since_epoch_ms_i64(); + let SearchGloballyInput { query, search_limit, search_offset, alt_phrasing_rank_factor, quote_rank_factor } = input; + let search_limit_i32 = search_limit as i32; + let search_offset_i32 = search_offset.unwrap_or(0) as i32; + let alt_phrasing_rank_factor_f64 = alt_phrasing_rank_factor.unwrap_or(0.95) as f64; + let quote_rank_factor_f64 = quote_rank_factor.unwrap_or(0.9) as f64; + + let rows = { + // use semaphore, so that only X threads can be executing search queries (in `search_globally` or `search_subtree`) at the same time + info!("Test1:{}", time_since_epoch_ms_i64() - start); + let _permit = SEMAPHORE__SEARCH_EXECUTION.acquire().await.unwrap(); + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + info!("Test2:{}", time_since_epoch_ms_i64() - start); + //let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; + let ctx = AccessorContext::new_read_base(&mut anchor, Some(gql_ctx), &get_app_state_from_gql_ctx(gql_ctx).db_pool, try_get_user_jwt_data_from_gql_ctx(gql_ctx).await?, false, IsolationLevel::ReadCommitted).await?; + //let ctx = AccessorContext::new_read_base(&mut anchor, Some(gql_ctx), &get_app_state_from_gql_ctx(gql_ctx).db_pool, try_get_user_jwt_data_from_gql_ctx(gql_ctx).await?, true, IsolationLevel::ReadCommitted).await?; + info!("Test3:{}", time_since_epoch_ms_i64() - start); + let rows_test = ctx.tx.query_raw(r#"SELECT * from global_search($1, $2, $3, $4, $5)"#, params(&[ + &query, &search_limit_i32, &search_offset_i32, &alt_phrasing_rank_factor_f64, &quote_rank_factor_f64, + ])).await; + info!("Test3.5:{}", time_since_epoch_ms_i64() -
start); + let rows: Vec<Row> = rows_test?.try_collect().await?; + info!("Test4:{}", time_since_epoch_ms_i64() - start); + rows + }; + + let search_results: Vec<SearchGloballyResult> = rows.into_iter().map(|a| a.into()).collect(); + info!("Test5:{}", time_since_epoch_ms_i64() - start); + Ok(search_results) + } + + async fn search_subtree( + &self, gql_ctx: &async_graphql::Context<'_>, + root_node_id: String, max_depth: Option<usize>, query: String, + search_limit: usize, search_offset: Option<usize>, + alt_phrasing_rank_factor: Option<f64>, quote_rank_factor: Option<f64>, + ) -> Result<Vec<SearchSubtreeResult>, GQLError> { + let max_depth_i32 = max_depth.unwrap_or(10000) as i32; + let search_limit_i32 = search_limit as i32; + let search_offset_i32 = search_offset.unwrap_or(0) as i32; + let alt_phrasing_rank_factor_f64 = alt_phrasing_rank_factor.unwrap_or(0.95) as f64; + let quote_rank_factor_f64 = quote_rank_factor.unwrap_or(0.9) as f64; + + let rows = { + // use semaphore, so that only X threads can be executing search queries (in `search_globally` or `search_subtree`) at the same time + let _permit = SEMAPHORE__SEARCH_EXECUTION.acquire().await.unwrap(); + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; + let rows: Vec<Row> = ctx.tx.query_raw(r#"SELECT * from local_search($1, $2, $3, $4, $5, $6, $7)"#, params(&[ + &root_node_id, &query, &search_limit_i32, &search_offset_i32, &max_depth_i32, &quote_rank_factor_f64, &alt_phrasing_rank_factor_f64 + ])).await?.try_collect().await?; + rows + }; + + let search_results: Vec<SearchSubtreeResult> = rows.into_iter().map(|a| a.into()).collect(); + Ok(search_results) + } + + // Commented; Henceforth, I plan to consider acronyms/abbreviations as "normal" words, ie. only its first letter is capitalized, because: + // 1 [abstract]) This is arguably more consistent/unambiguous. Some examples: + // * Example1) Does the snake-cased "some_xyz_field" convert to camel-case as "someXyzField" or "someXYZField"? With new casing system, this is algorithmically clear -- versus the old approach, which requires human input. + // * Example2) Does the pascal-case "APDFFile" convert to camel-case as "aPDFFile" or "apdfFile"? (admittedly an extreme edge-case of the first "word" being a single letter) + // 2 [practical]) This removes the need to do these casing-overrides for async-graphql. + // For now, we'll say it only necessarily applies to Rust code (since the JS code is filled with the other casing choice), but the JS code may ultimately switch fully as well.
+ //#[graphql(name = "searchForExternalIDs")] + async fn search_for_external_ids(&self, gql_ctx: &async_graphql::Context<'_>, input: SearchForExternalIdsInput) -> Result<SearchForExternalIdsResult, GQLError> { + let SearchForExternalIdsInput { id_type, ids: ids_unsafe } = input; + let id_field = match id_type { + ExternalIdType::claimMiner => "claimMinerId", + ExternalIdType::hypothesisAnnotation => "hypothesisAnnotationId", + }; + static REGEX_FOR_VALID_ID_CHARS: Lazy<Regex> = Lazy::new(|| Regex::new(r"^[\w\-_\+/=:@\|%]+$").unwrap()); + // throw error if any ids don't match the regex (can be important, since "search_for_external_ids" sql-function currently uses the ids for a concat->jsonb operation) + let ids = ids_unsafe.into_iter().map(|id| match REGEX_FOR_VALID_ID_CHARS.is_match(&id) { + true => Ok(id), + false => Err(anyhow!("Invalid id: {}", id)), + }).try_collect2::<Vec<_>>()?; + + let rows = { + //let _permit = SEMAPHORE__SEARCH_EXECUTION.acquire().await.unwrap(); // semaphore not needed, since query fast enough + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + // For this query, bypass rls-checks. It appears safe, and brings major speed-gains (presumably since can use index): with bypass-rls=false, takes ~3000ms; with bypass-rls=true, takes <100ms + let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, true).await?; + let rows: Vec<Row> = ctx.tx.query_raw(r#"SELECT * from search_for_external_ids($1, $2)"#, params(&[ + &id_field, &ids, + ])).await?.try_collect().await?; + rows + }; + + let result = SearchForExternalIdsResult { + found_ids: rows.into_iter().map(|a| a.get(0)).collect(), + }; + Ok(result) + } } } @@ -201,6 +201,6 @@ impl QueryShard_General_Search { // limit the number of searches that are being executed at the same time (we don't want expensive searches to drown out other requests, such as live-query execution) pub static SEMAPHORE__SEARCH_EXECUTION: Lazy<Semaphore> = Lazy::new(|| Semaphore::new(get_search_execution_concurrency_limit())); fn get_search_execution_concurrency_limit() -> usize { - //let logical_cpus = num_cpus::get(); - 2 -} \ No newline at end of file + //let logical_cpus = num_cpus::get(); + 2 +} diff --git a/Packages/app-server/src/db/general/sign_in.rs b/Packages/app-server/src/db/general/sign_in.rs index 6c0ee7051..54b309089 100644 --- a/Packages/app-server/src/db/general/sign_in.rs +++ b/Packages/app-server/src/db/general/sign_in.rs @@ -3,80 +3,81 @@ use std::env; use std::time::Duration; use deadpool_postgres::tokio_postgres::Row; -use rust_shared::domains::{get_server_url, ServerPod, GetServerURL_Options}; -use rust_shared::hyper::body::Body; -use rust_shared::once_cell::sync::{Lazy, OnceCell}; -use rust_shared::hyper::{Request}; +use futures_util::{Stream, TryStreamExt}; use oauth2::basic::BasicClient; use oauth2::reqwest::async_http_client; -use oauth2::{PkceCodeChallenge, RevocationUrl, RedirectUrl, TokenUrl, AuthUrl, Scope, CsrfToken, ClientSecret, ClientId, AuthorizationCode, StandardRevocableToken}; use oauth2::TokenResponse; -use rust_shared::anyhow::{Context, anyhow, Error}; -use rust_shared::async_graphql::{Object, Schema, Subscription, ID, async_stream, OutputType, scalar, EmptySubscription, SimpleObject, InputObject}; -use futures_util::{Stream, TryStreamExt}; -use rust_shared::axum::response::IntoResponse; -use rust_shared::axum::{Router, response}; +use oauth2::{AuthUrl, AuthorizationCode, ClientId, ClientSecret, CsrfToken, PkceCodeChallenge, RedirectUrl, RevocationUrl, Scope, StandardRevocableToken, TokenUrl}; +use rust_shared::anyhow::{anyhow, Context, Error}; +use
rust_shared::async_graphql::{async_stream, scalar, EmptySubscription, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::axum::extract::{Extension, Path}; +use rust_shared::axum::response::IntoResponse; use rust_shared::axum::routing::get; +use rust_shared::axum::{response, Router}; +use rust_shared::db_constants::SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME; +use rust_shared::domains::{get_server_url, GetServerURL_Options, ServerPod}; +use rust_shared::hyper::body::Body; +use rust_shared::hyper::Request; +use rust_shared::indoc::{formatdoc, indoc}; +use rust_shared::jwt_simple::prelude::{Claims, HS256Key, MACLike, VerificationOptions}; +use rust_shared::once_cell::sync::{Lazy, OnceCell}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; +use rust_shared::utils::_k8s::get_or_create_k8s_secret; use rust_shared::utils::db::uuid::{new_uuid_v4_as_b64, new_uuid_v4_as_b64_id}; -use rust_shared::db_constants::SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME; use rust_shared::utils::futures::make_reliable; use rust_shared::utils::general::{get_uri_params, k8s_dev}; -use rust_shared::indoc::{indoc, formatdoc}; use rust_shared::utils::general_::extensions::ToOwnedV; use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::{JSONValue, JWTDuration}; -use rust_shared::utils::_k8s::{get_or_create_k8s_secret}; -use rust_shared::{async_graphql, serde_json, SubError, to_sub_err, to_sub_err_in_stream, to_anyhow}; -use tracing::{info, error, warn}; -use rust_shared::jwt_simple::prelude::{HS256Key, Claims, MACLike, VerificationOptions}; +use rust_shared::{async_graphql, serde_json, to_anyhow, to_sub_err, to_sub_err_in_stream, SubError}; +use tracing::{error, info, warn}; use crate::db::_general::GenericMutation_Result; -use crate::db::general::sign_in_::fake_user::username_to_fake_user_data; use crate::db::access_policies::{get_access_policy, get_system_access_policy}; use crate::db::commands::_command::upsert_db_entry_by_id_for_struct; +use crate::db::general::sign_in_::fake_user::username_to_fake_user_data; use crate::db::general::sign_in_::google::{store_user_data_for_google_sign_in, GoogleUserInfoResult}; use crate::db::general::subtree_collector::params; -use crate::db::user_hiddens::{UserHidden, get_user_hiddens, get_user_hidden}; -use crate::db::users::{get_user, User, PermissionGroups}; -use crate::store::storage::{AppStateArc, SignInMsg, get_app_state_from_gql_ctx}; -use crate::utils::db::accessors::{AccessorContext, get_db_entries}; +use crate::db::user_hiddens::{get_user_hidden, get_user_hiddens, UserHidden}; +use crate::db::users::{get_user, PermissionGroups, User}; +use crate::store::storage::{get_app_state_from_gql_ctx, AppStateArc, SignInMsg}; +use crate::utils::db::accessors::{get_db_entries, AccessorContext}; use crate::utils::db::agql_ext::gql_request_storage::GQLRequestStorage; use crate::utils::general::data_anchor::DataAnchorFor1; -use crate::utils::type_aliases::{ABSender}; +use crate::utils::type_aliases::ABSender; -use rust_shared::utils::auth::jwt_utils_base::{UserJWTData, get_or_create_jwt_key_hs256}; +use rust_shared::utils::auth::jwt_utils_base::{get_or_create_jwt_key_hs256, UserJWTData}; -use super::sign_in_::jwt_utils::{try_get_referrer_from_gql_ctx, resolve_and_verify_jwt_string}; +use super::sign_in_::jwt_utils::{resolve_and_verify_jwt_string, try_get_referrer_from_gql_ctx}; async fn 
auth_google_callback(Extension(state): Extension, req: Request) -> impl IntoResponse { - let uri = req.uri(); - let params = get_uri_params(uri); - let attempt_id = params.get("state").map(|a| a.clone()).unwrap_or("n/a".to_owned()); - - info!("Got uri:{:?} @attemptID:{}", uri, attempt_id); - if let Err(err) = state.channel_for_sign_in_messages__sender_base.broadcast(SignInMsg::GotCallbackData(uri.clone())).await { - error!("Got error while broadcasting callback-data:{}", err); - return response::Html(format!("Got error while broadcasting callback-data. Please refresh page to try again.")); - } - response::Html(formatdoc!(r#" + let uri = req.uri(); + let params = get_uri_params(uri); + let attempt_id = params.get("state").map(|a| a.clone()).unwrap_or("n/a".to_owned()); + + info!("Got uri:{:?} @attemptID:{}", uri, attempt_id); + if let Err(err) = state.channel_for_sign_in_messages__sender_base.broadcast(SignInMsg::GotCallbackData(uri.clone())).await { + error!("Got error while broadcasting callback-data:{}", err); + return response::Html(format!("Got error while broadcasting callback-data. Please refresh page to try again.")); + } + response::Html(formatdoc!( + r#"
Data has been broadcast through the sign-in-message channel... (you can close this page now)
- "#)) + "# + )) } pub async fn extend_router(app: Router) -> Router { - let result = app - .route("/auth/google/callback", get(auth_google_callback)); - result + let result = app.route("/auth/google/callback", get(auth_google_callback)); + result } // Why is this placed here? So that it's nearby the other sign-in-related explanation-messages. (ie. in the `SignInStartResult.instructions` and `signInStart` funcs below) pub fn get_err_auth_data_required() -> Error { - anyhow!(indoc!{" + anyhow!(indoc! {" This endpoint requires auth-data to be supplied! For website browsing, this means signing-in using the panel at the top-right. For direct requests to the graphql api, this means obtaining your auth-data/jwt-string manually (see the \"signInStart\" endpoint at \"http://debates.app/gql-playground\"), and attaching it to your commands/requests. @@ -86,230 +87,230 @@ pub fn get_err_auth_data_required() -> Error { "}) } -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(InputObject, Serialize, Deserialize)] pub struct SignInStartInput { pub provider: String, - pub jwtDuration: i64, - pub jwtReadOnly: Option, - pub preferredUsername: Option, + pub jwtDuration: i64, + pub jwtReadOnly: Option, + pub preferredUsername: Option, } struct SignInStartResult { - auth_link: Option, - result_jwt: Option, + auth_link: Option, + result_jwt: Option, } #[Object] impl SignInStartResult { - async fn instructions(&self) -> Vec { - vec![ - "1) After initial call to this endpoint, you'll see these instructions, and an \"authLink\" url below. Open that link in your browser to launch google sign-in.".to_owned(), - "2) On completion of sign-in, the \"resultJWT\" field below will populate with your JWT string.".to_owned(), - "3) For queries/mutations, add the JWT string to the \"authorization\" header, eg. in gql-playground, paste this into \"HTTP HEADERS\" panel at bottom left: {\"authorization\": \"Bearer \"}".to_owned(), - "4) For subscriptions, call the \"signInAttach\" endpoint (at start of subscription block containing the auth-requiring endpoint), passing in the JWT string as the `input.jwt` field.".to_owned(), - "5) Call the endpoints that required authentication. Now that the JWT token is included in the http headers (or attached to the websocket connection), it should succeed. (assuming your account has the needed permissions)".to_owned(), - ] - } - async fn authLink(&self) -> Option { self.auth_link.clone() } - async fn resultJWT(&self) -> Option { self.result_jwt.clone() } + async fn instructions(&self) -> Vec { + vec![ + "1) After initial call to this endpoint, you'll see these instructions, and an \"authLink\" url below. Open that link in your browser to launch google sign-in.".to_owned(), + "2) On completion of sign-in, the \"resultJWT\" field below will populate with your JWT string.".to_owned(), + "3) For queries/mutations, add the JWT string to the \"authorization\" header, eg. in gql-playground, paste this into \"HTTP HEADERS\" panel at bottom left: {\"authorization\": \"Bearer \"}".to_owned(), + "4) For subscriptions, call the \"signInAttach\" endpoint (at start of subscription block containing the auth-requiring endpoint), passing in the JWT string as the `input.jwt` field.".to_owned(), + "5) Call the endpoints that required authentication. Now that the JWT token is included in the http headers (or attached to the websocket connection), it should succeed. 
(assuming your account has the needed permissions)".to_owned(), + ] + } + async fn authLink(&self) -> Option { self.auth_link.clone() } + async fn resultJWT(&self) -> Option { self.result_jwt.clone() } } #[derive(Default)] pub struct SubscriptionShard_SignIn; #[Subscription] impl SubscriptionShard_SignIn { - /// Begin sign-in flow, resulting in a JWT string being returned. (to then be passed in an `authorization` header for queries/mutations, or to the `signInAttach` endpoint for subscriptions) - /// * `provider` - The authentication flow/website/sign-in-service that will be used. [string, options: "google", "dev"] - /// * `jwtDuration` - How long until the generated JWT should expire, in seconds. [i64] - /// * `preferredUsername` - Used by the "dev" provider as part of the constructed user-data. [string] - async fn signInStart<'a>(&self, gql_ctx: &'a async_graphql::Context<'a>, input: SignInStartInput) -> impl Stream> + 'a { - let SignInStartInput { provider, jwtDuration, jwtReadOnly, preferredUsername } = input; - let jwtReadOnly = jwtReadOnly.unwrap_or(false); - - let google_client_id = ClientId::new(env::var("CLIENT_ID").expect("Missing the CLIENT_ID environment variable.")); - let google_client_secret = ClientSecret::new(env::var("CLIENT_SECRET").expect("Missing the CLIENT_SECRET environment variable.")); - let auth_url = AuthUrl::new("https://accounts.google.com/o/oauth2/v2/auth".to_string()).expect("Invalid authorization endpoint URL"); - let token_url = TokenUrl::new("https://www.googleapis.com/oauth2/v3/token".to_string()).expect("Invalid token endpoint URL"); - - let referrer = try_get_referrer_from_gql_ctx(gql_ctx); - let callback_url = get_server_url(ServerPod::AppServer, "/auth/google/callback", GetServerURL_Options { - claimed_client_url: referrer, restrict_to_recognized_hosts: true, - force_localhost: false, force_https: false, - }).expect("Could not construct callback URL"); - - // Set up the config for the Google OAuth2 process. - let client = BasicClient::new(google_client_id, Some(google_client_secret), auth_url, Some(token_url)) - // This example will be running its own server at localhost:8080. (see below for the server implementation) - .set_redirect_uri(RedirectUrl::new(callback_url).expect("Invalid redirect URL")) - // Google supports OAuth 2.0 Token Revocation (RFC-7009) - .set_revocation_uri(RevocationUrl::new("https://oauth2.googleapis.com/revoke".to_string()).expect("Invalid revocation endpoint URL")); - - // Google supports Proof Key for Code Exchange (PKCE - https://oauth.net/2/pkce/). - // Create a PKCE code verifier and SHA-256 encode it as a code challenge. - let (pkce_code_challenge, pkce_code_verifier) = PkceCodeChallenge::new_random_sha256(); - //let pkce_code_verifier_as_json = pkce_code_verifier.secret(); - let pkce_code_verifier_as_json = serde_json::to_string(&pkce_code_verifier).expect("Could not serialize pkce code-verifier to json string"); - - // Generate the authorization URL to which we'll redirect the user. - // (The csrf_state is essentially an "attempt ID"; use this to match up this attempt's callback-data with our async code-run here.) 
- let (authorize_url, csrf_state) = client - .authorize_url(CsrfToken::new_random) - .add_scope(Scope::new("email".to_string())) - .add_scope(Scope::new("profile".to_string())) - .set_pkce_challenge(pkce_code_challenge) - .url(); - - let msg_sender = &get_app_state_from_gql_ctx(gql_ctx).channel_for_sign_in_messages__sender_base; - let mut msg_receiver = msg_sender.new_receiver(); - - let base_stream = async_stream::stream! { - match provider.as_str() { - "dev" => { - if !k8s_dev() { Err(SubError::new(format!("Cannot use \"dev\" provider in non-dev k8s cluster.")))?; } - let preferredUsername = match preferredUsername { - Some(val) => val, - None => Err(SubError::new(format!("Must provide \"preferredUsername\" when using \"dev\" provider.")))?, - }; - - let fake_user = username_to_fake_user_data(preferredUsername); - - // note: the `store_user_data_for_google_sign_in` func currently only uses these fields: email, name, picture - let fake_user_as_g_profile = GoogleUserInfoResult { - sub: "".to_owned(), - email: Some(format!("{}@fake.com", fake_user.displayName.clone())), - email_verified: Some(true), - name: Some(fake_user.displayName.clone()), - given_name: Some(fake_user.displayName.clone()), - family_name: Some(fake_user.displayName.clone()), - locale: Some("en".to_owned()), - picture: None, - }; - - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_write(&mut anchor, gql_ctx, true).await.map_err(to_sub_err)?; - let user_data = store_user_data_for_google_sign_in(fake_user_as_g_profile, &ctx, jwtReadOnly, true).await.map_err(to_sub_err)?; - info!("Committing transaction..."); - ctx.tx.commit().await.map_err(to_sub_err)?; - - let key = get_or_create_jwt_key_hs256().await.map_err(to_sub_err)?; - let claims = Claims::with_custom_claims(user_data, JWTDuration::from_secs(jwtDuration.try_into().map_err(to_sub_err)?)); - let jwt = key.authenticate(claims).map_err(to_sub_err)?; - info!("Generated dev JWT:{}", jwt); - - yield Ok(SignInStartResult { - auth_link: None, - result_jwt: Some(jwt.to_string()), - }); - } - "google" => { - yield Ok(SignInStartResult { - auth_link: Some(authorize_url.to_string()), - result_jwt: None, - }); - - loop { - match make_reliable(msg_receiver.recv(), Duration::from_millis(10)).await { - Err(_err) => break, // channel closed (program must have crashed), end loop - Ok(msg) => { - match msg { - SignInMsg::GotCallbackData(uri) => { - info!("Got callback data!{:?}", uri); - let params = get_uri_params(&uri); - let callback_attempt_id = params.get("state").map(|a| a.clone()).unwrap_or("n/a".to_owned()); - if &callback_attempt_id == csrf_state.secret() { - let code_str = params.get("code").map(|a| a.clone()).unwrap_or("n/a".to_owned()); - let code = AuthorizationCode::new(code_str); - info!("Got this run's callback data! @code:{}", code.secret()); - - let pkce_code_verifier_copy = serde_json::from_str(&pkce_code_verifier_as_json).unwrap(); - - // Exchange the code with a token. 
- let token_or_error_response = client - .exchange_code(code) - .set_pkce_verifier(pkce_code_verifier_copy) - .request_async(async_http_client).await; - - info!("Google returned the following token/error response:\n{:?}\n", token_or_error_response); - - let token_response = token_or_error_response.with_context(|| "Google token-retrieval request had an error response.").map_err(to_sub_err)?; - let token_str = token_response.access_token().secret(); - info!("Got token-str:{}", token_str); - - let params_str = rust_shared::url::form_urlencoded::Serializer::new(String::new()) - .append_pair("access_token", &token_str) - .finish(); - let response_as_str = rust_shared::reqwest::get(format!("https://www.googleapis.com/oauth2/v3/userinfo?{params_str}")).await.map_err(to_sub_err)? - .text().await.map_err(to_sub_err)?; - - let user_info = serde_json::from_str::(&response_as_str).with_context(|| format!("Could not parse response-str as user-info struct:{}", response_as_str)).map_err(to_sub_err)?; - info!("Got response user-info:{:?}", user_info); - - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_write(&mut anchor, gql_ctx, true).await.map_err(to_sub_err)?; - let user_data = store_user_data_for_google_sign_in(user_info, &ctx, jwtReadOnly, false).await.map_err(to_sub_err)?; - info!("Committing transaction..."); - ctx.tx.commit().await.map_err(to_sub_err)?; - - let key = get_or_create_jwt_key_hs256().await.map_err(to_sub_err)?; - //let month_in_secs = 30 * 24 * 60 * 60; // (2629800) - let claims = Claims::with_custom_claims(user_data, JWTDuration::from_secs(jwtDuration.try_into().map_err(to_sub_err)?)); - let jwt = key.authenticate(claims).map_err(to_sub_err)?; - info!("Generated JWT:{}", jwt); - - yield Ok(SignInStartResult { - auth_link: Some(authorize_url.to_string()), - result_jwt: Some(jwt.to_string()), - }); - } - }, - } - } - } - } - }, - _ => { - Err(SubError::new(format!("Invalid provider. Valid options: google, dev")))?; - } - } - }; - base_stream - } - - /// Attaches the provided sign-in data (`jwt`) to the current websocket connection, authenticating subsequent requests sent over it. - async fn signInAttach<'a>(&self, ctx: &'a async_graphql::Context<'a>, input: SignInAttachInput) -> impl Stream> + 'a { - let jwt_storage_arc = { - let request_storage = ctx.data::().unwrap(); - let jwt_storage_arc = &request_storage.jwt; - jwt_storage_arc.clone() - }; - - let SignInAttachInput { jwt } = input; - - let base_stream = async_stream::stream! { - //let jwt_data = get_user_jwt_data_from_gql_ctx(ctx).await.map_err(to_sub_err)?; - let jwt_data = if let Some(jwt) = jwt { - Some(resolve_and_verify_jwt_string(&jwt).await.map_err(to_sub_err)?) - } else { None }; - - // put in block, to ensure that lock is released quickly (not sure if block is necessary to achieve this) - { - let mut jwt_storage = jwt_storage_arc.write().await; - *jwt_storage = jwt_data; - } - yield Ok(SignInAttachResult { success: true }); - }; - base_stream - } + /// Begin sign-in flow, resulting in a JWT string being returned. (to then be passed in an `authorization` header for queries/mutations, or to the `signInAttach` endpoint for subscriptions) + /// * `provider` - The authentication flow/website/sign-in-service that will be used. [string, options: "google", "dev"] + /// * `jwtDuration` - How long until the generated JWT should expire, in seconds. [i64] + /// * `preferredUsername` - Used by the "dev" provider as part of the constructed user-data. 
[string] + async fn signInStart<'a>(&self, gql_ctx: &'a async_graphql::Context<'a>, input: SignInStartInput) -> impl Stream> + 'a { + let SignInStartInput { provider, jwtDuration, jwtReadOnly, preferredUsername } = input; + let jwtReadOnly = jwtReadOnly.unwrap_or(false); + + let google_client_id = ClientId::new(env::var("CLIENT_ID").expect("Missing the CLIENT_ID environment variable.")); + let google_client_secret = ClientSecret::new(env::var("CLIENT_SECRET").expect("Missing the CLIENT_SECRET environment variable.")); + let auth_url = AuthUrl::new("https://accounts.google.com/o/oauth2/v2/auth".to_string()).expect("Invalid authorization endpoint URL"); + let token_url = TokenUrl::new("https://www.googleapis.com/oauth2/v3/token".to_string()).expect("Invalid token endpoint URL"); + + let referrer = try_get_referrer_from_gql_ctx(gql_ctx); + let callback_url = get_server_url(ServerPod::AppServer, "/auth/google/callback", GetServerURL_Options { + claimed_client_url: referrer, restrict_to_recognized_hosts: true, + force_localhost: false, force_https: false, + }).expect("Could not construct callback URL"); + + // Set up the config for the Google OAuth2 process. + let client = BasicClient::new(google_client_id, Some(google_client_secret), auth_url, Some(token_url)) + // This example will be running its own server at localhost:8080. (see below for the server implementation) + .set_redirect_uri(RedirectUrl::new(callback_url).expect("Invalid redirect URL")) + // Google supports OAuth 2.0 Token Revocation (RFC-7009) + .set_revocation_uri(RevocationUrl::new("https://oauth2.googleapis.com/revoke".to_string()).expect("Invalid revocation endpoint URL")); + + // Google supports Proof Key for Code Exchange (PKCE - https://oauth.net/2/pkce/). + // Create a PKCE code verifier and SHA-256 encode it as a code challenge. + let (pkce_code_challenge, pkce_code_verifier) = PkceCodeChallenge::new_random_sha256(); + //let pkce_code_verifier_as_json = pkce_code_verifier.secret(); + let pkce_code_verifier_as_json = serde_json::to_string(&pkce_code_verifier).expect("Could not serialize pkce code-verifier to json string"); + + // Generate the authorization URL to which we'll redirect the user. + // (The csrf_state is essentially an "attempt ID"; use this to match up this attempt's callback-data with our async code-run here.) + let (authorize_url, csrf_state) = client + .authorize_url(CsrfToken::new_random) + .add_scope(Scope::new("email".to_string())) + .add_scope(Scope::new("profile".to_string())) + .set_pkce_challenge(pkce_code_challenge) + .url(); + + let msg_sender = &get_app_state_from_gql_ctx(gql_ctx).channel_for_sign_in_messages__sender_base; + let mut msg_receiver = msg_sender.new_receiver(); + + let base_stream = async_stream::stream! 
{ + match provider.as_str() { + "dev" => { + if !k8s_dev() { Err(SubError::new(format!("Cannot use \"dev\" provider in non-dev k8s cluster.")))?; } + let preferredUsername = match preferredUsername { + Some(val) => val, + None => Err(SubError::new(format!("Must provide \"preferredUsername\" when using \"dev\" provider.")))?, + }; + + let fake_user = username_to_fake_user_data(preferredUsername); + + // note: the `store_user_data_for_google_sign_in` func currently only uses these fields: email, name, picture + let fake_user_as_g_profile = GoogleUserInfoResult { + sub: "".to_owned(), + email: Some(format!("{}@fake.com", fake_user.displayName.clone())), + email_verified: Some(true), + name: Some(fake_user.displayName.clone()), + given_name: Some(fake_user.displayName.clone()), + family_name: Some(fake_user.displayName.clone()), + locale: Some("en".to_owned()), + picture: None, + }; + + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_write(&mut anchor, gql_ctx, true).await.map_err(to_sub_err)?; + let user_data = store_user_data_for_google_sign_in(fake_user_as_g_profile, &ctx, jwtReadOnly, true).await.map_err(to_sub_err)?; + info!("Committing transaction..."); + ctx.tx.commit().await.map_err(to_sub_err)?; + + let key = get_or_create_jwt_key_hs256().await.map_err(to_sub_err)?; + let claims = Claims::with_custom_claims(user_data, JWTDuration::from_secs(jwtDuration.try_into().map_err(to_sub_err)?)); + let jwt = key.authenticate(claims).map_err(to_sub_err)?; + info!("Generated dev JWT:{}", jwt); + + yield Ok(SignInStartResult { + auth_link: None, + result_jwt: Some(jwt.to_string()), + }); + } + "google" => { + yield Ok(SignInStartResult { + auth_link: Some(authorize_url.to_string()), + result_jwt: None, + }); + + loop { + match make_reliable(msg_receiver.recv(), Duration::from_millis(10)).await { + Err(_err) => break, // channel closed (program must have crashed), end loop + Ok(msg) => { + match msg { + SignInMsg::GotCallbackData(uri) => { + info!("Got callback data!{:?}", uri); + let params = get_uri_params(&uri); + let callback_attempt_id = params.get("state").map(|a| a.clone()).unwrap_or("n/a".to_owned()); + if &callback_attempt_id == csrf_state.secret() { + let code_str = params.get("code").map(|a| a.clone()).unwrap_or("n/a".to_owned()); + let code = AuthorizationCode::new(code_str); + info!("Got this run's callback data! @code:{}", code.secret()); + + let pkce_code_verifier_copy = serde_json::from_str(&pkce_code_verifier_as_json).unwrap(); + + // Exchange the code with a token. + let token_or_error_response = client + .exchange_code(code) + .set_pkce_verifier(pkce_code_verifier_copy) + .request_async(async_http_client).await; + + info!("Google returned the following token/error response:\n{:?}\n", token_or_error_response); + + let token_response = token_or_error_response.with_context(|| "Google token-retrieval request had an error response.").map_err(to_sub_err)?; + let token_str = token_response.access_token().secret(); + info!("Got token-str:{}", token_str); + + let params_str = rust_shared::url::form_urlencoded::Serializer::new(String::new()) + .append_pair("access_token", &token_str) + .finish(); + let response_as_str = rust_shared::reqwest::get(format!("https://www.googleapis.com/oauth2/v3/userinfo?{params_str}")).await.map_err(to_sub_err)? 
+ .text().await.map_err(to_sub_err)?; + + let user_info = serde_json::from_str::(&response_as_str).with_context(|| format!("Could not parse response-str as user-info struct:{}", response_as_str)).map_err(to_sub_err)?; + info!("Got response user-info:{:?}", user_info); + + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_write(&mut anchor, gql_ctx, true).await.map_err(to_sub_err)?; + let user_data = store_user_data_for_google_sign_in(user_info, &ctx, jwtReadOnly, false).await.map_err(to_sub_err)?; + info!("Committing transaction..."); + ctx.tx.commit().await.map_err(to_sub_err)?; + + let key = get_or_create_jwt_key_hs256().await.map_err(to_sub_err)?; + //let month_in_secs = 30 * 24 * 60 * 60; // (2629800) + let claims = Claims::with_custom_claims(user_data, JWTDuration::from_secs(jwtDuration.try_into().map_err(to_sub_err)?)); + let jwt = key.authenticate(claims).map_err(to_sub_err)?; + info!("Generated JWT:{}", jwt); + + yield Ok(SignInStartResult { + auth_link: Some(authorize_url.to_string()), + result_jwt: Some(jwt.to_string()), + }); + } + }, + } + } + } + } + }, + _ => { + Err(SubError::new(format!("Invalid provider. Valid options: google, dev")))?; + } + } + }; + base_stream + } + + /// Attaches the provided sign-in data (`jwt`) to the current websocket connection, authenticating subsequent requests sent over it. + async fn signInAttach<'a>(&self, ctx: &'a async_graphql::Context<'a>, input: SignInAttachInput) -> impl Stream> + 'a { + let jwt_storage_arc = { + let request_storage = ctx.data::().unwrap(); + let jwt_storage_arc = &request_storage.jwt; + jwt_storage_arc.clone() + }; + + let SignInAttachInput { jwt } = input; + + let base_stream = async_stream::stream! { + //let jwt_data = get_user_jwt_data_from_gql_ctx(ctx).await.map_err(to_sub_err)?; + let jwt_data = if let Some(jwt) = jwt { + Some(resolve_and_verify_jwt_string(&jwt).await.map_err(to_sub_err)?) 
+ } else { None }; + + // put in block, to ensure that lock is released quickly (not sure if block is necessary to achieve this) + { + let mut jwt_storage = jwt_storage_arc.write().await; + *jwt_storage = jwt_data; + } + yield Ok(SignInAttachResult { success: true }); + }; + base_stream + } } #[derive(InputObject, Serialize, Deserialize)] pub struct SignInAttachInput { - // this is settable to null/none, since caller may have cases where it wants to "sign out", yet keep the same websocket connection open + // this is settable to null/none, since caller may have cases where it wants to "sign out", yet keep the same websocket connection open pub jwt: Option, } #[derive(SimpleObject, Debug)] struct SignInAttachResult { - success: bool, + success: bool, } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/general/sign_in_/fake_user.rs b/Packages/app-server/src/db/general/sign_in_/fake_user.rs index c1371f11d..0d555616a 100644 --- a/Packages/app-server/src/db/general/sign_in_/fake_user.rs +++ b/Packages/app-server/src/db/general/sign_in_/fake_user.rs @@ -1,26 +1,26 @@ use rust_shared::async_graphql::ID; -use crate::db::users::{User, PermissionGroups}; +use crate::db::users::{PermissionGroups, User}; fn username_to_fake_slug_id(username: &str) -> String { - let prefix_segment = "fakeUserData".to_string(); - let username_segment_max_length = 22 - (prefix_segment.len() + 1); + let prefix_segment = "fakeUserData".to_string(); + let username_segment_max_length = 22 - (prefix_segment.len() + 1); - let username_segment = username.replace(|c: char| !c.is_ascii_alphanumeric(), "_"); - assert!(username_segment.len() <= username_segment_max_length, "Fake username is too long!"); + let username_segment = username.replace(|c: char| !c.is_ascii_alphanumeric(), "_"); + assert!(username_segment.len() <= username_segment_max_length, "Fake username is too long!"); - let gap_size = 22 - prefix_segment.len() - username_segment.len(); - prefix_segment + &"_".repeat(gap_size) + &username_segment + let gap_size = 22 - prefix_segment.len() - username_segment.len(); + prefix_segment + &"_".repeat(gap_size) + &username_segment } pub fn username_to_fake_user_data(username: String) -> User { - return User { - id: ID(username_to_fake_slug_id(&username)), - displayName: username, - edits: 0, - joinDate: 1, - lastEditAt: None, - permissionGroups: PermissionGroups {basic: true, verified: true, r#mod: true, admin: true}, - photoURL: None, - }; -} \ No newline at end of file + return User { + id: ID(username_to_fake_slug_id(&username)), + displayName: username, + edits: 0, + joinDate: 1, + lastEditAt: None, + permissionGroups: PermissionGroups { basic: true, verified: true, r#mod: true, admin: true }, + photoURL: None, + }; +} diff --git a/Packages/app-server/src/db/general/sign_in_/google.rs b/Packages/app-server/src/db/general/sign_in_/google.rs index 4062c798e..3e23afa75 100644 --- a/Packages/app-server/src/db/general/sign_in_/google.rs +++ b/Packages/app-server/src/db/general/sign_in_/google.rs @@ -3,134 +3,132 @@ use std::env; use std::time::Duration; use deadpool_postgres::tokio_postgres::Row; -use rust_shared::once_cell::sync::{Lazy, OnceCell}; -use rust_shared::hyper::{Request}; +use futures_util::{Stream, TryStreamExt}; use oauth2::basic::BasicClient; use oauth2::reqwest::async_http_client; -use oauth2::{PkceCodeChallenge, RevocationUrl, RedirectUrl, TokenUrl, AuthUrl, Scope, CsrfToken, ClientSecret, ClientId, AuthorizationCode, StandardRevocableToken}; use oauth2::TokenResponse; -use 
rust_shared::anyhow::{Context, anyhow, Error, bail}; -use rust_shared::async_graphql::{Object, Schema, Subscription, ID, async_stream, OutputType, scalar, EmptySubscription, SimpleObject}; -use futures_util::{Stream, TryStreamExt}; +use oauth2::{AuthUrl, AuthorizationCode, ClientId, ClientSecret, CsrfToken, PkceCodeChallenge, RedirectUrl, RevocationUrl, Scope, StandardRevocableToken, TokenUrl}; +use rust_shared::anyhow::{anyhow, bail, Context, Error}; +use rust_shared::async_graphql::{async_stream, scalar, EmptySubscription, Object, OutputType, Schema, SimpleObject, Subscription, ID}; +use rust_shared::axum::extract::Path; use rust_shared::axum::response::IntoResponse; -use rust_shared::axum::{Router, Extension, response}; -use rust_shared::axum::extract::{Path}; use rust_shared::axum::routing::get; +use rust_shared::axum::{response, Extension, Router}; +use rust_shared::db_constants::{SYSTEM_POLICY_PRIVATE_GOVERNED_NAME, SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME}; +use rust_shared::hyper::Request; +use rust_shared::indoc::indoc; +use rust_shared::jwt_simple::prelude::{Claims, HS256Key, MACLike, VerificationOptions}; +use rust_shared::once_cell::sync::{Lazy, OnceCell}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; +use rust_shared::utils::_k8s::get_or_create_k8s_secret; use rust_shared::utils::auth::jwt_utils_base::UserJWTData; use rust_shared::utils::db::uuid::{new_uuid_v4_as_b64, new_uuid_v4_as_b64_id}; -use rust_shared::db_constants::{SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME, SYSTEM_POLICY_PRIVATE_GOVERNED_NAME}; use rust_shared::utils::futures::make_reliable; use rust_shared::utils::general::get_uri_params; -use rust_shared::indoc::indoc; use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::utils::_k8s::{get_or_create_k8s_secret}; -use rust_shared::{async_graphql, serde_json, SubError, to_sub_err, to_sub_err_in_stream, to_anyhow}; -use tracing::{info, error, warn}; -use rust_shared::jwt_simple::prelude::{HS256Key, Claims, MACLike, VerificationOptions}; +use rust_shared::{async_graphql, serde_json, to_anyhow, to_sub_err, to_sub_err_in_stream, SubError}; +use tracing::{error, info, warn}; use crate::db::_general::GenericMutation_Result; -use crate::db::general::sign_in_::fake_user::username_to_fake_user_data; use crate::db::access_policies::{get_access_policy, get_system_access_policy}; use crate::db::commands::_command::upsert_db_entry_by_id_for_struct; +use crate::db::general::sign_in_::fake_user::username_to_fake_user_data; use crate::db::general::subtree_collector::params; -use crate::db::user_hiddens::{UserHidden, get_user_hiddens, get_user_hidden, UserHidden_Extras}; -use crate::db::users::{get_user, User, PermissionGroups}; +use crate::db::user_hiddens::{get_user_hidden, get_user_hiddens, UserHidden, UserHidden_Extras}; +use crate::db::users::{get_user, PermissionGroups, User}; use crate::store::storage::{AppStateArc, SignInMsg}; -use crate::utils::db::accessors::{AccessorContext, get_db_entries}; +use crate::utils::db::accessors::{get_db_entries, AccessorContext}; use crate::utils::general::data_anchor::DataAnchorFor1; -use crate::utils::type_aliases::{ABSender}; +use crate::utils::type_aliases::ABSender; /// See list of available fields here: https://developers.google.com/identity/openid-connect/openid-connect #[derive(Serialize, Deserialize, Debug, Clone)] pub struct 
GoogleUserInfoResult { - /// Identifier for the user that is unique/unchanging, and always provided in the oauth response. - pub sub: String, - pub email: Option, - pub email_verified: Option, - pub name: Option, - pub given_name: Option, - pub family_name: Option, - pub locale: Option, - pub picture: Option, + /// Identifier for the user that is unique/unchanging, and always provided in the oauth response. + pub sub: String, + pub email: Option, + pub email_verified: Option, + pub name: Option, + pub given_name: Option, + pub family_name: Option, + pub locale: Option, + pub picture: Option, } pub async fn store_user_data_for_google_sign_in(profile: GoogleUserInfoResult, ctx: &AccessorContext<'_>, read_only: bool, force_as_admin: bool) -> Result { - let email = match &profile.email { - Some(email) => email.clone(), - None => bail!("Cannot sign-in using a Google account with no email address."), - }; - let name = match &profile.name { - Some(name) => name.clone(), - None => bail!("Cannot sign-in using a Google account with no name."), - }; - - let user_hiddens_with_email = get_user_hiddens(ctx, Some(email.clone())).await?; - match user_hiddens_with_email.len() { - 0 => {}, - 1 => { - let existing_user_hidden = user_hiddens_with_email.get(0).ok_or(anyhow!("Row missing somehow?"))?; - info!("Found existing user for email:{}", email); - let existing_user = get_user(ctx, &existing_user_hidden.id).await - .map_err(|_| anyhow!(r#"Could not find user with id matching that of the entry in userHiddens ({}), which was found based on your provided account's email ({})."#, existing_user_hidden.id.as_str(), existing_user_hidden.email))?; - info!("Also found user-data:{:?}", existing_user); - return Ok(UserJWTData { id: existing_user.id.0, email: existing_user_hidden.email.to_owned(), readOnly: Some(read_only) }); - }, - _ => return Err(anyhow!("More than one user found with same email! This shouldn't happen.")), - } + let email = match &profile.email { + Some(email) => email.clone(), + None => bail!("Cannot sign-in using a Google account with no email address."), + }; + let name = match &profile.name { + Some(name) => name.clone(), + None => bail!("Cannot sign-in using a Google account with no name."), + }; + + let user_hiddens_with_email = get_user_hiddens(ctx, Some(email.clone())).await?; + match user_hiddens_with_email.len() { + 0 => {}, + 1 => { + let existing_user_hidden = user_hiddens_with_email.get(0).ok_or(anyhow!("Row missing somehow?"))?; + info!("Found existing user for email:{}", email); + let existing_user = get_user(ctx, &existing_user_hidden.id) + .await + .map_err(|_| anyhow!(r#"Could not find user with id matching that of the entry in userHiddens ({}), which was found based on your provided account's email ({})."#, existing_user_hidden.id.as_str(), existing_user_hidden.email))?; + info!("Also found user-data:{:?}", existing_user); + return Ok(UserJWTData { id: existing_user.id.0, email: existing_user_hidden.email.to_owned(), readOnly: Some(read_only) }); + }, + _ => return Err(anyhow!("More than one user found with same email! This shouldn't happen.")), + } info!(r#"User not found for email "{}". 
Creating new."#, email); - let mut permissionGroups = PermissionGroups {basic: true, verified: true, r#mod: false, admin: false}; + let mut permissionGroups = PermissionGroups { basic: true, verified: true, r#mod: false, admin: false }; // maybe temp; make first (non-system) user an admin - let users_count_rows: Vec = ctx.tx.query_raw("SELECT count(*) FROM (SELECT 1 FROM users LIMIT 10) t;", params(&[])).await?.try_collect().await?; - let users_count: i64 = users_count_rows.get(0).ok_or(anyhow!("No rows"))?.try_get(0)?; + let users_count_rows: Vec = ctx.tx.query_raw("SELECT count(*) FROM (SELECT 1 FROM users LIMIT 10) t;", params(&[])).await?.try_collect().await?; + let users_count: i64 = users_count_rows.get(0).ok_or(anyhow!("No rows"))?.try_get(0)?; if users_count <= 1 || force_as_admin { info!("Marking new user as admin. (since first non-system user signing in, or using dev-mode sign-in path)"); permissionGroups.r#mod = true; - permissionGroups.admin = true; + permissionGroups.admin = true; } - let profile_clone = profile.clone(); + let profile_clone = profile.clone(); let user = User { - id: new_uuid_v4_as_b64_id(), + id: new_uuid_v4_as_b64_id(), displayName: name, permissionGroups, photoURL: profile.picture, - joinDate: time_since_epoch_ms_i64(), - edits: 0, - lastEditAt: None, + joinDate: time_since_epoch_ms_i64(), + edits: 0, + lastEditAt: None, }; - let new_user_id = user.id.as_str().to_owned(); + let new_user_id = user.id.as_str().to_owned(); let policy_public_ungoverned = get_system_access_policy(ctx, &SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME).await?; let policy_private_governed = get_system_access_policy(ctx, &SYSTEM_POLICY_PRIVATE_GOVERNED_NAME).await?; let user_hidden = UserHidden { - id: user.id.clone(), - email: email, + id: user.id.clone(), + email, providerData: serde_json::to_value(vec![profile_clone])?, lastAccessPolicy: Some(policy_public_ungoverned.id.as_str().to_owned()), - backgroundID: None, - backgroundCustom_enabled: None, - backgroundCustom_color: None, - backgroundCustom_url: None, - backgroundCustom_position: None, - addToStream: true, - extras: serde_json::to_value(UserHidden_Extras { - defaultAccessPolicy_nodeRatings: Some(policy_private_governed.id.as_str().to_owned()), - userFollows: None, - })?, + backgroundID: None, + backgroundCustom_enabled: None, + backgroundCustom_color: None, + backgroundCustom_url: None, + backgroundCustom_position: None, + addToStream: true, + extras: serde_json::to_value(UserHidden_Extras { defaultAccessPolicy_nodeRatings: Some(policy_private_governed.id.as_str().to_owned()), userFollows: None })?, }; - upsert_db_entry_by_id_for_struct(&ctx, "users".to_owned(), user.id.to_string(), user).await?; - upsert_db_entry_by_id_for_struct(&ctx, "userHiddens".to_owned(), user_hidden.id.to_string(), user_hidden.clone()).await?; + upsert_db_entry_by_id_for_struct(&ctx, "users".to_owned(), user.id.to_string(), user).await?; + upsert_db_entry_by_id_for_struct(&ctx, "userHiddens".to_owned(), user_hidden.id.to_string(), user_hidden.clone()).await?; info!("Creation of new user semi-complete! 
NewID:{}", new_user_id); // "semi" complete, because transaction hasn't been committed yet - let user = get_user(ctx, new_user_id.as_str()).await?; + let user = get_user(ctx, new_user_id.as_str()).await?; info!("User data:{:?}", user); Ok(UserJWTData { id: user.id.0, email: user_hidden.email.to_owned(), readOnly: Some(read_only) }) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/general/sign_in_/jwt_utils.rs b/Packages/app-server/src/db/general/sign_in_/jwt_utils.rs index 507406a5f..6668c082a 100644 --- a/Packages/app-server/src/db/general/sign_in_/jwt_utils.rs +++ b/Packages/app-server/src/db/general/sign_in_/jwt_utils.rs @@ -3,123 +3,127 @@ use std::env; use std::time::Duration; use deadpool_postgres::tokio_postgres::Row; -use rust_shared::once_cell::sync::{Lazy, OnceCell}; -use rust_shared::hyper::{Request}; +use futures_util::{Stream, TryStreamExt}; use oauth2::basic::BasicClient; use oauth2::reqwest::async_http_client; -use oauth2::{PkceCodeChallenge, RevocationUrl, RedirectUrl, TokenUrl, AuthUrl, Scope, CsrfToken, ClientSecret, ClientId, AuthorizationCode, StandardRevocableToken}; use oauth2::TokenResponse; -use rust_shared::anyhow::{Context, anyhow, Error}; -use rust_shared::async_graphql::{Object, Schema, Subscription, ID, async_stream, OutputType, scalar, EmptySubscription, SimpleObject}; -use futures_util::{Stream, TryStreamExt}; -use rust_shared::axum::response::IntoResponse; -use rust_shared::axum::{Router, response}; +use oauth2::{AuthUrl, AuthorizationCode, ClientId, ClientSecret, CsrfToken, PkceCodeChallenge, RedirectUrl, RevocationUrl, Scope, StandardRevocableToken, TokenUrl}; +use rust_shared::anyhow::{anyhow, Context, Error}; +use rust_shared::async_graphql::{async_stream, scalar, EmptySubscription, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::axum::extract::{Extension, Path}; +use rust_shared::axum::response::IntoResponse; use rust_shared::axum::routing::get; +use rust_shared::axum::{response, Router}; +use rust_shared::db_constants::SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME; +use rust_shared::hyper::Request; +use rust_shared::indoc::indoc; +use rust_shared::jwt_simple::prelude::{Claims, HS256Key, MACLike, VerificationOptions}; +use rust_shared::once_cell::sync::{Lazy, OnceCell}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; +use rust_shared::utils::_k8s::get_or_create_k8s_secret; use rust_shared::utils::auth::jwt_utils_base::{get_or_create_jwt_key_hs256, UserJWTData}; use rust_shared::utils::db::uuid::{new_uuid_v4_as_b64, new_uuid_v4_as_b64_id}; -use rust_shared::db_constants::SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME; use rust_shared::utils::futures::make_reliable; use rust_shared::utils::general::get_uri_params; -use rust_shared::indoc::indoc; use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::utils::_k8s::{get_or_create_k8s_secret}; -use rust_shared::{async_graphql, serde_json, SubError, to_sub_err, to_sub_err_in_stream, to_anyhow}; -use tracing::{info, error, warn}; -use rust_shared::jwt_simple::prelude::{HS256Key, Claims, MACLike, VerificationOptions}; +use rust_shared::{async_graphql, serde_json, to_anyhow, to_sub_err, to_sub_err_in_stream, SubError}; +use tracing::{error, info, warn}; use crate::db::_general::GenericMutation_Result; -use crate::db::general::sign_in::get_err_auth_data_required; -use 
crate::db::general::sign_in_::fake_user::username_to_fake_user_data; use crate::db::access_policies::{get_access_policy, get_system_access_policy}; use crate::db::commands::_command::upsert_db_entry_by_id_for_struct; +use crate::db::general::sign_in::get_err_auth_data_required; +use crate::db::general::sign_in_::fake_user::username_to_fake_user_data; use crate::db::general::subtree_collector::params; -use crate::db::user_hiddens::{UserHidden, get_user_hiddens, get_user_hidden}; -use crate::db::users::{get_user, User, PermissionGroups}; +use crate::db::user_hiddens::{get_user_hidden, get_user_hiddens, UserHidden}; +use crate::db::users::{get_user, PermissionGroups, User}; use crate::gql::GQLDataFromHTTPRequest; use crate::store::storage::{AppStateArc, SignInMsg}; -use crate::utils::db::accessors::{AccessorContext, get_db_entries}; +use crate::utils::db::accessors::{get_db_entries, AccessorContext}; use crate::utils::db::agql_ext::gql_request_storage::GQLRequestStorage; use crate::utils::general::data_anchor::DataAnchorFor1; -use crate::utils::type_aliases::{ABSender}; +use crate::utils::type_aliases::ABSender; // for user-jwt-data + user-info retrieved from database // ========== pub async fn get_user_info_from_gql_ctx<'a>(gql_ctx: &'a async_graphql::Context<'a>, ctx: &AccessorContext<'_>) -> Result { - let user_info = try_get_user_info_from_gql_ctx(gql_ctx, ctx).await?; - match user_info { - None => Err(get_err_auth_data_required()), - Some(user_info) => Ok(user_info), - } + let user_info = try_get_user_info_from_gql_ctx(gql_ctx, ctx).await?; + match user_info { + None => Err(get_err_auth_data_required()), + Some(user_info) => Ok(user_info), + } } pub async fn try_get_user_info_from_gql_ctx<'a>(gql_ctx: &'a async_graphql::Context<'a>, ctx: &AccessorContext<'_>) -> Result, Error> { - match try_get_user_jwt_data_from_gql_ctx(gql_ctx).await? { - None => Ok(None), - Some(jwt_data) => { - let user_info = resolve_jwt_to_user_info(ctx, &jwt_data).await?; - Ok(Some(user_info)) - } - } + match try_get_user_jwt_data_from_gql_ctx(gql_ctx).await? { + None => Ok(None), + Some(jwt_data) => { + let user_info = resolve_jwt_to_user_info(ctx, &jwt_data).await?; + Ok(Some(user_info)) + }, + } } pub async fn resolve_jwt_to_user_info<'a>(ctx: &AccessorContext<'_>, jwt_data: &UserJWTData) -> Result { - /*let user_hidden = get_user_hidden(&ctx, jwt_data.id.as_str()).await?; - let user = get_user(&ctx, &user_hidden.id).await?;*/ - let user = get_user(&ctx, jwt_data.id.as_str()).await?; - Ok(user) + /*let user_hidden = get_user_hidden(&ctx, jwt_data.id.as_str()).await?; + let user = get_user(&ctx, &user_hidden.id).await?;*/ + let user = get_user(&ctx, jwt_data.id.as_str()).await?; + Ok(user) } // for user-jwt-data only (ie. static data stored within jwt itself, without need for new db queries) // ========== pub async fn get_user_jwt_data_from_gql_ctx<'a>(gql_ctx: &'a async_graphql::Context<'a>) -> Result { - let jwt_data = try_get_user_jwt_data_from_gql_ctx(gql_ctx).await?; - match jwt_data { - None => Err(get_err_auth_data_required()), - Some(user_info) => Ok(user_info), - } + let jwt_data = try_get_user_jwt_data_from_gql_ctx(gql_ctx).await?; + match jwt_data { + None => Err(get_err_auth_data_required()), + Some(user_info) => Ok(user_info), + } } pub async fn try_get_user_jwt_data_from_gql_ctx<'a>(gql_ctx: &'a async_graphql::Context<'a>) -> Result, Error> { - // this branch is used for GET/POST requests (ie. 
for queries and mutations; it's populated in `have_own_graphql_handle_request()`) - if let Ok(data) = gql_ctx.data::() && let Some(jwt) = &data.jwt { - let jwt_data = resolve_and_verify_jwt_string(&jwt).await?; - Ok(Some(jwt_data)) - } - // this branch is used for websocket requests (ie. for subscriptions); it's inserted in `graphql_websocket_handler()` and populated in `signInAttach()` - else if let Ok(storage) = gql_ctx.data::() && let Some(jwt_data) = storage.jwt.read().await.clone() { - Ok(Some(jwt_data)) - } - // if no data-entry found in gql-context, return None for "no user data" - else { - Ok(None) - } + // this branch is used for GET/POST requests (ie. for queries and mutations; it's populated in `have_own_graphql_handle_request()`) + if let Ok(data) = gql_ctx.data::() + && let Some(jwt) = &data.jwt + { + let jwt_data = resolve_and_verify_jwt_string(&jwt).await?; + Ok(Some(jwt_data)) + } + // this branch is used for websocket requests (ie. for subscriptions); it's inserted in `graphql_websocket_handler()` and populated in `signInAttach()` + else if let Ok(storage) = gql_ctx.data::() + && let Some(jwt_data) = storage.jwt.read().await.clone() + { + Ok(Some(jwt_data)) + } + // if no data-entry found in gql-context, return None for "no user data" + else { + Ok(None) + } } pub async fn resolve_and_verify_jwt_string<'a>(jwt_string: &str) -> Result { - let key = get_or_create_jwt_key_hs256().await?; + let key = get_or_create_jwt_key_hs256().await?; - let verify_opts = VerificationOptions { - //accept_future: true, // accept tokens that will only be valid in the future - //time_tolerance: Some(JWTDuration::from_mins(15)), // accept tokens even if they have expired up to 15 minutes after the deadline - //max_validity: Some(JWTDuration::from_hours(1)), // reject tokens if they were issued more than 1 hour ago - //allowed_issuers: Some(HashSet::from_strings(&["example app"])), // reject tokens if they don't include an issuer from that set - .. 
VerificationOptions::default() - }; - let claims = key.verify_token::(jwt_string, Some(verify_opts))?; - let jwt_data: UserJWTData = claims.custom; - Ok(jwt_data) + let verify_opts = VerificationOptions { + //accept_future: true, // accept tokens that will only be valid in the future + //time_tolerance: Some(JWTDuration::from_mins(15)), // accept tokens even if they have expired up to 15 minutes after the deadline + //max_validity: Some(JWTDuration::from_hours(1)), // reject tokens if they were issued more than 1 hour ago + //allowed_issuers: Some(HashSet::from_strings(&["example app"])), // reject tokens if they don't include an issuer from that set + ..VerificationOptions::default() + }; + let claims = key.verify_token::(jwt_string, Some(verify_opts))?; + let jwt_data: UserJWTData = claims.custom; + Ok(jwt_data) } // other gql-context data // ========== pub fn try_get_referrer_from_gql_ctx<'a>(gql_ctx: &'a async_graphql::Context<'a>) -> Option { - match gql_ctx.data::() { - Ok(val) => val.referrer.clone(), - // if no data-entry found in gql-context, return None for "no user data" - Err(_err) => None, - } -} \ No newline at end of file + match gql_ctx.data::() { + Ok(val) => val.referrer.clone(), + // if no data-entry found in gql-context, return None for "no user data" + Err(_err) => None, + } +} diff --git a/Packages/app-server/src/db/general/subtree.rs b/Packages/app-server/src/db/general/subtree.rs index 4a60e7bfc..2b768ae9c 100644 --- a/Packages/app-server/src/db/general/subtree.rs +++ b/Packages/app-server/src/db/general/subtree.rs @@ -1,26 +1,26 @@ -use jsonschema::JSONSchema; +use deadpool_postgres::{Client, Pool, Transaction}; +use futures_util::{stream, Future, Stream, StreamExt, TryFutureExt, TryStreamExt}; use jsonschema::output::BasicOutput; +use jsonschema::JSONSchema; use lazy_static::lazy_static; use rust_shared::anyhow::{anyhow, Context, Error}; use rust_shared::async_graphql::{self, async_stream, scalar, EmptySubscription, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; -use deadpool_postgres::{Pool, Client, Transaction}; -use futures_util::{Stream, stream, TryFutureExt, StreamExt, Future, TryStreamExt}; use rust_shared::indexmap::{IndexMap, IndexSet}; use rust_shared::itertools::Itertools; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; use rust_shared::tokio::sync::RwLock; use rust_shared::tokio_postgres::Row; use rust_shared::utils::general_::extensions::VecLenU32; use rust_shared::utils::type_aliases::JSONValue; use rust_shared::{serde, to_sub_err, GQLError, SubError}; -use tracing::warn; use std::collections::HashSet; use std::path::Path; use std::rc::Rc; use std::sync::Arc; -use std::{time::Duration, pin::Pin, task::Poll}; +use std::{pin::Pin, task::Poll, time::Duration}; +use tracing::warn; use crate::db::_general::GenericMutation_Result; use crate::db::commands::_command::NoExtras; @@ -37,151 +37,151 @@ use crate::db::node_tags::NodeTag; use crate::db::nodes_::_node::Node; use crate::db::terms::Term; use crate::db::users::User; +use crate::utils::db::accessors::AccessorContext; use crate::utils::db::agql_ext::gql_utils::IndexMapAGQL; -use crate::utils::db::filter::{QueryFilter, FilterInput}; +use crate::utils::db::filter::{FilterInput, QueryFilter}; +use crate::utils::db::generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}; use 
crate::utils::db::pg_row_to_json::postgres_row_to_struct; use crate::utils::db::sql_fragment::SQLFragment; use crate::utils::db::transactions::start_read_transaction; -use crate::utils::general::data_anchor::{DataAnchorFor1, DataAnchor}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}}}; -use crate::utils::type_aliases::{PGClientObject}; -use crate::utils::db::accessors::{AccessorContext}; +use crate::utils::general::data_anchor::{DataAnchor, DataAnchorFor1}; +use crate::utils::type_aliases::PGClientObject; -use super::subtree_collector::{get_node_subtree, params, get_node_subtree2}; +use super::subtree_collector::{get_node_subtree, get_node_subtree2, params}; -wrap_slow_macros!{ +wrap_slow_macros! { // queries // ========== #[derive(SimpleObject, Clone, Serialize, Deserialize, Default)] pub struct Subtree { - pub terms: Vec, - pub medias: Vec, - pub nodes: Vec, - pub nodeLinks: Vec, - pub nodeRevisions: Vec, - pub nodePhrasings: Vec, - pub nodeTags: Vec, + pub terms: Vec, + pub medias: Vec, + pub nodes: Vec, + pub nodeLinks: Vec, + pub nodeRevisions: Vec, + pub nodePhrasings: Vec, + pub nodeTags: Vec, } impl Subtree { - pub fn get_all_ids(&self) -> HashSet { - let mut result = HashSet::::new(); - for entry in &self.terms { result.insert(entry.id.to_string()); } - for entry in &self.medias { result.insert(entry.id.to_string()); } - for entry in &self.nodes { result.insert(entry.id.to_string()); } - for entry in &self.nodeLinks { result.insert(entry.id.to_string()); } - for entry in &self.nodeRevisions { result.insert(entry.id.to_string()); } - for entry in &self.nodePhrasings { result.insert(entry.id.to_string()); } - for entry in &self.nodeTags { result.insert(entry.id.to_string()); } - result - } - pub fn sort_all_entries(&mut self) { - // see here for reason sort_by_key isn't used: https://stackoverflow.com/a/47126516 - //self.terms.sort_by_key(|a| &a.id); - self.terms.sort_by(|x, y| x.id.cmp(&y.id)); - self.medias.sort_by(|x, y| x.id.cmp(&y.id)); - self.nodes.sort_by(|x, y| x.id.cmp(&y.id)); - self.nodeLinks.sort_by(|x, y| x.id.cmp(&y.id)); - self.nodeRevisions.sort_by(|x, y| x.id.cmp(&y.id)); - self.nodePhrasings.sort_by(|x, y| x.id.cmp(&y.id)); - self.nodeTags.sort_by(|x, y| x.id.cmp(&y.id)); - } + pub fn get_all_ids(&self) -> HashSet { + let mut result = HashSet::::new(); + for entry in &self.terms { result.insert(entry.id.to_string()); } + for entry in &self.medias { result.insert(entry.id.to_string()); } + for entry in &self.nodes { result.insert(entry.id.to_string()); } + for entry in &self.nodeLinks { result.insert(entry.id.to_string()); } + for entry in &self.nodeRevisions { result.insert(entry.id.to_string()); } + for entry in &self.nodePhrasings { result.insert(entry.id.to_string()); } + for entry in &self.nodeTags { result.insert(entry.id.to_string()); } + result + } + pub fn sort_all_entries(&mut self) { + // see here for reason sort_by_key isn't used: https://stackoverflow.com/a/47126516 + //self.terms.sort_by_key(|a| &a.id); + self.terms.sort_by(|x, y| x.id.cmp(&y.id)); + self.medias.sort_by(|x, y| x.id.cmp(&y.id)); + self.nodes.sort_by(|x, y| x.id.cmp(&y.id)); + self.nodeLinks.sort_by(|x, y| x.id.cmp(&y.id)); + self.nodeRevisions.sort_by(|x, y| x.id.cmp(&y.id)); + self.nodePhrasings.sort_by(|x, y| x.id.cmp(&y.id)); + self.nodeTags.sort_by(|x, y| x.id.cmp(&y.id)); + } } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct Descendant { - id: String, - link_id: 
Option, - distance: i32, + id: String, + link_id: Option, + distance: i32, } impl From for Descendant { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct Ancestor { - id: String, - distance: i32, + id: String, + distance: i32, } impl From for Ancestor { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct PathNodeFromDB { - node_id: String, - link_id: Option, + node_id: String, + link_id: Option, } impl From for PathNodeFromDB { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(Default)] pub struct QueryShard_General_Subtree; #[Object] impl QueryShard_General_Subtree { - async fn subtree(&self, gql_ctx: &async_graphql::Context<'_>, root_node_id: String, max_depth: Option) -> Result { - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; - - let subtree = get_node_subtree(&ctx, root_node_id, max_depth.unwrap_or(10000)).await?; - Ok(subtree) - } - - // temp - async fn subtree2(&self, gql_ctx: &async_graphql::Context<'_>, root_node_id: String, max_depth: Option) -> Result { - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; - - let subtree = get_node_subtree2(&ctx, root_node_id, max_depth.unwrap_or(10000)).await?; - Ok(subtree) - } - - // lower-level functions - async fn descendants(&self, gql_ctx: &async_graphql::Context<'_>, root_node_id: String, max_depth: Option) -> Result, GQLError> { - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; - let max_depth_i32 = max_depth.unwrap_or(10000) as i32; - - let rows: Vec = ctx.tx.query_raw(r#"SELECT * from descendants($1, $2)"#, params(&[&root_node_id, &max_depth_i32])).await?.try_collect().await?; - let descendants: Vec = rows.into_iter().map(|a| a.into()).collect(); - Ok(descendants) - } - async fn ancestors(&self, gql_ctx: &async_graphql::Context<'_>, root_node_id: String, max_depth: Option) -> Result, GQLError> { - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; - let max_depth_i32 = max_depth.unwrap_or(10000) as i32; - - let rows: Vec = ctx.tx.query_raw(r#"SELECT * from ancestors($1, $2)"#, params(&[&root_node_id, &max_depth_i32])).await?.try_collect().await?; - let ancestors: Vec = rows.into_iter().map(|a| a.into()).collect(); - Ok(ancestors) - } - async fn shortestPath(&self, gql_ctx: &async_graphql::Context<'_>, start_node: String, end_node: String) -> Result, GQLError> { - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; - - let rows: Vec = ctx.tx.query_raw(r#"SELECT * from shortest_path($1, $2)"#, params(&[&start_node, &end_node])).await?.try_collect().await?; - let path_nodes: Vec = rows.into_iter().map(|a| a.into()).collect(); - Ok(path_nodes) - } - - async fn descendants2(&self, gql_ctx: &async_graphql::Context<'_>, root_node_id: String, max_depth: Option) -> Result, GQLError> { - let mut anchor = 
DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; - let max_depth_i32 = max_depth.unwrap_or(10000) as i32; - - let rows: Vec = ctx.tx.query_raw(r#"SELECT * from descendants2($1, $2)"#, params(&[&root_node_id, &max_depth_i32])).await?.try_collect().await?; - let descendants: Vec = rows.into_iter().map(|a| a.into()).collect(); - Ok(descendants) - } - - async fn get_prepared_data_for_deleting_subtree(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteSubtreeInput) -> Result { - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; - - let actor = get_user_info_from_gql_ctx(gql_ctx, &ctx).await?; - let (_subcommands, prepared_data) = get_prepared_data_for_deleting_subtree(&ctx, &actor, input, NoExtras::default()).await?; - Ok(prepared_data) - } + async fn subtree(&self, gql_ctx: &async_graphql::Context<'_>, root_node_id: String, max_depth: Option) -> Result { + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; + + let subtree = get_node_subtree(&ctx, root_node_id, max_depth.unwrap_or(10000)).await?; + Ok(subtree) + } + + // temp + async fn subtree2(&self, gql_ctx: &async_graphql::Context<'_>, root_node_id: String, max_depth: Option) -> Result { + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; + + let subtree = get_node_subtree2(&ctx, root_node_id, max_depth.unwrap_or(10000)).await?; + Ok(subtree) + } + + // lower-level functions + async fn descendants(&self, gql_ctx: &async_graphql::Context<'_>, root_node_id: String, max_depth: Option) -> Result, GQLError> { + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; + let max_depth_i32 = max_depth.unwrap_or(10000) as i32; + + let rows: Vec = ctx.tx.query_raw(r#"SELECT * from descendants($1, $2)"#, params(&[&root_node_id, &max_depth_i32])).await?.try_collect().await?; + let descendants: Vec = rows.into_iter().map(|a| a.into()).collect(); + Ok(descendants) + } + async fn ancestors(&self, gql_ctx: &async_graphql::Context<'_>, root_node_id: String, max_depth: Option) -> Result, GQLError> { + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; + let max_depth_i32 = max_depth.unwrap_or(10000) as i32; + + let rows: Vec = ctx.tx.query_raw(r#"SELECT * from ancestors($1, $2)"#, params(&[&root_node_id, &max_depth_i32])).await?.try_collect().await?; + let ancestors: Vec = rows.into_iter().map(|a| a.into()).collect(); + Ok(ancestors) + } + async fn shortestPath(&self, gql_ctx: &async_graphql::Context<'_>, start_node: String, end_node: String) -> Result, GQLError> { + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; + + let rows: Vec = ctx.tx.query_raw(r#"SELECT * from shortest_path($1, $2)"#, params(&[&start_node, &end_node])).await?.try_collect().await?; + let path_nodes: Vec = rows.into_iter().map(|a| a.into()).collect(); + Ok(path_nodes) + } + + async fn descendants2(&self, gql_ctx: &async_graphql::Context<'_>, root_node_id: String, max_depth: Option) -> Result, GQLError> { + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_read(&mut 
anchor, gql_ctx, false).await?; + let max_depth_i32 = max_depth.unwrap_or(10000) as i32; + + let rows: Vec = ctx.tx.query_raw(r#"SELECT * from descendants2($1, $2)"#, params(&[&root_node_id, &max_depth_i32])).await?.try_collect().await?; + let descendants: Vec = rows.into_iter().map(|a| a.into()).collect(); + Ok(descendants) + } + + async fn get_prepared_data_for_deleting_subtree(&self, gql_ctx: &async_graphql::Context<'_>, input: DeleteSubtreeInput) -> Result { + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; + + let actor = get_user_info_from_gql_ctx(gql_ctx, &ctx).await?; + let (_subcommands, prepared_data) = get_prepared_data_for_deleting_subtree(&ctx, &actor, input, NoExtras::default()).await?; + Ok(prepared_data) + } } // mutations @@ -189,10 +189,10 @@ impl QueryShard_General_Subtree { #[derive(Default)] pub struct MutationShard_General_Subtree; #[Object] impl MutationShard_General_Subtree { - async fn cloneSubtree(&self, gql_ctx: &async_graphql::Context<'_>, payload: JSONValue) -> Result { - let result = clone_subtree(gql_ctx, payload).await?; - Ok(result) - } + async fn cloneSubtree(&self, gql_ctx: &async_graphql::Context<'_>, payload: JSONValue) -> Result { + let result = clone_subtree(gql_ctx, payload).await?; + Ok(result) + } } // subscriptions @@ -200,73 +200,74 @@ impl QueryShard_General_Subtree { #[derive(Default)] pub struct SubscriptionShared_General_Subtree; #[Subscription] impl SubscriptionShared_General_Subtree { - async fn delete_subtree<'a>(&self, gql_ctx: &'a async_graphql::Context<'a>, input: DeleteSubtreeInput) -> impl Stream> + 'a { - async_stream::stream! { - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_write_advanced(&mut anchor, gql_ctx, false, Some(false)).await.map_err(to_sub_err)?; - let actor = get_user_info_from_gql_ctx(gql_ctx, &ctx).await.map_err(to_sub_err)?; - - let (subcommands, _prepared_data) = get_prepared_data_for_deleting_subtree(&ctx, &actor, input.clone(), NoExtras::default()).await.map_err(to_sub_err)?; - let subcommand_count = subcommands.len_u32(); - - let mut last_subcommand_results: Vec = Vec::new(); - { - let batch_input = RunCommandBatchInput { commands: subcommands }; - let mut stream = Box::pin(run_command_batch(&ctx, &actor, false, batch_input, NoExtras::default()).await); - while let Some(batch_result_so_far) = stream.next().await { - let batch_result_so_far = batch_result_so_far?; - last_subcommand_results = batch_result_so_far.results.clone(); - let subtree_delete_result_so_far = DeleteSubtreeResult { subcommand_count: subcommand_count, subcommand_results: last_subcommand_results.clone(), committed: false }; - yield Ok(subtree_delete_result_so_far); - } - } - - ctx.tx.commit().await.map_err(to_sub_err)?; - tracing::info!("Command-batch execution completed. @CommandCount:{}", subcommand_count); - yield Ok(DeleteSubtreeResult { subcommand_count: subcommand_count, subcommand_results: last_subcommand_results, committed: true }); - } - } + async fn delete_subtree<'a>(&self, gql_ctx: &'a async_graphql::Context<'a>, input: DeleteSubtreeInput) -> impl Stream> + 'a { + async_stream::stream! 
{ + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_write_advanced(&mut anchor, gql_ctx, false, Some(false)).await.map_err(to_sub_err)?; + let actor = get_user_info_from_gql_ctx(gql_ctx, &ctx).await.map_err(to_sub_err)?; + + let (subcommands, _prepared_data) = get_prepared_data_for_deleting_subtree(&ctx, &actor, input.clone(), NoExtras::default()).await.map_err(to_sub_err)?; + let subcommand_count = subcommands.len_u32(); + + let mut last_subcommand_results: Vec = Vec::new(); + { + let batch_input = RunCommandBatchInput { commands: subcommands }; + let mut stream = Box::pin(run_command_batch(&ctx, &actor, false, batch_input, NoExtras::default()).await); + while let Some(batch_result_so_far) = stream.next().await { + let batch_result_so_far = batch_result_so_far?; + last_subcommand_results = batch_result_so_far.results.clone(); + let subtree_delete_result_so_far = DeleteSubtreeResult { subcommand_count: subcommand_count, subcommand_results: last_subcommand_results.clone(), committed: false }; + yield Ok(subtree_delete_result_so_far); + } + } + + ctx.tx.commit().await.map_err(to_sub_err)?; + tracing::info!("Command-batch execution completed. @CommandCount:{}", subcommand_count); + yield Ok(DeleteSubtreeResult { subcommand_count: subcommand_count, subcommand_results: last_subcommand_results, committed: true }); + } + } } #[derive(SimpleObject)] struct PreparedDataForDeletingSubtree { - // commented; including this in the result structure requires SimpleObject on CommandEntry, *and all downstream input structs* - //subcommands: Vec, - subcommand_count: u32, - nodes_to_unlink_ids: Vec, - nodes_to_delete_ids: Vec, - nodes_to_delete_access_policies: IndexMapAGQL, - nodes_to_delete_creator_ids: IndexMapAGQL, - nodes_to_delete_creation_times: Vec, + // commented; including this in the result structure requires SimpleObject on CommandEntry, *and all downstream input structs* + //subcommands: Vec, + subcommand_count: u32, + nodes_to_unlink_ids: Vec, + nodes_to_delete_ids: Vec, + nodes_to_delete_access_policies: IndexMapAGQL, + nodes_to_delete_creator_ids: IndexMapAGQL, + nodes_to_delete_creation_times: Vec, } #[derive(InputObject, Deserialize, Serialize, Clone)] pub struct DeleteSubtreeInput { - pub map_id: Option, - pub root_node_id: String, - pub max_depth: i32, + pub map_id: Option, + pub root_node_id: String, + pub max_depth: i32, } #[derive(SimpleObject, Debug, Serialize)] pub struct DeleteSubtreeResult { - pub subcommand_count: u32, - pub subcommand_results: Vec, - pub committed: bool, + pub subcommand_count: u32, + pub subcommand_results: Vec, + pub committed: bool, } } +#[rustfmt::skip] async fn get_prepared_data_for_deleting_subtree(ctx: &AccessorContext<'_>, _actor: &User, input: DeleteSubtreeInput, _extras: NoExtras) -> Result<(Vec, PreparedDataForDeletingSubtree), Error> { - let DeleteSubtreeInput { map_id: _mapID, root_node_id: rootNodeID, max_depth: maxDepth } = input; - - let mut subcommands = Vec::::new(); - let mut links_to_unlink = IndexSet::::new(); - // collections that'll be inserted into the prepared-data structure - let mut nodes_to_unlink_ids = IndexSet::::new(); - let mut nodes_to_delete_ids = IndexSet::::new(); - let mut nodes_to_delete_access_policies = IndexMap::::new(); - let mut nodes_to_delete_creator_ids = IndexMap::::new(); - let mut nodes_to_delete_creation_times = Vec::::new(); // vec used; if someone nodes have identical creation-times, add each entry separately + let DeleteSubtreeInput { map_id: _mapID, root_node_id: 
rootNodeID, max_depth: maxDepth } = input; + + let mut subcommands = Vec::::new(); + let mut links_to_unlink = IndexSet::::new(); + // collections that'll be inserted into the prepared-data structure + let mut nodes_to_unlink_ids = IndexSet::::new(); + let mut nodes_to_delete_ids = IndexSet::::new(); + let mut nodes_to_delete_access_policies = IndexMap::::new(); + let mut nodes_to_delete_creator_ids = IndexMap::::new(); + let mut nodes_to_delete_creation_times = Vec::::new(); // vec used; if someone nodes have identical creation-times, add each entry separately let mut rows: Vec = ctx.tx.query_raw(r#" SELECT @@ -279,64 +280,55 @@ async fn get_prepared_data_for_deleting_subtree(ctx: &AccessorContext<'_>, _acto // reverse the results, so we get the deepest nodes first rows.reverse(); - //let rows_by_node_id: IndexMap> = rows.into_iter().group_by(|row| row.get(0)).into_iter().map(|(k, g)| (k, g.collect_vec())).collect(); - //for (node_id, rows) in rows_by_node_id { - - // each row represents a discovered node-link within the subtree being deleted - for row in rows { - let node_id: String = row.get(0); - let link_id = row.get(1); - //let distance: i32 = row.get(2); - let single_parent_ancestry: bool = row.get(3); - let access_policy: String = row.get(4); - let creator_id: String = row.get(5); - let creation_time: i64 = row.get(6); - - // if this link is the only place the child exists, mark the child for deletion - if single_parent_ancestry { - nodes_to_delete_ids.insert(node_id.to_string()); - *nodes_to_delete_access_policies.entry(access_policy).or_insert(0) += 1; - *nodes_to_delete_creator_ids.entry(creator_id).or_insert(0) += 1; - nodes_to_delete_creation_times.push(creation_time); - } - // else, mark the child for mere unlinking - else { - if let Some(link_id) = link_id { - nodes_to_unlink_ids.insert(node_id.to_string()); - links_to_unlink.insert(link_id); - } else { - // if link_id is null, then this was the subtree's root-node; the fact that we're here means it had more than one parent/parent-link, which is not ideal so log a warning - warn!("Delete-subtree was called on a tree whose root-node ({}) had more than one parent. 
Deletion of subtree root was skipped.", node_id); - } - } - } - - for link_id in links_to_unlink.iter() { - let command = CommandEntry { - deleteNodeLink: Some(DeleteNodeLinkInput { mapID: None, id: link_id.clone() }), - ..Default::default() - }; - subcommands.push(command); - } - - for node_id in nodes_to_delete_ids.iter() { - let command = CommandEntry { - deleteNode: Some(DeleteNodeInput { mapID: None, nodeID: node_id.clone() }), - ..Default::default() - }; - subcommands.push(command); - } - - let subcommand_count = subcommands.len_u32(); - Ok(( - subcommands, - PreparedDataForDeletingSubtree { - subcommand_count, - nodes_to_unlink_ids: nodes_to_unlink_ids.into_iter().collect(), - nodes_to_delete_ids: nodes_to_delete_ids.into_iter().collect(), - nodes_to_delete_access_policies: IndexMapAGQL(nodes_to_delete_access_policies), - nodes_to_delete_creator_ids: IndexMapAGQL(nodes_to_delete_creator_ids), - nodes_to_delete_creation_times: nodes_to_delete_creation_times, - } - )) -} \ No newline at end of file + //let rows_by_node_id: IndexMap> = rows.into_iter().group_by(|row| row.get(0)).into_iter().map(|(k, g)| (k, g.collect_vec())).collect(); + //for (node_id, rows) in rows_by_node_id { + + // each row represents a discovered node-link within the subtree being deleted + for row in rows { + let node_id: String = row.get(0); + let link_id = row.get(1); + //let distance: i32 = row.get(2); + let single_parent_ancestry: bool = row.get(3); + let access_policy: String = row.get(4); + let creator_id: String = row.get(5); + let creation_time: i64 = row.get(6); + + // if this link is the only place the child exists, mark the child for deletion + if single_parent_ancestry { + nodes_to_delete_ids.insert(node_id.to_string()); + *nodes_to_delete_access_policies.entry(access_policy).or_insert(0) += 1; + *nodes_to_delete_creator_ids.entry(creator_id).or_insert(0) += 1; + nodes_to_delete_creation_times.push(creation_time); + } + // else, mark the child for mere unlinking + else { + if let Some(link_id) = link_id { + nodes_to_unlink_ids.insert(node_id.to_string()); + links_to_unlink.insert(link_id); + } else { + // if link_id is null, then this was the subtree's root-node; the fact that we're here means it had more than one parent/parent-link, which is not ideal so log a warning + warn!("Delete-subtree was called on a tree whose root-node ({}) had more than one parent. 
Deletion of subtree root was skipped.", node_id); + } + } + } + + for link_id in links_to_unlink.iter() { + let command = CommandEntry { deleteNodeLink: Some(DeleteNodeLinkInput { mapID: None, id: link_id.clone() }), ..Default::default() }; + subcommands.push(command); + } + + for node_id in nodes_to_delete_ids.iter() { + let command = CommandEntry { deleteNode: Some(DeleteNodeInput { mapID: None, nodeID: node_id.clone() }), ..Default::default() }; + subcommands.push(command); + } + + let subcommand_count = subcommands.len_u32(); + Ok((subcommands, PreparedDataForDeletingSubtree { + subcommand_count, + nodes_to_unlink_ids: nodes_to_unlink_ids.into_iter().collect(), + nodes_to_delete_ids: nodes_to_delete_ids.into_iter().collect(), + nodes_to_delete_access_policies: IndexMapAGQL(nodes_to_delete_access_policies), + nodes_to_delete_creator_ids: IndexMapAGQL(nodes_to_delete_creator_ids), + nodes_to_delete_creation_times, + })) +} diff --git a/Packages/app-server/src/db/general/subtree_collector.rs b/Packages/app-server/src/db/general/subtree_collector.rs index ebd83b169..9e21f7031 100644 --- a/Packages/app-server/src/db/general/subtree_collector.rs +++ b/Packages/app-server/src/db/general/subtree_collector.rs @@ -1,24 +1,40 @@ -use std::{rc::Rc, sync::Arc}; +use super::subtree::Subtree; +use crate::{ + db::{ + commands::_command::ToSqlWrapper, + medias::{get_media, Media}, + node_links::{get_node_links, NodeLink}, + node_phrasings::{get_node_phrasings, NodePhrasing}, + node_revisions::get_node_revision, + node_tags::{get_node_tags, NodeTag}, + nodes::get_node, + terms::{get_terms_attached, Term}, + }, + utils::db::{ + accessors::AccessorContext, + filter::{FilterInput, QueryFilter}, + queries::get_entries_in_collection_base, + sql_fragment::SQLFragment, + }, +}; +use async_recursion::async_recursion; +use futures_util::{pin_mut, Future, FutureExt, StreamExt, TryStreamExt}; use rust_shared::anyhow::{anyhow, Error}; use rust_shared::async_graphql::ID; -use async_recursion::async_recursion; -use futures_util::{Future, FutureExt, TryStreamExt, StreamExt, pin_mut}; use rust_shared::indexmap::IndexMap; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; use rust_shared::tokio::sync::RwLock; -use rust_shared::tokio_postgres::{Row, types::ToSql}; -use crate::{db::{medias::{Media, get_media}, terms::{Term, get_terms_attached}, nodes::{get_node}, node_links::{NodeLink, get_node_links}, node_revisions::{get_node_revision}, node_phrasings::{NodePhrasing, get_node_phrasings}, node_tags::{NodeTag, get_node_tags}, commands::_command::ToSqlWrapper}, utils::{db::{queries::{get_entries_in_collection_base}, sql_fragment::SQLFragment, filter::{FilterInput, QueryFilter}, accessors::AccessorContext}}}; -use super::{subtree::Subtree}; +use rust_shared::tokio_postgres::{types::ToSql, Row}; +use std::{rc::Rc, sync::Arc}; /// Helper to make it easier to provide inline sql-params of different types. 
pub fn params<'a>(parameters: &'a [&'a (dyn ToSql + Sync)]) -> Vec<&(dyn ToSql + Sync)> { - parameters.iter() - .map(|x| *x as &(dyn ToSql + Sync)) - .collect() + parameters.iter().map(|x| *x as &(dyn ToSql + Sync)).collect() } #[async_recursion] +#[rustfmt::skip] pub async fn get_node_subtree(ctx: &AccessorContext<'_>, root_id: String, max_depth_usize: usize) -> Result { let max_depth = max_depth_usize as i32; @@ -67,20 +83,21 @@ pub async fn get_node_subtree(ctx: &AccessorContext<'_>, root_id: String, max_de ) AS media_ids_from_revisions ON (medias.id = media_ids_from_revisions.id#>>'{}') "#, params(&[&root_id, &max_depth])).await?.try_collect().await?; - let mut subtree = Subtree { - terms: term_rows.into_iter().map(|a| a.into()).collect(), - medias: media_rows.into_iter().map(|a| a.into()).collect(), - nodes: node_rows.into_iter().map(|a| a.into()).collect(), - nodeLinks: link_rows.into_iter().map(|a| a.into()).collect(), - nodeRevisions: revision_rows.into_iter().map(|a| a.into()).collect(), - nodePhrasings: phrasing_rows.into_iter().map(|a| a.into()).collect(), - nodeTags: tag_rows.into_iter().map(|a| a.into()).collect(), - }; - subtree.sort_all_entries(); - Ok(subtree) + let mut subtree = Subtree { + terms: term_rows.into_iter().map(|a| a.into()).collect(), + medias: media_rows.into_iter().map(|a| a.into()).collect(), + nodes: node_rows.into_iter().map(|a| a.into()).collect(), + nodeLinks: link_rows.into_iter().map(|a| a.into()).collect(), + nodeRevisions: revision_rows.into_iter().map(|a| a.into()).collect(), + nodePhrasings: phrasing_rows.into_iter().map(|a| a.into()).collect(), + nodeTags: tag_rows.into_iter().map(|a| a.into()).collect(), + }; + subtree.sort_all_entries(); + Ok(subtree) } #[async_recursion] +#[rustfmt::skip] pub async fn get_node_subtree2(ctx: &AccessorContext<'_>, root_id: String, max_depth_usize: usize) -> Result { let max_depth = max_depth_usize as i32; @@ -126,15 +143,15 @@ pub async fn get_node_subtree2(ctx: &AccessorContext<'_>, root_id: String, max_d ) AS media_ids_from_revisions ON (medias.id = media_ids_from_revisions.id#>>'{}') "#, params(&[&root_id, &max_depth])).await?.try_collect().await?; - let subtree = Subtree { - terms: term_rows.into_iter().map(|a| a.into()).collect(), - medias: media_rows.into_iter().map(|a| a.into()).collect(), - nodes: node_rows.into_iter().map(|a| a.into()).collect(), - nodeLinks: link_rows.into_iter().map(|a| a.into()).collect(), - nodeRevisions: revision_rows.into_iter().map(|a| a.into()).collect(), - nodePhrasings: phrasing_rows.into_iter().map(|a| a.into()).collect(), - nodeTags: tag_rows.into_iter().map(|a| a.into()).collect(), - }; - //subtree.sort_all_entries(); - Ok(subtree) -} \ No newline at end of file + let subtree = Subtree { + terms: term_rows.into_iter().map(|a| a.into()).collect(), + medias: media_rows.into_iter().map(|a| a.into()).collect(), + nodes: node_rows.into_iter().map(|a| a.into()).collect(), + nodeLinks: link_rows.into_iter().map(|a| a.into()).collect(), + nodeRevisions: revision_rows.into_iter().map(|a| a.into()).collect(), + nodePhrasings: phrasing_rows.into_iter().map(|a| a.into()).collect(), + nodeTags: tag_rows.into_iter().map(|a| a.into()).collect(), + }; + //subtree.sort_all_entries(); + Ok(subtree) +} diff --git a/Packages/app-server/src/db/general/subtree_collector_old.rs b/Packages/app-server/src/db/general/subtree_collector_old.rs index dbb577fc1..9ead1003a 100644 --- a/Packages/app-server/src/db/general/subtree_collector_old.rs +++ 
b/Packages/app-server/src/db/general/subtree_collector_old.rs @@ -1,142 +1,159 @@ use std::{rc::Rc, sync::Arc}; +use super::subtree::Subtree; +use crate::db::node_revisions::NodeRevision; +use crate::db::nodes_::_node::Node; +use crate::{ + db::{ + medias::{get_media, Media}, + node_links::{get_node_links, NodeLink}, + node_phrasings::{get_node_phrasings, NodePhrasing}, + node_revisions::get_node_revision, + node_tags::{get_node_tags, NodeTag}, + nodes::get_node, + terms::{get_terms_attached, Term}, + }, + utils::db::{ + accessors::AccessorContext, + filter::{FilterInput, QueryFilter}, + queries::get_entries_in_collection_base, + sql_fragment::SQLFragment, + }, +}; +use async_recursion::async_recursion; +use futures_util::{pin_mut, Future, FutureExt, StreamExt, TryStreamExt}; use rust_shared::anyhow::{anyhow, Error}; use rust_shared::async_graphql::ID; -use async_recursion::async_recursion; -use futures_util::{Future, FutureExt, TryStreamExt, StreamExt, pin_mut}; use rust_shared::indexmap::IndexMap; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; use rust_shared::tokio::sync::RwLock; -use rust_shared::tokio_postgres::{Row, types::ToSql}; -use crate::db::node_revisions::NodeRevision; -use crate::db::nodes_::_node::Node; -use crate::{db::{medias::{Media, get_media}, terms::{Term, get_terms_attached}, nodes::{get_node}, node_links::{NodeLink, get_node_links}, node_revisions::{get_node_revision}, node_phrasings::{NodePhrasing, get_node_phrasings}, node_tags::{NodeTag, get_node_tags}}, utils::{db::{queries::{get_entries_in_collection_base}, sql_fragment::SQLFragment, filter::{FilterInput, QueryFilter}, accessors::AccessorContext}}}; -use super::{subtree::Subtree}; +use rust_shared::tokio_postgres::{types::ToSql, Row}; #[derive(Default)] pub struct SubtreeCollector_Old { - //pub root_path_segments: Vec, - - pub terms: IndexMap, - pub medias: IndexMap, - pub nodes: IndexMap, - pub node_links: IndexMap, - pub node_revisions: IndexMap, - pub node_phrasings: IndexMap, - pub node_tags: IndexMap, + //pub root_path_segments: Vec, + pub terms: IndexMap, + pub medias: IndexMap, + pub nodes: IndexMap, + pub node_links: IndexMap, + pub node_revisions: IndexMap, + pub node_phrasings: IndexMap, + pub node_tags: IndexMap, } impl SubtreeCollector_Old { - pub fn to_subtree(self: &Self) -> Subtree { - let result = Subtree { - terms: self.terms.clone().into_values().collect(), - medias: self.medias.clone().into_values().collect(), - nodes: self.nodes.clone().into_values().collect(), - nodeLinks: self.node_links.clone().into_values().collect(), - nodeRevisions: self.node_revisions.clone().into_values().collect(), - nodePhrasings: self.node_phrasings.clone().into_values().collect(), - nodeTags: self.node_tags.clone().into_values().collect(), - }; - // commented; we want this endpoint's algorithm "frozen", since the alt-frontend relies on it for consistent ordering with debate-map's frontend - // (in the long-term, this should be resolved by getting accurate "orderKey" fields set for everything, and definitely-correct inserting/updating of them -- but for now, just freeze the "subtreeOld" algorithm) - //result.sort_all_entries(); - result - } + pub fn to_subtree(self: &Self) -> Subtree { + let result = Subtree { + terms: self.terms.clone().into_values().collect(), + medias: self.medias.clone().into_values().collect(), + nodes: self.nodes.clone().into_values().collect(), + nodeLinks: 
self.node_links.clone().into_values().collect(), + nodeRevisions: self.node_revisions.clone().into_values().collect(), + nodePhrasings: self.node_phrasings.clone().into_values().collect(), + nodeTags: self.node_tags.clone().into_values().collect(), + }; + // commented; we want this endpoint's algorithm "frozen", since the alt-frontend relies on it for consistent ordering with debate-map's frontend + // (in the long-term, this should be resolved by getting accurate "orderKey" fields set for everything, and definitely-correct inserting/updating of them -- but for now, just freeze the "subtreeOld" algorithm) + //result.sort_all_entries(); + result + } } /*pub enum FindSubtreeError { - InvalidPath, + InvalidPath, }*/ //pub type BoxFuture<'a, T> = Pin + Send + 'a, Global>>; #[async_recursion] //#[async_recursion(?Send)] pub async fn populate_subtree_collector_old(ctx: &AccessorContext<'_>, current_path: String, max_depth: usize, root_path_segments: &Vec, collector_arc: Arc>) -> Result<(), Error> { - let path_segments: Vec<&str> = current_path.split("/").collect(); - let parent_node_id = path_segments.iter().nth_back(1).map(|a| a.to_string()); - let node_id = path_segments.iter().last().ok_or(anyhow!("Invalid path:{current_path}"))?.to_string(); - //search_info = search_info ?? new GetSubtree_SearchInfo({rootPathSegments: pathSegments}); + let path_segments: Vec<&str> = current_path.split("/").collect(); + let parent_node_id = path_segments.iter().nth_back(1).map(|a| a.to_string()); + let node_id = path_segments.iter().last().ok_or(anyhow!("Invalid path:{current_path}"))?.to_string(); + //search_info = search_info ?? new GetSubtree_SearchInfo({rootPathSegments: pathSegments}); - // get data - let node = get_node(ctx, &node_id).await?; - // use match, so we can reuse outer async-context (don't know how to handle new ones in .map() easily yet) - let node_link = match parent_node_id { - Some(parent_id) => get_node_links(ctx, Some(&parent_id), Some(&node_id)).await?.into_iter().nth(0), - None => None, - }; - let node_current = get_node_revision(ctx, &node.c_currentRevision).await?; - let phrasings = get_node_phrasings(ctx, &node_id).await?; - let terms = get_terms_attached(ctx, &node_current.id.0).await?; - let medias = { - let mut temp = vec![]; - for attachment in node_current.clone().attachments { - if let Some(media_attachment) = attachment.media { - let media = get_media(ctx, &media_attachment["id"].as_str().unwrap().to_owned()).await?; - temp.push(media); - }; - } - temp - }; - let tags = get_node_tags(ctx, &node_id).await?; + // get data + let node = get_node(ctx, &node_id).await?; + // use match, so we can reuse outer async-context (don't know how to handle new ones in .map() easily yet) + let node_link = match parent_node_id { + Some(parent_id) => get_node_links(ctx, Some(&parent_id), Some(&node_id)).await?.into_iter().nth(0), + None => None, + }; + let node_current = get_node_revision(ctx, &node.c_currentRevision).await?; + let phrasings = get_node_phrasings(ctx, &node_id).await?; + let terms = get_terms_attached(ctx, &node_current.id.0).await?; + let medias = { + let mut temp = vec![]; + for attachment in node_current.clone().attachments { + if let Some(media_attachment) = attachment.media { + let media = get_media(ctx, &media_attachment["id"].as_str().unwrap().to_owned()).await?; + temp.push(media); + }; + } + temp + }; + let tags = get_node_tags(ctx, &node_id).await?; - // store data - { - // check link first, because link can differ depending on path (ie. 
even if node has been seen, this link may not have been) - let arc_clone = collector_arc.clone(); - let mut collector = arc_clone.write().await; - let isSubtreeRoot = path_segments.join("/") == root_path_segments.join("/"); - if let Some(node_link) = node_link { - if !isSubtreeRoot && !collector.node_links.contains_key(&node_link.id.0) { - collector.node_links.insert(node_link.id.to_string(), node_link); - } - } + // store data + { + // check link first, because link can differ depending on path (ie. even if node has been seen, this link may not have been) + let arc_clone = collector_arc.clone(); + let mut collector = arc_clone.write().await; + let isSubtreeRoot = path_segments.join("/") == root_path_segments.join("/"); + if let Some(node_link) = node_link { + if !isSubtreeRoot && !collector.node_links.contains_key(&node_link.id.0) { + collector.node_links.insert(node_link.id.to_string(), node_link); + } + } - // now check if node itself has been seen/processed; if so, ignore the rest of its data - if collector.nodes.contains_key(&node.id.0) { return Ok(()); } - collector.nodes.insert(node.id.to_string(), node); + // now check if node itself has been seen/processed; if so, ignore the rest of its data + if collector.nodes.contains_key(&node.id.0) { + return Ok(()); + } + collector.nodes.insert(node.id.to_string(), node); - assert!(!collector.node_revisions.contains_key(&node_current.id.0), "Node-revisions should be node-specific, yet entry ({}) was seen twice.", node_current.id.0); - collector.node_revisions.insert(node_current.id.to_string(), node_current.clone()); + assert!(!collector.node_revisions.contains_key(&node_current.id.0), "Node-revisions should be node-specific, yet entry ({}) was seen twice.", node_current.id.0); + collector.node_revisions.insert(node_current.id.to_string(), node_current.clone()); - for phrasing in phrasings { - assert!(!collector.node_phrasings.contains_key(&phrasing.id.0), "Node-phrasings should be node-specific, yet entry ({}) was seen twice.", phrasing.id.0); - collector.node_phrasings.insert(phrasing.id.to_string(), phrasing); - } + for phrasing in phrasings { + assert!(!collector.node_phrasings.contains_key(&phrasing.id.0), "Node-phrasings should be node-specific, yet entry ({}) was seen twice.", phrasing.id.0); + collector.node_phrasings.insert(phrasing.id.to_string(), phrasing); + } - for term in terms { - if !collector.terms.contains_key(&term.id.0) { - collector.terms.insert(term.id.to_string(), term); - } - } + for term in terms { + if !collector.terms.contains_key(&term.id.0) { + collector.terms.insert(term.id.to_string(), term); + } + } - for media in medias { - //if !collector.terms.contains_key(media_attachment["id"].as_str().unwrap()) { - if !collector.medias.contains_key(&media.id.0) { - collector.medias.insert(media.id.to_string(), media); - } - } + for media in medias { + //if !collector.terms.contains_key(media_attachment["id"].as_str().unwrap()) { + if !collector.medias.contains_key(&media.id.0) { + collector.medias.insert(media.id.to_string(), media); + } + } - for tag in tags { - if !collector.node_tags.contains_key(&tag.id.0) { - collector.node_tags.insert(tag.id.to_string(), tag); - } - } - } + for tag in tags { + if !collector.node_tags.contains_key(&tag.id.0) { + collector.node_tags.insert(tag.id.to_string(), tag); + } + } + } - // populate-data from descendants/subtree underneath the current node (must happen after store-data, else the collector.nodes.contains_key checks might get skipped past) - let currentDepth = 
path_segments.len() - root_path_segments.len(); - if currentDepth < max_depth { - /*for link in get_node_links(ctx, Some(&node_id), None).await? { - let child_id = link.child; - populate_subtree_collector(ctx, format!("{}/{}", current_path, child_id), max_depth, collector).await?; - }*/ - let links = get_node_links(ctx, Some(&node_id), None).await?; - let mut futures = vec![]; - for link in links { - let child_id = link.child; - futures.push(populate_subtree_collector_old(ctx, format!("{}/{}", current_path, child_id), max_depth, root_path_segments, collector_arc.clone())); - } - rust_shared::futures::future::join_all(futures).await; - } - Ok(()) -} \ No newline at end of file + // populate-data from descendants/subtree underneath the current node (must happen after store-data, else the collector.nodes.contains_key checks might get skipped past) + let currentDepth = path_segments.len() - root_path_segments.len(); + if currentDepth < max_depth { + /*for link in get_node_links(ctx, Some(&node_id), None).await? { + let child_id = link.child; + populate_subtree_collector(ctx, format!("{}/{}", current_path, child_id), max_depth, collector).await?; + }*/ + let links = get_node_links(ctx, Some(&node_id), None).await?; + let mut futures = vec![]; + for link in links { + let child_id = link.child; + futures.push(populate_subtree_collector_old(ctx, format!("{}/{}", current_path, child_id), max_depth, root_path_segments, collector_arc.clone())); + } + rust_shared::futures::future::join_all(futures).await; + } + Ok(()) +} diff --git a/Packages/app-server/src/db/general/subtree_old.rs b/Packages/app-server/src/db/general/subtree_old.rs index 8082c1ad5..a6723f441 100644 --- a/Packages/app-server/src/db/general/subtree_old.rs +++ b/Packages/app-server/src/db/general/subtree_old.rs @@ -1,21 +1,21 @@ -use jsonschema::JSONSchema; +use deadpool_postgres::{Client, Pool, Transaction}; +use futures_util::{stream, Future, Stream, StreamExt, TryFutureExt, TryStreamExt}; use jsonschema::output::BasicOutput; +use jsonschema::JSONSchema; use lazy_static::lazy_static; -use rust_shared::GQLError; use rust_shared::anyhow::{anyhow, Context, Error}; -use rust_shared::async_graphql::{Object, Schema, Subscription, ID, async_stream, OutputType, scalar, EmptySubscription, SimpleObject, self}; -use deadpool_postgres::{Pool, Client, Transaction}; -use futures_util::{Stream, stream, TryFutureExt, StreamExt, Future, TryStreamExt}; +use rust_shared::async_graphql::{self, async_stream, scalar, EmptySubscription, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; use rust_shared::tokio::sync::RwLock; use rust_shared::tokio_postgres::Row; +use rust_shared::GQLError; use std::collections::HashSet; use std::path::Path; use std::rc::Rc; use std::sync::Arc; -use std::{time::Duration, pin::Pin, task::Poll}; +use std::{pin::Pin, task::Poll, time::Duration}; use crate::db::_general::GenericMutation_Result; use crate::db::commands::clone_subtree::clone_subtree; @@ -24,50 +24,50 @@ use crate::db::node_links::NodeLink; use crate::db::node_phrasings::NodePhrasing; use crate::db::node_tags::NodeTag; use crate::db::terms::Term; -use crate::utils::db::filter::{QueryFilter, FilterInput}; +use crate::utils::db::accessors::AccessorContext; +use crate::utils::db::filter::{FilterInput, QueryFilter}; +use 
crate::utils::db::generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}; use crate::utils::db::sql_fragment::SQLFragment; use crate::utils::db::transactions::start_read_transaction; -use crate::utils::general::data_anchor::{DataAnchorFor1, DataAnchor}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}}}; -use crate::utils::type_aliases::{PGClientObject}; -use crate::utils::db::accessors::{AccessorContext}; +use crate::utils::general::data_anchor::{DataAnchor, DataAnchorFor1}; +use crate::utils::type_aliases::PGClientObject; use super::subtree::Subtree; -use super::subtree_collector_old::{SubtreeCollector_Old, populate_subtree_collector_old}; +use super::subtree_collector_old::{populate_subtree_collector_old, SubtreeCollector_Old}; pub async fn get_subtree_old(ctx: &AccessorContext<'_>, root_node_id: String, max_depth: Option) -> Result { - let collector = SubtreeCollector_Old::default(); - let root_path_segments = vec![root_node_id.clone()]; - let collector_arc = Arc::new(RwLock::new(collector)); - populate_subtree_collector_old(&ctx, root_node_id, max_depth.unwrap_or(usize::MAX), &root_path_segments, collector_arc.clone()).await?; + let collector = SubtreeCollector_Old::default(); + let root_path_segments = vec![root_node_id.clone()]; + let collector_arc = Arc::new(RwLock::new(collector)); + populate_subtree_collector_old(&ctx, root_node_id, max_depth.unwrap_or(usize::MAX), &root_path_segments, collector_arc.clone()).await?; - let arc_clone = collector_arc.clone(); - let collector = arc_clone.read().await; - let subtree = collector.to_subtree(); - Ok(subtree) + let arc_clone = collector_arc.clone(); + let collector = arc_clone.read().await; + let subtree = collector.to_subtree(); + Ok(subtree) } -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Default)] pub struct QueryShard_General_Subtree_Old; #[Object] impl QueryShard_General_Subtree_Old { - async fn subtree_old(&self, gql_ctx: &async_graphql::Context<'_>, root_node_id: String, max_depth: Option) -> Result { - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; + async fn subtree_old(&self, gql_ctx: &async_graphql::Context<'_>, root_node_id: String, max_depth: Option) -> Result { + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; - let collector = SubtreeCollector_Old::default(); - let root_path_segments = vec![root_node_id.clone()]; - let collector_arc = Arc::new(RwLock::new(collector)); - populate_subtree_collector_old(&ctx, root_node_id, max_depth.unwrap_or(usize::MAX), &root_path_segments, collector_arc.clone()).await?; + let collector = SubtreeCollector_Old::default(); + let root_path_segments = vec![root_node_id.clone()]; + let collector_arc = Arc::new(RwLock::new(collector)); + populate_subtree_collector_old(&ctx, root_node_id, max_depth.unwrap_or(usize::MAX), &root_path_segments, collector_arc.clone()).await?; - let arc_clone = collector_arc.clone(); - let collector = arc_clone.read().await; - let subtree = collector.to_subtree(); + let arc_clone = collector_arc.clone(); + let collector = arc_clone.read().await; + let subtree = collector.to_subtree(); - Ok(subtree) - } + Ok(subtree) + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/general/trusted_operators.rs b/Packages/app-server/src/db/general/trusted_operators.rs index e360f4c50..99184d072 100644 --- a/Packages/app-server/src/db/general/trusted_operators.rs +++ b/Packages/app-server/src/db/general/trusted_operators.rs @@ -1,14 +1,14 @@ -use jsonschema::JSONSchema; +use deadpool_postgres::{Client, Pool, Transaction}; +use futures_util::{stream, Future, Stream, StreamExt, TryFutureExt, TryStreamExt}; use jsonschema::output::BasicOutput; +use jsonschema::JSONSchema; use lazy_static::lazy_static; -use rust_shared::anyhow::{anyhow, Context, Error, ensure}; -use rust_shared::async_graphql::{Object, Schema, Subscription, ID, async_stream, OutputType, scalar, EmptySubscription, SimpleObject, InputObject, self, Enum}; -use deadpool_postgres::{Pool, Client, Transaction}; -use futures_util::{Stream, stream, TryFutureExt, StreamExt, Future, TryStreamExt}; +use rust_shared::anyhow::{anyhow, ensure, Context, Error}; +use rust_shared::async_graphql::{self, async_stream, scalar, EmptySubscription, Enum, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::once_cell::sync::Lazy; use rust_shared::regex::Regex; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; use rust_shared::tokio::sync::{RwLock, Semaphore}; use rust_shared::tokio_postgres::Row; @@ -19,62 +19,62 @@ use std::collections::HashSet; use std::path::Path; use std::rc::Rc; use std::sync::Arc; -use std::{time::Duration, pin::Pin, task::Poll}; +use std::{pin::Pin, task::Poll, time::Duration}; -use crate::db::_general::{GenericMutation_Result, ensure_trusted_operator_passkey_is_correct, ensure_gql}; +use crate::db::_general::{ensure_gql, ensure_trusted_operator_passkey_is_correct, GenericMutation_Result}; use crate::db::commands::clone_subtree::clone_subtree; use crate::db::medias::Media; use 
crate::db::node_links::NodeLink; use crate::db::node_phrasings::NodePhrasing; use crate::db::node_tags::NodeTag; use crate::db::terms::Term; -use crate::utils::db::filter::{QueryFilter, FilterInput}; +use crate::utils::db::accessors::AccessorContext; +use crate::utils::db::filter::{FilterInput, QueryFilter}; +use crate::utils::db::generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; use crate::utils::db::sql_fragment::SQLFragment; use crate::utils::db::transactions::start_read_transaction; -use crate::utils::general::data_anchor::{DataAnchorFor1, DataAnchor}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}}}; -use crate::utils::type_aliases::{PGClientObject}; -use crate::utils::db::accessors::{AccessorContext}; +use crate::utils::general::data_anchor::{DataAnchor, DataAnchorFor1}; +use crate::utils::type_aliases::PGClientObject; -use super::subtree_collector::{get_node_subtree, params, get_node_subtree2}; +use super::subtree_collector::{get_node_subtree, get_node_subtree2, params}; -wrap_slow_macros!{ +wrap_slow_macros! { // queries // ========== #[derive(InputObject, Serialize, Deserialize)] pub struct FindUserIdsForGoogleIdInput { - google_id: String, - trusted_operator_passkey: String, + google_id: String, + trusted_operator_passkey: String, } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct FindUserIdsForGoogleIdResult { - user_ids: Vec, + user_ids: Vec, } #[derive(Default)] pub struct QueryShard_General_TrustedOperators; #[Object] impl QueryShard_General_TrustedOperators { - /// Special query to find all user-ids associated with a given google-id (needed for user-sync with, eg. claim-miner). Must provide the trusted-provider passkey. - /// (For now, this passkey is manually created in k8s [eg. using Lens], and shared privately with trusted partners/operators. K8s path: namespace "default", name "debate-map-trusted-provider", field "passkey") - async fn find_user_ids_for_google_id(&self, gql_ctx: &async_graphql::Context<'_>, input: FindUserIdsForGoogleIdInput) -> Result { - let FindUserIdsForGoogleIdInput { google_id, trusted_operator_passkey } = input; - ensure_trusted_operator_passkey_is_correct(trusted_operator_passkey, true)?; - ensure_gql(google_id.len() > 0, "Google-id to search for cannot be empty.")?; // defensive; block finding users with empty google-id (atm only known to be the case for generated dev/test accounts) + /// Special query to find all user-ids associated with a given google-id (needed for user-sync with, eg. claim-miner). Must provide the trusted-provider passkey. + /// (For now, this passkey is manually created in k8s [eg. using Lens], and shared privately with trusted partners/operators. 
K8s path: namespace "default", name "debate-map-trusted-provider", field "passkey") + async fn find_user_ids_for_google_id(&self, gql_ctx: &async_graphql::Context<'_>, input: FindUserIdsForGoogleIdInput) -> Result { + let FindUserIdsForGoogleIdInput { google_id, trusted_operator_passkey } = input; + ensure_trusted_operator_passkey_is_correct(trusted_operator_passkey, true)?; + ensure_gql(google_id.len() > 0, "Google-id to search for cannot be empty.")?; // defensive; block finding users with empty google-id (atm only known to be the case for generated dev/test accounts) - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - // For this query, bypass rls-checks. It appears safe, and brings major speed-gains (presumably since can use index): with bypass-rls=false, takes ~3000ms; with bypass-rls=true, takes <100ms - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, true).await?; - let rows: Vec = ctx.tx.query_raw(r#"SELECT id FROM "userHiddens" WHERE ("providerData" #>> '{0,sub}') = $1"#, &[&google_id]).await?.try_collect().await?; - - let result = FindUserIdsForGoogleIdResult { - user_ids: rows.into_iter().map(|a| a.get("id")).collect(), - }; - Ok(result) - } + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + // For this query, bypass rls-checks. It appears safe, and brings major speed-gains (presumably since can use index): with bypass-rls=false, takes ~3000ms; with bypass-rls=true, takes <100ms + let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, true).await?; + let rows: Vec = ctx.tx.query_raw(r#"SELECT id FROM "userHiddens" WHERE ("providerData" #>> '{0,sub}') = $1"#, &[&google_id]).await?.try_collect().await?; + + let result = FindUserIdsForGoogleIdResult { + user_ids: rows.into_iter().map(|a| a.get("id")).collect(), + }; + Ok(result) + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/global_data.rs b/Packages/app-server/src/db/global_data.rs index 7361cdb2f..991355541 100644 --- a/Packages/app-server/src/db/global_data.rs +++ b/Packages/app-server/src/db/global_data.rs @@ -1,17 +1,20 @@ -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{SubError, serde_json, GQLError}; +use futures_util::{stream, Stream, TryFutureExt}; use rust_shared::async_graphql; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject}; -use rust_shared::serde; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{Context, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::tokio_postgres::{Row, Client}; +use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{serde_json, GQLError, SubError}; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput}}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; +use crate::utils::db::{ + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; -wrap_slow_macros!{ 
+wrap_slow_macros! { /*cached_expand!{ const ce_args: &str = r##" @@ -21,14 +24,14 @@ excludeLinesWith = "#[graphql(name" #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct GlobalData { - pub id: ID, - pub extras: JSONValue, + pub id: ID, + pub extras: JSONValue, } impl From for GlobalData { fn from(row: Row) -> Self { Self { - id: ID::from(&row.get::<_, String>("id")), - extras: serde_json::from_value(row.get("extras")).unwrap(), + id: ID::from(&row.get::<_, String>("id")), + extras: serde_json::from_value(row.get("extras")).unwrap(), } } } @@ -36,8 +39,8 @@ impl From for GlobalData { #[derive(Clone)] pub struct GQLSet_GlobalData { pub nodes: Vec } #[Object] impl GQLSet_GlobalData { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_GlobalData { - fn from(entries: Vec) -> GQLSet_GlobalData { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_GlobalData { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_GlobalData; @@ -52,12 +55,12 @@ impl GQLSet for GQLSet_GlobalData { #[derive(Default)] pub struct SubscriptionShard_GlobalData; #[Subscription] impl SubscriptionShard_GlobalData { - async fn globalData<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "globalData", filter).await - } - async fn globalDatum<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "globalData", id).await - } + async fn globalData<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "globalData", filter).await + } + async fn globalDatum<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "globalData", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/map_node_edits.rs b/Packages/app-server/src/db/map_node_edits.rs index a19b4927a..407ed6501 100644 --- a/Packages/app-server/src/db/map_node_edits.rs +++ b/Packages/app-server/src/db/map_node_edits.rs @@ -1,54 +1,57 @@ -use rust_shared::SubError; +use futures_util::{stream, Stream, TryFutureExt}; +use rust_shared::anyhow::Error; use rust_shared::async_graphql; use rust_shared::async_graphql::Enum; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject}; -use futures_util::{Stream, stream, TryFutureExt}; -use rust_shared::anyhow::Error; +use rust_shared::async_graphql::{Context, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; use rust_shared::serde; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::tokio_postgres::{Client, Row}; use rust_shared::GQLError; -use rust_shared::tokio_postgres::{Row, Client}; +use rust_shared::SubError; use crate::utils::db::accessors::AccessorContext; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, 
filter::FilterInput}}; +use crate::utils::db::{ + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::_shared::access_policy_target::AccessPolicyTarget; use super::maps::get_map; use super::nodes::get_node; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] pub enum ChangeType { - #[graphql(name = "add")] add, - #[graphql(name = "edit")] edit, - #[graphql(name = "remove")] remove, + #[graphql(name = "add")] add, + #[graphql(name = "edit")] edit, + #[graphql(name = "remove")] remove, } // this is called "MapNodeEdit" rather than just "NodeEdit", due to it always being a node edit in the context of a map #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct MapNodeEdit { - pub id: ID, + pub id: ID, pub map: String, pub node: String, pub time: i64, pub r#type: ChangeType, - - #[graphql(name = "c_accessPolicyTargets")] - pub c_accessPolicyTargets: Vec, + + #[graphql(name = "c_accessPolicyTargets")] + pub c_accessPolicyTargets: Vec, } impl From for MapNodeEdit { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(Clone)] pub struct GQLSet_MapNodeEdit { pub nodes: Vec } #[Object] impl GQLSet_MapNodeEdit { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_MapNodeEdit { - fn from(entries: Vec) -> GQLSet_MapNodeEdit { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_MapNodeEdit { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_NodeEdit; @@ -63,12 +66,12 @@ impl GQLSet for GQLSet_MapNodeEdit { #[derive(Default)] pub struct SubscriptionShard_NodeEdit; #[Subscription] impl SubscriptionShard_NodeEdit { - async fn mapNodeEdits<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "mapNodeEdits", filter).await - } - async fn mapNodeEdit<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "mapNodeEdits", id).await - } + async fn mapNodeEdits<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "mapNodeEdits", filter).await + } + async fn mapNodeEdit<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "mapNodeEdits", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/maps.rs b/Packages/app-server/src/db/maps.rs index 7365a25b2..7968029da 100644 --- a/Packages/app-server/src/db/maps.rs +++ b/Packages/app-server/src/db/maps.rs @@ -1,85 +1,89 @@ +use futures_util::{stream, Stream, TryFutureExt}; use rust_shared::anyhow::Error; -use rust_shared::serde_json::json; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{SubError, serde_json, GQLError}; use rust_shared::async_graphql; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use 
rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{serde_json, GQLError, SubError}; -use crate::utils::db::accessors::{AccessorContext, get_db_entry}; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; +use crate::utils::db::accessors::{get_db_entry, AccessorContext}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput}}; +use crate::utils::db::{ + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::commands::_command::{CanNullOrOmit, CanOmit}; +#[rustfmt::skip] pub async fn get_map(ctx: &AccessorContext<'_>, id: &str) -> Result { - get_db_entry(ctx, "maps", &Some(json!({ + get_db_entry(ctx, "maps", &Some(json!({ "id": {"equalTo": id} }))).await } -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct Map { - pub id: ID, - pub accessPolicy: String, + pub id: ID, + pub accessPolicy: String, pub creator: String, pub createdAt: i64, - pub name: String, - pub note: Option, - pub noteInline: Option, - pub rootNode: String, - pub defaultExpandDepth: i32, - pub nodeAccessPolicy: Option, - pub featured: Option, + pub name: String, + pub note: Option, + pub noteInline: Option, + pub rootNode: String, + pub defaultExpandDepth: i32, + pub nodeAccessPolicy: Option, + pub featured: Option, pub editors: Vec, pub edits: i32, pub editedAt: Option, - pub extras: JSONValue, + pub extras: JSONValue, } impl From for Map { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(InputObject, Clone, Serialize, Deserialize)] pub struct MapInput { - pub accessPolicy: String, - pub name: String, - pub note: Option, - pub noteInline: Option, - pub defaultExpandDepth: i32, - pub nodeAccessPolicy: Option, - pub featured: Option, + pub accessPolicy: String, + pub name: String, + pub note: Option, + pub noteInline: Option, + pub defaultExpandDepth: i32, + pub nodeAccessPolicy: Option, + pub featured: Option, pub editors: Vec, //pub edits: i32, //pub editedAt: Option, - pub extras: JSONValue, + pub extras: JSONValue, } #[derive(InputObject, Serialize, Deserialize)] pub struct MapUpdates { - pub accessPolicy: CanOmit, - pub name: CanOmit, - pub note: CanNullOrOmit, - pub noteInline: CanNullOrOmit, - pub defaultExpandDepth: CanOmit, - pub nodeAccessPolicy: CanNullOrOmit, - pub featured: CanNullOrOmit, + pub accessPolicy: CanOmit, + pub name: CanOmit, + pub note: CanNullOrOmit, + pub noteInline: CanNullOrOmit, + pub defaultExpandDepth: CanOmit, + pub nodeAccessPolicy: CanNullOrOmit, + pub featured: CanNullOrOmit, pub editors: CanOmit>, - pub extras: CanOmit, + pub extras: CanOmit, } #[derive(Clone)] pub struct GQLSet_Map { pub nodes: Vec } #[Object] impl GQLSet_Map { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_Map { - fn from(entries: Vec) -> GQLSet_Map { Self { nodes: 
entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_Map { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_Map; @@ -94,12 +98,12 @@ impl GQLSet for GQLSet_Map { #[derive(Default)] pub struct SubscriptionShard_Map; #[Subscription] impl SubscriptionShard_Map { - async fn maps<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "maps", filter).await - } - async fn map<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "maps", id).await - } + async fn maps<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "maps", filter).await + } + async fn map<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "maps", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/medias.rs b/Packages/app-server/src/db/medias.rs index 67dbec5d7..75c0f2de3 100644 --- a/Packages/app-server/src/db/medias.rs +++ b/Packages/app-server/src/db/medias.rs @@ -1,74 +1,78 @@ +use futures_util::{stream, Stream, TryFutureExt}; use rust_shared::anyhow::Error; -use rust_shared::{SubError, GQLError}; use rust_shared::async_graphql; use rust_shared::async_graphql::Enum; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::{GQLError, SubError}; -use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput}}; use crate::utils::db::accessors::{get_db_entry, AccessorContext}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; +use crate::utils::db::pg_row_to_json::postgres_row_to_struct; +use crate::utils::db::{ + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::commands::_command::CanOmit; +#[rustfmt::skip] pub async fn get_media(ctx: &AccessorContext<'_>, id: &str) -> Result { - get_db_entry(ctx, "medias", &Some(json!({ - "id": {"equalTo": id} - }))).await + get_db_entry(ctx, "medias", &Some(json!({ + "id": {"equalTo": id} + }))).await } -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] pub enum MediaType { - #[graphql(name = "image")] image, - #[graphql(name = "video")] video, + #[graphql(name = "image")] image, + #[graphql(name = "video")] video, } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct Media { - pub id: ID, + pub id: ID, pub creator: String, pub createdAt: i64, - pub accessPolicy: String, - pub name: String, - pub r#type: MediaType, - pub url: String, - pub description: String, + pub accessPolicy: String, + pub name: String, + pub r#type: MediaType, + pub url: String, + pub description: String, } impl From for Media { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(InputObject, Clone, Serialize, Deserialize)] pub struct MediaInput { - pub accessPolicy: String, - pub name: String, - pub r#type: MediaType, - pub url: String, - pub description: String, + pub accessPolicy: String, + pub name: String, + pub r#type: MediaType, + pub url: String, + pub description: String, } #[derive(InputObject, Serialize, Deserialize)] pub struct MediaUpdates { - pub accessPolicy: CanOmit, - pub name: CanOmit, - pub r#type: CanOmit, - pub url: CanOmit, - pub description: CanOmit, + pub accessPolicy: CanOmit, + pub name: CanOmit, + pub r#type: CanOmit, + pub url: CanOmit, + pub description: CanOmit, } #[derive(Clone)] pub struct GQLSet_Media { pub nodes: Vec } #[Object] impl GQLSet_Media { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_Media { - fn from(entries: Vec) -> GQLSet_Media { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_Media { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_Media; @@ -83,12 +87,12 @@ impl GQLSet for GQLSet_Media { #[derive(Default)] pub struct SubscriptionShard_Media; #[Subscription] impl SubscriptionShard_Media { - async fn medias<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "medias", filter).await - } - async fn media<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "medias", id).await - } + async fn medias<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "medias", filter).await + } + async fn media<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "medias", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/mod.rs b/Packages/app-server/src/db/mod.rs index 747fe3c3a..73507828d 100644 --- a/Packages/app-server/src/db/mod.rs +++ b/Packages/app-server/src/db/mod.rs @@ -1,123 +1,120 @@ pub mod _shared { - pub mod access_policy_target; - pub mod attachments; - pub mod attachments_ { - pub mod source_chain; - } - pub mod common_errors; - pub mod path_finder; - pub mod table_permissions; + pub mod access_policy_target; + pub mod attachments; + pub mod attachments_ { + pub mod source_chain; + } + pub mod common_errors; + pub mod path_finder; + pub mod table_permissions; } pub mod access_policies_ { - pub mod _access_policy; - pub mod _permission_set; + pub mod _access_policy; + pub mod _permission_set; } pub mod commands { - pub mod _shared { - pub mod add_node; - pub mod increment_edit_counts; - pub 
mod jsonb_utils; - pub mod rating_processor; - pub mod record_command_run; - pub mod update_node_rating_summaries; - } - pub mod _temp { - pub mod clone_map_special; - } - pub mod transfer_nodes_ { - pub mod transfer_using_clone; - pub mod transfer_using_shim; - } - pub mod _command; - pub mod add_access_policy; - pub mod add_argument_and_claim; - pub mod add_child_node; - pub mod add_map; - pub mod add_media; - pub mod add_node_link; - pub mod add_node_revision; - pub mod add_node_phrasing; - pub mod add_node_tag; - pub mod add_share; - pub mod add_term; - pub mod add_timeline; - pub mod add_timeline_step; - pub mod clone_subtree; - pub mod delete_access_policy; - pub mod delete_argument; - pub mod delete_map; - pub mod delete_media; - pub mod delete_node; - pub mod delete_node_link; - pub mod delete_node_phrasing; - pub mod delete_node_rating; - pub mod delete_node_revision; - pub mod delete_node_tag; - pub mod delete_share; - pub mod delete_term; - pub mod delete_timeline; - pub mod delete_timeline_step; - pub mod import_firestore_dump; - pub mod link_node; - pub mod run_command_batch; - pub mod set_node_is_multi_premise_argument; - pub mod set_node_rating; - pub mod set_user_follow_data; - pub mod transfer_nodes; - pub mod update_access_policy; - pub mod update_map; - pub mod update_media; - pub mod update_node; - pub mod update_node_link; - pub mod update_node_phrasing; - pub mod update_node_tag; - pub mod update_share; - pub mod update_term; - pub mod update_timeline; - pub mod update_timeline_step; - pub mod update_user; - pub mod update_user_hidden; - pub mod refresh_lq_data; - //pub mod transfer_nodes; + pub mod _shared { + pub mod add_node; + pub mod increment_edit_counts; + pub mod jsonb_utils; + pub mod rating_processor; + pub mod record_command_run; + pub mod update_node_rating_summaries; + } + pub mod _temp { + pub mod clone_map_special; + } + pub mod transfer_nodes_ { + pub mod transfer_using_clone; + pub mod transfer_using_shim; + } + pub mod _command; + pub mod add_access_policy; + pub mod add_argument_and_claim; + pub mod add_child_node; + pub mod add_map; + pub mod add_media; + pub mod add_node_link; + pub mod add_node_phrasing; + pub mod add_node_revision; + pub mod add_node_tag; + pub mod add_share; + pub mod add_term; + pub mod add_timeline; + pub mod add_timeline_step; + pub mod clone_subtree; + pub mod delete_access_policy; + pub mod delete_argument; + pub mod delete_map; + pub mod delete_media; + pub mod delete_node; + pub mod delete_node_link; + pub mod delete_node_phrasing; + pub mod delete_node_rating; + pub mod delete_node_revision; + pub mod delete_node_tag; + pub mod delete_share; + pub mod delete_term; + pub mod delete_timeline; + pub mod delete_timeline_step; + pub mod import_firestore_dump; + pub mod link_node; + pub mod refresh_lq_data; + pub mod run_command_batch; + pub mod set_node_is_multi_premise_argument; + pub mod set_node_rating; + pub mod set_user_follow_data; + pub mod transfer_nodes; + pub mod update_access_policy; + pub mod update_map; + pub mod update_media; + pub mod update_node; + pub mod update_node_link; + pub mod update_node_phrasing; + pub mod update_node_tag; + pub mod update_share; + pub mod update_term; + pub mod update_timeline; + pub mod update_timeline_step; + pub mod update_user; + pub mod update_user_hidden; + //pub mod transfer_nodes; } pub mod _general; pub mod general { - pub mod permission_helpers; - pub mod backups; - pub mod search; - pub mod sign_in; - pub mod sign_in_ { - pub mod fake_user; - pub mod jwt_utils; - pub mod 
google; - } - pub mod subtree_old; - pub mod subtree_collector_old; - pub mod subtree; - pub mod subtree_collector; - pub mod trusted_operators; + pub mod backups; + pub mod permission_helpers; + pub mod search; + pub mod sign_in; + pub mod sign_in_ { + pub mod fake_user; + pub mod google; + pub mod jwt_utils; + } + pub mod subtree; + pub mod subtree_collector; + pub mod subtree_collector_old; + pub mod subtree_old; + pub mod trusted_operators; } pub mod nodes_ { - pub mod _node; - pub mod _node_type; + pub mod _node; + pub mod _node_type; } pub mod node_links_ { - pub mod node_link_validity; + pub mod node_link_validity; } pub mod node_ratings_ { - pub mod _node_rating_type; + pub mod _node_rating_type; } -pub mod users; -pub mod user_hiddens; -pub mod global_data; -pub mod maps; -pub mod terms; pub mod access_policies; -pub mod medias; pub mod command_runs; pub mod feedback_proposals; pub mod feedback_user_infos; +pub mod global_data; pub mod map_node_edits; +pub mod maps; +pub mod medias; pub mod node_links; pub mod node_phrasings; pub mod node_ratings; @@ -125,5 +122,8 @@ pub mod node_revisions; pub mod node_tags; pub mod nodes; pub mod shares; +pub mod terms; +pub mod timeline_steps; pub mod timelines; -pub mod timeline_steps; \ No newline at end of file +pub mod user_hiddens; +pub mod users; diff --git a/Packages/app-server/src/db/node_links.rs b/Packages/app-server/src/db/node_links.rs index 624de5866..179482be8 100644 --- a/Packages/app-server/src/db/node_links.rs +++ b/Packages/app-server/src/db/node_links.rs @@ -1,22 +1,26 @@ -use rust_shared::anyhow::{Error, anyhow, ensure}; -use rust_shared::once_cell::sync::Lazy; -use rust_shared::utils::general_::extensions::ToOwnedV; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{SubError, serde_json, should_be_unreachable, to_anyhow, GQLError}; +use futures_util::{stream, Stream, TryFutureExt}; +use rust_shared::anyhow::{anyhow, ensure, Error}; use rust_shared::async_graphql::{self, Enum}; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; +use rust_shared::once_cell::sync::Lazy; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::utils::general_::extensions::ToOwnedV; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{serde_json, should_be_unreachable, to_anyhow, GQLError, SubError}; use crate::utils::db::accessors::get_db_entry; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; +use crate::utils::db::{ + accessors::{get_db_entries, AccessorContext}, + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use crate::utils::general::order_key::OrderKey; -use 
crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput, accessors::{AccessorContext, get_db_entries}}}; use super::_shared::access_policy_target::AccessPolicyTarget; use super::commands::_command::{CanNullOrOmit, CanOmit}; @@ -24,52 +28,56 @@ use super::nodes::get_node; use super::nodes_::_node::Node; use super::nodes_::_node_type::NodeType; +#[rustfmt::skip] pub async fn get_node_link(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "nodeLinks", &Some(json!({ "id": {"equalTo": id} }))).await } pub async fn get_node_links(ctx: &AccessorContext<'_>, parent_id: Option<&str>, child_id: Option<&str>) -> Result, Error> { - let mut filter_map = serde_json::Map::new(); - if let Some(parent_id) = parent_id { - filter_map.insert("parent".to_owned(), json!({"equalTo": parent_id})); - } - if let Some(child_id) = child_id { - filter_map.insert("child".to_owned(), json!({"equalTo": child_id})); - } - ensure!(filter_map.len() > 0, "Must provide at least one of parent_id or child_id."); - get_db_entries(ctx, "nodeLinks", &Some(JSONValue::Object(filter_map))).await + let mut filter_map = serde_json::Map::new(); + if let Some(parent_id) = parent_id { + filter_map.insert("parent".to_owned(), json!({"equalTo": parent_id})); + } + if let Some(child_id) = child_id { + filter_map.insert("child".to_owned(), json!({"equalTo": child_id})); + } + ensure!(filter_map.len() > 0, "Must provide at least one of parent_id or child_id."); + get_db_entries(ctx, "nodeLinks", &Some(JSONValue::Object(filter_map))).await } /// Does not handle mirror-children atm. pub async fn get_first_link_under_parent(ctx: &AccessorContext<'_>, node_id: &str, parent_id: &str) -> Result { let parent_child_links = get_node_links(ctx, Some(parent_id), Some(node_id)).await?; - Ok(parent_child_links.into_iter().nth(0).ok_or(anyhow!("No link found between claimed parent #{} and child #{}.", parent_id, node_id))?) + Ok(parent_child_links.into_iter().nth(0).ok_or(anyhow!("No link found between claimed parent #{} and child #{}.", parent_id, node_id))?) } pub async fn get_highest_order_key_under_parent(ctx: &AccessorContext<'_>, parent_id: Option<&str>) -> Result { - let parent_child_links = get_node_links(ctx, parent_id, None).await?; - match parent_child_links.len() { - 0 => return Ok(OrderKey::mid()), - _ => { - let parent_last_order_key = parent_child_links.into_iter() - //.max_by_key(|a| a.orderKey).ok_or_else(should_be_unreachable)?.orderKey; - .max_by(|a, b| a.orderKey.cmp(&b.orderKey)).ok_or_else(should_be_unreachable)?.orderKey; - Ok(parent_last_order_key) - }, - } + let parent_child_links = get_node_links(ctx, parent_id, None).await?; + match parent_child_links.len() { + 0 => return Ok(OrderKey::mid()), + _ => { + let parent_last_order_key = parent_child_links + .into_iter() + //.max_by_key(|a| a.orderKey).ok_or_else(should_be_unreachable)?.orderKey; + .max_by(|a, b| a.orderKey.cmp(&b.orderKey)) + .ok_or_else(should_be_unreachable)? + .orderKey; + Ok(parent_last_order_key) + }, + } } -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, Debug)] pub enum ChildGroup { - #[graphql(name = "generic")] generic, - #[graphql(name = "truth")] truth, - #[graphql(name = "relevance")] relevance, - // testing - #[graphql(name = "neutrality")] neutrality, - #[graphql(name = "freeform")] freeform, + #[graphql(name = "generic")] generic, + #[graphql(name = "truth")] truth, + #[graphql(name = "relevance")] relevance, + // testing + #[graphql(name = "neutrality")] neutrality, + #[graphql(name = "freeform")] freeform, } pub static CHILD_GROUPS_WITH_POLARITY_REQUIRED: Lazy> = Lazy::new(|| vec![ChildGroup::truth, ChildGroup::relevance, ChildGroup::neutrality]); @@ -77,20 +85,20 @@ pub static CHILD_GROUPS_WITH_POLARITY_REQUIRED_OR_OPTIONAL: Lazy #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] pub enum ClaimForm { - #[graphql(name = "base")] base, - #[graphql(name = "negation")] negation, - #[graphql(name = "question")] question, + #[graphql(name = "base")] base, + #[graphql(name = "negation")] negation, + #[graphql(name = "question")] question, } #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] pub enum Polarity { - #[graphql(name = "supporting")] supporting, - #[graphql(name = "opposing")] opposing, + #[graphql(name = "supporting")] supporting, + #[graphql(name = "opposing")] opposing, } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct NodeLink { - pub id: ID, + pub id: ID, pub creator: String, pub createdAt: i64, pub parent: String, @@ -101,37 +109,37 @@ pub struct NodeLink { pub seriesAnchor: Option, pub seriesEnd: Option, pub polarity: Option, - - #[graphql(name = "c_parentType")] + + #[graphql(name = "c_parentType")] pub c_parentType: NodeType, - #[graphql(name = "c_childType")] + #[graphql(name = "c_childType")] pub c_childType: NodeType, - #[graphql(name = "c_accessPolicyTargets")] - pub c_accessPolicyTargets: Vec, + #[graphql(name = "c_accessPolicyTargets")] + pub c_accessPolicyTargets: Vec, } impl NodeLink { - pub fn into_input(self, keep_parent_and_child: bool) -> NodeLinkInput { - NodeLinkInput { - parent: if keep_parent_and_child { Some(self.parent) } else { None }, - child: if keep_parent_and_child { Some(self.child) } else { None }, - group: self.group, - orderKey: self.orderKey, - form: self.form, - seriesAnchor: self.seriesAnchor, - seriesEnd: self.seriesEnd, - polarity: self.polarity, - } - } + pub fn into_input(self, keep_parent_and_child: bool) -> NodeLinkInput { + NodeLinkInput { + parent: if keep_parent_and_child { Some(self.parent) } else { None }, + child: if keep_parent_and_child { Some(self.child) } else { None }, + group: self.group, + orderKey: self.orderKey, + form: self.form, + seriesAnchor: self.seriesAnchor, + seriesEnd: self.seriesEnd, + polarity: self.polarity, + } + } } impl From for NodeLink { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(InputObject, Clone, Serialize, Deserialize)] pub struct NodeLinkInput { - /// Marked as optional, since in some contexts it's not needed. (eg. for add_child_node) + /// Marked as optional, since in some contexts it's not needed. (eg. for add_child_node) pub parent: Option, - /// Marked as optional, since in some contexts it's not needed. (eg. for add_child_node) + /// Marked as optional, since in some contexts it's not needed. (eg. 
for add_child_node) pub child: Option, pub group: ChildGroup, pub orderKey: OrderKey, @@ -151,8 +159,8 @@ pub struct NodeLinkUpdates { #[derive(Clone)] pub struct GQLSet_NodeLink { pub nodes: Vec } #[Object] impl GQLSet_NodeLink { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_NodeLink { - fn from(entries: Vec) -> GQLSet_NodeLink { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_NodeLink { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_NodeLink; @@ -167,12 +175,12 @@ impl GQLSet for GQLSet_NodeLink { #[derive(Default)] pub struct SubscriptionShard_NodeLink; #[Subscription] impl SubscriptionShard_NodeLink { - async fn nodeLinks<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "nodeLinks", filter).await - } - async fn nodeLink<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "nodeLinks", id).await - } + async fn nodeLinks<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "nodeLinks", filter).await + } + async fn nodeLink<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "nodeLinks", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/node_links_/node_link_validity.rs b/Packages/app-server/src/db/node_links_/node_link_validity.rs index 009f84667..62f3a29d7 100644 --- a/Packages/app-server/src/db/node_links_/node_link_validity.rs +++ b/Packages/app-server/src/db/node_links_/node_link_validity.rs @@ -1,31 +1,31 @@ -use rust_shared::async_graphql::{ID, SimpleObject, InputObject}; +use rust_shared::anyhow::{anyhow, bail, ensure, Context, Error}; +use rust_shared::async_graphql::Object; +use rust_shared::async_graphql::{InputObject, SimpleObject, ID}; +use rust_shared::db_constants::{GLOBAL_ROOT_NODE_ID, SYSTEM_USER_ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde_json::{Value, json}; -use rust_shared::db_constants::{SYSTEM_USER_ID, GLOBAL_ROOT_NODE_ID}; -use rust_shared::{async_graphql, serde_json, anyhow, GQLError}; -use rust_shared::async_graphql::{Object}; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Value}; +use rust_shared::utils::time::time_since_epoch_ms_i64; use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::anyhow::{anyhow, Error, ensure, bail, Context}; -use rust_shared::utils::time::{time_since_epoch_ms_i64}; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::{anyhow, async_graphql, serde_json, GQLError}; use tracing::info; use crate::db::_shared::access_policy_target::AccessPolicyTarget; use crate::db::_shared::common_errors::err_should_be_populated; -use crate::db::_shared::table_permissions::{does_policy_allow_x, CanVote, CanAddChild}; +use crate::db::_shared::table_permissions::{does_policy_allow_x, CanAddChild, CanVote}; use crate::db::access_policies::get_access_policy; use crate::db::access_policies_::_permission_set::{APAction, APTable}; use crate::db::commands::_command::command_boilerplate; use crate::db::general::permission_helpers::assert_user_can_add_child; -use crate::db::general::sign_in_::jwt_utils::{resolve_jwt_to_user_info, get_user_info_from_gql_ctx}; -use crate::db::node_links::{get_node_links, 
ChildGroup, NodeLink, NodeLinkInput, Polarity, CHILD_GROUPS_WITH_POLARITY_REQUIRED_OR_OPTIONAL, CHILD_GROUPS_WITH_POLARITY_REQUIRED}; +use crate::db::general::sign_in_::jwt_utils::{get_user_info_from_gql_ctx, resolve_jwt_to_user_info}; +use crate::db::node_links::{get_node_links, ChildGroup, NodeLink, NodeLinkInput, Polarity, CHILD_GROUPS_WITH_POLARITY_REQUIRED, CHILD_GROUPS_WITH_POLARITY_REQUIRED_OR_OPTIONAL}; use crate::db::nodes::get_node; -use crate::db::nodes_::_node::{Node}; +use crate::db::nodes_::_node::Node; use crate::db::nodes_::_node_type::{get_node_type_info, NodeType}; -use crate::db::users::{User, PermissionGroups, get_user}; +use crate::db::users::{get_user, PermissionGroups, User}; use crate::utils::db::accessors::AccessorContext; +use crate::utils::general::data_anchor::DataAnchorFor1; use rust_shared::utils::db::uuid::new_uuid_v4_as_b64; -use crate::utils::general::data_anchor::{DataAnchorFor1}; /// Does basic checking of validity of parent<>child linkage. See `assert_new_link_is_valid` for a more thorough validation. // sync: js[CheckLinkIsValid] @@ -57,7 +57,7 @@ pub fn assert_link_is_valid(parent_type: NodeType, child_type: NodeType, child_g ensure!(link_polarity.is_some(), "A link with an argument child must have a polarity specified."); } if link_polarity.is_some() { - ensure!(CHILD_GROUPS_WITH_POLARITY_REQUIRED_OR_OPTIONAL.contains(&child_group), r#"Only links in child-groups "truth", "relevance", "neutrality", or "freeform" can have a polarity specified."#); + ensure!(CHILD_GROUPS_WITH_POLARITY_REQUIRED_OR_OPTIONAL.contains(&child_group), r#"Only links in child-groups "truth", "relevance", "neutrality", or "freeform" can have a polarity specified."#); ensure!(child_type == NodeType::argument || child_type == NodeType::claim, "Only links with an argument child (or claim child, in sl-mode) can have a polarity specified."); } @@ -78,8 +78,10 @@ pub async fn assert_new_link_is_valid(ctx: &AccessorContext<'_>, parent_id: &str Some(actor) => (actor, &actor.permissionGroups), None => bail!("You're not signed in."), }; - if !permissions.basic { bail!("You lack basic permissions."); } - + if !permissions.basic { + bail!("You lack basic permissions."); + } + let parent = get_node(ctx, parent_id).await.with_context(|| "Parent data not found")?; // client-side version //if !does_policy_allow_x(ctx, actor_id, &parent.accessPolicy, APTable::nodes, APAction::addChild).await? { bail!("Parent node's permission policy does not grant you the ability to add children."); } @@ -89,13 +91,21 @@ pub async fn assert_new_link_is_valid(ctx: &AccessorContext<'_>, parent_id: &str }; if !guessedCanAddChild { bail!("Parent node's permission policy does not grant you the ability to add children."); }*/ // server-side version (on server, no need for this call-path to be called without actor) - if !parent.can_add_child(ctx, actor).await? { bail!("Parent node's permission policy does not grant you the ability to add children."); } + if !parent.can_add_child(ctx, actor).await? 
{ + bail!("Parent node's permission policy does not grant you the ability to add children."); + } - if parent.id == GLOBAL_ROOT_NODE_ID && !permissions.admin { bail!("Only admins can add children to the global-root."); } - if parent.id == new_child_id { bail!("Cannot link node as its own child."); } + if parent.id == GLOBAL_ROOT_NODE_ID && !permissions.admin { + bail!("Only admins can add children to the global-root."); + } + if parent.id == new_child_id { + bail!("Cannot link node as its own child."); + } let is_already_child = get_node_links(ctx, Some(parent_id), Some(&new_child_id)).await?.len() > 0; - if is_already_child { bail!("Node is already a child of the parent."); } + if is_already_child { + bail!("Node is already a child of the parent."); + } assert_link_is_valid(parent.r#type, new_child_type, new_child_group, new_link_polarity) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/node_phrasings.rs b/Packages/app-server/src/db/node_phrasings.rs index 4aa09e278..673401724 100644 --- a/Packages/app-server/src/db/node_phrasings.rs +++ b/Packages/app-server/src/db/node_phrasings.rs @@ -1,46 +1,51 @@ +use futures_util::{stream, Stream, TryFutureExt}; use rust_shared::anyhow::Error; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{SubError, serde_json, GQLError}; use rust_shared::async_graphql::{self, Enum}; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{serde_json, GQLError, SubError}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput}}; +use crate::utils::db::{ + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; -use crate::utils::db::accessors::{get_db_entry, AccessorContext, get_db_entries}; +use crate::utils::db::accessors::{get_db_entries, get_db_entry, AccessorContext}; use super::_shared::access_policy_target::AccessPolicyTarget; use super::_shared::attachments::TermAttachment; use super::commands::_command::{CanNullOrOmit, CanOmit}; use super::nodes::get_node; +#[rustfmt::skip] pub async fn get_node_phrasing(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "nodePhrasings", &Some(json!({ "id": {"equalTo": id} }))).await } +#[rustfmt::skip] pub async fn get_node_phrasings(ctx: &AccessorContext<'_>, node_id: &str) -> Result, Error> { get_db_entries(ctx, "nodePhrasings", &Some(json!({ "node": {"equalTo": node_id} }))).await } -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] pub enum NodePhrasingType { - #[graphql(name = "standard")] standard, - #[graphql(name = "simple")] simple, - #[graphql(name = "technical")] technical, - #[graphql(name = "humor")] humor, - #[graphql(name = "web")] web, + #[graphql(name = "standard")] standard, + #[graphql(name = "simple")] simple, + #[graphql(name = "technical")] technical, + #[graphql(name = "humor")] humor, + #[graphql(name = "web")] web, } /// Variant of NodePhrasing struct that keeps only the fields relevant for phrasings that are "embedded" within a node-revision. @@ -48,13 +53,13 @@ pub enum NodePhrasingType { #[graphql(input_name = "NodePhrasingEmbeddedInput")] //#[graphql(name = "NodePhrasing_Embedded", input_name = "NodePhrasing_EmbeddedInput")] # todo: use this approach once async-graphql is updated pub struct NodePhrasing_Embedded { - #[graphql(name = "text_base")] + #[graphql(name = "text_base")] pub text_base: String, - #[graphql(name = "text_negation")] + #[graphql(name = "text_negation")] pub text_negation: Option, - #[graphql(name = "text_question")] + #[graphql(name = "text_question")] pub text_question: Option, - #[graphql(name = "text_narrative")] + #[graphql(name = "text_narrative")] pub text_narrative: Option, pub note: Option, pub terms: Vec, @@ -62,41 +67,41 @@ pub struct NodePhrasing_Embedded { #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct NodePhrasing { - pub id: ID, + pub id: ID, pub creator: String, pub createdAt: i64, pub node: String, pub r#type: NodePhrasingType, - #[graphql(name = "text_base")] + #[graphql(name = "text_base")] pub text_base: String, - #[graphql(name = "text_negation")] + #[graphql(name = "text_negation")] pub text_negation: Option, - #[graphql(name = "text_question")] + #[graphql(name = "text_question")] pub text_question: Option, - #[graphql(name = "text_narrative")] + #[graphql(name = "text_narrative")] pub text_narrative: Option, pub note: Option, pub terms: Vec, pub references: Vec, - - #[graphql(name = "c_accessPolicyTargets")] - pub c_accessPolicyTargets: Vec, + + #[graphql(name = "c_accessPolicyTargets")] + pub c_accessPolicyTargets: Vec, } impl From for NodePhrasing { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(InputObject, Clone, Serialize, Deserialize)] pub struct NodePhrasingInput { pub node: String, pub r#type: NodePhrasingType, - #[graphql(name = "text_base")] + #[graphql(name = "text_base")] pub text_base: String, - #[graphql(name = "text_negation")] + #[graphql(name = "text_negation")] pub text_negation: Option, - #[graphql(name = "text_question")] + #[graphql(name = "text_question")] pub text_question: Option, - #[graphql(name = "text_narrative")] + #[graphql(name = "text_narrative")] pub text_narrative: Option, pub note: Option, pub terms: Vec, @@ -106,13 +111,13 @@ pub struct NodePhrasingInput { #[derive(InputObject, Serialize, Deserialize)] pub struct NodePhrasingUpdates { pub r#type: CanOmit, - #[graphql(name = "text_base")] + #[graphql(name = "text_base")] pub text_base: CanOmit, - #[graphql(name = "text_negation")] + #[graphql(name = "text_negation")] pub text_negation: CanNullOrOmit, - #[graphql(name = "text_question")] + #[graphql(name = "text_question")] pub text_question: CanNullOrOmit, - #[graphql(name = "text_narrative")] + #[graphql(name = "text_narrative")] pub text_narrative: CanNullOrOmit, pub note: CanNullOrOmit, pub terms: CanOmit>, @@ -122,8 +127,8 
@@ pub struct NodePhrasingUpdates { #[derive(Clone)] pub struct GQLSet_NodePhrasing { pub nodes: Vec } #[Object] impl GQLSet_NodePhrasing { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_NodePhrasing { - fn from(entries: Vec) -> GQLSet_NodePhrasing { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_NodePhrasing { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_NodePhrasing; @@ -138,12 +143,12 @@ impl GQLSet for GQLSet_NodePhrasing { #[derive(Default)] pub struct SubscriptionShard_NodePhrasing; #[Subscription] impl SubscriptionShard_NodePhrasing { - async fn nodePhrasings<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "nodePhrasings", filter).await - } - async fn nodePhrasing<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "nodePhrasings", id).await - } + async fn nodePhrasings<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "nodePhrasings", filter).await + } + async fn nodePhrasing<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "nodePhrasings", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/node_ratings.rs b/Packages/app-server/src/db/node_ratings.rs index 71418a681..813acd202 100644 --- a/Packages/app-server/src/db/node_ratings.rs +++ b/Packages/app-server/src/db/node_ratings.rs @@ -1,34 +1,38 @@ -use rust_shared::itertools::Itertools; -use rust_shared::utils::general::{as_debug_str, as_json_str, enum_to_string}; -use rust_shared::utils::general_::extensions::ToOwnedV; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{SubError, serde_json, GQLError}; -use rust_shared::anyhow::{anyhow, Error, ensure, bail}; +use futures_util::{stream, Stream, TryFutureExt}; +use rust_shared::anyhow::{anyhow, bail, ensure, Error}; use rust_shared::async_graphql; use rust_shared::async_graphql::Enum; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; +use rust_shared::itertools::Itertools; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::utils::general::{as_debug_str, as_json_str, enum_to_string}; +use rust_shared::utils::general_::extensions::ToOwnedV; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{serde_json, GQLError, SubError}; use crate::db::node_links::get_node_links; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; use crate::db::nodes::get_node; -use crate::utils::db::accessors::{get_db_entries, AccessorContext}; use crate::utils::db::accessors::get_db_entry; +use crate::utils::db::accessors::{get_db_entries, AccessorContext}; +use 
crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput}}; +use crate::utils::db::{ + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::_shared::access_policy_target::AccessPolicyTarget; use super::commands::_shared::rating_processor::get_argument_impact_pseudo_ratings; -use super::node_ratings_::_node_rating_type::{NodeRatingType, get_rating_type_info}; +use super::node_ratings_::_node_rating_type::{get_rating_type_info, NodeRatingType}; use super::nodes::get_node_children; -use super::nodes_::_node::{RatingSummary}; +use super::nodes_::_node::RatingSummary; use super::nodes_::_node_type::NodeType; +#[rustfmt::skip] pub async fn get_node_rating(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "nodeRatings", &Some(json!({ "id": {"equalTo": id} @@ -36,125 +40,124 @@ pub async fn get_node_rating(ctx: &AccessorContext<'_>, id: &str) -> Result, node_id: &str, rating_type: Option, user_ids: Option<&Vec>) -> Result, Error> { - if rating_type == Some(NodeRatingType::impact) { + if rating_type == Some(NodeRatingType::impact) { //Assert(userIDs == null, `Cannot currently use a userIDs filter for getting ratings of type "impact". (query-level optimization not yet added for that case)`); let node = get_node(ctx, &node_id).await?; let node_children = get_node_children(ctx, node_id).await?; let premises = node_children.into_iter().filter(|a| a.r#type == NodeType::claim).collect_vec(); return Ok(get_argument_impact_pseudo_ratings(ctx, &node, &premises, user_ids, false).await?); } - - get_node_ratings_base(ctx, node_id, rating_type, user_ids).await + + get_node_ratings_base(ctx, node_id, rating_type, user_ids).await } // variant needed to avoid need for async-recursion crate (probably temp) pub async fn get_node_ratings_base(ctx: &AccessorContext<'_>, node_id: &str, rating_type: Option, user_ids: Option<&Vec>) -> Result, Error> { - if rating_type == Some(NodeRatingType::impact) { + if rating_type == Some(NodeRatingType::impact) { bail!("Cannot call `get_node_ratings_base` with rating-type `impact`; use `get_node_ratings` function instead."); } - - let mut filter_map = serde_json::Map::new(); - filter_map.insert("node".to_owned(), json!({"equalTo": node_id})); - if let Some(rating_type) = rating_type { - filter_map.insert("type".to_owned(), json!({"equalTo": rating_type})); - } - if let Some(user_ids) = user_ids { - filter_map.insert("creator".to_owned(), json!({"in": user_ids})); - } - get_db_entries(ctx, "nodeRatings", &Some(JSONValue::Object(filter_map))).await + + let mut filter_map = serde_json::Map::new(); + filter_map.insert("node".to_owned(), json!({"equalTo": node_id})); + if let Some(rating_type) = rating_type { + filter_map.insert("type".to_owned(), json!({"equalTo": rating_type})); + } + if let Some(user_ids) = user_ids { + filter_map.insert("creator".to_owned(), json!({"in": user_ids})); + } + get_db_entries(ctx, "nodeRatings", &Some(JSONValue::Object(filter_map))).await } pub async fn get_node_rating_by_user(ctx: &AccessorContext<'_>, node_id: &str, rating_type: NodeRatingType, user_id: &str) -> Result { - let matches = get_node_ratings(ctx, node_id, Some(rating_type), 
Some(&vec![user_id.to_owned()])).await?; - let result: NodeRating = matches.into_iter().nth(0).ok_or(anyhow!("No rating found for the node+ratingType+userID combo."))?; + let matches = get_node_ratings(ctx, node_id, Some(rating_type), Some(&vec![user_id.to_owned()])).await?; + let result: NodeRating = matches.into_iter().nth(0).ok_or(anyhow!("No rating found for the node+ratingType+userID combo."))?; Ok(result) } // variant needed to avoid need for async-recursion crate (probably temp) pub async fn get_node_rating_by_user_base(ctx: &AccessorContext<'_>, node_id: &str, rating_type: NodeRatingType, user_id: &str) -> Result { - let matches = get_node_ratings_base(ctx, node_id, Some(rating_type), Some(&vec![user_id.to_owned()])).await?; - let result: NodeRating = matches.into_iter().nth(0).ok_or(anyhow!("No rating found for the node+ratingType+userID combo."))?; + let matches = get_node_ratings_base(ctx, node_id, Some(rating_type), Some(&vec![user_id.to_owned()])).await?; + let result: NodeRating = matches.into_iter().nth(0).ok_or(anyhow!("No rating found for the node+ratingType+userID combo."))?; Ok(result) } pub async fn get_rating_summary(ctx: &AccessorContext<'_>, node_id: &str, rating_type: NodeRatingType) -> Result { - let node = get_node(ctx, node_id).await?; - let rating_type_info = get_rating_type_info(rating_type); - Ok(match node.extras_known()?.ratingSummaries.and_then(|a| a.get(&enum_to_string(&rating_type)).cloned()) { - Some(rating_summary) => rating_summary, - None => { - // if rating-summary entry is missing, it must mean no one has rated the node yet, so return a corresponding RatingSummary object - RatingSummary { - average: None, - countsByRange: rating_type_info.valueRanges.iter().map(|_| 0).collect_vec(), - } - } - }) + let node = get_node(ctx, node_id).await?; + let rating_type_info = get_rating_type_info(rating_type); + Ok(match node.extras_known()?.ratingSummaries.and_then(|a| a.get(&enum_to_string(&rating_type)).cloned()) { + Some(rating_summary) => rating_summary, + None => { + // if rating-summary entry is missing, it must mean no one has rated the node yet, so return a corresponding RatingSummary object + RatingSummary { average: None, countsByRange: rating_type_info.valueRanges.iter().map(|_| 0).collect_vec() } + }, + }) } pub async fn get_rating_average(ctx: &AccessorContext<'_>, node_id: &str, rating_type: NodeRatingType, user_ids: Option>) -> Result { - //let node = get_node(ctx, &node_id).await?; - //if node.access_policy.permissions.nodes.vote == false { return Ok(100.0); } + //let node = get_node(ctx, &node_id).await?; + //if node.access_policy.permissions.nodes.vote == false { return Ok(100.0); } - if let Some(user_ids) = user_ids { - let ratings = get_node_ratings(ctx, node_id, Some(rating_type), Some(&user_ids)).await?; - return Ok(ratings_to_average(ratings)?); - } + if let Some(user_ids) = user_ids { + let ratings = get_node_ratings(ctx, node_id, Some(rating_type), Some(&user_ids)).await?; + return Ok(ratings_to_average(ratings)?); + } - let rating_summary: RatingSummary = get_rating_summary(ctx, node_id, rating_type).await?; - return Ok(rating_summary.average.unwrap_or(0.0)); + let rating_summary: RatingSummary = get_rating_summary(ctx, node_id, rating_type).await?; + return Ok(rating_summary.average.unwrap_or(0.0)); } // variant needed to avoid need for async-recursion crate (probably temp) pub async fn get_rating_average_base(ctx: &AccessorContext<'_>, node_id: &str, rating_type: NodeRatingType, user_ids: Option>) -> Result { - //let node = 
get_node(ctx, &node_id).await?; - //if node.access_policy.permissions.nodes.vote == false { return Ok(100.0); } + //let node = get_node(ctx, &node_id).await?; + //if node.access_policy.permissions.nodes.vote == false { return Ok(100.0); } - if let Some(user_ids) = user_ids { - let ratings = get_node_ratings_base(ctx, node_id, Some(rating_type), Some(&user_ids)).await?; - return Ok(ratings_to_average(ratings)?); - } + if let Some(user_ids) = user_ids { + let ratings = get_node_ratings_base(ctx, node_id, Some(rating_type), Some(&user_ids)).await?; + return Ok(ratings_to_average(ratings)?); + } - let rating_summary: RatingSummary = get_rating_summary(ctx, node_id, rating_type).await?; - return Ok(rating_summary.average.unwrap_or(0.0)); + let rating_summary: RatingSummary = get_rating_summary(ctx, node_id, rating_type).await?; + return Ok(rating_summary.average.unwrap_or(0.0)); } fn ratings_to_average(ratings: Vec) -> Result { - if ratings.len() == 0 { return Ok(0.0); } - let result = ratings.iter().map(|a| a.value).sum::() / ratings.len() as f64; - ensure!(result >= 0.0 && result <= 100.0, "Rating-average ({}) not in range. Invalid ratings: {:?}", result, ratings.iter().map(|a| a.value).filter(|a| !a.is_finite()).collect_vec()); - Ok(result) + if ratings.len() == 0 { + return Ok(0.0); + } + let result = ratings.iter().map(|a| a.value).sum::() / ratings.len() as f64; + ensure!(result >= 0.0 && result <= 100.0, "Rating-average ({}) not in range. Invalid ratings: {:?}", result, ratings.iter().map(|a| a.value).filter(|a| !a.is_finite()).collect_vec()); + Ok(result) } -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct NodeRating { - pub id: ID, + pub id: ID, pub creator: String, pub createdAt: i64, - pub accessPolicy: String, - pub node: String, - pub r#type: NodeRatingType, + pub accessPolicy: String, + pub node: String, + pub r#type: NodeRatingType, pub value: f64, - - #[graphql(name = "c_accessPolicyTargets")] - pub c_accessPolicyTargets: Vec, + + #[graphql(name = "c_accessPolicyTargets")] + pub c_accessPolicyTargets: Vec, } impl From for NodeRating { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(InputObject, Clone, Serialize, Deserialize)] pub struct NodeRatingInput { - pub accessPolicy: String, - pub node: String, - pub r#type: NodeRatingType, + pub accessPolicy: String, + pub node: String, + pub r#type: NodeRatingType, pub value: f64, } #[derive(Clone)] pub struct GQLSet_NodeRating { pub nodes: Vec } #[Object] impl GQLSet_NodeRating { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_NodeRating { - fn from(entries: Vec) -> GQLSet_NodeRating { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_NodeRating { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_NodeRating; @@ -169,12 +172,12 @@ impl GQLSet for GQLSet_NodeRating { #[derive(Default)] pub struct SubscriptionShard_NodeRating; #[Subscription] impl SubscriptionShard_NodeRating { - async fn nodeRatings<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "nodeRatings", filter).await - } - async fn nodeRating<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "nodeRatings", id).await - } + async fn 
nodeRatings<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "nodeRatings", filter).await + } + async fn nodeRating<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "nodeRatings", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/node_ratings_/_node_rating_type.rs b/Packages/app-server/src/db/node_ratings_/_node_rating_type.rs index f4ab04203..6afb30e59 100644 --- a/Packages/app-server/src/db/node_ratings_/_node_rating_type.rs +++ b/Packages/app-server/src/db/node_ratings_/_node_rating_type.rs @@ -1,22 +1,22 @@ use std::collections::HashMap; -use rust_shared::once_cell::sync::Lazy; +use rust_shared::async_graphql; +use rust_shared::async_graphql::Enum; use rust_shared::itertools::Itertools; +use rust_shared::once_cell::sync::Lazy; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::async_graphql; -use rust_shared::async_graphql::{Enum}; use rust_shared::utils::general::average; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)] pub enum NodeRatingType { - #[graphql(name = "significance")] significance, - #[graphql(name = "neutrality")] neutrality, - #[graphql(name = "truth")] truth, - #[graphql(name = "relevance")] relevance, - #[graphql(name = "impact")] impact, + #[graphql(name = "significance")] significance, + #[graphql(name = "neutrality")] neutrality, + #[graphql(name = "truth")] truth, + #[graphql(name = "relevance")] relevance, + #[graphql(name = "impact")] impact, } } @@ -36,9 +36,13 @@ pub fn rating_value_is_in_range(value: f64, range: &ValueRange) -> bool { let mut min_adjusted = range.min; let mut max_adjusted = range.max; // we use different logic on left and right sides; when value is exactly between two ranges, categorize it as being in the range farther from 50 (the mid-point) - if left_side && min_adjusted != 0f64 { min_adjusted += 0.001; } - if right_side && max_adjusted != 100f64 { max_adjusted -= 0.001; } - + if left_side && min_adjusted != 0f64 { + min_adjusted += 0.001; + } + if right_side && max_adjusted != 100f64 { + max_adjusted -= 0.001; + } + return value >= min_adjusted && value <= max_adjusted; } @@ -53,33 +57,33 @@ pub struct RatingType_Info { } // sync:js pub static BASE_RATING_TYPE_INFO: Lazy> = Lazy::new(|| { - HashMap::from([ - (NodeRatingType::significance, RatingType_Info { - displayText: "Significance".to_owned(), - valueRanges: generate_val_ranges_from_labels(&["Pointless", "Unimportant", "Somewhat Important", "Important", "Extremely Important"]), - }), - (NodeRatingType::neutrality, RatingType_Info { - displayText: "Neutrality".to_owned(), - valueRanges: generate_val_ranges_from_labels(&["Unbiased", "Slightly Biased", "Moderately Biased", "Highly Biased", "Extremely Biased"]), - }), - (NodeRatingType::truth, RatingType_Info { - displayText: "Agreement".to_owned(), - //valueLabels: {0: "Thoroughly false", 25: "Mostly false", 50: "Somewhat true", 75: "Mostly true", 100: "Thoroughly true"}, - //valueLabels: {0: "Strongly disagree", 20: "Disagree", 35: "Somewhat disagree", 50: "Neutral", 65: "Somewhat agree", 80: "Agree", 100: "Strongly agree"}, - valueRanges: generate_val_ranges_from_labels(&["Strongly Disagree", "Disagree", "Somewhat Disagree", "Neutral", "Somewhat Agree", "Agree", "Strongly Agree"]), - 
//valueLabels: {0: "Disagree (strongly)", 20: "Disagree", 35: "Disagree (somewhat)", 50: "Neutral", 65: "Agree (somewhat)", 80: "Agree", 100: "Agree (strongly)"}, - }), - (NodeRatingType::relevance, RatingType_Info { - displayText: "Relevance".to_owned(), - //valueRanges: generate_val_ranges_from_labels(["Completely Irrelevant", "Slightly Relevant", "Moderately Relevant", "Highly Relevant", "Extremely Relevant"]), - valueRanges: generate_val_ranges_from_labels(&["Not Relevant", "Slightly Relevant", "Somewhat Relevant", "Relevant", "Substantially Relevant", "Highly Relevant", "Extremely Relevant"]), - }), - (NodeRatingType::impact, RatingType_Info { - displayText: "Impact".to_owned(), - //valueRanges: generate_val_ranges_from_labels(["Thoroughly False", "Mostly False", "Somewhat True", "Mostly True", "Game-Changer"]), - valueRanges: generate_val_ranges_from_labels(&["[unnamed range]"]), // must have one range entry, so UpdateNodeRatingSummaries() can store the impact-rating count, with consistent code - }), - ]) + HashMap::from([ + (NodeRatingType::significance, RatingType_Info { + displayText: "Significance".to_owned(), + valueRanges: generate_val_ranges_from_labels(&["Pointless", "Unimportant", "Somewhat Important", "Important", "Extremely Important"]), + }), + (NodeRatingType::neutrality, RatingType_Info { + displayText: "Neutrality".to_owned(), + valueRanges: generate_val_ranges_from_labels(&["Unbiased", "Slightly Biased", "Moderately Biased", "Highly Biased", "Extremely Biased"]), + }), + (NodeRatingType::truth, RatingType_Info { + displayText: "Agreement".to_owned(), + //valueLabels: {0: "Thoroughly false", 25: "Mostly false", 50: "Somewhat true", 75: "Mostly true", 100: "Thoroughly true"}, + //valueLabels: {0: "Strongly disagree", 20: "Disagree", 35: "Somewhat disagree", 50: "Neutral", 65: "Somewhat agree", 80: "Agree", 100: "Strongly agree"}, + valueRanges: generate_val_ranges_from_labels(&["Strongly Disagree", "Disagree", "Somewhat Disagree", "Neutral", "Somewhat Agree", "Agree", "Strongly Agree"]), + //valueLabels: {0: "Disagree (strongly)", 20: "Disagree", 35: "Disagree (somewhat)", 50: "Neutral", 65: "Agree (somewhat)", 80: "Agree", 100: "Agree (strongly)"}, + }), + (NodeRatingType::relevance, RatingType_Info { + displayText: "Relevance".to_owned(), + //valueRanges: generate_val_ranges_from_labels(["Completely Irrelevant", "Slightly Relevant", "Moderately Relevant", "Highly Relevant", "Extremely Relevant"]), + valueRanges: generate_val_ranges_from_labels(&["Not Relevant", "Slightly Relevant", "Somewhat Relevant", "Relevant", "Substantially Relevant", "Highly Relevant", "Extremely Relevant"]), + }), + (NodeRatingType::impact, RatingType_Info { + displayText: "Impact".to_owned(), + //valueRanges: generate_val_ranges_from_labels(["Thoroughly False", "Mostly False", "Somewhat True", "Mostly True", "Game-Changer"]), + valueRanges: generate_val_ranges_from_labels(&["[unnamed range]"]), // must have one range entry, so UpdateNodeRatingSummaries() can store the impact-rating count, with consistent code + }), + ]) }); pub fn get_rating_type_info(rating_type: NodeRatingType) -> &'static RatingType_Info { @@ -90,41 +94,61 @@ pub fn get_rating_type_info(rating_type: NodeRatingType) -> &'static RatingType_ // sync:js pub fn generate_val_ranges_from_labels(labels: &[&str]) -> Vec { let ranges: Vec<(f64, f64)> = match labels.len() { - 1 => vec![ - (0, 100) // center: 50 - ].into_iter().map(|(a, b)| (a as f64, b as f64)).collect_vec(), - // range covered by each entry: 20 [100/5 = 20] - 5 => 
vec![ - (0, 20), // center: 10 - (20, 40), // center: 30 - (40, 60), // center: 50 - (60, 80), // center: 70 - (80, 100) // center: 90 - ].into_iter().map(|(a, b)| (a as f64, b as f64)).collect_vec(), - // range covered by each entry: 14 (other than first and last, which each cover 15) [100/5 = 14.2857142857] - 7 => vec![ - (0, 15), // center: 8 (rounded up, since 50 is anchor) - (15, 30), // center: 22 - (30, 45), // center: 36 - (45, 55), // center: 50 - (55, 70), // center: 64 - (70, 85), // center: 78 - (85, 100) // center: 92 (rounded down, since 50 is anchor) - ].into_iter().map(|(a, b)| (a as f64, b as f64)).collect_vec(), - _ => panic!("Label-count ({}) doesn't match any of the implemented values (1,5,7).", labels.len()), - }; - ranges.into_iter().enumerate().map(|(index, range)| { - let label = labels[index]; - //const rangeDist = range[1] - range[0]; - let center_fractional = average(&[range.0, range.1]); - return ValueRange { - min: range.0, - max: range.1, - center: - if center_fractional.fract() == 0.0 { center_fractional } // if average is int, use that - else if range.0 < 50f64 { center_fractional.floor() + 1f64 } // else, if below 50 (anchor), round up toward it - else { center_fractional.floor() }, // else, must be above 50 (anchor), so round down toward it - label: label.to_owned(), - }; - }).collect_vec() -} \ No newline at end of file + 1 => vec![ + (0, 100), // center: 50 + ] + .into_iter() + .map(|(a, b)| (a as f64, b as f64)) + .collect_vec(), + // range covered by each entry: 20 [100/5 = 20] + 5 => vec![ + (0, 20), // center: 10 + (20, 40), // center: 30 + (40, 60), // center: 50 + (60, 80), // center: 70 + (80, 100), // center: 90 + ] + .into_iter() + .map(|(a, b)| (a as f64, b as f64)) + .collect_vec(), + // range covered by each entry: 14 (other than first and last, which each cover 15) [100/5 = 14.2857142857] + 7 => vec![ + (0, 15), // center: 8 (rounded up, since 50 is anchor) + (15, 30), // center: 22 + (30, 45), // center: 36 + (45, 55), // center: 50 + (55, 70), // center: 64 + (70, 85), // center: 78 + (85, 100), // center: 92 (rounded down, since 50 is anchor) + ] + .into_iter() + .map(|(a, b)| (a as f64, b as f64)) + .collect_vec(), + _ => panic!("Label-count ({}) doesn't match any of the implemented values (1,5,7).", labels.len()), + }; + ranges + .into_iter() + .enumerate() + .map(|(index, range)| { + let label = labels[index]; + //const rangeDist = range[1] - range[0]; + let center_fractional = average(&[range.0, range.1]); + return ValueRange { + min: range.0, + max: range.1, + center: if center_fractional.fract() == 0.0 { + center_fractional + } + // if average is int, use that + else if range.0 < 50f64 { + center_fractional.floor() + 1f64 + } + // else, if below 50 (anchor), round up toward it + else { + center_fractional.floor() + }, // else, must be above 50 (anchor), so round down toward it + label: label.to_owned(), + }; + }) + .collect_vec() +} diff --git a/Packages/app-server/src/db/node_revisions.rs b/Packages/app-server/src/db/node_revisions.rs index fb4c3e860..be82e7ec4 100644 --- a/Packages/app-server/src/db/node_revisions.rs +++ b/Packages/app-server/src/db/node_revisions.rs @@ -1,21 +1,25 @@ use deadpool_postgres::tokio_postgres::Row; +use futures_util::{stream, Stream, TryFutureExt}; use rust_shared::anyhow::Error; -use rust_shared::{SubError, GQLError}; use rust_shared::async_graphql; use rust_shared::async_graphql::ComplexObject; use rust_shared::async_graphql::Enum; -use rust_shared::async_graphql::{Context, Object, Schema, 
Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json; use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Client}; +use rust_shared::tokio_postgres::Client; use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{GQLError, SubError}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput, accessors::{AccessorContext, get_db_entry}}}; +use crate::utils::db::{ + accessors::{get_db_entry, AccessorContext}, + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::_shared::access_policy_target::AccessPolicyTarget; use super::_shared::attachments::Attachment; @@ -23,71 +27,72 @@ use super::node_phrasings::NodePhrasing; use super::node_phrasings::NodePhrasing_Embedded; use super::nodes::get_node; +#[rustfmt::skip] pub async fn get_node_revision(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "nodeRevisions", &Some(json!({ "id": {"equalTo": id} }))).await } -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] #[graphql(complex)] pub struct NodeRevision { - pub id: ID, - pub creator: String, - pub createdAt: i64, - pub node: String, - /// Warning: In rare cases, this can reference a node-revision that no longer exists. (eg. if admin force-deleted a node-revision) - pub replacedBy: Option, - pub phrasing: NodePhrasing_Embedded, - /*#[graphql(name = "phrasing_tsvector")] - #[serde(skip_serializing)] // makes-so when serializing the struct for saving to the db, this field is excluded (as it must be, since it's auto-generated) - pub phrasing_tsvector: String,*/ - pub displayDetails: Option, - pub attachments: Vec, - - #[graphql(name = "c_accessPolicyTargets")] - pub c_accessPolicyTargets: Vec, + pub id: ID, + pub creator: String, + pub createdAt: i64, + pub node: String, + /// Warning: In rare cases, this can reference a node-revision that no longer exists. (eg. 
if admin force-deleted a node-revision)
+	pub replacedBy: Option<String>,
+	pub phrasing: NodePhrasing_Embedded,
+	/*#[graphql(name = "phrasing_tsvector")]
+	#[serde(skip_serializing)] // makes-so when serializing the struct for saving to the db, this field is excluded (as it must be, since it's auto-generated)
+	pub phrasing_tsvector: String,*/
+	pub displayDetails: Option<JSONValue>,
+	pub attachments: Vec<Attachment>,
+
+	#[graphql(name = "c_accessPolicyTargets")]
+	pub c_accessPolicyTargets: Vec<AccessPolicyTarget>,
 }
 
 #[ComplexObject]
 impl NodeRevision {
-	/*#[graphql(visible = false)]
-	// todo: make-so the field has this as its actual type (delayed since it means a change in the graphql api)
+	/*#[graphql(visible = false)]
+	// todo: make-so the field has this as its actual type (delayed since it means a change in the graphql api)
 	pub fn display_details_known(&self) -> Result<Option<NodeRevisionDisplayDetails>, Error> {
-		Ok(match &self.displayDetails {
-			None => None,
-			Some(raw_data) => Some(serde_json::from_value(raw_data.clone())?),
-		})
+		Ok(match &self.displayDetails {
+			None => None,
+			Some(raw_data) => Some(serde_json::from_value(raw_data.clone())?),
+		})
 	}*/
-	#[graphql(visible = false)]
-	// field kept around only for backwards compatibility (refs: papers-app)
-	async fn note(&self) -> Option<String> {
-		self.phrasing.note.clone()
-	}
+	#[graphql(visible = false)]
+	// field kept around only for backwards compatibility (refs: papers-app)
+	async fn note(&self) -> Option<String> {
+		self.phrasing.note.clone()
+	}
 }
 
 impl NodeRevision {
-	pub fn into_input(self, keep_node: bool) -> NodeRevisionInput {
+	pub fn into_input(self, keep_node: bool) -> NodeRevisionInput {
 		NodeRevisionInput {
-			node: if keep_node { Some(self.node) } else { None },
-			phrasing: self.phrasing,
-			displayDetails: self.displayDetails,
-			attachments: self.attachments,
+			node: if keep_node { Some(self.node) } else { None },
+			phrasing: self.phrasing,
+			displayDetails: self.displayDetails,
+			attachments: self.attachments,
 		}
 	}
 }
 
 impl From<Row> for NodeRevision {
-	fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() }
+	fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() }
 }
 
 #[derive(InputObject, Clone, Serialize, Deserialize, Default)]
 pub struct NodeRevisionInput {
-	/// Marked as optional, since in some contexts it's not needed. (eg. for add_node, add_child_node, etc.)
-	pub node: Option<String>,
-	pub phrasing: NodePhrasing_Embedded,
-	pub displayDetails: Option<JSONValue>,
-	pub attachments: Vec<Attachment>,
+	/// Marked as optional, since in some contexts it's not needed. (eg. for add_node, add_child_node, etc.)
+ pub node: Option, + pub phrasing: NodePhrasing_Embedded, + pub displayDetails: Option, + pub attachments: Vec, } #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] @@ -99,25 +104,25 @@ pub struct NodeRevisionDisplayDetails { } #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, Debug)] pub enum ChildLayout { - #[graphql(name = "grouped")] grouped, - #[graphql(name = "dmStandard")] dmStandard, - #[graphql(name = "slStandard")] slStandard, - #[graphql(name = "flat")] flat, + #[graphql(name = "grouped")] grouped, + #[graphql(name = "dmStandard")] dmStandard, + #[graphql(name = "slStandard")] slStandard, + #[graphql(name = "flat")] flat, } #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, Debug)] pub enum ChildOrdering { - //#[graphql(name = "unchanged")] unchanged, - #[graphql(name = "manual")] manual, - #[graphql(name = "date")] date, - #[graphql(name = "votes")] votes, - #[graphql(name = "reasonScore")] reasonScore, + //#[graphql(name = "unchanged")] unchanged, + #[graphql(name = "manual")] manual, + #[graphql(name = "date")] date, + #[graphql(name = "votes")] votes, + #[graphql(name = "reasonScore")] reasonScore, } #[derive(Clone)] pub struct GQLSet_NodeRevision { pub nodes: Vec } #[Object] impl GQLSet_NodeRevision { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_NodeRevision { - fn from(entries: Vec) -> GQLSet_NodeRevision { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_NodeRevision { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_NodeRevision; @@ -132,12 +137,12 @@ impl GQLSet for GQLSet_NodeRevision { #[derive(Default)] pub struct SubscriptionShard_NodeRevision; #[Subscription] impl SubscriptionShard_NodeRevision { - async fn nodeRevisions<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "nodeRevisions", filter).await - } - async fn nodeRevision<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "nodeRevisions", id).await - } + async fn nodeRevisions<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "nodeRevisions", filter).await + } + async fn nodeRevision<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "nodeRevisions", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/node_tags.rs b/Packages/app-server/src/db/node_tags.rs index e34997569..f9b4c126b 100644 --- a/Packages/app-server/src/db/node_tags.rs +++ b/Packages/app-server/src/db/node_tags.rs @@ -1,113 +1,119 @@ -use rust_shared::indexmap::IndexSet; +use futures_util::{stream, Stream, TryFutureExt}; use rust_shared::anyhow::Error; -use rust_shared::itertools::Itertools; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{SubError, serde_json, GQLError}; use rust_shared::async_graphql; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; +use rust_shared::indexmap::IndexSet; +use rust_shared::itertools::Itertools; use 
rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{serde_json, GQLError, SubError}; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput, pg_row_to_json::{postgres_row_to_json_value, postgres_row_to_struct}}}; -use crate::utils::db::accessors::{get_db_entry, AccessorContext, get_db_entries}; +use crate::utils::db::accessors::{get_db_entries, get_db_entry, AccessorContext}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; +use crate::utils::db::{ + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, + pg_row_to_json::{postgres_row_to_json_value, postgres_row_to_struct}, +}; use super::_shared::access_policy_target::AccessPolicyTarget; -use super::commands::_command::{CanOmit, CanNullOrOmit}; +use super::commands::_command::{CanNullOrOmit, CanOmit}; use super::nodes::get_node; +#[rustfmt::skip] pub async fn get_node_tag(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "nodeTags", &Some(json!({ "id": {"equalTo": id} }))).await } +#[rustfmt::skip] pub async fn get_node_tags(ctx: &AccessorContext<'_>, node_id: &str) -> Result, Error> { get_db_entries(ctx, "nodeTags", &Some(json!({ "nodes": {"contains": [node_id]} }))).await } -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct NodeTag { - pub id: ID, - pub creator: String, - pub createdAt: i64, - pub nodes: Vec, - pub labels: Option, - pub mirrorChildrenFromXToY: Option, - pub xIsExtendedByY: Option, - pub mutuallyExclusiveGroup: Option, - pub restrictMirroringOfX: Option, - pub cloneHistory: Option, - - #[graphql(name = "c_accessPolicyTargets")] - pub c_accessPolicyTargets: Vec, + pub id: ID, + pub creator: String, + pub createdAt: i64, + pub nodes: Vec, + pub labels: Option, + pub mirrorChildrenFromXToY: Option, + pub xIsExtendedByY: Option, + pub mutuallyExclusiveGroup: Option, + pub restrictMirroringOfX: Option, + pub cloneHistory: Option, + + #[graphql(name = "c_accessPolicyTargets")] + pub c_accessPolicyTargets: Vec, } impl From for NodeTag { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } impl NodeTag { - pub fn get_tag_comps(&self) -> Vec> { - // there's probably a cleaner way to do this - let mut result: Vec> = vec![]; - result.extend(self.labels.iter().map(|x| Box::new(x.clone()) as Box)); - result.extend(self.mirrorChildrenFromXToY.iter().map(|x| Box::new(x.clone()) as Box)); - result.extend(self.xIsExtendedByY.iter().map(|x| Box::new(x.clone()) as Box)); - result.extend(self.mutuallyExclusiveGroup.iter().map(|x| Box::new(x.clone()) as Box)); - result.extend(self.restrictMirroringOfX.iter().map(|x| Box::new(x.clone()) as Box)); - result.extend(self.cloneHistory.iter().map(|x| Box::new(x.clone()) as Box)); - result - } - pub fn calculate_new_nodes_list(&self) -> Vec { - let mut result = IndexSet::::new(); - for comp in self.get_tag_comps() { - result.extend(comp.get_node_ids()); - } - result.into_iter().collect::>() - } - pub fn to_input(&self) -> NodeTagInput { - NodeTagInput { - nodes: self.nodes.clone(), - labels: self.labels.clone(), - mirrorChildrenFromXToY: self.mirrorChildrenFromXToY.clone(), - xIsExtendedByY: self.xIsExtendedByY.clone(), - mutuallyExclusiveGroup: self.mutuallyExclusiveGroup.clone(), - restrictMirroringOfX: self.restrictMirroringOfX.clone(), - cloneHistory: self.cloneHistory.clone(), - } - } + pub fn get_tag_comps(&self) -> Vec> { + // there's probably a cleaner way to do this + let mut result: Vec> = vec![]; + result.extend(self.labels.iter().map(|x| Box::new(x.clone()) as Box)); + result.extend(self.mirrorChildrenFromXToY.iter().map(|x| Box::new(x.clone()) as Box)); + result.extend(self.xIsExtendedByY.iter().map(|x| Box::new(x.clone()) as Box)); + result.extend(self.mutuallyExclusiveGroup.iter().map(|x| Box::new(x.clone()) as Box)); + result.extend(self.restrictMirroringOfX.iter().map(|x| Box::new(x.clone()) as Box)); + result.extend(self.cloneHistory.iter().map(|x| Box::new(x.clone()) as Box)); + result + } + pub fn calculate_new_nodes_list(&self) -> Vec { + let mut result = IndexSet::::new(); + for comp in self.get_tag_comps() { + result.extend(comp.get_node_ids()); + } + result.into_iter().collect::>() + } + pub fn to_input(&self) -> NodeTagInput { + NodeTagInput { + nodes: self.nodes.clone(), + labels: self.labels.clone(), + mirrorChildrenFromXToY: self.mirrorChildrenFromXToY.clone(), + xIsExtendedByY: self.xIsExtendedByY.clone(), + mutuallyExclusiveGroup: self.mutuallyExclusiveGroup.clone(), + restrictMirroringOfX: self.restrictMirroringOfX.clone(), + cloneHistory: self.cloneHistory.clone(), + } + } } #[derive(InputObject, Clone, Serialize, Deserialize, Default)] pub struct NodeTagInput { - pub nodes: Vec, - pub 
labels: Option, - pub mirrorChildrenFromXToY: Option, - pub xIsExtendedByY: Option, - pub mutuallyExclusiveGroup: Option, - pub restrictMirroringOfX: Option, - pub cloneHistory: Option, + pub nodes: Vec, + pub labels: Option, + pub mirrorChildrenFromXToY: Option, + pub xIsExtendedByY: Option, + pub mutuallyExclusiveGroup: Option, + pub restrictMirroringOfX: Option, + pub cloneHistory: Option, } #[derive(InputObject, Serialize, Deserialize)] pub struct NodeTagUpdates { - pub nodes: CanOmit>, - pub labels: CanNullOrOmit, - pub mirrorChildrenFromXToY: CanNullOrOmit, - pub xIsExtendedByY: CanNullOrOmit, - pub mutuallyExclusiveGroup: CanNullOrOmit, - pub restrictMirroringOfX: CanNullOrOmit, - pub cloneHistory: CanNullOrOmit, + pub nodes: CanOmit>, + pub labels: CanNullOrOmit, + pub mirrorChildrenFromXToY: CanNullOrOmit, + pub xIsExtendedByY: CanNullOrOmit, + pub mutuallyExclusiveGroup: CanNullOrOmit, + pub restrictMirroringOfX: CanNullOrOmit, + pub cloneHistory: CanNullOrOmit, } pub trait TagComp { - fn get_node_ids(&self) -> Vec; + fn get_node_ids(&self) -> Vec; } #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] @@ -118,13 +124,13 @@ pub struct TagComp_Labels { pub labels: Vec, } impl TagComp for TagComp_Labels { - fn get_node_ids(&self) -> Vec { vec![self.nodeX.clone()] } + fn get_node_ids(&self) -> Vec { vec![self.nodeX.clone()] } } #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] #[graphql(input_name = "TagCompMirrorChildrenFromXToYInput")] pub struct TagComp_MirrorChildrenFromXToY { - pub nodeX: String, + pub nodeX: String, pub nodeY: String, pub mirrorSupporting: bool, pub mirrorOpposing: bool, @@ -132,54 +138,54 @@ pub struct TagComp_MirrorChildrenFromXToY { pub disableDirectChildren: bool, } impl TagComp for TagComp_MirrorChildrenFromXToY { - fn get_node_ids(&self) -> Vec { vec![self.nodeX.clone(), self.nodeY.clone()] } + fn get_node_ids(&self) -> Vec { vec![self.nodeX.clone(), self.nodeY.clone()] } } #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] #[graphql(input_name = "TagCompXIsExtendedByYInput")] pub struct TagComp_XIsExtendedByY { - pub nodeX: String, + pub nodeX: String, pub nodeY: String, } impl TagComp for TagComp_XIsExtendedByY { - fn get_node_ids(&self) -> Vec { vec![self.nodeX.clone(), self.nodeY.clone()] } + fn get_node_ids(&self) -> Vec { vec![self.nodeX.clone(), self.nodeY.clone()] } } #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] #[graphql(input_name = "TagCompMutuallyExclusiveGroupInput")] pub struct TagComp_MutuallyExclusiveGroup { - pub nodes: Vec, + pub nodes: Vec, pub mirrorXProsAsYCons: bool, } impl TagComp for TagComp_MutuallyExclusiveGroup { - fn get_node_ids(&self) -> Vec { self.nodes.clone() } + fn get_node_ids(&self) -> Vec { self.nodes.clone() } } #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] #[graphql(input_name = "TagCompRestrictMirroringOfXInput")] pub struct TagComp_RestrictMirroringOfX { - nodeX: String, + nodeX: String, blacklistAllMirrorParents: bool, blacklistedMirrorParents: Vec, } impl TagComp for TagComp_RestrictMirroringOfX { - fn get_node_ids(&self) -> Vec { vec![vec![self.nodeX.clone()], self.blacklistedMirrorParents.clone()].concat() } + fn get_node_ids(&self) -> Vec { vec![vec![self.nodeX.clone()], self.blacklistedMirrorParents.clone()].concat() } } #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] #[graphql(input_name = "TagCompCloneHistoryInput")] pub struct TagComp_CloneHistory { - pub cloneChain: Vec, + pub cloneChain: 
Vec, } impl TagComp for TagComp_CloneHistory { - fn get_node_ids(&self) -> Vec { self.cloneChain.clone() } + fn get_node_ids(&self) -> Vec { self.cloneChain.clone() } } #[derive(Clone)] pub struct GQLSet_NodeTag { pub nodes: Vec } #[Object] impl GQLSet_NodeTag { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_NodeTag { - fn from(entries: Vec) -> GQLSet_NodeTag { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_NodeTag { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_NodeTag; @@ -194,12 +200,12 @@ impl GQLSet for GQLSet_NodeTag { #[derive(Default)] pub struct SubscriptionShard_NodeTag; #[Subscription] impl SubscriptionShard_NodeTag { - async fn nodeTags<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "nodeTags", filter).await - } - async fn nodeTag<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "nodeTags", id).await - } + async fn nodeTags<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "nodeTags", filter).await + } + async fn nodeTag<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "nodeTags", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/nodes.rs b/Packages/app-server/src/db/nodes.rs index 2aee945c9..c1bd2e1e6 100644 --- a/Packages/app-server/src/db/nodes.rs +++ b/Packages/app-server/src/db/nodes.rs @@ -1,28 +1,32 @@ use futures_util::future::join_all; -use rust_shared::indexmap::IndexMap; -use rust_shared::anyhow::{Error, anyhow, bail}; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{SubError, serde_json, GQLError, to_gql_err}; +use futures_util::{stream, Stream, TryFutureExt}; +use rust_shared::anyhow::{anyhow, bail, Error}; use rust_shared::async_graphql::{self, Enum}; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; +use rust_shared::indexmap::IndexMap; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{serde_json, to_gql_err, GQLError, SubError}; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput, accessors::{AccessorContext, get_db_entry}}}; +use crate::utils::db::{ + accessors::{get_db_entry, AccessorContext}, + filter::FilterInput, + 
generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; -use super::general::permission_helpers::{is_user_creator_or_mod, assert_user_can_delete}; +use super::general::permission_helpers::{assert_user_can_delete, is_user_creator_or_mod}; use super::node_links::get_node_links; -use super::nodes_::_node::{Node}; +use super::nodes_::_node::Node; use super::nodes_::_node_type::NodeType; use super::users::User; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Clone)] pub struct GQLSet_Node { pub nodes: Vec } #[Object] impl GQLSet_Node { async fn nodes(&self) -> &Vec { &self.nodes } } @@ -53,6 +57,7 @@ impl GQLSet for GQLSet_Node { } +#[rustfmt::skip] pub async fn get_node(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "nodes", &Some(json!({ "id": {"equalTo": id} @@ -83,9 +88,13 @@ pub async fn get_node_parents(ctx: &AccessorContext<'_>, node_id: &str) -> Resul // sync:js pub async fn is_root_node(ctx: &AccessorContext<'_>, node: &Node) -> Result { - if node.r#type != NodeType::category { return Ok(false); } + if node.r#type != NodeType::category { + return Ok(false); + } let parents = get_node_links(ctx, None, Some(node.id.as_str())).await?; - if parents.len() != 0 { return Ok(false); } // todo: probably change this (map root-nodes can have "parents" now I think, due to restructuring) + if parents.len() != 0 { + return Ok(false); + } // todo: probably change this (map root-nodes can have "parents" now I think, due to restructuring) Ok(true) } @@ -93,7 +102,7 @@ pub async fn is_root_node(ctx: &AccessorContext<'_>, node: &Node) -> Result, actor: &User, node: &Node, as_part_of_map_delete: bool, parents_to_ignore: Vec, children_to_ignore: Vec) -> Result<(), Error> { // first check generic delete permissions assert_user_can_delete(&ctx, &actor, node).await?; - + let base_text = format!("Cannot delete node #{}, since ", node.id.as_str()); // todo: I think this should be removed now, since permissions are handled by generic access-policy check above if !is_user_creator_or_mod(actor, &node.creator) { @@ -114,4 +123,4 @@ pub async fn assert_user_can_delete_node(ctx: &AccessorContext<'_>, actor: &User bail!("Cannot delete this node (#{}) until all its children have been unlinked or deleted.", node.id.as_str()); } return Ok(()); -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/nodes_/_node.rs b/Packages/app-server/src/db/nodes_/_node.rs index 39324fe86..5a0c121c2 100644 --- a/Packages/app-server/src/db/nodes_/_node.rs +++ b/Packages/app-server/src/db/nodes_/_node.rs @@ -1,18 +1,21 @@ use deadpool_postgres::tokio_postgres::Row; +use rust_shared::async_graphql::{self, Enum, InputObject, SimpleObject, ID}; use rust_shared::indexmap::IndexMap; use rust_shared::serde_json; use rust_shared::utils::type_aliases::JSONValue; use rust_shared::{anyhow::Error, rust_macros::wrap_slow_macros}; -use rust_shared::async_graphql::{self, ID, Enum, SimpleObject, InputObject}; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use crate::db::commands::_command::{CanNullOrOmit, CanOmit}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::{db::node_links::{get_node_links, ClaimForm, get_first_link_under_parent}, utils::db::accessors::AccessorContext}; +use crate::{ + db::node_links::{get_first_link_under_parent, get_node_links, ClaimForm}, + utils::db::accessors::AccessorContext, +}; use super::_node_type::NodeType; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] pub enum ArgumentType { @@ -24,7 +27,7 @@ pub enum ArgumentType { #[derive(Clone, Serialize, Deserialize)] pub struct Node_Extras { pub ratingSummaries: Option>, - + // namespaces/patterns used/expected atm: "claimgen:" // commented; new approach is to use extras.TOOL_NAMESPACE.id //pub externalId: Option, @@ -59,7 +62,7 @@ impl Node { pub fn extras_known(&self) -> Result { Ok(serde_json::from_value(self.extras.clone())?) } - pub fn into_input(self, try_keep_extras: bool) -> NodeInput { + pub fn into_input(self, try_keep_extras: bool) -> NodeInput { let extras = match try_keep_extras { false => None, true => match self.extras { @@ -108,4 +111,4 @@ pub struct NodeUpdates { pub async fn get_node_form(ctx: &AccessorContext<'_>, node_id: &str, parent_id: &str) -> Result { let link = get_first_link_under_parent(ctx, &node_id, &parent_id).await?; Ok(link.form.unwrap_or(ClaimForm::base)) -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/nodes_/_node_type.rs b/Packages/app-server/src/db/nodes_/_node_type.rs index f9912270f..d78d12038 100644 --- a/Packages/app-server/src/db/nodes_/_node_type.rs +++ b/Packages/app-server/src/db/nodes_/_node_type.rs @@ -1,24 +1,24 @@ use std::collections::HashMap; -use rust_shared::once_cell::sync::Lazy; +use rust_shared::async_graphql; +use rust_shared::async_graphql::Enum; use rust_shared::itertools::Itertools; +use rust_shared::once_cell::sync::Lazy; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::async_graphql; -use rust_shared::async_graphql::{Enum}; use rust_shared::utils::general::average; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use crate::db::node_links::ChildGroup; -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, Debug)] pub enum NodeType { - #[graphql(name = "category")] category, - #[graphql(name = "package")] package, - #[graphql(name = "multiChoiceQuestion")] multiChoiceQuestion, - #[graphql(name = "claim")] claim, - #[graphql(name = "argument")] argument, + #[graphql(name = "category")] category, + #[graphql(name = "package")] package, + #[graphql(name = "multiChoiceQuestion")] multiChoiceQuestion, + #[graphql(name = "claim")] claim, + #[graphql(name = "argument")] argument, } } @@ -30,6 +30,7 @@ pub struct NodeType_Info { pub childGroup_childTypes: HashMap>, } // sync:js +#[rustfmt::skip] pub static BASE_NODE_TYPE_INFO: Lazy> = Lazy::new(|| { HashMap::from([ (NodeType::category, NodeType_Info { @@ -69,4 +70,4 @@ pub static BASE_NODE_TYPE_INFO: Lazy> = Lazy::n pub fn get_node_type_info(node_type: NodeType) -> &'static NodeType_Info { //return BASE_NODE_TYPE_INFO[&node_type].clone(); return &BASE_NODE_TYPE_INFO[&node_type]; -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/shares.rs b/Packages/app-server/src/db/shares.rs index 58f688744..d3d77260b 100644 --- a/Packages/app-server/src/db/shares.rs +++ b/Packages/app-server/src/db/shares.rs @@ -1,69 +1,73 @@ -use rust_shared::serde_json::json; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{SubError, serde_json, async_graphql, GQLError}; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject, Enum}; -use futures_util::{Stream, stream, TryFutureExt}; +use futures_util::{stream, Stream, TryFutureExt}; +use rust_shared::anyhow::Error; +use rust_shared::async_graphql::{Context, Enum, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; -use rust_shared::anyhow::Error; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{async_graphql, serde_json, GQLError, SubError}; -use crate::utils::db::accessors::{AccessorContext, get_db_entry}; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; +use crate::utils::db::accessors::{get_db_entry, AccessorContext}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput}}; +use crate::utils::db::{ + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::commands::_command::{CanNullOrOmit, CanOmit}; +#[rustfmt::skip] pub async fn get_share(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "shares", &Some(json!({ "id": {"equalTo": id} }))).await } -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] pub enum ShareType { - #[graphql(name = "map")] map, + #[graphql(name = "map")] map, } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct Share { - pub id: ID, + pub id: ID, pub creator: String, pub createdAt: i64, - pub name: String, - pub r#type: ShareType, + pub name: String, + pub r#type: ShareType, pub mapID: Option, pub mapView: JSONValue, } impl From for Share { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(InputObject, Clone, Serialize, Deserialize)] pub struct ShareInput { - pub name: String, + pub name: String, pub r#type: ShareType, - pub mapID: Option, - pub mapView: JSONValue, + pub mapID: Option, + pub mapView: JSONValue, } #[derive(InputObject, Serialize, Deserialize)] pub struct ShareUpdates { - pub name: CanOmit, + pub name: CanOmit, //pub r#type: FieldUpdate, - pub mapID: CanNullOrOmit, - pub mapView: CanOmit, + pub mapID: CanNullOrOmit, + pub mapView: CanOmit, } #[derive(Clone)] pub struct GQLSet_Share { pub nodes: Vec } #[Object] impl GQLSet_Share { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_Share { - fn from(entries: Vec) -> GQLSet_Share { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_Share { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_Share; @@ -78,12 +82,12 @@ impl GQLSet for GQLSet_Share { #[derive(Default)] pub struct SubscriptionShard_Share; #[Subscription] impl SubscriptionShard_Share { - async fn shares<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "shares", filter).await - } - async fn share<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "shares", id).await - } + async fn shares<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "shares", filter).await + } + async fn share<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "shares", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/terms.rs b/Packages/app-server/src/db/terms.rs index 3fba55a86..bfb930f70 100644 --- a/Packages/app-server/src/db/terms.rs +++ b/Packages/app-server/src/db/terms.rs @@ -1,22 +1,27 @@ +use futures_util::{stream, Stream, TryFutureExt}; use rust_shared::anyhow::Error; -use rust_shared::{SubError, serde_json, futures, GQLError}; -use rust_shared::async_graphql::{self, MaybeUndefined, Enum}; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{self, Enum, MaybeUndefined}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::{futures, serde_json, GQLError, 
SubError}; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput, accessors::{AccessorContext, get_db_entry}}}; +use crate::utils::db::{ + accessors::{get_db_entry, AccessorContext}, + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::_shared::attachments::Attachment; -use super::commands::_command::{CanOmit, CanNullOrOmit}; -use super::{node_revisions::{get_node_revision}}; +use super::commands::_command::{CanNullOrOmit, CanOmit}; +use super::node_revisions::get_node_revision; +#[rustfmt::skip] pub async fn get_term(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "terms", &Some(json!({ "id": {"equalTo": id} @@ -24,17 +29,15 @@ pub async fn get_term(ctx: &AccessorContext<'_>, id: &str) -> Result, node_rev_id: &str) -> Result, Error> { - let rev = get_node_revision(ctx, node_rev_id).await?; - /*let empty = &vec![]; - let term_values = rev.phrasing["terms"].as_array().unwrap_or(empty);*/ - let terms_futures = rev.phrasing.terms.into_iter().map(|attachment| async move { - get_term(ctx, &attachment.id).await.unwrap() - }); - let terms: Vec = futures::future::join_all(terms_futures).await; - Ok(terms) + let rev = get_node_revision(ctx, node_rev_id).await?; + /*let empty = &vec![]; + let term_values = rev.phrasing["terms"].as_array().unwrap_or(empty);*/ + let terms_futures = rev.phrasing.terms.into_iter().map(|attachment| async move { get_term(ctx, &attachment.id).await.unwrap() }); + let terms: Vec = futures::future::join_all(terms_futures).await; + Ok(terms) } -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] pub enum TermType { @@ -47,53 +50,53 @@ pub enum TermType { #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct Term { - pub id: ID, + pub id: ID, pub creator: String, pub createdAt: i64, - pub accessPolicy: String, - pub name: String, + pub accessPolicy: String, + pub name: String, pub forms: Vec, - pub disambiguation: Option, - pub r#type: TermType, - pub definition: String, - pub note: Option, - pub attachments: Vec, + pub disambiguation: Option, + pub r#type: TermType, + pub definition: String, + pub note: Option, + pub attachments: Vec, } impl From for Term { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(InputObject, Clone, Serialize, Deserialize)] pub struct TermInput { - pub accessPolicy: String, - pub name: String, + pub accessPolicy: String, + pub name: String, pub forms: Vec, - pub disambiguation: Option, - pub r#type: TermType, - pub definition: String, - pub note: Option, - pub attachments: Vec, + pub disambiguation: Option, + pub r#type: TermType, + pub definition: String, + pub note: Option, + pub attachments: Vec, } #[derive(InputObject, Serialize, Deserialize)] pub struct TermUpdates { - pub accessPolicy: CanOmit, - pub name: CanOmit, + pub accessPolicy: CanOmit, + pub name: CanOmit, pub forms: CanOmit>, - pub disambiguation: CanNullOrOmit, - pub r#type: CanOmit, - pub definition: CanOmit, - pub note: CanNullOrOmit, - pub attachments: CanOmit>, + pub disambiguation: CanNullOrOmit, + pub r#type: CanOmit, + pub definition: CanOmit, + pub note: CanNullOrOmit, + pub attachments: CanOmit>, } #[derive(Clone)] pub struct GQLSet_Term { pub nodes: Vec } #[Object] impl GQLSet_Term { async fn nodes(&self) -> &Vec { &self.nodes } } //#[async_trait] impl GQLSet for GQLSet_Term { - fn from(entries: Vec) -> GQLSet_Term { Self { nodes: entries } } - //async fn nodes(&self) -> &Vec { &self.nodes } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_Term { Self { nodes: entries } } + //async fn nodes(&self) -> &Vec { &self.nodes } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_Term; @@ -108,12 +111,12 @@ impl GQLSet for GQLSet_Term { #[derive(Default)] pub struct SubscriptionShard_Term; #[Subscription] impl SubscriptionShard_Term { - async fn terms<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "terms", filter).await - } - async fn term<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "terms", id).await - } + async fn terms<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "terms", filter).await + } + async fn term<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "terms", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/timeline_steps.rs b/Packages/app-server/src/db/timeline_steps.rs index b0dd436a3..174a32f99 100644 --- a/Packages/app-server/src/db/timeline_steps.rs +++ b/Packages/app-server/src/db/timeline_steps.rs @@ -1,58 +1,64 @@ +use futures_util::{stream, Stream, TryFutureExt}; use rust_shared::anyhow::Error; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{SubError, serde_json, 
futures, GQLError}; -use rust_shared::async_graphql::{self, MaybeUndefined, Enum}; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{self, Enum, MaybeUndefined}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{futures, serde_json, GQLError, SubError}; use crate::utils::db::accessors::get_db_entries; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; +use crate::utils::db::{ + accessors::{get_db_entry, AccessorContext}, + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use crate::utils::general::order_key::OrderKey; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput, accessors::{AccessorContext, get_db_entry}}}; use super::_shared::access_policy_target::AccessPolicyTarget; use super::_shared::attachments::Attachment; -use super::commands::_command::{CanOmit, CanNullOrOmit}; -use super::{node_revisions::{get_node_revision}}; +use super::commands::_command::{CanNullOrOmit, CanOmit}; +use super::node_revisions::get_node_revision; +#[rustfmt::skip] pub async fn get_timeline_step(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "timelineSteps", &Some(json!({ "id": {"equalTo": id} }))).await } +#[rustfmt::skip] pub async fn get_timeline_steps(ctx: &AccessorContext<'_>, timeline_id: &str) -> Result, Error> { get_db_entries(ctx, "timelineSteps", &Some(json!({ "timelineID": {"equalTo": timeline_id} }))).await } -wrap_slow_macros!{ +wrap_slow_macros! { // commented; these are the only options for now, but later we want the "group" to be a freeform field, eg. 
for marking which person is speaking for a given step /*#[derive(Enum, Copy, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, Debug)] pub enum TimelineStepGroup { - #[graphql(name = "full")] full, - #[graphql(name = "left")] left, - #[graphql(name = "right")] right, - #[graphql(name = "center")] center, + #[graphql(name = "full")] full, + #[graphql(name = "left")] left, + #[graphql(name = "right")] right, + #[graphql(name = "center")] center, }*/ #[derive(SimpleObject, InputObject, Clone, Serialize, Deserialize)] #[graphql(input_name = "NodeRevealInput")] pub struct NodeReveal { path: String, - show: Option, - #[graphql(name = "show_revealDepth")] - show_revealDepth: Option, - changeFocusLevelTo: Option, - setExpandedTo: Option, - hide: Option, + show: Option, + #[graphql(name = "show_revealDepth")] + show_revealDepth: Option, + changeFocusLevelTo: Option, + setExpandedTo: Option, + hide: Option, } #[derive(Clone, Serialize, Deserialize)] @@ -61,41 +67,41 @@ pub fn timeline_step_extras_locked_subfields() -> Vec<&'static str> { vec![] } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct TimelineStep { - pub id: ID, + pub id: ID, pub creator: String, pub createdAt: i64, - pub timelineID: String, - pub orderKey: OrderKey, + pub timelineID: String, + pub orderKey: OrderKey, pub groupID: String, - pub timeFromStart: Option, - pub timeFromLastStep: Option, - pub timeUntilNextStep: Option, + pub timeFromStart: Option, + pub timeFromLastStep: Option, + pub timeUntilNextStep: Option, pub message: String, pub extras: JSONValue, - #[graphql(name = "c_accessPolicyTargets")] - pub c_accessPolicyTargets: Vec, + #[graphql(name = "c_accessPolicyTargets")] + pub c_accessPolicyTargets: Vec, } impl From for TimelineStep { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(InputObject, Clone, Serialize, Deserialize)] pub struct TimelineStepInput { - pub timelineID: String, - pub orderKey: OrderKey, - pub groupID: String, - pub timeFromStart: Option, - pub timeFromLastStep: Option, - pub timeUntilNextStep: Option, + pub timelineID: String, + pub orderKey: OrderKey, + pub groupID: String, + pub timeFromStart: Option, + pub timeFromLastStep: Option, + pub timeUntilNextStep: Option, pub message: String, pub extras: CanOmit, } #[derive(InputObject, Serialize, Deserialize)] pub struct TimelineStepUpdates { - pub orderKey: CanOmit, - pub groupID: CanOmit, + pub orderKey: CanOmit, + pub groupID: CanOmit, pub timeFromStart: CanNullOrOmit, pub timeFromLastStep: CanNullOrOmit, pub timeUntilNextStep: CanNullOrOmit, @@ -106,8 +112,8 @@ pub struct TimelineStepUpdates { #[derive(Clone)] pub struct GQLSet_TimelineStep { pub nodes: Vec } #[Object] impl GQLSet_TimelineStep { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_TimelineStep { - fn from(entries: Vec) -> GQLSet_TimelineStep { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_TimelineStep { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_TimelineStep; @@ -122,12 +128,12 @@ impl GQLSet for GQLSet_TimelineStep { #[derive(Default)] pub struct SubscriptionShard_TimelineStep; #[Subscription] impl SubscriptionShard_TimelineStep { - async fn timelineSteps<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "timelineSteps", filter).await - } - async fn 
timelineStep<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "timelineSteps", id).await - } + async fn timelineSteps<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "timelineSteps", filter).await + } + async fn timelineStep<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "timelineSteps", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/timelines.rs b/Packages/app-server/src/db/timelines.rs index 856e121a8..b8b7edd45 100644 --- a/Packages/app-server/src/db/timelines.rs +++ b/Packages/app-server/src/db/timelines.rs @@ -1,41 +1,46 @@ +use futures_util::{stream, Stream, TryFutureExt}; use rust_shared::anyhow::Error; -use rust_shared::{SubError, serde_json, futures, GQLError}; -use rust_shared::async_graphql::{self, MaybeUndefined, Enum}; -use rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{self, Enum, MaybeUndefined}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::{futures, serde_json, GQLError, SubError}; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput, accessors::{AccessorContext, get_db_entry}}}; +use crate::utils::db::{ + accessors::{get_db_entry, AccessorContext}, + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::_shared::attachments::Attachment; -use super::commands::_command::{CanOmit, CanNullOrOmit}; -use super::{node_revisions::{get_node_revision}}; +use super::commands::_command::{CanNullOrOmit, CanOmit}; +use super::node_revisions::get_node_revision; +#[rustfmt::skip] pub async fn get_timeline(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "timelines", &Some(json!({ "id": {"equalTo": id} }))).await } -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct Timeline { - pub id: ID, + pub id: ID, pub creator: String, pub createdAt: i64, - pub accessPolicy: String, - pub mapID: String, - pub name: String, + pub accessPolicy: String, + pub mapID: String, + pub name: String, pub videoID: Option, - pub videoStartTime: Option, - pub videoHeightVSWidthPercent: Option, + pub videoStartTime: Option, + pub videoHeightVSWidthPercent: Option, //pub extras: JSONValue, } /*impl Timeline { @@ -44,7 +49,7 @@ pub struct Timeline { } }*/ impl From for Timeline { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } /*#[derive(Clone, Serialize, Deserialize)] @@ -53,30 +58,30 @@ pub struct Timeline_Extras { #[derive(InputObject, Clone, Serialize, Deserialize)] pub struct TimelineInput { - pub accessPolicy: String, - pub mapID: String, - pub name: String, + pub accessPolicy: String, + pub mapID: String, + pub name: String, pub videoID: Option, pub videoStartTime: Option, - pub videoHeightVSWidthPercent: Option, + pub videoHeightVSWidthPercent: Option, //pub extras: JSONValue, } #[derive(InputObject, Serialize, Deserialize)] pub struct TimelineUpdates { - pub accessPolicy: CanOmit, - pub name: CanOmit, - pub videoID: CanNullOrOmit, - pub videoStartTime: CanNullOrOmit, - pub videoHeightVSWidthPercent: CanNullOrOmit, + pub accessPolicy: CanOmit, + pub name: CanOmit, + pub videoID: CanNullOrOmit, + pub videoStartTime: CanNullOrOmit, + pub videoHeightVSWidthPercent: CanNullOrOmit, //pub extras: FieldUpdate, } #[derive(Clone)] pub struct GQLSet_Timeline { pub nodes: Vec } #[Object] impl GQLSet_Timeline { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_Timeline { - fn from(entries: Vec) -> GQLSet_Timeline { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_Timeline { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_Timeline; @@ -91,12 +96,12 @@ impl GQLSet for GQLSet_Timeline { #[derive(Default)] pub struct SubscriptionShard_Timeline; #[Subscription] impl SubscriptionShard_Timeline { - async fn timelines<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "timelines", filter).await - } - async fn timeline<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "timelines", id).await - } + async fn timelines<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "timelines", filter).await + } + async fn timeline<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "timelines", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/user_hiddens.rs b/Packages/app-server/src/db/user_hiddens.rs index 40aba6ed4..6a1bf520c 100644 --- a/Packages/app-server/src/db/user_hiddens.rs +++ b/Packages/app-server/src/db/user_hiddens.rs @@ -1,56 +1,60 @@ -use rust_shared::indexmap::IndexMap; -use rust_shared::serde_json::json; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{SubError, serde_json, GQLError}; -use rust_shared::anyhow::{Error}; +use futures_util::{stream, Stream, TryFutureExt}; +use rust_shared::anyhow::Error; use rust_shared::async_graphql; -use 
rust_shared::async_graphql::{Context, Object, Schema, Subscription, ID, OutputType, SimpleObject, InputObject}; -use futures_util::{Stream, stream, TryFutureExt}; +use rust_shared::async_graphql::{Context, InputObject, Object, OutputType, Schema, SimpleObject, Subscription, ID}; +use rust_shared::indexmap::IndexMap; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::tokio_postgres::{Row, Client}; use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{serde_json, GQLError, SubError}; -use crate::utils::db::accessors::{AccessorContext, get_db_entries, get_db_entry}; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; +use crate::utils::db::accessors::{get_db_entries, get_db_entry, AccessorContext}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput}}; +use crate::utils::db::{ + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::commands::_command::{CanNullOrOmit, CanOmit}; +#[rustfmt::skip] pub async fn get_user_hidden(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "userHiddens", &Some(json!({ "id": {"equalTo": id} }))).await } pub async fn get_user_hiddens(ctx: &AccessorContext<'_>, email: Option) -> Result, Error> { - let mut filter_map = serde_json::Map::new(); - if let Some(email) = email { - filter_map.insert("email".to_owned(), json!({"equalTo": email})); - } - get_db_entries(ctx, "userHiddens", &Some(JSONValue::Object(filter_map))).await + let mut filter_map = serde_json::Map::new(); + if let Some(email) = email { + filter_map.insert("email".to_owned(), json!({"equalTo": email})); + } + get_db_entries(ctx, "userHiddens", &Some(JSONValue::Object(filter_map))).await } -wrap_slow_macros!{ +wrap_slow_macros! 
{ #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct UserHidden { - pub id: ID, - pub email: String, - pub providerData: JSONValue, - pub backgroundID: Option, - #[graphql(name = "backgroundCustom_enabled")] - pub backgroundCustom_enabled: Option, - #[graphql(name = "backgroundCustom_color")] - pub backgroundCustom_color: Option, - #[graphql(name = "backgroundCustom_url")] - pub backgroundCustom_url: Option, - #[graphql(name = "backgroundCustom_position")] - pub backgroundCustom_position: Option, - pub addToStream: bool, - // todo: fix that this is not actually getting updated when user creates choosen a different access-policy for a new entry (or, rename this field) - pub lastAccessPolicy: Option, - pub extras: JSONValue, + pub id: ID, + pub email: String, + pub providerData: JSONValue, + pub backgroundID: Option, + #[graphql(name = "backgroundCustom_enabled")] + pub backgroundCustom_enabled: Option, + #[graphql(name = "backgroundCustom_color")] + pub backgroundCustom_color: Option, + #[graphql(name = "backgroundCustom_url")] + pub backgroundCustom_url: Option, + #[graphql(name = "backgroundCustom_position")] + pub backgroundCustom_position: Option, + pub addToStream: bool, + // todo: fix that this is not actually getting updated when user creates choosen a different access-policy for a new entry (or, rename this field) + pub lastAccessPolicy: Option, + pub extras: JSONValue, } impl UserHidden { pub fn extras_known(&self) -> Result { @@ -64,13 +68,13 @@ impl From for UserHidden { #[derive(InputObject, Serialize, Deserialize)] pub struct UserHiddenUpdates { pub backgroundID: CanNullOrOmit, - #[graphql(name = "backgroundCustom_enabled")] + #[graphql(name = "backgroundCustom_enabled")] pub backgroundCustom_enabled: CanNullOrOmit, - #[graphql(name = "backgroundCustom_color")] + #[graphql(name = "backgroundCustom_color")] pub backgroundCustom_color: CanNullOrOmit, - #[graphql(name = "backgroundCustom_url")] + #[graphql(name = "backgroundCustom_url")] pub backgroundCustom_url: CanNullOrOmit, - #[graphql(name = "backgroundCustom_position")] + #[graphql(name = "backgroundCustom_position")] pub backgroundCustom_position: CanNullOrOmit, pub addToStream: CanOmit, pub extras: CanOmit, @@ -78,27 +82,27 @@ pub struct UserHiddenUpdates { #[derive(Clone, Serialize, Deserialize)] pub struct UserHidden_Extras { - pub userFollows: Option>, - pub defaultAccessPolicy_nodeRatings: Option, + pub userFollows: Option>, + pub defaultAccessPolicy_nodeRatings: Option, } pub fn user_hidden_extras_locked_subfields() -> Vec<&'static str> { vec![] } #[derive(InputObject, Clone, Serialize, Deserialize)] pub struct UserFollow { pub markRatings: bool, - #[graphql(name = "markRatings_symbol")] + #[graphql(name = "markRatings_symbol")] pub markRatings_symbol: String, - #[graphql(name = "markRatings_color")] + #[graphql(name = "markRatings_color")] pub markRatings_color: String, - #[graphql(name = "markRatings_size")] + #[graphql(name = "markRatings_size")] pub markRatings_size: f64, } #[derive(Clone)] pub struct GQLSet_UserHidden { pub nodes: Vec } #[Object] impl GQLSet_UserHidden { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_UserHidden { - fn from(entries: Vec) -> GQLSet_UserHidden { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_UserHidden { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_UserHidden; @@ -113,12 +117,12 @@ impl GQLSet for GQLSet_UserHidden { 
#[derive(Default)] pub struct SubscriptionShard_UserHidden; #[Subscription] impl SubscriptionShard_UserHidden { - async fn userHiddens<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "userHiddens", filter).await - } - async fn userHidden<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "userHiddens", id).await - } + async fn userHiddens<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "userHiddens", filter).await + } + async fn userHidden<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "userHiddens", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/db/users.rs b/Packages/app-server/src/db/users.rs index 67f52077c..d89900021 100644 --- a/Packages/app-server/src/db/users.rs +++ b/Packages/app-server/src/db/users.rs @@ -1,61 +1,65 @@ -use rust_shared::{SubError, serde, serde_json, async_graphql, GQLError}; -use rust_shared::anyhow::{Error}; -use rust_shared::async_graphql::{Object, Result, Schema, Subscription, ID, async_stream, OutputType, scalar, EmptySubscription, SimpleObject, InputObject, Context}; -use futures_util::{Stream, stream, TryFutureExt, StreamExt, Future}; +use futures_util::{stream, Future, Stream, StreamExt, TryFutureExt}; +use rust_shared::anyhow::Error; +use rust_shared::async_graphql::{async_stream, scalar, Context, EmptySubscription, InputObject, Object, OutputType, Result, Schema, SimpleObject, Subscription, ID}; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Row, Client}; -use std::{time::Duration, pin::Pin, task::Poll}; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::{async_graphql, serde, serde_json, GQLError, SubError}; +use std::{pin::Pin, task::Poll, time::Duration}; use crate::utils::db::accessors::{get_db_entries, get_db_entry, AccessorContext}; -use crate::utils::db::generic_handlers::queries::{handle_generic_gql_doc_query, handle_generic_gql_collection_query}; +use crate::utils::db::generic_handlers::queries::{handle_generic_gql_collection_query, handle_generic_gql_doc_query}; use crate::utils::db::pg_row_to_json::postgres_row_to_struct; -use crate::utils::{db::{generic_handlers::{subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}}, filter::FilterInput}}; +use crate::utils::db::{ + filter::FilterInput, + generic_handlers::subscriptions::{handle_generic_gql_collection_subscription, handle_generic_gql_doc_subscription, GQLSet}, +}; use super::commands::_command::CanOmit; #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] //#[serde(crate = "rust_shared::serde")] #[allow(clippy::struct_excessive_bools)] pub struct PermissionGroups { - pub basic: bool, - pub verified: bool, - pub r#mod: bool, - pub admin: bool, + pub basic: bool, + pub verified: bool, + pub r#mod: bool, + pub admin: bool, } scalar!(PermissionGroups); impl PermissionGroups { - pub fn all_false() -> Self { - Self { basic: false, verified: false, r#mod: false, admin: false } - } + pub fn all_false() -> Self { + Self { basic: false, verified: false, r#mod: false, admin: false } + } } +#[rustfmt::skip] pub async fn 
get_user(ctx: &AccessorContext<'_>, id: &str) -> Result { get_db_entry(ctx, "users", &Some(json!({ "id": {"equalTo": id} }))).await } pub async fn get_users(ctx: &AccessorContext<'_>) -> Result, Error> { - get_db_entries(ctx, "users", &None).await + get_db_entries(ctx, "users", &None).await } // for postgresql<>rust scalar-type mappings (eg. pg's i8 = rust's i64), see: https://kotiri.com/2018/01/31/postgresql-diesel-rust-types.html -wrap_slow_macros!{ +wrap_slow_macros! { //type User = String; #[derive(SimpleObject, Clone, Serialize, Deserialize, Debug)] pub struct User { - pub id: ID, - pub displayName: String, - pub photoURL: Option, - pub joinDate: i64, - pub permissionGroups: PermissionGroups, - pub edits: i32, - pub lastEditAt: Option, + pub id: ID, + pub displayName: String, + pub photoURL: Option, + pub joinDate: i64, + pub permissionGroups: PermissionGroups, + pub edits: i32, + pub lastEditAt: Option, } impl From for User { - fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } + fn from(row: Row) -> Self { postgres_row_to_struct(row).unwrap() } } #[derive(InputObject, Serialize, Deserialize)] @@ -70,8 +74,8 @@ pub struct UserUpdates { #[derive(Clone)] pub struct GQLSet_User { pub nodes: Vec } #[Object] impl GQLSet_User { async fn nodes(&self) -> &Vec { &self.nodes } } impl GQLSet for GQLSet_User { - fn from(entries: Vec) -> GQLSet_User { Self { nodes: entries } } - fn nodes(&self) -> &Vec { &self.nodes } + fn from(entries: Vec) -> GQLSet_User { Self { nodes: entries } } + fn nodes(&self) -> &Vec { &self.nodes } } #[derive(Default)] pub struct QueryShard_User; @@ -86,12 +90,12 @@ impl GQLSet for GQLSet_User { #[derive(Default)] pub struct SubscriptionShard_User; #[Subscription] impl SubscriptionShard_User { - async fn users<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { - handle_generic_gql_collection_subscription::(ctx, "users", filter).await - } - async fn user<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { - handle_generic_gql_doc_subscription::(ctx, "users", id).await - } + async fn users<'a>(&self, ctx: &'a Context<'_>, filter: Option) -> impl Stream> + 'a { + handle_generic_gql_collection_subscription::(ctx, "users", filter).await + } + async fn user<'a>(&self, ctx: &'a Context<'_>, id: String) -> impl Stream, SubError>> + 'a { + handle_generic_gql_doc_subscription::(ctx, "users", id).await + } } -} \ No newline at end of file +} diff --git a/Packages/app-server/src/globals.rs b/Packages/app-server/src/globals.rs index 1484b3c59..883243474 100644 --- a/Packages/app-server/src/globals.rs +++ b/Packages/app-server/src/globals.rs @@ -1,98 +1,112 @@ #[cfg(unix)] use { - pyroscope::{pyroscope::PyroscopeAgentReady, PyroscopeAgent}, - pyroscope_pprofrs::{pprof_backend, PprofConfig} + pyroscope::{pyroscope::PyroscopeAgentReady, PyroscopeAgent}, + pyroscope_pprofrs::{pprof_backend, PprofConfig}, }; -use std::{alloc::System, panic, backtrace::Backtrace}; -use rust_shared::{domains::{get_env, is_prod}, flume, links::app_server_to_monitor_backend::Message_ASToMB, once_cell::sync::OnceCell, sentry::{self, ClientInitGuard}, tokio, utils::{errors_::backtrace_simplifier::{simplify_backtrace_str}, mtx::mtx::{MtxData, MtxDataWithExtraInfo, MtxGlobalMsg, MTX_GLOBAL_MESSAGE_SENDER_AND_RECEIVER}, type_aliases::{FReceiver, FSender}}}; -use tracing::{info, error}; use dotenv::dotenv; +use rust_shared::{ + domains::{get_env, is_prod}, + flume, + links::app_server_to_monitor_backend::Message_ASToMB, + once_cell::sync::OnceCell, + 
sentry::{self, ClientInitGuard}, + tokio, + utils::{ + errors_::backtrace_simplifier::simplify_backtrace_str, + mtx::mtx::{MtxData, MtxDataWithExtraInfo, MtxGlobalMsg, MTX_GLOBAL_MESSAGE_SENDER_AND_RECEIVER}, + type_aliases::{FReceiver, FSender}, + }, +}; +use std::{alloc::System, backtrace::Backtrace, panic}; +use tracing::{error, info}; -use crate::{utils::general::{mem_alloc::Trallocator, logging::set_up_logging, data_anchor::DataAnchorFor1}, links::monitor_backend_link::MESSAGE_SENDER_TO_MONITOR_BACKEND}; +use crate::{ + links::monitor_backend_link::MESSAGE_SENDER_TO_MONITOR_BACKEND, + utils::general::{data_anchor::DataAnchorFor1, logging::set_up_logging, mem_alloc::Trallocator}, +}; #[global_allocator] pub static GLOBAL: Trallocator = Trallocator::new(System); pub fn set_up_globals() -> Option { - //panic::always_abort(); - panic::set_hook(Box::new(|info| { - // do a simple println! first, to confirm our handler started running - println!("Got panic."); - // now do basic log of raw info, in case the panic occurred within the logging-system - println!("Panic info:{} [see next log-message for stack-trace]", info); + //panic::always_abort(); + panic::set_hook(Box::new(|info| { + // do a simple println! first, to confirm our handler started running + println!("Got panic."); + // now do basic log of raw info, in case the panic occurred within the logging-system + println!("Panic info:{} [see next log-message for stack-trace]", info); - //let stacktrace = Backtrace::capture(); - let stacktrace = Backtrace::force_capture(); - let stacktrace_str_simplified = simplify_backtrace_str(stacktrace.to_string(), true); + //let stacktrace = Backtrace::capture(); + let stacktrace = Backtrace::force_capture(); + let stacktrace_str_simplified = simplify_backtrace_str(stacktrace.to_string(), true); - error!("Panic stack-trace:\n==========\n{}", stacktrace_str_simplified); - std::process::abort(); - })); + error!("Panic stack-trace:\n==========\n{}", stacktrace_str_simplified); + std::process::abort(); + })); - dotenv().ok(); // load the environment variables from the ".env" file + dotenv().ok(); // load the environment variables from the ".env" file - let sentry_guard = set_up_sentry(); - set_up_logging(); - set_up_mtx_handler(); + let sentry_guard = set_up_sentry(); + set_up_logging(); + set_up_mtx_handler(); - GLOBAL.reset(); - info!("Memory used: {} bytes", GLOBAL.get()); + GLOBAL.reset(); + info!("Memory used: {} bytes", GLOBAL.get()); - sentry_guard + sentry_guard } #[cfg(unix)] pub fn set_up_globals_linux() -> PyroscopeAgent { - // configure pprof (profiling backend) + pyroscope - let pprof_config = PprofConfig::new().sample_rate(100); - let backend_impl = pprof_backend(pprof_config); - let agent = PyroscopeAgent::builder("http://pyroscope.monitoring.svc.cluster.local:4040/", "app-server").backend(backend_impl).build().unwrap(); - agent + // configure pprof (profiling backend) + pyroscope + let pprof_config = PprofConfig::new().sample_rate(100); + let backend_impl = pprof_backend(pprof_config); + let agent = PyroscopeAgent::builder("http://pyroscope.monitoring.svc.cluster.local:4040/", "app-server").backend(backend_impl).build().unwrap(); + agent } #[cfg(not(unix))] -pub fn set_up_globals_linux() -> Option { None } +pub fn set_up_globals_linux() -> Option { + None +} fn set_up_sentry() -> Option { - if is_prod() { - Some(sentry::init(("https://40c1e4f57e8b4bbeb1e5b0cf11abf9e9@o72324.ingest.sentry.io/155432", sentry::ClientOptions { - release: sentry::release_name!(), - environment: 
Some(get_env().into()), - integrations: vec![ - // added integrations: tracing (see logging.rs) - ], - ..Default::default() - }))) - } else { - None - } + if is_prod() { + Some(sentry::init(("https://40c1e4f57e8b4bbeb1e5b0cf11abf9e9@o72324.ingest.sentry.io/155432", sentry::ClientOptions { + release: sentry::release_name!(), + environment: Some(get_env().into()), + integrations: vec![ + // added integrations: tracing (see logging.rs) + ], + ..Default::default() + }))) + } else { + None + } } fn set_up_mtx_handler() { - let (msg_sender, msg_receiver): (FSender, FReceiver) = flume::unbounded(); - let msg_receiver_clone = msg_receiver.clone(); - MTX_GLOBAL_MESSAGE_SENDER_AND_RECEIVER.set((msg_sender, msg_receiver)).unwrap(); + let (msg_sender, msg_receiver): (FSender, FReceiver) = flume::unbounded(); + let msg_receiver_clone = msg_receiver.clone(); + MTX_GLOBAL_MESSAGE_SENDER_AND_RECEIVER.set((msg_sender, msg_receiver)).unwrap(); - tokio::spawn(async move { - loop { - let msg = msg_receiver_clone.recv_async().await.unwrap(); - match msg { - MtxGlobalMsg::NotifyMtxDataPossiblyChanged(mtx_data) => { - try_send_mtx_data_to_monitor_backend(mtx_data).await; - } - } - } - }); + tokio::spawn(async move { + loop { + let msg = msg_receiver_clone.recv_async().await.unwrap(); + match msg { + MtxGlobalMsg::NotifyMtxDataPossiblyChanged(mtx_data) => { + try_send_mtx_data_to_monitor_backend(mtx_data).await; + }, + } + } + }); } async fn try_send_mtx_data_to_monitor_backend(mtx_data: MtxDataWithExtraInfo) { - if &Some(mtx_data.data_as_str) != &mtx_data.last_data_as_str { - let mtx_simple = MtxData { - id: mtx_data.id, - section_lifetimes: mtx_data.section_lifetimes, - }; - if let Err(err) = MESSAGE_SENDER_TO_MONITOR_BACKEND.0.broadcast(Message_ASToMB::MtxEntryDone { mtx: mtx_simple }).await { - error!("Errored while broadcasting MtxEntryDone message. @error:{}", err); - } - } -} \ No newline at end of file + if &Some(mtx_data.data_as_str) != &mtx_data.last_data_as_str { + let mtx_simple = MtxData { id: mtx_data.id, section_lifetimes: mtx_data.section_lifetimes }; + if let Err(err) = MESSAGE_SENDER_TO_MONITOR_BACKEND.0.broadcast(Message_ASToMB::MtxEntryDone { mtx: mtx_simple }).await { + error!("Errored while broadcasting MtxEntryDone message. 
@error:{}", err); + } + } +} diff --git a/Packages/app-server/src/gql.rs b/Packages/app-server/src/gql.rs index f11f39307..b63ec9c8d 100644 --- a/Packages/app-server/src/gql.rs +++ b/Packages/app-server/src/gql.rs @@ -1,53 +1,7 @@ -use std::borrow::Cow; -use std::collections::HashMap; -use std::convert::Infallible; -use std::env; -use std::future::Future; -use std::str::FromStr; -use std::sync::{Arc, Mutex}; -use rust_shared::anyhow::{anyhow, bail}; -use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig, graphiql_source}; -use rust_shared::async_graphql::{extensions, Data, EmptyMutation, EmptySubscription, MergedObject, MergedSubscription, ObjectType, Result, Schema, ServerError, SubscriptionType, Variables}; -use rust_shared::bytes::Bytes; -use deadpool_postgres::{Pool, Manager}; -use rust_shared::http_body_util::Full; -use rust_shared::hyper::body::Body; -use rust_shared::hyper::header::CONTENT_LENGTH; -use rust_shared::hyper::{service}; -use rust_shared::anyhow::Error; -use rust_shared::hyper_util::client::legacy::connect::HttpConnector; -use rust_shared::hyper_util::client::legacy::Client; -use rust_shared::hyper_util::rt::TokioExecutor; -use rust_shared::rust_macros::{wrap_async_graphql, wrap_agql_schema_build, wrap_slow_macros, wrap_agql_schema_type}; -use rust_shared::serde_json::json; -use rust_shared::utils::auth::jwt_utils_base::UserJWTData; -use rust_shared::utils::db::agql_ext::gql_general_extension::CustomExtensionCreator; -use rust_shared::utils::net::{body_to_str, full_body_from_str, new_hyper_client_http, AxumBody, HyperClient, HyperClient_}; -use rust_shared::utils::type_aliases::JSONValue; -use rust_shared::{axum, serde_json, thiserror, to_anyhow, tower, tower_http}; -use tower::make::Shared; -use tower::{Service, ServiceExt, BoxError, service_fn}; -use tower_http::cors::{CorsLayer}; -use rust_shared::{async_graphql, async_graphql::futures_util::task::{Context, Poll}}; -use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; -use axum::http::{Method, HeaderValue}; -use axum::http::header::CONTENT_TYPE; -use axum::response::{self, IntoResponse}; -use axum::routing::{get, post, MethodFilter, on_service}; -use axum::{extract, Router}; -use axum::extract::ws::{CloseFrame, Message}; -use axum::extract::{FromRequest, WebSocketUpgrade}; -use axum::http::{self, uri::Uri, Request, Response, StatusCode}; -use axum::{ - extract::Extension, -}; -use tracing::{info, error}; -use rust_shared::url::Url; -use std::{convert::TryFrom, net::SocketAddr}; -use futures_util::future::{BoxFuture, Ready}; -use futures_util::stream::{SplitSink, SplitStream}; -use futures_util::{future, Sink, SinkExt, StreamExt, FutureExt, TryFutureExt, TryStreamExt}; -use crate::db::commands::_temp::clone_map_special::{SubscriptionShard_CloneMapSpecial}; +use crate::db::_general::{MutationShard_General, QueryShard_General, SubscriptionShard_General}; +use crate::db::access_policies::{QueryShard_AccessPolicy, SubscriptionShard_AccessPolicy}; +use crate::db::command_runs::{QueryShard_CommandRun, SubscriptionShard_CommandRun}; +use crate::db::commands::_temp::clone_map_special::SubscriptionShard_CloneMapSpecial; use crate::db::commands::add_access_policy::MutationShard_AddAccessPolicy; use crate::db::commands::add_argument_and_claim::MutationShard_AddArgumentAndClaim; use crate::db::commands::add_child_node::MutationShard_AddChildNode; @@ -77,16 +31,16 @@ use crate::db::commands::delete_timeline::MutationShard_DeleteTimeline; use 
crate::db::commands::delete_timeline_step::MutationShard_DeleteTimelineStep; use crate::db::commands::import_firestore_dump::MutationShard_ImportFirestoreDump; use crate::db::commands::link_node::MutationShard_LinkNode; -use crate::db::commands::run_command_batch::{SubscriptionShard_RunCommandBatch}; +use crate::db::commands::run_command_batch::SubscriptionShard_RunCommandBatch; use crate::db::commands::set_node_is_multi_premise_argument::MutationShard_SetNodeIsMultiPremiseArgument; use crate::db::commands::set_node_rating::MutationShard_SetNodeRating; use crate::db::commands::set_user_follow_data::MutationShard_SetUserFollowData; use crate::db::commands::transfer_nodes::MutationShard_TransferNodes; use crate::db::commands::update_access_policy::MutationShard_UpdateAccessPolicy; -use crate::db::commands::update_node_link::MutationShard_UpdateNodeLink; use crate::db::commands::update_map::MutationShard_UpdateMap; use crate::db::commands::update_media::MutationShard_UpdateMedia; use crate::db::commands::update_node::MutationShard_UpdateNode; +use crate::db::commands::update_node_link::MutationShard_UpdateNodeLink; use crate::db::commands::update_node_phrasing::MutationShard_UpdateNodePhrasing; use crate::db::commands::update_node_tag::MutationShard_UpdateNodeTag; use crate::db::commands::update_share::MutationShard_UpdateShare; @@ -95,323 +49,369 @@ use crate::db::commands::update_timeline::MutationShard_UpdateTimeline; use crate::db::commands::update_timeline_step::MutationShard_UpdateTimelineStep; use crate::db::commands::update_user::MutationShard_UpdateUser; use crate::db::commands::update_user_hidden::MutationShard_UpdateUserHidden; +use crate::db::feedback_proposals::{QueryShard_Proposal, SubscriptionShard_Proposal}; +use crate::db::feedback_user_infos::{QueryShard_UserInfo, SubscriptionShard_UserInfo}; use crate::db::general::backups::QueryShard_General_Backups; use crate::db::general::search::QueryShard_General_Search; use crate::db::general::sign_in::SubscriptionShard_SignIn; use crate::db::general::subtree::{MutationShard_General_Subtree, QueryShard_General_Subtree, SubscriptionShared_General_Subtree}; use crate::db::general::subtree_old::QueryShard_General_Subtree_Old; use crate::db::general::trusted_operators::QueryShard_General_TrustedOperators; -use crate::db::timeline_steps::{SubscriptionShard_TimelineStep, QueryShard_TimelineStep}; -use crate::db::timelines::{SubscriptionShard_Timeline, QueryShard_Timeline}; +use crate::db::global_data::{QueryShard_GlobalData, SubscriptionShard_GlobalData}; +use crate::db::map_node_edits::{QueryShard_NodeEdit, SubscriptionShard_NodeEdit}; +use crate::db::maps::{QueryShard_Map, SubscriptionShard_Map}; +use crate::db::medias::{QueryShard_Media, SubscriptionShard_Media}; +use crate::db::node_links::{QueryShard_NodeLink, SubscriptionShard_NodeLink}; +use crate::db::node_phrasings::{QueryShard_NodePhrasing, SubscriptionShard_NodePhrasing}; +use crate::db::node_ratings::{QueryShard_NodeRating, SubscriptionShard_NodeRating}; +use crate::db::node_revisions::{QueryShard_NodeRevision, SubscriptionShard_NodeRevision}; +use crate::db::node_tags::{QueryShard_NodeTag, SubscriptionShard_NodeTag}; +use crate::db::nodes::{QueryShard_Node, SubscriptionShard_Node}; +use crate::db::shares::{QueryShard_Share, SubscriptionShard_Share}; +use crate::db::terms::{QueryShard_Term, SubscriptionShard_Term}; +use crate::db::timeline_steps::{QueryShard_TimelineStep, SubscriptionShard_TimelineStep}; +use crate::db::timelines::{QueryShard_Timeline, 
SubscriptionShard_Timeline}; +use crate::db::user_hiddens::{QueryShard_UserHidden, SubscriptionShard_UserHidden}; +use crate::db::users::{QueryShard_User, SubscriptionShard_User}; +use crate::store::live_queries::LQStorageArc; use crate::store::storage::AppStateArc; use crate::utils::db::agql_ext::gql_request_storage::GQLRequestStorage; -use crate::db::_general::{MutationShard_General, QueryShard_General, SubscriptionShard_General}; -use crate::db::access_policies::{SubscriptionShard_AccessPolicy, QueryShard_AccessPolicy}; -use crate::db::command_runs::{SubscriptionShard_CommandRun, QueryShard_CommandRun}; -use crate::db::feedback_proposals::{SubscriptionShard_Proposal, QueryShard_Proposal}; -use crate::db::feedback_user_infos::{SubscriptionShard_UserInfo, QueryShard_UserInfo}; -use crate::db::global_data::{SubscriptionShard_GlobalData, QueryShard_GlobalData}; -use crate::db::map_node_edits::{SubscriptionShard_NodeEdit, QueryShard_NodeEdit}; -use crate::db::maps::{SubscriptionShard_Map, QueryShard_Map}; -use crate::db::medias::{SubscriptionShard_Media, QueryShard_Media}; -use crate::db::node_links::{SubscriptionShard_NodeLink, QueryShard_NodeLink}; -use crate::db::node_phrasings::{SubscriptionShard_NodePhrasing, QueryShard_NodePhrasing}; -use crate::db::node_ratings::{SubscriptionShard_NodeRating, QueryShard_NodeRating}; -use crate::db::node_revisions::{SubscriptionShard_NodeRevision, QueryShard_NodeRevision}; -use crate::db::node_tags::{SubscriptionShard_NodeTag, QueryShard_NodeTag}; -use crate::db::nodes::{SubscriptionShard_Node, QueryShard_Node}; -use crate::db::shares::{SubscriptionShard_Share, QueryShard_Share}; -use crate::db::terms::{SubscriptionShard_Term, QueryShard_Term}; -use crate::db::user_hiddens::{SubscriptionShard_UserHidden, QueryShard_UserHidden}; -use crate::db::users::{SubscriptionShard_User, QueryShard_User}; -use crate::store::live_queries::LQStorageArc; -use rust_shared::{async_graphql_axum}; -use async_graphql_axum::{GraphQLRequest, GraphQLResponse, GraphQLSubscription, GraphQLProtocol, GraphQLWebSocket, GraphQLBatchRequest}; +use async_graphql_axum::{GraphQLBatchRequest, GraphQLProtocol, GraphQLRequest, GraphQLResponse, GraphQLSubscription, GraphQLWebSocket}; +use axum::extract::ws::{CloseFrame, Message}; +use axum::extract::Extension; +use axum::extract::{FromRequest, WebSocketUpgrade}; +use axum::http::header::CONTENT_TYPE; +use axum::http::{self, uri::Uri, Request, Response, StatusCode}; +use axum::http::{HeaderValue, Method}; +use axum::response::{self, IntoResponse}; +use axum::routing::{get, on_service, post, MethodFilter}; +use axum::{extract, Router}; +use deadpool_postgres::{Manager, Pool}; +use futures_util::future::{BoxFuture, Ready}; +use futures_util::stream::{SplitSink, SplitStream}; +use futures_util::{future, FutureExt, Sink, SinkExt, StreamExt, TryFutureExt, TryStreamExt}; +use rust_shared::anyhow::Error; +use rust_shared::anyhow::{anyhow, bail}; +use rust_shared::async_graphql::http::{graphiql_source, playground_source, GraphQLPlaygroundConfig}; +use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; +use rust_shared::async_graphql::{extensions, Data, EmptyMutation, EmptySubscription, MergedObject, MergedSubscription, ObjectType, Result, Schema, ServerError, SubscriptionType, Variables}; +use rust_shared::async_graphql_axum; +use rust_shared::bytes::Bytes; +use rust_shared::http_body_util::Full; +use rust_shared::hyper::body::Body; +use rust_shared::hyper::header::CONTENT_LENGTH; +use 
rust_shared::hyper::service; +use rust_shared::hyper_util::client::legacy::connect::HttpConnector; +use rust_shared::hyper_util::client::legacy::Client; +use rust_shared::hyper_util::rt::TokioExecutor; +use rust_shared::rust_macros::{wrap_agql_schema_build, wrap_agql_schema_type, wrap_async_graphql, wrap_slow_macros}; +use rust_shared::serde_json::json; +use rust_shared::url::Url; +use rust_shared::utils::auth::jwt_utils_base::UserJWTData; +use rust_shared::utils::db::agql_ext::gql_general_extension::CustomExtensionCreator; +use rust_shared::utils::net::{body_to_str, full_body_from_str, new_hyper_client_http, AxumBody, HyperClient, HyperClient_}; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{ + async_graphql, + async_graphql::futures_util::task::{Context, Poll}, +}; +use rust_shared::{axum, serde_json, thiserror, to_anyhow, tower, tower_http}; +use std::borrow::Cow; +use std::collections::HashMap; +use std::convert::Infallible; +use std::env; +use std::future::Future; +use std::str::FromStr; +use std::sync::{Arc, Mutex}; +use std::{convert::TryFrom, net::SocketAddr}; +use tower::make::Shared; +use tower::{service_fn, BoxError, Service, ServiceExt}; +use tower_http::cors::CorsLayer; +use tracing::{error, info}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(MergedObject, Default)] pub struct QueryRoot( - QueryShard_General, QueryShard_General_Backups, QueryShard_General_Subtree, QueryShard_General_Subtree_Old, QueryShard_General_Search, QueryShard_General_TrustedOperators, - // table-specific - QueryShard_User, QueryShard_UserHidden, - QueryShard_GlobalData, - QueryShard_Map, - QueryShard_Term, - QueryShard_Timeline, QueryShard_TimelineStep, - QueryShard_AccessPolicy, - QueryShard_Media, - QueryShard_CommandRun, - //QueryShard_RunCommandBatch, - QueryShard_Proposal, - QueryShard_UserInfo, - QueryShard_Node, QueryShard_NodeLink, QueryShard_NodeEdit, QueryShard_NodePhrasing, QueryShard_NodeRating, QueryShard_NodeRevision, QueryShard_NodeTag, - QueryShard_Share, + QueryShard_General, QueryShard_General_Backups, QueryShard_General_Subtree, QueryShard_General_Subtree_Old, QueryShard_General_Search, QueryShard_General_TrustedOperators, + // table-specific + QueryShard_User, QueryShard_UserHidden, + QueryShard_GlobalData, + QueryShard_Map, + QueryShard_Term, + QueryShard_Timeline, QueryShard_TimelineStep, + QueryShard_AccessPolicy, + QueryShard_Media, + QueryShard_CommandRun, + //QueryShard_RunCommandBatch, + QueryShard_Proposal, + QueryShard_UserInfo, + QueryShard_Node, QueryShard_NodeLink, QueryShard_NodeEdit, QueryShard_NodePhrasing, QueryShard_NodeRating, QueryShard_NodeRevision, QueryShard_NodeTag, + QueryShard_Share, ); #[derive(MergedObject, Default)] pub struct MutationRoot( - MutationShard_General, MutationShard_General_Subtree, - // commands, matching standard add/delete/update pattern - MutationShard_AddAccessPolicy, MutationShard_AddMap, MutationShard_AddMedia, MutationShard_AddNodeLink, MutationShard_AddNodePhrasing, MutationShard_AddNodeTag, - MutationShard_AddShare, MutationShard_AddTerm, MutationShard_AddTimeline, MutationShard_AddTimelineStep, - MutationShard_DeleteAccessPolicy, MutationShard_DeleteMap, MutationShard_DeleteMedia, MutationShard_DeleteNodeLink, MutationShard_DeleteNodePhrasing, MutationShard_DeleteNodeTag, - MutationShard_DeleteShare, MutationShard_DeleteTerm, MutationShard_DeleteTimeline, MutationShard_DeleteTimelineStep, - MutationShard_UpdateAccessPolicy, MutationShard_UpdateMap, MutationShard_UpdateMedia, MutationShard_UpdateNodeLink, 
MutationShard_UpdateNodePhrasing, MutationShard_UpdateNodeTag, - MutationShard_UpdateShare, MutationShard_UpdateTerm, MutationShard_UpdateTimeline, MutationShard_UpdateTimelineStep, - // commands, others - MutationShard_AddArgumentAndClaim, MutationShard_AddChildNode, MutationShard_AddNodeRevision, - MutationShard_DeleteArgument, MutationShard_DeleteNode, MutationShard_DeleteNodeRating, MutationShard_DeleteNodeRevision, - MutationShard_ImportFirestoreDump, - MutationShard_LinkNode, - MutationShard_SetNodeIsMultiPremiseArgument, MutationShard_SetNodeRating, MutationShard_SetUserFollowData, - MutationShard_TransferNodes, - MutationShard_UpdateNode, MutationShard_UpdateUser, MutationShard_UpdateUserHidden, + MutationShard_General, MutationShard_General_Subtree, + // commands, matching standard add/delete/update pattern + MutationShard_AddAccessPolicy, MutationShard_AddMap, MutationShard_AddMedia, MutationShard_AddNodeLink, MutationShard_AddNodePhrasing, MutationShard_AddNodeTag, + MutationShard_AddShare, MutationShard_AddTerm, MutationShard_AddTimeline, MutationShard_AddTimelineStep, + MutationShard_DeleteAccessPolicy, MutationShard_DeleteMap, MutationShard_DeleteMedia, MutationShard_DeleteNodeLink, MutationShard_DeleteNodePhrasing, MutationShard_DeleteNodeTag, + MutationShard_DeleteShare, MutationShard_DeleteTerm, MutationShard_DeleteTimeline, MutationShard_DeleteTimelineStep, + MutationShard_UpdateAccessPolicy, MutationShard_UpdateMap, MutationShard_UpdateMedia, MutationShard_UpdateNodeLink, MutationShard_UpdateNodePhrasing, MutationShard_UpdateNodeTag, + MutationShard_UpdateShare, MutationShard_UpdateTerm, MutationShard_UpdateTimeline, MutationShard_UpdateTimelineStep, + // commands, others + MutationShard_AddArgumentAndClaim, MutationShard_AddChildNode, MutationShard_AddNodeRevision, + MutationShard_DeleteArgument, MutationShard_DeleteNode, MutationShard_DeleteNodeRating, MutationShard_DeleteNodeRevision, + MutationShard_ImportFirestoreDump, + MutationShard_LinkNode, + MutationShard_SetNodeIsMultiPremiseArgument, MutationShard_SetNodeRating, MutationShard_SetUserFollowData, + MutationShard_TransferNodes, + MutationShard_UpdateNode, MutationShard_UpdateUser, MutationShard_UpdateUserHidden, ); #[derive(MergedSubscription, Default)] pub struct SubscriptionRoot( - SubscriptionShard_CloneMapSpecial, SubscriptionShard_General, SubscriptionShared_General_Subtree, SubscriptionShard_SignIn, - // table-specific - SubscriptionShard_User, SubscriptionShard_UserHidden, - SubscriptionShard_GlobalData, - SubscriptionShard_Map, - SubscriptionShard_Term, - SubscriptionShard_Timeline, SubscriptionShard_TimelineStep, - SubscriptionShard_AccessPolicy, - SubscriptionShard_Media, - SubscriptionShard_CommandRun, - SubscriptionShard_RunCommandBatch, - SubscriptionShard_Proposal, - SubscriptionShard_UserInfo, - SubscriptionShard_Node, SubscriptionShard_NodeLink, SubscriptionShard_NodeEdit, SubscriptionShard_NodePhrasing, SubscriptionShard_NodeRating, SubscriptionShard_NodeRevision, SubscriptionShard_NodeTag, - SubscriptionShard_Share, + SubscriptionShard_CloneMapSpecial, SubscriptionShard_General, SubscriptionShared_General_Subtree, SubscriptionShard_SignIn, + // table-specific + SubscriptionShard_User, SubscriptionShard_UserHidden, + SubscriptionShard_GlobalData, + SubscriptionShard_Map, + SubscriptionShard_Term, + SubscriptionShard_Timeline, SubscriptionShard_TimelineStep, + SubscriptionShard_AccessPolicy, + SubscriptionShard_Media, + SubscriptionShard_CommandRun, + SubscriptionShard_RunCommandBatch, + 
SubscriptionShard_Proposal, + SubscriptionShard_UserInfo, + SubscriptionShard_Node, SubscriptionShard_NodeLink, SubscriptionShard_NodeEdit, SubscriptionShard_NodePhrasing, SubscriptionShard_NodeRating, SubscriptionShard_NodeRevision, SubscriptionShard_NodeTag, + SubscriptionShard_Share, ); } -pub type RootSchema = wrap_agql_schema_type!{ - Schema +pub type RootSchema = wrap_agql_schema_type! { + Schema }; const GRAPHQL_PATH_EXTERNAL: &str = "/app-server/graphql"; const GRAPHQL_PATH_INTERNAL: &str = "/graphql"; async fn graphiql() -> impl IntoResponse { - // use the DEV/PROD value from the "ENVIRONMENT" env-var, to determine what the app-server's URL is (maybe temp) - let app_server_host = if env::var("ENVIRONMENT").unwrap_or("DEV".to_owned()) == "DEV" { "localhost:5100" } else { "debatemap.app" }; - response::Html(graphiql_source(GRAPHQL_PATH_EXTERNAL, Some(&format!("wss://{app_server_host}{GRAPHQL_PATH_EXTERNAL}")))) + // use the DEV/PROD value from the "ENVIRONMENT" env-var, to determine what the app-server's URL is (maybe temp) + let app_server_host = if env::var("ENVIRONMENT").unwrap_or("DEV".to_owned()) == "DEV" { + "localhost:5100" + } else { + "debatemap.app" + }; + response::Html(graphiql_source(GRAPHQL_PATH_EXTERNAL, Some(&format!("wss://{app_server_host}{GRAPHQL_PATH_EXTERNAL}")))) } async fn graphql_playground() -> impl IntoResponse { - response::Html(playground_source( - GraphQLPlaygroundConfig::new(GRAPHQL_PATH_EXTERNAL).subscription_endpoint(GRAPHQL_PATH_EXTERNAL), - )) + response::Html(playground_source(GraphQLPlaygroundConfig::new(GRAPHQL_PATH_EXTERNAL).subscription_endpoint(GRAPHQL_PATH_EXTERNAL))) } // version based on actix-web example (https://github.com/async-graphql/examples/blob/1492794f9001cfe7a37058ba7be3c6461b0cc70a/actix-web/token-from-header/src/main.rs#L37) /*async fn websocket_handler(ws: WebSocketUpgrade) -> impl IntoResponse { - GraphQLSubscription::new(Schema::clone(&*schema)) - .with_data(data) - .on_connection_init(on_connection_init) - .start(&req, payload) + GraphQLSubscription::new(Schema::clone(&*schema)) + .with_data(data) + .on_connection_init(on_connection_init) + .start(&req, payload) }*/ // version based on poem example (https://github.com/async-graphql/examples/blob/1492794f9001cfe7a37058ba7be3c6461b0cc70a/poem/token-from-header/src/main.rs#L44) async fn graphql_websocket_handler(/*Extension(state): Extension,*/ Extension(schema): Extension, /*req: Request,*/ ws: WebSocketUpgrade, protocol: GraphQLProtocol) -> impl IntoResponse { - let mut data = async_graphql::Data::default(); - let request_storage = GQLRequestStorage::new(); - data.insert(request_storage); - /*if let Some(token) = get_token_from_headers(headers) { - data.insert(token); - }*/ - - // we cannot retrieve the raw "Request" using axum-extract while also retrieving the "WebSocketUpgrade", so just assume caller requested the "graphql-ws" protocol (it's what the main dm-client is using) - //let protocol = GraphQLProtocol(WebSocketProtocols::GraphQLWS); - - /*info!("Handling ws request:{:?} @ws:{:?}", req, ws); - let mut req_agql_parts = RequestParts::new(req); - let protocol = match GraphQLProtocol::from_request(&mut req_agql_parts).await { - Ok(protocol) => protocol, - Err(err) => { - //return response::Html(format!("
Error parsing graphql-protocol from request headers:{:?}
", err)) - //error!("/monitor-backend-link endpoint was called, but the caller was not an in-cluster pod! @callerIP:{}", addr.ip()); - let body_json_val = json!({"error": format!("Error parsing graphql-protocol from request headers:{:?}", err)}); - let body = Body::from(body_json_val.to_string()).boxed_unsync(); - return Response::builder().status(StatusCode::BAD_REQUEST).body(body).unwrap().into_response(); - }, - };*/ - - // NOTE: Don't be confused; the "protocol names" are confusingly misaligned with their "subprotocol header values". (https://github.com/async-graphql/async-graphql/issues/1196#issuecomment-1371985251) - //info!("Handling ws request:{:?} @protocol:{:?}", ws, protocol); - - let schema_clone = schema.clone(); - ws - .protocols(ALL_WEBSOCKET_PROTOCOLS) - .on_upgrade(move |stream| { - GraphQLWebSocket::new(stream, schema_clone, protocol) - .with_data(data) - //.on_connection_init(on_connection_init) - .serve() - }) + let mut data = async_graphql::Data::default(); + let request_storage = GQLRequestStorage::new(); + data.insert(request_storage); + /*if let Some(token) = get_token_from_headers(headers) { + data.insert(token); + }*/ + + // we cannot retrieve the raw "Request" using axum-extract while also retrieving the "WebSocketUpgrade", so just assume caller requested the "graphql-ws" protocol (it's what the main dm-client is using) + //let protocol = GraphQLProtocol(WebSocketProtocols::GraphQLWS); + + /*info!("Handling ws request:{:?} @ws:{:?}", req, ws); + let mut req_agql_parts = RequestParts::new(req); + let protocol = match GraphQLProtocol::from_request(&mut req_agql_parts).await { + Ok(protocol) => protocol, + Err(err) => { + //return response::Html(format!("
Error parsing graphql-protocol from request headers:{:?}
", err)) + //error!("/monitor-backend-link endpoint was called, but the caller was not an in-cluster pod! @callerIP:{}", addr.ip()); + let body_json_val = json!({"error": format!("Error parsing graphql-protocol from request headers:{:?}", err)}); + let body = Body::from(body_json_val.to_string()).boxed_unsync(); + return Response::builder().status(StatusCode::BAD_REQUEST).body(body).unwrap().into_response(); + }, + };*/ + + // NOTE: Don't be confused; the "protocol names" are confusingly misaligned with their "subprotocol header values". (https://github.com/async-graphql/async-graphql/issues/1196#issuecomment-1371985251) + //info!("Handling ws request:{:?} @protocol:{:?}", ws, protocol); + + let schema_clone = schema.clone(); + ws.protocols(ALL_WEBSOCKET_PROTOCOLS).on_upgrade(move |stream| { + GraphQLWebSocket::new(stream, schema_clone, protocol) + .with_data(data) + //.on_connection_init(on_connection_init) + .serve() + }) } /*pub async fn on_connection_init(value: serde_json::Value) -> Result { - #[derive(Deserialize)] - struct Payload { - token: String, - } - - // Coerce the connection params into our `Payload` struct so we can - // validate the token exists in the headers. - if let Ok(payload) = serde_json::from_value::(value) { - let mut data = Data::default(); - data.insert(Token(payload.token)); - Ok(data) - } else { - Err("Token is required".into()) - } + #[derive(Deserialize)] + struct Payload { + token: String, + } + + // Coerce the connection params into our `Payload` struct so we can + // validate the token exists in the headers. + if let Ok(payload) = serde_json::from_value::(value) { + let mut data = Data::default(); + data.insert(Token(payload.token)); + Ok(data) + } else { + Err("Token is required".into()) + } }*/ //pub type HyperClient = Client>; pub async fn extend_router(app: Router, storage_wrapper: AppStateArc) -> Router { - let schema = - wrap_agql_schema_build!{ - Schema::build(QueryRoot::default(), MutationRoot::default(), SubscriptionRoot::default()) - } - .data(storage_wrapper) - .extension(CustomExtensionCreator) - .finish(); - - let client_to_asjs = new_hyper_client_http(); - //let gql_subscription_service = GraphQLSubscription::new(schema.clone()); - - let result = app - .route("/graphiql", get(graphiql)) - .route("/gql-playground", get(graphql_playground)) - .route(GRAPHQL_PATH_INTERNAL, - // approach 1 (using standard routing functions) - //on_service(MethodFilter::GET, gql_subscription_service).post(handle_gql_query_or_mutation) - get(graphql_websocket_handler).post(handle_gql_query_or_mutation) - - // approach 2 (custom first-layer service-function) - // omitted for now; you can reference the "one-shot" pattern here: https://github.com/tokio-rs/axum/blob/422a883cb2a81fa6fbd2f2a1affa089304b7e47b/examples/http-proxy/src/main.rs#L40 - ) - //.fallback(get(handle_gql_query_or_mutation).merge(post(handle_gql_query_or_mutation))) - .layer(Extension(schema)) - .layer(Extension(client_to_asjs)); - - info!("Playground: http://localhost:[view readme]"); - result + let schema = wrap_agql_schema_build! 
{ + Schema::build(QueryRoot::default(), MutationRoot::default(), SubscriptionRoot::default()) + } + .data(storage_wrapper) + .extension(CustomExtensionCreator) + .finish(); + + let client_to_asjs = new_hyper_client_http(); + //let gql_subscription_service = GraphQLSubscription::new(schema.clone()); + + let result = app + .route("/graphiql", get(graphiql)) + .route("/gql-playground", get(graphql_playground)) + .route( + GRAPHQL_PATH_INTERNAL, + // approach 1 (using standard routing functions) + //on_service(MethodFilter::GET, gql_subscription_service).post(handle_gql_query_or_mutation) + get(graphql_websocket_handler).post(handle_gql_query_or_mutation), // approach 2 (custom first-layer service-function) + // omitted for now; you can reference the "one-shot" pattern here: https://github.com/tokio-rs/axum/blob/422a883cb2a81fa6fbd2f2a1affa089304b7e47b/examples/http-proxy/src/main.rs#L40 + ) + //.fallback(get(handle_gql_query_or_mutation).merge(post(handle_gql_query_or_mutation))) + .layer(Extension(schema)) + .layer(Extension(client_to_asjs)); + + info!("Playground: http://localhost:[view readme]"); + result } pub async fn handle_gql_query_or_mutation(Extension(_client): Extension, Extension(schema): Extension, req: Request) -> Response { - let response_str = match have_own_graphql_handle_request(req, schema).await { - Ok(a) => a, - Err(err) => match err { - HandleGQLRequestError::Early(err) => json!({ - "errors": [{"message": err.to_string()}], - }).to_string(), - HandleGQLRequestError::Late(errors) => json!({ - "errors": errors, - }).to_string(), - }, - }; - - // send response (to frontend) - /*let mut response = Response::builder().body(axum::body::Body::from(response_str)).unwrap(); - response.headers_mut().append(CONTENT_TYPE, HeaderValue::from_static("content-type: application/json; charset=utf-8"));*/ - let response = Response::builder() - .header(CONTENT_TYPE, "application/json") - .body(response_str.into()) - .unwrap(); - response + let response_str = match have_own_graphql_handle_request(req, schema).await { + Ok(a) => a, + Err(err) => match err { + HandleGQLRequestError::Early(err) => json!({ + "errors": [{"message": err.to_string()}], + }) + .to_string(), + HandleGQLRequestError::Late(errors) => json!({ + "errors": errors, + }) + .to_string(), + }, + }; + + // send response (to frontend) + /*let mut response = Response::builder().body(axum::body::Body::from(response_str)).unwrap(); + response.headers_mut().append(CONTENT_TYPE, HeaderValue::from_static("content-type: application/json; charset=utf-8"));*/ + let response = Response::builder().header(CONTENT_TYPE, "application/json").body(response_str.into()).unwrap(); + response } #[derive(thiserror::Error, Debug)] pub enum HandleGQLRequestError { - #[error("Early gql error: {0:?}")] - Early(Error), - #[error("Late gql error: {0:?}")] - Late(Vec), + #[error("Early gql error: {0:?}")] + Early(Error), + #[error("Late gql error: {0:?}")] + Late(Vec), } impl From for HandleGQLRequestError { - fn from(err: Error) -> Self { - HandleGQLRequestError::Early(err) - } + fn from(err: Error) -> Self { + HandleGQLRequestError::Early(err) + } } /*impl From> for HandleGQLRequestError { - fn from(errors: Vec) -> Self { - HandleGQLRequestError::Late(errors) - } + fn from(errors: Vec) -> Self { + HandleGQLRequestError::Late(errors) + } }*/ pub async fn have_own_graphql_handle_request(req: Request, schema: RootSchema) -> Result { - use async_graphql::futures_util::TryFutureExt; - - // retrieve auth-data/JWT and such from http-headers - let 
gql_data_from_http_request = get_gql_data_from_http_request(&req)?; - - // read request's body (from frontend) - let req_as_str = body_to_str(req.into_body()).await?; - let req_as_json = JSONValue::from_str(&req_as_str).map_err(to_anyhow)?; - - // prepare request for graphql engine - //let gql_req = async_graphql::Request::new(req_as_str); - let gql_req = async_graphql::Request::new(req_as_json["query"].as_str().ok_or(anyhow!("The \"query\" field must be a string."))?); - let gql_req = match req_as_json["operationName"].as_str() { - Some(op_name) => gql_req.operation_name(op_name), - None => gql_req, - }; - let gql_req = gql_req.variables(Variables::from_json(req_as_json["variables"].clone())); - - // attach auth-data/JWT and such to async-graphql context-data - let gql_req = gql_req.data(gql_data_from_http_request); - - // send request to graphql engine, and read response - let temp1 = schema.execute(gql_req).await.into_result(); - match temp1 { - Ok(_) => {}, - Err(ref errors) => { - for err in errors { - error!("Test1:{:?}", err); - if let Some(ref source) = err.source { // get the error source - error!("Test1.5:{:?}", source); - if let Some(ref app_err) = source.downcast_ref::() { // cast to AppError - error!("Test2:{:?}", app_err); - } - } - } - }, - } - let gql_response: async_graphql::Response = temp1.map_err(HandleGQLRequestError::Late)?; - //let response_body: String = gql_response.data.to_string(); // this doesn't output valid json (eg. no quotes around keys) - let response_str: String = serde_json::to_string(&gql_response).map_err(to_anyhow)?; - - Ok(response_str) + use async_graphql::futures_util::TryFutureExt; + + // retrieve auth-data/JWT and such from http-headers + let gql_data_from_http_request = get_gql_data_from_http_request(&req)?; + + // read request's body (from frontend) + let req_as_str = body_to_str(req.into_body()).await?; + let req_as_json = JSONValue::from_str(&req_as_str).map_err(to_anyhow)?; + + // prepare request for graphql engine + //let gql_req = async_graphql::Request::new(req_as_str); + let gql_req = async_graphql::Request::new(req_as_json["query"].as_str().ok_or(anyhow!("The \"query\" field must be a string."))?); + let gql_req = match req_as_json["operationName"].as_str() { + Some(op_name) => gql_req.operation_name(op_name), + None => gql_req, + }; + let gql_req = gql_req.variables(Variables::from_json(req_as_json["variables"].clone())); + + // attach auth-data/JWT and such to async-graphql context-data + let gql_req = gql_req.data(gql_data_from_http_request); + + // send request to graphql engine, and read response + let temp1 = schema.execute(gql_req).await.into_result(); + match temp1 { + Ok(_) => {}, + Err(ref errors) => { + for err in errors { + error!("Test1:{:?}", err); + if let Some(ref source) = err.source { + // get the error source + error!("Test1.5:{:?}", source); + if let Some(ref app_err) = source.downcast_ref::() { + // cast to AppError + error!("Test2:{:?}", app_err); + } + } + } + }, + } + let gql_response: async_graphql::Response = temp1.map_err(HandleGQLRequestError::Late)?; + //let response_body: String = gql_response.data.to_string(); // this doesn't output valid json (eg. 
no quotes around keys) + let response_str: String = serde_json::to_string(&gql_response).map_err(to_anyhow)?; + + Ok(response_str) } pub fn get_gql_data_from_http_request(req: &Request) -> Result { - let mut data = GQLDataFromHTTPRequest { jwt: None, referrer: None }; - if let Some(header) = req.headers().get("authorization") { - //info!("Found authorization header:{}", header.to_str()?); - if let Some(parts) = header.to_str()?.split_once("Bearer ") { - //info!("Found bearer part2/jwt-string:{}", parts.1.to_owned()); - data.jwt = Some(parts.1.to_owned()); - } else { - bail!("An \"authorization\" header was present, but its value was unable to be parsed. @header_value:\"{}\"", header.to_str()?); - } - } - - if let Some(header) = req.headers().get("referrer") { - //info!("Found referrer header."); - if let Ok(referrer) = header.to_str() { - //info!("Found referrer part2:{}", referrer); - data.referrer = Some(referrer.to_owned()); - } - } - Ok(data) + let mut data = GQLDataFromHTTPRequest { jwt: None, referrer: None }; + if let Some(header) = req.headers().get("authorization") { + //info!("Found authorization header:{}", header.to_str()?); + if let Some(parts) = header.to_str()?.split_once("Bearer ") { + //info!("Found bearer part2/jwt-string:{}", parts.1.to_owned()); + data.jwt = Some(parts.1.to_owned()); + } else { + bail!("An \"authorization\" header was present, but its value was unable to be parsed. @header_value:\"{}\"", header.to_str()?); + } + } + + if let Some(header) = req.headers().get("referrer") { + //info!("Found referrer header."); + if let Ok(referrer) = header.to_str() { + //info!("Found referrer part2:{}", referrer); + data.referrer = Some(referrer.to_owned()); + } + } + Ok(data) } pub struct GQLDataFromHTTPRequest { - pub jwt: Option, - pub referrer: Option, -} \ No newline at end of file + pub jwt: Option, + pub referrer: Option, +} diff --git a/Packages/app-server/src/links/db_live_cache.rs b/Packages/app-server/src/links/db_live_cache.rs index e7a77fc1f..c5f86c2ae 100644 --- a/Packages/app-server/src/links/db_live_cache.rs +++ b/Packages/app-server/src/links/db_live_cache.rs @@ -1,80 +1,107 @@ -use std::{sync::Mutex, collections::{HashSet, HashMap}}; +use std::{ + collections::{HashMap, HashSet}, + sync::Mutex, +}; use futures_util::StreamExt; -use rust_shared::{once_cell::sync::Lazy, anyhow::{anyhow, Error, bail}, itertools::Itertools, tokio, to_anyhow, db_constants::{SYSTEM_USER_ID, SYSTEM_USER_EMAIL}, utils::{auth::jwt_utils_base::UserJWTData, general_::extensions::ToOwnedV, time::tokio_sleep}, serde_json::{json, self}}; +use rust_shared::{ + anyhow::{anyhow, bail, Error}, + db_constants::{SYSTEM_USER_EMAIL, SYSTEM_USER_ID}, + itertools::Itertools, + once_cell::sync::Lazy, + serde_json::{self, json}, + to_anyhow, tokio, + utils::{auth::jwt_utils_base::UserJWTData, general_::extensions::ToOwnedV, time::tokio_sleep}, +}; use tracing::error; -use crate::{db::{access_policies::{GQLSet_AccessPolicy}, users::{User, GQLSet_User}, access_policies_::_access_policy::AccessPolicy}, store::storage::AppStateArc, utils::db::generic_handlers::subscriptions::handle_generic_gql_collection_subscription_base}; +use crate::{ + db::{ + access_policies::GQLSet_AccessPolicy, + access_policies_::_access_policy::AccessPolicy, + users::{GQLSet_User, User}, + }, + store::storage::AppStateArc, + utils::db::generic_handlers::subscriptions::handle_generic_gql_collection_subscription_base, +}; pub fn start_db_live_cache(app_state: AppStateArc) { - let app_state_c1 = app_state.clone(); - // keep the 
ADMIN_USER_IDS_CACHE up to date - tokio::spawn(async move { - loop { - let system_user_jwt = UserJWTData { id: SYSTEM_USER_ID.o(), email: SYSTEM_USER_EMAIL.o(), readOnly: Some(true) }; - let mut stream = handle_generic_gql_collection_subscription_base::(app_state_c1.live_queries.clone(), Some(system_user_jwt), "users".o(), Some(json!({ - // todo: once live-query system supports matching on jsonb subfields, use that here - }))).await; - if let Result::<(), Error>::Err(err) = try { - loop { - let next_stream_result = stream.next().await.ok_or(anyhow!("Stream unexpectedly ended."))?; - let users: Vec = next_stream_result?.nodes; - // for now, we must filter to admin users here, because live-query system doesn't support matching on jsonb subfields yet - let admin_users = users.into_iter().filter(|a| a.permissionGroups.admin).collect_vec(); + let app_state_c1 = app_state.clone(); + // keep the ADMIN_USER_IDS_CACHE up to date + tokio::spawn(async move { + loop { + let system_user_jwt = UserJWTData { id: SYSTEM_USER_ID.o(), email: SYSTEM_USER_EMAIL.o(), readOnly: Some(true) }; + let mut stream = handle_generic_gql_collection_subscription_base::( + app_state_c1.live_queries.clone(), + Some(system_user_jwt), + "users".o(), + Some(json!({ + // todo: once live-query system supports matching on jsonb subfields, use that here + })), + ) + .await; + if let Result::<(), Error>::Err(err) = try { + loop { + let next_stream_result = stream.next().await.ok_or(anyhow!("Stream unexpectedly ended."))?; + let users: Vec = next_stream_result?.nodes; + // for now, we must filter to admin users here, because live-query system doesn't support matching on jsonb subfields yet + let admin_users = users.into_iter().filter(|a| a.permissionGroups.admin).collect_vec(); - let admin_user_ids: HashSet = admin_users.into_iter().map(|a| a.id.to_string()).collect(); - let mut cache = ADMIN_USER_IDS_CACHE.lock().map_err(to_anyhow)?; - *cache = admin_user_ids; - } - } { - error!("Error in db-live-cache updater for ACCESS_POLICIES_CACHE. Restarting live-query in a moment. @err:{:?}", err); - tokio_sleep(1000).await; - continue; - } - } - }); + let admin_user_ids: HashSet = admin_users.into_iter().map(|a| a.id.to_string()).collect(); + let mut cache = ADMIN_USER_IDS_CACHE.lock().map_err(to_anyhow)?; + *cache = admin_user_ids; + } + } { + error!("Error in db-live-cache updater for ACCESS_POLICIES_CACHE. Restarting live-query in a moment. 
@err:{:?}", err); + tokio_sleep(1000).await; + continue; + } + } + }); - let app_state_c2 = app_state.clone(); - // keep the ACCESS_POLICIES_CACHE up to date - tokio::spawn(async move { - loop { - let system_user_jwt = UserJWTData { id: SYSTEM_USER_ID.o(), email: SYSTEM_USER_EMAIL.o(), readOnly: Some(true) }; - let mut stream = handle_generic_gql_collection_subscription_base::(app_state_c2.live_queries.clone(), Some(system_user_jwt), "accessPolicies".o(), None).await; - if let Result::<(), Error>::Err(err) = try { - loop { - let next_stream_result = stream.next().await.ok_or(anyhow!("Stream unexpectedly ended."))?; - let access_policies: Vec = next_stream_result?.nodes; + let app_state_c2 = app_state.clone(); + // keep the ACCESS_POLICIES_CACHE up to date + tokio::spawn(async move { + loop { + let system_user_jwt = UserJWTData { id: SYSTEM_USER_ID.o(), email: SYSTEM_USER_EMAIL.o(), readOnly: Some(true) }; + let mut stream = handle_generic_gql_collection_subscription_base::(app_state_c2.live_queries.clone(), Some(system_user_jwt), "accessPolicies".o(), None).await; + if let Result::<(), Error>::Err(err) = try { + loop { + let next_stream_result = stream.next().await.ok_or(anyhow!("Stream unexpectedly ended."))?; + let access_policies: Vec = next_stream_result?.nodes; - let mut access_policies_map = HashMap::new(); - for policy in access_policies { - access_policies_map.insert(policy.id.to_string(), policy); - } + let mut access_policies_map = HashMap::new(); + for policy in access_policies { + access_policies_map.insert(policy.id.to_string(), policy); + } - let mut cache = ACCESS_POLICIES_CACHE.lock().map_err(to_anyhow)?; - *cache = access_policies_map; - } - } { - error!("Error in db-live-cache updater for ACCESS_POLICIES_CACHE. Restarting live-query in a moment. @err:{:?}", err); - tokio_sleep(1000).await; - continue; - } - } - }); + let mut cache = ACCESS_POLICIES_CACHE.lock().map_err(to_anyhow)?; + *cache = access_policies_map; + } + } { + error!("Error in db-live-cache updater for ACCESS_POLICIES_CACHE. Restarting live-query in a moment. @err:{:?}", err); + tokio_sleep(1000).await; + continue; + } + } + }); } static ADMIN_USER_IDS_CACHE: Lazy>> = Lazy::new(|| Mutex::new(HashSet::new())); pub fn get_admin_user_ids_cached() -> Result, Error> { - let cache = ADMIN_USER_IDS_CACHE.lock().map_err(to_anyhow)?; - let result: HashSet = cache.iter().cloned().collect(); - if result.len() == 0 { bail!("Admin user-ids has not yet been populated in the db-live-cache."); } - Ok(result) + let cache = ADMIN_USER_IDS_CACHE.lock().map_err(to_anyhow)?; + let result: HashSet = cache.iter().cloned().collect(); + if result.len() == 0 { + bail!("Admin user-ids has not yet been populated in the db-live-cache."); + } + Ok(result) } static ACCESS_POLICIES_CACHE: Lazy>> = Lazy::new(|| Mutex::new(HashMap::new())); pub fn get_access_policy_cached(policy_id: &str) -> Result { - let cache = ACCESS_POLICIES_CACHE.lock().map_err(to_anyhow)?; - let result = cache.get(policy_id).cloned().ok_or_else(|| anyhow!("Policy \"{}\" not found in cache. @policies_in_cache_count:{}", policy_id, cache.len()))?; - Ok(result) + let cache = ACCESS_POLICIES_CACHE.lock().map_err(to_anyhow)?; + let result = cache.get(policy_id).cloned().ok_or_else(|| anyhow!("Policy \"{}\" not found in cache. @policies_in_cache_count:{}", policy_id, cache.len()))?; + Ok(result) } -// todo: maybe add listener on `globalData` table's `dbReadOnly` field, and use it to block new write-transactions (eg. 
during migrations) \ No newline at end of file +// todo: maybe add listener on `globalData` table's `dbReadOnly` field, and use it to block new write-transactions (eg. during migrations) diff --git a/Packages/app-server/src/links/mod.rs b/Packages/app-server/src/links/mod.rs index 94f6b1a57..f68612361 100644 --- a/Packages/app-server/src/links/mod.rs +++ b/Packages/app-server/src/links/mod.rs @@ -2,5 +2,5 @@ pub mod db_live_cache; pub mod monitor_backend_link; pub mod pgclient; pub mod pgclient_ { - pub mod wal_structs; -} \ No newline at end of file + pub mod wal_structs; +} diff --git a/Packages/app-server/src/links/monitor_backend_link.rs b/Packages/app-server/src/links/monitor_backend_link.rs index 45c8a406f..8d0eda025 100644 --- a/Packages/app-server/src/links/monitor_backend_link.rs +++ b/Packages/app-server/src/links/monitor_backend_link.rs @@ -1,98 +1,120 @@ use std::net::SocketAddr; -use rust_shared::{axum, futures, http_body_util::{BodyExt, Full}, links::app_server_to_monitor_backend::Message_ASToMB, tower, tower_http}; -use axum::{Error, extract::{ws::{WebSocket, Message}, WebSocketUpgrade, Extension, ConnectInfo}, response::IntoResponse, body::HttpBody}; +use axum::{ + body::HttpBody, + extract::{ + ws::{Message, WebSocket}, + ConnectInfo, Extension, WebSocketUpgrade, + }, + response::IntoResponse, + Error, +}; +use futures::{ + sink::SinkExt, + stream::{SplitSink, SplitStream, StreamExt}, +}; use rust_shared::flume::Receiver; -use futures::{sink::SinkExt, stream::{StreamExt, SplitSink, SplitStream}}; -use rust_shared::hyper::{StatusCode, Response}; +use rust_shared::hyper::{Response, StatusCode}; use rust_shared::once_cell::sync::Lazy; -use rust_shared::{serde::{Serialize, Deserialize}, serde_json, tokio, utils::type_aliases::JSONValue}; use rust_shared::serde_json::json; +use rust_shared::{ + axum, futures, + http_body_util::{BodyExt, Full}, + links::app_server_to_monitor_backend::Message_ASToMB, + tower, tower_http, +}; +use rust_shared::{ + serde::{Deserialize, Serialize}, + serde_json, tokio, + utils::type_aliases::JSONValue, +}; use tracing::{error, warn}; -use crate::utils::{type_aliases::{ABReceiver, ABSender}}; +use crate::utils::type_aliases::{ABReceiver, ABSender}; pub fn is_addr_from_pod(addr: &SocketAddr) -> bool { - addr.ip().is_ipv4() && addr.ip().to_string().starts_with("10.") + addr.ip().is_ipv4() && addr.ip().to_string().starts_with("10.") } pub fn message_of_bad_gateway_for_non_pod_caller(endpoint_name: &str, addr: &SocketAddr) -> String { - error!("The endpoint \"{endpoint_name}\" was called, but the caller was not an in-cluster pod! @callerIP:{}", addr.ip()); - let body_json_val = json!({"error": format!("This endpoint is only meant to be used for in-cluster callers (ie. pods) atm.")}); - let message = body_json_val.to_string(); - return message; + error!("The endpoint \"{endpoint_name}\" was called, but the caller was not an in-cluster pod! @callerIP:{}", addr.ip()); + let body_json_val = json!({"error": format!("This endpoint is only meant to be used for in-cluster callers (ie. 
pods) atm.")}); + let message = body_json_val.to_string(); + return message; } pub fn http_response_of_bad_gateway_for_non_pod_caller(endpoint_name: &str, addr: &SocketAddr) -> Response { - let message = message_of_bad_gateway_for_non_pod_caller(endpoint_name, addr); - let body = axum::body::Body::from(message).boxed_unsync(); - //let body = Full::from(message); - Response::builder().status(StatusCode::BAD_GATEWAY).body(body).unwrap().into_response() + let message = message_of_bad_gateway_for_non_pod_caller(endpoint_name, addr); + let body = axum::body::Body::from(message).boxed_unsync(); + //let body = Full::from(message); + Response::builder().status(StatusCode::BAD_GATEWAY).body(body).unwrap().into_response() } /*pub fn axum_response_of_bad_gateway_for_non_pod_caller(endpoint_name: &str, addr: &SocketAddr) -> Response { - let message = message_of_bad_gateway_for_non_pod_caller(endpoint_name, addr); - (StatusCode::BAD_GATEWAY, message).into_response() + let message = message_of_bad_gateway_for_non_pod_caller(endpoint_name, addr); + (StatusCode::BAD_GATEWAY, message).into_response() }*/ pub async fn monitor_backend_link_handle_ws_upgrade( - //Extension(s1): Extension>, - ConnectInfo(addr): ConnectInfo, - ws: WebSocketUpgrade + //Extension(s1): Extension>, + ConnectInfo(addr): ConnectInfo, + ws: WebSocketUpgrade, ) -> impl IntoResponse { - if !is_addr_from_pod(&addr) { return http_response_of_bad_gateway_for_non_pod_caller("/monitor-backend-link", &addr);} + if !is_addr_from_pod(&addr) { + return http_response_of_bad_gateway_for_non_pod_caller("/monitor-backend-link", &addr); + } - //let r1 = s1.new_receiver(); - ws.on_upgrade(move |socket| handle_socket(socket, addr)).into_response() - //ws.on_upgrade(move |socket| handle_socket(socket, r1, addr)) + //let r1 = s1.new_receiver(); + ws.on_upgrade(move |socket| handle_socket(socket, addr)).into_response() + //ws.on_upgrade(move |socket| handle_socket(socket, r1, addr)) } async fn handle_socket(socket: WebSocket, _addr: SocketAddr) { - /*if !is_addr_from_pod(&addr) { - error!("/monitor-backend-link endpoint was called, but the caller was not an in-cluster pod! @callerIP:{}", addr.ip()); - return; - }*/ - - let (sender, receiver) = socket.split(); - tokio::spawn(write(sender)); - tokio::spawn(read(receiver)); + /*if !is_addr_from_pod(&addr) { + error!("/monitor-backend-link endpoint was called, but the caller was not an in-cluster pod! 
@callerIP:{}", addr.ip()); + return; + }*/ + + let (sender, receiver) = socket.split(); + tokio::spawn(write(sender)); + tokio::spawn(read(receiver)); } pub static MESSAGE_SENDER_TO_MONITOR_BACKEND: Lazy<(ABSender, ABReceiver)> = Lazy::new(|| { - let (mut s1, r1): (ABSender, ABReceiver) = async_broadcast::broadcast(10000); - s1.set_overflow(true); - // we need to return both, else the receiver is dropped, and the channel closes - (s1, r1) + let (mut s1, r1): (ABSender, ABReceiver) = async_broadcast::broadcast(10000); + s1.set_overflow(true); + // we need to return both, else the receiver is dropped, and the channel closes + (s1, r1) }); async fn read(mut receiver: SplitStream) { - while let Some(Ok(_msg)) = receiver.next().await { - /*match msg { - Text(json) => { - - }, - _ => {}, - }*/ - } + while let Some(Ok(_msg)) = receiver.next().await { + /*match msg { + Text(json) => { + + }, + _ => {}, + }*/ + } } async fn write(mut sender: SplitSink) { - let mut msg_receiver = MESSAGE_SENDER_TO_MONITOR_BACKEND.0.new_receiver(); - loop { - //let next_entry = match log_entry_receiver.recv_async().await { - //let next_entry = match log_entry_receiver.recv().await { - let next_msg = match msg_receiver.recv().await { - Ok(a) => a, - Err(_) => break, // if unwrap fails, break loop (since senders are dead anyway) - }; + let mut msg_receiver = MESSAGE_SENDER_TO_MONITOR_BACKEND.0.new_receiver(); + loop { + //let next_entry = match log_entry_receiver.recv_async().await { + //let next_entry = match log_entry_receiver.recv().await { + let next_msg = match msg_receiver.recv().await { + Ok(a) => a, + Err(_) => break, // if unwrap fails, break loop (since senders are dead anyway) + }; - //let next_entry_as_str = serde_json::to_string(&next_entry).unwrap_or_else(|_| "[failed to serialize LogEntry...]".to_string()); - let next_entry_as_str = serde_json::to_string(&next_msg).unwrap_or_else(|_| "[failed to serialize Message_ASToMB...]".to_string()); + //let next_entry_as_str = serde_json::to_string(&next_entry).unwrap_or_else(|_| "[failed to serialize LogEntry...]".to_string()); + let next_entry_as_str = serde_json::to_string(&next_msg).unwrap_or_else(|_| "[failed to serialize Message_ASToMB...]".to_string()); - // in any websocket error, break loop - match sender.send(Message::Text(next_entry_as_str)).await { - Ok(_res) => {}, - Err(err) => { - // only warn, since this is likely to just be a case where the app-server is being re-deployed - warn!("Websocket write-connection to monitor-backend errored:{err}"); - break; - }, - }; - } -} \ No newline at end of file + // in any websocket error, break loop + match sender.send(Message::Text(next_entry_as_str)).await { + Ok(_res) => {}, + Err(err) => { + // only warn, since this is likely to just be a case where the app-server is being re-deployed + warn!("Websocket write-connection to monitor-backend errored:{err}"); + break; + }, + }; + } +} diff --git a/Packages/app-server/src/links/pgclient.rs b/Packages/app-server/src/links/pgclient.rs index 89379a695..5510a1bca 100644 --- a/Packages/app-server/src/links/pgclient.rs +++ b/Packages/app-server/src/links/pgclient.rs @@ -1,113 +1,130 @@ -use std::{env, time::{SystemTime, UNIX_EPOCH, Duration}, task::{Poll}, cmp::max, collections::HashMap}; -use rust_shared::{tokio_postgres, bytes::{Bytes, self}, tokio, utils::{type_aliases::JSONValue, general_::extensions::ToOwnedV}, serde_json, indoc::formatdoc, once_cell::sync::Lazy, itertools::Itertools}; -use deadpool_postgres::{Manager, ManagerConfig, Pool, RecyclingMethod, Runtime, 
PoolConfig}; -use rust_shared::{futures, axum, tower, tower_http}; -use futures::{future, StreamExt, Sink, ready}; -use rust_shared::tokio::{join, select}; -use rust_shared::tokio_postgres::{NoTls, Client, SimpleQueryMessage, SimpleQueryRow, tls::NoTlsStream, Socket, Connection}; -use rust_shared::tokio_postgres::types::{PgLsn}; -use tracing::{info, debug, error, trace, warn}; +use deadpool_postgres::{Manager, ManagerConfig, Pool, PoolConfig, RecyclingMethod, Runtime}; +use futures::{future, ready, Sink, StreamExt}; use rust_shared::anyhow::{anyhow, Error}; use rust_shared::postgres_protocol::message::backend::{LogicalReplicationMessage, ReplicationMessage}; +use rust_shared::tokio::{join, select}; use rust_shared::tokio_postgres::replication::LogicalReplicationStream; +use rust_shared::tokio_postgres::types::PgLsn; +use rust_shared::tokio_postgres::{tls::NoTlsStream, Client, Connection, NoTls, SimpleQueryMessage, SimpleQueryRow, Socket}; +use rust_shared::{axum, futures, tower, tower_http}; +use rust_shared::{ + bytes::{self, Bytes}, + indoc::formatdoc, + itertools::Itertools, + once_cell::sync::Lazy, + serde_json, tokio, tokio_postgres, + utils::{general_::extensions::ToOwnedV, type_aliases::JSONValue}, +}; +use std::{ + cmp::max, + collections::HashMap, + env, + task::Poll, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; +use tracing::{debug, error, info, trace, warn}; -use crate::{store::{live_queries::{LQStorageArc}, storage::AppStateArc}, utils::{db::pg_stream_parsing::{LDChange, OldKeys}}, links::pgclient_::wal_structs::{TableInfo, ColumnInfo, wal_data_tuple_to_row_data}}; +use crate::{ + links::pgclient_::wal_structs::{wal_data_tuple_to_row_data, ColumnInfo, TableInfo}, + store::{live_queries::LQStorageArc, storage::AppStateArc}, + utils::db::pg_stream_parsing::{LDChange, OldKeys}, +}; pub fn start_pgclient_with_restart(app_state: AppStateArc) { - let _handler = tokio::spawn(async move { - let mut errors_hit = 0; - while errors_hit < 1000 { - let (client_replication, connection_replication) = create_client_advanced(true).await; - let result = start_streaming_changes(client_replication, connection_replication, app_state.live_queries.clone()).await; - match result { - Ok(result) => { - //println!("PGClient loop ended for some reason. Result:{:?}", result); - error!("PGClient loop ended for some reason; restarting shortly. Result:{:?}", result); - }, - Err(err) => { - error!("PGClient loop had error; restarting shortly. @error:{:?}", err); - errors_hit += 1; - } - }; - tokio::time::sleep(std::time::Duration::from_millis(1000)).await; - } - }); + let _handler = tokio::spawn(async move { + let mut errors_hit = 0; + while errors_hit < 1000 { + let (client_replication, connection_replication) = create_client_advanced(true).await; + let result = start_streaming_changes(client_replication, connection_replication, app_state.live_queries.clone()).await; + match result { + Ok(result) => { + //println!("PGClient loop ended for some reason. Result:{:?}", result); + error!("PGClient loop ended for some reason; restarting shortly. Result:{:?}", result); + }, + Err(err) => { + error!("PGClient loop had error; restarting shortly. @error:{:?}", err); + errors_hit += 1; + }, + }; + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + } + }); } async fn check_if_db_tables_exist(client: &Client) { - // now we can execute a simple statement just to confirm the connection was made - // had been commented; LogicalReplicationStream (used to?) 
expects all messages with client to be replication messages, so crashes if we run this test-query - let rows = q(&client, "SELECT '123'").await; - assert_eq!(rows[0].get(0).unwrap(), "123", "Simple data-free postgres query failed; something is wrong."); + // now we can execute a simple statement just to confirm the connection was made + // had been commented; LogicalReplicationStream (used to?) expects all messages with client to be replication messages, so crashes if we run this test-query + let rows = q(&client, "SELECT '123'").await; + assert_eq!(rows[0].get(0).unwrap(), "123", "Simple data-free postgres query failed; something is wrong."); - let rows = q(client, "SELECT table_name FROM information_schema.tables WHERE table_schema = 'app'").await; - let found_tables: Vec = rows.into_iter().map(|row| row.get("table_name").unwrap().to_string()).collect(); - info!("Found tables: {:?}", found_tables); - let expected_tables = vec!["users", "maps", "nodes", "nodeRevisions"]; - let missing_tables = expected_tables.iter().filter(|table| !found_tables.contains(&table.to_string())).collect_vec(); - if !missing_tables.is_empty() { - let setup_reminder = "\n\tDid you forget to initialize and seed your local database? See: https://github.com/debate-map/app#reset-db-local"; - error!("Missing tables in database (non-exhaustive): {:?}{}", missing_tables, setup_reminder); - panic!("Missing tables in database (non-exhaustive): {:?}{}", missing_tables, setup_reminder); - } + let rows = q(client, "SELECT table_name FROM information_schema.tables WHERE table_schema = 'app'").await; + let found_tables: Vec = rows.into_iter().map(|row| row.get("table_name").unwrap().to_string()).collect(); + info!("Found tables: {:?}", found_tables); + let expected_tables = vec!["users", "maps", "nodes", "nodeRevisions"]; + let missing_tables = expected_tables.iter().filter(|table| !found_tables.contains(&table.to_string())).collect_vec(); + if !missing_tables.is_empty() { + let setup_reminder = "\n\tDid you forget to initialize and seed your local database? 
See: https://github.com/debate-map/app#reset-db-local"; + error!("Missing tables in database (non-exhaustive): {:?}{}", missing_tables, setup_reminder); + panic!("Missing tables in database (non-exhaustive): {:?}{}", missing_tables, setup_reminder); + } } async fn q(client: &Client, query: &str) -> Vec { - let msgs = client.simple_query(query).await.unwrap(); - msgs.into_iter() - .filter_map(|msg| match msg { - SimpleQueryMessage::Row(row) => Some(row), - _ => None, - }) - .collect() + let msgs = client.simple_query(query).await.unwrap(); + msgs.into_iter() + .filter_map(|msg| match msg { + SimpleQueryMessage::Row(row) => Some(row), + _ => None, + }) + .collect() } pub fn get_tokio_postgres_config() -> tokio_postgres::Config { - // get connection info from env-vars - let ev = |name| { env::var(name).unwrap() }; - info!("Postgres connection-info: postgres://{}:@{}:{}/debate-map", ev("DB_USER"), ev("DB_ADDR"), ev("DB_PORT")); - - let mut cfg = tokio_postgres::Config::new(); - cfg.user(&ev("DB_USER")); - cfg.password(ev("DB_PASSWORD")); - cfg.host(&ev("DB_ADDR")); - cfg.port(ev("DB_PORT").parse::().unwrap()); - cfg.dbname("debate-map"); - cfg + // get connection info from env-vars + let ev = |name| env::var(name).unwrap(); + info!("Postgres connection-info: postgres://{}:@{}:{}/debate-map", ev("DB_USER"), ev("DB_ADDR"), ev("DB_PORT")); + + let mut cfg = tokio_postgres::Config::new(); + cfg.user(&ev("DB_USER")); + cfg.password(ev("DB_PASSWORD")); + cfg.host(&ev("DB_ADDR")); + cfg.port(ev("DB_PORT").parse::().unwrap()); + cfg.dbname("debate-map"); + cfg } /// Only use this if you need the for_replication option. (everything else should use clients taken from the shared pool) async fn create_client_advanced(for_replication: bool) -> (Client, Connection) { - let mut pg_cfg = get_tokio_postgres_config(); - if for_replication { - //db_config += " replication=database"; - //cfg.options(options); - pg_cfg.replication_mode(tokio_postgres::config::ReplicationMode::Logical); - } + let mut pg_cfg = get_tokio_postgres_config(); + if for_replication { + //db_config += " replication=database"; + //cfg.options(options); + pg_cfg.replication_mode(tokio_postgres::config::ReplicationMode::Logical); + } - // connect to the database - let (client, connection) = pg_cfg.connect(NoTls).await.unwrap(); - (client, connection) + // connect to the database + let (client, connection) = pg_cfg.connect(NoTls).await.unwrap(); + (client, connection) } pub fn create_db_pool() -> Pool { - let pg_cfg = get_tokio_postgres_config(); - let mgr_cfg = ManagerConfig { - recycling_method: RecyclingMethod::Fast, - // when using "SET ROLE rls_obeyer", this was needed; it's not needed anymore, now that we use "SET LOCAL ROLE rls_obeyer" (since that restricts the change to just the current transaction) - /*recycling_method: RecyclingMethod::Custom(formatdoc! 
{r#" - SET SESSION AUTHORIZATION DEFAULT; - -- or: RESET ROLE; - "#}),*/ - //recycling_method: RecyclingMethod::Verified, - //recycling_method: RecyclingMethod::Clean, - }; - let mgr = Manager::from_config(pg_cfg, NoTls, mgr_cfg); - //let pool_size = 1; - let pool_size = 30; - //let pool_size = 1050; - let pool = Pool::builder(mgr).max_size(pool_size).runtime(Runtime::Tokio1).build().unwrap(); - pool + let pg_cfg = get_tokio_postgres_config(); + let mgr_cfg = ManagerConfig { + recycling_method: RecyclingMethod::Fast, + // when using "SET ROLE rls_obeyer", this was needed; it's not needed anymore, now that we use "SET LOCAL ROLE rls_obeyer" (since that restricts the change to just the current transaction) + /*recycling_method: RecyclingMethod::Custom(formatdoc! {r#" + SET SESSION AUTHORIZATION DEFAULT; + -- or: RESET ROLE; + "#}),*/ + //recycling_method: RecyclingMethod::Verified, + //recycling_method: RecyclingMethod::Clean, + }; + let mgr = Manager::from_config(pg_cfg, NoTls, mgr_cfg); + //let pool_size = 1; + let pool_size = 30; + //let pool_size = 1050; + let pool = Pool::builder(mgr).max_size(pool_size).runtime(Runtime::Tokio1).build().unwrap(); + pool } /** @@ -117,196 +134,187 @@ pub fn create_db_pool() -> Pool { * 3) Connecting to postgres pod through shell, then running `pg_recvlogical`. * In this function, we use approach 2. */ -pub async fn start_streaming_changes( - client: Client, - connection: Connection, - storage_wrapper: LQStorageArc -) -> Result { -//) -> Result<(Client, Connection), tokio_postgres::Error> { - info!("Starting pgclient::start_streaming_changes..."); +pub async fn start_streaming_changes(client: Client, connection: Connection, storage_wrapper: LQStorageArc) -> Result { + //) -> Result<(Client, Connection), tokio_postgres::Error> { + info!("Starting pgclient::start_streaming_changes..."); + + // the connection object performs the actual communication with the database, so spawn it off to run on its own + let fut1 = tokio::spawn(async move { connection.await }); - // the connection object performs the actual communication with the database, so spawn it off to run on its own - let fut1 = tokio::spawn(async move { - connection.await - }); + let fut2 = tokio::spawn(async move { + check_if_db_tables_exist(&client).await; - let fut2 = tokio::spawn(async move { - check_if_db_tables_exist(&client).await; + //let slot_name = "slot"; + let slot_name = "slot_".to_owned() + &SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis().to_string(); + // todo: now that wal2json is not used for processing the logical-replication stream, probably remove all of the wal2json-specific handling code, to keep things tidy + //let slot_query = format!("CREATE_REPLICATION_SLOT {} TEMPORARY LOGICAL \"wal2json\"", slot_name); + let slot_query = format!("CREATE_REPLICATION_SLOT {} TEMPORARY LOGICAL \"pgoutput\" NOEXPORT_SNAPSHOT", slot_name); + let lsn = q(&client, &slot_query).await[0].get("consistent_point").unwrap().to_owned(); - //let slot_name = "slot"; - let slot_name = "slot_".to_owned() + &SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis().to_string(); - // todo: now that wal2json is not used for processing the logical-replication stream, probably remove all of the wal2json-specific handling code, to keep things tidy - //let slot_query = format!("CREATE_REPLICATION_SLOT {} TEMPORARY LOGICAL \"wal2json\"", slot_name); - let slot_query = format!("CREATE_REPLICATION_SLOT {} TEMPORARY LOGICAL \"pgoutput\" NOEXPORT_SNAPSHOT", slot_name); - let lsn = q(&client, 
&slot_query).await[0].get("consistent_point").unwrap().to_owned(); + client.simple_query("DROP PUBLICATION IF EXISTS dm_app_server_main").await.unwrap(); + client.simple_query("CREATE PUBLICATION dm_app_server_main FOR ALL TABLES").await.unwrap(); - client.simple_query("DROP PUBLICATION IF EXISTS dm_app_server_main").await.unwrap(); - client.simple_query("CREATE PUBLICATION dm_app_server_main FOR ALL TABLES").await.unwrap(); + //let query = format!("START_REPLICATION SLOT {} LOGICAL {}", slot_name, lsn); + let query = format!("START_REPLICATION SLOT {} LOGICAL {} (\"proto_version\" '1', \"publication_names\" 'dm_app_server_main')", slot_name, lsn); - //let query = format!("START_REPLICATION SLOT {} LOGICAL {}", slot_name, lsn); - let query = format!("START_REPLICATION SLOT {} LOGICAL {} (\"proto_version\" '1', \"publication_names\" 'dm_app_server_main')", slot_name, lsn); - - /*let duplex_stream = client.copy_both_simple::(&query).await.unwrap(); - let mut duplex_stream_pin = Box::pin(duplex_stream);*/ + /*let duplex_stream = client.copy_both_simple::(&query).await.unwrap(); + let mut duplex_stream_pin = Box::pin(duplex_stream);*/ - let stream_raw = client.copy_both_simple(&query).await.unwrap(); - let mut stream = Box::pin(LogicalReplicationStream::new(stream_raw)); + let stream_raw = client.copy_both_simple(&query).await.unwrap(); + let mut stream = Box::pin(LogicalReplicationStream::new(stream_raw)); - let mut wal_pos_last_processed: u64 = 0; - // hashmap - let mut table_infos = HashMap::::new(); + let mut wal_pos_last_processed: u64 = 0; + // hashmap + let mut table_infos = HashMap::::new(); - loop { - let event_res_opt = stream.as_mut().next().await; - if event_res_opt.is_none() { - info!("Duplex-stream from pgclient returned a None; breaking listen loop. (parent should spawn new connection soon)"); - break; - } - let event_res = event_res_opt.unwrap(); - let event = match event_res { - Ok(event) => event, - Err(err) => { - // if error is of type that signifies that the the connection has closed, just break the loop - if err.is_closed() { - warn!("Duplex-stream from pgclient returned a connecting-closing error; breaking listen loop. @error:{:?}", err); - break; - } - warn!("Duplex-stream from pgclient returned a non-connecting-closing error; resuming listen loop. @error:{:?}", err); - continue; - }, - }; + loop { + let event_res_opt = stream.as_mut().next().await; + if event_res_opt.is_none() { + info!("Duplex-stream from pgclient returned a None; breaking listen loop. (parent should spawn new connection soon)"); + break; + } + let event_res = event_res_opt.unwrap(); + let event = match event_res { + Ok(event) => event, + Err(err) => { + // if error is of type that signifies that the the connection has closed, just break the loop + if err.is_closed() { + warn!("Duplex-stream from pgclient returned a connecting-closing error; breaking listen loop. @error:{:?}", err); + break; + } + warn!("Duplex-stream from pgclient returned a non-connecting-closing error; resuming listen loop. 
@error:{:?}", err); + continue; + }, + }; - //let event_bytes = event_res.unwrap(); - //let event: LogicalReplicationMessage = serde_json::from_slice(&event_bytes).unwrap(); - //let event = event_res.unwrap(); + //let event_bytes = event_res.unwrap(); + //let event: LogicalReplicationMessage = serde_json::from_slice(&event_bytes).unwrap(); + //let event = event_res.unwrap(); - use ReplicationMessage::*; - use LogicalReplicationMessage::*; - // see here for list of message-types: https://www.postgresql.org/docs/10/protocol-replication.html - match event { - // type: XLogData (WAL data, ie. change of data in db) - XLogData(body) => { - wal_pos_last_processed = max(wal_pos_last_processed, body.wal_end()); - let core_data = body.into_data(); - debug!("Got XLogData/data-change event. @wal_pos_last_processed:{}", wal_pos_last_processed); - match core_data { - Relation(body2) => { - debug!("Got relation event:{:?}", body2); - table_infos.insert(body2.rel_id(), TableInfo { - name: body2.name().unwrap().to_owned(), - columns: body2.columns().into_iter().map(|c| ColumnInfo::from_column(c)).collect_vec(), - }); - }, - // todo: maybe rework my code to just use the existing LogicalReplicationMessage struct, rather than the LDChange struct (which was the data-structure sent by wal2json, but arguably not relevant anymore) - Insert(body2) => { - debug!("Got insert event:{:?}", body2); - let table_info = table_infos.get(&body2.rel_id()).unwrap(); - let new_data = wal_data_tuple_to_row_data(body2.tuple(), table_info, 100).unwrap(); - let change = LDChange { - kind: "insert".o(), - schema: "".o(), - table: table_info.name.o(), - //columnnames: Some(table_info.columns.iter().map(|a| a.name).collect()), - columnnames: Some(new_data.keys().map(|a| a.o()).collect_vec()), - columntypes: Some(table_info.columns.iter().map(|a| a.data_type.name().o()).collect()), - columnvalues: Some(new_data.values().map(|a| a.clone()).collect()), - oldkeys: None, - needs_wal2json_jsonval_fixes: Some(false), - }; - storage_wrapper.notify_of_ld_change(change).await; - }, - Update(body2) => { - debug!("Got update event:{:?}", body2); - let table_info = table_infos.get(&body2.rel_id()).unwrap(); - let new_data = wal_data_tuple_to_row_data(body2.new_tuple(), table_info, 100).unwrap(); - let change = LDChange { - kind: "update".o(), - schema: "".o(), - table: table_info.name.o(), - //columnnames: Some(table_info.columns.iter().map(|a| a.name).collect()), - columnnames: Some(new_data.keys().map(|a| a.o()).collect_vec()), - columntypes: Some(table_info.columns.iter().map(|a| a.data_type.name().o()).collect()), - columnvalues: Some(new_data.values().map(|a| a.clone()).collect()), - oldkeys: None, - needs_wal2json_jsonval_fixes: Some(false), - }; - storage_wrapper.notify_of_ld_change(change).await; - }, - Delete(body2) => { - debug!("Got delete event:{:?}", body2); - let table_info = table_infos.get(&body2.rel_id()).unwrap(); - let key_tuple = body2.key_tuple().ok_or(anyhow!("Delete event didn't have key-tuple!"))?; - let old_data_partial = wal_data_tuple_to_row_data(key_tuple, table_info, 100).unwrap(); - let change = LDChange { - kind: "delete".o(), - schema: "".o(), - table: table_info.name.o(), - //columnnames: Some(table_info.columns.iter().map(|a| a.name).collect()), - columnnames: None, - columntypes: None, - columnvalues: None, - oldkeys: Some(OldKeys { - keynames: old_data_partial.keys().map(|a| a.o()).collect_vec(), - keytypes: table_info.columns.iter().map(|a| a.data_type.name().o()).collect(), - keyvalues: 
old_data_partial.values().map(|a| a.clone()).collect(), - needs_wal2json_jsonval_fixes: Some(false), - }), - needs_wal2json_jsonval_fixes: Some(false), - }; - storage_wrapper.notify_of_ld_change(change).await; - }, - // ignore all other message-types - enum_type => { - debug!("Got other event: {:?}", enum_type); - }, - } - }, - // type: keepalive message - PrimaryKeepAlive(data) => { - let should_send_response = data.reply() == 1; - debug!("Got keepalive message:{:x?} @should_send_response:{}", data, should_send_response); + use LogicalReplicationMessage::*; + use ReplicationMessage::*; + // see here for list of message-types: https://www.postgresql.org/docs/10/protocol-replication.html + match event { + // type: XLogData (WAL data, ie. change of data in db) + XLogData(body) => { + wal_pos_last_processed = max(wal_pos_last_processed, body.wal_end()); + let core_data = body.into_data(); + debug!("Got XLogData/data-change event. @wal_pos_last_processed:{}", wal_pos_last_processed); + match core_data { + Relation(body2) => { + debug!("Got relation event:{:?}", body2); + table_infos.insert(body2.rel_id(), TableInfo { name: body2.name().unwrap().to_owned(), columns: body2.columns().into_iter().map(|c| ColumnInfo::from_column(c)).collect_vec() }); + }, + // todo: maybe rework my code to just use the existing LogicalReplicationMessage struct, rather than the LDChange struct (which was the data-structure sent by wal2json, but arguably not relevant anymore) + Insert(body2) => { + debug!("Got insert event:{:?}", body2); + let table_info = table_infos.get(&body2.rel_id()).unwrap(); + let new_data = wal_data_tuple_to_row_data(body2.tuple(), table_info, 100).unwrap(); + let change = LDChange { + kind: "insert".o(), + schema: "".o(), + table: table_info.name.o(), + //columnnames: Some(table_info.columns.iter().map(|a| a.name).collect()), + columnnames: Some(new_data.keys().map(|a| a.o()).collect_vec()), + columntypes: Some(table_info.columns.iter().map(|a| a.data_type.name().o()).collect()), + columnvalues: Some(new_data.values().map(|a| a.clone()).collect()), + oldkeys: None, + needs_wal2json_jsonval_fixes: Some(false), + }; + storage_wrapper.notify_of_ld_change(change).await; + }, + Update(body2) => { + debug!("Got update event:{:?}", body2); + let table_info = table_infos.get(&body2.rel_id()).unwrap(); + let new_data = wal_data_tuple_to_row_data(body2.new_tuple(), table_info, 100).unwrap(); + let change = LDChange { + kind: "update".o(), + schema: "".o(), + table: table_info.name.o(), + //columnnames: Some(table_info.columns.iter().map(|a| a.name).collect()), + columnnames: Some(new_data.keys().map(|a| a.o()).collect_vec()), + columntypes: Some(table_info.columns.iter().map(|a| a.data_type.name().o()).collect()), + columnvalues: Some(new_data.values().map(|a| a.clone()).collect()), + oldkeys: None, + needs_wal2json_jsonval_fixes: Some(false), + }; + storage_wrapper.notify_of_ld_change(change).await; + }, + Delete(body2) => { + debug!("Got delete event:{:?}", body2); + let table_info = table_infos.get(&body2.rel_id()).unwrap(); + let key_tuple = body2.key_tuple().ok_or(anyhow!("Delete event didn't have key-tuple!"))?; + let old_data_partial = wal_data_tuple_to_row_data(key_tuple, table_info, 100).unwrap(); + let change = LDChange { + kind: "delete".o(), + schema: "".o(), + table: table_info.name.o(), + //columnnames: Some(table_info.columns.iter().map(|a| a.name).collect()), + columnnames: None, + columntypes: None, + columnvalues: None, + oldkeys: Some(OldKeys { + keynames: 
old_data_partial.keys().map(|a| a.o()).collect_vec(), + keytypes: table_info.columns.iter().map(|a| a.data_type.name().o()).collect(), + keyvalues: old_data_partial.values().map(|a| a.clone()).collect(), + needs_wal2json_jsonval_fixes: Some(false), + }), + needs_wal2json_jsonval_fixes: Some(false), + }; + storage_wrapper.notify_of_ld_change(change).await; + }, + // ignore all other message-types + enum_type => { + debug!("Got other event: {:?}", enum_type); + }, + } + }, + // type: keepalive message + PrimaryKeepAlive(data) => { + let should_send_response = data.reply() == 1; + debug!("Got keepalive message:{:x?} @should_send_response:{}", data, should_send_response); - // todo: someday probably use a timer to send keepalive messages to server proactively, since in some cases server warning can come too late (https://github.com/sfackler/rust-postgres/pull/696#discussion_r789698737) - if should_send_response { - debug!("Responding to keepalive message/warning... @wal_pos_last_processed:{}", wal_pos_last_processed); - let lsn = PgLsn::from(wal_pos_last_processed); - let ts: i64 = PG_EPOCH.elapsed().unwrap().as_micros().try_into().unwrap(); - let request_server_response = 1; // If 1, the client requests the server to reply to this message immediately. This can be used to ping the server, to test if the connection is still healthy. - stream.as_mut().standby_status_update(lsn, lsn, lsn, ts, request_server_response).await?; - } - }, - // todo: maybe delay ingesting insert/update/delete events until after we get the corresponding "commit" message (unsure if this is necessary) - //Commit(commit) => {}, - _ => debug!("Got unknown replication event:{:?}", event), - } - } + // todo: someday probably use a timer to send keepalive messages to server proactively, since in some cases server warning can come too late (https://github.com/sfackler/rust-postgres/pull/696#discussion_r789698737) + if should_send_response { + debug!("Responding to keepalive message/warning... @wal_pos_last_processed:{}", wal_pos_last_processed); + let lsn = PgLsn::from(wal_pos_last_processed); + let ts: i64 = PG_EPOCH.elapsed().unwrap().as_micros().try_into().unwrap(); + let request_server_response = 1; // If 1, the client requests the server to reply to this message immediately. This can be used to ping the server, to test if the connection is still healthy. + stream.as_mut().standby_status_update(lsn, lsn, lsn, ts, request_server_response).await?; + } + }, + // todo: maybe delay ingesting insert/update/delete events until after we get the corresponding "commit" message (unsure if this is necessary) + //Commit(commit) => {}, + _ => debug!("Got unknown replication event:{:?}", event), + } + } - Ok(client) - /*let connection = join!(handle).0.unwrap(); - Ok((client, connection))*/ - }); + Ok(client) + /*let connection = join!(handle).0.unwrap(); + Ok((client, connection))*/ + }); - select! 
{ - fut1_result_in_join_result = fut1 => { - match fut1_result_in_join_result { - Err(join_err) => Err(Error::new(join_err)), // recast the join error as our main error - Ok(fut1_result) => match fut1_result { - Err(err) => { - error!("Connection error in base connection-thread of pgclient::start_streaming_changes: {}", err); - Err(Error::new(err)) - }, - Ok(_) => Err(anyhow!("Base connection-thread of start_streaming_changes ended for some reason; returning, so can be restarted.")), - }, - } - }, - fut2_result_in_join_result = fut2 => { - match fut2_result_in_join_result { - Err(join_err) => Err(Error::new(join_err)), // recast the join error as our main error - Ok(fut2_result) => fut2_result, - } - }, - } + select! { + fut1_result_in_join_result = fut1 => { + match fut1_result_in_join_result { + Err(join_err) => Err(Error::new(join_err)), // recast the join error as our main error + Ok(fut1_result) => match fut1_result { + Err(err) => { + error!("Connection error in base connection-thread of pgclient::start_streaming_changes: {}", err); + Err(Error::new(err)) + }, + Ok(_) => Err(anyhow!("Base connection-thread of start_streaming_changes ended for some reason; returning, so can be restarted.")), + }, + } + }, + fut2_result_in_join_result = fut2 => { + match fut2_result_in_join_result { + Err(join_err) => Err(Error::new(join_err)), // recast the join error as our main error + Ok(fut2_result) => fut2_result, + } + }, + } } /// Postgres epoch is 2000-01-01T00:00:00Z -static PG_EPOCH: Lazy = Lazy::new(|| UNIX_EPOCH + Duration::from_secs(946_684_800)); \ No newline at end of file +static PG_EPOCH: Lazy = Lazy::new(|| UNIX_EPOCH + Duration::from_secs(946_684_800)); diff --git a/Packages/app-server/src/links/pgclient_/wal_structs.rs b/Packages/app-server/src/links/pgclient_/wal_structs.rs index 923c392cb..2aba01e69 100644 --- a/Packages/app-server/src/links/pgclient_/wal_structs.rs +++ b/Packages/app-server/src/links/pgclient_/wal_structs.rs @@ -1,172 +1,168 @@ -use rust_shared::anyhow::{anyhow, bail, Context, Error, ensure}; +use rust_shared::anyhow::{anyhow, bail, ensure, Context, Error}; use rust_shared::bytes::Bytes; use rust_shared::itertools::Itertools; use rust_shared::postgres_protocol::message::backend::{Column, Tuple}; -use rust_shared::serde_json::{Map, self}; +use rust_shared::serde_json::{self, Map}; use rust_shared::to_anyhow; -use rust_shared::tokio_postgres::types::{Type, FromSql}; +use rust_shared::tokio_postgres::types::{FromSql, Type}; use rust_shared::utils::general_::extensions::{IteratorV, ToOwnedV}; -use rust_shared::utils::type_aliases::{RowData, JSONValue}; +use rust_shared::utils::type_aliases::{JSONValue, RowData}; use serde::Deserialize; use crate::utils::db::pg_row_to_json::{pg_cell_to_json_value, StringCollector}; use crate::utils::db::pg_stream_parsing::parse_postgres_array_as_strings; -/// Table info, as collected from "Relation" messages, received from the `pgoutput` plugin. For example contents of `pgoutput` messages, see `@FormatExamples/PGOutput_Messages.md`. +/// Table info, as collected from "Relation" messages, received from the `pgoutput` plugin. For example contents of `pgoutput` messages, see `@FormatExamples/PGOutput_Messages.md`. 
pub struct TableInfo { - pub name: String, - pub columns: Vec, + pub name: String, + pub columns: Vec, } pub struct ColumnInfo { - pub name: String, - pub data_type: Type, + pub name: String, + pub data_type: Type, } impl ColumnInfo { - pub fn from_column(col: &Column) -> Self { - Self { - name: col.name().unwrap().to_owned(), - data_type: Type::from_oid(col.type_id() as u32).unwrap(), - } - } + pub fn from_column(col: &Column) -> Self { + Self { name: col.name().unwrap().to_owned(), data_type: Type::from_oid(col.type_id() as u32).unwrap() } + } } pub fn wal_data_tuple_to_struct<'a, T: for<'de> Deserialize<'de>>(data_tuple: &Tuple, table_info: &TableInfo) -> Result { - let as_json = wal_data_tuple_to_json_value(data_tuple, table_info, 100)?; - Ok(serde_json::from_value(as_json)?) + let as_json = wal_data_tuple_to_json_value(data_tuple, table_info, 100)?; + Ok(serde_json::from_value(as_json)?) } pub fn wal_data_tuple_to_json_value(data_tuple: &Tuple, table_info: &TableInfo, columns_to_process: usize) -> Result { - let row_data = wal_data_tuple_to_row_data(data_tuple, table_info, columns_to_process)?; - Ok(JSONValue::Object(row_data)) + let row_data = wal_data_tuple_to_row_data(data_tuple, table_info, columns_to_process)?; + Ok(JSONValue::Object(row_data)) } pub fn wal_data_tuple_to_row_data(data_tuple: &Tuple, table_info: &TableInfo, columns_to_process: usize) -> Result { - let mut result: Map = Map::new(); - for (i, column) in table_info.columns.iter().take(columns_to_process).enumerate() { - let json_value = data_tuple_entry_to_json_value(data_tuple, column, i)?; - result.insert(column.name.to_owned(), json_value); - } - Ok(result) + let mut result: Map = Map::new(); + for (i, column) in table_info.columns.iter().take(columns_to_process).enumerate() { + let json_value = data_tuple_entry_to_json_value(data_tuple, column, i)?; + result.insert(column.name.to_owned(), json_value); + } + Ok(result) } // keep overall structure in-sync with pg_row_to_json.rs pub fn data_tuple_entry_to_json_value(data_tuple: &Tuple, column: &ColumnInfo, column_i: usize) -> Result { - let f64_to_json_number = |raw_val: f64| -> Result { - let temp = serde_json::Number::from_f64(raw_val.into()).ok_or(anyhow!("invalid json-float"))?; - Ok(JSONValue::Number(temp)) - }; - Ok(match column.data_type { - // for rust-postgres <> postgres type-mappings: https://docs.rs/postgres/latest/postgres/types/trait.FromSql.html#types - // for postgres types: https://www.postgresql.org/docs/7.4/datatype.html#DATATYPE-TABLE - - // single types - Type::BOOL => get_basic(data_tuple, column, column_i, |a: bool| Ok(JSONValue::Bool(a)))?, - Type::INT2 => get_basic(data_tuple, column, column_i, |a: i16| Ok(JSONValue::Number(serde_json::Number::from(a))))?, - Type::INT4 => get_basic(data_tuple, column, column_i, |a: i32| Ok(JSONValue::Number(serde_json::Number::from(a))))?, - Type::INT8 => get_basic(data_tuple, column, column_i, |a: i64| Ok(JSONValue::Number(serde_json::Number::from(a))))?, - Type::TEXT | Type::VARCHAR => get_basic(data_tuple, column, column_i, |a: String| Ok(JSONValue::String(a)))?, - Type::JSON | Type::JSONB => get_basic(data_tuple, column, column_i, |a: JSONValue| Ok(a))?, - Type::FLOAT4 => get_basic(data_tuple, column, column_i, |a: f32| Ok(f64_to_json_number(a.into())?))?, - Type::FLOAT8 => get_basic(data_tuple, column, column_i, |a: f64| Ok(f64_to_json_number(a)?))?, - // these types require a custom StringCollector struct as an intermediary - //Type::TS_VECTOR => get_basic(data_tuple, column, column_i, |a: 
StringCollector| Ok(JSONValue::String(a.0)))?, - Type::TS_VECTOR => JSONValue::String("n/a".o()), // todo: implement actual handling here - - // array types - Type::BOOL_ARRAY => get_array(data_tuple, column, column_i, |a: bool| Ok(JSONValue::Bool(a)))?, - Type::INT2_ARRAY => get_array(data_tuple, column, column_i, |a: i16| Ok(JSONValue::Number(serde_json::Number::from(a))))?, - Type::INT4_ARRAY => get_array(data_tuple, column, column_i, |a: i32| Ok(JSONValue::Number(serde_json::Number::from(a))))?, - Type::INT8_ARRAY => get_array(data_tuple, column, column_i, |a: i64| Ok(JSONValue::Number(serde_json::Number::from(a))))?, - Type::TEXT_ARRAY | Type::VARCHAR_ARRAY => get_array(data_tuple, column, column_i, |a: String| Ok(JSONValue::String(a)))?, - Type::JSON_ARRAY | Type::JSONB_ARRAY => get_array(data_tuple, column, column_i, |a: JSONValue| Ok(a))?, - Type::FLOAT4_ARRAY => get_array(data_tuple, column, column_i, |a: f32| Ok(f64_to_json_number(a.into())?))?, - Type::FLOAT8_ARRAY => get_array(data_tuple, column, column_i, |a: f64| Ok(f64_to_json_number(a)?))?, - // these types require a custom StringCollector struct as an intermediary - //Type::TS_VECTOR_ARRAY => get_array(data_tuple, column, column_i, |a: StringCollector| Ok(JSONValue::String(a.0)))?, - Type::TS_VECTOR_ARRAY => JSONValue::Array(vec![]), // todo: implement actual handling here - - _ => bail!("Cannot convert pg-cell \"{}\" of type \"{}\" to a JSONValue.", column.name, column.data_type.name()), - }) + let f64_to_json_number = |raw_val: f64| -> Result { + let temp = serde_json::Number::from_f64(raw_val.into()).ok_or(anyhow!("invalid json-float"))?; + Ok(JSONValue::Number(temp)) + }; + Ok(match column.data_type { + // for rust-postgres <> postgres type-mappings: https://docs.rs/postgres/latest/postgres/types/trait.FromSql.html#types + // for postgres types: https://www.postgresql.org/docs/7.4/datatype.html#DATATYPE-TABLE + + // single types + Type::BOOL => get_basic(data_tuple, column, column_i, |a: bool| Ok(JSONValue::Bool(a)))?, + Type::INT2 => get_basic(data_tuple, column, column_i, |a: i16| Ok(JSONValue::Number(serde_json::Number::from(a))))?, + Type::INT4 => get_basic(data_tuple, column, column_i, |a: i32| Ok(JSONValue::Number(serde_json::Number::from(a))))?, + Type::INT8 => get_basic(data_tuple, column, column_i, |a: i64| Ok(JSONValue::Number(serde_json::Number::from(a))))?, + Type::TEXT | Type::VARCHAR => get_basic(data_tuple, column, column_i, |a: String| Ok(JSONValue::String(a)))?, + Type::JSON | Type::JSONB => get_basic(data_tuple, column, column_i, |a: JSONValue| Ok(a))?, + Type::FLOAT4 => get_basic(data_tuple, column, column_i, |a: f32| Ok(f64_to_json_number(a.into())?))?, + Type::FLOAT8 => get_basic(data_tuple, column, column_i, |a: f64| Ok(f64_to_json_number(a)?))?, + // these types require a custom StringCollector struct as an intermediary + //Type::TS_VECTOR => get_basic(data_tuple, column, column_i, |a: StringCollector| Ok(JSONValue::String(a.0)))?, + Type::TS_VECTOR => JSONValue::String("n/a".o()), // todo: implement actual handling here + + // array types + Type::BOOL_ARRAY => get_array(data_tuple, column, column_i, |a: bool| Ok(JSONValue::Bool(a)))?, + Type::INT2_ARRAY => get_array(data_tuple, column, column_i, |a: i16| Ok(JSONValue::Number(serde_json::Number::from(a))))?, + Type::INT4_ARRAY => get_array(data_tuple, column, column_i, |a: i32| Ok(JSONValue::Number(serde_json::Number::from(a))))?, + Type::INT8_ARRAY => get_array(data_tuple, column, column_i, |a: i64| 
Ok(JSONValue::Number(serde_json::Number::from(a))))?, + Type::TEXT_ARRAY | Type::VARCHAR_ARRAY => get_array(data_tuple, column, column_i, |a: String| Ok(JSONValue::String(a)))?, + Type::JSON_ARRAY | Type::JSONB_ARRAY => get_array(data_tuple, column, column_i, |a: JSONValue| Ok(a))?, + Type::FLOAT4_ARRAY => get_array(data_tuple, column, column_i, |a: f32| Ok(f64_to_json_number(a.into())?))?, + Type::FLOAT8_ARRAY => get_array(data_tuple, column, column_i, |a: f64| Ok(f64_to_json_number(a)?))?, + // these types require a custom StringCollector struct as an intermediary + //Type::TS_VECTOR_ARRAY => get_array(data_tuple, column, column_i, |a: StringCollector| Ok(JSONValue::String(a.0)))?, + Type::TS_VECTOR_ARRAY => JSONValue::Array(vec![]), // todo: implement actual handling here + + _ => bail!("Cannot convert pg-cell \"{}\" of type \"{}\" to a JSONValue.", column.name, column.data_type.name()), + }) } fn get_basic<'a, T: for<'de> Deserialize<'de>>(data_tuple: &'a Tuple, column: &ColumnInfo, column_i: usize, val_to_json_val: impl Fn(T) -> Result) -> Result { - let cell_data = data_tuple.tuple_data().get(column_i).ok_or(anyhow!("No data in tuple for column-index {column_i}."))?; - use rust_shared::postgres_protocol::message::backend::TupleData::*; - match cell_data { - Null => Ok(JSONValue::Null), - UnchangedToast => Ok(JSONValue::Null), - Text(val_as_bytes) => { - lds_text_to_json_value_using_pg_data_type(val_as_bytes, column.data_type.clone(), val_to_json_val) - }, - } + let cell_data = data_tuple.tuple_data().get(column_i).ok_or(anyhow!("No data in tuple for column-index {column_i}."))?; + use rust_shared::postgres_protocol::message::backend::TupleData::*; + match cell_data { + Null => Ok(JSONValue::Null), + UnchangedToast => Ok(JSONValue::Null), + Text(val_as_bytes) => lds_text_to_json_value_using_pg_data_type(val_as_bytes, column.data_type.clone(), val_to_json_val), + } } fn get_array<'a, T: for<'de> Deserialize<'de>>(data_tuple: &'a Tuple, column: &ColumnInfo, column_i: usize, val_to_json_val: impl Fn(T) -> Result) -> Result { - let cell_data = data_tuple.tuple_data().get(column_i).ok_or(anyhow!("No data in tuple for column-index {column_i}."))?; - use rust_shared::postgres_protocol::message::backend::TupleData::*; - match cell_data { - Null => Ok(JSONValue::Null), - UnchangedToast => Ok(JSONValue::Null), - Text(val_as_bytes) => { - let val_as_u8_slice = val_as_bytes.as_ref(); - - // we cannot use the FromSql implementations from rust-postgres, because the logical-replication streams use textual formats rather than binary formats (FromSql expects binary) - //let val_as_vec_of_type: Vec = Vec::::from_sql(&column.data_type, val_as_u8_slice).map_err(to_anyhow)?; - - let val_as_string = String::from_utf8_lossy(val_as_u8_slice).to_string(); - let val_as_vec_of_string: Vec = parse_postgres_array_as_strings(&val_as_string); - let val_as_vec_of_json_val = val_as_vec_of_string.into_iter().map(|item_as_str| { - let item_type = pg_array_type_to_basic_type(&column.data_type).ok_or(anyhow!("Column's data-type was not an array, despite func-caller thinking it was! 
@dataType:{}", column.data_type.name()))?; - let item_as_bytes = Bytes::copy_from_slice(item_as_str.as_bytes()); - lds_text_to_json_value_using_pg_data_type(&item_as_bytes, item_type, |val| val_to_json_val(val)) - }).try_collect2::>()?; - Ok(JSONValue::Array(val_as_vec_of_json_val)) - }, - } + let cell_data = data_tuple.tuple_data().get(column_i).ok_or(anyhow!("No data in tuple for column-index {column_i}."))?; + use rust_shared::postgres_protocol::message::backend::TupleData::*; + match cell_data { + Null => Ok(JSONValue::Null), + UnchangedToast => Ok(JSONValue::Null), + Text(val_as_bytes) => { + let val_as_u8_slice = val_as_bytes.as_ref(); + + // we cannot use the FromSql implementations from rust-postgres, because the logical-replication streams use textual formats rather than binary formats (FromSql expects binary) + //let val_as_vec_of_type: Vec = Vec::::from_sql(&column.data_type, val_as_u8_slice).map_err(to_anyhow)?; + + let val_as_string = String::from_utf8_lossy(val_as_u8_slice).to_string(); + let val_as_vec_of_string: Vec = parse_postgres_array_as_strings(&val_as_string); + let val_as_vec_of_json_val = val_as_vec_of_string + .into_iter() + .map(|item_as_str| { + let item_type = pg_array_type_to_basic_type(&column.data_type).ok_or(anyhow!("Column's data-type was not an array, despite func-caller thinking it was! @dataType:{}", column.data_type.name()))?; + let item_as_bytes = Bytes::copy_from_slice(item_as_str.as_bytes()); + lds_text_to_json_value_using_pg_data_type(&item_as_bytes, item_type, |val| val_to_json_val(val)) + }) + .try_collect2::>()?; + Ok(JSONValue::Array(val_as_vec_of_json_val)) + }, + } } fn pg_array_type_to_basic_type(array_type: &Type) -> Option { - match *array_type { - Type::BOOL_ARRAY => Some(Type::BOOL), - Type::INT2_ARRAY => Some(Type::INT2), - Type::INT4_ARRAY => Some(Type::INT4), - Type::INT8_ARRAY => Some(Type::INT8), - Type::TEXT_ARRAY => Some(Type::TEXT), - Type::VARCHAR_ARRAY => Some(Type::VARCHAR), - Type::JSON_ARRAY => Some(Type::JSON), - Type::JSONB_ARRAY => Some(Type::JSONB), - Type::FLOAT4_ARRAY => Some(Type::FLOAT4), - Type::FLOAT8_ARRAY => Some(Type::FLOAT8), - //Type::TS_VECTOR_ARRAY => Some(Type::TS_VECTOR), // not needed atm, since tsvector columns are currently ignored by data_tuple_entry_to_json_value - _ => None, - } + match *array_type { + Type::BOOL_ARRAY => Some(Type::BOOL), + Type::INT2_ARRAY => Some(Type::INT2), + Type::INT4_ARRAY => Some(Type::INT4), + Type::INT8_ARRAY => Some(Type::INT8), + Type::TEXT_ARRAY => Some(Type::TEXT), + Type::VARCHAR_ARRAY => Some(Type::VARCHAR), + Type::JSON_ARRAY => Some(Type::JSON), + Type::JSONB_ARRAY => Some(Type::JSONB), + Type::FLOAT4_ARRAY => Some(Type::FLOAT4), + Type::FLOAT8_ARRAY => Some(Type::FLOAT8), + //Type::TS_VECTOR_ARRAY => Some(Type::TS_VECTOR), // not needed atm, since tsvector columns are currently ignored by data_tuple_entry_to_json_value + _ => None, + } } fn lds_text_to_json_value_using_pg_data_type<'a, T: for<'de> Deserialize<'de>>(val_as_bytes: &Bytes, data_type: Type, val_to_json_val: impl Fn(T) -> Result) -> Result { - let val_as_u8_slice = val_as_bytes.as_ref(); - - if data_type == Type::TEXT || data_type == Type::VARCHAR { - return Ok(JSONValue::String(String::from_utf8_lossy(val_as_u8_slice).to_string())); - } - - if val_as_u8_slice.len() == 0 || val_as_u8_slice == b"null" { - return Ok(JSONValue::Null); - } - - if data_type == Type::BOOL { - if val_as_u8_slice == b"t" { - return Ok(JSONValue::Bool(true)); - } else if val_as_u8_slice == b"f" { - return 
Ok(JSONValue::Bool(false)); - } - bail!("Invalid text-value for boolean-column (should be `t` or `f`): {}", String::from_utf8_lossy(val_as_u8_slice).to_string()); - } - - // we cannot use the FromSql implementations from rust-postgres, because the logical-replication streams use textual formats rather than binary formats (FromSql expects binary) - /*let val_as_type: T = T::from_sql(&column.data_type, val_as_u8_slice).map_err(to_anyhow).with_context(|| { - format!("@type:{} @val_as_text:{}", column.data_type.name(), String::from_utf8_lossy(val_as_u8_slice).to_string()) - })?;*/ - - let val_as_type: T = serde_json::from_slice(val_as_u8_slice).map_err(to_anyhow).with_context(|| { - format!("@type:{} @val_as_text:{}", data_type.name(), String::from_utf8_lossy(val_as_u8_slice).to_string()) - })?; - - let val_as_json_val = val_to_json_val(val_as_type); - val_as_json_val -} \ No newline at end of file + let val_as_u8_slice = val_as_bytes.as_ref(); + + if data_type == Type::TEXT || data_type == Type::VARCHAR { + return Ok(JSONValue::String(String::from_utf8_lossy(val_as_u8_slice).to_string())); + } + + if val_as_u8_slice.len() == 0 || val_as_u8_slice == b"null" { + return Ok(JSONValue::Null); + } + + if data_type == Type::BOOL { + if val_as_u8_slice == b"t" { + return Ok(JSONValue::Bool(true)); + } else if val_as_u8_slice == b"f" { + return Ok(JSONValue::Bool(false)); + } + bail!("Invalid text-value for boolean-column (should be `t` or `f`): {}", String::from_utf8_lossy(val_as_u8_slice).to_string()); + } + + // we cannot use the FromSql implementations from rust-postgres, because the logical-replication streams use textual formats rather than binary formats (FromSql expects binary) + /*let val_as_type: T = T::from_sql(&column.data_type, val_as_u8_slice).map_err(to_anyhow).with_context(|| { + format!("@type:{} @val_as_text:{}", column.data_type.name(), String::from_utf8_lossy(val_as_u8_slice).to_string()) + })?;*/ + + let val_as_type: T = serde_json::from_slice(val_as_u8_slice).map_err(to_anyhow).with_context(|| format!("@type:{} @val_as_text:{}", data_type.name(), String::from_utf8_lossy(val_as_u8_slice).to_string()))?; + + let val_as_json_val = val_to_json_val(val_as_type); + val_as_json_val +} diff --git a/Packages/app-server/src/main.rs b/Packages/app-server/src/main.rs index c1bad33ce..d361e98eb 100644 --- a/Packages/app-server/src/main.rs +++ b/Packages/app-server/src/main.rs @@ -9,12 +9,10 @@ #![feature(negative_impls)] #![feature(try_blocks)] #![recursion_limit = "512"] - // for lock-chain checks #![allow(incomplete_features)] #![feature(adt_const_params)] #![feature(generic_const_exprs)] - // sync among all rust crates #![warn(clippy::all, clippy::pedantic, clippy::cargo)] #![allow( @@ -35,12 +33,25 @@ // to avoid false-positives, of certain functions, as well as for [Serialize/Deserialize]_Stub macro-usage (wrt private fields) dead_code, )] +#![feature(stmt_expr_attributes)] // allow attributes on expressions, eg. 
for disabling rustfmt per-expression -use rust_shared::{tokio, sentry, domains::{DomainsConstants, get_env}}; +use rust_shared::{ + domains::{get_env, DomainsConstants}, + sentry, tokio, +}; use store::storage::AppStateArc; -use tracing::{error}; +use tracing::error; -use crate::{links::{pgclient::{self, start_pgclient_with_restart}, db_live_cache::start_db_live_cache}, globals::{set_up_globals, set_up_globals_linux}, router::start_router, store::storage::AppState, utils::general::data_anchor::DataAnchorFor1}; +use crate::{ + globals::{set_up_globals, set_up_globals_linux}, + links::{ + db_live_cache::start_db_live_cache, + pgclient::{self, start_pgclient_with_restart}, + }, + router::start_router, + store::storage::AppState, + utils::general::data_anchor::DataAnchorFor1, +}; // folders (we only use "folder_x/mod.rs" files one-layer deep; keeps the mod-tree structure out of main.rs, while avoiding tons of mod.rs files littering the codebase) mod db; @@ -55,24 +66,27 @@ mod router; //#[tokio::main(flavor = "multi_thread", worker_threads = 7)] #[tokio::main] async fn main() { - let _sentry_guard = set_up_globals(); - #[cfg(unix)] let agent = set_up_globals_linux(); - #[cfg(unix)] let agent_running = agent.start().unwrap(); - println!("Setup of globals completed."); // have one regular print-line, in case logger has issues + let _sentry_guard = set_up_globals(); + #[cfg(unix)] + let agent = set_up_globals_linux(); + #[cfg(unix)] + let agent_running = agent.start().unwrap(); + println!("Setup of globals completed."); // have one regular print-line, in case logger has issues - let app_state = AppState::new_in_arc(); + let app_state = AppState::new_in_arc(); - // start pg-client; this monitors the database for changes, and pushes those change-events to live-query system - start_pgclient_with_restart(app_state.clone()); + // start pg-client; this monitors the database for changes, and pushes those change-events to live-query system + start_pgclient_with_restart(app_state.clone()); - // start db-live-cache; this launches some live-queries for certain data (eg. list of admin user-ids, and access-policies), and keeps those results in memory - start_db_live_cache(app_state.clone()); + // start db-live-cache; this launches some live-queries for certain data (eg. 
list of admin user-ids, and access-policies), and keeps those results in memory + start_db_live_cache(app_state.clone()); - // start router; this handles all "external web requests" - start_router(app_state).await; + // start router; this handles all "external web requests" + start_router(app_state).await; - #[cfg(unix)] { - let agent_ready = agent_running.stop().unwrap(); - agent_ready.shutdown(); - } -} \ No newline at end of file + #[cfg(unix)] + { + let agent_ready = agent_running.stop().unwrap(); + agent_ready.shutdown(); + } +} diff --git a/Packages/app-server/src/router.rs b/Packages/app-server/src/router.rs index b5a32bfdf..0ebadccc7 100644 --- a/Packages/app-server/src/router.rs +++ b/Packages/app-server/src/router.rs @@ -1,89 +1,109 @@ -use rust_shared::{anyhow::{bail, ensure}, axum::{self, extract::{ConnectInfo, Extension}, middleware::Next, response::{self, IntoResponse, Response}}, http_body_util::Full, tokio::net::TcpListener, tower_http::{self, cors::AllowOrigin, trace::TraceLayer}, utils::general::k8s_env}; -use rust_shared::hyper::{Request, Method}; use axum::{ - response::{Html}, - routing::{get}, - Router, http::{ - header::{CONTENT_TYPE, AUTHORIZATION} - }, middleware, + http::header::{AUTHORIZATION, CONTENT_TYPE}, + middleware, + response::Html, + routing::get, + Router, }; use rust_shared::anyhow::Error; +use rust_shared::hyper::{Method, Request}; +use rust_shared::{ + anyhow::{bail, ensure}, + axum::{ + self, + extract::{ConnectInfo, Extension}, + middleware::Next, + response::{self, IntoResponse, Response}, + }, + http_body_util::Full, + tokio::net::TcpListener, + tower_http::{self, cors::AllowOrigin, trace::TraceLayer}, + utils::general::k8s_env, +}; use rust_shared::{serde_json::json, tokio}; -use tower_http::cors::{CorsLayer}; +use tower_http::cors::CorsLayer; -use std::{ - net::{SocketAddr}, process::Command, -}; -use tracing::{info, error}; +use std::{net::SocketAddr, process::Command}; +use tracing::{error, info}; use tracing_subscriber::{self, Layer}; -use crate::{db::general::{backups::try_get_db_dump, sign_in, sign_in_::jwt_utils::resolve_jwt_to_user_info}, globals::{set_up_globals, GLOBAL}, gql::{self, get_gql_data_from_http_request}, links::{monitor_backend_link::{is_addr_from_pod, monitor_backend_link_handle_ws_upgrade, http_response_of_bad_gateway_for_non_pod_caller}, pgclient}, store::storage::{AppState, AppStateArc}, utils::{axum_logging_layer::print_request_response, db::accessors::AccessorContext, general::data_anchor::DataAnchorFor1}}; +use crate::{ + db::general::{backups::try_get_db_dump, sign_in, sign_in_::jwt_utils::resolve_jwt_to_user_info}, + globals::{set_up_globals, GLOBAL}, + gql::{self, get_gql_data_from_http_request}, + links::{ + monitor_backend_link::{http_response_of_bad_gateway_for_non_pod_caller, is_addr_from_pod, monitor_backend_link_handle_ws_upgrade}, + pgclient, + }, + store::storage::{AppState, AppStateArc}, + utils::{axum_logging_layer::print_request_response, db::accessors::AccessorContext, general::data_anchor::DataAnchorFor1}, +}; pub fn get_cors_layer() -> CorsLayer { - // ref: https://docs.rs/tower-http/latest/tower_http/cors/index.html - CorsLayer::new() - //.allow_origin(any()) - .allow_origin(AllowOrigin::predicate(|_, _| { true })) // must use true (ie. 
have response's "allowed-origin" always equal the request origin) instead of "*", since we have credential-inclusion enabled - //.allow_methods(any()), - //.allow_methods(vec![Method::GET, Method::POST]) - .allow_methods(vec![ - Method::GET, - Method::POST, - Method::PUT, - Method::DELETE, - Method::HEAD, - Method::OPTIONS, - Method::CONNECT, - Method::PATCH, - Method::TRACE, - ]) - //.allow_headers(vec!["*", "Authorization", HeaderName::any(), FORWARDED, "X-Forwarded-For", "X-Forwarded-Host", "X-Forwarded-Proto", "X-Requested-With"]) - .allow_headers(vec![ - CONTENT_TYPE, // needed, because the POST requests include a content-type header (which is not on the approved-by-default list) - AUTHORIZATION, // needed for attaching of auth-data - ]) - .allow_credentials(true) + // ref: https://docs.rs/tower-http/latest/tower_http/cors/index.html + CorsLayer::new() + //.allow_origin(any()) + .allow_origin(AllowOrigin::predicate(|_, _| true)) // must use true (ie. have response's "allowed-origin" always equal the request origin) instead of "*", since we have credential-inclusion enabled + //.allow_methods(any()), + //.allow_methods(vec![Method::GET, Method::POST]) + .allow_methods(vec![Method::GET, Method::POST, Method::PUT, Method::DELETE, Method::HEAD, Method::OPTIONS, Method::CONNECT, Method::PATCH, Method::TRACE]) + //.allow_headers(vec!["*", "Authorization", HeaderName::any(), FORWARDED, "X-Forwarded-For", "X-Forwarded-Host", "X-Forwarded-Proto", "X-Requested-With"]) + .allow_headers(vec![ + CONTENT_TYPE, // needed, because the POST requests include a content-type header (which is not on the approved-by-default list) + AUTHORIZATION, // needed for attaching of auth-data + ]) + .allow_credentials(true) } pub async fn start_router(app_state: AppStateArc) { - let app = Router::new() - .route("/", get(|| async { - Html(r#" + let app = Router::new() + .route( + "/", + get(|| async { + Html( + r#"

This is the URL for the app-server, which is not meant to be opened directly by your browser.

Navigate to debatemap.app instead. (or localhost:5100/localhost:5101, if running Debate Map locally)

- "#) - })) - .route("/basic-info", get(|ConnectInfo(addr): ConnectInfo| async move { - if !is_addr_from_pod(&addr) { return http_response_of_bad_gateway_for_non_pod_caller("/monitor-backend-link", &addr); } + "#, + ) + }), + ) + .route( + "/basic-info", + get(|ConnectInfo(addr): ConnectInfo| async move { + if !is_addr_from_pod(&addr) { + return http_response_of_bad_gateway_for_non_pod_caller("/monitor-backend-link", &addr); + } - let memUsed = GLOBAL.get(); - println!("Memory used: {memUsed} bytes"); - let res_json = json!({ - "memUsed": memUsed, - }); - //axum::response::Json(res_json) - //Full::from(res_json) - Response::builder().body(res_json.to_string()).unwrap().into_response() - })) - .route("/monitor-backend-link", get(monitor_backend_link_handle_ws_upgrade)); + let memUsed = GLOBAL.get(); + println!("Memory used: {memUsed} bytes"); + let res_json = json!({ + "memUsed": memUsed, + }); + //axum::response::Json(res_json) + //Full::from(res_json) + Response::builder().body(res_json.to_string()).unwrap().into_response() + }), + ) + .route("/monitor-backend-link", get(monitor_backend_link_handle_ws_upgrade)); - //let (client, connection) = pgclient::create_client(false).await; - let app = gql::extend_router(app, app_state.clone()).await; + //let (client, connection) = pgclient::create_client(false).await; + let app = gql::extend_router(app, app_state.clone()).await; - // add sign-in routes - let app = sign_in::extend_router(app).await; + // add sign-in routes + let app = sign_in::extend_router(app).await; - // cors layer apparently must be added after the stuff it needs to apply to - let app = app - .layer(Extension(app_state.clone())) - //.with_state(app_state.clone()) // for new version of axum apparently - .layer(Extension(middleware::from_fn::<_, Response>(print_request_response))) - .layer(get_cors_layer()) - .layer(TraceLayer::new_for_http()); + // cors layer apparently must be added after the stuff it needs to apply to + let app = app + .layer(Extension(app_state.clone())) + //.with_state(app_state.clone()) // for new version of axum apparently + .layer(Extension(middleware::from_fn::<_, Response>(print_request_response))) + .layer(get_cors_layer()) + .layer(TraceLayer::new_for_http()); - let addr = SocketAddr::from(([0, 0, 0, 0], 5110)); // ip of 0.0.0.0 means it can receive connections from outside this pod (eg. other pods, the load-balancer) - let listener = TcpListener::bind(&addr).await.unwrap(); - let server_fut = axum::serve(listener, app.into_make_service_with_connect_info::()); - info!("App-server launched. @env:{:?} @logical_cpus:{} @physical_cpus:{}", k8s_env(), num_cpus::get(), num_cpus::get_physical()); - server_fut.await.unwrap(); -} \ No newline at end of file + let addr = SocketAddr::from(([0, 0, 0, 0], 5110)); // ip of 0.0.0.0 means it can receive connections from outside this pod (eg. other pods, the load-balancer) + let listener = TcpListener::bind(&addr).await.unwrap(); + let server_fut = axum::serve(listener, app.into_make_service_with_connect_info::()); + info!("App-server launched. 
@env:{:?} @logical_cpus:{} @physical_cpus:{}", k8s_env(), num_cpus::get(), num_cpus::get_physical()); + server_fut.await.unwrap(); +} diff --git a/Packages/app-server/src/store/live_queries.rs b/Packages/app-server/src/store/live_queries.rs index 437d72b94..a167af42e 100644 --- a/Packages/app-server/src/store/live_queries.rs +++ b/Packages/app-server/src/store/live_queries.rs @@ -1,132 +1,125 @@ -use std::borrow::Cow; -use std::cell::RefCell; -use std::collections::{HashMap, HashSet}; -use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::rc::Rc; -use std::str::FromStr; -use std::sync::Arc; -use rust_shared::anyhow::Error; -use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; -use rust_shared::async_graphql::{Schema, MergedObject, MergedSubscription, ObjectType, Data, Result, SubscriptionType}; -use rust_shared::utils::mtx::mtx::Mtx; -use rust_shared::{futures, axum, tower, tower_http, new_mtx, flume}; -use axum::http::Method; +use axum::extract::ws::{CloseFrame, Message}; +use axum::extract::{FromRequest, WebSocketUpgrade}; use axum::http::header::CONTENT_TYPE; +use axum::http::Method; +use axum::http::{self, Request, Response, StatusCode}; use axum::response::{self, IntoResponse}; -use axum::routing::{get, post, MethodFilter, on_service}; +use axum::routing::{get, on_service, post, MethodFilter}; use axum::{extract, Router}; -use rust_shared::flume::{Sender, Receiver, unbounded}; +use futures_util::future::{BoxFuture, Ready}; +use futures_util::stream::{SplitSink, SplitStream}; +use futures_util::{future, FutureExt, Sink, SinkExt, Stream, StreamExt}; +use rust_shared::anyhow::Error; +use rust_shared::async_graphql::futures_util::task::{Context, Poll}; +use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; +use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; +use rust_shared::async_graphql::{Data, MergedObject, MergedSubscription, ObjectType, Result, Schema, SubscriptionType}; +use rust_shared::flume::{unbounded, Receiver, Sender}; use rust_shared::serde::de::DeserializeOwned; use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::{json, Map}; use rust_shared::tokio; use rust_shared::tokio::sync::{mpsc, Mutex, RwLock, RwLockWriteGuard}; use rust_shared::tokio_postgres::{Client, Row}; -use tower::Service; -use tower_http::cors::{CorsLayer}; -use rust_shared::async_graphql::futures_util::task::{Context, Poll}; -use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; -use axum::extract::ws::{CloseFrame, Message}; -use axum::extract::{FromRequest, WebSocketUpgrade}; -use axum::http::{self, Request, Response, StatusCode}; -use futures_util::future::{BoxFuture, Ready}; -use futures_util::stream::{SplitSink, SplitStream}; -use futures_util::{future, Sink, SinkExt, Stream, StreamExt, FutureExt}; +use rust_shared::utils::mtx::mtx::Mtx; use rust_shared::uuid::Uuid; +use rust_shared::{axum, flume, futures, new_mtx, tower, tower_http}; +use std::borrow::Cow; +use std::cell::RefCell; +use std::collections::{HashMap, HashSet}; +use std::convert::Infallible; +use std::future::Future; +use std::pin::Pin; +use std::rc::Rc; +use std::str::FromStr; +use std::sync::Arc; +use tower::Service; +use tower_http::cors::CorsLayer; use tracing::error; use crate::store::live_queries_::lq_key::LQKey; use crate::utils::db::filter::{entry_matches_filter, QueryFilter}; use crate::utils::db::pg_stream_parsing::LDChange; -use 
crate::utils::db::queries::{get_entries_in_collection}; +use crate::utils::db::queries::get_entries_in_collection; use crate::utils::general::general::rw_locked_hashmap__get_entry_or_insert_with; -use crate::utils::type_aliases::{PGClientObject, DBPool, DBPoolArc}; +use crate::utils::type_aliases::{DBPool, DBPoolArc, PGClientObject}; -use super::live_queries_::lq_group::lq_group::{LQGroup}; +use super::live_queries_::lq_group::lq_group::LQGroup; use super::live_queries_::lq_group::lq_group_impl::get_lq_group_key; use super::live_queries_::lq_instance::LQEntryWatcher; pub enum DropLQWatcherMsg { - Drop_ByCollectionAndFilterAndStreamID(String, QueryFilter, Uuid), + Drop_ByCollectionAndFilterAndStreamID(String, QueryFilter, Uuid), } pub type LQStorageArc = Arc; pub struct LQStorage { - pub db_pool: Arc, - pub query_groups: RwLock>>, - pub channel_for_lq_watcher_drops__sender_base: Sender, - pub channel_for_lq_watcher_drops__receiver_base: Receiver, + pub db_pool: Arc, + pub query_groups: RwLock>>, + pub channel_for_lq_watcher_drops__sender_base: Sender, + pub channel_for_lq_watcher_drops__receiver_base: Receiver, } impl LQStorage { - fn new(db_pool: DBPoolArc) -> Self { - let (s1, r1): (Sender, Receiver) = flume::unbounded(); - Self { - db_pool, - query_groups: RwLock::new(HashMap::new()), - channel_for_lq_watcher_drops__sender_base: s1, - channel_for_lq_watcher_drops__receiver_base: r1, - } - } - pub fn new_in_arc(db_pool: DBPoolArc) -> LQStorageArc { - let wrapper = Arc::new(Self::new(db_pool)); + fn new(db_pool: DBPoolArc) -> Self { + let (s1, r1): (Sender, Receiver) = flume::unbounded(); + Self { db_pool, query_groups: RwLock::new(HashMap::new()), channel_for_lq_watcher_drops__sender_base: s1, channel_for_lq_watcher_drops__receiver_base: r1 } + } + pub fn new_in_arc(db_pool: DBPoolArc) -> LQStorageArc { + let wrapper = Arc::new(Self::new(db_pool)); - // start this listener for drop requests - let wrapper_clone = wrapper.clone(); - tokio::spawn(async move { - loop { - let drop_msg = wrapper_clone.channel_for_lq_watcher_drops__receiver_base.recv_async().await.unwrap(); - match drop_msg { - DropLQWatcherMsg::Drop_ByCollectionAndFilterAndStreamID(table_name, filter, stream_id) => { - let lq_key_for_group = LQKey::new_for_lq_group(table_name.clone(), filter.clone()); - let query_group = wrapper_clone.get_or_create_query_group(&lq_key_for_group).await; + // start this listener for drop requests + let wrapper_clone = wrapper.clone(); + tokio::spawn(async move { + loop { + let drop_msg = wrapper_clone.channel_for_lq_watcher_drops__receiver_base.recv_async().await.unwrap(); + match drop_msg { + DropLQWatcherMsg::Drop_ByCollectionAndFilterAndStreamID(table_name, filter, stream_id) => { + let lq_key_for_group = LQKey::new_for_lq_group(table_name.clone(), filter.clone()); + let query_group = wrapper_clone.get_or_create_query_group(&lq_key_for_group).await; - let lq_key_for_instance = LQKey::new_for_lqi(table_name, filter); - query_group.drop_lq_watcher(lq_key_for_instance, stream_id); - }, - }; - } - }); + let lq_key_for_instance = LQKey::new_for_lqi(table_name, filter); + query_group.drop_lq_watcher(lq_key_for_instance, stream_id); + }, + }; + } + }); - wrapper - } + wrapper + } - pub async fn get_or_create_query_group(&self, lq_key: &LQKey) -> Arc { - let lq_key_for_group = lq_key.as_shape_only(); - rw_locked_hashmap__get_entry_or_insert_with(&self.query_groups, lq_key_for_group.clone(), || { - LQGroup::new_in_arc(lq_key_for_group, self.db_pool.clone()) - }).await.0 - } + pub async fn 
get_or_create_query_group(&self, lq_key: &LQKey) -> Arc { + let lq_key_for_group = lq_key.as_shape_only(); + rw_locked_hashmap__get_entry_or_insert_with(&self.query_groups, lq_key_for_group.clone(), || LQGroup::new_in_arc(lq_key_for_group, self.db_pool.clone())).await.0 + } - /// Called from pgclient.rs - pub async fn notify_of_ld_change(&self, change: LDChange) { - let query_groups = self.query_groups.read().await; - for group in query_groups.values() { - group.notify_of_ld_change(change.clone()); - } - } + /// Called from pgclient.rs + pub async fn notify_of_ld_change(&self, change: LDChange) { + let query_groups = self.query_groups.read().await; + for group in query_groups.values() { + group.notify_of_ld_change(change.clone()); + } + } - /// Called from handlers.rs - pub async fn start_lq_watcher<'a, T: From + Serialize + DeserializeOwned>(&self, lq_key: &LQKey, stream_id: Uuid, mtx_p: Option<&Mtx>) -> Result<(Vec, LQEntryWatcher), Error> { - new_mtx!(mtx, "1:get or create query-group", mtx_p); - let group = self.get_or_create_query_group(lq_key).await; - mtx.section("2:start lq-watcher"); - group.start_lq_watcher(lq_key, stream_id, Some(&mtx)).await - } + /// Called from handlers.rs + pub async fn start_lq_watcher<'a, T: From + Serialize + DeserializeOwned>(&self, lq_key: &LQKey, stream_id: Uuid, mtx_p: Option<&Mtx>) -> Result<(Vec, LQEntryWatcher), Error> { + new_mtx!(mtx, "1:get or create query-group", mtx_p); + let group = self.get_or_create_query_group(lq_key).await; + mtx.section("2:start lq-watcher"); + group.start_lq_watcher(lq_key, stream_id, Some(&mtx)).await + } - /// Reacquires the data for a given doc/row from the database, and force-updates the live-query entries for it. - /// (temporary fix for bug where a `nodes/XXX` db-entry occasionally gets "stuck" -- ie. its live-query entry doesn't update, despite its db-data changing) - pub async fn refresh_lq_data(&self, table_name: String, entry_id: String) -> Result<(), Error> { - new_mtx!(mtx, "1:refresh_lq_data", None, Some(format!("@table_name:{table_name} @entry_id:{entry_id}"))); - mtx.log_call(None); - let query_groups = self.query_groups.read().await; - for group in query_groups.values() { - if group.lq_key.table_name == table_name { - group.refresh_lq_data_for_x(&entry_id).await?; - } - } - Ok(()) - } -} \ No newline at end of file + /// Reacquires the data for a given doc/row from the database, and force-updates the live-query entries for it. + /// (temporary fix for bug where a `nodes/XXX` db-entry occasionally gets "stuck" -- ie. 
its live-query entry doesn't update, despite its db-data changing) + pub async fn refresh_lq_data(&self, table_name: String, entry_id: String) -> Result<(), Error> { + new_mtx!(mtx, "1:refresh_lq_data", None, Some(format!("@table_name:{table_name} @entry_id:{entry_id}"))); + mtx.log_call(None); + let query_groups = self.query_groups.read().await; + for group in query_groups.values() { + if group.lq_key.table_name == table_name { + group.refresh_lq_data_for_x(&entry_id).await?; + } + } + Ok(()) + } +} diff --git a/Packages/app-server/src/store/live_queries_/lq_group/lq_batch/lq_batch.rs b/Packages/app-server/src/store/live_queries_/lq_group/lq_batch/lq_batch.rs index 43acf5fdd..19a165def 100644 --- a/Packages/app-server/src/store/live_queries_/lq_group/lq_batch/lq_batch.rs +++ b/Packages/app-server/src/store/live_queries_/lq_group/lq_batch/lq_batch.rs @@ -1,82 +1,81 @@ -use std::iter::{once, empty}; -use std::sync::Mutex; -use std::sync::atomic::{AtomicU64, Ordering}; -use std::{sync::Arc}; -use rust_shared::anyhow::{Error}; -use rust_shared::async_graphql::{Result}; -use deadpool_postgres::Pool; -use futures_util::{StreamExt, TryFutureExt, TryStreamExt}; -use rust_shared::indexmap::IndexMap; -use rust_shared::itertools::{chain, Itertools}; -use rust_shared::utils::mtx::mtx::Mtx; -use rust_shared::utils::type_aliases::RowData; -use rust_shared::{to_anyhow_with_extra, Lock, new_mtx, serde_json}; -use rust_shared::tokio::sync::{RwLock, Semaphore}; -use rust_shared::tokio_postgres::types::ToSql; -use rust_shared::tokio_postgres::{Row, RowStream}; -use lazy_static::lazy_static; -use tracing::{trace, error}; use crate::db::commands::_command::ToSqlWrapper; use crate::store::live_queries_::lq_group::lq_batch::sql_generator::prepare_sql_query; use crate::store::live_queries_::lq_instance::LQInstance; use crate::store::live_queries_::lq_key::LQKey; use crate::store::live_queries_::lq_param::LQParam; -use crate::utils::db::filter::{QueryFilter}; +use crate::utils::db::filter::QueryFilter; use crate::utils::db::pg_row_to_json::postgres_row_to_row_data; -use crate::utils::db::sql_fragment::{SF}; -use crate::utils::db::sql_param::{SQLParam}; +use crate::utils::db::sql_fragment::SQLFragment; +use crate::utils::db::sql_fragment::SF; +use crate::utils::db::sql_param::SQLParam; use crate::utils::general::general::{match_cond_to_iter, AtomicF64}; use crate::utils::type_aliases::PGClientObject; -use crate::{utils::{db::{sql_fragment::{SQLFragment}}}}; +use deadpool_postgres::Pool; +use futures_util::{StreamExt, TryFutureExt, TryStreamExt}; +use lazy_static::lazy_static; +use rust_shared::anyhow::Error; +use rust_shared::async_graphql::Result; +use rust_shared::indexmap::IndexMap; +use rust_shared::itertools::{chain, Itertools}; +use rust_shared::tokio::sync::{RwLock, Semaphore}; +use rust_shared::tokio_postgres::types::ToSql; +use rust_shared::tokio_postgres::{Row, RowStream}; +use rust_shared::utils::mtx::mtx::Mtx; +use rust_shared::utils::type_aliases::RowData; +use rust_shared::{new_mtx, serde_json, to_anyhow_with_extra, Lock}; +use std::iter::{empty, once}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +use std::sync::Mutex; +use tracing::{error, trace}; /// Use this struct to collect multiple queries and execute them in one go as a "batched query". /// The main use-case for this is to increase the performance of the live-query system, by allowing the server to obtain the "initial results" for multiple live-queries with the same "form" within one SQL query. 
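// A minimal sketch of the batching idea described above (simplified, hypothetical types; the real logic
// lives in `execute()` below): the combined SQL tags every returned row with an `lq_index` column that says
// which live-query in the batch produced it, and rows are then split back into one result-set per query.
fn bucket_rows_by_lq_index(rows: Vec<(i64, String)>, query_count: usize) -> Vec<Vec<String>> {
    // one (initially empty) result-set per live-query in the batch
    let mut results: Vec<Vec<String>> = (0..query_count).map(|_| Vec::new()).collect();
    for (lq_index, row_data) in rows {
        // in the real code, row_data is a RowData (JSON map) produced by postgres_row_to_row_data
        results[lq_index as usize].push(row_data);
    }
    results
}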
/// It can also be used as a convenience wrapper around executing a single query; but for most standalone queries, `get_entries_in_collection[_basic]` will be more appropriate. //#[derive(Default)] pub struct LQBatch { - // from LQGroup - pub lq_key: LQKey, - //pub index_in_group: usize, - - /// Note that this map gets cleared as soon as its entries are committed to the wider LQGroup. (necessary, since these LQBatch structs are recycled) - pub query_instances: IndexMap>, - /// Despite being stored in LQBatch, this is currently managed outside of it, in LQGroupImpl. - pub execution_in_progress: bool, - //pub execution_time: Option, - //execution_time: AtomicF64, // a value of -1 means "not yet set", ie. execution hasn't happened yet - - //pub execution_in_progress: Mutex, - pub executions_completed: usize, + // from LQGroup + pub lq_key: LQKey, + //pub index_in_group: usize, + /// Note that this map gets cleared as soon as its entries are committed to the wider LQGroup. (necessary, since these LQBatch structs are recycled) + pub query_instances: IndexMap>, + /// Despite being stored in LQBatch, this is currently managed outside of it, in LQGroupImpl. + pub execution_in_progress: bool, + //pub execution_time: Option, + //execution_time: AtomicF64, // a value of -1 means "not yet set", ie. execution hasn't happened yet + + //pub execution_in_progress: Mutex, + pub executions_completed: usize, } impl LQBatch { - pub fn new(lq_key: LQKey) -> Self { - Self { - lq_key, - //index_in_group, - - query_instances: IndexMap::default(), - execution_in_progress: false, - //query_instances: RwLock::default(), - //execution_time: AtomicF64::new(-1f64), - //execution_in_progress: Mutex::new(false), - executions_completed: 0, - } - } - pub fn get_generation(&self) -> usize { - self.executions_completed - } - - /// Call this each cycle, after the batch's contents have been committed to the wider LQGroup. (necessary, since these LQBatch structs are recycled) - pub fn mark_generation_end_and_reset(&mut self) -> Vec<(LQKey, Arc)> { - self.executions_completed += 1; - self.query_instances.drain(..).collect_vec() - } - - /// Returns a set of LQParam instances with filler values; used for generating the column-names for the temp-table holding the param-sets. - pub fn lq_param_prototypes(&self) -> Vec { - // doesn't matter what these are; just need filler values - let lq_index_filler = 0; - //let filter_op_filler = FilterOp::EqualsX(JSONValue::String("n/a".to_owned())); + pub fn new(lq_key: LQKey) -> Self { + Self { + lq_key, + //index_in_group, + query_instances: IndexMap::default(), + execution_in_progress: false, + //query_instances: RwLock::default(), + //execution_time: AtomicF64::new(-1f64), + //execution_in_progress: Mutex::new(false), + executions_completed: 0, + } + } + pub fn get_generation(&self) -> usize { + self.executions_completed + } + + /// Call this each cycle, after the batch's contents have been committed to the wider LQGroup. (necessary, since these LQBatch structs are recycled) + pub fn mark_generation_end_and_reset(&mut self) -> Vec<(LQKey, Arc)> { + self.executions_completed += 1; + self.query_instances.drain(..).collect_vec() + } + + /// Returns a set of LQParam instances with filler values; used for generating the column-names for the temp-table holding the param-sets. 
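// A rough sketch of how the filler prototypes could map to temp-table column names (hypothetical, simplified
// enum and naming scheme; only the "lq_index" column name is confirmed by the row-bucketing in `execute()`,
// the rest is illustrative):
#[derive(Clone)]
enum LQParamSketch {
    LQIndex(usize),
    FilterOpValue { field: String, op_index: usize },
}
fn column_name_for(proto: &LQParamSketch) -> String {
    match proto {
        LQParamSketch::LQIndex(_) => "lq_index".to_owned(),
        LQParamSketch::FilterOpValue { field, op_index } => format!("{field}_{op_index}"),
    }
}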
+ #[rustfmt::skip] + pub fn lq_param_prototypes(&self) -> Vec { + // doesn't matter what these are; just need filler values + let lq_index_filler = 0; + //let filter_op_filler = FilterOp::EqualsX(JSONValue::String("n/a".to_owned())); chain!( once(LQParam::LQIndex(lq_index_filler)), @@ -89,88 +88,86 @@ impl LQBatch { ).collect_vec() } - pub async fn execute(&mut self, client: &PGClientObject, parent_mtx: Option<&Mtx>) - //-> Result, Error> - -> Result<(), Error> - { - new_mtx!(mtx, "1:wait for pg-client", parent_mtx); - //mtx.current_section_extra_info = Some(format!("@table_name:{} @filters_sql:{}", instance.table_name, filters_sql)); - - let query_instance_vals: Vec<&Arc> = self.query_instances.values().collect(); - - mtx.section("1.1:wait for semaphore permit"); - let permit = SEMAPHORE__BATCH_EXECUTION.acquire().await.unwrap(); - - mtx.section("2:prepare the combined query"); - let lq_param_protos = self.lq_param_prototypes(); - let (sql_text, params) = prepare_sql_query(&self.lq_key.table_name, &lq_param_protos, &query_instance_vals, Some(&mtx))?; - - mtx.section("3:execute the combined query"); - let sql_info_str = format!("@sql_text:{sql_text} @params:{params:?}"); - trace!("Executing query-batch. {sql_info_str}"); - let rows = { - // todo: remove need for this check (this line should never be reached unless the batch has query-instances!) - if query_instance_vals.len() == 0 { - error!("Batch had execute() called, despite its `query_instances` field being empty! (this should never happen)"); - vec![] - } else { - let params_wrapped: Vec = params.into_iter().map(|a| ToSqlWrapper { data: a }).collect(); - let params_as_refs: Vec<&(dyn ToSql + Sync)> = params_wrapped.iter().map(|x| x as &(dyn ToSql + Sync)).collect(); - - let row_stream = client.query_raw(&sql_text, params_as_refs) - .await.map_err(|a| to_anyhow_with_extra(a, sql_info_str.clone()))?; - let rows: Vec = row_stream.try_collect() - .await.map_err(|a| to_anyhow_with_extra(a, sql_info_str.clone()))?; - rows - } - }; - - mtx.section("4:collect the rows into groups (while converting rows to row-data structs)"); - let mut lq_results: Vec> = query_instance_vals.iter().map(|_| vec![]).collect(); - for row in rows { - let lq_index: i64 = row.get("lq_index"); - // convert to RowData structs (the behavior of RowData/JSONValue is simpler/more-standardized than tokio_postgres::Row) - let columns_to_process = row.columns().len() - lq_param_protos.len(); - let row_data = postgres_row_to_row_data(row, columns_to_process)?; - lq_results[lq_index as usize].push(row_data); - } - - mtx.section("5:sort the entries within each result-set"); - let lq_results_converted: Vec> = lq_results.into_iter().map(|mut lq_results| { - // sort by id, so that order of our results here is consistent with order after live-query-updating modifications (see live_queries.rs) - lq_results.sort_by_key(|row_data| { - let id: String = row_data.get("id").unwrap().as_str().unwrap().to_owned(); - id - }); - lq_results - }).collect(); - - // drop semaphore permit (ie. 
if there's another thread waiting to enter the section of code above, allow them now) - drop(permit); - - mtx.section("6:commit the new result-sets"); - for (i, lq_results) in lq_results_converted.into_iter().enumerate() { - let lqi = query_instance_vals.get(i).unwrap(); - lqi.set_last_entries::<{Lock::unknown_prior}>(lq_results).await; - } - - //self.execution_time.store(time_since_epoch_ms(), Ordering::Relaxed); - - //Ok(lq_results_converted) - Ok(()) - } + pub async fn execute(&mut self, client: &PGClientObject, parent_mtx: Option<&Mtx>) -> Result<(), Error> { + new_mtx!(mtx, "1:wait for pg-client", parent_mtx); + //mtx.current_section_extra_info = Some(format!("@table_name:{} @filters_sql:{}", instance.table_name, filters_sql)); + + let query_instance_vals: Vec<&Arc> = self.query_instances.values().collect(); + + mtx.section("1.1:wait for semaphore permit"); + let permit = SEMAPHORE__BATCH_EXECUTION.acquire().await.unwrap(); + + mtx.section("2:prepare the combined query"); + let lq_param_protos = self.lq_param_prototypes(); + let (sql_text, params) = prepare_sql_query(&self.lq_key.table_name, &lq_param_protos, &query_instance_vals, Some(&mtx))?; + + mtx.section("3:execute the combined query"); + let sql_info_str = format!("@sql_text:{sql_text} @params:{params:?}"); + trace!("Executing query-batch. {sql_info_str}"); + let rows = { + // todo: remove need for this check (this line should never be reached unless the batch has query-instances!) + if query_instance_vals.len() == 0 { + error!("Batch had execute() called, despite its `query_instances` field being empty! (this should never happen)"); + vec![] + } else { + let params_wrapped: Vec = params.into_iter().map(|a| ToSqlWrapper { data: a }).collect(); + let params_as_refs: Vec<&(dyn ToSql + Sync)> = params_wrapped.iter().map(|x| x as &(dyn ToSql + Sync)).collect(); + + let row_stream = client.query_raw(&sql_text, params_as_refs).await.map_err(|a| to_anyhow_with_extra(a, sql_info_str.clone()))?; + let rows: Vec = row_stream.try_collect().await.map_err(|a| to_anyhow_with_extra(a, sql_info_str.clone()))?; + rows + } + }; + + mtx.section("4:collect the rows into groups (while converting rows to row-data structs)"); + let mut lq_results: Vec> = query_instance_vals.iter().map(|_| vec![]).collect(); + for row in rows { + let lq_index: i64 = row.get("lq_index"); + // convert to RowData structs (the behavior of RowData/JSONValue is simpler/more-standardized than tokio_postgres::Row) + let columns_to_process = row.columns().len() - lq_param_protos.len(); + let row_data = postgres_row_to_row_data(row, columns_to_process)?; + lq_results[lq_index as usize].push(row_data); + } + + mtx.section("5:sort the entries within each result-set"); + let lq_results_converted: Vec> = lq_results + .into_iter() + .map(|mut lq_results| { + // sort by id, so that order of our results here is consistent with order after live-query-updating modifications (see live_queries.rs) + lq_results.sort_by_key(|row_data| { + let id: String = row_data.get("id").unwrap().as_str().unwrap().to_owned(); + id + }); + lq_results + }) + .collect(); + + // drop semaphore permit (ie. 
if there's another thread waiting to enter the section of code above, allow them now) + drop(permit); + + mtx.section("6:commit the new result-sets"); + for (i, lq_results) in lq_results_converted.into_iter().enumerate() { + let lqi = query_instance_vals.get(i).unwrap(); + lqi.set_last_entries::<{ Lock::unknown_prior }>(lq_results).await; + } + + //self.execution_time.store(time_since_epoch_ms(), Ordering::Relaxed); + + //Ok(lq_results_converted) + Ok(()) + } } lazy_static! { - // limit the number of threads that are simultaneously executing lq-batches - // (this yields a better result, since it means requests will resolve "at full speed, but in order", rather than "at full speed, all at once, such that they all take a long time to complete") - static ref SEMAPHORE__BATCH_EXECUTION: Semaphore = Semaphore::new(get_batch_execution_concurrency_limit()); + // limit the number of threads that are simultaneously executing lq-batches + // (this yields a better result, since it means requests will resolve "at full speed, but in order", rather than "at full speed, all at once, such that they all take a long time to complete") + static ref SEMAPHORE__BATCH_EXECUTION: Semaphore = Semaphore::new(get_batch_execution_concurrency_limit()); } fn get_batch_execution_concurrency_limit() -> usize { - let logical_cpus = num_cpus::get(); - match logical_cpus { - // if device has 3+ cores, leave one core free, for the various other processing that needs to occur - 3.. => logical_cpus - 1, - _ => logical_cpus, - } -} \ No newline at end of file + let logical_cpus = num_cpus::get(); + match logical_cpus { + // if device has 3+ cores, leave one core free, for the various other processing that needs to occur + 3.. => logical_cpus - 1, + _ => logical_cpus, + } +} diff --git a/Packages/app-server/src/store/live_queries_/lq_group/lq_batch/sql_generator.rs b/Packages/app-server/src/store/live_queries_/lq_group/lq_batch/sql_generator.rs index 5f88bb7b2..d3ea65a3b 100644 --- a/Packages/app-server/src/store/live_queries_/lq_group/lq_batch/sql_generator.rs +++ b/Packages/app-server/src/store/live_queries_/lq_group/lq_batch/sql_generator.rs @@ -1,10 +1,17 @@ -use std::iter::{once, empty}; -use std::sync::atomic::{AtomicU64, Ordering}; -use std::{sync::Arc}; -use rust_shared::anyhow::{Error}; -use rust_shared::async_graphql::{Result}; +use crate::store::live_queries_::lq_instance::LQInstance; +use crate::store::live_queries_::lq_param::LQParam; +use crate::utils::db::filter::QueryFilter; +use crate::utils::db::pg_row_to_json::postgres_row_to_row_data; +use crate::utils::db::sql_fragment::SQLFragment; +use crate::utils::db::sql_fragment::SF; +use crate::utils::db::sql_ident::SQLIdent; +use crate::utils::db::sql_param::{SQLParam, SQLParamBoxed}; +use crate::utils::general::general::{match_cond_to_iter, AtomicF64}; +use crate::utils::type_aliases::PGClientObject; use deadpool_postgres::Pool; use futures_util::{StreamExt, TryFutureExt, TryStreamExt}; +use rust_shared::anyhow::Error; +use rust_shared::async_graphql::Result; use rust_shared::indexmap::IndexMap; use rust_shared::itertools::{chain, Itertools}; use rust_shared::new_mtx; @@ -12,20 +19,14 @@ use rust_shared::tokio::sync::RwLock; use rust_shared::tokio_postgres::Row; use rust_shared::utils::general_::extensions::IteratorV; use rust_shared::utils::mtx::mtx::Mtx; -use crate::store::live_queries_::lq_instance::LQInstance; -use crate::store::live_queries_::lq_param::LQParam; -use crate::utils::db::filter::{QueryFilter}; -use 
crate::utils::db::pg_row_to_json::postgres_row_to_row_data; -use crate::utils::db::sql_fragment::{SF}; -use crate::utils::db::sql_ident::SQLIdent; -use crate::utils::db::sql_param::{SQLParam, SQLParamBoxed}; -use crate::utils::general::general::{match_cond_to_iter, AtomicF64}; -use crate::utils::type_aliases::PGClientObject; -use crate::{utils::{db::{sql_fragment::{SQLFragment}}}}; +use std::iter::{empty, once}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +#[rustfmt::skip] pub fn prepare_sql_query(table_name: &str, lq_param_protos: &Vec, query_instance_vals: &Vec<&Arc>, mtx_p: Option<&Mtx>) -> Result<(String, Vec), Error> { - new_mtx!(mtx, "1:prep", mtx_p); - let lq_last_index = query_instance_vals.len() as i64 - 1; + new_mtx!(mtx, "1:prep", mtx_p); + let lq_last_index = query_instance_vals.len() as i64 - 1; // each entry of the root-chain is considered its own line, with `merge_lines()` adding line-breaks between them let mut combined_sql = SF::merge_lines(chain!( @@ -92,59 +93,54 @@ pub fn prepare_sql_query(table_name: &str, lq_param_protos: &Vec, query #[cfg(test)] mod tests { - use std::{sync::Arc, iter::once}; + use std::{iter::once, sync::Arc}; - use rust_shared::itertools::chain; - use rust_shared::utils::general_::extensions::ToOwnedV; - use rust_shared::{utils::time::time_since_epoch_ms, utils::type_aliases::JSONValue}; - use rust_shared::serde_json::json; - use crate::store::live_queries_::lq_instance::LQInstance; - use crate::store::live_queries_::lq_key::LQKey; - use crate::store::live_queries_::lq_param::LQParam; - use crate::utils::db::filter::{FilterOp, QueryFilter}; + use crate::store::live_queries_::lq_instance::LQInstance; + use crate::store::live_queries_::lq_key::LQKey; + use crate::store::live_queries_::lq_param::LQParam; + use crate::utils::db::filter::{FilterOp, QueryFilter}; + use rust_shared::itertools::chain; + use rust_shared::serde_json::json; + use rust_shared::utils::general_::extensions::ToOwnedV; + use rust_shared::{utils::time::time_since_epoch_ms, utils::type_aliases::JSONValue}; - use super::prepare_sql_query; + use super::prepare_sql_query; - // run in PowerShell using: `cargo test sql_generator_simple -- --nocapture` - #[test] - fn sql_generator_simple() { - //match std::panic::catch_unwind(|| { - let table_name = "maps"; - let lq_param_protos = vec![ - LQParam::LQIndex(0), - LQParam::FilterOpValue("id".to_owned(), 0, FilterOp::EqualsX(JSONValue::String("GLOBAL_ROOT_0000000001".to_owned()))), - ]; - let filter_input = json!({ - "id": { - "equalTo": "GLOBAL_ROOT_0000000001" - } - }); - let filter = QueryFilter::from_filter_input(&filter_input).unwrap(); - let lq_key = LQKey::new(table_name.o(), filter.o()); - let instance1 = Arc::new(LQInstance::new(lq_key, vec![])); - let query_instance_vals: Vec<&Arc> = vec![ - &instance1 - ]; + // run in PowerShell using: `cargo test sql_generator_simple -- --nocapture` + #[test] + fn sql_generator_simple() { + //match std::panic::catch_unwind(|| { + let table_name = "maps"; + let lq_param_protos = vec![LQParam::LQIndex(0), LQParam::FilterOpValue("id".to_owned(), 0, FilterOp::EqualsX(JSONValue::String("GLOBAL_ROOT_0000000001".to_owned())))]; + let filter_input = json!({ + "id": { + "equalTo": "GLOBAL_ROOT_0000000001" + } + }); + let filter = QueryFilter::from_filter_input(&filter_input).unwrap(); + let lq_key = LQKey::new(table_name.o(), filter.o()); + let instance1 = Arc::new(LQInstance::new(lq_key, vec![])); + let query_instance_vals: Vec<&Arc> = vec![&instance1]; - let start_time = 
time_since_epoch_ms(); - let mut last_print_time = start_time; - for i in 0..1_000_000_000 { - //for i in 0..10 { - let now = time_since_epoch_ms(); - if now - last_print_time >= 1000f64 { - let ms_since_start = now - start_time; - let seconds_since_start = ms_since_start / 1000f64; - let loops_per_second = (f64::from(i) + 1f64) / seconds_since_start; - let loop_time = ms_since_start / (f64::from(i) + 1f64); - println!("loopTime:{:.3}ms loopsPerSecond:{:.1} @timeSinceStart:{:.1}", loop_time, loops_per_second, seconds_since_start); - last_print_time = now; - } - //println!("Starting loop:{i}"); - prepare_sql_query(table_name, &lq_param_protos, &query_instance_vals, None).unwrap(); - } - /*}) { - Ok(_) => println!("Done!"), - Err(err) => println!("Got error:{err:?}") - };*/ - } -} \ No newline at end of file + let start_time = time_since_epoch_ms(); + let mut last_print_time = start_time; + for i in 0..1_000_000_000 { + //for i in 0..10 { + let now = time_since_epoch_ms(); + if now - last_print_time >= 1000f64 { + let ms_since_start = now - start_time; + let seconds_since_start = ms_since_start / 1000f64; + let loops_per_second = (f64::from(i) + 1f64) / seconds_since_start; + let loop_time = ms_since_start / (f64::from(i) + 1f64); + println!("loopTime:{:.3}ms loopsPerSecond:{:.1} @timeSinceStart:{:.1}", loop_time, loops_per_second, seconds_since_start); + last_print_time = now; + } + //println!("Starting loop:{i}"); + prepare_sql_query(table_name, &lq_param_protos, &query_instance_vals, None).unwrap(); + } + /*}) { + Ok(_) => println!("Done!"), + Err(err) => println!("Got error:{err:?}") + };*/ + } +} diff --git a/Packages/app-server/src/store/live_queries_/lq_group/lq_group.rs b/Packages/app-server/src/store/live_queries_/lq_group/lq_group.rs index a47fa5525..eef142aab 100644 --- a/Packages/app-server/src/store/live_queries_/lq_group/lq_group.rs +++ b/Packages/app-server/src/store/live_queries_/lq_group/lq_group.rs @@ -1,3 +1,34 @@ +use axum::extract::ws::{CloseFrame, Message}; +use axum::extract::{FromRequest, WebSocketUpgrade}; +use axum::http::header::CONTENT_TYPE; +use axum::http::Method; +use axum::http::{self, Request, Response, StatusCode}; +use axum::response::{self, IntoResponse}; +use axum::routing::{get, on_service, post, MethodFilter}; +use axum::{extract, Router}; +use futures_util::future::{BoxFuture, Ready}; +use futures_util::stream::{SplitSink, SplitStream}; +use futures_util::{future, FutureExt, Sink, SinkExt, Stream, StreamExt}; +use rust_shared::anyhow::{anyhow, bail, ensure, Error}; +use rust_shared::async_graphql::futures_util::task::{Context, Poll}; +use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; +use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; +use rust_shared::async_graphql::{Data, MergedObject, MergedSubscription, ObjectType, Result, Schema, SubscriptionType}; +use rust_shared::flume::{unbounded, Receiver, Sender}; +use rust_shared::indexmap::IndexMap; +use rust_shared::itertools::Itertools; +use rust_shared::serde::de::DeserializeOwned; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Map}; +use rust_shared::tokio::sync::{mpsc, Mutex, RwLock}; +use rust_shared::tokio::time::{self, Instant}; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::utils::general_::extensions::ToOwnedV; +use rust_shared::utils::mtx::mtx::Mtx; +use rust_shared::utils::type_aliases::{FReceiver, FSender, JSONValue}; +use rust_shared::uuid::Uuid; +use 
rust_shared::{axum, check_lock_order, flume, futures, new_mtx, tower, tower_http, Lock}; +use rust_shared::{serde_json, tokio, utils::time::time_since_epoch_ms, RwLock_Tracked}; use std::borrow::Cow; use std::cell::RefCell; use std::collections::{HashMap, HashSet}; @@ -7,76 +38,44 @@ use std::mem; use std::pin::Pin; use std::rc::Rc; use std::str::FromStr; +use std::sync::atomic::{AtomicI64, AtomicU64, Ordering}; use std::sync::Arc; -use std::sync::atomic::{Ordering, AtomicU64, AtomicI64}; use std::time::Duration; -use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; -use rust_shared::async_graphql::{Schema, MergedObject, MergedSubscription, ObjectType, Data, Result, SubscriptionType}; -use rust_shared::utils::general_::extensions::ToOwnedV; -use rust_shared::utils::mtx::mtx::Mtx; -use rust_shared::{futures, axum, tower, tower_http, Lock, check_lock_order, flume, new_mtx}; -use axum::http::Method; -use axum::http::header::CONTENT_TYPE; -use axum::response::{self, IntoResponse}; -use axum::routing::{get, post, MethodFilter, on_service}; -use axum::{extract, Router}; -use rust_shared::flume::{Sender, Receiver, unbounded}; -use rust_shared::indexmap::IndexMap; -use rust_shared::itertools::Itertools; -use rust_shared::utils::type_aliases::{JSONValue, FReceiver, FSender}; -use rust_shared::{utils::time::time_since_epoch_ms, RwLock_Tracked, tokio, serde_json}; -use rust_shared::serde::de::DeserializeOwned; -use rust_shared::serde::{Deserialize, Serialize}; -use rust_shared::serde_json::{json, Map}; -use rust_shared::tokio::sync::{mpsc, Mutex, RwLock}; -use rust_shared::tokio::time::{Instant, self}; -use rust_shared::tokio_postgres::{Client, Row}; use tower::Service; -use rust_shared::async_graphql::futures_util::task::{Context, Poll}; -use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; -use axum::extract::ws::{CloseFrame, Message}; -use axum::extract::{FromRequest, WebSocketUpgrade}; -use axum::http::{self, Request, Response, StatusCode}; -use futures_util::future::{BoxFuture, Ready}; -use futures_util::stream::{SplitSink, SplitStream}; -use futures_util::{future, Sink, SinkExt, Stream, StreamExt, FutureExt}; -use tracing::{info, warn, debug, error}; -use rust_shared::anyhow::{anyhow, Error, bail, ensure}; -use rust_shared::uuid::Uuid; +use tracing::{debug, error, info, warn}; use crate::store::live_queries_::lq_key::LQKey; -use crate::utils::db::filter::{entry_matches_filter, QueryFilter, FilterOp}; +use crate::utils::db::filter::{entry_matches_filter, FilterOp, QueryFilter}; use crate::utils::db::generic_handlers::subscriptions::json_maps_to_typed_entries; -use crate::utils::db::pg_stream_parsing::{LDChange}; -use crate::utils::db::queries::{get_entries_in_collection}; -use crate::utils::general::general::{AtomicF64}; -use crate::utils::type_aliases::{PGClientObject, ABReceiver, ABSender, DBPoolArc}; +use crate::utils::db::pg_stream_parsing::LDChange; +use crate::utils::db::queries::get_entries_in_collection; +use crate::utils::general::general::AtomicF64; +use crate::utils::type_aliases::{ABReceiver, ABSender, DBPoolArc, PGClientObject}; -use super::lq_group_impl::{LQGroupImpl, LQBatchMessage}; -use super::super::lq_instance::{LQInstance, LQEntryWatcher}; +use super::super::lq_instance::{LQEntryWatcher, LQInstance}; +use super::lq_group_impl::{LQBatchMessage, LQGroupImpl}; type RwLock_Std = std::sync::RwLock; #[derive(Debug)] pub enum LQGroup_InMsg { - /// (lq_key, force_queue_lqi, mtx_parent) - 
ScheduleLQIReadOrInitThenBroadcast(LQKey, Option>, Option), - DropLQWatcher(LQKey, Uuid), - NotifyOfLDChange(LDChange), - // /// (entry_id [ie. row uuid]) - //RefreshLQDataForX(String), - - /// (batch_index, mtx_parent) - OnBatchReachedTimeToExecute(usize, Option), + /// (lq_key, force_queue_lqi, mtx_parent) + ScheduleLQIReadOrInitThenBroadcast(LQKey, Option>, Option), + DropLQWatcher(LQKey, Uuid), + NotifyOfLDChange(LDChange), + // /// (entry_id [ie. row uuid]) + //RefreshLQDataForX(String), + /// (batch_index, mtx_parent) + OnBatchReachedTimeToExecute(usize, Option), - // from LQBatch (or tokio::spawn block in LQGroupImpl for it) - /// (batch_index, lqis_in_batch) - OnBatchCompleted(usize, Vec>), + // from LQBatch (or tokio::spawn block in LQGroupImpl for it) + /// (batch_index, lqis_in_batch) + OnBatchCompleted(usize, Vec>), } #[derive(Debug, Clone)] pub enum LQGroup_OutMsg { - /// (lq_key, lqi, just_initialized) - LQInstanceIsInitialized(LQKey, Arc, bool) + /// (lq_key, lqi, just_initialized) + LQInstanceIsInitialized(LQKey, Arc, bool), } // sync docs with LQGroupImpl @@ -84,178 +83,181 @@ pub enum LQGroup_OutMsg { /// The `LQGroup` struct is the "public interface" for the lq-group. Its methods are mostly async, with each "sending a message" to the "inner" `LQGroupImpl` struct, which queues up a set of calls and then processes them as a batch. /// When the batched processing in the `LQGroupImpl` completes, it sends a message back to the "waiting" `LQGroup`s, which then are able to have their async methods return. pub struct LQGroup { - inner: Mutex, + inner: Mutex, - pub lq_key: LQKey, - messages_in_sender: FSender, - messages_in_receiver: FReceiver, - messages_out_sender: ABSender, - messages_out_receiver: ABReceiver, + pub lq_key: LQKey, + messages_in_sender: FSender, + messages_in_receiver: FReceiver, + messages_out_sender: ABSender, + messages_out_receiver: ABReceiver, } impl LQGroup { - fn new(lq_key: LQKey, db_pool: DBPoolArc) -> Self { - let (s1, r1): (FSender, FReceiver) = flume::unbounded(); - // 2024-02-05: 1k capacity too small; upped to 10k (not yet certain if issue was from capacity being too small, or some sort of deadlock) - let (mut s2, r2): (ABSender, ABReceiver) = async_broadcast::broadcast(10000); + fn new(lq_key: LQKey, db_pool: DBPoolArc) -> Self { + let (s1, r1): (FSender, FReceiver) = flume::unbounded(); + // 2024-02-05: 1k capacity too small; upped to 10k (not yet certain if issue was from capacity being too small, or some sort of deadlock) + let (mut s2, r2): (ABSender, ABReceiver) = async_broadcast::broadcast(10000); - // nothing ever "consumes" the messages in the `messages_out` channel, so we must enable overflow (ie. deletion of old entries once queue-size cap is reached) - s2.set_overflow(true); + // nothing ever "consumes" the messages in the `messages_out` channel, so we must enable overflow (ie. 
deletion of old entries once queue-size cap is reached) + s2.set_overflow(true); - //let lq_key = LQKey::new(table_name, filter); - let new_self = Self { - inner: Mutex::new(LQGroupImpl::new(lq_key.clone(), db_pool, s1.clone(), s2.clone())), + //let lq_key = LQKey::new(table_name, filter); + let new_self = Self { + inner: Mutex::new(LQGroupImpl::new(lq_key.clone(), db_pool, s1.clone(), s2.clone())), + lq_key, + messages_in_sender: s1, + messages_in_receiver: r1, + messages_out_sender: s2, + messages_out_receiver: r2, + }; + new_self + } + pub fn new_in_arc(lq_key: LQKey, db_pool: DBPoolArc) -> Arc { + let wrapper = Arc::new(Self::new(lq_key, db_pool)); - lq_key, - messages_in_sender: s1, - messages_in_receiver: r1, - messages_out_sender: s2, - messages_out_receiver: r2, - }; - new_self - } - pub fn new_in_arc(lq_key: LQKey, db_pool: DBPoolArc) -> Arc { - let wrapper = Arc::new(Self::new(lq_key, db_pool)); + tokio::spawn(Self::message_loop(wrapper.clone())); - tokio::spawn(Self::message_loop(wrapper.clone())); + wrapper + } - wrapper - } + // message-handling + // ========== - // message-handling - // ========== + fn send_message(&self, message: LQGroup_InMsg) { + // unwrap is safe; LQGroup instances are never dropped, so the channel is also never dropped/closed + self.messages_in_sender.send(message).unwrap(); + } - fn send_message(&self, message: LQGroup_InMsg) { - // unwrap is safe; LQGroup instances are never dropped, so the channel is also never dropped/closed - self.messages_in_sender.send(message).unwrap(); - } + async fn message_loop(self: Arc) { + loop { + let message = self.messages_in_receiver.recv_async().await.unwrap(); + let mut inner = self.inner.lock().await; + let msg_as_str = format!("{:?}", message); + inner.notify_message_processed_or_sent(msg_as_str, false); + match message { + LQGroup_InMsg::ScheduleLQIReadOrInitThenBroadcast(lq_key, force_queue_lqi, parent_mtx) => { + inner.schedule_lqi_read_or_init_then_broadcast(&lq_key, force_queue_lqi, parent_mtx.as_ref()).await; + // Once lq-instance is initialized, LQGroupImpl will send a LQGroup_OutMsg::LQInstanceIsInitialized message back out. + // That message will be seen by the `get_initialized_lqi_for_key` func below, which will then return the lqi to the async caller. 
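// A standalone sketch of the request/reply pattern described above (toy types, not the crate's real
// LQGroup_InMsg/LQGroup_OutMsg): subscribe to the broadcast channel before sending the request, then wait
// until a reply carrying your key arrives, so a reply cannot slip through between "send" and "subscribe".
async fn request_and_wait_for_key(requests: &flume::Sender<u64>, replies: &async_broadcast::Receiver<(u64, String)>, key: u64) -> String {
    let mut rx = replies.new_receiver(); // subscribe first
    requests.send(key).unwrap(); // then ask the worker (here: whatever consumes `requests`) to do the init
    loop {
        let (reply_key, value) = rx.recv().await.unwrap();
        if reply_key == key {
            return value; // the initialized value for our key (in LQGroup: the Arc<LQInstance>)
        }
    }
}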
+ }, + LQGroup_InMsg::DropLQWatcher(lq_key, uuid) => { + inner.drop_lq_watcher(&lq_key, uuid).await; + }, + LQGroup_InMsg::NotifyOfLDChange(change) => { + inner.notify_of_ld_change(&change).await; + }, + /*LQGroup_InMsg::RefreshLQDataForX(entry_id) => { + // ignore error; if db-request fails, we leave it up to user to retry (it's a temporary workaround anyway) + let _ = inner.refresh_lq_data_for_x(&entry_id).await; + },*/ + // from LQBatch (or tokio::spawn block in LQGroupImpl for it) + LQGroup_InMsg::OnBatchReachedTimeToExecute(batch_i, mtx_parent) => { + inner.execute_batch(batch_i, mtx_parent.as_ref()).await; + }, + LQGroup_InMsg::OnBatchCompleted(batch_i, lqis_in_batch) => { + inner.on_batch_completed(batch_i, lqis_in_batch).await; + }, + } + } + } - async fn message_loop(self: Arc) { - loop { - let message = self.messages_in_receiver.recv_async().await.unwrap(); - let mut inner = self.inner.lock().await; - let msg_as_str = format!("{:?}", message); - inner.notify_message_processed_or_sent(msg_as_str, false); - match message { - LQGroup_InMsg::ScheduleLQIReadOrInitThenBroadcast(lq_key, force_queue_lqi, parent_mtx) => { - inner.schedule_lqi_read_or_init_then_broadcast(&lq_key, force_queue_lqi, parent_mtx.as_ref()).await; - // Once lq-instance is initialized, LQGroupImpl will send a LQGroup_OutMsg::LQInstanceIsInitialized message back out. - // That message will be seen by the `get_initialized_lqi_for_key` func below, which will then return the lqi to the async caller. - }, - LQGroup_InMsg::DropLQWatcher(lq_key, uuid) => { - inner.drop_lq_watcher(&lq_key, uuid).await; - }, - LQGroup_InMsg::NotifyOfLDChange(change) => { - inner.notify_of_ld_change(&change).await; - }, - /*LQGroup_InMsg::RefreshLQDataForX(entry_id) => { - // ignore error; if db-request fails, we leave it up to user to retry (it's a temporary workaround anyway) - let _ = inner.refresh_lq_data_for_x(&entry_id).await; - },*/ - // from LQBatch (or tokio::spawn block in LQGroupImpl for it) - LQGroup_InMsg::OnBatchReachedTimeToExecute(batch_i, mtx_parent) => { - inner.execute_batch(batch_i, mtx_parent.as_ref()).await; - }, - LQGroup_InMsg::OnBatchCompleted(batch_i, lqis_in_batch) => { - inner.on_batch_completed(batch_i, lqis_in_batch).await; - }, - } - } - } + // Helper functions, outside of LQGroupImpl. This is the appropriate location for functions where either: + // 1) The function is intended to be called by external callers/threads. + // 2) The function is async, and will be "waiting" for a substantial length of time (too long to await in message-loop) + // ========== - // Helper functions, outside of LQGroupImpl. This is the appropriate location for functions where either: - // 1) The function is intended to be called by external callers/threads. 
- // 2) The function is async, and will be "waiting" for a substantial length of time (too long to await in message-loop) - // ========== - - pub async fn get_initialized_lqi_for_key(&self, lq_key: &LQKey, force_queue_lqi: Option>, mtx_p: Option) -> Arc { - let mut new_receiver = self.messages_out_receiver.new_receiver(); - self.send_message(LQGroup_InMsg::ScheduleLQIReadOrInitThenBroadcast(lq_key.clone(), force_queue_lqi, mtx_p)); - loop { - #[allow(irrefutable_let_patterns)] // needed atm, since only one enum-option defined - if let LQGroup_OutMsg::LQInstanceIsInitialized(lq_key2, lqi, _just_initialized) = new_receiver.recv().await.unwrap() && lq_key2 == *lq_key { - return lqi; - } - } - } - pub async fn start_lq_watcher<'a, T: From + Serialize + DeserializeOwned>(&self, lq_key: &LQKey, stream_id: Uuid, mtx_p: Option<&Mtx>) -> Result<(Vec, LQEntryWatcher), Error> { - new_mtx!(mtx, "1:get or create lqi", mtx_p); - /*new_mtx!(mtx2, "", Some(&mtx)); - let lqi = self.get_initialized_lqi_for_key(&lq_key, Some(tx2)).await;*/ - let lqi = self.get_initialized_lqi_for_key(&lq_key, None, Some(mtx.proxy())).await; + pub async fn get_initialized_lqi_for_key(&self, lq_key: &LQKey, force_queue_lqi: Option>, mtx_p: Option) -> Arc { + let mut new_receiver = self.messages_out_receiver.new_receiver(); + self.send_message(LQGroup_InMsg::ScheduleLQIReadOrInitThenBroadcast(lq_key.clone(), force_queue_lqi, mtx_p)); + loop { + #[allow(irrefutable_let_patterns)] // needed atm, since only one enum-option defined + if let LQGroup_OutMsg::LQInstanceIsInitialized(lq_key2, lqi, _just_initialized) = new_receiver.recv().await.unwrap() + && lq_key2 == *lq_key + { + return lqi; + } + } + } + pub async fn start_lq_watcher<'a, T: From + Serialize + DeserializeOwned>(&self, lq_key: &LQKey, stream_id: Uuid, mtx_p: Option<&Mtx>) -> Result<(Vec, LQEntryWatcher), Error> { + new_mtx!(mtx, "1:get or create lqi", mtx_p); + /*new_mtx!(mtx2, "", Some(&mtx)); + let lqi = self.get_initialized_lqi_for_key(&lq_key, Some(tx2)).await;*/ + let lqi = self.get_initialized_lqi_for_key(&lq_key, None, Some(mtx.proxy())).await; - mtx.section("2:get current result-set"); - let result_entries = lqi.last_entries.read().await.clone(); + mtx.section("2:get current result-set"); + let result_entries = lqi.last_entries.read().await.clone(); - mtx.section("3:convert result-set to rust types"); - let result_entries_as_type: Vec = json_maps_to_typed_entries(result_entries.clone()).map_err(|err| { - let err_new = err.context("Got an error within start_lq_watcher -> json_maps_to_typed_entries, implying invalid/corrupted field data in database."); - error!("{:?}", err_new); - err_new - })?; + mtx.section("3:convert result-set to rust types"); + let result_entries_as_type: Vec = json_maps_to_typed_entries(result_entries.clone()).map_err(|err| { + let err_new = err.context("Got an error within start_lq_watcher -> json_maps_to_typed_entries, implying invalid/corrupted field data in database."); + error!("{:?}", err_new); + err_new + })?; - mtx.section("4:get or create watcher, for the given stream"); - //let watcher = entry.get_or_create_watcher(stream_id); - let entries_count = result_entries.len(); - let (watcher, _watcher_is_new, new_watcher_count) = lqi.get_or_create_watcher(stream_id, result_entries).await; - let watcher_info_str = format!("@watcher_count_for_entry:{} @collection:{} @filter:{:?} @entries_count:{}", new_watcher_count, lq_key.table_name, lq_key.filter, entries_count); - debug!("LQ-watcher started. 
{}", watcher_info_str); + mtx.section("4:get or create watcher, for the given stream"); + //let watcher = entry.get_or_create_watcher(stream_id); + let entries_count = result_entries.len(); + let (watcher, _watcher_is_new, new_watcher_count) = lqi.get_or_create_watcher(stream_id, result_entries).await; + let watcher_info_str = format!("@watcher_count_for_entry:{} @collection:{} @filter:{:?} @entries_count:{}", new_watcher_count, lq_key.table_name, lq_key.filter, entries_count); + debug!("LQ-watcher started. {}", watcher_info_str); - Ok((result_entries_as_type, watcher.clone())) - } + Ok((result_entries_as_type, watcher.clone())) + } - pub fn drop_lq_watcher(&self, lq_key: LQKey, stream_id: Uuid) { - self.send_message(LQGroup_InMsg::DropLQWatcher(lq_key, stream_id)); - } - pub fn notify_of_ld_change(&self, change: LDChange) { - self.send_message(LQGroup_InMsg::NotifyOfLDChange(change)); - } - /*pub fn refresh_lq_data_for_x(&self, entry_id: String) { - self.send_message(LQGroup_InMsg::RefreshLQDataForX(entry_id)); - }*/ + pub fn drop_lq_watcher(&self, lq_key: LQKey, stream_id: Uuid) { + self.send_message(LQGroup_InMsg::DropLQWatcher(lq_key, stream_id)); + } + pub fn notify_of_ld_change(&self, change: LDChange) { + self.send_message(LQGroup_InMsg::NotifyOfLDChange(change)); + } + /*pub fn refresh_lq_data_for_x(&self, entry_id: String) { + self.send_message(LQGroup_InMsg::RefreshLQDataForX(entry_id)); + }*/ - /// Reacquires the data for a given doc/row from the database, and force-updates the live-query entry for it. - /// (temporary fix for bug where a `nodes/XXX` db-entry occasionally gets "stuck" -- ie. its live-query entry doesn't update, despite its db-data changing) - pub async fn refresh_lq_data_for_x(&self, entry_id: &str) -> Result<(), Error> { - /*new_mtx!(mtx, "1:get live_queries", None, Some(format!("@table_name:{} @entry_id:{}", self.table_name, entry_id))); - mtx.log_call(None);*/ + /// Reacquires the data for a given doc/row from the database, and force-updates the live-query entry for it. + /// (temporary fix for bug where a `nodes/XXX` db-entry occasionally gets "stuck" -- ie. 
its live-query entry doesn't update, despite its db-data changing) + pub async fn refresh_lq_data_for_x(&self, entry_id: &str) -> Result<(), Error> { + /*new_mtx!(mtx, "1:get live_queries", None, Some(format!("@table_name:{} @entry_id:{}", self.table_name, entry_id))); + mtx.log_call(None);*/ - // get read-lock for self.query_instances, clone the collection, then drop the lock immediately (to avoid deadlock with function-trees we're about to call) - let live_queries: IndexMap> = self.inner.lock().await.get_lqis_committed_cloned(); // in cloning the IndexMap, all the keys and value are cloned as well + // get read-lock for self.query_instances, clone the collection, then drop the lock immediately (to avoid deadlock with function-trees we're about to call) + let live_queries: IndexMap> = self.inner.lock().await.get_lqis_committed_cloned(); // in cloning the IndexMap, all the keys and value are cloned as well - for (lq_key, lqi) in live_queries.iter() { - let entry_for_id = lqi.get_last_entry_with_id(entry_id).await; - // if this lq-instance has no entries with the entry-id we want to refresh, then ignore it - if entry_for_id.is_none() { continue; } - - //self.get_or_create_lq_instance_in_progressing_batch(lq_key, Some(lqi.clone()), None).await?; - // first call retrieves the lqi (presumably from the commited-lqis list) - //let lqi = self.get_initialized_lqi_for_key(lq_key, None, None).await; - // this call forces the lqi to be re-queued in new/progressing batch - self.get_initialized_lqi_for_key(lq_key, Some(lqi.clone()), None).await; + for (lq_key, lqi) in live_queries.iter() { + let entry_for_id = lqi.get_last_entry_with_id(entry_id).await; + // if this lq-instance has no entries with the entry-id we want to refresh, then ignore it + if entry_for_id.is_none() { + continue; + } - let new_data = match lqi.get_last_entry_with_id(entry_id).await { - None => { - warn!("While force-refreshing lq-data, the new batch completed, but no entry was found with the given id. This could mean the entry was just deleted, but more likely it's a bug."); - continue; - }, - Some(a) => a, - }; - info!("While force-refreshing lq-data, got new-data. @table:{} @new_data:{:?}", self.lq_key.table_name, new_data); + //self.get_or_create_lq_instance_in_progressing_batch(lq_key, Some(lqi.clone()), None).await?; + // first call retrieves the lqi (presumably from the commited-lqis list) + //let lqi = self.get_initialized_lqi_for_key(lq_key, None, None).await; + // this call forces the lqi to be re-queued in new/progressing batch + self.get_initialized_lqi_for_key(lq_key, Some(lqi.clone()), None).await; - let new_data_as_change = LDChange { - table: self.lq_key.table_name.clone(), - kind: "update".to_owned(), - columnnames: Some(new_data.keys().map(|a| a.clone()).collect()), - columnvalues: Some(new_data.values().map(|a| a.clone()).collect()), - // marking the type as "unknown" is fine; the type is only needed when converting from-lds data into proper `JSONValue`s - columntypes: Some(new_data.keys().map(|_| "unknown".to_owned()).collect()), - oldkeys: None, - schema: "".to_owned(), - needs_wal2json_jsonval_fixes: Some(false), // don't apply fixes, since fixes already applied (if needed) during initial ingestion ("columntypes" being "unknown" precludes the fixes from running anyway) - }; + let new_data = match lqi.get_last_entry_with_id(entry_id).await { + None => { + warn!("While force-refreshing lq-data, the new batch completed, but no entry was found with the given id. 
This could mean the entry was just deleted, but more likely it's a bug."); + continue; + }, + Some(a) => a, + }; + info!("While force-refreshing lq-data, got new-data. @table:{} @new_data:{:?}", self.lq_key.table_name, new_data); - lqi.on_table_changed(&new_data_as_change, None).await; - } - Ok(()) - } -} \ No newline at end of file + let new_data_as_change = LDChange { + table: self.lq_key.table_name.clone(), + kind: "update".to_owned(), + columnnames: Some(new_data.keys().map(|a| a.clone()).collect()), + columnvalues: Some(new_data.values().map(|a| a.clone()).collect()), + // marking the type as "unknown" is fine; the type is only needed when converting from-lds data into proper `JSONValue`s + columntypes: Some(new_data.keys().map(|_| "unknown".to_owned()).collect()), + oldkeys: None, + schema: "".to_owned(), + needs_wal2json_jsonval_fixes: Some(false), // don't apply fixes, since fixes already applied (if needed) during initial ingestion ("columntypes" being "unknown" precludes the fixes from running anyway) + }; + + lqi.on_table_changed(&new_data_as_change, None).await; + } + Ok(()) + } +} diff --git a/Packages/app-server/src/store/live_queries_/lq_group/lq_group_impl.rs b/Packages/app-server/src/store/live_queries_/lq_group/lq_group_impl.rs index 1cc12cefa..73e4f8498 100644 --- a/Packages/app-server/src/store/live_queries_/lq_group/lq_group_impl.rs +++ b/Packages/app-server/src/store/live_queries_/lq_group/lq_group_impl.rs @@ -1,3 +1,34 @@ +use axum::extract::ws::{CloseFrame, Message}; +use axum::extract::{FromRequest, WebSocketUpgrade}; +use axum::http::header::CONTENT_TYPE; +use axum::http::Method; +use axum::http::{self, Request, Response, StatusCode}; +use axum::response::{self, IntoResponse}; +use axum::routing::{get, on_service, post, MethodFilter}; +use axum::{extract, Router}; +use futures_util::future::{BoxFuture, Ready}; +use futures_util::stream::{SplitSink, SplitStream}; +use futures_util::{future, FutureExt, Sink, SinkExt, Stream, StreamExt}; +use rust_shared::anyhow::{anyhow, bail, ensure, Error}; +use rust_shared::async_graphql::futures_util::task::{Context, Poll}; +use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; +use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; +use rust_shared::async_graphql::{Data, MergedObject, MergedSubscription, ObjectType, Result, Schema, SubscriptionType}; +use rust_shared::flume::{unbounded, Receiver, Sender}; +use rust_shared::indexmap::IndexMap; +use rust_shared::itertools::Itertools; +use rust_shared::links::app_server_to_monitor_backend::Message_ASToMB; +use rust_shared::serde::de::DeserializeOwned; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Map}; +use rust_shared::tokio::sync::{mpsc, Mutex, RwLock}; +use rust_shared::tokio::time::{self, Instant}; +use rust_shared::tokio_postgres::{Client, Row}; +use rust_shared::utils::mtx::mtx::Mtx; +use rust_shared::utils::type_aliases::{FSender, JSONValue}; +use rust_shared::uuid::Uuid; +use rust_shared::{axum, check_lock_order, futures, new_mtx, tower, tower_http, Lock}; +use rust_shared::{serde_json, tokio, utils::time::time_since_epoch_ms, RwLock_Tracked}; use std::borrow::Cow; use std::cell::RefCell; use std::collections::{HashMap, HashSet}; @@ -7,102 +38,72 @@ use std::mem; use std::pin::Pin; use std::rc::Rc; use std::str::FromStr; +use std::sync::atomic::{AtomicI64, AtomicU64, Ordering}; use std::sync::Arc; -use std::sync::atomic::{Ordering, AtomicU64, AtomicI64}; use 
std::time::Duration; -use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; -use rust_shared::async_graphql::{Schema, MergedObject, MergedSubscription, ObjectType, Data, Result, SubscriptionType}; -use rust_shared::links::app_server_to_monitor_backend::Message_ASToMB; -use rust_shared::utils::mtx::mtx::Mtx; -use rust_shared::{futures, axum, tower, tower_http, Lock, check_lock_order, new_mtx}; -use axum::http::Method; -use axum::http::header::CONTENT_TYPE; -use axum::response::{self, IntoResponse}; -use axum::routing::{get, post, MethodFilter, on_service}; -use axum::{extract, Router}; -use rust_shared::flume::{Sender, Receiver, unbounded}; -use rust_shared::indexmap::IndexMap; -use rust_shared::itertools::Itertools; -use rust_shared::utils::type_aliases::{JSONValue, FSender}; -use rust_shared::{utils::time::time_since_epoch_ms, RwLock_Tracked, tokio, serde_json}; -use rust_shared::serde::de::DeserializeOwned; -use rust_shared::serde::{Deserialize, Serialize}; -use rust_shared::serde_json::{json, Map}; -use rust_shared::tokio::sync::{mpsc, Mutex, RwLock}; -use rust_shared::tokio::time::{Instant, self}; -use rust_shared::tokio_postgres::{Client, Row}; use tower::Service; -use tower_http::cors::{CorsLayer}; -use rust_shared::async_graphql::futures_util::task::{Context, Poll}; -use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; -use axum::extract::ws::{CloseFrame, Message}; -use axum::extract::{FromRequest, WebSocketUpgrade}; -use axum::http::{self, Request, Response, StatusCode}; -use futures_util::future::{BoxFuture, Ready}; -use futures_util::stream::{SplitSink, SplitStream}; -use futures_util::{future, Sink, SinkExt, Stream, StreamExt, FutureExt}; -use tracing::{info, warn, debug, error}; -use rust_shared::anyhow::{anyhow, Error, bail, ensure}; -use rust_shared::uuid::Uuid; +use tower_http::cors::CorsLayer; +use tracing::{debug, error, info, warn}; use crate::links::monitor_backend_link::MESSAGE_SENDER_TO_MONITOR_BACKEND; use crate::store::live_queries_::lq_group::lq_group::LQGroup_OutMsg; use crate::store::live_queries_::lq_key::{filter_shape_from_filter, LQKey}; use crate::store::storage::AppStateArc; -use crate::utils::db::filter::{entry_matches_filter, QueryFilter, FilterOp}; -use crate::utils::db::pg_stream_parsing::{LDChange}; -use crate::utils::db::queries::{get_entries_in_collection}; -use crate::utils::general::general::{AtomicF64}; -use crate::utils::type_aliases::{PGClientObject, ABReceiver, ABSender, DBPoolArc}; +use crate::utils::db::filter::{entry_matches_filter, FilterOp, QueryFilter}; +use crate::utils::db::pg_stream_parsing::LDChange; +use crate::utils::db::queries::get_entries_in_collection; +use crate::utils::general::general::AtomicF64; +use crate::utils::type_aliases::{ABReceiver, ABSender, DBPoolArc, PGClientObject}; -use super::super::lq_instance::{LQInstance, LQEntryWatcher}; +use super::super::lq_instance::{LQEntryWatcher, LQInstance}; use super::lq_batch::lq_batch::LQBatch; use super::lq_group::LQGroup_InMsg; pub fn get_lq_group_key(table_name: &str, filter: &QueryFilter) -> String { - let filter_shape = filter_shape_from_filter(filter); - json!({ - "table": table_name, - "filter": filter_shape, - }).to_string() + let filter_shape = filter_shape_from_filter(filter); + json!({ + "table": table_name, + "filter": filter_shape, + }) + .to_string() } #[derive(Debug, Clone)] pub enum LQBatchMessage { - NotifyExecutionDone(usize), + NotifyExecutionDone(usize), } #[derive(Clone)] pub(super) struct 
LQGroup_BatchesMeta { - last_batch_execution_started_index: i64, - last_batch_execution_started_time: f64, - last_batch_committed_index: i64, - - /// Map of live-query instances that are awaiting population. - lqis_awaiting_population: IndexMap, - - /// Map of committed live-query instances. - lqis_committed: IndexMap>, + last_batch_execution_started_index: i64, + last_batch_execution_started_time: f64, + last_batch_committed_index: i64, + + /// Map of live-query instances that are awaiting population. + lqis_awaiting_population: IndexMap, + + /// Map of committed live-query instances. + lqis_committed: IndexMap>, } impl LQGroup_BatchesMeta { - fn new(/*batches_count: i64*/) -> Self { - Self { - last_batch_execution_started_index: -1, - last_batch_execution_started_time: time_since_epoch_ms(), - //last_batch_executed_index: -1, - last_batch_committed_index: -1, - lqis_awaiting_population: IndexMap::new(), - lqis_committed: IndexMap::new(), - } - } + fn new(/*batches_count: i64*/) -> Self { + Self { + last_batch_execution_started_index: -1, + last_batch_execution_started_time: time_since_epoch_ms(), + //last_batch_executed_index: -1, + last_batch_committed_index: -1, + lqis_awaiting_population: IndexMap::new(), + lqis_committed: IndexMap::new(), + } + } } #[derive(Clone)] struct LQIAwaitingPopulationInfo { - lqi: Arc, - batch_index: usize, - batch_generation: usize, - prior_lqis_in_batch: usize, + lqi: Arc, + batch_index: usize, + batch_generation: usize, + prior_lqis_in_batch: usize, } // sync docs with LQGroup @@ -110,447 +111,444 @@ struct LQIAwaitingPopulationInfo { /// The `LQGroup` struct is the "public interface" for the lq-group. Its methods are mostly async, with each "sending a message" to the "inner" `LQGroupImpl` struct, which queues up a set of calls and then processes them as a batch. /// When the batched processing in the `LQGroupImpl` completes, it sends a message back to the "waiting" `LQGroup`s, which then are able to have their async methods return. pub(super) struct LQGroupImpl { - // shape - lq_key: LQKey, + // shape + lq_key: LQKey, - db_pool: DBPoolArc, + db_pool: DBPoolArc, - // for coordination of currently-buffering batches - channel_for_batch_messages__sender_base: ABSender, - channel_for_batch_messages__receiver_base: ABReceiver, - - batches: Vec>>, - meta: LQGroup_BatchesMeta, + // for coordination of currently-buffering batches + channel_for_batch_messages__sender_base: ABSender, + channel_for_batch_messages__receiver_base: ABReceiver, - //source_sender_for_lq_watcher_drops: Sender, + batches: Vec>>, + meta: LQGroup_BatchesMeta, - messages_in_sender: FSender, - messages_out_sender: ABSender, - /// Stores a list of the debug-strings for messages that have been read/processed (through messages_in), as well as sent (through messages_out), in order of that processing/sending. - processed_or_sent_messages: Vec, + //source_sender_for_lq_watcher_drops: Sender, + messages_in_sender: FSender, + messages_out_sender: ABSender, + /// Stores a list of the debug-strings for messages that have been read/processed (through messages_in), as well as sent (through messages_out), in order of that processing/sending. 
+ processed_or_sent_messages: Vec, } impl LQGroupImpl { - pub(super) fn new(lq_key: LQKey, db_pool: DBPoolArc, messages_in_sender: FSender, messages_out_sender: ABSender) -> Self { - // the size of this broadcast buffer should be at least as large as the number of batches (preferably with some extra room, in case of timing issues) - let (mut s1, r1): (ABSender, ABReceiver) = async_broadcast::broadcast(1000); - - // afaik, the only case where overflow can (and has been) occuring is when there are no callers waiting for the batch to execute (in execute_batch_x_once_ready) - // thus, it is fine to overflow/delete-old-entries, as no one cares about the entries in that case anyway - s1.set_overflow(true); - - let new_self = Self { - lq_key: lq_key.clone(), - - db_pool, - - // for now, have the cycling-set contain 500 entries; this is enough to avoid lock-conflicts, while not hammering memory-usage - batches: (0..500).map(|_| Arc::new(RwLock::new(LQBatch::new(lq_key.clone())))).collect_vec(), - meta: LQGroup_BatchesMeta::new(/*500*/), - - channel_for_batch_messages__sender_base: s1, - channel_for_batch_messages__receiver_base: r1, - - //source_sender_for_lq_watcher_drops: s1, - - messages_in_sender, - messages_out_sender, - processed_or_sent_messages: vec![], - }; - - new_self - } - - async fn send_message_out(&mut self, msg: LQGroup_OutMsg) { - let msg_as_str = format!("{:?}", msg); - self.messages_out_sender.broadcast(msg).await.unwrap(); - self.notify_message_processed_or_sent(msg_as_str, true); - } - /// This function should receive a stringified version of all messages sent through the LQGroup "messages_in" and "messages_out" channels. - pub(super) fn notify_message_processed_or_sent(&mut self, msg_as_str: String, sent: bool) { - self.processed_or_sent_messages.push(format!("{}{}", if sent { "Sent: " } else { "Processed: " }, msg_as_str)); - - // for debugging - /*if !msg_as_str.contains("IGJDsdE-TKGx-K7T4etO5Q") { return; } - println!("\t{} message just sent: {}", if sent { "OUT" } else { "IN" }, msg_as_str);*/ - } - pub(super) fn get_recent_messages_str(&self) -> String { - let mut recent_messages = self.processed_or_sent_messages.clone(); - recent_messages.reverse(); - let recent_messages = recent_messages.into_iter().take(10).collect_vec(); - format!("[newest first...] \n\t{}", recent_messages.join("\n\t")) - } - - // maybe temp; needed for refresh_lq_data... 
func in LQGroup - pub(super) fn get_lqis_committed_cloned(&self) -> IndexMap> { - self.meta.lqis_committed.clone() - } - - pub(super) fn get_buffering_batch(&mut self) -> (usize, &RwLock) { - //let index = self.batches.len() - 1; - //let index = meta.last_batch_buffering_started_index as usize; - let mut first_open_index = (self.meta.last_batch_execution_started_index + 1) as usize; - if first_open_index > self.batches.len() - 1 { - //println!("Looping around..."); - first_open_index = 0; - /*let temp = self.batches.get(first_open_index).unwrap(); - let temp2 = temp.write().await; - temp2.query_instances.drain(..);*/ - } - let result = self.batches.get(first_open_index).unwrap(); - (first_open_index, result) - } - - pub(super) async fn schedule_lqi_read_or_init_then_broadcast(&mut self, lq_key: &LQKey, force_queue_lqi: Option>, parent_mtx: Option<&Mtx>) -> Option> { - new_mtx!(mtx, "1:start loop to get/create the lq-instance", parent_mtx); - let mut i = -1; - loop { - i += 1; - mtx.section(format!("X:start loop with i={i}")); - match self.get_lq_instance_or_queue_it_in_progressing_batch(&lq_key, force_queue_lqi.clone(), Some(&mtx)).await { - Ok(result) => { - match result { - Some(lqi) => { - // if instance's initial contents are already populated, just return it (well, after broadcasting an event so async caller knows about it) - if lqi.last_entries_set_count.load(Ordering::SeqCst) > 0 { - self.send_message_out(LQGroup_OutMsg::LQInstanceIsInitialized(lqi.lq_key.clone(), lqi.clone(), false)).await; - mtx.section(format!("LQ-instance retrieved and returned, since already initialized. @lq_key:{}", lqi.lq_key)); - return Some(lqi); - } - // else, it must still be scheduled for population in a buffering/executing batch; so just return none (batch will automatically broadcast messages for the lqi's initialization once it happens) - else { - mtx.section(format!("LQ-instance already scheduled in a progressing-batch. @lq_key:{}", lqi.lq_key)); - return None; - } - }, - None => { - // if instance was scheduled for initialization in batch, return none (batch will automatically broadcast messages for the lqi's initialization once it happens) - mtx.section(format!("LQ-instance just scheduled in buffering-batch. @lq_key:{}", lq_key)); - return None; - }, - } - }, - // if we hit an error, retry in a bit - Err(err) => { - error!("Hit error during attempt to get or create lqi, probably due to multi-threading contention. Retrying in a bit. Error: {}", err); - mtx.section("X.1:waiting a bit, before retrying"); - time::sleep(Duration::from_millis(500)).await; - continue; - } - }; - } - } - - /// Finds the lq-instance for the given lq-key in the committed-lqis list (if present), else... - /// finds it in the lqis-awaiting-population list (if present), else... - /// creates it in the buffering-batch (and adds it to the awaiting-population list), then waits for its data-population and committal into `LQGroup.lqis_committed`. 
- async fn get_lq_instance_or_queue_it_in_progressing_batch(&mut self, lq_key: &LQKey, force_queue_lqi: Option>, mtx_p: Option<&Mtx>) -> Result>, Error> { - new_mtx!(mtx, "1:find lqi in an executing/buffering batch, or create new lqi in the buffering-batch", mtx_p); - let (batch_index, batch_generation, prior_lqis_in_batch, _lqi_in_batch, _lqi_just_initialized, _batch_msg_receiver) = 'getter_block: { - mtx.section("2:get batches_meta write-lock"); - - // create receiver now, so we start receiving all messages from this point - let batch_msg_receiver = self.channel_for_batch_messages__sender_base.new_receiver(); - - mtx.section("3:try to find lqi in lq-group's committed lqi's"); - if let Some(lqi) = self.meta.lqis_committed.get(lq_key) { - return Ok(Some(lqi.clone())); - } - - // if this lq_key is already added to a batch, and awaiting population, simply find the relevant lq-instance in that batch and return it - mtx.section("4:try to find lqi in relevant batch"); - if let Some(entry) = self.meta.lqis_awaiting_population.get(lq_key) && force_queue_lqi.is_none() { - //(entry.batch_index, entry.batch_generation, entry.prior_lqis_in_batch, entry.lqi.clone(), false, batch_msg_receiver) - return Ok(Some(entry.lqi.clone())); - } - // else, create a new lq-instance for this lq-key, and add it to the buffering-batch - else { - mtx.section("5:lqi not found in either lqis_committed or in lqis_awaiting_population; will queue it in progressing batch"); - - mtx.section("6:get batch write-lock, and insert"); - let (batch_index, batch_lock) = self.get_buffering_batch(); - - //drop(lqis_awaiting_population); - //drop(meta); - //check_lock_order::<{Lock::LQGroup_batches_meta}, {Lock::LQGroup_batches_x}>(); - let mut batch = batch_lock.write().await; - let batch_gen = batch.get_generation(); - - let lqis_in_batch = &mut batch.query_instances; - let prior_lqis_in_batch = lqis_in_batch.len(); - - let lqi = match force_queue_lqi { - Some(passed_lqi_arc) => { - lqis_in_batch.insert(lq_key.to_owned(), passed_lqi_arc.clone()); - passed_lqi_arc - }, - None => { - if let Some(lqi_arc) = lqis_in_batch.get(lq_key) { - warn!("The buffering batch contains an entry for this lq-key; this case would ideally be handled by the `lq_keys_awaiting_population` check above, but can miss if lqis_awaiting_population was changed between the drop(lqis_awaiting_population) above and this line; handling, by using the already-added-to-buffering-batch lqi."); - break 'getter_block (batch_index, batch_gen, prior_lqis_in_batch, lqi_arc.clone(), false, batch_msg_receiver); - } - - let entries = vec![]; - let new_lqi = LQInstance::new(lq_key.clone(), entries.clone()); - // let monitor-tool know about this lq-instance, even prior to its being populated with data or "committed" to the lq-group - new_lqi.send_self_to_monitor_backend(entries, 0).await; - - let new_lqi_arc = Arc::new(new_lqi); - lqis_in_batch.insert(lq_key.to_owned(), new_lqi_arc.clone()); - new_lqi_arc - } - }; - let batch_gen = batch.get_generation(); - drop(batch); - - mtx.section("7:insert lqi and such into lqis_awaiting_population"); - { - /*check_lock_order::<{Lock::LQGroup_batches_x}, {Lock::LQGroup_lqis_awaiting_population}>(); - let mut lqis_awaiting_population = self.lqis_awaiting_population.write("get_or_create_lq_instance_in_progressing_batch").await;*/ - //let mut meta = self.batches_meta.write("get_or_create_lq_instance_in_progressing_batch").await; - self.meta.lqis_awaiting_population.insert(lq_key.to_owned(), LQIAwaitingPopulationInfo { - batch_index, - 
batch_generation: batch_gen, - prior_lqis_in_batch, - lqi: lqi.clone(), - }); - } - - (batch_index, batch_gen, prior_lqis_in_batch, lqi, true, batch_msg_receiver) - } - }; - - mtx.section_2("3:schedule batch for execution", Some(format!("@prior_lqis_in_batch:{prior_lqis_in_batch}"))); - self.schedule_batch_for_execution(batch_index, batch_generation, Some(&mtx)).await?; - //Ok((lqi_in_batch, lqi_just_initialized)) - Ok(None) - } - - //async fn execute_batch_x_once_ready() {} - async fn schedule_batch_for_execution(&mut self, batch_i: usize, batch_generation: usize, mtx_p: Option<&Mtx>) -> Result<(), Error> { - new_mtx!(mtx, "1:prep", mtx_p, Some(format!("@batch_i:{batch_i} @batch_generation:{batch_generation}"))); - let last_batch_execution_time = self.meta.last_batch_execution_started_time; - - let batch_lock_arc_clone = self.batches.get(batch_i).unwrap().clone(); - let messages_in_sender_clone = self.messages_in_sender.clone(); - - mtx.section("2:wait for the correct time to execute"); - let mtx_proxy = mtx.proxy(); - tokio::spawn(async move { - // todo: fine-tune these settings, as well as scale-up algorithm - const LQ_BATCH_DURATION_MIN: f64 = 100f64; - //const LQ_BATCH_DURATION_MAX: f64 = 100f64; - let batch_end_time = last_batch_execution_time + LQ_BATCH_DURATION_MIN; - let time_till_batch_end = batch_end_time - time_since_epoch_ms(); - tokio::time::sleep(Duration::try_from_secs_f64(time_till_batch_end / 1000f64).unwrap_or(Duration::from_secs(0))).await; - - // NOTE: Multiple instances of this call-path may be executing concurrently; they "race" to be the one that makes it to this upcoming line. - // The first one to make it, sets the batch's "execution_in_progress" to true, so that the other concurrent calls all fail the if-check, and do nothing. 
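// [Editorial sketch, not part of the patch] The "first caller wins" guard described in the note
// above, reduced to a stand-alone example. `FakeBatch` is a placeholder for LQBatch; the real
// code performs the same flag-flip under the batch's write-lock before sending
// OnBatchReachedTimeToExecute.
use std::sync::Arc;

#[derive(Default)]
struct FakeBatch {
    execution_in_progress: bool,
}

// Every racer funnels through the same write-lock; only the first one to acquire it sees the
// flag still unset, flips it, and returns true (i.e. becomes the task that triggers execution).
// All later racers observe `execution_in_progress == true` and do nothing.
async fn try_become_executor(batch_lock: &Arc<tokio::sync::RwLock<FakeBatch>>) -> bool {
    let mut batch = batch_lock.write().await;
    if !batch.execution_in_progress {
        batch.execution_in_progress = true;
        true
    } else {
        false
    }
}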
- let mut batch = batch_lock_arc_clone.write().await; - if batch.get_generation() == batch_generation && !batch.execution_in_progress { - batch.execution_in_progress = true; - let _ = messages_in_sender_clone.send(LQGroup_InMsg::OnBatchReachedTimeToExecute(batch_i, Some(mtx_proxy))); - } - }); - - Ok(()) - } - - pub(super) async fn execute_batch(&mut self, batch_i: usize, mtx_p: Option<&Mtx>) { - //new_mtx!(mtx, "1:setup", mtx_p, Some(format!("@batch_i:{} @batch_generation:{} @batch_lqi_count:{}", batch_i, batch_generation, batch.query_instances.len())); - new_mtx!(mtx, "1:setup", mtx_p, Some(format!("@batch_i:{batch_i}"))); - //mtx_proxy.section("1:setup"); //, Some(format!("", ))); - - self.meta.last_batch_execution_started_index = batch_i as i64; - self.meta.last_batch_execution_started_time = time_since_epoch_ms(); - //drop(meta); // drop lock on meta prior to executing batch - - let client = self.db_pool.get().await.unwrap(); - //batch.execute(&client, Some(&mtx2)).await.expect("Executing the lq-batch failed!"); - - let batch_lock_arc_clone = self.batches.get(batch_i).unwrap().clone(); - let messages_in_sender_clone = self.messages_in_sender.clone(); - - mtx.section("2:start async part"); - let mut mtx_proxy = mtx.proxy(); - // we can't block the message-loop, so receive results in new thread, then simply send data through message - tokio::spawn(async move { - mtx_proxy.section("1:get write-lock on batch"); - //check_lock_order::<{Lock::LQGroup_batches_meta}, {Lock::LQGroup_batches_x}>(); - let mut batch = batch_lock_arc_clone.write().await; - //if batch.get_generation() == batch_generation { - - mtx_proxy.section("2:execute batch query"); - batch.execute(&client, Some(&mtx_proxy)).await.expect("Executing the lq-batch failed!"); - - //mtx2.section("3:reset batch, and drop batch write-lock"); - mtx_proxy.section("3:read batch results, and send-message about batch being completed"); - let instances_in_batch = batch.mark_generation_end_and_reset().into_iter().map(|a| a.1).collect_vec(); - // ignore send-error; if all receivers dropped, then result doesn't matter anyway - let _ = messages_in_sender_clone.send(LQGroup_InMsg::OnBatchCompleted(batch_i, instances_in_batch)); - - mtx_proxy.section("4:set execution_in_progress to false"); - batch.execution_in_progress = false; - drop(batch); // drop write-lock on batch - drop(mtx_proxy); // drop mtx_proxy explicitly, so timing-data is correct (drop of tokio threads is often delayed) - }); - } - - pub(super) async fn on_batch_completed(&mut self, batch_i: usize, instances_in_batch: Vec>) { - new_mtx!(mtx, "1:get length of instances_in_batch"); - let instances_in_batch_len = instances_in_batch.len(); - let instances_in_batch_lq_keys = instances_in_batch.iter().map(|a| a.lq_key.clone()).collect_vec(); - - mtx.section("5:commit the lqi's in batch to overall group, and update batches-meta"); - //mtx2.section_2("5:commit the lqi's in batch to overall group, and update batches-meta", Some(format!("@open-locks:{}", meta.lqis_committed.get_live_guards_str()))); - { - // Acquire lock on lq_keys_awaiting_population first; this way, code in `get_or_create_lq_instance_in_progressing_batch` knows that as long as it holds a read-lock: - // 1) It is safe to create the receiver that is then used to wait for the batch's execution. (ie. 
we have not yet broadcast the `LQBatchMessage::NotifyExecutionDone` message) - /*check_lock_order::<{Lock::LQGroup_batches_meta}, {Lock::LQGroup_lqis_awaiting_population}>(); - let mut lq_keys_awaiting_population = self.lqis_awaiting_population.write("execute_batch_x_once_ready").await;*/ - - //let instances_in_batch = batch.query_instances.read().await; - for new_lqi in instances_in_batch.into_iter() { - let key = &new_lqi.lq_key; - let old_lqi = self.meta.lqis_committed.insert(key.to_owned(), new_lqi.clone()); - if let Some(old_lqi) = old_lqi { - // if an old-lqi was replaced, but in fact that "old lqi" was the same lqi... - if Arc::ptr_eq(&new_lqi.clone(), &old_lqi) { - // ...then this "recommit" is just due to a `refresh_lq_data_for_x` call (see function below) and is fine; do nothing - } - // else, the new-lqi is in fact a new instance/allocation (which means we need to merge the old-lqi into the new-lqi, as smoother alternative to erroring/retrying) - else { - // drain the old-watchers vector; this way watchers will only be getting updates from the new-lqi - let old_watchers: Vec<(Uuid, LQEntryWatcher)> = old_lqi.entry_watchers.write().await.drain().collect_vec(); - let old_watchers_count = old_watchers.len(); - - //check_lock_order::<{Lock::LQGroup_batches_meta}, {Lock::LQInstance_last_entries}>(); - let latest_entries = new_lqi.last_entries.read().await.clone(); - - let new_lqi = self.meta.lqis_committed.get(key).ok_or(anyhow!("New-lqi not found!")).unwrap(); - //check_lock_order::<{Lock::LQGroup_batches_meta}, {Lock::LQInstance_entry_watchers}>(); - let mut new_watchers = new_lqi.entry_watchers.write().await; - for (old_stream_id, old_watcher) in old_watchers { - // since lqi that old-watcher was attached to might have had a not-yet-updated entries-set, send each watcher the latest entries-set - old_watcher.new_entries_channel_sender.send(latest_entries.clone()).unwrap(); - - new_watchers.insert(old_stream_id, old_watcher); - } - - // commented; not needed, since a call to this will occur soon back up in the `start_lq_watcher` function (through its call to `get_or_create_watcher`) - //new_lqi.send_self_to_monitor_backend(new_entries, entry_watchers.len()).await; - - error!("After batch completed, lq-instance was being committed, but an earlier entry was found; this should never happen. Nonetheless, as defensive programming, attempting to resolve by merging the watchers... 
{}", - format!( - "@batch_i:{} @old_count:{} @new_count:{} @final_count:{} @recent_messages:{}", - batch_i, /*batch_generation,*/ old_watchers_count, new_watchers.len() - old_watchers_count, new_watchers.len(), self.get_recent_messages_str() - ) - ); - } - } - } - mtx.current_section.extra_info = Some(format!("@group_lqi_count:{} @batch_lqi_count:{}", self.meta.lqis_committed.len(), instances_in_batch_len)); - - // now that we've "committed" this batch's lqi's to the lq-group, remove their lq-keys from the `lq_keys_awaiting_population` list - for lq_key in instances_in_batch_lq_keys { - if let Some(info) = self.meta.lqis_awaiting_population.shift_remove(&lq_key) { - self.send_message_out(LQGroup_OutMsg::LQInstanceIsInitialized(lq_key, info.lqi, true)).await; - } - } - - self.meta.last_batch_committed_index = batch_i as i64; - //meta.last_batch_buffering_started_index = batch_i as i64; - } - - mtx.section("6:send message notifying of execution being done"); - self.channel_for_batch_messages__sender_base.broadcast(LQBatchMessage::NotifyExecutionDone(batch_i)).await.unwrap(); - } - - /// If the caller function itself is a potential "completer" of the batch, then the `receiver` it provides to this function must be - /// created (using `self.channel_for_batch_messages__sender_base.new_receiver()`) *prior* to sending of the "batch completed" message. - async fn wait_for_batch_to_complete(&self, batch_i: usize, timeout_secs: u64, mut batch_msg_receiver: ABReceiver) -> Result<(), Error> { - loop { - let wait_for_execution_done = async { - loop { - let msg = batch_msg_receiver.recv().await.unwrap(); - match msg { - LQBatchMessage::NotifyExecutionDone(executed_batch_i) => { - if executed_batch_i == batch_i { - return; - } - }, - } - } - }; - match time::timeout(Duration::from_secs(timeout_secs), wait_for_execution_done).await { - // temp: if we timeout after X seconds, having failed to receive the "batch execution done" message, assume we "missed" the batch-execution... - Err(_err) => { - error!("Timed out waiting for confirmation of batch-execution completion. Retrying this request shortly... @table:{} @filter_shape:{}", self.lq_key.table_name, self.lq_key.filter); - // and so pass an error to parent (triggering a retry in a moment) - return Err(anyhow!("timed_out")); - }, - // the "batch execution is done" message was received; break out of the message-reading loop - Ok(_) => return Ok(()), - }; - } - } - - /*pub fn get_sender_for_lq_watcher_drops(&self) -> Sender { - self.source_sender_for_lq_watcher_drops.clone() - }*/ - pub(super) async fn drop_lq_watcher(&mut self, lq_key: &LQKey, stream_id: Uuid) { - new_mtx!(mtx, "1:get query_instances write-lock"); - debug!("Got lq-watcher drop request. @table:{} @filter:{} @stream_id:{}", lq_key.table_name, lq_key.filter, stream_id); - - mtx.section("2:get lq_instance for key, then get lq_instance.entry_watcher write-lock"); - let new_watcher_count = { - let lq_instance = match self.meta.lqis_committed.get_mut(lq_key) { - Some(a) => a, - None => return, // if entry already deleted, just ignore for now [maybe fixed after change to get_or_create_lq_instance?] 
- }; - //check_lock_order::<{Lock::LQGroup_batches_meta}, {Lock::LQInstance_entry_watchers}>(); - let mut entry_watchers = lq_instance.entry_watchers.write().await; - - mtx.section("3:update entry_watchers, then remove lq_instance (if no watchers), then complete"); - // commented the `.expect`, since was failing occasionally, and I don't have time to debug atm [maybe fixed after change to get_or_create_lq_instance?] - //let _removed_value = entry_watchers.remove(&stream_id).expect(&format!("Trying to drop LQWatcher, but failed, since no entry was found with this key:{}", lq_key)); - entry_watchers.remove(&stream_id); - - // only send update for lqi if we're not about to be deleted - if entry_watchers.len() > 0 { - // todo: try to find a way to provide an up-to-date result_entries without getting a read-lock here (since wasn't necessary before sending-to-backend behavior) - check_lock_order::<{Lock::LQInstance_entry_watchers}, {Lock::LQInstance_last_entries}>(); - let current_entries = lq_instance.last_entries.read().await.clone(); - lq_instance.send_self_to_monitor_backend(current_entries, entry_watchers.len()).await; - } - - entry_watchers.len() - }; - if new_watcher_count == 0 { - self.meta.lqis_committed.shift_remove(lq_key); - debug!("Watcher count for live-query entry dropped to 0, so removing."); - - //let lq_key = get_lq_instance_key(&self.table_name, &self.filter); - if let Err(err) = MESSAGE_SENDER_TO_MONITOR_BACKEND.0.broadcast(Message_ASToMB::LQInstanceUpdated { - table_name: lq_key.table_name.to_owned(), - filter: serde_json::to_value(lq_key.filter.clone()).unwrap(), - last_entries: vec![], - watchers_count: 0u32, - deleting: true, - }).await { - error!("Errored while broadcasting LQInstanceUpdated message. @error:{}", err); - } - } - - debug!("LQ-watcher drop complete. 
@watcher_count_for_entry:{} @lq_entry_count:{}", new_watcher_count, self.meta.lqis_committed.len()); - } - - pub(super) async fn notify_of_ld_change(&self, change: &LDChange) { - if self.lq_key.table_name != change.table { - return; - } - - for (_lq_key, lq_instance) in self.meta.lqis_committed.iter() { - lq_instance.on_table_changed(&change, None).await; - } - } -} \ No newline at end of file + pub(super) fn new(lq_key: LQKey, db_pool: DBPoolArc, messages_in_sender: FSender, messages_out_sender: ABSender) -> Self { + // the size of this broadcast buffer should be at least as large as the number of batches (preferably with some extra room, in case of timing issues) + let (mut s1, r1): (ABSender, ABReceiver) = async_broadcast::broadcast(1000); + + // afaik, the only case where overflow can (and has been) occuring is when there are no callers waiting for the batch to execute (in execute_batch_x_once_ready) + // thus, it is fine to overflow/delete-old-entries, as no one cares about the entries in that case anyway + s1.set_overflow(true); + + let new_self = Self { + lq_key: lq_key.clone(), + + db_pool, + + // for now, have the cycling-set contain 500 entries; this is enough to avoid lock-conflicts, while not hammering memory-usage + batches: (0..500).map(|_| Arc::new(RwLock::new(LQBatch::new(lq_key.clone())))).collect_vec(), + meta: LQGroup_BatchesMeta::new(/*500*/), + + channel_for_batch_messages__sender_base: s1, + channel_for_batch_messages__receiver_base: r1, + + //source_sender_for_lq_watcher_drops: s1, + messages_in_sender, + messages_out_sender, + processed_or_sent_messages: vec![], + }; + + new_self + } + + async fn send_message_out(&mut self, msg: LQGroup_OutMsg) { + let msg_as_str = format!("{:?}", msg); + self.messages_out_sender.broadcast(msg).await.unwrap(); + self.notify_message_processed_or_sent(msg_as_str, true); + } + /// This function should receive a stringified version of all messages sent through the LQGroup "messages_in" and "messages_out" channels. + pub(super) fn notify_message_processed_or_sent(&mut self, msg_as_str: String, sent: bool) { + self.processed_or_sent_messages.push(format!("{}{}", if sent { "Sent: " } else { "Processed: " }, msg_as_str)); + + // for debugging + /*if !msg_as_str.contains("IGJDsdE-TKGx-K7T4etO5Q") { return; } + println!("\t{} message just sent: {}", if sent { "OUT" } else { "IN" }, msg_as_str);*/ + } + pub(super) fn get_recent_messages_str(&self) -> String { + let mut recent_messages = self.processed_or_sent_messages.clone(); + recent_messages.reverse(); + let recent_messages = recent_messages.into_iter().take(10).collect_vec(); + format!("[newest first...] \n\t{}", recent_messages.join("\n\t")) + } + + // maybe temp; needed for refresh_lq_data... 
func in LQGroup + pub(super) fn get_lqis_committed_cloned(&self) -> IndexMap> { + self.meta.lqis_committed.clone() + } + + pub(super) fn get_buffering_batch(&mut self) -> (usize, &RwLock) { + //let index = self.batches.len() - 1; + //let index = meta.last_batch_buffering_started_index as usize; + let mut first_open_index = (self.meta.last_batch_execution_started_index + 1) as usize; + if first_open_index > self.batches.len() - 1 { + //println!("Looping around..."); + first_open_index = 0; + /*let temp = self.batches.get(first_open_index).unwrap(); + let temp2 = temp.write().await; + temp2.query_instances.drain(..);*/ + } + let result = self.batches.get(first_open_index).unwrap(); + (first_open_index, result) + } + + pub(super) async fn schedule_lqi_read_or_init_then_broadcast(&mut self, lq_key: &LQKey, force_queue_lqi: Option>, parent_mtx: Option<&Mtx>) -> Option> { + new_mtx!(mtx, "1:start loop to get/create the lq-instance", parent_mtx); + let mut i = -1; + loop { + i += 1; + mtx.section(format!("X:start loop with i={i}")); + match self.get_lq_instance_or_queue_it_in_progressing_batch(&lq_key, force_queue_lqi.clone(), Some(&mtx)).await { + Ok(result) => { + match result { + Some(lqi) => { + // if instance's initial contents are already populated, just return it (well, after broadcasting an event so async caller knows about it) + if lqi.last_entries_set_count.load(Ordering::SeqCst) > 0 { + self.send_message_out(LQGroup_OutMsg::LQInstanceIsInitialized(lqi.lq_key.clone(), lqi.clone(), false)).await; + mtx.section(format!("LQ-instance retrieved and returned, since already initialized. @lq_key:{}", lqi.lq_key)); + return Some(lqi); + } + // else, it must still be scheduled for population in a buffering/executing batch; so just return none (batch will automatically broadcast messages for the lqi's initialization once it happens) + else { + mtx.section(format!("LQ-instance already scheduled in a progressing-batch. @lq_key:{}", lqi.lq_key)); + return None; + } + }, + None => { + // if instance was scheduled for initialization in batch, return none (batch will automatically broadcast messages for the lqi's initialization once it happens) + mtx.section(format!("LQ-instance just scheduled in buffering-batch. @lq_key:{}", lq_key)); + return None; + }, + } + }, + // if we hit an error, retry in a bit + Err(err) => { + error!("Hit error during attempt to get or create lqi, probably due to multi-threading contention. Retrying in a bit. Error: {}", err); + mtx.section("X.1:waiting a bit, before retrying"); + time::sleep(Duration::from_millis(500)).await; + continue; + }, + }; + } + } + + /// Finds the lq-instance for the given lq-key in the committed-lqis list (if present), else... + /// finds it in the lqis-awaiting-population list (if present), else... + /// creates it in the buffering-batch (and adds it to the awaiting-population list), then waits for its data-population and committal into `LQGroup.lqis_committed`. 
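// [Editorial sketch, not part of the patch] The three-tier lookup order described in the doc
// comment above, reduced to plain maps. `Key` and `Instance` stand in for LQKey and LQInstance;
// the real code additionally pushes tier-3 entries into the buffering LQBatch and records the
// batch index/generation alongside them.
use std::sync::Arc;
use indexmap::IndexMap;

type Key = String;
struct Instance { key: Key }

struct Tiers {
    committed: IndexMap<Key, Arc<Instance>>,           // populated + committed lq-instances
    awaiting_population: IndexMap<Key, Arc<Instance>>, // queued in a progressing batch
}
impl Tiers {
    fn get_or_queue(&mut self, key: &Key) -> Arc<Instance> {
        if let Some(existing) = self.committed.get(key) {
            return existing.clone(); // tier 1: already committed, usable immediately
        }
        if let Some(pending) = self.awaiting_population.get(key) {
            return pending.clone(); // tier 2: already queued; caller waits for the batch
        }
        // tier 3: create the instance and queue it for the next batch execution
        let new_instance = Arc::new(Instance { key: key.clone() });
        self.awaiting_population.insert(key.clone(), new_instance.clone());
        new_instance
    }
}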
+ async fn get_lq_instance_or_queue_it_in_progressing_batch(&mut self, lq_key: &LQKey, force_queue_lqi: Option>, mtx_p: Option<&Mtx>) -> Result>, Error> { + new_mtx!(mtx, "1:find lqi in an executing/buffering batch, or create new lqi in the buffering-batch", mtx_p); + let (batch_index, batch_generation, prior_lqis_in_batch, _lqi_in_batch, _lqi_just_initialized, _batch_msg_receiver) = 'getter_block: { + mtx.section("2:get batches_meta write-lock"); + + // create receiver now, so we start receiving all messages from this point + let batch_msg_receiver = self.channel_for_batch_messages__sender_base.new_receiver(); + + mtx.section("3:try to find lqi in lq-group's committed lqi's"); + if let Some(lqi) = self.meta.lqis_committed.get(lq_key) { + return Ok(Some(lqi.clone())); + } + + // if this lq_key is already added to a batch, and awaiting population, simply find the relevant lq-instance in that batch and return it + mtx.section("4:try to find lqi in relevant batch"); + if let Some(entry) = self.meta.lqis_awaiting_population.get(lq_key) + && force_queue_lqi.is_none() + { + //(entry.batch_index, entry.batch_generation, entry.prior_lqis_in_batch, entry.lqi.clone(), false, batch_msg_receiver) + return Ok(Some(entry.lqi.clone())); + } + // else, create a new lq-instance for this lq-key, and add it to the buffering-batch + else { + mtx.section("5:lqi not found in either lqis_committed or in lqis_awaiting_population; will queue it in progressing batch"); + + mtx.section("6:get batch write-lock, and insert"); + let (batch_index, batch_lock) = self.get_buffering_batch(); + + //drop(lqis_awaiting_population); + //drop(meta); + //check_lock_order::<{Lock::LQGroup_batches_meta}, {Lock::LQGroup_batches_x}>(); + let mut batch = batch_lock.write().await; + let batch_gen = batch.get_generation(); + + let lqis_in_batch = &mut batch.query_instances; + let prior_lqis_in_batch = lqis_in_batch.len(); + + let lqi = match force_queue_lqi { + Some(passed_lqi_arc) => { + lqis_in_batch.insert(lq_key.to_owned(), passed_lqi_arc.clone()); + passed_lqi_arc + }, + None => { + if let Some(lqi_arc) = lqis_in_batch.get(lq_key) { + warn!("The buffering batch contains an entry for this lq-key; this case would ideally be handled by the `lq_keys_awaiting_population` check above, but can miss if lqis_awaiting_population was changed between the drop(lqis_awaiting_population) above and this line; handling, by using the already-added-to-buffering-batch lqi."); + break 'getter_block (batch_index, batch_gen, prior_lqis_in_batch, lqi_arc.clone(), false, batch_msg_receiver); + } + + let entries = vec![]; + let new_lqi = LQInstance::new(lq_key.clone(), entries.clone()); + // let monitor-tool know about this lq-instance, even prior to its being populated with data or "committed" to the lq-group + new_lqi.send_self_to_monitor_backend(entries, 0).await; + + let new_lqi_arc = Arc::new(new_lqi); + lqis_in_batch.insert(lq_key.to_owned(), new_lqi_arc.clone()); + new_lqi_arc + }, + }; + let batch_gen = batch.get_generation(); + drop(batch); + + mtx.section("7:insert lqi and such into lqis_awaiting_population"); + { + /*check_lock_order::<{Lock::LQGroup_batches_x}, {Lock::LQGroup_lqis_awaiting_population}>(); + let mut lqis_awaiting_population = self.lqis_awaiting_population.write("get_or_create_lq_instance_in_progressing_batch").await;*/ + //let mut meta = self.batches_meta.write("get_or_create_lq_instance_in_progressing_batch").await; + self.meta.lqis_awaiting_population.insert(lq_key.to_owned(), LQIAwaitingPopulationInfo { batch_index, 
batch_generation: batch_gen, prior_lqis_in_batch, lqi: lqi.clone() }); + } + + (batch_index, batch_gen, prior_lqis_in_batch, lqi, true, batch_msg_receiver) + } + }; + + mtx.section_2("3:schedule batch for execution", Some(format!("@prior_lqis_in_batch:{prior_lqis_in_batch}"))); + self.schedule_batch_for_execution(batch_index, batch_generation, Some(&mtx)).await?; + //Ok((lqi_in_batch, lqi_just_initialized)) + Ok(None) + } + + //async fn execute_batch_x_once_ready() {} + async fn schedule_batch_for_execution(&mut self, batch_i: usize, batch_generation: usize, mtx_p: Option<&Mtx>) -> Result<(), Error> { + new_mtx!(mtx, "1:prep", mtx_p, Some(format!("@batch_i:{batch_i} @batch_generation:{batch_generation}"))); + let last_batch_execution_time = self.meta.last_batch_execution_started_time; + + let batch_lock_arc_clone = self.batches.get(batch_i).unwrap().clone(); + let messages_in_sender_clone = self.messages_in_sender.clone(); + + mtx.section("2:wait for the correct time to execute"); + let mtx_proxy = mtx.proxy(); + tokio::spawn(async move { + // todo: fine-tune these settings, as well as scale-up algorithm + const LQ_BATCH_DURATION_MIN: f64 = 100f64; + //const LQ_BATCH_DURATION_MAX: f64 = 100f64; + let batch_end_time = last_batch_execution_time + LQ_BATCH_DURATION_MIN; + let time_till_batch_end = batch_end_time - time_since_epoch_ms(); + tokio::time::sleep(Duration::try_from_secs_f64(time_till_batch_end / 1000f64).unwrap_or(Duration::from_secs(0))).await; + + // NOTE: Multiple instances of this call-path may be executing concurrently; they "race" to be the one that makes it to this upcoming line. + // The first one to make it, sets the batch's "execution_in_progress" to true, so that the other concurrent calls all fail the if-check, and do nothing. + let mut batch = batch_lock_arc_clone.write().await; + if batch.get_generation() == batch_generation && !batch.execution_in_progress { + batch.execution_in_progress = true; + let _ = messages_in_sender_clone.send(LQGroup_InMsg::OnBatchReachedTimeToExecute(batch_i, Some(mtx_proxy))); + } + }); + + Ok(()) + } + + pub(super) async fn execute_batch(&mut self, batch_i: usize, mtx_p: Option<&Mtx>) { + //new_mtx!(mtx, "1:setup", mtx_p, Some(format!("@batch_i:{} @batch_generation:{} @batch_lqi_count:{}", batch_i, batch_generation, batch.query_instances.len())); + new_mtx!(mtx, "1:setup", mtx_p, Some(format!("@batch_i:{batch_i}"))); + //mtx_proxy.section("1:setup"); //, Some(format!("", ))); + + self.meta.last_batch_execution_started_index = batch_i as i64; + self.meta.last_batch_execution_started_time = time_since_epoch_ms(); + //drop(meta); // drop lock on meta prior to executing batch + + let client = self.db_pool.get().await.unwrap(); + //batch.execute(&client, Some(&mtx2)).await.expect("Executing the lq-batch failed!"); + + let batch_lock_arc_clone = self.batches.get(batch_i).unwrap().clone(); + let messages_in_sender_clone = self.messages_in_sender.clone(); + + mtx.section("2:start async part"); + let mut mtx_proxy = mtx.proxy(); + // we can't block the message-loop, so receive results in new thread, then simply send data through message + tokio::spawn(async move { + mtx_proxy.section("1:get write-lock on batch"); + //check_lock_order::<{Lock::LQGroup_batches_meta}, {Lock::LQGroup_batches_x}>(); + let mut batch = batch_lock_arc_clone.write().await; + //if batch.get_generation() == batch_generation { + + mtx_proxy.section("2:execute batch query"); + batch.execute(&client, Some(&mtx_proxy)).await.expect("Executing the lq-batch failed!"); + + 
//mtx2.section("3:reset batch, and drop batch write-lock"); + mtx_proxy.section("3:read batch results, and send-message about batch being completed"); + let instances_in_batch = batch.mark_generation_end_and_reset().into_iter().map(|a| a.1).collect_vec(); + // ignore send-error; if all receivers dropped, then result doesn't matter anyway + let _ = messages_in_sender_clone.send(LQGroup_InMsg::OnBatchCompleted(batch_i, instances_in_batch)); + + mtx_proxy.section("4:set execution_in_progress to false"); + batch.execution_in_progress = false; + drop(batch); // drop write-lock on batch + drop(mtx_proxy); // drop mtx_proxy explicitly, so timing-data is correct (drop of tokio threads is often delayed) + }); + } + + pub(super) async fn on_batch_completed(&mut self, batch_i: usize, instances_in_batch: Vec>) { + new_mtx!(mtx, "1:get length of instances_in_batch"); + let instances_in_batch_len = instances_in_batch.len(); + let instances_in_batch_lq_keys = instances_in_batch.iter().map(|a| a.lq_key.clone()).collect_vec(); + + mtx.section("5:commit the lqi's in batch to overall group, and update batches-meta"); + //mtx2.section_2("5:commit the lqi's in batch to overall group, and update batches-meta", Some(format!("@open-locks:{}", meta.lqis_committed.get_live_guards_str()))); + { + // Acquire lock on lq_keys_awaiting_population first; this way, code in `get_or_create_lq_instance_in_progressing_batch` knows that as long as it holds a read-lock: + // 1) It is safe to create the receiver that is then used to wait for the batch's execution. (ie. we have not yet broadcast the `LQBatchMessage::NotifyExecutionDone` message) + /*check_lock_order::<{Lock::LQGroup_batches_meta}, {Lock::LQGroup_lqis_awaiting_population}>(); + let mut lq_keys_awaiting_population = self.lqis_awaiting_population.write("execute_batch_x_once_ready").await;*/ + + //let instances_in_batch = batch.query_instances.read().await; + for new_lqi in instances_in_batch.into_iter() { + let key = &new_lqi.lq_key; + let old_lqi = self.meta.lqis_committed.insert(key.to_owned(), new_lqi.clone()); + if let Some(old_lqi) = old_lqi { + // if an old-lqi was replaced, but in fact that "old lqi" was the same lqi... 
+ if Arc::ptr_eq(&new_lqi.clone(), &old_lqi) { + // ...then this "recommit" is just due to a `refresh_lq_data_for_x` call (see function below) and is fine; do nothing + } + // else, the new-lqi is in fact a new instance/allocation (which means we need to merge the old-lqi into the new-lqi, as smoother alternative to erroring/retrying) + else { + // drain the old-watchers vector; this way watchers will only be getting updates from the new-lqi + let old_watchers: Vec<(Uuid, LQEntryWatcher)> = old_lqi.entry_watchers.write().await.drain().collect_vec(); + let old_watchers_count = old_watchers.len(); + + //check_lock_order::<{Lock::LQGroup_batches_meta}, {Lock::LQInstance_last_entries}>(); + let latest_entries = new_lqi.last_entries.read().await.clone(); + + let new_lqi = self.meta.lqis_committed.get(key).ok_or(anyhow!("New-lqi not found!")).unwrap(); + //check_lock_order::<{Lock::LQGroup_batches_meta}, {Lock::LQInstance_entry_watchers}>(); + let mut new_watchers = new_lqi.entry_watchers.write().await; + for (old_stream_id, old_watcher) in old_watchers { + // since lqi that old-watcher was attached to might have had a not-yet-updated entries-set, send each watcher the latest entries-set + old_watcher.new_entries_channel_sender.send(latest_entries.clone()).unwrap(); + + new_watchers.insert(old_stream_id, old_watcher); + } + + // commented; not needed, since a call to this will occur soon back up in the `start_lq_watcher` function (through its call to `get_or_create_watcher`) + //new_lqi.send_self_to_monitor_backend(new_entries, entry_watchers.len()).await; + + error!( + "After batch completed, lq-instance was being committed, but an earlier entry was found; this should never happen. Nonetheless, as defensive programming, attempting to resolve by merging the watchers... {}", + format!("@batch_i:{} @old_count:{} @new_count:{} @final_count:{} @recent_messages:{}", batch_i, /*batch_generation,*/ old_watchers_count, new_watchers.len() - old_watchers_count, new_watchers.len(), self.get_recent_messages_str()) + ); + } + } + } + mtx.current_section.extra_info = Some(format!("@group_lqi_count:{} @batch_lqi_count:{}", self.meta.lqis_committed.len(), instances_in_batch_len)); + + // now that we've "committed" this batch's lqi's to the lq-group, remove their lq-keys from the `lq_keys_awaiting_population` list + for lq_key in instances_in_batch_lq_keys { + if let Some(info) = self.meta.lqis_awaiting_population.shift_remove(&lq_key) { + self.send_message_out(LQGroup_OutMsg::LQInstanceIsInitialized(lq_key, info.lqi, true)).await; + } + } + + self.meta.last_batch_committed_index = batch_i as i64; + //meta.last_batch_buffering_started_index = batch_i as i64; + } + + mtx.section("6:send message notifying of execution being done"); + self.channel_for_batch_messages__sender_base.broadcast(LQBatchMessage::NotifyExecutionDone(batch_i)).await.unwrap(); + } + + /// If the caller function itself is a potential "completer" of the batch, then the `receiver` it provides to this function must be + /// created (using `self.channel_for_batch_messages__sender_base.new_receiver()`) *prior* to sending of the "batch completed" message. 
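// [Editorial sketch, not part of the patch] Why the receiver has to exist before the "done"
// message is sent: an async_broadcast receiver only sees messages broadcast after it was
// created, so subscribing after the execution finished would miss the notification. `DoneMsg`
// stands in for LQBatchMessage::NotifyExecutionDone.
#[derive(Debug, Clone, PartialEq)]
struct DoneMsg(usize);

async fn demo_receiver_ordering() {
    let (sender, _keepalive_receiver) = async_broadcast::broadcast::<DoneMsg>(16);

    // Correct order: subscribe first...
    let mut receiver = sender.new_receiver();
    // ...then let the event happen.
    sender.broadcast(DoneMsg(3)).await.unwrap();
    assert_eq!(receiver.recv().await.unwrap(), DoneMsg(3));

    // A receiver created only now is too late for DoneMsg(3); nothing is buffered for it.
    let mut late_receiver = sender.new_receiver();
    assert!(late_receiver.try_recv().is_err());
}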
+ async fn wait_for_batch_to_complete(&self, batch_i: usize, timeout_secs: u64, mut batch_msg_receiver: ABReceiver) -> Result<(), Error> { + loop { + let wait_for_execution_done = async { + loop { + let msg = batch_msg_receiver.recv().await.unwrap(); + match msg { + LQBatchMessage::NotifyExecutionDone(executed_batch_i) => { + if executed_batch_i == batch_i { + return; + } + }, + } + } + }; + match time::timeout(Duration::from_secs(timeout_secs), wait_for_execution_done).await { + // temp: if we timeout after X seconds, having failed to receive the "batch execution done" message, assume we "missed" the batch-execution... + Err(_err) => { + error!("Timed out waiting for confirmation of batch-execution completion. Retrying this request shortly... @table:{} @filter_shape:{}", self.lq_key.table_name, self.lq_key.filter); + // and so pass an error to parent (triggering a retry in a moment) + return Err(anyhow!("timed_out")); + }, + // the "batch execution is done" message was received; break out of the message-reading loop + Ok(_) => return Ok(()), + }; + } + } + + /*pub fn get_sender_for_lq_watcher_drops(&self) -> Sender { + self.source_sender_for_lq_watcher_drops.clone() + }*/ + pub(super) async fn drop_lq_watcher(&mut self, lq_key: &LQKey, stream_id: Uuid) { + new_mtx!(mtx, "1:get query_instances write-lock"); + debug!("Got lq-watcher drop request. @table:{} @filter:{} @stream_id:{}", lq_key.table_name, lq_key.filter, stream_id); + + mtx.section("2:get lq_instance for key, then get lq_instance.entry_watcher write-lock"); + let new_watcher_count = { + let lq_instance = match self.meta.lqis_committed.get_mut(lq_key) { + Some(a) => a, + None => return, // if entry already deleted, just ignore for now [maybe fixed after change to get_or_create_lq_instance?] + }; + //check_lock_order::<{Lock::LQGroup_batches_meta}, {Lock::LQInstance_entry_watchers}>(); + let mut entry_watchers = lq_instance.entry_watchers.write().await; + + mtx.section("3:update entry_watchers, then remove lq_instance (if no watchers), then complete"); + // commented the `.expect`, since was failing occasionally, and I don't have time to debug atm [maybe fixed after change to get_or_create_lq_instance?] + //let _removed_value = entry_watchers.remove(&stream_id).expect(&format!("Trying to drop LQWatcher, but failed, since no entry was found with this key:{}", lq_key)); + entry_watchers.remove(&stream_id); + + // only send update for lqi if we're not about to be deleted + if entry_watchers.len() > 0 { + // todo: try to find a way to provide an up-to-date result_entries without getting a read-lock here (since wasn't necessary before sending-to-backend behavior) + check_lock_order::<{ Lock::LQInstance_entry_watchers }, { Lock::LQInstance_last_entries }>(); + let current_entries = lq_instance.last_entries.read().await.clone(); + lq_instance.send_self_to_monitor_backend(current_entries, entry_watchers.len()).await; + } + + entry_watchers.len() + }; + if new_watcher_count == 0 { + self.meta.lqis_committed.shift_remove(lq_key); + debug!("Watcher count for live-query entry dropped to 0, so removing."); + + //let lq_key = get_lq_instance_key(&self.table_name, &self.filter); + if let Err(err) = MESSAGE_SENDER_TO_MONITOR_BACKEND + .0 + .broadcast(Message_ASToMB::LQInstanceUpdated { + table_name: lq_key.table_name.to_owned(), + filter: serde_json::to_value(lq_key.filter.clone()).unwrap(), + last_entries: vec![], + watchers_count: 0u32, + deleting: true, + }) + .await + { + error!("Errored while broadcasting LQInstanceUpdated message. 
@error:{}", err); + } + } + + debug!("LQ-watcher drop complete. @watcher_count_for_entry:{} @lq_entry_count:{}", new_watcher_count, self.meta.lqis_committed.len()); + } + + pub(super) async fn notify_of_ld_change(&self, change: &LDChange) { + if self.lq_key.table_name != change.table { + return; + } + + for (_lq_key, lq_instance) in self.meta.lqis_committed.iter() { + lq_instance.on_table_changed(&change, None).await; + } + } +} diff --git a/Packages/app-server/src/store/live_queries_/lq_instance.rs b/Packages/app-server/src/store/live_queries_/lq_instance.rs index 3f5c7faf8..40cfa0334 100644 --- a/Packages/app-server/src/store/live_queries_/lq_instance.rs +++ b/Packages/app-server/src/store/live_queries_/lq_instance.rs @@ -1,243 +1,237 @@ -use std::borrow::Cow; -use std::cell::RefCell; -use std::collections::{HashMap, HashSet}; -use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::rc::Rc; -use std::str::FromStr; -use std::sync::Arc; -use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; -use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; -use rust_shared::async_graphql::{Schema, MergedObject, MergedSubscription, ObjectType, Data, Result, SubscriptionType}; -use axum::http::Method; +use axum::extract::ws::{CloseFrame, Message}; +use axum::extract::{FromRequest, WebSocketUpgrade}; use axum::http::header::CONTENT_TYPE; +use axum::http::Method; +use axum::http::{self, Request, Response, StatusCode}; use axum::response::{self, IntoResponse}; -use axum::routing::{get, post, MethodFilter, on_service}; +use axum::routing::{get, on_service, post, MethodFilter}; +use axum::Error; use axum::{extract, Router}; -use rust_shared::flume::{Sender, Receiver, unbounded, self}; +use futures_util::future::{BoxFuture, Ready}; +use futures_util::stream::{SplitSink, SplitStream}; +use futures_util::{future, FutureExt, Sink, SinkExt, Stream, StreamExt}; +use rust_shared::async_graphql::futures_util::task::{Context, Poll}; +use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; +use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; +use rust_shared::async_graphql::{Data, MergedObject, MergedSubscription, ObjectType, Result, Schema, SubscriptionType}; +use rust_shared::flume::{self, unbounded, Receiver, Sender}; use rust_shared::links::app_server_to_monitor_backend::Message_ASToMB; use rust_shared::serde::de::DeserializeOwned; use rust_shared::serde::{Deserialize, Serialize}; -use rust_shared::serde_json::{json, Map, self}; +use rust_shared::serde_json::{self, json, Map}; use rust_shared::tokio::sync::{mpsc, Mutex, RwLock}; use rust_shared::tokio_postgres::{Client, Row}; use rust_shared::utils::mtx::mtx::Mtx; use rust_shared::utils::type_aliases::RowData; -use rust_shared::{futures, axum, tower, tower_http, Lock, check_lock_order, Assert, IsTrue, lock_as_usize_LQInstance_last_entries, new_mtx}; +use rust_shared::uuid::Uuid; +use rust_shared::{axum, check_lock_order, futures, lock_as_usize_LQInstance_last_entries, new_mtx, tower, tower_http, Assert, IsTrue, Lock}; +use std::borrow::Cow; +use std::cell::RefCell; +use std::collections::{HashMap, HashSet}; +use std::convert::Infallible; +use std::future::Future; +use std::pin::Pin; +use std::rc::Rc; +use std::str::FromStr; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; +use std::sync::Arc; use tower::Service; -use tower_http::cors::{CorsLayer}; -use rust_shared::async_graphql::futures_util::task::{Context, Poll}; -use 
rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; -use axum::extract::ws::{CloseFrame, Message}; -use axum::extract::{FromRequest, WebSocketUpgrade}; -use axum::http::{self, Request, Response, StatusCode}; -use axum::Error; -use futures_util::future::{BoxFuture, Ready}; -use futures_util::stream::{SplitSink, SplitStream}; -use futures_util::{future, Sink, SinkExt, Stream, StreamExt, FutureExt}; +use tower_http::cors::CorsLayer; use tracing::error; -use rust_shared::uuid::Uuid; use crate::links::monitor_backend_link::MESSAGE_SENDER_TO_MONITOR_BACKEND; use crate::utils::db::filter::{entry_matches_filter, QueryFilter}; -use crate::utils::db::pg_stream_parsing::{LDChange}; -use crate::utils::db::queries::{get_entries_in_collection}; +use crate::utils::db::pg_stream_parsing::LDChange; +use crate::utils::db::queries::get_entries_in_collection; use crate::utils::general::general::rw_locked_hashmap__get_entry_or_insert_with; use super::lq_key::LQKey; #[derive(Debug, Clone)] pub struct LQEntryWatcher { - pub new_entries_channel_sender: Sender>, - pub new_entries_channel_receiver: Receiver>, + pub new_entries_channel_sender: Sender>, + pub new_entries_channel_receiver: Receiver>, } impl LQEntryWatcher { - pub fn new() -> Self { - let (s1, r1): (Sender>, Receiver>) = flume::unbounded(); - Self { - new_entries_channel_sender: s1, - new_entries_channel_receiver: r1, - } - } + pub fn new() -> Self { + let (s1, r1): (Sender>, Receiver>) = flume::unbounded(); + Self { new_entries_channel_sender: s1, new_entries_channel_receiver: r1 } + } } /// Holds the data related to a specific query (ie. collection-name + filter). #[derive(Debug)] pub struct LQInstance { - pub lq_key: LQKey, - pub last_entries: RwLock>, - pub last_entries_set_count: AtomicU64, - pub entry_watchers: RwLock>, + pub lq_key: LQKey, + pub last_entries: RwLock>, + pub last_entries_set_count: AtomicU64, + pub entry_watchers: RwLock>, } impl LQInstance { - pub fn new(lq_key: LQKey, initial_entries: Vec) -> Self { - Self { - lq_key, - last_entries: RwLock::new(initial_entries), - last_entries_set_count: AtomicU64::new(0), - entry_watchers: RwLock::new(HashMap::new()), - } - } + pub fn new(lq_key: LQKey, initial_entries: Vec) -> Self { + Self { lq_key, last_entries: RwLock::new(initial_entries), last_entries_set_count: AtomicU64::new(0), entry_watchers: RwLock::new(HashMap::new()) } + } + + pub async fn send_self_to_monitor_backend(&self, entries: Vec, watcher_count: usize) { + //let lq_key = get_lq_instance_key(&self.table_name, &self.filter); + let message = self.get_lq_instance_updated_message(entries, watcher_count); + if let Err(err) = MESSAGE_SENDER_TO_MONITOR_BACKEND.0.broadcast(message).await { + error!("Errored while broadcasting LQInstanceUpdated message. 
@error:{}", err); + } + } + pub fn get_lq_instance_updated_message(&self, entries: Vec, watcher_count: usize) -> Message_ASToMB { + Message_ASToMB::LQInstanceUpdated { + table_name: self.lq_key.table_name.clone(), + filter: serde_json::to_value(self.lq_key.filter.clone()).unwrap(), + last_entries: entries, + watchers_count: watcher_count as u32, + deleting: false, // deletion event is sent from drop_lq_watcher func in lq_group.rs + } + } - pub async fn send_self_to_monitor_backend(&self, entries: Vec, watcher_count: usize) { - //let lq_key = get_lq_instance_key(&self.table_name, &self.filter); - let message = self.get_lq_instance_updated_message(entries, watcher_count); - if let Err(err) = MESSAGE_SENDER_TO_MONITOR_BACKEND.0.broadcast(message).await { - error!("Errored while broadcasting LQInstanceUpdated message. @error:{}", err); - } - } - pub fn get_lq_instance_updated_message(&self, entries: Vec, watcher_count: usize) -> Message_ASToMB { - Message_ASToMB::LQInstanceUpdated { - table_name: self.lq_key.table_name.clone(), - filter: serde_json::to_value(self.lq_key.filter.clone()).unwrap(), - last_entries: entries, - watchers_count: watcher_count as u32, - deleting: false, // deletion event is sent from drop_lq_watcher func in lq_group.rs - } - } + pub async fn get_or_create_watcher(&self, stream_id: Uuid, current_entries: Vec) -> (LQEntryWatcher, bool, usize) { + /*let entry_watchers = self.entry_watchers.write().await; + let create_new = !self.entry_watchers.contains_key(&stream_id); + let watcher = self.entry_watchers.entry(stream_id).or_insert_with(LQEntryWatcher::new); + (watcher, create_new)*/ + let (watcher, just_created, new_count) = rw_locked_hashmap__get_entry_or_insert_with(&self.entry_watchers, stream_id, LQEntryWatcher::new).await; + self.send_self_to_monitor_backend(current_entries, new_count).await; + (watcher, just_created, new_count) + } + /*pub fn get_or_create_watcher(&mut self, stream_id: Uuid) -> (&LQEntryWatcher, usize) { + let watcher = self.entry_watchers.entry(stream_id).or_insert(LQEntryWatcher::new()); + (&watcher, self.entry_watchers.len()) + /*if self.entry_watchers.contains_key(&stream_id) { + return (self.entry_watchers.get(&stream_id).unwrap(), self.entry_watchers.len()); + } + let watcher = LQEntryWatcher::new(); + self.entry_watchers.insert(stream_id, watcher); + (&watcher, self.entry_watchers.len())*/ + }*/ - pub async fn get_or_create_watcher(&self, stream_id: Uuid, current_entries: Vec) -> (LQEntryWatcher, bool, usize) { - /*let entry_watchers = self.entry_watchers.write().await; - let create_new = !self.entry_watchers.contains_key(&stream_id); - let watcher = self.entry_watchers.entry(stream_id).or_insert_with(LQEntryWatcher::new); - (watcher, create_new)*/ - let (watcher, just_created, new_count) = rw_locked_hashmap__get_entry_or_insert_with(&self.entry_watchers, stream_id, LQEntryWatcher::new).await; - self.send_self_to_monitor_backend(current_entries, new_count).await; - (watcher, just_created, new_count) + pub async fn on_table_changed(&self, change: &LDChange, mtx_p: Option<&Mtx>) { + new_mtx!(mtx, "1:get last_entries read-lock, clone, then drop lock", mtx_p); + let mut new_entries = self.last_entries.read().await.clone(); - } - /*pub fn get_or_create_watcher(&mut self, stream_id: Uuid) -> (&LQEntryWatcher, usize) { - let watcher = self.entry_watchers.entry(stream_id).or_insert(LQEntryWatcher::new()); - (&watcher, self.entry_watchers.len()) - /*if self.entry_watchers.contains_key(&stream_id) { - return 
(self.entry_watchers.get(&stream_id).unwrap(), self.entry_watchers.len()); - } - let watcher = LQEntryWatcher::new(); - self.entry_watchers.insert(stream_id, watcher); - (&watcher, self.entry_watchers.len())*/ - }*/ + mtx.section("2:calculate new_entries"); + let mut our_data_changed = false; + match change.kind.as_str() { + "insert" => { + let new_entry = change.new_data_as_map().unwrap(); + let filter_check_result = entry_matches_filter(&new_entry, &self.lq_key.filter).expect(&format!("Failed to execute filter match-check on new database entry. @table:{} @filter:{:?}", self.lq_key.table_name, self.lq_key.filter)); + if filter_check_result { + new_entries.push(new_entry); + our_data_changed = true; + } + }, + "update" => { + let new_data = change.new_data_as_map().unwrap(); + // find entry (ie. row/doc) with the given id, in new_entries (ie. the new set of values that will be pushed to clients for this query) + let entry_index = new_entries.iter_mut().position(|a| a["id"].as_str() == new_data["id"].as_str()); + match entry_index { + Some(entry_index) => { + // update the target entry's data to reflect the current change + let entry = new_entries.get_mut(entry_index).unwrap(); + for key in new_data.keys() { + entry.insert(key.to_owned(), new_data[key].clone()); + our_data_changed = true; + } + // check if the entry still matches the query's filter (if not, remove the entry from the query's results) + let filter_check_result = entry_matches_filter(entry, &self.lq_key.filter).expect(&format!("Failed to execute filter match-check on updated database entry. @table:{} @filter:{:?}", self.lq_key.table_name, self.lq_key.filter)); + if !filter_check_result { + new_entries.remove(entry_index); + our_data_changed = true; + } + }, + None => { + // if the modified entry wasn't part of the result-set, it must not have matched the filter before the update; but check if it matches now + let filter_check_result = entry_matches_filter(&new_data, &self.lq_key.filter).expect(&format!("Failed to execute filter match-check on updated database entry. @table:{} @filter:{:?}", self.lq_key.table_name, self.lq_key.filter)); + if filter_check_result { + new_entries.push(new_data); + our_data_changed = true; + } + }, + }; + }, + "delete" => { + let id = change.get_row_id(); + let entry_index = new_entries.iter().position(|a| a["id"].as_str().unwrap() == id); + match entry_index { + Some(entry_index) => { + new_entries.remove(entry_index); + our_data_changed = true; + }, + None => {}, + }; + }, + _ => { + // ignore any other types of change (no need to even tell the watchers about it) + return; + }, + }; + if !our_data_changed { + return; + } - pub async fn on_table_changed(&self, change: &LDChange, mtx_p: Option<&Mtx>) { - new_mtx!(mtx, "1:get last_entries read-lock, clone, then drop lock", mtx_p); - let mut new_entries = self.last_entries.read().await.clone(); + new_entries.sort_by_key(|a| a["id"].as_str().unwrap().to_owned()); // sort entries by id, so there is a consistent ordering - mtx.section("2:calculate new_entries"); - let mut our_data_changed = false; - match change.kind.as_str() { - "insert" => { - let new_entry = change.new_data_as_map().unwrap(); - let filter_check_result = entry_matches_filter(&new_entry, &self.lq_key.filter) - .expect(&format!("Failed to execute filter match-check on new database entry. 
@table:{} @filter:{:?}", self.lq_key.table_name, self.lq_key.filter)); - if filter_check_result { - new_entries.push(new_entry); - our_data_changed = true; - } - }, - "update" => { - let new_data = change.new_data_as_map().unwrap(); - // find entry (ie. row/doc) with the given id, in new_entries (ie. the new set of values that will be pushed to clients for this query) - let entry_index = new_entries.iter_mut().position(|a| a["id"].as_str() == new_data["id"].as_str()); - match entry_index { - Some(entry_index) => { - // update the target entry's data to reflect the current change - let entry = new_entries.get_mut(entry_index).unwrap(); - for key in new_data.keys() { - entry.insert(key.to_owned(), new_data[key].clone()); - our_data_changed = true; - } - // check if the entry still matches the query's filter (if not, remove the entry from the query's results) - let filter_check_result = entry_matches_filter(entry, &self.lq_key.filter) - .expect(&format!("Failed to execute filter match-check on updated database entry. @table:{} @filter:{:?}", self.lq_key.table_name, self.lq_key.filter)); - if !filter_check_result { - new_entries.remove(entry_index); - our_data_changed = true; - } - }, - None => { - // if the modified entry wasn't part of the result-set, it must not have matched the filter before the update; but check if it matches now - let filter_check_result = entry_matches_filter(&new_data, &self.lq_key.filter) - .expect(&format!("Failed to execute filter match-check on updated database entry. @table:{} @filter:{:?}", self.lq_key.table_name, self.lq_key.filter)); - if filter_check_result { - new_entries.push(new_data); - our_data_changed = true; - } - }, - }; - }, - "delete" => { - let id = change.get_row_id(); - let entry_index = new_entries.iter().position(|a| a["id"].as_str().unwrap() == id); - match entry_index { - Some(entry_index) => { - new_entries.remove(entry_index); - our_data_changed = true; - }, - None => {}, - }; - }, - _ => { - // ignore any other types of change (no need to even tell the watchers about it) - return; - }, - }; - if !our_data_changed { return; } + mtx.section("3:get entry_watchers read-lock, then notify each watcher of new_entries"); + let entry_watchers = self.entry_watchers.read().await; + for (_watcher_stream_id, watcher) in entry_watchers.iter() { + watcher.new_entries_channel_sender.send(new_entries.clone()).unwrap(); + } - new_entries.sort_by_key(|a| a["id"].as_str().unwrap().to_owned()); // sort entries by id, so there is a consistent ordering - - mtx.section("3:get entry_watchers read-lock, then notify each watcher of new_entries"); - let entry_watchers = self.entry_watchers.read().await; - for (_watcher_stream_id, watcher) in entry_watchers.iter() { - watcher.new_entries_channel_sender.send(new_entries.clone()).unwrap(); - } + mtx.section("4:update the last_entries list"); + self.set_last_entries::<{ Lock::LQInstance_entry_watchers }>(new_entries.clone()).await; - mtx.section("4:update the last_entries list"); - self.set_last_entries::<{Lock::LQInstance_entry_watchers}>(new_entries.clone()).await; + self.send_self_to_monitor_backend(new_entries, entry_watchers.len()).await; + } - self.send_self_to_monitor_backend(new_entries, entry_watchers.len()).await; - } + pub async fn set_last_entries(&self, mut new_entries: Vec) + where + Assert<{ (PRIOR_LOCK as usize) < lock_as_usize_LQInstance_last_entries!() }>: IsTrue, + { + //check_lock_order_usize::<{PRIOR_LOCK as usize}, {Lock::LQInstance_last_entries as usize}>(); + let mut last_entries = 
self.last_entries.write().await; + last_entries.drain(..); + last_entries.append(&mut new_entries); + self.last_entries_set_count.fetch_add(1, Ordering::SeqCst); + } - pub async fn set_last_entries(&self, mut new_entries: Vec) - where Assert::<{(PRIOR_LOCK as usize) < lock_as_usize_LQInstance_last_entries!()}>: IsTrue - { - //check_lock_order_usize::<{PRIOR_LOCK as usize}, {Lock::LQInstance_last_entries as usize}>(); - let mut last_entries = self.last_entries.write().await; - last_entries.drain(..); - last_entries.append(&mut new_entries); - self.last_entries_set_count.fetch_add(1, Ordering::SeqCst); - } + pub async fn get_last_entry_with_id(&self, entry_id: &str) -> Option { + let last_entries = self.last_entries.read().await; + last_entries + .iter() + .find(|entry2| { + let entry2_id = entry2.get("id").and_then(|a| a.as_str()).map_or("", |a| a); + entry2_id == entry_id + }) + .cloned() + } - pub async fn get_last_entry_with_id(&self, entry_id: &str) -> Option { - let last_entries = self.last_entries.read().await; - last_entries.iter().find(|entry2| { - let entry2_id = entry2.get("id").and_then(|a| a.as_str()).map_or("", |a| a); - entry2_id == entry_id - }).cloned() - } - - /*pub async fn await_next_entries(&mut self, stream_id: Uuid) -> Vec { - let watcher = self.get_or_create_watcher(stream_id); - let new_result = watcher.new_entries_channel_receiver.recv_async().await.unwrap(); - new_result - }*/ + /*pub async fn await_next_entries(&mut self, stream_id: Uuid) -> Vec { + let watcher = self.get_or_create_watcher(stream_id); + let new_result = watcher.new_entries_channel_receiver.recv_async().await.unwrap(); + new_result + }*/ } /*impl Drop for LQInstance { - fn drop(&mut self) { - let table_name = self.table_name.to_owned(); - let filter = self.filter.clone(); - // there might be an issue here where this async-chain ends up broadcasting later than it should, causing it to "overwrite" some "later" event - // todo: fix this possible issue (perhaps by storing timestamp here, then canceling broadcast if another broadcast occurs before our actual broadcast) - tokio::spawn(async move { - //let lq_key = get_lq_instance_key(&self.table_name, &self.filter); - if let Err(err) = MESSAGE_SENDER_TO_MONITOR_BACKEND.0.broadcast(Message_ASToMB::LQInstanceUpdated { - table_name, - filter: serde_json::to_value(filter).unwrap(), - last_entries: vec![], - watchers_count: 0u32, - deleting: true, - }).await { - error!("Errored while broadcasting LQInstanceUpdated message. @error:{}", err); - } - }); - } -}*/ \ No newline at end of file + fn drop(&mut self) { + let table_name = self.table_name.to_owned(); + let filter = self.filter.clone(); + // there might be an issue here where this async-chain ends up broadcasting later than it should, causing it to "overwrite" some "later" event + // todo: fix this possible issue (perhaps by storing timestamp here, then canceling broadcast if another broadcast occurs before our actual broadcast) + tokio::spawn(async move { + //let lq_key = get_lq_instance_key(&self.table_name, &self.filter); + if let Err(err) = MESSAGE_SENDER_TO_MONITOR_BACKEND.0.broadcast(Message_ASToMB::LQInstanceUpdated { + table_name, + filter: serde_json::to_value(filter).unwrap(), + last_entries: vec![], + watchers_count: 0u32, + deleting: true, + }).await { + error!("Errored while broadcasting LQInstanceUpdated message. 
@error:{}", err); + } + }); + } +}*/ diff --git a/Packages/app-server/src/store/live_queries_/lq_key.rs b/Packages/app-server/src/store/live_queries_/lq_key.rs index 8c87144fa..973fd93f6 100644 --- a/Packages/app-server/src/store/live_queries_/lq_key.rs +++ b/Packages/app-server/src/store/live_queries_/lq_key.rs @@ -1,25 +1,36 @@ -use std::{cmp::Ordering}; +use std::cmp::Ordering; -use rust_shared::{serde_json::{self, json}, anyhow::ensure, utils::type_aliases::JSONValue, anyhow::Error, itertools::Itertools}; -use serde::{Deserialize, Serialize, Serializer, Deserializer}; +use rust_shared::{ + anyhow::ensure, + anyhow::Error, + itertools::Itertools, + serde_json::{self, json}, + utils::type_aliases::JSONValue, +}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use crate::utils::db::filter::{QueryFilter, FilterOp}; +use crate::utils::db::filter::{FilterOp, QueryFilter}; pub fn filter_shape_from_filter(filter: &QueryFilter) -> QueryFilter { - let mut filter_shape = filter.clone(); - for (field_name, field_filter) in filter_shape.field_filters.clone().iter() { - let field_filter_mut = filter_shape.field_filters.get_mut(field_name).unwrap(); - field_filter_mut.filter_ops = field_filter.filter_ops.clone().iter().map(|op| { - let op_with_vals_stripped = match op { - FilterOp::EqualsX(_val) => FilterOp::EqualsX(JSONValue::Null), - FilterOp::IsWithinX(vals) => FilterOp::IsWithinX(vals.iter().map(|_| JSONValue::Null).collect_vec()), - FilterOp::ContainsAllOfX(vals) => FilterOp::ContainsAllOfX(vals.iter().map(|_| JSONValue::Null).collect_vec()), - FilterOp::ContainsAnyOfX(vals) => FilterOp::ContainsAnyOfX(vals.iter().map(|_| JSONValue::Null).collect_vec()), - }; - op_with_vals_stripped - }).collect_vec(); - } - filter_shape + let mut filter_shape = filter.clone(); + for (field_name, field_filter) in filter_shape.field_filters.clone().iter() { + let field_filter_mut = filter_shape.field_filters.get_mut(field_name).unwrap(); + field_filter_mut.filter_ops = field_filter + .filter_ops + .clone() + .iter() + .map(|op| { + let op_with_vals_stripped = match op { + FilterOp::EqualsX(_val) => FilterOp::EqualsX(JSONValue::Null), + FilterOp::IsWithinX(vals) => FilterOp::IsWithinX(vals.iter().map(|_| JSONValue::Null).collect_vec()), + FilterOp::ContainsAllOfX(vals) => FilterOp::ContainsAllOfX(vals.iter().map(|_| JSONValue::Null).collect_vec()), + FilterOp::ContainsAnyOfX(vals) => FilterOp::ContainsAnyOfX(vals.iter().map(|_| JSONValue::Null).collect_vec()), + }; + op_with_vals_stripped + }) + .collect_vec(); + } + filter_shape } /// A "live query key" is the "signature" of a live-query group or instance. @@ -27,76 +38,94 @@ pub fn filter_shape_from_filter(filter: &QueryFilter) -> QueryFilter { /// When used for an instance, it represents the specific filter used in the instance. (eg. 
`{table:"maps",filter:{id:{equalTo:"SOME_MAP_ID_HERE"}}}`) #[derive(Clone)] pub struct LQKey { - pub table_name: String, - pub filter: QueryFilter, - // cached json-representation of the key's data (for easy use in hashmaps) - pub _str: String, + pub table_name: String, + pub filter: QueryFilter, + // cached json-representation of the key's data (for easy use in hashmaps) + pub _str: String, } impl LQKey { - pub fn new(table_name: String, filter: QueryFilter) -> LQKey { - let data = LQKeyData { table_name, filter }; - let _str = serde_json::to_string(&data).unwrap(); - LQKey { table_name: data.table_name, filter: data.filter, _str } - } - pub fn new_for_lqi(table_name: String, filter: QueryFilter) -> LQKey { - Self::new(table_name, filter) - } - pub fn new_for_lq_group(table_name: String, filter_or_filter_shape: QueryFilter) -> LQKey { - let filter_shape = filter_shape_from_filter(&filter_or_filter_shape); - Self::new(table_name, filter_shape) - } - pub fn new_for_lq_group_strict(table_name: String, filter_shape: QueryFilter) -> Result { - filter_shape.ensure_shape_only()?; - Ok(Self::new(table_name, filter_shape)) - } + pub fn new(table_name: String, filter: QueryFilter) -> LQKey { + let data = LQKeyData { table_name, filter }; + let _str = serde_json::to_string(&data).unwrap(); + LQKey { table_name: data.table_name, filter: data.filter, _str } + } + pub fn new_for_lqi(table_name: String, filter: QueryFilter) -> LQKey { + Self::new(table_name, filter) + } + pub fn new_for_lq_group(table_name: String, filter_or_filter_shape: QueryFilter) -> LQKey { + let filter_shape = filter_shape_from_filter(&filter_or_filter_shape); + Self::new(table_name, filter_shape) + } + pub fn new_for_lq_group_strict(table_name: String, filter_shape: QueryFilter) -> Result { + filter_shape.ensure_shape_only()?; + Ok(Self::new(table_name, filter_shape)) + } - pub fn as_shape_only(&self) -> LQKey { - let filter_shape = filter_shape_from_filter(&self.filter); - LQKey::new(self.table_name.clone(), filter_shape) - } + pub fn as_shape_only(&self) -> LQKey { + let filter_shape = filter_shape_from_filter(&self.filter); + LQKey::new(self.table_name.clone(), filter_shape) + } - /*pub fn table_name(&self) -> &str { &self.data.table_name } - pub fn filter(&self) -> &QueryFilter { &self.data.filter }*/ + /*pub fn table_name(&self) -> &str { &self.data.table_name } + pub fn filter(&self) -> &QueryFilter { &self.data.filter }*/ } impl Serialize for LQKey { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - LQKeyData { table_name: self.table_name.clone(), filter: self.filter.clone() }.serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + LQKeyData { table_name: self.table_name.clone(), filter: self.filter.clone() }.serialize(serializer) + } } impl<'de> Deserialize<'de> for LQKey { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { - let data = LQKeyData::deserialize(deserializer)?; - Ok(LQKey::new(data.table_name, data.filter)) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = LQKeyData::deserialize(deserializer)?; + Ok(LQKey::new(data.table_name, data.filter)) + } } // private struct used for making serialization/deserialization easier (through use of derive macros) #[derive(Clone, Serialize, Deserialize)] struct LQKeyData { - table_name: String, - filter: QueryFilter, + table_name: String, + filter: QueryFilter, } // pass-through traits // ========== impl std::fmt::Display for LQKey { - fn 
fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { std::fmt::Display::fmt(&self._str, f) } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + std::fmt::Display::fmt(&self._str, f) + } } impl std::fmt::Debug for LQKey { - // Is this good? Or should it return debug-string for LQKeyData struct instead? - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { std::fmt::Debug::fmt(&self._str, f) } + // Is this good? Or should it return debug-string for LQKeyData struct instead? + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + std::fmt::Debug::fmt(&self._str, f) + } } impl Eq for LQKey {} impl PartialEq for LQKey { - fn eq(&self, other: &LQKey) -> bool { self._str.eq(&other._str) } + fn eq(&self, other: &LQKey) -> bool { + self._str.eq(&other._str) + } } impl Ord for LQKey { - fn cmp(&self, other: &LQKey) -> Ordering { self._str.cmp(&other._str) } + fn cmp(&self, other: &LQKey) -> Ordering { + self._str.cmp(&other._str) + } } impl PartialOrd for LQKey { - fn partial_cmp(&self, other: &LQKey) -> Option { self._str.partial_cmp(&other._str) } + fn partial_cmp(&self, other: &LQKey) -> Option { + self._str.partial_cmp(&other._str) + } } impl std::hash::Hash for LQKey { - fn hash(&self, state: &mut H) { self._str.hash(state); } -} \ No newline at end of file + fn hash(&self, state: &mut H) { + self._str.hash(state); + } +} diff --git a/Packages/app-server/src/store/live_queries_/lq_param.rs b/Packages/app-server/src/store/live_queries_/lq_param.rs index e111c0ede..3ca32225e 100644 --- a/Packages/app-server/src/store/live_queries_/lq_param.rs +++ b/Packages/app-server/src/store/live_queries_/lq_param.rs @@ -1,79 +1,79 @@ -use std::{fmt::Display, sync::atomic::AtomicI32, iter::{once, Once, empty}}; -use rust_shared::anyhow::{anyhow, bail, Context, Error, ensure}; -use rust_shared::itertools::{Itertools, chain}; -use rust_shared::regex::{Regex, Captures}; +use crate::utils::{ + db::{ + filter::FilterOp, + sql_fragment::{SQLFragment, SF}, + sql_ident::SQLIdent, + sql_param::SQLParam, + }, + general::general::match_cond_to_iter, +}; +use rust_shared::anyhow::{anyhow, bail, ensure, Context, Error}; +use rust_shared::itertools::{chain, Itertools}; +use rust_shared::regex::{Captures, Regex}; use rust_shared::serde_json::Map; use rust_shared::tokio_postgres::types::ToSql; -use crate::{utils::{db::{filter::FilterOp, sql_param::{SQLParam}, sql_fragment::{SQLFragment, SF}, sql_ident::SQLIdent}, general::{general::match_cond_to_iter}}}; +use std::{ + fmt::Display, + iter::{empty, once, Once}, + sync::atomic::AtomicI32, +}; use super::lq_instance::LQInstance; pub enum LQParam { - LQIndex(usize), - /// Tuple meaning: (field_name, index_of_filter_op_for_field, filter_op) - FilterOpValue(String, usize, FilterOp), + LQIndex(usize), + /// Tuple meaning: (field_name, index_of_filter_op_for_field, filter_op) + FilterOpValue(String, usize, FilterOp), } impl LQParam { - /*pub fn new(name: String, source_field__name: Option, source_field__param_index: Option) -> Result { - if let Some(field_name) = source_field__name { - ensure!(name != "lq_index", "Invalid name for param, for field \"{field_name}\"; the identifier \"sql_index\" is reserved!"); - } - Ok(Self { name, source_field__name, source_field__param_index }) - }*/ - pub fn name(&self) -> String { - match self { - LQParam::LQIndex(_) => "lq_index".to_owned(), - LQParam::FilterOpValue(field_name, op_index, _) => { - format!("{}_{}", field_name, op_index) - } - } - } + /*pub fn new(name: String, 
source_field__name: Option, source_field__param_index: Option) -> Result { + if let Some(field_name) = source_field__name { + ensure!(name != "lq_index", "Invalid name for param, for field \"{field_name}\"; the identifier \"sql_index\" is reserved!"); + } + Ok(Self { name, source_field__name, source_field__param_index }) + }*/ + pub fn name(&self) -> String { + match self { + LQParam::LQIndex(_) => "lq_index".to_owned(), + LQParam::FilterOpValue(field_name, op_index, _) => { + format!("{}_{}", field_name, op_index) + }, + } + } - pub fn instantiate_param_using_lq_instance_data(&self, lq_index: usize, lq_instance: &LQInstance) -> Result { - let proto = self; - match proto { - LQParam::LQIndex(_) => Ok(LQParam::LQIndex(lq_index)), - LQParam::FilterOpValue(field_name, op_i, _) => { - let field_filter_for_lq_instance = lq_instance.lq_key.filter.field_filters.get(field_name) - .ok_or(anyhow!("LQ-instance had no filter-value for field \"{field_name}\"."))?; - let filter_op = field_filter_for_lq_instance.filter_ops.get(*op_i) - .ok_or(anyhow!("Field-filter had no filter-op with index \"{op_i}\". @path:{}/{} @lq_instance_filter:{}", field_name, op_i, lq_instance.lq_key.filter))?; - Ok(LQParam::FilterOpValue(field_name.to_owned(), *op_i, filter_op.clone())) - } - } - } + pub fn instantiate_param_using_lq_instance_data(&self, lq_index: usize, lq_instance: &LQInstance) -> Result { + let proto = self; + match proto { + LQParam::LQIndex(_) => Ok(LQParam::LQIndex(lq_index)), + LQParam::FilterOpValue(field_name, op_i, _) => { + let field_filter_for_lq_instance = lq_instance.lq_key.filter.field_filters.get(field_name).ok_or(anyhow!("LQ-instance had no filter-value for field \"{field_name}\"."))?; + let filter_op = field_filter_for_lq_instance.filter_ops.get(*op_i).ok_or(anyhow!("Field-filter had no filter-op with index \"{op_i}\". @path:{}/{} @lq_instance_filter:{}", field_name, op_i, lq_instance.lq_key.filter))?; + Ok(LQParam::FilterOpValue(field_name.to_owned(), *op_i, filter_op.clone())) + }, + } + } - pub fn get_sql_for_value(&self) -> Result { - match self { - LQParam::LQIndex(lq_index) => { - //SQLParam::Value_Float(f64::try_from(*lq_index)?).into_value_fragment() // this doesn't work fsr - //SQLParam::Value_Float(*lq_index as f64).into_value_fragment() - //SQLParam::Value_Int(*lq_index as i64).into_value_fragment() - Ok(SQLFragment::value(*lq_index as i64)) - }, - LQParam::FilterOpValue(_, _, op) => { - op.get_sql_for_value() - } - } - } + pub fn get_sql_for_value(&self) -> Result { + match self { + LQParam::LQIndex(lq_index) => { + //SQLParam::Value_Float(f64::try_from(*lq_index)?).into_value_fragment() // this doesn't work fsr + //SQLParam::Value_Float(*lq_index as f64).into_value_fragment() + //SQLParam::Value_Int(*lq_index as i64).into_value_fragment() + Ok(SQLFragment::value(*lq_index as i64)) + }, + LQParam::FilterOpValue(_, _, op) => op.get_sql_for_value(), + } + } - pub fn get_sql_for_application(&self, left_container_name: &str, right_container_name: &str) -> Result { - match self { - LQParam::LQIndex(..) 
=> bail!("Invalid call to get_sql_for_application, for an LQParam::LQIndex."), - LQParam::FilterOpValue(field_name, _, op) => { - let field_name_fragment = SF::merge(vec![ - SF::ident(SQLIdent::new(left_container_name.to_owned())?), - SF::lit("."), - SF::ident(SQLIdent::new(field_name.to_owned())?), - ]); - let lq_param_name_fragment = SF::merge(vec![ - SF::ident(SQLIdent::new(right_container_name.to_owned())?), - SF::lit("."), - SF::ident(SQLIdent::new(self.name())?), - ]); - - Ok(op.get_sql_for_application(field_name_fragment, lq_param_name_fragment)) - } - } - } -} \ No newline at end of file + pub fn get_sql_for_application(&self, left_container_name: &str, right_container_name: &str) -> Result { + match self { + LQParam::LQIndex(..) => bail!("Invalid call to get_sql_for_application, for an LQParam::LQIndex."), + LQParam::FilterOpValue(field_name, _, op) => { + let field_name_fragment = SF::merge(vec![SF::ident(SQLIdent::new(left_container_name.to_owned())?), SF::lit("."), SF::ident(SQLIdent::new(field_name.to_owned())?)]); + let lq_param_name_fragment = SF::merge(vec![SF::ident(SQLIdent::new(right_container_name.to_owned())?), SF::lit("."), SF::ident(SQLIdent::new(self.name())?)]); + + Ok(op.get_sql_for_application(field_name_fragment, lq_param_name_fragment)) + }, + } + } +} diff --git a/Packages/app-server/src/store/mod.rs b/Packages/app-server/src/store/mod.rs index c7f13955c..1d94569c8 100644 --- a/Packages/app-server/src/store/mod.rs +++ b/Packages/app-server/src/store/mod.rs @@ -1,15 +1,15 @@ -pub mod storage; pub mod live_queries; +pub mod storage; pub mod live_queries_ { - pub mod lq_key; - pub mod lq_group { - pub mod lq_batch { - pub mod lq_batch; - pub mod sql_generator; - } - pub mod lq_group; - pub mod lq_group_impl; - } - pub mod lq_instance; - pub mod lq_param; -} \ No newline at end of file + pub mod lq_key; + pub mod lq_group { + pub mod lq_batch { + pub mod lq_batch; + pub mod sql_generator; + } + pub mod lq_group; + pub mod lq_group_impl; + } + pub mod lq_instance; + pub mod lq_param; +} diff --git a/Packages/app-server/src/store/storage.rs b/Packages/app-server/src/store/storage.rs index 6d78f6432..9bc2a8844 100644 --- a/Packages/app-server/src/store/storage.rs +++ b/Packages/app-server/src/store/storage.rs @@ -1,3 +1,28 @@ +use axum::extract::ws::{CloseFrame, Message}; +use axum::extract::{FromRequest, WebSocketUpgrade}; +use axum::http::header::CONTENT_TYPE; +use axum::http::Method; +use axum::http::{self, Request, Response, StatusCode}; +use axum::response::{self, IntoResponse}; +use axum::routing::{get, on_service, post, MethodFilter}; +use axum::Error; +use axum::{extract, Router}; +use futures_util::future::{BoxFuture, Ready}; +use futures_util::stream::{SplitSink, SplitStream}; +use futures_util::{future, FutureExt, Sink, SinkExt, Stream, StreamExt}; +use rust_shared::async_graphql::futures_util::task::{Context, Poll}; +use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; +use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; +use rust_shared::async_graphql::{self, Data, MergedObject, MergedSubscription, ObjectType, Result, Schema, SubscriptionType}; +use rust_shared::flume::{unbounded, Receiver, Sender}; +use rust_shared::hyper::Uri; +use rust_shared::serde::de::DeserializeOwned; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::{json, Map}; +use rust_shared::tokio::sync::{mpsc, Mutex, RwLock}; +use rust_shared::tokio_postgres::{Client, Row}; +use 
rust_shared::uuid::Uuid; +use rust_shared::{axum, futures, tower, tower_http}; use std::borrow::Cow; use std::cell::RefCell; use std::collections::{HashMap, HashSet}; @@ -7,71 +32,46 @@ use std::pin::Pin; use std::rc::Rc; use std::str::FromStr; use std::sync::Arc; -use rust_shared::hyper::Uri; -use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; -use rust_shared::async_graphql::{Schema, MergedObject, MergedSubscription, ObjectType, Data, Result, SubscriptionType, self}; -use rust_shared::{futures, axum, tower, tower_http}; -use axum::http::Method; -use axum::http::header::CONTENT_TYPE; -use axum::response::{self, IntoResponse}; -use axum::routing::{get, post, MethodFilter, on_service}; -use axum::{extract, Router}; -use rust_shared::flume::{Sender, Receiver, unbounded}; -use rust_shared::serde::de::DeserializeOwned; -use rust_shared::serde::{Deserialize, Serialize}; -use rust_shared::serde_json::{json, Map}; -use rust_shared::tokio::sync::{mpsc, Mutex, RwLock}; -use rust_shared::tokio_postgres::{Client, Row}; use tower::Service; -use tower_http::cors::{CorsLayer}; -use rust_shared::async_graphql::futures_util::task::{Context, Poll}; -use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; -use axum::extract::ws::{CloseFrame, Message}; -use axum::extract::{FromRequest, WebSocketUpgrade}; -use axum::http::{self, Request, Response, StatusCode}; -use axum::Error; -use futures_util::future::{BoxFuture, Ready}; -use futures_util::stream::{SplitSink, SplitStream}; -use futures_util::{future, Sink, SinkExt, Stream, StreamExt, FutureExt}; -use rust_shared::uuid::Uuid; +use tower_http::cors::CorsLayer; use crate::links::pgclient::create_db_pool; -use crate::utils::type_aliases::{ABSender, ABReceiver, DBPool}; +use crate::utils::type_aliases::{ABReceiver, ABSender, DBPool}; use super::live_queries::{LQStorage, LQStorageArc}; #[derive(Clone, Debug)] pub enum SignInMsg { - GotCallbackData(Uri), + GotCallbackData(Uri), } pub type AppStateArc = Arc; pub struct AppState { - pub db_pool: Arc, + pub db_pool: Arc, - pub channel_for_sign_in_messages__sender_base: ABSender, - pub channel_for_sign_in_messages__receiver_base: ABReceiver, + pub channel_for_sign_in_messages__sender_base: ABSender, + pub channel_for_sign_in_messages__receiver_base: ABReceiver, - pub live_queries: LQStorageArc, + pub live_queries: LQStorageArc, } impl AppState { - fn new() -> Self { - let (s1, r1): (ABSender, ABReceiver) = async_broadcast::broadcast(1000); - let db_pool = Arc::new(create_db_pool()); - Self { - db_pool: db_pool.clone(), - channel_for_sign_in_messages__sender_base: s1, - channel_for_sign_in_messages__receiver_base: r1, - live_queries: LQStorage::new_in_arc(db_pool.clone()), - } - } - pub fn new_in_arc() -> AppStateArc { - Arc::new(Self::new()) - } + fn new() -> Self { + let (s1, r1): (ABSender, ABReceiver) = async_broadcast::broadcast(1000); + let db_pool = Arc::new(create_db_pool()); + Self { + db_pool: db_pool.clone(), + channel_for_sign_in_messages__sender_base: s1, + channel_for_sign_in_messages__receiver_base: r1, + live_queries: LQStorage::new_in_arc(db_pool.clone()), + } + } + pub fn new_in_arc() -> AppStateArc { + Arc::new(Self::new()) + } } // helpers, for getting some common data out of async-graphql's context-data pub fn get_app_state_from_gql_ctx<'a>(gql_ctx: &'a async_graphql::Context<'a>) -> &'a AppStateArc { - let app_state = gql_ctx.data::().unwrap(); - app_state -} \ No newline at end of file + let app_state = 
gql_ctx.data::().unwrap(); + app_state +} diff --git a/Packages/app-server/src/utils/axum_logging_layer.rs b/Packages/app-server/src/utils/axum_logging_layer.rs index e1679b629..2c4232c93 100644 --- a/Packages/app-server/src/utils/axum_logging_layer.rs +++ b/Packages/app-server/src/utils/axum_logging_layer.rs @@ -1,54 +1,48 @@ -use rust_shared::{axum, futures, tower, tower_http, utils::net::body_to_bytes}; use axum::{ - body::{Body, Bytes}, - http::{Request, StatusCode}, - middleware::{self, Next}, - response::{IntoResponse, Response}, - routing::post, - Router, + body::{Body, Bytes}, + http::{Request, StatusCode}, + middleware::{self, Next}, + response::{IntoResponse, Response}, + routing::post, + Router, }; +use rust_shared::{axum, futures, tower, tower_http, utils::net::body_to_bytes}; use std::net::SocketAddr; /*pub fn get_axum_logging_layer() -> FromFnLayer, axum::middleware::Next) -> impl futures_util::Future>> { - middleware::from_fn(print_request_response) + middleware::from_fn(print_request_response) }*/ -pub async fn print_request_response( - req: Request, - next: Next, -) -> Result, (StatusCode, String)> { - let (parts, body) = req.into_parts(); - let bytes = buffer_and_print("request", body).await?; - let req = Request::from_parts(parts, Body::from(bytes)); +pub async fn print_request_response(req: Request, next: Next) -> Result, (StatusCode, String)> { + let (parts, body) = req.into_parts(); + let bytes = buffer_and_print("request", body).await?; + let req = Request::from_parts(parts, Body::from(bytes)); - let res = next.run(req).await; + let res = next.run(req).await; - let (parts, body) = res.into_parts(); - let bytes = buffer_and_print("response", body).await?; - let res2 = Response::from_parts(parts, Body::from(bytes)); + let (parts, body) = res.into_parts(); + let bytes = buffer_and_print("response", body).await?; + let res2 = Response::from_parts(parts, Body::from(bytes)); - Ok(res2) + Ok(res2) } pub async fn buffer_and_print(direction: &str, body: B) -> Result where - B: axum::body::HttpBody, - B::Error: std::fmt::Display + std::fmt::Debug, + B: axum::body::HttpBody, + B::Error: std::fmt::Display + std::fmt::Debug, { - let bytes = match body_to_bytes(body).await { - Ok(bytes) => bytes, - Err(err) => { - return Err(( - StatusCode::BAD_REQUEST, - format!("failed to read {} body: {}", direction, err), - )); - } - }; - - if let Ok(body) = std::str::from_utf8(&bytes) { - //tracing::debug!("{} body = {:?}", direction, body); - println!("{} body = {:?}", direction, body); - } - - Ok(bytes) -} \ No newline at end of file + let bytes = match body_to_bytes(body).await { + Ok(bytes) => bytes, + Err(err) => { + return Err((StatusCode::BAD_REQUEST, format!("failed to read {} body: {}", direction, err))); + }, + }; + + if let Ok(body) = std::str::from_utf8(&bytes) { + //tracing::debug!("{} body = {:?}", direction, body); + println!("{} body = {:?}", direction, body); + } + + Ok(bytes) +} diff --git a/Packages/app-server/src/utils/db/accessors.rs b/Packages/app-server/src/utils/db/accessors.rs index 2ce59f827..f1217f56e 100644 --- a/Packages/app-server/src/utils/db/accessors.rs +++ b/Packages/app-server/src/utils/db/accessors.rs @@ -1,12 +1,12 @@ use std::sync::atomic::{AtomicBool, Ordering}; -use futures_util::{TryStreamExt, Future}; +use deadpool_postgres::{Pool, Transaction}; +use futures_util::{Future, TryStreamExt}; +use rust_shared::anyhow::{anyhow, bail, ensure, Error}; use rust_shared::async_graphql; use rust_shared::serde::Serialize; use 
rust_shared::tokio_postgres::IsolationLevel; -use rust_shared::tokio_postgres::{Row, types::ToSql}; -use rust_shared::anyhow::{anyhow, Error, ensure, bail}; -use deadpool_postgres::{Transaction, Pool}; +use rust_shared::tokio_postgres::{types::ToSql, Row}; use rust_shared::utils::auth::jwt_utils_base::UserJWTData; use rust_shared::utils::general_::extensions::ToOwnedV; use tracing::error; @@ -15,152 +15,163 @@ use crate::db::general::sign_in_::jwt_utils::{get_user_jwt_data_from_gql_ctx, tr use crate::db::users::get_user; use crate::store::storage::get_app_state_from_gql_ctx; use crate::utils::type_aliases::DBPool; -use crate::{utils::{db::{sql_fragment::SQLFragment, filter::{FilterInput, QueryFilter}, queries::get_entries_in_collection_base}, general::{data_anchor::{DataAnchor, DataAnchorFor1}}, type_aliases::PGClientObject}, db::commands::_command::ToSqlWrapper}; - -use super::generic_handlers::queries::{self, get_db_entry_base, get_db_entries_base}; +use crate::{ + db::commands::_command::ToSqlWrapper, + utils::{ + db::{ + filter::{FilterInput, QueryFilter}, + queries::get_entries_in_collection_base, + sql_fragment::SQLFragment, + }, + general::data_anchor::{DataAnchor, DataAnchorFor1}, + type_aliases::PGClientObject, + }, +}; + +use super::generic_handlers::queries::{self, get_db_entries_base, get_db_entry_base}; use super::transactions::{start_read_transaction, start_write_transaction}; /// Helper function to defer constraints in a database transaction. /// This is generally used to avoid foreign-key constraint violations, when multiple rows (linked with each other through foreign-key constraints) are being updated within the same command/transaction. -pub async fn defer_constraints(tx: &Transaction<'_>) -> Result<(), Error>{ - tx.execute("SET CONSTRAINTS ALL DEFERRED", &[]).await?; - Ok(()) +pub async fn defer_constraints(tx: &Transaction<'_>) -> Result<(), Error> { + tx.execute("SET CONSTRAINTS ALL DEFERRED", &[]).await?; + Ok(()) } -pub async fn trigger_deferred_constraints(tx: &Transaction<'_>) -> Result<(), Error>{ - // this triggers previously-deferred constraints to be checked immediately (see: https://www.postgresql.org/docs/current/sql-set-constraints.html) - tx.execute("SET CONSTRAINTS ALL IMMEDIATE", &[]).await?; - Ok(()) +pub async fn trigger_deferred_constraints(tx: &Transaction<'_>) -> Result<(), Error> { + // this triggers previously-deferred constraints to be checked immediately (see: https://www.postgresql.org/docs/current/sql-set-constraints.html) + tx.execute("SET CONSTRAINTS ALL IMMEDIATE", &[]).await?; + Ok(()) } pub struct AccessorContext<'a> { - pub gql_ctx: Option<&'a async_graphql::Context<'a>>, - pub tx: Transaction<'a>, - pub only_validate: bool, - rls_enabled: AtomicBool, + pub gql_ctx: Option<&'a async_graphql::Context<'a>>, + pub tx: Transaction<'a>, + pub only_validate: bool, + rls_enabled: AtomicBool, } impl<'a> AccessorContext<'a> { - // base constructor - pub fn new_raw(gql_ctx: Option<&'a async_graphql::Context<'a>>, tx: Transaction<'a>, only_validate: bool, rls_enabled: bool) -> Self { - Self { gql_ctx, tx, only_validate, rls_enabled: AtomicBool::new(rls_enabled) } - } - - // low-level constructors - pub async fn new_read_base(anchor: &'a mut DataAnchorFor1, gql_ctx: Option<&'a async_graphql::Context<'a>>, db_pool: &DBPool, user: Option, mut bypass_rls: bool, isolation_level: IsolationLevel) -> Result, Error> { - let tx = start_read_transaction(anchor, db_pool, isolation_level).await?; - let user_id_or_placeholder = user.as_ref().map(|a| 
a.id.clone()).unwrap_or("".o()); - tx.execute("SELECT set_config('app.current_user_id', $1, true)", &[&user_id_or_placeholder]).await?; - /*let user_is_admin = TODO; - tx.execute("SELECT set_config('app.current_user_admin', $1, true)", &[&user_is_admin]).await?;*/ - let new_self = Self { gql_ctx, tx, only_validate: false, rls_enabled: AtomicBool::new(false) }; // rls not enabled quite yet; we'll do that in a moment - - // if user is admin, set bypass_rls to true (an optimization, to remove the need for having the RLS rules involved at all) - // todo: maybe change `bypass_rls` to an enum named `rls_apply`, with values "Bypass", "BypassIfAdmin", and "Apply" - if let Some(user_id) = user.map(|a| a.id) { - let user_admin = get_user(&new_self, &user_id).await?.permissionGroups.admin; - if user_admin { - bypass_rls = true; - new_self.rls_enabled.store(true, Ordering::SeqCst); - } - } - - // if bypass_rls is false, then enforce rls-policies (for this transaction) by switching to the "rls_obeyer" role - if !bypass_rls { - new_self.enable_rls().await?; - } - - Ok(new_self) - } - pub async fn new_write_base(anchor: &'a mut DataAnchorFor1, gql_ctx: Option<&'a async_graphql::Context<'a>>, db_pool: &DBPool, user: Option, bypass_rls: bool) -> Result, Error> { - Self::new_write_advanced_base(anchor, gql_ctx, db_pool, user, bypass_rls, Some(false)).await - } - pub async fn new_write_advanced_base(anchor: &'a mut DataAnchorFor1, gql_ctx: Option<&'a async_graphql::Context<'a>>, db_pool: &DBPool, user: Option, bypass_rls: bool, only_validate: Option) -> Result, Error> { - if !bypass_rls { - match &user { - None => bail!("Cannot create write transaction without a user JWT (ie. auth-data) supplied."), - Some(jwt_data) => { - let jwt_read_only = jwt_data.readOnly.unwrap_or(false); - ensure!(!jwt_read_only, "Cannot create write transaction using a read-only JWT."); - }, - } - } - - let tx = start_write_transaction(anchor, db_pool).await?; - tx.execute("SELECT set_config('app.current_user_id', $1, true)", &[&user.map(|a| a.id).unwrap_or("".o())]).await?; - let only_validate = only_validate.unwrap_or(false); - let new_self = Self { gql_ctx, tx, only_validate, rls_enabled: AtomicBool::new(false) }; // rls not enabled quite yet; we'll do that in a moment - - // Some commands (eg. deleteNode) need foreign-key contraint-deferring till end of transaction, so just do so always. - // This is safer, since it protects against "forgotten deferral" in commands where an fk-constraint is *temporarily violated* -- but only in an "uncommon conditional branch". - // (Deferring always is not much of a negative anyway; instant constraint-checking doesn't improve debugging much in this context, since fk-violations are generally easy to identify once triggered.) 
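The comment above (carried over into the rewritten constructor below) explains why foreign-key constraints are always deferred for write transactions. As a minimal sketch of the ordering problem this guards against, consider two hypothetical tables "nodes" and "nodeLinks" whose foreign key is declared DEFERRABLE; the table names and helper below are illustrative, not part of the patch:

// Sketch only: with constraints deferred, the temporarily-dangling reference after step 1
// is not checked until COMMIT, so the statement order inside the transaction stops mattering.
use deadpool_postgres::Transaction;
use rust_shared::anyhow::Error;

async fn delete_node_sketch(tx: &Transaction<'_>, node_id: &str) -> Result<(), Error> {
    // same statement that defer_constraints() issues; only affects constraints declared DEFERRABLE
    tx.execute("SET CONSTRAINTS ALL DEFERRED", &[]).await?;
    // step 1: delete the node; links still reference it, which an immediate FK check would reject
    tx.execute(r#"DELETE FROM "nodes" WHERE id = $1"#, &[&node_id]).await?;
    // step 2: delete the now-dangling links; by COMMIT the deferred check passes again
    tx.execute(r#"DELETE FROM "nodeLinks" WHERE child = $1"#, &[&node_id]).await?;
    Ok(())
}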
+ // base constructor + pub fn new_raw(gql_ctx: Option<&'a async_graphql::Context<'a>>, tx: Transaction<'a>, only_validate: bool, rls_enabled: bool) -> Self { + Self { gql_ctx, tx, only_validate, rls_enabled: AtomicBool::new(rls_enabled) } + } + + // low-level constructors + pub async fn new_read_base(anchor: &'a mut DataAnchorFor1, gql_ctx: Option<&'a async_graphql::Context<'a>>, db_pool: &DBPool, user: Option, mut bypass_rls: bool, isolation_level: IsolationLevel) -> Result, Error> { + let tx = start_read_transaction(anchor, db_pool, isolation_level).await?; + let user_id_or_placeholder = user.as_ref().map(|a| a.id.clone()).unwrap_or("".o()); + tx.execute("SELECT set_config('app.current_user_id', $1, true)", &[&user_id_or_placeholder]).await?; + /*let user_is_admin = TODO; + tx.execute("SELECT set_config('app.current_user_admin', $1, true)", &[&user_is_admin]).await?;*/ + let new_self = Self { gql_ctx, tx, only_validate: false, rls_enabled: AtomicBool::new(false) }; // rls not enabled quite yet; we'll do that in a moment + + // if user is admin, set bypass_rls to true (an optimization, to remove the need for having the RLS rules involved at all) + // todo: maybe change `bypass_rls` to an enum named `rls_apply`, with values "Bypass", "BypassIfAdmin", and "Apply" + if let Some(user_id) = user.map(|a| a.id) { + let user_admin = get_user(&new_self, &user_id).await?.permissionGroups.admin; + if user_admin { + bypass_rls = true; + new_self.rls_enabled.store(true, Ordering::SeqCst); + } + } + + // if bypass_rls is false, then enforce rls-policies (for this transaction) by switching to the "rls_obeyer" role + if !bypass_rls { + new_self.enable_rls().await?; + } + + Ok(new_self) + } + pub async fn new_write_base(anchor: &'a mut DataAnchorFor1, gql_ctx: Option<&'a async_graphql::Context<'a>>, db_pool: &DBPool, user: Option, bypass_rls: bool) -> Result, Error> { + Self::new_write_advanced_base(anchor, gql_ctx, db_pool, user, bypass_rls, Some(false)).await + } + pub async fn new_write_advanced_base(anchor: &'a mut DataAnchorFor1, gql_ctx: Option<&'a async_graphql::Context<'a>>, db_pool: &DBPool, user: Option, bypass_rls: bool, only_validate: Option) -> Result, Error> { + if !bypass_rls { + match &user { + None => bail!("Cannot create write transaction without a user JWT (ie. auth-data) supplied."), + Some(jwt_data) => { + let jwt_read_only = jwt_data.readOnly.unwrap_or(false); + ensure!(!jwt_read_only, "Cannot create write transaction using a read-only JWT."); + }, + } + } + + let tx = start_write_transaction(anchor, db_pool).await?; + tx.execute("SELECT set_config('app.current_user_id', $1, true)", &[&user.map(|a| a.id).unwrap_or("".o())]).await?; + let only_validate = only_validate.unwrap_or(false); + let new_self = Self { gql_ctx, tx, only_validate, rls_enabled: AtomicBool::new(false) }; // rls not enabled quite yet; we'll do that in a moment + + // Some commands (eg. deleteNode) need foreign-key contraint-deferring till end of transaction, so just do so always. + // This is safer, since it protects against "forgotten deferral" in commands where an fk-constraint is *temporarily violated* -- but only in an "uncommon conditional branch". + // (Deferring always is not much of a negative anyway; instant constraint-checking doesn't improve debugging much in this context, since fk-violations are generally easy to identify once triggered.) 
defer_constraints(&new_self.tx).await?; - // if bypass_rls is false, then enforce rls-policies (for this transaction) by switching to the "rls_obeyer" role - if !bypass_rls { - new_self.enable_rls().await?; - } - - Ok(new_self) - } - - // high-level constructors - pub async fn new_read(anchor: &'a mut DataAnchorFor1, gql_ctx: &'a async_graphql::Context<'a>, bypass_rls: bool) -> Result, Error> { - Ok(Self::new_read_base(anchor, Some(gql_ctx), &get_app_state_from_gql_ctx(gql_ctx).db_pool, try_get_user_jwt_data_from_gql_ctx(gql_ctx).await?, bypass_rls, IsolationLevel::Serializable).await?) - } - pub async fn new_write(anchor: &'a mut DataAnchorFor1, gql_ctx: &'a async_graphql::Context<'a>, bypass_rls: bool) -> Result, Error> { - Ok(Self::new_write_base(anchor, Some(gql_ctx), &get_app_state_from_gql_ctx(gql_ctx).db_pool, try_get_user_jwt_data_from_gql_ctx(gql_ctx).await?, bypass_rls).await?) - } - pub async fn new_write_advanced(anchor: &'a mut DataAnchorFor1, gql_ctx: &'a async_graphql::Context<'a>, bypass_rls: bool, only_validate: Option) -> Result, Error> { - Ok(Self::new_write_advanced_base(anchor, Some(gql_ctx), &get_app_state_from_gql_ctx(gql_ctx).db_pool, try_get_user_jwt_data_from_gql_ctx(gql_ctx).await?, bypass_rls, only_validate).await?) - } - - // other methods - pub async fn enable_rls(&self) -> Result<(), Error> { - ensure!(!self.rls_enabled.load(Ordering::SeqCst), "RLS is already enabled. Since our current usages are simple, this is unexpected, and thus considered an error."); - self.rls_enabled.store(true, Ordering::SeqCst); - - self.tx.execute("SET LOCAL ROLE rls_obeyer", &[]).await?; - /*self.tx.execute("SET LOCAL ROLE rls_obeyer", &[]).await?.map_err(|err| { - // if we hit an error while trying to re-enable RLS, then just kill the pg-pool connection (defensive programming vs tricks/exploits) - self.tx.client().__private_api_close(); - err - })?);*/ - Ok(()) - } - pub async fn disable_rls(&self) -> Result<(), Error> { - ensure!(self.rls_enabled.load(Ordering::SeqCst), "RLS is already disabled. Since our current usages are simple, this is unexpected, and thus considered an error."); - self.rls_enabled.store(false, Ordering::SeqCst); - - self.tx.execute("RESET ROLE", &[]).await?; - Ok(()) - } - pub async fn with_rls_disabled>>(&self, f: impl FnOnce() -> Fut, simple_err_for_client: Option<&str>) -> Result<(), Error> { - self.disable_rls().await?; - let result = f().await; - self.enable_rls().await?; - match simple_err_for_client { - None => result, - Some(simple_err_for_client) => result.map_err(|err| { - // log full error to app-server log, but return a generic error to client (we generally don't want data from rls-disabled block to be leaked to client) - error!("{} @fullError:{:?}", simple_err_for_client, err); - anyhow!("{}", simple_err_for_client) - }), - } - } + // if bypass_rls is false, then enforce rls-policies (for this transaction) by switching to the "rls_obeyer" role + if !bypass_rls { + new_self.enable_rls().await?; + } + + Ok(new_self) + } + + // high-level constructors + pub async fn new_read(anchor: &'a mut DataAnchorFor1, gql_ctx: &'a async_graphql::Context<'a>, bypass_rls: bool) -> Result, Error> { + Ok(Self::new_read_base(anchor, Some(gql_ctx), &get_app_state_from_gql_ctx(gql_ctx).db_pool, try_get_user_jwt_data_from_gql_ctx(gql_ctx).await?, bypass_rls, IsolationLevel::Serializable).await?) 
+ } + pub async fn new_write(anchor: &'a mut DataAnchorFor1, gql_ctx: &'a async_graphql::Context<'a>, bypass_rls: bool) -> Result, Error> { + Ok(Self::new_write_base(anchor, Some(gql_ctx), &get_app_state_from_gql_ctx(gql_ctx).db_pool, try_get_user_jwt_data_from_gql_ctx(gql_ctx).await?, bypass_rls).await?) + } + pub async fn new_write_advanced(anchor: &'a mut DataAnchorFor1, gql_ctx: &'a async_graphql::Context<'a>, bypass_rls: bool, only_validate: Option) -> Result, Error> { + Ok(Self::new_write_advanced_base(anchor, Some(gql_ctx), &get_app_state_from_gql_ctx(gql_ctx).db_pool, try_get_user_jwt_data_from_gql_ctx(gql_ctx).await?, bypass_rls, only_validate).await?) + } + + // other methods + pub async fn enable_rls(&self) -> Result<(), Error> { + ensure!(!self.rls_enabled.load(Ordering::SeqCst), "RLS is already enabled. Since our current usages are simple, this is unexpected, and thus considered an error."); + self.rls_enabled.store(true, Ordering::SeqCst); + + self.tx.execute("SET LOCAL ROLE rls_obeyer", &[]).await?; + /*self.tx.execute("SET LOCAL ROLE rls_obeyer", &[]).await?.map_err(|err| { + // if we hit an error while trying to re-enable RLS, then just kill the pg-pool connection (defensive programming vs tricks/exploits) + self.tx.client().__private_api_close(); + err + })?);*/ + Ok(()) + } + pub async fn disable_rls(&self) -> Result<(), Error> { + ensure!(self.rls_enabled.load(Ordering::SeqCst), "RLS is already disabled. Since our current usages are simple, this is unexpected, and thus considered an error."); + self.rls_enabled.store(false, Ordering::SeqCst); + + self.tx.execute("RESET ROLE", &[]).await?; + Ok(()) + } + pub async fn with_rls_disabled>>(&self, f: impl FnOnce() -> Fut, simple_err_for_client: Option<&str>) -> Result<(), Error> { + self.disable_rls().await?; + let result = f().await; + self.enable_rls().await?; + match simple_err_for_client { + None => result, + Some(simple_err_for_client) => result.map_err(|err| { + // log full error to app-server log, but return a generic error to client (we generally don't want data from rls-disabled block to be leaked to client) + error!("{} @fullError:{:?}", simple_err_for_client, err); + anyhow!("{}", simple_err_for_client) + }), + } + } } /*pub struct TxTempAdminUpgradeWrapper<'a> { - pub ctx: &'a AccessorContext<'a>, + pub ctx: &'a AccessorContext<'a>, } impl<'a> Drop for TxTempAdminUpgradeWrapper<'a> { - fn drop(&mut self) { - self.ctx.set_tx_role_restricted().await.unwrap(); - } + fn drop(&mut self) { + self.ctx.set_tx_role_restricted().await.unwrap(); + } }*/ pub async fn get_db_entry<'a, T: From + Serialize>(ctx: &AccessorContext<'a>, table_name: &str, filter_json: &Option) -> Result { - let result_option = get_db_entry_base(ctx, table_name, filter_json).await?; - let result = result_option.ok_or(anyhow!(r#"No entries found in table "{table_name}" matching filter:{filter_json:?}"#))?; - Ok(result) + let result_option = get_db_entry_base(ctx, table_name, filter_json).await?; + let result = result_option.ok_or(anyhow!(r#"No entries found in table "{table_name}" matching filter:{filter_json:?}"#))?; + Ok(result) } pub async fn get_db_entries<'a, T: From + Serialize>(ctx: &AccessorContext<'a>, table_name: &str, filter_json: &Option) -> Result, Error> { - get_db_entries_base(ctx, table_name, filter_json).await -} \ No newline at end of file + get_db_entries_base(ctx, table_name, filter_json).await +} diff --git a/Packages/app-server/src/utils/db/agql_ext/gql_request_storage.rs 
b/Packages/app-server/src/utils/db/agql_ext/gql_request_storage.rs index fdf663972..5343bf76f 100644 --- a/Packages/app-server/src/utils/db/agql_ext/gql_request_storage.rs +++ b/Packages/app-server/src/utils/db/agql_ext/gql_request_storage.rs @@ -1,16 +1,14 @@ -use std::sync::{Arc}; +use std::sync::Arc; -use rust_shared::{utils::auth::jwt_utils_base::UserJWTData, tokio::sync::{RwLock}}; +use rust_shared::{tokio::sync::RwLock, utils::auth::jwt_utils_base::UserJWTData}; /// This struct "stores data local to the current graphql request", with a primary usage being for sharing data among graphql calls, within the same "long-running websocket request". /// For example, a websocket request may have two graphql calls: The 1st one verifies and stores a user-provided JWT with auth data; the 2nd one then does some mutation, using that auth data. pub struct GQLRequestStorage { - pub jwt: Arc>>, + pub jwt: Arc>>, } impl GQLRequestStorage { - pub fn new() -> Self { - Self { - jwt: Arc::new(RwLock::new(None)), - } - } -} \ No newline at end of file + pub fn new() -> Self { + Self { jwt: Arc::new(RwLock::new(None)) } + } +} diff --git a/Packages/app-server/src/utils/db/agql_ext/gql_result_stream.rs b/Packages/app-server/src/utils/db/agql_ext/gql_result_stream.rs index 4bfe92d97..bdf268c12 100644 --- a/Packages/app-server/src/utils/db/agql_ext/gql_result_stream.rs +++ b/Packages/app-server/src/utils/db/agql_ext/gql_result_stream.rs @@ -14,67 +14,67 @@ use crate::store::storage::{Storage, LQStorage}; use super::{general::GQLSet, type_aliases::JSONValue}; pub struct GQLResultStream<'a, ResultT> { - // set at init - pub id: Uuid, - storage_wrapper: Arc>>, - collection_name: String, - filter: Filter, - // for changes - waker: Option, - new_results_to_return: VecDeque, + // set at init + pub id: Uuid, + storage_wrapper: Arc>>, + collection_name: String, + filter: Filter, + // for changes + waker: Option, + new_results_to_return: VecDeque, } pub fn new( - impl<'a, ResultT: Send + Sync + 'static> GQLResultStream<'a, ResultT> { - storage_wrapper: Arc>>, - collection_name: &str, - filter: Filter, - first_result: ResultT, - ) -> Self { - Self { - id: Uuid::new_v4(), - storage_wrapper: storage_wrapper, - collection_name: collection_name.to_owned(), - filter: filter, - waker: None, - new_results_to_return: VecDeque::from(vec![first_result]), - } - } - /*pub fn create_channel(&mut self) -> Sender { - let (tx, rx): (Sender, Receiver) = mpsc::channel(100); - let self_arc = Arc::new(self); - tokio::spawn(async move { - loop { - let new_result = rx.recv().await.unwrap(); - self_arc.push_result(new_result); - } - }); - return tx; - }*/ + impl<'a, ResultT: Send + Sync + 'static> GQLResultStream<'a, ResultT> { + storage_wrapper: Arc>>, + collection_name: &str, + filter: Filter, + first_result: ResultT, + ) -> Self { + Self { + id: Uuid::new_v4(), + storage_wrapper: storage_wrapper, + collection_name: collection_name.to_owned(), + filter: filter, + waker: None, + new_results_to_return: VecDeque::from(vec![first_result]), + } + } + /*pub fn create_channel(&mut self) -> Sender { + let (tx, rx): (Sender, Receiver) = mpsc::channel(100); + let self_arc = Arc::new(self); + tokio::spawn(async move { + loop { + let new_result = rx.recv().await.unwrap(); + self_arc.push_result(new_result); + } + }); + return tx; + }*/ - pub fn push_result(&mut self, new_result: ResultT) { - self.new_results_to_return.push_back(new_result); - self.waker.clone().unwrap().wake(); - } + pub fn push_result(&mut self, new_result: ResultT) { + 
self.new_results_to_return.push_back(new_result); + self.waker.clone().unwrap().wake(); + } } impl<'a, ResultT> Unpin for GQLResultStream<'a, ResultT> {} // enables the mutations of self below impl<'a, ResultT> Stream for GQLResultStream<'a, ResultT> { - type Item = ResultT; - fn poll_next(mut self: Pin<&mut Self>, c: &mut std::task::Context<'_>) -> Poll::Item>> { - self.waker = Some(c.waker().clone()); - let next_result = self.new_results_to_return.pop_front(); - match next_result { - Some(next_result) => Poll::Ready(Some(next_result)), - None => Poll::Pending, - } - } + type Item = ResultT; + fn poll_next(mut self: Pin<&mut Self>, c: &mut std::task::Context<'_>) -> Poll::Item>> { + self.waker = Some(c.waker().clone()); + let next_result = self.new_results_to_return.pop_front(); + match next_result { + Some(next_result) => Poll::Ready(Some(next_result)), + None => Poll::Pending, + } + } } impl<'a, ResultT> Drop for GQLResultStream<'a, ResultT> { - fn drop(&mut self) { - //println!("Stream_WithDropListener got dropped. @address:{:p} @collection:{} @filter:{:?}", self, self.collection_name, self.filter); - //let mut storage: LQStorage = storage_wrapper.to_owned(); - //let storage = self.storage_wrapper.lock.await; - let mut guard = self.storage_wrapper.lock(); - let storage = guard.as_mut().unwrap(); - storage.notify_lq_end(self.collection_name.as_str(), &self.filter, self.id); - } -}*/ \ No newline at end of file + fn drop(&mut self) { + //println!("Stream_WithDropListener got dropped. @address:{:p} @collection:{} @filter:{:?}", self, self.collection_name, self.filter); + //let mut storage: LQStorage = storage_wrapper.to_owned(); + //let storage = self.storage_wrapper.lock.await; + let mut guard = self.storage_wrapper.lock(); + let storage = guard.as_mut().unwrap(); + storage.notify_lq_end(self.collection_name.as_str(), &self.filter, self.id); + } +}*/ diff --git a/Packages/app-server/src/utils/db/agql_ext/gql_utils.rs b/Packages/app-server/src/utils/db/agql_ext/gql_utils.rs index dd4c48605..dda4d9948 100644 --- a/Packages/app-server/src/utils/db/agql_ext/gql_utils.rs +++ b/Packages/app-server/src/utils/db/agql_ext/gql_utils.rs @@ -1,19 +1,23 @@ use std::collections::BTreeMap; -use rust_shared::{async_graphql::{self, parser::types::ExecutableDocument, SimpleObject, InputObject, OutputType, InputValueError, Value, ScalarType, Scalar, InputType, InputValueResult, Name}, rust_macros::wrap_slow_macros, indexmap::IndexMap}; -use serde::{Serialize, Deserialize}; +use rust_shared::{ + async_graphql::{self, parser::types::ExecutableDocument, InputObject, InputType, InputValueError, InputValueResult, Name, OutputType, Scalar, ScalarType, SimpleObject, Value}, + indexmap::IndexMap, + rust_macros::wrap_slow_macros, +}; +use serde::{Deserialize, Serialize}; pub fn get_root_fields_in_doc(doc: ExecutableDocument) -> Vec { - let mut query_fields: Vec = vec![]; - for op in doc.operations.iter() { - let (_name, def) = op; - for selection_item in &def.node.selection_set.node.items { - if let async_graphql::parser::types::Selection::Field(field) = &selection_item.node { - query_fields.push(field.node.name.to_string()); - } - } - }; - query_fields + let mut query_fields: Vec = vec![]; + for op in doc.operations.iter() { + let (_name, def) = op; + for selection_item in &def.node.selection_set.node.items { + if let async_graphql::parser::types::Selection::Field(field) = &selection_item.node { + query_fields.push(field.node.name.to_string()); + } + } + } + query_fields } //wrap_slow_macros!{ @@ -23,65 +27,68 @@ 
pub struct IndexMapAGQL(pub IndexMap); // makes-so you can call functions on IndexMapAGQL as though it were an IndexMap (ie. without having to do .0) impl std::ops::Deref for IndexMapAGQL { - type Target = IndexMap; - fn deref(&self) -> &Self::Target { - &self.0 - } + type Target = IndexMap; + fn deref(&self) -> &Self::Target { + &self.0 + } } impl std::ops::DerefMut for IndexMapAGQL { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } // makes it a bit easier to go from IndexMap -> IndexMapAGQL /*impl From> for IndexMap { - fn from(val: IndexMapAGQL) -> Self { - val.0 - } + fn from(val: IndexMapAGQL) -> Self { + val.0 + } }*/ // makes it a bit easier to go from IndexMap -> IndexMapAGQL /*impl Into> for IndexMap { - fn into(self) -> IndexMapAGQL { - IndexMapAGQL(self) - } + fn into(self) -> IndexMapAGQL { + IndexMapAGQL(self) + } }*/ /// A scalar that can represent any JSON Object value. #[Scalar(name = "JSONObject")] -impl ScalarType for IndexMapAGQL where T: OutputType + InputType { - fn parse(value: Value) -> InputValueResult { - match value { - Value::Object(map) => { - let mut result = IndexMapAGQL(IndexMap::new()); - for (key, value) in map.into_iter() { - result.insert(key.to_string(), T::parse(Some(value)).map_err(InputValueError::propagate)?); - } - Ok(result) - }, - _ => Err(InputValueError::expected_type(value)), - } - } +impl ScalarType for IndexMapAGQL +where + T: OutputType + InputType, +{ + fn parse(value: Value) -> InputValueResult { + match value { + Value::Object(map) => { + let mut result = IndexMapAGQL(IndexMap::new()); + for (key, value) in map.into_iter() { + result.insert(key.to_string(), T::parse(Some(value)).map_err(InputValueError::propagate)?); + } + Ok(result) + }, + _ => Err(InputValueError::expected_type(value)), + } + } - fn to_value(&self) -> Value { - let mut map = IndexMap::new(); - for (name, value) in self.iter() { - map.insert(Name::new(name), value.to_value()); - } - Value::Object(map) - } + fn to_value(&self) -> Value { + let mut map = IndexMap::new(); + for (name, value) in self.iter() { + map.insert(Name::new(name), value.to_value()); + } + Value::Object(map) + } } //} #[cfg(test)] mod tests { - use rust_shared::async_graphql; + use rust_shared::async_graphql; - use crate::utils::db::agql_ext::gql_utils::get_root_fields_in_doc; + use crate::utils::db::agql_ext::gql_utils::get_root_fields_in_doc; - #[test] - fn test_get_root_fields_in_doc() { - let query = r#" + #[test] + fn test_get_root_fields_in_doc() { + let query = r#" query { subtree1(id: 1) { id @@ -91,8 +98,8 @@ mod tests { } } "#; - let doc = async_graphql::parser::parse_query(query).unwrap(); - let root_fields = get_root_fields_in_doc(doc); - assert_eq!(root_fields, vec!["subtree1", "subtree2"]); - } -} \ No newline at end of file + let doc = async_graphql::parser::parse_query(query).unwrap(); + let root_fields = get_root_fields_in_doc(doc); + assert_eq!(root_fields, vec!["subtree1", "subtree2"]); + } +} diff --git a/Packages/app-server/src/utils/db/filter.rs b/Packages/app-server/src/utils/db/filter.rs index 2e4bda0f1..b2170ec05 100644 --- a/Packages/app-server/src/utils/db/filter.rs +++ b/Packages/app-server/src/utils/db/filter.rs @@ -1,81 +1,94 @@ -use std::{fmt::Display, iter::{once, empty}}; -use rust_shared::{anyhow::{anyhow, bail, Context, Error, ensure}, utils::{type_aliases::{JSONValue, RowData}, general_::extensions::IteratorV}, serde_json}; -use rust_shared::indexmap::IndexMap; +use super::{ + 
sql_fragment::{SQLFragment, SF}, + sql_ident::SQLIdent, + sql_param::SQLParamBoxed, +}; +use crate::{store::live_queries_::lq_param::LQParam, utils::general::general::match_cond_to_iter}; use rust_shared::async_graphql; -use rust_shared::rust_macros::{wrap_slow_macros, unchanged}; -use rust_shared::serde::{Serialize, Deserialize}; -use crate::{utils::{general::{general::match_cond_to_iter}}, store::live_queries_::lq_param::{LQParam}}; +use rust_shared::indexmap::IndexMap; use rust_shared::itertools::{chain, Itertools}; +use rust_shared::rust_macros::{unchanged, wrap_slow_macros}; +use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::Map; use rust_shared::tokio_postgres::types::ToSql; -use rust_shared::serde; -use super::{sql_fragment::{SQLFragment, SF}, sql_ident::{SQLIdent}, sql_param::SQLParamBoxed}; +use rust_shared::{ + anyhow::{anyhow, bail, ensure, Context, Error}, + serde_json, + utils::{ + general_::extensions::IteratorV, + type_aliases::{JSONValue, RowData}, + }, +}; +use std::{ + fmt::Display, + iter::{empty, once}, +}; //pub type Filter = Option>; pub type FilterInput = JSONValue; // we use JSONValue, because it has the InputType trait (unlike Map<...>, for some reason) -wrap_slow_macros!{ +wrap_slow_macros! { /// Structure specifying a set of filters used for rows in a table. /// This struct may contain the actual values that are being filtered for, OR it may just contain the "shape" of a set of filters. (as checked by ensure_shape_only) #[derive(Debug, Serialize, Deserialize)] pub struct QueryFilter { - pub field_filters: IndexMap, + pub field_filters: IndexMap, } } impl QueryFilter { - pub fn empty() -> Self { - Self { - field_filters: IndexMap::new(), - } - } - // example filter: Some(Object({"id": Object({"equalTo": String("t5gRdPS9TW6HrTKS2l2IaZ")})})) - pub fn from_filter_input_opt(input: &Option) -> Result { - match input { - Some(input) => Self::from_filter_input(input), - // if no input, just return an empty filter (has same effect as "no filter", so best to unify) - None => Ok(QueryFilter::empty()), - } - } - pub fn from_filter_input(input: &FilterInput) -> Result { - let mut result = QueryFilter { field_filters: IndexMap::new() }; + pub fn empty() -> Self { + Self { field_filters: IndexMap::new() } + } + // example filter: Some(Object({"id": Object({"equalTo": String("t5gRdPS9TW6HrTKS2l2IaZ")})})) + pub fn from_filter_input_opt(input: &Option) -> Result { + match input { + Some(input) => Self::from_filter_input(input), + // if no input, just return an empty filter (has same effect as "no filter", so best to unify) + None => Ok(QueryFilter::empty()), + } + } + pub fn from_filter_input(input: &FilterInput) -> Result { + let mut result = QueryFilter { field_filters: IndexMap::new() }; - for (field_name, field_filters_json) in input.as_object().ok_or_else(|| anyhow!("Filter root-structure was not an object!"))?.iter() { - let mut field_filter = FieldFilter::default(); - //if let Some((filter_type, filter_value)) = field_filters.as_object().unwrap().iter().next() { - for (op_json, op_val_json) in field_filters_json.as_object().ok_or_else(|| anyhow!("Filter-structure for field {field_name} was not an object!"))? 
{ - let op_val_json_clone = op_val_json.clone(); - let op: FilterOp = match op_json.as_str() { - "equalTo" => FilterOp::EqualsX(op_val_json_clone), - "in" => { - let vals = op_val_json_clone.as_array().ok_or(anyhow!("Filter-op of type \"in\" requires an array value!"))?; - FilterOp::IsWithinX(vals.to_vec()) - }, - "contains" => { - let vals = op_val_json_clone.as_array().ok_or(anyhow!("Filter-op of type \"contains\" requires an array value!"))?; - FilterOp::ContainsAllOfX(vals.to_vec()) - }, - "containsAny" => { - let vals = op_val_json_clone.as_array().ok_or(anyhow!("Filter-op of type \"containsAny\" requires an array value!"))?; - FilterOp::ContainsAnyOfX(vals.to_vec()) - }, - _ => bail!(r#"Invalid filter-op "{op_json}" specified. Supported: equalTo, in, contains, containsAny."#), - }; - field_filter.filter_ops.push(op); - } - result.field_filters.insert(field_name.to_owned(), field_filter); - } + for (field_name, field_filters_json) in input.as_object().ok_or_else(|| anyhow!("Filter root-structure was not an object!"))?.iter() { + let mut field_filter = FieldFilter::default(); + //if let Some((filter_type, filter_value)) = field_filters.as_object().unwrap().iter().next() { + for (op_json, op_val_json) in field_filters_json.as_object().ok_or_else(|| anyhow!("Filter-structure for field {field_name} was not an object!"))? { + let op_val_json_clone = op_val_json.clone(); + let op: FilterOp = match op_json.as_str() { + "equalTo" => FilterOp::EqualsX(op_val_json_clone), + "in" => { + let vals = op_val_json_clone.as_array().ok_or(anyhow!("Filter-op of type \"in\" requires an array value!"))?; + FilterOp::IsWithinX(vals.to_vec()) + }, + "contains" => { + let vals = op_val_json_clone.as_array().ok_or(anyhow!("Filter-op of type \"contains\" requires an array value!"))?; + FilterOp::ContainsAllOfX(vals.to_vec()) + }, + "containsAny" => { + let vals = op_val_json_clone.as_array().ok_or(anyhow!("Filter-op of type \"containsAny\" requires an array value!"))?; + FilterOp::ContainsAnyOfX(vals.to_vec()) + }, + _ => bail!(r#"Invalid filter-op "{op_json}" specified. Supported: equalTo, in, contains, containsAny."#), + }; + field_filter.filter_ops.push(op); + } + result.field_filters.insert(field_name.to_owned(), field_filter); + } - Ok(result) - } + Ok(result) + } - pub fn is_empty(&self) -> bool { - self.field_filters.len() == 0 - } + pub fn is_empty(&self) -> bool { + self.field_filters.len() == 0 + } - pub fn ensure_shape_only(&self) -> Result<(), Error> { + #[rustfmt::skip] + pub fn ensure_shape_only(&self) -> Result<(), Error> { for (_field_name, field_filter) in &self.field_filters { for op in &field_filter.filter_ops { match op { @@ -89,93 +102,91 @@ impl QueryFilter { Ok(()) } - /// This method does not use batching; if you want batching, use `LQBatch`. - pub fn get_sql_for_application(&self) -> Result { - if self.is_empty() { - return Ok(SF::lit("")); - } - - let mut parts: Vec = vec![]; - parts.push(SF::lit("(")); - for (i, (field_name, field_filter)) in self.field_filters.iter().enumerate() { - if i > 0 { - parts.push(SF::lit(") AND (")); - } - //if let Some((filter_type, filter_value)) = field_filters.as_object().unwrap().iter().next() { - for op in field_filter.filter_ops.iter() { - parts.push(op.get_sql_for_application( - SF::ident(SQLIdent::new(field_name.clone())?), - op.get_sql_for_value()?, - )); - } - } - parts.push(SF::lit(")")); - - let combined_fragment = SF::merge(parts); - Ok(combined_fragment) - } + /// This method does not use batching; if you want batching, use `LQBatch`. 
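// Illustrative sketch (not part of this change): building a QueryFilter from a filter-input and
// rendering it to SQL via the two methods in this impl. The field names and values are made up,
// and json! is assumed to be in scope (serde_json, re-exported through rust_shared here).
fn example_filter_to_sql() -> Result<SQLFragment, Error> {
    let input = json!({
        "id": {"equalTo": "t5gRdPS9TW6HrTKS2l2IaZ"},
        "tags": {"containsAny": ["tagA", "tagB"]}
    });
    let filter = QueryFilter::from_filter_input(&input)?; // EqualsX for "id", ContainsAnyOfX for "tags"
    // get_sql_for_application emits one parenthesized condition per field, joined with " AND ",
    // with the compared values passed as SQL params rather than spliced into the query text.
    filter.get_sql_for_application()
}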
+ pub fn get_sql_for_application(&self) -> Result { + if self.is_empty() { + return Ok(SF::lit("")); + } + + let mut parts: Vec = vec![]; + parts.push(SF::lit("(")); + for (i, (field_name, field_filter)) in self.field_filters.iter().enumerate() { + if i > 0 { + parts.push(SF::lit(") AND (")); + } + //if let Some((filter_type, filter_value)) = field_filters.as_object().unwrap().iter().next() { + for op in field_filter.filter_ops.iter() { + parts.push(op.get_sql_for_application(SF::ident(SQLIdent::new(field_name.clone())?), op.get_sql_for_value()?)); + } + } + parts.push(SF::lit(")")); + + let combined_fragment = SF::merge(parts); + Ok(combined_fragment) + } } impl Clone for QueryFilter { - fn clone(&self) -> Self { - /*let mut field_filters: IndexMap = IndexMap::new(); - for (key, value) in self.field_filters.iter() { - field_filters.insert(key.clone(), value.clone()); - } - Self { field_filters }*/ - Self { field_filters: self.field_filters.clone() } - } + fn clone(&self) -> Self { + /*let mut field_filters: IndexMap = IndexMap::new(); + for (key, value) in self.field_filters.iter() { + field_filters.insert(key.clone(), value.clone()); + } + Self { field_filters }*/ + Self { field_filters: self.field_filters.clone() } + } } impl Display for QueryFilter { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - /*if self.is_empty() { - f.write_str("n/a"); - } else { - f.write_fmt(format_args!("{self:?}")); - } - Ok(())*/ - // is this correct? - if self.is_empty() { - "n/a".fmt(f) - } else { - format_args!("{self:?}").fmt(f) - } - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + /*if self.is_empty() { + f.write_str("n/a"); + } else { + f.write_fmt(format_args!("{self:?}")); + } + Ok(())*/ + // is this correct? + if self.is_empty() { + "n/a".fmt(f) + } else { + format_args!("{self:?}").fmt(f) + } + } } -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct FieldFilter { - pub filter_ops: Vec, + pub filter_ops: Vec, } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum FilterOp { - EqualsX(JSONValue), - IsWithinX(Vec), - ContainsAllOfX(Vec), - ContainsAnyOfX(Vec), + EqualsX(JSONValue), + IsWithinX(Vec), + ContainsAllOfX(Vec), + ContainsAnyOfX(Vec), } } impl FilterOp { - /// The job of this function is just to provide the SQL-fragment for the value being compared against. - /// The SQL for the "comparison operator" is provided in the "match" within the `get_sql_for_application` method below. - pub fn get_sql_for_value(&self) -> Result { - Ok(match self { - FilterOp::EqualsX(val) => { - /*let temp = json_value_to_guessed_sql_value_param(&val)?; - SF::value(*temp)*/ - json_value_to_guessed_sql_value_param_fragment(&val)? - }, - FilterOp::IsWithinX(vals) => json_vals_to_sql_array_fragment(&vals)?, - FilterOp::ContainsAllOfX(vals) => json_vals_to_sql_array_fragment(&vals)?, - FilterOp::ContainsAnyOfX(vals) => json_vals_to_sql_array_fragment(&vals)?, - }) - } + /// The job of this function is just to provide the SQL-fragment for the value being compared against. + /// The SQL for the "comparison operator" is provided in the "match" within the `get_sql_for_application` method below. + pub fn get_sql_for_value(&self) -> Result { + Ok(match self { + FilterOp::EqualsX(val) => { + /*let temp = json_value_to_guessed_sql_value_param(&val)?; + SF::value(*temp)*/ + json_value_to_guessed_sql_value_param_fragment(&val)? 
+ }, + FilterOp::IsWithinX(vals) => json_vals_to_sql_array_fragment(&vals)?, + FilterOp::ContainsAllOfX(vals) => json_vals_to_sql_array_fragment(&vals)?, + FilterOp::ContainsAnyOfX(vals) => json_vals_to_sql_array_fragment(&vals)?, + }) + } - pub fn get_sql_for_application(&self, ref_to_val_in_db: SQLFragment, ref_to_val_in_filter_op: SQLFragment) -> SQLFragment { + #[rustfmt::skip] + pub fn get_sql_for_application(&self, ref_to_val_in_db: SQLFragment, ref_to_val_in_filter_op: SQLFragment) -> SQLFragment { let bracket_plus_val_in_db = SF::merge(vec![ SF::lit("("), ref_to_val_in_db, @@ -215,108 +226,111 @@ impl FilterOp { } pub fn entry_matches_filter(entry: &RowData, filter: &QueryFilter) -> Result { - for (field_name, field_filter) in filter.field_filters.iter() { - // consider "field doesn't exist" to be the same as "field exists, and is set to null" (since that's how the filter-system is meant to work) - let field_value = entry.get(field_name).or(Some(&serde_json::Value::Null)).unwrap(); - - for op in &field_filter.filter_ops { - match op { - FilterOp::EqualsX(val) => { - if field_value != val { - return Ok(false); - } - }, - // see: https://www.postgresql.org/docs/current/functions-comparisons.html - FilterOp::IsWithinX(vals) => { - if !vals.contains(field_value) { - return Ok(false); - } - }, - // atm, we are assuming the caller is using "contains" in the array sense (ie. array contains all the items specified): https://www.postgresql.org/docs/current/functions-array.html - // but support for other versions (eg. "contains json-subtree") may be added (as new filter-op-types) in the future - FilterOp::ContainsAllOfX(vals) => { - for val in vals { - if !field_value.as_array().with_context(|| "Field value was not an array!")?.contains(&val) { - return Ok(false); - } - } - }, - FilterOp::ContainsAnyOfX(vals) => { - for val in vals { - if field_value.as_array().with_context(|| "Field value was not an array!")?.contains(&val) { - return Ok(true); - } - } - return Ok(false); - }, - } - } - } - Ok(true) -} + for (field_name, field_filter) in filter.field_filters.iter() { + // consider "field doesn't exist" to be the same as "field exists, and is set to null" (since that's how the filter-system is meant to work) + let field_value = entry.get(field_name).or(Some(&serde_json::Value::Null)).unwrap(); + for op in &field_filter.filter_ops { + match op { + FilterOp::EqualsX(val) => { + if field_value != val { + return Ok(false); + } + }, + // see: https://www.postgresql.org/docs/current/functions-comparisons.html + FilterOp::IsWithinX(vals) => { + if !vals.contains(field_value) { + return Ok(false); + } + }, + // atm, we are assuming the caller is using "contains" in the array sense (ie. array contains all the items specified): https://www.postgresql.org/docs/current/functions-array.html + // but support for other versions (eg. "contains json-subtree") may be added (as new filter-op-types) in the future + FilterOp::ContainsAllOfX(vals) => { + for val in vals { + if !field_value.as_array().with_context(|| "Field value was not an array!")?.contains(&val) { + return Ok(false); + } + } + }, + FilterOp::ContainsAnyOfX(vals) => { + for val in vals { + if field_value.as_array().with_context(|| "Field value was not an array!")?.contains(&val) { + return Ok(true); + } + } + return Ok(false); + }, + } + } + } + Ok(true) +} /// This function tries to convert an anonymous json-value into a type with ToSql implemented, for use as a sql-param. 
/// It's a bit inelegant here, in that we assume we want json-value scalars to map to pg-type scalars, and non-scalars to pg-type "jsonb", when that's not necessarily the case. /// That said, it's sufficient for our purposes, since we only use this for live-query "filters", where these simple rules work fine. //pub fn json_value_to_guessed_sql_value_param(json_val: &JSONValue) -> Result { pub fn json_value_to_guessed_sql_value_param_fragment(json_val: &JSONValue) -> Result { - match json_val { - JSONValue::Null => Ok(SF::value(Box::new(Option::::None))), - JSONValue::Bool(val) => Ok(SF::value(Box::new(*val))), - JSONValue::Number(val) => { - if let Some(val_i64) = val.as_i64() { - return Ok(SF::value(Box::new(val_i64))) - /*let val_i32 = i32::try_from(val_i64)?; - return Ok(SQLParam::Value_Int(val_i32));*/ - } - if let Some(val_f64) = val.as_f64() { - return Ok(SF::value(Box::new(val_f64))); - } - Err(anyhow!("Invalid \"number\":{}", val)) - }, - JSONValue::String(val) => Ok(SF::value(Box::new(val.to_owned()))), - JSONValue::Array(data) => { - //if data.iter().all(|a| a.is_string()) { return Ok(SF::value(Box::new(json_val.clone()))) } - let bool_vals = data.iter().filter(|a| a.is_boolean()).map(|a| a.as_bool().unwrap()).collect_vec(); - let i64_vals = data.iter().filter(|a| a.is_number() && a.as_i64().is_some()).map(|a| a.as_i64().unwrap()).collect_vec(); - let f64_vals = data.iter().filter(|a| a.is_number() && a.as_f64().is_some()).map(|a| a.as_f64().unwrap()).collect_vec(); - let string_vals = data.iter().filter(|a| a.is_string()).map(|a| a.as_str().unwrap().to_owned()).collect_vec(); - /*let array_vals = data.iter().filter(|a| a.is_array()).map(|a| a.as_array().unwrap().to_owned()).collect_vec(); - let object_vals = data.iter().filter(|a| a.is_object()).map(|a| a.as_object().unwrap().to_owned()).collect_vec();*/ + match json_val { + JSONValue::Null => Ok(SF::value(Box::new(Option::::None))), + JSONValue::Bool(val) => Ok(SF::value(Box::new(*val))), + JSONValue::Number(val) => { + if let Some(val_i64) = val.as_i64() { + return Ok(SF::value(Box::new(val_i64))); + /*let val_i32 = i32::try_from(val_i64)?; + return Ok(SQLParam::Value_Int(val_i32));*/ + } + if let Some(val_f64) = val.as_f64() { + return Ok(SF::value(Box::new(val_f64))); + } + Err(anyhow!("Invalid \"number\":{}", val)) + }, + JSONValue::String(val) => Ok(SF::value(Box::new(val.to_owned()))), + JSONValue::Array(data) => { + //if data.iter().all(|a| a.is_string()) { return Ok(SF::value(Box::new(json_val.clone()))) } + let bool_vals = data.iter().filter(|a| a.is_boolean()).map(|a| a.as_bool().unwrap()).collect_vec(); + let i64_vals = data.iter().filter(|a| a.is_number() && a.as_i64().is_some()).map(|a| a.as_i64().unwrap()).collect_vec(); + let f64_vals = data.iter().filter(|a| a.is_number() && a.as_f64().is_some()).map(|a| a.as_f64().unwrap()).collect_vec(); + let string_vals = data.iter().filter(|a| a.is_string()).map(|a| a.as_str().unwrap().to_owned()).collect_vec(); + /*let array_vals = data.iter().filter(|a| a.is_array()).map(|a| a.as_array().unwrap().to_owned()).collect_vec(); + let object_vals = data.iter().filter(|a| a.is_object()).map(|a| a.as_object().unwrap().to_owned()).collect_vec();*/ - let val_list_lengths = vec![bool_vals.len(), i64_vals.len(), f64_vals.len(), string_vals.len()]; - let most_matches_for_list = val_list_lengths.into_iter().max().unwrap(); - if most_matches_for_list > 0 { - if bool_vals.len() == most_matches_for_list { return Ok(SF::value(Box::new(bool_vals))) } - if i64_vals.len() == 
most_matches_for_list { return Ok(SF::value(Box::new(i64_vals))) } - if f64_vals.len() == most_matches_for_list { return Ok(SF::value(Box::new(f64_vals))) } - if string_vals.len() == most_matches_for_list { return Ok(SF::value(Box::new(string_vals))) } - /*if array_vals.len() == most_matches_for_list { return Ok(SF::value(Box::new(array_vals))) } - if object_vals.len() == most_matches_for_list { return Ok(SF::value(Box::new(object_vals))) }*/ - } + let val_list_lengths = vec![bool_vals.len(), i64_vals.len(), f64_vals.len(), string_vals.len()]; + let most_matches_for_list = val_list_lengths.into_iter().max().unwrap(); + if most_matches_for_list > 0 { + if bool_vals.len() == most_matches_for_list { + return Ok(SF::value(Box::new(bool_vals))); + } + if i64_vals.len() == most_matches_for_list { + return Ok(SF::value(Box::new(i64_vals))); + } + if f64_vals.len() == most_matches_for_list { + return Ok(SF::value(Box::new(f64_vals))); + } + if string_vals.len() == most_matches_for_list { + return Ok(SF::value(Box::new(string_vals))); + } + /*if array_vals.len() == most_matches_for_list { return Ok(SF::value(Box::new(array_vals))) } + if object_vals.len() == most_matches_for_list { return Ok(SF::value(Box::new(object_vals))) }*/ + } - // fallback to jsonb - Ok(SF::value(Box::new(json_val.clone()))) - // todo: make sure this is correct - }, - JSONValue::Object(_data) => { - Ok(SF::value(Box::new(json_val.clone()))) - // todo: make sure this is correct - }, - /*_ => { - //SQLParam::Value(op_val.to_string().replace('\"', "'").replace('[', "(").replace(']', ")")) - bail!("Conversion from this type of json-value ({json_val:?}) to a SQLParam is not yet implemented. Instead, provide one of: Null, Bool, Number, String, Array, Object"); - },*/ - } + // fallback to jsonb + Ok(SF::value(Box::new(json_val.clone()))) + // todo: make sure this is correct + }, + JSONValue::Object(_data) => { + Ok(SF::value(Box::new(json_val.clone()))) + // todo: make sure this is correct + }, /*_ => { + //SQLParam::Value(op_val.to_string().replace('\"', "'").replace('[', "(").replace(']', ")")) + bail!("Conversion from this type of json-value ({json_val:?}) to a SQLParam is not yet implemented. 
Instead, provide one of: Null, Bool, Number, String, Array, Object"); + },*/ + } } pub fn json_vals_to_sql_array_fragment(json_vals: &Vec) -> Result { - Ok(SF::merge(chain!( - SF::lit("array[").once(), - json_vals_to_fragments(json_vals)?, - SF::lit("]").once(), - ).collect_vec())) + Ok(SF::merge(chain!(SF::lit("array[").once(), json_vals_to_fragments(json_vals)?, SF::lit("]").once(),).collect_vec())) } +#[rustfmt::skip] pub fn json_vals_to_fragments(json_vals: &Vec) -> Result, Error> { json_vals.iter().enumerate().map(|(i, val)| -> Result { Ok(SQLFragment::merge(chain!( @@ -328,4 +342,4 @@ pub fn json_vals_to_fragments(json_vals: &Vec) -> Result>() -} \ No newline at end of file +} diff --git a/Packages/app-server/src/utils/db/generic_handlers/queries.rs b/Packages/app-server/src/utils/db/generic_handlers/queries.rs index 65cf893e3..8deb33375 100644 --- a/Packages/app-server/src/utils/db/generic_handlers/queries.rs +++ b/Packages/app-server/src/utils/db/generic_handlers/queries.rs @@ -1,39 +1,75 @@ -use std::{any::TypeId, pin::Pin, task::{Poll, Waker}, time::{Duration, Instant, SystemTime, UNIX_EPOCH}, cell::RefCell}; -use rust_shared::{anyhow::{bail, Context, Error, anyhow}, async_graphql, serde_json, tokio, utils::{type_aliases::JSONValue, general_::extensions::ToOwnedV, auth::jwt_utils_base::UserJWTData}, new_mtx, flume, to_sub_err, domains::DomainsConstants, GQLError}; -use rust_shared::async_graphql::{Result, async_stream::{stream, self}, OutputType, Object, Positioned, parser::types::Field}; -use deadpool_postgres::{Transaction, Pool}; -use rust_shared::flume::{Sender, Receiver}; -use futures_util::{Stream, StreamExt, Future, stream, TryFutureExt, TryStreamExt}; -use rust_shared::SubError; -use rust_shared::serde::{Serialize, Deserialize, de::DeserializeOwned}; +use deadpool_postgres::{Pool, Transaction}; +use futures_util::{stream, Future, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use metrics::{counter, histogram}; +use rust_shared::async_graphql::{ + async_stream::{self, stream}, + parser::types::Field, + Object, OutputType, Positioned, Result, +}; +use rust_shared::flume::{Receiver, Sender}; +use rust_shared::serde::{de::DeserializeOwned, Deserialize, Serialize}; use rust_shared::serde_json::{json, Map}; -use rust_shared::tokio_postgres::{Client, Row, types::ToSql}; +use rust_shared::tokio_postgres::{types::ToSql, Client, Row}; use rust_shared::uuid::Uuid; -use metrics::{counter, histogram}; +use rust_shared::SubError; +use rust_shared::{ + anyhow::{anyhow, bail, Context, Error}, + async_graphql, + domains::DomainsConstants, + flume, new_mtx, serde_json, to_sub_err, tokio, + utils::{auth::jwt_utils_base::UserJWTData, general_::extensions::ToOwnedV, type_aliases::JSONValue}, + GQLError, +}; +use std::{ + any::TypeId, + cell::RefCell, + pin::Pin, + task::{Poll, Waker}, + time::{Duration, Instant, SystemTime, UNIX_EPOCH}, +}; use tracing::error; -use crate::{store::{live_queries::{LQStorageArc, LQStorage, DropLQWatcherMsg}, live_queries_::lq_key::LQKey, storage::{AppStateArc, get_app_state_from_gql_ctx}}, utils::{type_aliases::{PGClientObject}, db::{rls::rls_applier::{RLSApplier}, sql_fragment::SQLFragment, queries::get_entries_in_collection_base, accessors::{AccessorContext}}, general::data_anchor::DataAnchorFor1}, db::{general::sign_in_::jwt_utils::try_get_user_jwt_data_from_gql_ctx, commands::_command::ToSqlWrapper}}; -use super::super::{filter::{QueryFilter, FilterInput}, rls::{rls_applier::{self}, rls_policies::UsesRLS}}; +use super::super::{ + filter::{FilterInput, 
QueryFilter}, + rls::{ + rls_applier::{self}, + rls_policies::UsesRLS, + }, +}; +use crate::{ + db::{commands::_command::ToSqlWrapper, general::sign_in_::jwt_utils::try_get_user_jwt_data_from_gql_ctx}, + store::{ + live_queries::{DropLQWatcherMsg, LQStorage, LQStorageArc}, + live_queries_::lq_key::LQKey, + storage::{get_app_state_from_gql_ctx, AppStateArc}, + }, + utils::{ + db::{accessors::AccessorContext, queries::get_entries_in_collection_base, rls::rls_applier::RLSApplier, sql_fragment::SQLFragment}, + general::data_anchor::DataAnchorFor1, + type_aliases::PGClientObject, + }, +}; //T: 'static + UsesRLS + From + Serialize + DeserializeOwned + Send + Clone, //GQLSetVariant: 'static + GQLSet + Send + Clone + Sync, pub async fn get_db_entry_base<'a, T: From + Serialize>(ctx: &AccessorContext<'a>, table_name: &str, filter: &Option) -> Result, Error> { - let entries = get_db_entries_base(ctx, table_name, filter).await?; - let entry = entries.into_iter().nth(0); - Ok(entry) + let entries = get_db_entries_base(ctx, table_name, filter).await?; + let entry = entries.into_iter().nth(0); + Ok(entry) } pub async fn get_db_entries_base<'a, T: From + Serialize>(ctx: &AccessorContext<'a>, table_name: &str, filter: &Option) -> Result, Error> { - let query_func = |mut sql: SQLFragment| async move { - let (sql_text, params) = sql.into_query_args()?; - let debug_info_str = format!("@sqlText:{}\n@params:{:?}", &sql_text, ¶ms); + let query_func = |mut sql: SQLFragment| async move { + let (sql_text, params) = sql.into_query_args()?; + let debug_info_str = format!("@sqlText:{}\n@params:{:?}", &sql_text, ¶ms); - let params_wrapped: Vec = params.into_iter().map(|a| ToSqlWrapper { data: a }).collect(); - let params_as_refs: Vec<&(dyn ToSql + Sync)> = params_wrapped.iter().map(|x| x as &(dyn ToSql + Sync)).collect(); + let params_wrapped: Vec = params.into_iter().map(|a| ToSqlWrapper { data: a }).collect(); + let params_as_refs: Vec<&(dyn ToSql + Sync)> = params_wrapped.iter().map(|x| x as &(dyn ToSql + Sync)).collect(); - // query_raw supposedly allows dynamically-constructed params-vecs, but the only way I've been able to get it working is by locking the vector to a single concrete type - // see here: https://github.com/sfackler/rust-postgres/issues/445#issuecomment-1086774095 - //let params: Vec = params.into_iter().map(|a| a.as_ref().to_string()).collect(); + // query_raw supposedly allows dynamically-constructed params-vecs, but the only way I've been able to get it working is by locking the vector to a single concrete type + // see here: https://github.com/sfackler/rust-postgres/issues/445#issuecomment-1086774095 + //let params: Vec = params.into_iter().map(|a| a.as_ref().to_string()).collect(); + #[rustfmt::skip] ctx.tx.query_raw(&sql_text, params_as_refs).await .map_err(|err| { anyhow!("Got error while running query, for getting db-entries. @error:{}\n{}", err.to_string(), &debug_info_str) @@ -41,24 +77,25 @@ pub async fn get_db_entries_base<'a, T: From + Serialize>(ctx: &AccessorCon .try_collect().await.map_err(|err| { anyhow!("Got error while collecting results of db-query, for getting db-entries. 
@error:{}\n{}", err.to_string(), &debug_info_str) }) - }; + }; - let filter = QueryFilter::from_filter_input_opt(filter)?; - let (_entries, entries_as_type) = get_entries_in_collection_base(query_func, table_name.to_owned(), &filter, None).await?; // pass no mtx, because we don't care about optimizing the "subtree" endpoint atm - Ok(entries_as_type) + let filter = QueryFilter::from_filter_input_opt(filter)?; + let (_entries, entries_as_type) = get_entries_in_collection_base(query_func, table_name.to_owned(), &filter, None).await?; // pass no mtx, because we don't care about optimizing the "subtree" endpoint atm + Ok(entries_as_type) } pub async fn handle_generic_gql_collection_query + Serialize>(gql_ctx: &async_graphql::Context<'_>, table_name: &str, filter: Option) -> Result, GQLError> { - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; - Ok(get_db_entries_base(&ctx, table_name, &filter).await?) + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; + Ok(get_db_entries_base(&ctx, table_name, &filter).await?) } pub async fn handle_generic_gql_doc_query + Serialize>(gql_ctx: &async_graphql::Context<'_>, table_name: &str, id: String) -> Result, GQLError> { - let mut anchor = DataAnchorFor1::empty(); // holds pg-client - let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; + let mut anchor = DataAnchorFor1::empty(); // holds pg-client + let ctx = AccessorContext::new_read(&mut anchor, gql_ctx, false).await?; + #[rustfmt::skip] Ok(get_db_entry_base(&ctx, table_name, &Some(json!({ "id": {"equalTo": id} }))).await?) } -//macro_rules! standard_table_endpoints { ... } \ No newline at end of file +//macro_rules! standard_table_endpoints { ... 
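// Illustrative sketch (not part of this change): the hand-written shape a per-table query shard
// takes when delegating to the generic handlers above (what the commented-out
// standard_table_endpoints macro idea just above would presumably generate). `Term`,
// `QueryShard_Terms`, and the "terms" table name are placeholders only.
#[derive(Default)]
pub struct QueryShard_Terms;
#[Object]
impl QueryShard_Terms {
    async fn terms(&self, gql_ctx: &async_graphql::Context<'_>, filter: Option<FilterInput>) -> Result<Vec<Term>, GQLError> {
        handle_generic_gql_collection_query(gql_ctx, "terms", filter).await
    }
    async fn term(&self, gql_ctx: &async_graphql::Context<'_>, id: String) -> Result<Option<Term>, GQLError> {
        handle_generic_gql_doc_query(gql_ctx, "terms", id).await
    }
}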
} diff --git a/Packages/app-server/src/utils/db/generic_handlers/subscriptions.rs b/Packages/app-server/src/utils/db/generic_handlers/subscriptions.rs index f29fa606c..249ea51c3 100644 --- a/Packages/app-server/src/utils/db/generic_handlers/subscriptions.rs +++ b/Packages/app-server/src/utils/db/generic_handlers/subscriptions.rs @@ -1,19 +1,49 @@ -use std::{any::TypeId, pin::Pin, task::{Poll, Waker}, time::{Duration, Instant, SystemTime, UNIX_EPOCH}, cell::RefCell}; -use rust_shared::{anyhow::{bail, Context, Error}, async_graphql, serde_json, tokio, utils::{type_aliases::JSONValue, general_::extensions::ToOwnedV, auth::jwt_utils_base::UserJWTData}, new_mtx, flume, to_sub_err, domains::DomainsConstants}; -use rust_shared::async_graphql::{Result, async_stream::{stream, self}, OutputType, Object, Positioned, parser::types::Field}; use deadpool_postgres::Pool; -use rust_shared::flume::{Sender, Receiver}; -use futures_util::{Stream, StreamExt, Future, stream, TryFutureExt}; -use rust_shared::SubError; -use rust_shared::serde::{Serialize, Deserialize, de::DeserializeOwned}; +use futures_util::{stream, Future, Stream, StreamExt, TryFutureExt}; +use metrics::{counter, histogram}; +use rust_shared::async_graphql::{ + async_stream::{self, stream}, + parser::types::Field, + Object, OutputType, Positioned, Result, +}; +use rust_shared::flume::{Receiver, Sender}; +use rust_shared::serde::{de::DeserializeOwned, Deserialize, Serialize}; use rust_shared::serde_json::{json, Map}; -use rust_shared::tokio_postgres::{Client, Row, types::ToSql}; +use rust_shared::tokio_postgres::{types::ToSql, Client, Row}; use rust_shared::uuid::Uuid; -use metrics::{counter, histogram}; +use rust_shared::SubError; +use rust_shared::{ + anyhow::{bail, Context, Error}, + async_graphql, + domains::DomainsConstants, + flume, new_mtx, serde_json, to_sub_err, tokio, + utils::{auth::jwt_utils_base::UserJWTData, general_::extensions::ToOwnedV, type_aliases::JSONValue}, +}; +use std::{ + any::TypeId, + cell::RefCell, + pin::Pin, + task::{Poll, Waker}, + time::{Duration, Instant, SystemTime, UNIX_EPOCH}, +}; use tracing::error; -use crate::{store::{live_queries::{LQStorageArc, LQStorage, DropLQWatcherMsg}, live_queries_::lq_key::LQKey, storage::{AppStateArc, get_app_state_from_gql_ctx}}, utils::{type_aliases::{PGClientObject}, db::rls::rls_applier::{RLSApplier}}, db::{general::sign_in_::jwt_utils::try_get_user_jwt_data_from_gql_ctx}}; -use super::super::{filter::{QueryFilter, FilterInput}, rls::{rls_applier::{self}, rls_policies::UsesRLS}}; +use super::super::{ + filter::{FilterInput, QueryFilter}, + rls::{ + rls_applier::{self}, + rls_policies::UsesRLS, + }, +}; +use crate::{ + db::general::sign_in_::jwt_utils::try_get_user_jwt_data_from_gql_ctx, + store::{ + live_queries::{DropLQWatcherMsg, LQStorage, LQStorageArc}, + live_queries_::lq_key::LQKey, + storage::{get_app_state_from_gql_ctx, AppStateArc}, + }, + utils::{db::rls::rls_applier::RLSApplier, type_aliases::PGClientObject}, +}; // helpers // ========== @@ -23,226 +53,224 @@ use super::super::{filter::{QueryFilter, FilterInput}, rls::{rls_applier::{self} //#[async_trait] pub trait GQLSet { - fn from(entries: Vec) -> Self; - //async fn nodes(&self) -> &Vec; - fn nodes(&self) -> &Vec; + fn from(entries: Vec) -> Self; + //async fn nodes(&self) -> &Vec; + fn nodes(&self) -> &Vec; } pub fn json_values_to_typed_entries + Serialize + DeserializeOwned>(json_entries: Vec) -> Result, Error> { - let mut result = Vec::new(); - for entry in json_entries { - let entry = 
serde_json::from_value(entry.clone()).with_context(|| format!("JSON structure failed to deserialize:{}", entry.to_string()))?; - result.push(entry); - } - Ok(result) + let mut result = Vec::new(); + for entry in json_entries { + let entry = serde_json::from_value(entry.clone()).with_context(|| format!("JSON structure failed to deserialize:{}", entry.to_string()))?; + result.push(entry); + } + Ok(result) } pub fn json_maps_to_typed_entries + Serialize + DeserializeOwned>(json_entries: Vec>) -> Result, Error> { - let mut result = Vec::new(); - for entry in json_entries { - let entry = serde_json::Value::Object(entry); - let entry = serde_json::from_value(entry.clone()).with_context(|| format!("JSON structure failed to deserialize:{}", entry.to_string()))?; - result.push(entry); - } - Ok(result) + let mut result = Vec::new(); + for entry in json_entries { + let entry = serde_json::Value::Object(entry); + let entry = serde_json::from_value(entry.clone()).with_context(|| format!("JSON structure failed to deserialize:{}", entry.to_string()))?; + result.push(entry); + } + Ok(result) } - - pub struct Stream_WithDropListener<'a, T> { - inner_stream: Pin + 'a + Send>>, - table_name: String, - filter: QueryFilter, - stream_id: Uuid, - sender_for_lq_watcher_drops: Sender, + inner_stream: Pin + 'a + Send>>, + table_name: String, + filter: QueryFilter, + stream_id: Uuid, + sender_for_lq_watcher_drops: Sender, } impl<'a, T> Stream_WithDropListener<'a, T> { - pub fn new(inner_stream_new: impl Stream + 'a + Send, table_name: &str, filter: QueryFilter, stream_id: Uuid, sender_for_lq_watcher_drops: Sender) -> Self { - Self { - inner_stream: Box::pin(inner_stream_new), - table_name: table_name.to_owned(), - filter, - stream_id, - sender_for_lq_watcher_drops, - } - } + pub fn new(inner_stream_new: impl Stream + 'a + Send, table_name: &str, filter: QueryFilter, stream_id: Uuid, sender_for_lq_watcher_drops: Sender) -> Self { + Self { inner_stream: Box::pin(inner_stream_new), table_name: table_name.to_owned(), filter, stream_id, sender_for_lq_watcher_drops } + } } impl<'a, T> Drop for Stream_WithDropListener<'a, T> { - fn drop(&mut self) { - //println!("Stream_WithDropListener got dropped. @address:{:p} @table:{} @filter:{:?}", self, self.table_name, self.filter); - - // the receivers of the channel below may all be dropped, causing the `send()` to return a SendError; ignore this, since it is expected (for the streams returned by `stream_for_error`) - #[allow(unused_must_use)] - { - self.sender_for_lq_watcher_drops.send(DropLQWatcherMsg::Drop_ByCollectionAndFilterAndStreamID(self.table_name.clone(), self.filter.clone(), self.stream_id)); - } - } + fn drop(&mut self) { + //println!("Stream_WithDropListener got dropped. 
@address:{:p} @table:{} @filter:{:?}", self, self.table_name, self.filter); + + // the receivers of the channel below may all be dropped, causing the `send()` to return a SendError; ignore this, since it is expected (for the streams returned by `stream_for_error`) + #[allow(unused_must_use)] + { + self.sender_for_lq_watcher_drops.send(DropLQWatcherMsg::Drop_ByCollectionAndFilterAndStreamID(self.table_name.clone(), self.filter.clone(), self.stream_id)); + } + } } impl<'a, T> Stream for Stream_WithDropListener<'a, T> { - type Item = T; - fn poll_next(mut self: Pin<&mut Self>, c: &mut std::task::Context<'_>) -> Poll::Item>> { - self.inner_stream.as_mut().poll_next(c) - } + type Item = T; + fn poll_next(mut self: Pin<&mut Self>, c: &mut std::task::Context<'_>) -> Poll::Item>> { + self.inner_stream.as_mut().poll_next(c) + } } // generic handlers // ========== -pub async fn handle_generic_gql_doc_subscription<'a, - T: 'static + UsesRLS + From + Serialize + DeserializeOwned + Send + Sync + Clone ->(ctx: &'a async_graphql::Context<'a>, table_name: &'a str, id: String) -> impl Stream, SubError>> + 'a { - //let ctx: &'static async_graphql::Context<'_> = Box::leak(Box::new(ctx)); - let app_state = get_app_state_from_gql_ctx(ctx).clone(); - let jwt_data = try_get_user_jwt_data_from_gql_ctx(ctx).await.unwrap_or_else(|_| None); - let lq_storage = app_state.live_queries.clone(); - let table_name = table_name.to_owned(); - handle_generic_gql_doc_subscription_base(lq_storage, jwt_data, table_name, id).await +pub async fn handle_generic_gql_doc_subscription<'a, T: 'static + UsesRLS + From + Serialize + DeserializeOwned + Send + Sync + Clone>(ctx: &'a async_graphql::Context<'a>, table_name: &'a str, id: String) -> impl Stream, SubError>> + 'a { + //let ctx: &'static async_graphql::Context<'_> = Box::leak(Box::new(ctx)); + let app_state = get_app_state_from_gql_ctx(ctx).clone(); + let jwt_data = try_get_user_jwt_data_from_gql_ctx(ctx).await.unwrap_or_else(|_| None); + let lq_storage = app_state.live_queries.clone(); + let table_name = table_name.to_owned(); + handle_generic_gql_doc_subscription_base(lq_storage, jwt_data, table_name, id).await +} +pub async fn handle_generic_gql_doc_subscription_base<'a, T: 'static + UsesRLS + From + Serialize + DeserializeOwned + Send + Sync + Clone>( + lq_storage: LQStorageArc, jwt_data: Option, table_name: String, id: String, +) -> impl Stream, SubError>> + 'a { + let result = tokio::spawn(async move { + let table_name = &table_name; + + new_mtx!(mtx, "1", None, Some(format!("@table_name:{table_name} @id:{id}"))); + let stream_for_error = |err: Error| { + //return stream::once(async { Err(err) }); + let base_stream = async_stream::stream! 
{ + //yield Err(SubError::new(err.to_string())); + yield Err(to_sub_err(err)); + }; + let (s1, _r1): (Sender, Receiver) = flume::unbounded(); + Stream_WithDropListener::<'static, Result, SubError>>::new(base_stream, table_name, QueryFilter::empty(), Uuid::new_v4(), s1) + }; + + mtx.section("2"); + //tokio::time::sleep(std::time::Duration::from_millis(123456789)).await; // temp + let filter_json = json!({"id": {"equalTo": id}}); + let filter = match QueryFilter::from_filter_input(&filter_json) { + Ok(a) => a, + Err(err) => return stream_for_error(err), + }; + //let filter = QueryFilter::from_filter_input_opt(&Some(filter_json)).unwrap(); + let (entry_as_type, stream_id, sender_for_dropping_lq_watcher, lq_entry_receiver_clone) = { + let lq_key = LQKey::new(table_name.o(), filter.o()); + /*let mut stream = GQLResultStream::new(storage_wrapper.clone(), table_name, filter.clone(), GQLSetVariant::from(entries)); + let stream_id = stream.id.clone();*/ + let stream_id = Uuid::new_v4(); + //let (mut entries_as_type, watcher) = storage.start_lq_watcher::(table_name, &filter, stream_id, ctx, Some(&mtx)).await; + let (mut entries_as_type, watcher) = match lq_storage.start_lq_watcher::(&lq_key, stream_id, Some(&mtx)).await { + Ok(a) => a, + Err(err) => { + // an error in start_lq_watcher is likely from failed data-deserialization; since user may not have permission to access all entries (since this is before filtering), only return a generic error in production + if DomainsConstants::new().on_server_and_prod { + error!("{:?}", err); + return stream_for_error(Error::msg("Failed to start live query watcher. The full error has been logged to the app-server.")); + } + return stream_for_error(err); + }, + }; + let entry_as_type = entries_as_type.pop(); + + (entry_as_type, stream_id, lq_storage.channel_for_lq_watcher_drops__sender_base.clone(), watcher.new_entries_channel_receiver.clone()) + }; + + mtx.section("3"); + //let filter_clone = filter.clone(); + let base_stream = async_stream::stream! { + let mut rls_applier = RLSApplier::new(jwt_data); + if let (next_result, _changed) = rls_applier.filter_next_result_for_doc(entry_as_type) { + yield Ok(next_result); + } + + loop { + let next_entries = match lq_entry_receiver_clone.recv_async().await { + Ok(a) => a, + Err(_) => break, // if unwrap fails, break loop (since senders are dead anyway) + }; + let mut next_entries_as_type: Vec = json_maps_to_typed_entries(next_entries).map_err(to_sub_err)?; + let next_entry_as_type: Option = next_entries_as_type.pop(); + + // only yield next-result if it has changed after filtering (otherwise seeing an "unchanged update" leaks knowledge that a hidden, matching entry was changed) + if let (next_result, changed) = rls_applier.filter_next_result_for_doc(next_entry_as_type) && changed { + yield Ok(next_result); + } + } + }; + Stream_WithDropListener::new(base_stream, table_name, filter, stream_id, sender_for_dropping_lq_watcher) + //base_stream + }) + .await + .unwrap(); + result } -pub async fn handle_generic_gql_doc_subscription_base<'a, - T: 'static + UsesRLS + From + Serialize + DeserializeOwned + Send + Sync + Clone ->(lq_storage: LQStorageArc, jwt_data: Option, table_name: String, id: String) -> impl Stream, SubError>> + 'a { - let result = tokio::spawn(async move { - let table_name = &table_name; - - new_mtx!(mtx, "1", None, Some(format!("@table_name:{table_name} @id:{id}"))); - let stream_for_error = |err: Error| { - //return stream::once(async { Err(err) }); - let base_stream = async_stream::stream! 
{ - //yield Err(SubError::new(err.to_string())); - yield Err(to_sub_err(err)); - }; - let (s1, _r1): (Sender, Receiver) = flume::unbounded(); - Stream_WithDropListener::<'static, Result, SubError>>::new(base_stream, table_name, QueryFilter::empty(), Uuid::new_v4(), s1) - }; - - mtx.section("2"); - //tokio::time::sleep(std::time::Duration::from_millis(123456789)).await; // temp - let filter_json = json!({"id": {"equalTo": id}}); - let filter = match QueryFilter::from_filter_input(&filter_json) { Ok(a) => a, Err(err) => return stream_for_error(err) }; - //let filter = QueryFilter::from_filter_input_opt(&Some(filter_json)).unwrap(); - let (entry_as_type, stream_id, sender_for_dropping_lq_watcher, lq_entry_receiver_clone) = { - let lq_key = LQKey::new(table_name.o(), filter.o()); - /*let mut stream = GQLResultStream::new(storage_wrapper.clone(), table_name, filter.clone(), GQLSetVariant::from(entries)); - let stream_id = stream.id.clone();*/ - let stream_id = Uuid::new_v4(); - //let (mut entries_as_type, watcher) = storage.start_lq_watcher::(table_name, &filter, stream_id, ctx, Some(&mtx)).await; - let (mut entries_as_type, watcher) = match lq_storage.start_lq_watcher::(&lq_key, stream_id, Some(&mtx)).await { - Ok(a) => a, - Err(err) => { - // an error in start_lq_watcher is likely from failed data-deserialization; since user may not have permission to access all entries (since this is before filtering), only return a generic error in production - if DomainsConstants::new().on_server_and_prod { - error!("{:?}", err); - return stream_for_error(Error::msg("Failed to start live query watcher. The full error has been logged to the app-server.")); - } - return stream_for_error(err); - }, - }; - let entry_as_type = entries_as_type.pop(); - - (entry_as_type, stream_id, lq_storage.channel_for_lq_watcher_drops__sender_base.clone(), watcher.new_entries_channel_receiver.clone()) - }; - - mtx.section("3"); - //let filter_clone = filter.clone(); - let base_stream = async_stream::stream! 
{ - let mut rls_applier = RLSApplier::new(jwt_data); - if let (next_result, _changed) = rls_applier.filter_next_result_for_doc(entry_as_type) { - yield Ok(next_result); - } - - loop { - let next_entries = match lq_entry_receiver_clone.recv_async().await { - Ok(a) => a, - Err(_) => break, // if unwrap fails, break loop (since senders are dead anyway) - }; - let mut next_entries_as_type: Vec = json_maps_to_typed_entries(next_entries).map_err(to_sub_err)?; - let next_entry_as_type: Option = next_entries_as_type.pop(); - - // only yield next-result if it has changed after filtering (otherwise seeing an "unchanged update" leaks knowledge that a hidden, matching entry was changed) - if let (next_result, changed) = rls_applier.filter_next_result_for_doc(next_entry_as_type) && changed { - yield Ok(next_result); - } - } - }; - Stream_WithDropListener::new(base_stream, table_name, filter, stream_id, sender_for_dropping_lq_watcher) - //base_stream - }).await.unwrap(); - result + +pub async fn handle_generic_gql_collection_subscription<'a, T: 'static + UsesRLS + From + Serialize + DeserializeOwned + Send + Clone, GQLSetVariant: 'static + GQLSet + Send + Clone + Sync>( + ctx: &'a async_graphql::Context<'a>, table_name: &'a str, filter_json: Option, +) -> impl Stream> + 'a { + let app_state = get_app_state_from_gql_ctx(ctx).clone(); + let jwt_data = try_get_user_jwt_data_from_gql_ctx(ctx).await.unwrap_or_else(|_| None); + let lq_storage = app_state.live_queries.clone(); + let table_name = table_name.to_owned(); + handle_generic_gql_collection_subscription_base(lq_storage, jwt_data, table_name, filter_json).await } +pub async fn handle_generic_gql_collection_subscription_base<'a, T: 'static + UsesRLS + From + Serialize + DeserializeOwned + Send + Clone, GQLSetVariant: 'static + GQLSet + Send + Clone + Sync>( + lq_storage: LQStorageArc, jwt_data: Option, table_name: String, filter_json: Option, +) -> impl Stream> + 'a { + let result = tokio::spawn(async move { + let table_name = &table_name; // is this actually needed? + + new_mtx!(mtx, "1", None, Some(format!("@table_name:{table_name} @filter:{filter_json:?}"))); + let stream_for_error = |err: Error| { + //return stream::once(async { Err(err) }); + let base_stream = async_stream::stream! 
{ + //yield Err(SubError::new(err.to_string())); + yield Err(to_sub_err(err)); + }; + let (s1, _r1): (Sender, Receiver) = flume::unbounded(); + Stream_WithDropListener::new(base_stream, table_name, QueryFilter::empty(), Uuid::new_v4(), s1) + }; + + mtx.section("2"); + let filter = match QueryFilter::from_filter_input_opt(&filter_json) { + Ok(a) => a, + Err(err) => return stream_for_error(err), + }; + //let filter = QueryFilter::from_filter_input_opt(&filter_json).unwrap(); + let (entries_as_type, stream_id, sender_for_dropping_lq_watcher, lq_entry_receiver_clone) = { + let lq_key = LQKey::new(table_name.o(), filter.o()); + /*let mut stream = GQLResultStream::new(storage_wrapper.clone(), collection_name, filter.clone(), GQLSetVariant::from(entries)); + let stream_id = stream.id.clone();*/ + let stream_id = Uuid::new_v4(); + let (entries_as_type, watcher) = match lq_storage.start_lq_watcher::(&lq_key, stream_id, Some(&mtx)).await { + Ok(a) => a, + Err(err) => { + // an error in start_lq_watcher is likely from failed data-deserialization; since user may not have permission to access all entries (since this is before filtering), only return a generic error in production + if DomainsConstants::new().on_server_and_prod { + error!("{:?}", err); + return stream_for_error(Error::msg("Failed to start live query watcher. The full error has been logged on the app-server.")); + } + return stream_for_error(err); + }, + }; + + (entries_as_type, stream_id, lq_storage.channel_for_lq_watcher_drops__sender_base.clone(), watcher.new_entries_channel_receiver.clone()) + }; + + mtx.section("3"); + //let filter_clone = filter.clone(); + let base_stream = async_stream::stream! { + let mut rls_applier = RLSApplier::new(jwt_data); + if let (next_result, _changed) = rls_applier.filter_next_result_for_collection(entries_as_type) { + yield Ok(GQLSetVariant::from(next_result)); + } + + loop { + let next_entries = match lq_entry_receiver_clone.recv_async().await { + Ok(a) => a, + Err(_) => break, // if unwrap fails, break loop (since senders are dead anyway) + }; + let next_entries_as_type: Vec = json_maps_to_typed_entries(next_entries).map_err(to_sub_err)?; -pub async fn handle_generic_gql_collection_subscription<'a, - T: 'static + UsesRLS + From + Serialize + DeserializeOwned + Send + Clone, - GQLSetVariant: 'static + GQLSet + Send + Clone + Sync, ->(ctx: &'a async_graphql::Context<'a>, table_name: &'a str, filter_json: Option) -> impl Stream> + 'a { - let app_state = get_app_state_from_gql_ctx(ctx).clone(); - let jwt_data = try_get_user_jwt_data_from_gql_ctx(ctx).await.unwrap_or_else(|_| None); - let lq_storage = app_state.live_queries.clone(); - let table_name = table_name.to_owned(); - handle_generic_gql_collection_subscription_base(lq_storage, jwt_data, table_name, filter_json).await + // only yield next-result if it has changed after filtering (otherwise seeing an "unchanged update" leaks knowledge that a hidden, matching entry was changed) + if let (next_result, changed) = rls_applier.filter_next_result_for_collection(next_entries_as_type) && changed { + yield Ok(GQLSetVariant::from(next_result)); + } + } + }; + Stream_WithDropListener::new(base_stream, table_name, filter, stream_id, sender_for_dropping_lq_watcher) + //base_stream + }) + .await + .unwrap(); + result } -pub async fn handle_generic_gql_collection_subscription_base<'a, - T: 'static + UsesRLS + From + Serialize + DeserializeOwned + Send + Clone, - GQLSetVariant: 'static + GQLSet + Send + Clone + Sync, ->(lq_storage: LQStorageArc, jwt_data: Option, 
table_name: String, filter_json: Option) -> impl Stream> + 'a { - let result = tokio::spawn(async move { - let table_name = &table_name; // is this actually needed? - - new_mtx!(mtx, "1", None, Some(format!("@table_name:{table_name} @filter:{filter_json:?}"))); - let stream_for_error = |err: Error| { - //return stream::once(async { Err(err) }); - let base_stream = async_stream::stream! { - //yield Err(SubError::new(err.to_string())); - yield Err(to_sub_err(err)); - }; - let (s1, _r1): (Sender, Receiver) = flume::unbounded(); - Stream_WithDropListener::new(base_stream, table_name, QueryFilter::empty(), Uuid::new_v4(), s1) - }; - - mtx.section("2"); - let filter = match QueryFilter::from_filter_input_opt(&filter_json) { Ok(a) => a, Err(err) => return stream_for_error(err) }; - //let filter = QueryFilter::from_filter_input_opt(&filter_json).unwrap(); - let (entries_as_type, stream_id, sender_for_dropping_lq_watcher, lq_entry_receiver_clone) = { - let lq_key = LQKey::new(table_name.o(), filter.o()); - /*let mut stream = GQLResultStream::new(storage_wrapper.clone(), collection_name, filter.clone(), GQLSetVariant::from(entries)); - let stream_id = stream.id.clone();*/ - let stream_id = Uuid::new_v4(); - let (entries_as_type, watcher) = match lq_storage.start_lq_watcher::(&lq_key, stream_id, Some(&mtx)).await { - Ok(a) => a, - Err(err) => { - // an error in start_lq_watcher is likely from failed data-deserialization; since user may not have permission to access all entries (since this is before filtering), only return a generic error in production - if DomainsConstants::new().on_server_and_prod { - error!("{:?}", err); - return stream_for_error(Error::msg("Failed to start live query watcher. The full error has been logged on the app-server.")); - } - return stream_for_error(err); - }, - }; - - (entries_as_type, stream_id, lq_storage.channel_for_lq_watcher_drops__sender_base.clone(), watcher.new_entries_channel_receiver.clone()) - }; - - mtx.section("3"); - //let filter_clone = filter.clone(); - let base_stream = async_stream::stream! 
{ - let mut rls_applier = RLSApplier::new(jwt_data); - if let (next_result, _changed) = rls_applier.filter_next_result_for_collection(entries_as_type) { - yield Ok(GQLSetVariant::from(next_result)); - } - - loop { - let next_entries = match lq_entry_receiver_clone.recv_async().await { - Ok(a) => a, - Err(_) => break, // if unwrap fails, break loop (since senders are dead anyway) - }; - let next_entries_as_type: Vec = json_maps_to_typed_entries(next_entries).map_err(to_sub_err)?; - - // only yield next-result if it has changed after filtering (otherwise seeing an "unchanged update" leaks knowledge that a hidden, matching entry was changed) - if let (next_result, changed) = rls_applier.filter_next_result_for_collection(next_entries_as_type) && changed { - yield Ok(GQLSetVariant::from(next_result)); - } - } - }; - Stream_WithDropListener::new(base_stream, table_name, filter, stream_id, sender_for_dropping_lq_watcher) - //base_stream - }).await.unwrap(); - result -} \ No newline at end of file diff --git a/Packages/app-server/src/utils/db/pg_row_to_json.rs b/Packages/app-server/src/utils/db/pg_row_to_json.rs index 01b484c17..1fdb3fd27 100644 --- a/Packages/app-server/src/utils/db/pg_row_to_json.rs +++ b/Packages/app-server/src/utils/db/pg_row_to_json.rs @@ -1,98 +1,100 @@ -use rust_shared::anyhow::{anyhow, bail, Context, Error, ensure}; -use rust_shared::async_graphql::{Result}; -use futures_util::{StreamExt}; +use futures_util::StreamExt; +use rust_shared::anyhow::{anyhow, bail, ensure, Context, Error}; +use rust_shared::async_graphql::Result; use rust_shared::serde::Deserialize; -use rust_shared::serde_json::{json, Map, self}; -use rust_shared::tokio_postgres::{Column, types}; -use rust_shared::tokio_postgres::types::{Type, FromSql}; -use rust_shared::tokio_postgres::{Row}; +use rust_shared::serde_json::{self, json, Map}; +use rust_shared::tokio_postgres::types::{FromSql, Type}; +use rust_shared::tokio_postgres::Row; +use rust_shared::tokio_postgres::{types, Column}; use rust_shared::utils::general_::extensions::IteratorV; use rust_shared::utils::type_aliases::{JSONValue, RowData}; pub fn postgres_row_to_struct<'a, T: for<'de> Deserialize<'de>>(row: Row) -> Result { - let as_json = postgres_row_to_json_value(row, 100)?; - Ok(serde_json::from_value(as_json)?) + let as_json = postgres_row_to_json_value(row, 100)?; + Ok(serde_json::from_value(as_json)?) 
} pub fn postgres_row_to_json_value(row: Row, columns_to_process: usize) -> Result { - let row_data = postgres_row_to_row_data(row, columns_to_process)?; - Ok(JSONValue::Object(row_data)) + let row_data = postgres_row_to_row_data(row, columns_to_process)?; + Ok(JSONValue::Object(row_data)) } pub fn postgres_row_to_row_data(row: Row, columns_to_process: usize) -> Result { - let mut result: Map = Map::new(); - for (i, column) in row.columns().iter().take(columns_to_process).enumerate() { - let name = column.name(); - /*let value = row.get(name); - result.insert(name.to_string(), value);*/ - let json_value = pg_cell_to_json_value(&row, column, i)?; - result.insert(name.to_string(), json_value); - } - Ok(result) + let mut result: Map = Map::new(); + for (i, column) in row.columns().iter().take(columns_to_process).enumerate() { + let name = column.name(); + /*let value = row.get(name); + result.insert(name.to_string(), value);*/ + let json_value = pg_cell_to_json_value(&row, column, i)?; + result.insert(name.to_string(), json_value); + } + Ok(result) } // keep overall structure in-sync with wal_structs.rs pub fn pg_cell_to_json_value(row: &Row, column: &Column, column_i: usize) -> Result { - let f64_to_json_number = |raw_val: f64| -> Result { - let temp = serde_json::Number::from_f64(raw_val.into()).ok_or(anyhow!("invalid json-float"))?; - Ok(JSONValue::Number(temp)) - }; - Ok(match *column.type_() { - // for rust-postgres <> postgres type-mappings: https://docs.rs/postgres/latest/postgres/types/trait.FromSql.html#types - // for postgres types: https://www.postgresql.org/docs/7.4/datatype.html#DATATYPE-TABLE + let f64_to_json_number = |raw_val: f64| -> Result { + let temp = serde_json::Number::from_f64(raw_val.into()).ok_or(anyhow!("invalid json-float"))?; + Ok(JSONValue::Number(temp)) + }; + Ok(match *column.type_() { + // for rust-postgres <> postgres type-mappings: https://docs.rs/postgres/latest/postgres/types/trait.FromSql.html#types + // for postgres types: https://www.postgresql.org/docs/7.4/datatype.html#DATATYPE-TABLE - // single types - Type::BOOL => get_basic(row, column, column_i, |a: bool| Ok(JSONValue::Bool(a)))?, - Type::INT2 => get_basic(row, column, column_i, |a: i16| Ok(JSONValue::Number(serde_json::Number::from(a))))?, - Type::INT4 => get_basic(row, column, column_i, |a: i32| Ok(JSONValue::Number(serde_json::Number::from(a))))?, - Type::INT8 => get_basic(row, column, column_i, |a: i64| Ok(JSONValue::Number(serde_json::Number::from(a))))?, - Type::TEXT | Type::VARCHAR => get_basic(row, column, column_i, |a: String| Ok(JSONValue::String(a)))?, - Type::JSON | Type::JSONB => get_basic(row, column, column_i, |a: JSONValue| Ok(a))?, - Type::FLOAT4 => get_basic(row, column, column_i, |a: f32| Ok(f64_to_json_number(a.into())?))?, - Type::FLOAT8 => get_basic(row, column, column_i, |a: f64| Ok(f64_to_json_number(a)?))?, - // these types require a custom StringCollector struct as an intermediary - Type::TS_VECTOR => get_basic(row, column, column_i, |a: StringCollector| Ok(JSONValue::String(a.0)))?, + // single types + Type::BOOL => get_basic(row, column, column_i, |a: bool| Ok(JSONValue::Bool(a)))?, + Type::INT2 => get_basic(row, column, column_i, |a: i16| Ok(JSONValue::Number(serde_json::Number::from(a))))?, + Type::INT4 => get_basic(row, column, column_i, |a: i32| Ok(JSONValue::Number(serde_json::Number::from(a))))?, + Type::INT8 => get_basic(row, column, column_i, |a: i64| Ok(JSONValue::Number(serde_json::Number::from(a))))?, + Type::TEXT | Type::VARCHAR => get_basic(row, column, 
column_i, |a: String| Ok(JSONValue::String(a)))?, + Type::JSON | Type::JSONB => get_basic(row, column, column_i, |a: JSONValue| Ok(a))?, + Type::FLOAT4 => get_basic(row, column, column_i, |a: f32| Ok(f64_to_json_number(a.into())?))?, + Type::FLOAT8 => get_basic(row, column, column_i, |a: f64| Ok(f64_to_json_number(a)?))?, + // these types require a custom StringCollector struct as an intermediary + Type::TS_VECTOR => get_basic(row, column, column_i, |a: StringCollector| Ok(JSONValue::String(a.0)))?, - // array types - Type::BOOL_ARRAY => get_array(row, column, column_i, |a: bool| Ok(JSONValue::Bool(a)))?, - Type::INT2_ARRAY => get_array(row, column, column_i, |a: i16| Ok(JSONValue::Number(serde_json::Number::from(a))))?, - Type::INT4_ARRAY => get_array(row, column, column_i, |a: i32| Ok(JSONValue::Number(serde_json::Number::from(a))))?, - Type::INT8_ARRAY => get_array(row, column, column_i, |a: i64| Ok(JSONValue::Number(serde_json::Number::from(a))))?, - Type::TEXT_ARRAY | Type::VARCHAR_ARRAY => get_array(row, column, column_i, |a: String| Ok(JSONValue::String(a)))?, - Type::JSON_ARRAY | Type::JSONB_ARRAY => get_array(row, column, column_i, |a: JSONValue| Ok(a))?, - Type::FLOAT4_ARRAY => get_array(row, column, column_i, |a: f32| Ok(f64_to_json_number(a.into())?))?, - Type::FLOAT8_ARRAY => get_array(row, column, column_i, |a: f64| Ok(f64_to_json_number(a)?))?, - // these types require a custom StringCollector struct as an intermediary - Type::TS_VECTOR_ARRAY => get_array(row, column, column_i, |a: StringCollector| Ok(JSONValue::String(a.0)))?, + // array types + Type::BOOL_ARRAY => get_array(row, column, column_i, |a: bool| Ok(JSONValue::Bool(a)))?, + Type::INT2_ARRAY => get_array(row, column, column_i, |a: i16| Ok(JSONValue::Number(serde_json::Number::from(a))))?, + Type::INT4_ARRAY => get_array(row, column, column_i, |a: i32| Ok(JSONValue::Number(serde_json::Number::from(a))))?, + Type::INT8_ARRAY => get_array(row, column, column_i, |a: i64| Ok(JSONValue::Number(serde_json::Number::from(a))))?, + Type::TEXT_ARRAY | Type::VARCHAR_ARRAY => get_array(row, column, column_i, |a: String| Ok(JSONValue::String(a)))?, + Type::JSON_ARRAY | Type::JSONB_ARRAY => get_array(row, column, column_i, |a: JSONValue| Ok(a))?, + Type::FLOAT4_ARRAY => get_array(row, column, column_i, |a: f32| Ok(f64_to_json_number(a.into())?))?, + Type::FLOAT8_ARRAY => get_array(row, column, column_i, |a: f64| Ok(f64_to_json_number(a)?))?, + // these types require a custom StringCollector struct as an intermediary + Type::TS_VECTOR_ARRAY => get_array(row, column, column_i, |a: StringCollector| Ok(JSONValue::String(a.0)))?, - _ => bail!("Cannot convert pg-cell \"{}\" of type \"{}\" to a JSONValue.", column.name(), column.type_().name()), - }) + _ => bail!("Cannot convert pg-cell \"{}\" of type \"{}\" to a JSONValue.", column.name(), column.type_().name()), + }) } fn get_basic<'a, T: FromSql<'a>>(row: &'a Row, column: &Column, column_i: usize, val_to_json_val: impl Fn(T) -> Result) -> Result { - let raw_val = row.try_get::<_, Option>(column_i).with_context(|| format!("column_name:{}", column.name()))?; - raw_val.map_or(Ok(JSONValue::Null), val_to_json_val) + let raw_val = row.try_get::<_, Option>(column_i).with_context(|| format!("column_name:{}", column.name()))?; + raw_val.map_or(Ok(JSONValue::Null), val_to_json_val) } fn get_array<'a, T: FromSql<'a>>(row: &'a Row, column: &Column, column_i: usize, val_to_json_val: impl Fn(T) -> Result) -> Result { - let raw_val_array = row.try_get::<_, Option>>(column_i).with_context(|| 
format!("column_name:{}", column.name()))?; - Ok(match raw_val_array { - /*Some(val_array) => { - let mut result = vec![]; - for val in val_array { - result.push(val_to_json_val(val)?); - } - JSONValue::Array(result) - },*/ - Some(val_array) => JSONValue::Array(val_array.into_iter().map(|a| val_to_json_val(a)).try_collect2::>()?), - None => JSONValue::Null, - }) + let raw_val_array = row.try_get::<_, Option>>(column_i).with_context(|| format!("column_name:{}", column.name()))?; + Ok(match raw_val_array { + /*Some(val_array) => { + let mut result = vec![]; + for val in val_array { + result.push(val_to_json_val(val)?); + } + JSONValue::Array(result) + },*/ + Some(val_array) => JSONValue::Array(val_array.into_iter().map(|a| val_to_json_val(a)).try_collect2::>()?), + None => JSONValue::Null, + }) } pub struct StringCollector(pub String); impl FromSql<'_> for StringCollector { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { - /*let result = std::str::from_utf8_lossy(raw); - Ok(StringCollector(result.to_owned()))*/ - let result = String::from_utf8_lossy(raw); - Ok(StringCollector(result.to_string())) - } - fn accepts(_ty: &Type) -> bool { true } -} \ No newline at end of file + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + /*let result = std::str::from_utf8_lossy(raw); + Ok(StringCollector(result.to_owned()))*/ + let result = String::from_utf8_lossy(raw); + Ok(StringCollector(result.to_string())) + } + fn accepts(_ty: &Type) -> bool { + true + } +} diff --git a/Packages/app-server/src/utils/db/pg_stream_parsing.rs b/Packages/app-server/src/utils/db/pg_stream_parsing.rs index 266da1396..5a4386f9b 100644 --- a/Packages/app-server/src/utils/db/pg_stream_parsing.rs +++ b/Packages/app-server/src/utils/db/pg_stream_parsing.rs @@ -1,228 +1,229 @@ +use rust_shared::async_graphql; use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde; +use rust_shared::serde_json::{self, Map}; use rust_shared::utils::type_aliases::RowData; use rust_shared::{serde::Deserialize, utils::type_aliases::JSONValue}; -use rust_shared::serde_json::{Map, self}; -use rust_shared::async_graphql; -use rust_shared::serde; #[cfg(test)] mod tests { - use crate::utils::db::pg_stream_parsing::parse_postgres_array_as_strings; + use crate::utils::db::pg_stream_parsing::parse_postgres_array_as_strings; - #[test] - fn simple() { - let simple_source = "{example}"; - let simple_result = parse_postgres_array_as_strings(simple_source); - assert_eq!(simple_result, vec!["example"]); - } - #[test] - fn escaped() { - let escaped_source = r#"{"example \"text\" with quotes in it",123123}"#; - let escaped_result = parse_postgres_array_as_strings(escaped_source); - assert_eq!(escaped_result, vec![r#"example "text" with quotes in it"#, "123123"]); - } + #[test] + fn simple() { + let simple_source = "{example}"; + let simple_result = parse_postgres_array_as_strings(simple_source); + assert_eq!(simple_result, vec!["example"]); + } + #[test] + fn escaped() { + let escaped_source = r#"{"example \"text\" with quotes in it",123123}"#; + let escaped_result = parse_postgres_array_as_strings(escaped_source); + assert_eq!(escaped_result, vec![r#"example "text" with quotes in it"#, "123123"]); + } } /// See: https://github.com/eulerto/wal2json/issues/221#issuecomment-1025143441 /// View the tests above for examples, and intended functionality. 
pub fn parse_postgres_array(array_str: &str, items_are_serialized: bool) -> JSONValue { - let result_as_strings: Vec = parse_postgres_array_as_strings(array_str); - let result_as_json_values = result_as_strings.into_iter().map(|item_as_str| { - if items_are_serialized { - serde_json::from_str(&item_as_str).unwrap() - } else { - serde_json::Value::String(item_as_str) - } - }).collect(); - let result_within_json_array = serde_json::Value::Array(result_as_json_values); - result_within_json_array + let result_as_strings: Vec = parse_postgres_array_as_strings(array_str); + let result_as_json_values = result_as_strings + .into_iter() + .map(|item_as_str| { + if items_are_serialized { + serde_json::from_str(&item_as_str).unwrap() + } else { + serde_json::Value::String(item_as_str) + } + }) + .collect(); + let result_within_json_array = serde_json::Value::Array(result_as_json_values); + result_within_json_array } pub fn parse_postgres_array_as_strings(array_str: &str) -> Vec { - let chars_struct = array_str.chars(); - let chars = chars_struct.collect::>(); - // todo: make sure this is fine as-is (it might need to accept number values as well; not sure about the pathway and/or frontend handling) - let mut result_as_strings: Vec = vec![]; + let chars_struct = array_str.chars(); + let chars = chars_struct.collect::>(); + // todo: make sure this is fine as-is (it might need to accept number values as well; not sure about the pathway and/or frontend handling) + let mut result_as_strings: Vec = vec![]; - let mut in_quote = false; - let mut in_entry = false; - let mut last_char_was_escape_backslash = false; - //let mut current_entry_str: Option = None; - let mut current_entry_str: String = String::new(); // empty means none + let mut in_quote = false; + let mut in_entry = false; + let mut last_char_was_escape_backslash = false; + //let mut current_entry_str: Option = None; + let mut current_entry_str: String = String::new(); // empty means none - /*let mut end_current_entry = || { - result_as_strings.push(current_entry_str.unwrap()); - current_entry_str = None; - in_quote = false; - in_entry = false; - };*/ + /*let mut end_current_entry = || { + result_as_strings.push(current_entry_str.unwrap()); + current_entry_str = None; + in_quote = false; + in_entry = false; + };*/ - //for (i, ch) in chars.enumerate() { - //let chars_length = chars.into_iter().count(); - let chars_length = chars.len(); - for (i, ch) in chars.into_iter().enumerate() { - if last_char_was_escape_backslash { - last_char_was_escape_backslash = false; - //current_entry_str.unwrap().push(ch); - current_entry_str.push(ch); - continue; - } + //for (i, ch) in chars.enumerate() { + //let chars_length = chars.into_iter().count(); + let chars_length = chars.len(); + for (i, ch) in chars.into_iter().enumerate() { + if last_char_was_escape_backslash { + last_char_was_escape_backslash = false; + //current_entry_str.unwrap().push(ch); + current_entry_str.push(ch); + continue; + } - match ch { - '{' if i == 0 => {}, - '}' if i == chars_length - 1 => { - //if current_entry_str.is_some() { - if !current_entry_str.is_empty() { - //end_current_entry(); - { - /*result_as_strings.push(current_entry_str.unwrap()); - current_entry_str = None;*/ - result_as_strings.push(current_entry_str); - current_entry_str = String::new(); - in_quote = false; - in_entry = false; - } - } - }, - '\\' => { - last_char_was_escape_backslash = true; - }, - '"' => { - in_quote = !in_quote; - // if just left a quote - if !in_quote { - //end_current_entry(); - { - 
result_as_strings.push(current_entry_str); - current_entry_str = String::new(); - in_quote = false; - in_entry = false; - } - } - }, - // ie. if just left a quote - ',' if !in_entry => {}, - // if hit a separator after a non-quoted entry - ',' if in_entry && !in_quote => { - //end_current_entry(); - { - result_as_strings.push(current_entry_str); - current_entry_str = String::new(); - in_quote = false; - in_entry = false; - } - }, - _ => { - // if hit start of entry - //if current_entry_str.is_none() { - if current_entry_str.is_empty() { - //current_entry_str = Some(String::new()); - current_entry_str = String::new(); - in_entry = true; - } - current_entry_str.push(ch); - } - }; - } - result_as_strings + match ch { + '{' if i == 0 => {}, + '}' if i == chars_length - 1 => { + //if current_entry_str.is_some() { + if !current_entry_str.is_empty() { + //end_current_entry(); + { + /*result_as_strings.push(current_entry_str.unwrap()); + current_entry_str = None;*/ + result_as_strings.push(current_entry_str); + current_entry_str = String::new(); + in_quote = false; + in_entry = false; + } + } + }, + '\\' => { + last_char_was_escape_backslash = true; + }, + '"' => { + in_quote = !in_quote; + // if just left a quote + if !in_quote { + //end_current_entry(); + { + result_as_strings.push(current_entry_str); + current_entry_str = String::new(); + in_quote = false; + in_entry = false; + } + } + }, + // ie. if just left a quote + ',' if !in_entry => {}, + // if hit a separator after a non-quoted entry + ',' if in_entry && !in_quote => { + //end_current_entry(); + { + result_as_strings.push(current_entry_str); + current_entry_str = String::new(); + in_quote = false; + in_entry = false; + } + }, + _ => { + // if hit start of entry + //if current_entry_str.is_none() { + if current_entry_str.is_empty() { + //current_entry_str = Some(String::new()); + current_entry_str = String::new(); + in_entry = true; + } + current_entry_str.push(ch); + }, + }; + } + result_as_strings } /// For example contents (as received directly from wal2json), see `@FormatExamples/WAL2JSON_Messages.md`. #[derive(Debug, Clone, Deserialize)] pub struct LDChange { - pub kind: String, - /// Present in data from lds, but not used for anything atm (within app-server). - pub schema: String, - pub table: String, - pub columnnames: Option>, - pub columntypes: Option>, - pub columnvalues: Option>, - pub oldkeys: Option, + pub kind: String, + /// Present in data from lds, but not used for anything atm (within app-server). + pub schema: String, + pub table: String, + pub columnnames: Option>, + pub columntypes: Option>, + pub columnvalues: Option>, + pub oldkeys: Option, - // custom - pub needs_wal2json_jsonval_fixes: Option, + // custom + pub needs_wal2json_jsonval_fixes: Option, } impl LDChange { - pub fn new_data_as_map(&self) -> Option { - //let new_entry = JSONValue::Object(); - //let new_entry = json!({}); - let mut new_entry: RowData = Map::new(); - for (i, key) in self.columnnames.as_ref()?.iter().enumerate() { - let typ = self.columntypes.as_ref()?.get(i).unwrap(); - let value = self.columnvalues.as_ref()?.get(i).unwrap(); - let value_final = if self.needs_wal2json_jsonval_fixes.unwrap_or(true) { // if unspecified, try to apply fixes - clone_ldchange_val_0with_type_fixes(value, typ) - } else { - value.clone() - }; - new_entry.insert(key.to_owned(), value_final); - } - //*new_entry.as_object().unwrap() - Some(new_entry) - } - /// Tries to get row-id from `oldkeys` data; else, falls back to using the new-data (ie. from `columnvalues`). 
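// Hypothetical usage sketch for `get_row_id` below (sketch only; the concrete values are invented for
// illustration, while the field names/types are the ones declared on LDChange/OldKeys above).
// For an update that carries `oldkeys`, the id comes from there; with `oldkeys: None`, it would fall
// back to the "id" column in `columnvalues`.
let change = LDChange {
	kind: "update".to_owned(),
	schema: "app".to_owned(), // schema/table values are made up for the example
	table: "terms".to_owned(),
	columnnames: Some(vec!["id".to_owned(), "name".to_owned()]),
	columntypes: Some(vec!["text".to_owned(), "text".to_owned()]),
	columnvalues: Some(vec![serde_json::json!("abc123"), serde_json::json!("renamed term")]),
	oldkeys: Some(OldKeys {
		keynames: vec!["id".to_owned()],
		keytypes: vec!["text".to_owned()],
		keyvalues: vec![serde_json::json!("abc123")],
		needs_wal2json_jsonval_fixes: Some(false),
	}),
	needs_wal2json_jsonval_fixes: Some(false), // skip the jsonb/array fixups; values here are plain strings
};
assert_eq!(change.get_row_id(), "abc123");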
- pub fn get_row_id(&self) -> String { - let id_from_oldkeys = self.oldkeys.clone() - .and_then(|a| a.data_as_map().get("id").cloned()) - .and_then(|a| a.as_str().map(|b| b.to_owned())); - match id_from_oldkeys { - Some(id) => id, - None => { - let new_data_as_map = self.new_data_as_map(); - new_data_as_map.unwrap().get("id").unwrap().as_str().map(|a| a.to_owned()).unwrap() - }, - } - } + pub fn new_data_as_map(&self) -> Option { + //let new_entry = JSONValue::Object(); + //let new_entry = json!({}); + let mut new_entry: RowData = Map::new(); + for (i, key) in self.columnnames.as_ref()?.iter().enumerate() { + let typ = self.columntypes.as_ref()?.get(i).unwrap(); + let value = self.columnvalues.as_ref()?.get(i).unwrap(); + let value_final = if self.needs_wal2json_jsonval_fixes.unwrap_or(true) { + // if unspecified, try to apply fixes + clone_ldchange_val_0with_type_fixes(value, typ) + } else { + value.clone() + }; + new_entry.insert(key.to_owned(), value_final); + } + //*new_entry.as_object().unwrap() + Some(new_entry) + } + /// Tries to get row-id from `oldkeys` data; else, falls back to using the new-data (ie. from `columnvalues`). + pub fn get_row_id(&self) -> String { + let id_from_oldkeys = self.oldkeys.clone().and_then(|a| a.data_as_map().get("id").cloned()).and_then(|a| a.as_str().map(|b| b.to_owned())); + match id_from_oldkeys { + Some(id) => id, + None => { + let new_data_as_map = self.new_data_as_map(); + new_data_as_map.unwrap().get("id").unwrap().as_str().map(|a| a.to_owned()).unwrap() + }, + } + } } fn clone_ldchange_val_0with_type_fixes(value: &JSONValue, typ: &str) -> JSONValue { - if typ.ends_with("[]") { - let item_type_as_bytes = &typ.as_bytes()[..typ.find("[]").unwrap()]; - let item_type = String::from_utf8(item_type_as_bytes.to_vec()).unwrap(); - return parse_postgres_array(value.as_str().unwrap(), item_type == "jsonb"); - } - // like above, except for alternate naming scheme (eg. "_text" instead of "text[]") - /*if typ.starts_with("_") { - let item_type_as_bytes = &typ.as_bytes()[1..]; - let item_type = String::from_utf8(item_type_as_bytes.to_vec()).unwrap(); - return parse_postgres_array(value.as_str().unwrap(), item_type == "jsonb"); - }*/ - match typ { - "jsonb" => { - // the wal2json-sourced LDChange vals of type jsonb are initially stored as strings - // convert that to a serde_json::Value::Object, so serde_json::from_value(...) can auto-deserialize it to a nested struct - match value.as_str() { - Some(val_as_str) => { - serde_json::from_str(val_as_str).unwrap() - }, - None => serde_json::Value::Null, - } - }, - _ => value.clone(), - } + if typ.ends_with("[]") { + let item_type_as_bytes = &typ.as_bytes()[..typ.find("[]").unwrap()]; + let item_type = String::from_utf8(item_type_as_bytes.to_vec()).unwrap(); + return parse_postgres_array(value.as_str().unwrap(), item_type == "jsonb"); + } + // like above, except for alternate naming scheme (eg. "_text" instead of "text[]") + /*if typ.starts_with("_") { + let item_type_as_bytes = &typ.as_bytes()[1..]; + let item_type = String::from_utf8(item_type_as_bytes.to_vec()).unwrap(); + return parse_postgres_array(value.as_str().unwrap(), item_type == "jsonb"); + }*/ + match typ { + "jsonb" => { + // the wal2json-sourced LDChange vals of type jsonb are initially stored as strings + // convert that to a serde_json::Value::Object, so serde_json::from_value(...) 
can auto-deserialize it to a nested struct + match value.as_str() { + Some(val_as_str) => serde_json::from_str(val_as_str).unwrap(), + None => serde_json::Value::Null, + } + }, + _ => value.clone(), + } } -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(Debug, Clone, Deserialize)] pub struct OldKeys { - pub keynames: Vec, - pub keytypes: Vec, - pub keyvalues: Vec, + pub keynames: Vec, + pub keytypes: Vec, + pub keyvalues: Vec, - // custom - pub needs_wal2json_jsonval_fixes: Option, + // custom + pub needs_wal2json_jsonval_fixes: Option, } } impl OldKeys { - pub fn data_as_map(&self) -> RowData { - let mut new_entry: RowData = Map::new(); - for (i, key) in self.keynames.iter().enumerate() { - let typ = self.keytypes.get(i).unwrap(); - let value = self.keyvalues.get(i).unwrap(); - let value_final = if self.needs_wal2json_jsonval_fixes.unwrap_or(true) { // if unspecified, try to apply fixes - clone_ldchange_val_0with_type_fixes(value, typ) - } else { - value.clone() - }; - new_entry.insert(key.to_owned(), value_final); - } - new_entry - } -} \ No newline at end of file + pub fn data_as_map(&self) -> RowData { + let mut new_entry: RowData = Map::new(); + for (i, key) in self.keynames.iter().enumerate() { + let typ = self.keytypes.get(i).unwrap(); + let value = self.keyvalues.get(i).unwrap(); + let value_final = if self.needs_wal2json_jsonval_fixes.unwrap_or(true) { + // if unspecified, try to apply fixes + clone_ldchange_val_0with_type_fixes(value, typ) + } else { + value.clone() + }; + new_entry.insert(key.to_owned(), value_final); + } + new_entry + } +} diff --git a/Packages/app-server/src/utils/db/queries.rs b/Packages/app-server/src/utils/db/queries.rs index bcecb8ac3..3b4db8d36 100644 --- a/Packages/app-server/src/utils/db/queries.rs +++ b/Packages/app-server/src/utils/db/queries.rs @@ -1,96 +1,116 @@ -use std::{any::TypeId, pin::Pin, task::{Poll, Waker}, time::{Duration, Instant, SystemTime, UNIX_EPOCH}, cell::RefCell}; -use rust_shared::{anyhow::{bail, Context, Error}, serde_json, async_graphql, to_anyhow, new_mtx, utils::{mtx::mtx::Mtx, type_aliases::RowData}}; -use rust_shared::async_graphql::{Result, async_stream::{stream, self}, OutputType, Object, Positioned, parser::types::Field}; use deadpool_postgres::{Pool, Transaction}; +use futures_util::{stream, Future, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use metrics::{counter, histogram}; +use rust_shared::async_graphql::{ + async_stream::{self, stream}, + parser::types::Field, + Object, OutputType, Positioned, Result, +}; use rust_shared::flume::Sender; -use futures_util::{Stream, StreamExt, Future, stream, TryFutureExt, TryStreamExt}; -use rust_shared::serde::{Serialize, Deserialize, de::DeserializeOwned}; +use rust_shared::serde::{de::DeserializeOwned, Deserialize, Serialize}; use rust_shared::serde_json::{json, Map}; -use rust_shared::tokio_postgres::{Client, Row, types::ToSql, Statement}; -use tracing::{info, trace, debug}; +use rust_shared::tokio_postgres::{types::ToSql, Client, Row, Statement}; use rust_shared::uuid::Uuid; -use metrics::{counter, histogram}; +use rust_shared::{ + anyhow::{bail, Context, Error}, + async_graphql, new_mtx, serde_json, to_anyhow, + utils::{mtx::mtx::Mtx, type_aliases::RowData}, +}; +use std::{ + any::TypeId, + cell::RefCell, + pin::Pin, + task::{Poll, Waker}, + time::{Duration, Instant, SystemTime, UNIX_EPOCH}, +}; +use tracing::{debug, info, trace}; -use crate::{store::{live_queries::{LQStorageArc, LQStorage, DropLQWatcherMsg}, storage::get_app_state_from_gql_ctx}, 
utils::{db::{sql_fragment::{SQLFragment}, sql_ident::SQLIdent}, type_aliases::PGClientObject,}, db::commands::_command::ToSqlWrapper}; -use super::{filter::QueryFilter, accessors::AccessorContext}; +use super::{accessors::AccessorContext, filter::QueryFilter}; +use crate::{ + db::commands::_command::ToSqlWrapper, + store::{ + live_queries::{DropLQWatcherMsg, LQStorage, LQStorageArc}, + storage::get_app_state_from_gql_ctx, + }, + utils::{ + db::{sql_fragment::SQLFragment, sql_ident::SQLIdent}, + type_aliases::PGClientObject, + }, +}; /*type QueryFunc_ResultType = Result, tokio_postgres::Error>; type QueryFunc = Box< - dyn Fn(&str, &[&(dyn ToSql + Sync)]) - -> - Pin - >> + dyn Fn(&str, &[&(dyn ToSql + Sync)]) + -> + Pin + >> >; fn force_boxed(f: fn(&str, &[&(dyn ToSql + Sync)]) -> T) -> QueryFunc where - T: Future + 'static, + T: Future + 'static, { - Box::new(move |a, b| Box::pin(f(a, b))) + Box::new(move |a, b| Box::pin(f(a, b))) }*/ //pub type QueryFunc = FnOnce(String/*, &'a [&(dyn ToSql + Sync)]*/) -> QueryFuncReturn; /*pub type QueryFunc = dyn FnOnce(SQLFragment) -> QueryFuncReturn; pub type QueryFuncReturn = dyn Future, tokio_postgres::Error>>;*/ -pub async fn get_entries_in_collection_base + Serialize, QueryFunc, QueryFuncReturn>( - query_func: QueryFunc, table_name: String, filter: &QueryFilter, parent_mtx: Option<&Mtx>, -) -> Result<(Vec, Vec), Error> - where - QueryFunc: FnOnce(SQLFragment) -> QueryFuncReturn, - QueryFuncReturn: Future, Error>>, +pub async fn get_entries_in_collection_base + Serialize, QueryFunc, QueryFuncReturn>(query_func: QueryFunc, table_name: String, filter: &QueryFilter, parent_mtx: Option<&Mtx>) -> Result<(Vec, Vec), Error> +where + QueryFunc: FnOnce(SQLFragment) -> QueryFuncReturn, + QueryFuncReturn: Future, Error>>, { - new_mtx!(mtx, "1:run query", parent_mtx); - //let filters_sql = get_sql_for_query_filter(filter, None, None).with_context(|| format!("Got error while getting sql for filter:{filter:?}"))?; - let filters_sql = filter.get_sql_for_application().with_context(|| format!("Got error while getting sql for filter:{filter:?}"))?; - let filters_sql_str = filters_sql.to_string(); // workaround for difficulty implementing Clone for SQLFragment () - mtx.current_section.extra_info = Some(format!("@table_name:{table_name} @filters_sql:{filters_sql}")); - - let where_sql = match filters_sql.sql_text.len() { - 0..=2 => SQLFragment::lit(""), - _ => SQLFragment::merge(vec![SQLFragment::lit(" WHERE "), filters_sql]), - }; - info!("Running where clause. @table:{table_name} @where:{where_sql} @filter:{filter:?}"); - let final_query = SQLFragment::merge(vec![ - SQLFragment::new("SELECT * FROM $I", vec![Box::new(SQLIdent::new(table_name.clone())?)]), - where_sql, - ]); - let mut rows = query_func(final_query).await - .with_context(|| format!("Error running select command for entries in table. 
@table:{table_name} @filters_sql:{filters_sql_str}"))?; + new_mtx!(mtx, "1:run query", parent_mtx); + //let filters_sql = get_sql_for_query_filter(filter, None, None).with_context(|| format!("Got error while getting sql for filter:{filter:?}"))?; + let filters_sql = filter.get_sql_for_application().with_context(|| format!("Got error while getting sql for filter:{filter:?}"))?; + let filters_sql_str = filters_sql.to_string(); // workaround for difficulty implementing Clone for SQLFragment () + mtx.current_section.extra_info = Some(format!("@table_name:{table_name} @filters_sql:{filters_sql}")); - mtx.section("2:sort and convert"); - // sort by id, so that order of our results here is consistent with order after live-query-updating modifications (see live_queries.rs) - rows.sort_by_key(|a| a.get::<&str, String>("id")); + let where_sql = match filters_sql.sql_text.len() { + 0..=2 => SQLFragment::lit(""), + _ => SQLFragment::merge(vec![SQLFragment::lit(" WHERE "), filters_sql]), + }; + info!("Running where clause. @table:{table_name} @where:{where_sql} @filter:{filter:?}"); + let final_query = SQLFragment::merge(vec![SQLFragment::new("SELECT * FROM $I", vec![Box::new(SQLIdent::new(table_name.clone())?)]), where_sql]); + let mut rows = query_func(final_query).await.with_context(|| format!("Error running select command for entries in table. @table:{table_name} @filters_sql:{filters_sql_str}"))?; - let entries_as_type: Vec = rows.into_iter().map(|r| r.into()).collect(); - let entries: Vec = entries_as_type.iter().map(|r| { - let json_val = serde_json::to_value(r).unwrap(); - json_val.as_object().unwrap().clone() - }).collect(); + mtx.section("2:sort and convert"); + // sort by id, so that order of our results here is consistent with order after live-query-updating modifications (see live_queries.rs) + rows.sort_by_key(|a| a.get::<&str, String>("id")); - Ok((entries, entries_as_type)) + let entries_as_type: Vec = rows.into_iter().map(|r| r.into()).collect(); + let entries: Vec = entries_as_type + .iter() + .map(|r| { + let json_val = serde_json::to_value(r).unwrap(); + json_val.as_object().unwrap().clone() + }) + .collect(); + + Ok((entries, entries_as_type)) } pub async fn get_entries_in_collection + Serialize>(ctx: &AccessorContext<'_>, table_name: String, filter: &QueryFilter, parent_mtx: Option<&Mtx>) -> Result<(Vec, Vec), Error> { - /*new_mtx!(mtx, "1:wait for pg-client", parent_mtx); - let pool = &get_app_state_from_gql_ctx(ctx).db_pool; - let client = pool.get().await.unwrap();*/ + /*new_mtx!(mtx, "1:wait for pg-client", parent_mtx); + let pool = &get_app_state_from_gql_ctx(ctx).db_pool; + let client = pool.get().await.unwrap();*/ + + //mtx.section("2:get entries"); + new_mtx!(mtx, "1:get entries", parent_mtx); + let query_func = |mut sql: SQLFragment| async move { + let (sql_text, params) = sql.into_query_args()?; + info!("Running sql fragment. @sql_text:{sql_text} @params:{params:?}"); - //mtx.section("2:get entries"); - new_mtx!(mtx, "1:get entries", parent_mtx); - let query_func = |mut sql: SQLFragment| async move { - let (sql_text, params) = sql.into_query_args()?; - info!("Running sql fragment. 
@sql_text:{sql_text} @params:{params:?}"); + /*let temp1: Vec> = params.into_iter().map(strip_send_from_tosql_sync_send).collect(); + let temp2: Vec<&(dyn ToSql + Sync)> = temp1.iter().map(|a| a.as_ref()).collect(); + client.query(&sql_text, temp2.as_slice()).await*/ - /*let temp1: Vec> = params.into_iter().map(strip_send_from_tosql_sync_send).collect(); - let temp2: Vec<&(dyn ToSql + Sync)> = temp1.iter().map(|a| a.as_ref()).collect(); - client.query(&sql_text, temp2.as_slice()).await*/ - - let params_wrapped: Vec = params.into_iter().map(|a| ToSqlWrapper { data: a }).collect(); - let params_as_refs: Vec<&(dyn ToSql + Sync)> = params_wrapped.iter().map(|x| x as &(dyn ToSql + Sync)).collect(); + let params_wrapped: Vec = params.into_iter().map(|a| ToSqlWrapper { data: a }).collect(); + let params_as_refs: Vec<&(dyn ToSql + Sync)> = params_wrapped.iter().map(|x| x as &(dyn ToSql + Sync)).collect(); - ctx.tx.query_raw(&sql_text, params_as_refs).await.map_err(to_anyhow)? - .try_collect().await.map_err(to_anyhow) - }; - let (entries, entries_as_type) = get_entries_in_collection_base(query_func, table_name, filter, Some(&mtx)).await?; - Ok((entries, entries_as_type)) -} \ No newline at end of file + ctx.tx.query_raw(&sql_text, params_as_refs).await.map_err(to_anyhow)?.try_collect().await.map_err(to_anyhow) + }; + let (entries, entries_as_type) = get_entries_in_collection_base(query_func, table_name, filter, Some(&mtx)).await?; + Ok((entries, entries_as_type)) +} diff --git a/Packages/app-server/src/utils/db/rls/rls_applier.rs b/Packages/app-server/src/utils/db/rls/rls_applier.rs index 42779a655..9854298f4 100644 --- a/Packages/app-server/src/utils/db/rls/rls_applier.rs +++ b/Packages/app-server/src/utils/db/rls/rls_applier.rs @@ -1,4 +1,9 @@ -use rust_shared::{utils::{auth::jwt_utils_base::UserJWTData, type_aliases::JSONValue}, async_graphql, itertools::Itertools, serde_json}; +use rust_shared::{ + async_graphql, + itertools::Itertools, + serde_json, + utils::{auth::jwt_utils_base::UserJWTData, type_aliases::JSONValue}, +}; use serde::Serialize; use crate::db::{general::sign_in_::jwt_utils::get_user_jwt_data_from_gql_ctx, terms::Term}; @@ -6,47 +11,48 @@ use crate::db::{general::sign_in_::jwt_utils::get_user_jwt_data_from_gql_ctx, te use super::rls_policies::UsesRLS; pub struct RLSApplier { - pub jwt_data: Option, - - //pub last_result_collection: Vec, - //pub last_result_doc: Vec, - - // For the new<>old comparisons, why do we use `serde_json::to_string` rather than `Eq::eq`? - // Because serialization is actually slightly faster, when you need to store the previous value: https://stackoverflow.com/questions/75003821/speed-of-comparing-structs-using-deriveeq-versus-deriveserialize#comment132359839_75003887 - pub last_result_json: Option, + pub jwt_data: Option, + + //pub last_result_collection: Vec, + //pub last_result_doc: Vec, + + // For the new<>old comparisons, why do we use `serde_json::to_string` rather than `Eq::eq`? 
+ // Because serialization is actually slightly faster, when you need to store the previous value: https://stackoverflow.com/questions/75003821/speed-of-comparing-structs-using-deriveeq-versus-deriveserialize#comment132359839_75003887 + pub last_result_json: Option, } impl RLSApplier { - pub fn new(jwt_data: Option) -> Self { - Self { - jwt_data, - last_result_json: None, - } - } - /*pub async fn new(gql_ctx: &async_graphql::Context<'_>) -> Self { - let jwt_data = get_user_jwt_data_from_gql_ctx(gql_ctx).await?; - Self::new(jwt_data) - }*/ - - pub fn filter_next_result_for_collection(&mut self, next_result: Vec) -> (Vec, bool) { - let user_id = self.jwt_data.as_ref().map(|a| a.id.as_str()); - let next_result_final = next_result.into_iter().filter(|a| a.can_access_cached(user_id)).collect_vec(); - let next_result_final_json = serde_json::to_string(&next_result_final).unwrap(); - if let Some(last_result_json) = &self.last_result_json && &next_result_final_json == last_result_json { - return (next_result_final, false); - } - - self.last_result_json = Some(next_result_final_json); - (next_result_final, true) - } - pub fn filter_next_result_for_doc(&mut self, next_result: Option) -> (Option, bool) { - let user_id = self.jwt_data.as_ref().map(|a| a.id.as_str()); - let next_result_final = next_result.filter(|a| a.can_access_cached(user_id)); - let next_result_final_json = serde_json::to_string(&next_result_final).unwrap(); - if let Some(last_result_json) = &self.last_result_json && &next_result_final_json == last_result_json { - return (next_result_final, false); - } - - self.last_result_json = Some(next_result_final_json); - (next_result_final, true) - } -} \ No newline at end of file + pub fn new(jwt_data: Option) -> Self { + Self { jwt_data, last_result_json: None } + } + /*pub async fn new(gql_ctx: &async_graphql::Context<'_>) -> Self { + let jwt_data = get_user_jwt_data_from_gql_ctx(gql_ctx).await?; + Self::new(jwt_data) + }*/ + + pub fn filter_next_result_for_collection(&mut self, next_result: Vec) -> (Vec, bool) { + let user_id = self.jwt_data.as_ref().map(|a| a.id.as_str()); + let next_result_final = next_result.into_iter().filter(|a| a.can_access_cached(user_id)).collect_vec(); + let next_result_final_json = serde_json::to_string(&next_result_final).unwrap(); + if let Some(last_result_json) = &self.last_result_json + && &next_result_final_json == last_result_json + { + return (next_result_final, false); + } + + self.last_result_json = Some(next_result_final_json); + (next_result_final, true) + } + pub fn filter_next_result_for_doc(&mut self, next_result: Option) -> (Option, bool) { + let user_id = self.jwt_data.as_ref().map(|a| a.id.as_str()); + let next_result_final = next_result.filter(|a| a.can_access_cached(user_id)); + let next_result_final_json = serde_json::to_string(&next_result_final).unwrap(); + if let Some(last_result_json) = &self.last_result_json + && &next_result_final_json == last_result_json + { + return (next_result_final, false); + } + + self.last_result_json = Some(next_result_final_json); + (next_result_final, true) + } +} diff --git a/Packages/app-server/src/utils/db/rls/rls_helpers.rs b/Packages/app-server/src/utils/db/rls/rls_helpers.rs index 4e44e0063..4fab0af14 100644 --- a/Packages/app-server/src/utils/db/rls/rls_helpers.rs +++ b/Packages/app-server/src/utils/db/rls/rls_helpers.rs @@ -1,97 +1,113 @@ use std::collections::{HashMap, HashSet}; -use rust_shared::{utils::{auth::jwt_utils_base::UserJWTData, general_::extensions::ToOwnedV}, anyhow::Error}; -use 
tracing::{warn, info}; +use rust_shared::{ + anyhow::Error, + utils::{auth::jwt_utils_base::UserJWTData, general_::extensions::ToOwnedV}, +}; +use tracing::{info, warn}; -use crate::{db::{access_policies_::{_access_policy::AccessPolicy, _permission_set::APTable}, _shared::access_policy_target::AccessPolicyTarget, access_policies::get_access_policy}, links::db_live_cache::{get_admin_user_ids_cached, get_access_policy_cached}, utils::db::accessors::AccessorContext}; +use crate::{ + db::{ + _shared::access_policy_target::AccessPolicyTarget, + access_policies::get_access_policy, + access_policies_::{_access_policy::AccessPolicy, _permission_set::APTable}, + }, + links::db_live_cache::{get_access_policy_cached, get_admin_user_ids_cached}, + utils::db::accessors::AccessorContext, +}; // sync:sql[RLSHelpers.sql] pub(super) fn is_user_admin_or_creator(user_id: Option<&str>, creator_id: &str) -> bool { - is_user_admin(user_id) || is_user_creator(user_id, creator_id) + is_user_admin(user_id) || is_user_creator(user_id, creator_id) } pub(super) fn is_user_creator(user_id: Option<&str>, creator_id: &str) -> bool { - if let Some(user_id) = user_id && user_id == creator_id { true } else { false } + if let Some(user_id) = user_id + && user_id == creator_id + { + true + } else { + false + } } pub(super) fn is_user_admin(user_id: Option<&str>) -> bool { - try_is_user_admin(user_id).unwrap_or_else(|err| { - warn!("Got error in try_is_user_admin (should only happen rarely): {:?}", err); - false - }) + try_is_user_admin(user_id).unwrap_or_else(|err| { + warn!("Got error in try_is_user_admin (should only happen rarely): {:?}", err); + false + }) } pub(super) fn try_is_user_admin(user_id: Option<&str>) -> Result { - match user_id { - Some(user_id) => { - let admin_user_ids: HashSet = get_admin_user_ids_cached()?; - //info!("admin_user_ids: {:?} @me_id:{}", admin_user_ids, user_id); - Ok(admin_user_ids.contains(user_id)) - }, - None => Ok(false), - } + match user_id { + Some(user_id) => { + let admin_user_ids: HashSet = get_admin_user_ids_cached()?; + //info!("admin_user_ids: {:?} @me_id:{}", admin_user_ids, user_id); + Ok(admin_user_ids.contains(user_id)) + }, + None => Ok(false), + } } // policy checks (wrappers around functions in _permission_set.rs, since it retrieves the policies from the special cache) // ========== pub(super) fn does_policy_allow_access(user_id: Option<&str>, policy_id: &str, table: APTable) -> bool { - try_does_policy_allow_access(user_id, policy_id, table).unwrap_or_else(|err| { - warn!("Got error in try_does_policy_allow_access (should only happen rarely): {:?}", err); - false - }) + try_does_policy_allow_access(user_id, policy_id, table).unwrap_or_else(|err| { + warn!("Got error in try_does_policy_allow_access (should only happen rarely): {:?}", err); + false + }) } pub(super) fn try_does_policy_allow_access(user_id: Option<&str>, policy_id: &str, table: APTable) -> Result { - let policy: AccessPolicy = get_access_policy_cached(policy_id)?; - if policy.permissions.for_table(table).access - && policy.permission_extends_for_user_and_table(user_id, table).map(|a| a.access.clone()) != Some(false) { - return Ok(true); - } + let policy: AccessPolicy = get_access_policy_cached(policy_id)?; + if policy.permissions.for_table(table).access && policy.permission_extends_for_user_and_table(user_id, table).map(|a| a.access.clone()) != Some(false) { + return Ok(true); + } - if policy.permission_extends_for_user_and_table(user_id, table).map(|a| a.access) == Some(true) { - return Ok(true); - } + if 
policy.permission_extends_for_user_and_table(user_id, table).map(|a| a.access) == Some(true) { + return Ok(true); + } - Ok(false) + Ok(false) } //pub(super) fn try_does_policy_allow_access_base(user_id: Option<&str>, policy: &AccessPolicy, table: APTable) -> Result { ... } pub(super) fn do_policies_allow_access(user_id: Option<&str>, policy_targets: &Vec) -> bool { - try_do_policies_allow_access(user_id, policy_targets).unwrap_or_else(|err| { - warn!("Got error in try_do_policies_allow_access (should only happen rarely): {:?}", err); - false - }) + try_do_policies_allow_access(user_id, policy_targets).unwrap_or_else(|err| { + warn!("Got error in try_do_policies_allow_access (should only happen rarely): {:?}", err); + false + }) } pub(super) fn try_do_policies_allow_access(user_id: Option<&str>, policy_targets: &Vec) -> Result { - // The `c_accessPolicyTargets` fields should always[*] have at least one entry in them; if not, something is wrong, so play it safe and reject access. + // The `c_accessPolicyTargets` fields should always[*] have at least one entry in them; if not, something is wrong, so play it safe and reject access. // (Most tables enforce non-emptiness of this field with a row constraint, [*]but nodeTags is an exception; its associated nodes may be deleted, leaving it without any targets.) // (This line thus serves to prevent "orphaned node-tags" from being visible by non-admins, as well as a general-purpose "second instance" of the non-emptiness check.) - if policy_targets.is_empty() { - return Ok(false); - } - - for target in policy_targets { - if !does_policy_allow_access(user_id, &target.policy_id, target.ap_table) { - return Ok(false); - } - } + if policy_targets.is_empty() { + return Ok(false); + } - Ok(true) + for target in policy_targets { + if !does_policy_allow_access(user_id, &target.policy_id, target.ap_table) { + return Ok(false); + } + } + + Ok(true) } /*pub(super) async fn try_do_policies_allow_access_ctx(ctx: &AccessorContext<'_>, user_id: Option<&str>, policy_targets: &Vec) -> Result { - // The `c_accessPolicyTargets` fields should always have at least one entry in them; if not, something is wrong, so play it safe and reject access. + // The `c_accessPolicyTargets` fields should always have at least one entry in them; if not, something is wrong, so play it safe and reject access. // (Most tables enforce non-emptiness of this field with a row constraint, but nodeTags is an exception; its associated nodes may be deleted, leaving it without any targets.) // (This line thus serves to prevent "orphaned node-tags" from being visible by non-admins, as well as a general-purpose "second instance" of the non-emptiness check.) - if policy_targets.is_empty() { - return Ok(false); - } - - for target in policy_targets { - let policy = get_access_policy(ctx, &target.policy_id).await?; - if !try_does_policy_allow_access_base(user_id, &policy, target.policy_subfield)? { - return Ok(false); - } - } + if policy_targets.is_empty() { + return Ok(false); + } + + for target in policy_targets { + let policy = get_access_policy(ctx, &target.policy_id).await?; + if !try_does_policy_allow_access_base(user_id, &policy, target.policy_subfield)? 
{ + return Ok(false); + } + } - Ok(true) -}*/ \ No newline at end of file + Ok(true) +}*/ diff --git a/Packages/app-server/src/utils/db/rls/rls_policies.rs b/Packages/app-server/src/utils/db/rls/rls_policies.rs index 62ed6dc5f..b5fb97030 100644 --- a/Packages/app-server/src/utils/db/rls/rls_policies.rs +++ b/Packages/app-server/src/utils/db/rls/rls_policies.rs @@ -1,123 +1,167 @@ -use rust_shared::{utils::{auth::jwt_utils_base::UserJWTData, general_::extensions::ToOwnedV}, anyhow::{bail, anyhow}, anyhow::Error}; +use rust_shared::{ + anyhow::Error, + anyhow::{anyhow, bail}, + utils::{auth::jwt_utils_base::UserJWTData, general_::extensions::ToOwnedV}, +}; use tracing::info; -use crate::{db::{terms::Term, access_policies::{get_access_policy}, map_node_edits::MapNodeEdit, user_hiddens::UserHidden, command_runs::CommandRun, node_tags::NodeTag, node_revisions::NodeRevision, node_ratings::NodeRating, node_phrasings::NodePhrasing, node_links::NodeLink, nodes_::_node::Node, maps::Map, medias::Media, feedback_proposals::Proposal, shares::Share, global_data::GlobalData, users::User, access_policies_::{_permission_set::{APAction, APTable}, _access_policy::AccessPolicy}, _shared::access_policy_target::AccessPolicyTarget, timelines::Timeline, timeline_steps::TimelineStep}, links::db_live_cache::get_access_policy_cached}; +use crate::{ + db::{ + _shared::access_policy_target::AccessPolicyTarget, + access_policies::get_access_policy, + access_policies_::{ + _access_policy::AccessPolicy, + _permission_set::{APAction, APTable}, + }, + command_runs::CommandRun, + feedback_proposals::Proposal, + global_data::GlobalData, + map_node_edits::MapNodeEdit, + maps::Map, + medias::Media, + node_links::NodeLink, + node_phrasings::NodePhrasing, + node_ratings::NodeRating, + node_revisions::NodeRevision, + node_tags::NodeTag, + nodes_::_node::Node, + shares::Share, + terms::Term, + timeline_steps::TimelineStep, + timelines::Timeline, + user_hiddens::UserHidden, + users::User, + }, + links::db_live_cache::get_access_policy_cached, +}; -use super::rls_helpers::{is_user_creator, does_policy_allow_access, do_policies_allow_access, is_user_admin, is_user_admin_or_creator}; +use super::rls_helpers::{do_policies_allow_access, does_policy_allow_access, is_user_admin, is_user_admin_or_creator, is_user_creator}; // sync:sql[RLSPolicies.sql] pub trait UsesRLS { - fn can_access_cached(&self, user_id: Option<&str>) -> bool; + fn can_access_cached(&self, user_id: Option<&str>) -> bool; } // empty policies (ie. 
can always be viewed by anyone) [these functions are not needed in sql version] // ========== impl UsesRLS for AccessPolicy { - fn can_access_cached(&self, _user_id: Option<&str>) -> bool { true } + fn can_access_cached(&self, _user_id: Option<&str>) -> bool { + true + } } impl UsesRLS for Share { - fn can_access_cached(&self, _user_id: Option<&str>) -> bool { true } + fn can_access_cached(&self, _user_id: Option<&str>) -> bool { + true + } } impl UsesRLS for GlobalData { - fn can_access_cached(&self, _user_id: Option<&str>) -> bool { true } + fn can_access_cached(&self, _user_id: Option<&str>) -> bool { + true + } } impl UsesRLS for User { - fn can_access_cached(&self, _user_id: Option<&str>) -> bool { true } + fn can_access_cached(&self, _user_id: Option<&str>) -> bool { + true + } } // likely to be removed at some point impl UsesRLS for Proposal { - fn can_access_cached(&self, _user_id: Option<&str>) -> bool { true } + fn can_access_cached(&self, _user_id: Option<&str>) -> bool { + true + } } impl UsesRLS for crate::db::feedback_user_infos::UserInfo { - fn can_access_cached(&self, _user_id: Option<&str>) -> bool { true } + fn can_access_cached(&self, _user_id: Option<&str>) -> bool { + true + } } // simple RLS policies (where to access, it must be that: user is creator, user is admin, entry's policy allows general access [without user-specific block], or entry's policy has user-specific grant) // ========== impl UsesRLS for Term { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - is_user_admin_or_creator(user_id, &self.creator) || does_policy_allow_access(user_id, &self.accessPolicy, APTable::terms) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + is_user_admin_or_creator(user_id, &self.creator) || does_policy_allow_access(user_id, &self.accessPolicy, APTable::terms) + } } impl UsesRLS for Media { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - is_user_admin_or_creator(user_id, &self.creator) || does_policy_allow_access(user_id, &self.accessPolicy, APTable::medias) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + is_user_admin_or_creator(user_id, &self.creator) || does_policy_allow_access(user_id, &self.accessPolicy, APTable::medias) + } } impl UsesRLS for Map { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - is_user_admin_or_creator(user_id, &self.creator) || does_policy_allow_access(user_id, &self.accessPolicy, APTable::maps) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + is_user_admin_or_creator(user_id, &self.creator) || does_policy_allow_access(user_id, &self.accessPolicy, APTable::maps) + } } impl UsesRLS for Node { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - is_user_admin_or_creator(user_id, &self.creator) || does_policy_allow_access(user_id, &self.accessPolicy, APTable::nodes) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + is_user_admin_or_creator(user_id, &self.creator) || does_policy_allow_access(user_id, &self.accessPolicy, APTable::nodes) + } } impl UsesRLS for Timeline { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - is_user_admin_or_creator(user_id, &self.creator) || does_policy_allow_access(user_id, &self.accessPolicy, APTable::others) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + is_user_admin_or_creator(user_id, &self.creator) || does_policy_allow_access(user_id, &self.accessPolicy, APTable::others) + } } // derivative RLS policies (where to access an entry, the RLS policies of 
its associated objects must all pass) // ========== impl UsesRLS for NodeLink { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - is_user_admin_or_creator(user_id, &self.creator) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + is_user_admin_or_creator(user_id, &self.creator) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) + } } impl UsesRLS for NodePhrasing { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - is_user_admin_or_creator(user_id, &self.creator) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + is_user_admin_or_creator(user_id, &self.creator) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) + } } impl UsesRLS for NodeRating { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - is_user_admin_or_creator(user_id, &self.creator) || is_user_creator(user_id, &self.creator) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + is_user_admin_or_creator(user_id, &self.creator) || is_user_creator(user_id, &self.creator) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) + } } impl UsesRLS for NodeRevision { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - //info!("Test1 @is_user_admin:{} @do_policies_allow_access:{}", is_user_admin(user_id), do_policies_allow_access(user_id, &self.c_accessPolicyTargets)); - is_user_admin_or_creator(user_id, &self.creator) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + //info!("Test1 @is_user_admin:{} @do_policies_allow_access:{}", is_user_admin(user_id), do_policies_allow_access(user_id, &self.c_accessPolicyTargets)); + is_user_admin_or_creator(user_id, &self.creator) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) + } } impl UsesRLS for NodeTag { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - is_user_admin_or_creator(user_id, &self.creator) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + is_user_admin_or_creator(user_id, &self.creator) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) + } } impl UsesRLS for TimelineStep { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - is_user_admin_or_creator(user_id, &self.creator) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + is_user_admin_or_creator(user_id, &self.creator) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) + } } // unique RLS policies // ========== impl UsesRLS for MapNodeEdit { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - //is_user_admin_or_creator(user_id, creator) || do_policies_allow_access(user_id, accessPolicyTargets) - is_user_admin(user_id) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + //is_user_admin_or_creator(user_id, creator) || do_policies_allow_access(user_id, accessPolicyTargets) + is_user_admin(user_id) || do_policies_allow_access(user_id, &self.c_accessPolicyTargets) + } } impl UsesRLS for UserHidden { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - 
is_user_admin(user_id) || user_id == Some(self.id.as_str()) - } + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + is_user_admin(user_id) || user_id == Some(self.id.as_str()) + } } impl UsesRLS for CommandRun { - fn can_access_cached(&self, user_id: Option<&str>) -> bool { - is_user_admin(user_id) || ( - // public_base = true, iff the Command class has "canShowInStream" enabled, and the user has "addToStream" enabled (see CommandMacros/General.ts) - self.public_base - && do_policies_allow_access(user_id, &self.c_accessPolicyTargets) - ) - } -} \ No newline at end of file + fn can_access_cached(&self, user_id: Option<&str>) -> bool { + is_user_admin(user_id) + || ( + // public_base = true, iff the Command class has "canShowInStream" enabled, and the user has "addToStream" enabled (see CommandMacros/General.ts) + self.public_base && do_policies_allow_access(user_id, &self.c_accessPolicyTargets) + ) + } +} diff --git a/Packages/app-server/src/utils/db/sql_fragment.rs b/Packages/app-server/src/utils/db/sql_fragment.rs index 0b37fe752..bb534895e 100644 --- a/Packages/app-server/src/utils/db/sql_fragment.rs +++ b/Packages/app-server/src/utils/db/sql_fragment.rs @@ -1,10 +1,14 @@ -use std::{fmt::Display, sync::atomic::AtomicI32, iter::{once, Once}}; -use rust_shared::anyhow::{anyhow, bail, Context, Error, ensure}; +use lazy_static::lazy_static; +use rust_shared::anyhow::{anyhow, bail, ensure, Context, Error}; use rust_shared::itertools::Itertools; -use rust_shared::regex::{Regex, Captures}; +use rust_shared::regex::{Captures, Regex}; use rust_shared::serde_json::Map; use rust_shared::tokio_postgres::types::ToSql; -use lazy_static::lazy_static; +use std::{ + fmt::Display, + iter::{once, Once}, + sync::atomic::AtomicI32, +}; use super::sql_param::{SQLParam, SQLParamBoxed}; @@ -12,115 +16,120 @@ use super::sql_param::{SQLParam, SQLParamBoxed}; pub type SF = SQLFragment; #[derive(Clone)] // can't do this atm, since can't have ToSql+Clone for params field (see: https://github.com/rust-lang/rust/issues/32220) pub struct SQLFragment { - pub sql_text: String, - pub params: Vec, + pub sql_text: String, + pub params: Vec, } /*impl Clone for SQLFragment { - fn clone(&self) -> Self { - SQLFragment { - sql_text: self.sql_text.clone(), - params: self.params. - } - } - fn clone_from(&mut self, source: &Self) { - - } + fn clone(&self) -> Self { + SQLFragment { + sql_text: self.sql_text.clone(), + params: self.params. + } + } + fn clone_from(&mut self, source: &Self) { + + } }*/ impl SQLFragment { - /// For param-placeholders in sql_text, use $I for identifiers, and $V for values. - /// Note: In sql-text, don't add quotes around these markers/placeholders. (only exceptions are some complex structures, eg. the outer quotes and brackets for jsonb arrays) - pub fn new(sql_text: &'static str, params: Vec) -> Self { - Self { - sql_text: sql_text.to_owned(), - //params: params.into_iter().map(|a| Box::new(a) as ParamType).collect(), - params: params, - } - } - pub fn lit(sql_text: &'static str) -> Self { - Self::new(sql_text, vec![]) - } - // helpers for "raw" param-fragments - pub fn ident(param: T) -> Self { - SQLFragment::new("$I", vec![Box::new(param)]) - } - pub fn value(param: T) -> Self { - SQLFragment::new("$V", vec![Box::new(param)]) - } + /// For param-placeholders in sql_text, use $I for identifiers, and $V for values. + /// Note: In sql-text, don't add quotes around these markers/placeholders. (only exceptions are some complex structures, eg. 
the outer quotes and brackets for jsonb arrays) + pub fn new(sql_text: &'static str, params: Vec) -> Self { + Self { + sql_text: sql_text.to_owned(), + //params: params.into_iter().map(|a| Box::new(a) as ParamType).collect(), + params, + } + } + pub fn lit(sql_text: &'static str) -> Self { + Self::new(sql_text, vec![]) + } + // helpers for "raw" param-fragments + pub fn ident(param: T) -> Self { + SQLFragment::new("$I", vec![Box::new(param)]) + } + pub fn value(param: T) -> Self { + SQLFragment::new("$V", vec![Box::new(param)]) + } - /// Wraps this fragment in a `once` iterator; this makes it easy to use in the itertool `chain!(...)` macro - pub fn once(self) -> Once { - once(self) - } + /// Wraps this fragment in a `once` iterator; this makes it easy to use in the itertool `chain!(...)` macro + pub fn once(self) -> Once { + once(self) + } - /// Only use this when you have to: when the number/placement of Identifiers in the SQL query-text is dynamic. - /*pub fn INTERPOLATED_SQL(sql_text: String, params: Vec) -> Self { - Self { - sql_text: sql_text, - params: params, - } - }*/ + /// Only use this when you have to: when the number/placement of Identifiers in the SQL query-text is dynamic. + /*pub fn INTERPOLATED_SQL(sql_text: String, params: Vec) -> Self { + Self { + sql_text: sql_text, + params: params, + } + }*/ - pub fn merge(fragments: Vec) -> SQLFragment { - let mut sql_text = "".to_owned(); - let mut params: Vec = vec![]; - for fragment in fragments { - sql_text += &fragment.sql_text; - for param in fragment.params { - params.push(param); - } - } - Self { sql_text, params } - } - /// Like `merge()`, except having a SQLFragment of `\n` inserted between each provided fragment. - pub fn merge_lines(line_fragments: Vec) -> SQLFragment { - let mut final_fragments = vec![]; - for (i, frag) in line_fragments.into_iter().enumerate() { - if i > 0 { - final_fragments.push(Self::lit("\n")); - } - final_fragments.push(frag); - } - Self::merge(final_fragments) - } + pub fn merge(fragments: Vec) -> SQLFragment { + let mut sql_text = "".to_owned(); + let mut params: Vec = vec![]; + for fragment in fragments { + sql_text += &fragment.sql_text; + for param in fragment.params { + params.push(param); + } + } + Self { sql_text, params } + } + /// Like `merge()`, except having a SQLFragment of `\n` inserted between each provided fragment. + pub fn merge_lines(line_fragments: Vec) -> SQLFragment { + let mut final_fragments = vec![]; + for (i, frag) in line_fragments.into_iter().enumerate() { + if i > 0 { + final_fragments.push(Self::lit("\n")); + } + final_fragments.push(frag); + } + Self::merge(final_fragments) + } - pub fn into_query_args(&mut self) -> Result<(String, Vec), Error> { - let sql_base = std::mem::replace(&mut self.sql_text, "".to_owned()); - lazy_static! { - static ref REGEX_PLACEHOLDER: Regex = Regex::new(r"\$[IV]").unwrap(); - } + pub fn into_query_args(&mut self) -> Result<(String, Vec), Error> { + let sql_base = std::mem::replace(&mut self.sql_text, "".to_owned()); + lazy_static! 
{ + static ref REGEX_PLACEHOLDER: Regex = Regex::new(r"\$[IV]").unwrap(); + } - let mut next_match_index = 0; - let mut next_value_id = 1; - let mut error = None; - let sql_final = REGEX_PLACEHOLDER.replace_all(&sql_base, |caps: &Captures| { - let result = (|| { - //println!("Replacing sql-param placeholder at:{:?}", caps.get(0).to_owned()); - let caps_g0 = caps.get(0).ok_or(anyhow!("Capture was missing/invalid."))?; - let match_index = next_match_index; - next_match_index += 1; - let param = self.params.get(match_index).with_context(|| format!("SQL query-string references param with index {match_index}, but no corresponding param was found."))?; - let slot_index_offered = next_value_id; + let mut next_match_index = 0; + let mut next_value_id = 1; + let mut error = None; + let sql_final = REGEX_PLACEHOLDER + .replace_all(&sql_base, |caps: &Captures| { + let result = (|| { + //println!("Replacing sql-param placeholder at:{:?}", caps.get(0).to_owned()); + let caps_g0 = caps.get(0).ok_or(anyhow!("Capture was missing/invalid."))?; + let match_index = next_match_index; + next_match_index += 1; + let param = self.params.get(match_index).with_context(|| format!("SQL query-string references param with index {match_index}, but no corresponding param was found."))?; + let slot_index_offered = next_value_id; - let (consume_slot, slot_type_required, interpolation_text) = param.prep_integrate(slot_index_offered)?; - // defensive - if caps_g0.as_str() != slot_type_required { - return Err(anyhow!("Placeholder-type provided ({}) does not match the type required ({}) for the value provided.", caps_g0.as_str(), slot_type_required)) - } - if consume_slot { - next_value_id += 1; - } - Ok(interpolation_text) - })(); - result.map_err(|err| error = Some(err)).unwrap_or_default() - }).into_owned(); - //error.map_or(Ok(()), |a| Err(a))?; - if let Some(err) = error { return Err(err); } + let (consume_slot, slot_type_required, interpolation_text) = param.prep_integrate(slot_index_offered)?; + // defensive + if caps_g0.as_str() != slot_type_required { + return Err(anyhow!("Placeholder-type provided ({}) does not match the type required ({}) for the value provided.", caps_g0.as_str(), slot_type_required)); + } + if consume_slot { + next_value_id += 1; + } + Ok(interpolation_text) + })(); + result.map_err(|err| error = Some(err)).unwrap_or_default() + }) + .into_owned(); + //error.map_or(Ok(()), |a| Err(a))?; + if let Some(err) = error { + return Err(err); + } + + // defensive + let placeholders_found = next_match_index; + ensure!(placeholders_found == self.params.len(), "Placeholder and param lengths differ!"); - // defensive - let placeholders_found = next_match_index; - ensure!(placeholders_found == self.params.len(), "Placeholder and param lengths differ!"); - - let params_base = std::mem::replace(&mut self.params, vec![]); + let params_base = std::mem::replace(&mut self.params, vec![]); + #[rustfmt::skip] let params_final = params_base.into_iter().filter(|a| { match a.prep_integrate(0) { Ok(result) => { @@ -130,13 +139,13 @@ impl SQLFragment { Err(_err) => false, } }).collect(); - - Ok((sql_final, params_final)) - } + + Ok((sql_final, params_final)) + } } impl Display for SQLFragment { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_fmt(format_args!("(sql:\"{}\", params:{:?})", self.sql_text, self.params))?; - Ok(()) - } -} \ No newline at end of file + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_fmt(format_args!("(sql:\"{}\", params:{:?})", 
self.sql_text, self.params))?; + Ok(()) + } +} diff --git a/Packages/app-server/src/utils/db/sql_ident.rs b/Packages/app-server/src/utils/db/sql_ident.rs index bc397dfe6..7dd0b9d92 100644 --- a/Packages/app-server/src/utils/db/sql_ident.rs +++ b/Packages/app-server/src/utils/db/sql_ident.rs @@ -1,67 +1,86 @@ -use std::{fmt::Display, sync::atomic::AtomicI32, iter::{once, Once}}; -use rust_shared::{anyhow::{anyhow, bail, Context, Error, ensure}, bytes}; +use lazy_static::lazy_static; use rust_shared::itertools::Itertools; -use rust_shared::regex::{Regex, Captures}; -use rust_shared::BasicError; +use rust_shared::regex::{Captures, Regex}; use rust_shared::serde_json::Map; -use rust_shared::{tokio_postgres, tokio_postgres::types::{ToSql, WrongType}}; -use lazy_static::lazy_static; +use rust_shared::BasicError; +use rust_shared::{ + anyhow::{anyhow, bail, ensure, Context, Error}, + bytes, +}; +use rust_shared::{ + tokio_postgres, + tokio_postgres::types::{ToSql, WrongType}, +}; +use std::{ + fmt::Display, + iter::{once, Once}, + sync::atomic::AtomicI32, +}; -use super::{sql_fragment::SQLFragment, sql_param::{SQLParamBoxed, SQLParam, ToSQLFragment, SQLParam_}}; +use super::{ + sql_fragment::SQLFragment, + sql_param::{SQLParam, SQLParamBoxed, SQLParam_, ToSQLFragment}, +}; #[derive(Debug, Clone)] pub struct SQLIdent { - pub name: String, + pub name: String, } impl SQLIdent { - pub fn new(name: String) -> Result { - // defensive (actually: atm, this is required for safety); do extra checks to ensure identifiers only ever consist of alphanumerics and underscores - lazy_static! { - static ref REGEX_SAFE_IDENT: Regex = Regex::new(r"^[a-zA-Z0-9_]+$").unwrap(); - } - ensure!(REGEX_SAFE_IDENT.is_match(&name), "An identifier was attempted to be used that contained invalid characters! Attempted identifier:{name}"); - Ok(Self { - name - }) - } - pub fn new_boxed(name: String) -> Result { - Ok(Box::new(Self::new(name)?)) - } + pub fn new(name: String) -> Result { + // defensive (actually: atm, this is required for safety); do extra checks to ensure identifiers only ever consist of alphanumerics and underscores + lazy_static! { + static ref REGEX_SAFE_IDENT: Regex = Regex::new(r"^[a-zA-Z0-9_]+$").unwrap(); + } + ensure!(REGEX_SAFE_IDENT.is_match(&name), "An identifier was attempted to be used that contained invalid characters! 
Attempted identifier:{name}"); + Ok(Self { name }) + } + pub fn new_boxed(name: String) -> Result { + Ok(Box::new(Self::new(name)?)) + } } // implemented merely to fulfill the type-constraint on SQLParam impl ToSql for SQLIdent { - fn to_sql(&self, _ty: &tokio_postgres::types::Type, _out: &mut bytes::BytesMut) -> Result> where Self: Sized { - panic!("A SQLIdent instance was attempted to be serialized as a sql-param!"); - } - fn accepts(_ty: &tokio_postgres::types::Type) -> bool where Self: Sized { - panic!("A SQLIdent instance was attempted to be serialized as a sql-param!"); - } - fn to_sql_checked(&self, _typ: &tokio_postgres::types::Type, _out: &mut bytes::BytesMut) -> Result> { - panic!("A SQLIdent instance was attempted to be serialized as a sql-param!"); - } + fn to_sql(&self, _ty: &tokio_postgres::types::Type, _out: &mut bytes::BytesMut) -> Result> + where + Self: Sized, + { + panic!("A SQLIdent instance was attempted to be serialized as a sql-param!"); + } + fn accepts(_ty: &tokio_postgres::types::Type) -> bool + where + Self: Sized, + { + panic!("A SQLIdent instance was attempted to be serialized as a sql-param!"); + } + fn to_sql_checked(&self, _typ: &tokio_postgres::types::Type, _out: &mut bytes::BytesMut) -> Result> { + panic!("A SQLIdent instance was attempted to be serialized as a sql-param!"); + } } impl SQLParam_ for SQLIdent { - fn prep_integrate(&self, _offered_slot: i32) -> Result<(bool, &str, String), Error> { - // defensive (actually: atm, this is required for safety); do extra checks to ensure identifiers only ever consist of alphanumerics and underscores - lazy_static! { - static ref REGEX_SAFE_IDENT: Regex = Regex::new(r"^[a-zA-Z0-9_]+$").unwrap(); - } - ensure!(REGEX_SAFE_IDENT.is_match(&self.name), "An identifier was attempted to be used that contained invalid characters! Attempted identifier:{}", &self.name); + fn prep_integrate(&self, _offered_slot: i32) -> Result<(bool, &str, String), Error> { + // defensive (actually: atm, this is required for safety); do extra checks to ensure identifiers only ever consist of alphanumerics and underscores + lazy_static! { + static ref REGEX_SAFE_IDENT: Regex = Regex::new(r"^[a-zA-Z0-9_]+$").unwrap(); + } + ensure!(REGEX_SAFE_IDENT.is_match(&self.name), "An identifier was attempted to be used that contained invalid characters! 
Attempted identifier:{}", &self.name); - //format!("${}", match_id) - // temp; interpolate the identifier directly into the query-str (don't know a way to avoid it atm) - Ok((false, "$I", format!("\"{}\"", self.name))) - } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { self.to_sql_checked(ty, out) } + //format!("${}", match_id) + // temp; interpolate the identifier directly into the query-str (don't know a way to avoid it atm) + Ok((false, "$I", format!("\"{}\"", self.name))) + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + self.to_sql_checked(ty, out) + } } impl SQLParam for SQLIdent {} /*impl ToSQLFragment for SQLIdent { - fn into_ident_fragment(self) -> Result { - Ok(SQLFragment::new("$I", vec![self])) - } - fn into_value_fragment(self) -> Result { - Err(anyhow!("Cannot convert a SQLIdent into a value SQLFragment.")) - } + fn into_ident_fragment(self) -> Result { + Ok(SQLFragment::new("$I", vec![self])) + } + fn into_value_fragment(self) -> Result { + Err(anyhow!("Cannot convert a SQLIdent into a value SQLFragment.")) + } }*/ // Send is needed, else can't be used across .await points @@ -70,4 +89,4 @@ impl SQLParam for SQLIdent {} //pub type ParamType = Box; //pub type ParamType = String; -//pub trait ToSql_Clone: ToSql + Clone {} \ No newline at end of file +//pub trait ToSql_Clone: ToSql + Clone {} diff --git a/Packages/app-server/src/utils/db/sql_param.rs b/Packages/app-server/src/utils/db/sql_param.rs index e15b9a324..f7db5ad59 100644 --- a/Packages/app-server/src/utils/db/sql_param.rs +++ b/Packages/app-server/src/utils/db/sql_param.rs @@ -1,13 +1,21 @@ -use std::{fmt::Display, sync::atomic::AtomicI32, iter::{once, Once}}; -use rust_shared::{tokio_postgres, anyhow::{anyhow, bail, Context, Error, ensure}, bytes, utils::type_aliases::JSONValue}; -use rust_shared::bytes::BytesMut; use dyn_clone::DynClone; +use lazy_static::lazy_static; +use rust_shared::bytes::BytesMut; use rust_shared::itertools::Itertools; -use rust_shared::regex::{Regex, Captures}; -use rust_shared::BasicError; +use rust_shared::regex::{Captures, Regex}; use rust_shared::serde_json::Map; -use rust_shared::tokio_postgres::types::{ToSql, WrongType, Type, IsNull, Kind}; -use lazy_static::lazy_static; +use rust_shared::tokio_postgres::types::{IsNull, Kind, ToSql, Type, WrongType}; +use rust_shared::BasicError; +use rust_shared::{ + anyhow::{anyhow, bail, ensure, Context, Error}, + bytes, tokio_postgres, + utils::type_aliases::JSONValue, +}; +use std::{ + fmt::Display, + iter::{once, Once}, + sync::atomic::AtomicI32, +}; use super::sql_fragment::SQLFragment; @@ -16,13 +24,13 @@ use super::sql_fragment::SQLFragment; /*pub trait SQLParam: ToSql + /*Clone +*/ Sync + std::fmt::Debug + 'static { pub type SQLParam_ = SQLParam;*/ pub trait SQLParam_ /*: /*ToSql +*/ /*Clone +*/ Sync + std::fmt::Debug + 'static*/ { - /// * Returned tuple's first-val is whether to "consume" the parameter-slot that was offered. (ie. whether it gets sent to db as an "actual" query-parameter) - /// * Returned tuple's second-val is the "type" of parameter-slot that the offered slot must match. - /// * Returned tuple's third-val is the text to interpolate into the query-string for this param. 
- fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error>; - - // test - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result>; + /// * Returned tuple's first-val is whether to "consume" the parameter-slot that was offered. (ie. whether it gets sent to db as an "actual" query-parameter) + /// * Returned tuple's second-val is the "type" of parameter-slot that the offered slot must match. + /// * Returned tuple's third-val is the text to interpolate into the query-string for this param. + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error>; + + // test + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result>; } pub trait SQLParam: SQLParam_ + DynClone + /*ToSql +*/ /*+ Clone*/ /*+ ?Sized*/ Send + Sync + std::fmt::Debug + 'static {} @@ -33,173 +41,201 @@ pub type SQLParamBoxed = Box; }*/ //impl ToSql for Box { /*impl ToSql for Box { - fn to_sql(&self, ty: &Type, out: &mut BytesMut) -> Result> where Self: Sized { - let derefed: &dyn ToSql = &**self; - derefed.to_sql(ty, out) - } - fn accepts(ty: &Type) -> bool where Self: Sized { - T::accepts(ty) - } - fn to_sql_checked(&self, ty: &Type, out: &mut BytesMut) -> Result> { - let derefed: &dyn ToSql = &**self; - derefed.to_sql_checked(ty, out) - } + fn to_sql(&self, ty: &Type, out: &mut BytesMut) -> Result> where Self: Sized { + let derefed: &dyn ToSql = &**self; + derefed.to_sql(ty, out) + } + fn accepts(ty: &Type) -> bool where Self: Sized { + T::accepts(ty) + } + fn to_sql_checked(&self, ty: &Type, out: &mut BytesMut) -> Result> { + let derefed: &dyn ToSql = &**self; + derefed.to_sql_checked(ty, out) + } }*/ impl SQLParam_ for Box { - fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { - //(**self).prep_integrate(offered_slot) - T::prep_integrate(self, offered_slot) - } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { - (**self).to_sql_checked_(ty, out) - } + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { + //(**self).prep_integrate(offered_slot) + T::prep_integrate(self, offered_slot) + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + (**self).to_sql_checked_(ty, out) + } } impl SQLParam for Box {} // rather than this, just use `Box::new(x)` /*pub fn sql_param_boxed(val: T) -> SQLParamBoxed { - Box::new(val) + Box::new(val) }*/ // implement SQLParam for basic rust types // for tokio-postgres <> postgres type-mapping: https://docs.rs/postgres/latest/postgres/types/trait.ToSql.html#types // for postgres types: https://www.postgresql.org/docs/7.4/datatype.html#DATATYPE-TABLE -impl SQLParam_ for Option where T: SQLParam { - fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { - match self { - Some(val) => val.prep_integrate(offered_slot), - None => Ok((true, "$V", format!("${}", offered_slot))), - } - } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { - match self { - Some(val) => val.to_sql_checked_(ty, out), - None => { - //Option::::None.to_sql_checked(ty, out) - // for now, we're just gonna say that a None value is valid for all column-types in database (I don't know how to check nullability of db column atm, to compare) - // todo: make sure this won't cause problems - Ok(IsNull::Yes) - }, - } - } +impl SQLParam_ for Option +where + T: 
SQLParam, +{ + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { + match self { + Some(val) => val.prep_integrate(offered_slot), + None => Ok((true, "$V", format!("${}", offered_slot))), + } + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + match self { + Some(val) => val.to_sql_checked_(ty, out), + None => { + //Option::::None.to_sql_checked(ty, out) + // for now, we're just gonna say that a None value is valid for all column-types in database (I don't know how to check nullability of db column atm, to compare) + // todo: make sure this won't cause problems + Ok(IsNull::Yes) + }, + } + } } impl SQLParam for Option where T: SQLParam {} fn prep_integrate_val(offered_slot: i32, type_annotation: &str) -> Result<(bool, &str, String), Error> { - let interpolation_str = format!("${}{}", offered_slot, type_annotation); - Ok((true, "$V", interpolation_str)) + let interpolation_str = format!("${}{}", offered_slot, type_annotation); + Ok((true, "$V", interpolation_str)) } impl SQLParam_ for bool { - fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { prep_integrate_val(offered_slot, "::bool") } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { self.to_sql_checked(ty, out) } + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { + prep_integrate_val(offered_slot, "::bool") + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + self.to_sql_checked(ty, out) + } } impl SQLParam for bool {} impl SQLParam_ for i32 { - fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { prep_integrate_val(offered_slot, "::int4") } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { self.to_sql_checked(ty, out) } + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { + prep_integrate_val(offered_slot, "::int4") + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + self.to_sql_checked(ty, out) + } } impl SQLParam for i32 {} impl SQLParam_ for i64 { - fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { prep_integrate_val(offered_slot, "::int8") } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { self.to_sql_checked(ty, out) } + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { + prep_integrate_val(offered_slot, "::int8") + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + self.to_sql_checked(ty, out) + } } impl SQLParam for i64 {} impl SQLParam_ for f32 { - fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { prep_integrate_val(offered_slot, "::float4") } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { self.to_sql_checked(ty, out) } + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { + prep_integrate_val(offered_slot, "::float4") + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + self.to_sql_checked(ty, out) + } } impl SQLParam for f32 {} impl SQLParam_ for f64 { - fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { 
prep_integrate_val(offered_slot, "::float8") } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { self.to_sql_checked(ty, out) } + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { + prep_integrate_val(offered_slot, "::float8") + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + self.to_sql_checked(ty, out) + } } impl SQLParam for f64 {} impl SQLParam_ for String { - fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { prep_integrate_val(offered_slot, "::text") } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { self.to_sql_checked(ty, out) } + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { + prep_integrate_val(offered_slot, "::text") + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + self.to_sql_checked(ty, out) + } } impl SQLParam for String {} impl SQLParam_ for JSONValue { - fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { prep_integrate_val(offered_slot, "::jsonb") } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { self.to_sql_checked(ty, out) } + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { + prep_integrate_val(offered_slot, "::jsonb") + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + self.to_sql_checked(ty, out) + } } impl SQLParam for JSONValue {} pub trait ToSQLFragment { - fn into_ident_fragment(self) -> Result; - fn into_value_fragment(self) -> Result; + fn into_ident_fragment(self) -> Result; + fn into_value_fragment(self) -> Result; } /*impl SQLParam_ for Vec { - fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { - //prep_integrate_val(offered_slot, "::bool") - match self.get(0) { - Some(first_val) => { - let mut result = first_val.prep_integrate(offered_slot)?; - result.2 = result.2 + "[]"; - Ok(result) - }, - // for empty-vector case, postgres is fine with the type-annotation being left empty - None => prep_integrate_val(offered_slot, ""), - } - } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { - //self.to_sql_checked(ty, out) - //<&[T] as SQLParam_>::to_sql_checked_(self.get(0).unwrap(), ty, out) - /*match self.get(0) { - Some(first_val) => first_val.to_sql_checked_(ty, out), - None => { - // I don't know how to handle the empty-vector case, so just assume it's valid // todo: make sure this doesn't cause problems - //Ok(IsNull::No) - // empty vectors serialize the same way regardless of type (thankfully), so - let empty_vec: Vec = vec![]; - empty_vec.to_sql_checked(ty, out) - }, - }*/ - } + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { + //prep_integrate_val(offered_slot, "::bool") + match self.get(0) { + Some(first_val) => { + let mut result = first_val.prep_integrate(offered_slot)?; + result.2 = result.2 + "[]"; + Ok(result) + }, + // for empty-vector case, postgres is fine with the type-annotation being left empty + None => prep_integrate_val(offered_slot, ""), + } + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + //self.to_sql_checked(ty, out) + //<&[T] as 
SQLParam_>::to_sql_checked_(self.get(0).unwrap(), ty, out) + /*match self.get(0) { + Some(first_val) => first_val.to_sql_checked_(ty, out), + None => { + // I don't know how to handle the empty-vector case, so just assume it's valid // todo: make sure this doesn't cause problems + //Ok(IsNull::No) + // empty vectors serialize the same way regardless of type (thankfully), so + let empty_vec: Vec = vec![]; + empty_vec.to_sql_checked(ty, out) + }, + }*/ + } } impl SQLParam for Vec {}*/ impl SQLParam_ for Vec { - fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { - //prep_integrate_val(offered_slot, "::bool") - match self.get(0) { - Some(first_val) => { - let mut result = first_val.prep_integrate(offered_slot)?; - result.2 = result.2 + "[]"; - Ok(result) - }, - // for empty-vector case, postgres is fine with the type-annotation being left empty - None => prep_integrate_val(offered_slot, ""), - } - } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { - self.to_sql_checked(ty, out) - } + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { + //prep_integrate_val(offered_slot, "::bool") + match self.get(0) { + Some(first_val) => { + let mut result = first_val.prep_integrate(offered_slot)?; + result.2 = result.2 + "[]"; + Ok(result) + }, + // for empty-vector case, postgres is fine with the type-annotation being left empty + None => prep_integrate_val(offered_slot, ""), + } + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + self.to_sql_checked(ty, out) + } } impl SQLParam for Vec {} /// Note: Always serializes as a value sql-param. #[derive(Debug, Clone)] pub struct CustomPGSerializer { - pg_type: String, - data: T, + pg_type: String, + data: T, } impl CustomPGSerializer { - pub fn new(pg_type: String, data: T) -> Self { - if pg_type.len() > 0 && !pg_type.starts_with("::") { - panic!("Invalid pg-type string; it must start with \"::\". @provided:{}", pg_type); - } - Self { - pg_type, - data, - } - } + pub fn new(pg_type: String, data: T) -> Self { + if pg_type.len() > 0 && !pg_type.starts_with("::") { + panic!("Invalid pg-type string; it must start with \"::\". 
@provided:{}", pg_type); + } + Self { pg_type, data } + } } impl SQLParam_ for CustomPGSerializer { - fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { - //Ok((true, "$V", self.pg_type.clone())) - prep_integrate_val(offered_slot, self.pg_type.as_str()) - } - fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { - self.data.to_sql_checked(ty, out) - } -} -impl SQLParam for CustomPGSerializer {} \ No newline at end of file + fn prep_integrate(&self, offered_slot: i32) -> Result<(bool, &str, String), Error> { + //Ok((true, "$V", self.pg_type.clone())) + prep_integrate_val(offered_slot, self.pg_type.as_str()) + } + fn to_sql_checked_(&self, ty: &tokio_postgres::types::Type, out: &mut bytes::BytesMut) -> Result> { + self.data.to_sql_checked(ty, out) + } +} +impl SQLParam for CustomPGSerializer {} diff --git a/Packages/app-server/src/utils/db/transactions.rs b/Packages/app-server/src/utils/db/transactions.rs index e1bbac7eb..07b4fdb4f 100644 --- a/Packages/app-server/src/utils/db/transactions.rs +++ b/Packages/app-server/src/utils/db/transactions.rs @@ -1,42 +1,52 @@ +use deadpool_postgres::{Pool, Transaction}; use futures_util::TryStreamExt; +use rust_shared::anyhow::{anyhow, Error}; use rust_shared::async_graphql; use rust_shared::serde::Serialize; use rust_shared::tokio_postgres::IsolationLevel; use rust_shared::{tokio_postgres, tokio_postgres::Row}; -use rust_shared::anyhow::{anyhow, Error}; -use deadpool_postgres::{Transaction, Pool}; use crate::store::storage::get_app_state_from_gql_ctx; use crate::utils::type_aliases::DBPool; -use crate::utils::{db::{sql_fragment::SQLFragment, filter::{FilterInput, QueryFilter}, queries::get_entries_in_collection_base}, general::{data_anchor::{DataAnchor, DataAnchorFor1}}, type_aliases::PGClientObject}; +use crate::utils::{ + db::{ + filter::{FilterInput, QueryFilter}, + queries::get_entries_in_collection_base, + sql_fragment::SQLFragment, + }, + general::data_anchor::{DataAnchor, DataAnchorFor1}, + type_aliases::PGClientObject, +}; pub async fn get_client_from_gql_ctx<'a>(ctx: &async_graphql::Context<'_>) -> Result { - let pool = &get_app_state_from_gql_ctx(ctx).db_pool; - Ok(pool.get().await.unwrap()) + let pool = &get_app_state_from_gql_ctx(ctx).db_pool; + Ok(pool.get().await.unwrap()) } /// You should almost always use `AccessorContext::new_read` (or variant) instead, since that's higher-level and will handle RLS and such for you. 
(safer) pub async fn start_read_transaction<'a>(anchor: &'a mut DataAnchorFor1, db_pool: &DBPool, isolation_level: IsolationLevel) -> Result, Error> { - // get client, then store it in anchor object the caller gave us a mut-ref to - *anchor = DataAnchor::holding1(db_pool.get().await?); - // now retrieve client from storage-slot we assigned to in the previous line - let client = anchor.val1.as_mut().unwrap(); - + // get client, then store it in anchor object the caller gave us a mut-ref to + *anchor = DataAnchor::holding1(db_pool.get().await?); + // now retrieve client from storage-slot we assigned to in the previous line + let client = anchor.val1.as_mut().unwrap(); + + #[rustfmt::skip] let tx = client.build_transaction() //.isolation_level(tokio_postgres::IsolationLevel::Serializable).start().await?; // use with serializable+deferrable+readonly, so that the transaction is guaranteed to not fail (see doc for "deferrable") [there may be a better way] .isolation_level(isolation_level).deferrable(true).read_only(true) .start().await?; - Ok(tx) + Ok(tx) } /// You should almost always use `AccessorContext::new_write` (or variant) instead, since that's higher-level and will handle RLS and such for you. (safer) pub async fn start_write_transaction<'a>(anchor: &'a mut DataAnchorFor1, db_pool: &DBPool) -> Result, Error> { - // get client, then store it in anchor object the caller gave us a mut-ref to - *anchor = DataAnchor::holding1(db_pool.get().await?); - // now retrieve client from storage-slot we assigned to in the previous line - let client = anchor.val1.as_mut().unwrap(); - + // get client, then store it in anchor object the caller gave us a mut-ref to + *anchor = DataAnchor::holding1(db_pool.get().await?); + // now retrieve client from storage-slot we assigned to in the previous line + let client = anchor.val1.as_mut().unwrap(); + + #[rustfmt::skip] let tx = client.build_transaction() .isolation_level(tokio_postgres::IsolationLevel::Serializable).deferrable(true) // todo: confirm whether this should be deferrable:true or not .start().await?; - Ok(tx) -} \ No newline at end of file + Ok(tx) +} diff --git a/Packages/app-server/src/utils/general/data_anchor.rs b/Packages/app-server/src/utils/general/data_anchor.rs index 267d8b03c..59cd77d6c 100644 --- a/Packages/app-server/src/utils/general/data_anchor.rs +++ b/Packages/app-server/src/utils/general/data_anchor.rs @@ -1,19 +1,27 @@ /// This "data anchor" struct helps expand function-based encapsulation to more cases: /// * Those where you want to construct object X in a function, then construct a derivative/ref-using object Y and return it, without hitting Rust borrow-checker errors from object X's lifetime ending in that function. -/// +/// /// With this "data anchor", you can easily construct a longer-persistence "container" for that object X, without needing to know the exact shape of data needed, nor having to pass the same arguments in two places. 
/// See here for more info (both the linked answer, and the rest of the question/answers): https://stackoverflow.com/a/72925407 pub struct DataAnchor { - pub val1: Option, - pub val2: Option, - pub val3: Option, + pub val1: Option, + pub val2: Option, + pub val3: Option, } impl DataAnchor { - pub fn empty() -> Self { Self { val1: None, val2: None, val3: None } } - pub fn holding1(val1: T1) -> Self { Self { val1: Some(val1), val2: None, val3: None } } - pub fn holding2(val1: T1, val2: T2) -> Self { Self { val1: Some(val1), val2: Some(val2), val3: None } } - pub fn holding3(val1: T1, val2: T2, val3: T3) -> Self { Self { val1: Some(val1), val2: Some(val2), val3: Some(val3) } } + pub fn empty() -> Self { + Self { val1: None, val2: None, val3: None } + } + pub fn holding1(val1: T1) -> Self { + Self { val1: Some(val1), val2: None, val3: None } + } + pub fn holding2(val1: T1, val2: T2) -> Self { + Self { val1: Some(val1), val2: Some(val2), val3: None } + } + pub fn holding3(val1: T1, val2: T2, val3: T3) -> Self { + Self { val1: Some(val1), val2: Some(val2), val3: Some(val3) } + } } pub type DataAnchorFor3 = DataAnchor; pub type DataAnchorFor2 = DataAnchor; -pub type DataAnchorFor1 = DataAnchor; \ No newline at end of file +pub type DataAnchorFor1 = DataAnchor; diff --git a/Packages/app-server/src/utils/general/general.rs b/Packages/app-server/src/utils/general/general.rs index 1ff0332d5..5b02aadfb 100644 --- a/Packages/app-server/src/utils/general/general.rs +++ b/Packages/app-server/src/utils/general/general.rs @@ -1,21 +1,39 @@ -use std::{any::TypeId, pin::Pin, task::{Poll, Waker}, time::{Duration, Instant, SystemTime, UNIX_EPOCH}, cell::RefCell, collections::HashMap, iter::{once, empty}, fmt::Display, sync::atomic::{Ordering, AtomicU64}}; -use rust_shared::{anyhow::{anyhow, bail, Context, Error}, serde_json, utils::{type_aliases::JSONValue, mtx::mtx::Mtx}, new_mtx}; -use rust_shared::async_graphql::{Result, async_stream::{stream, self}, OutputType, Object, Positioned, parser::types::Field}; use deadpool_postgres::Pool; +use rust_shared::async_graphql::{ + async_stream::{self, stream}, + parser::types::Field, + Object, OutputType, Positioned, Result, +}; use rust_shared::flume::Sender; +use rust_shared::{ + anyhow::{anyhow, bail, Context, Error}, + new_mtx, serde_json, + utils::{mtx::mtx::Mtx, type_aliases::JSONValue}, +}; +use std::{ + any::TypeId, + cell::RefCell, + collections::HashMap, + fmt::Display, + iter::{empty, once}, + pin::Pin, + sync::atomic::{AtomicU64, Ordering}, + task::{Poll, Waker}, + time::{Duration, Instant, SystemTime, UNIX_EPOCH}, +}; //use flurry::Guard; -use futures_util::{Stream, StreamExt, Future, stream, TryFutureExt}; +use futures_util::{stream, Future, Stream, StreamExt, TryFutureExt}; use rust_shared::itertools::Itertools; -use rust_shared::serde::{Serialize, Deserialize, de::DeserializeOwned}; +use rust_shared::serde::{de::DeserializeOwned, Deserialize, Serialize}; use rust_shared::serde_json::{json, Map}; use rust_shared::tokio::sync::RwLock; -use rust_shared::tokio_postgres::{Client, Row, types::ToSql}; +use rust_shared::tokio_postgres::{types::ToSql, Client, Row}; use rust_shared::uuid::Uuid; //use rust_shared::tokio::sync::Mutex; use metrics::{counter, histogram}; use std::hash::Hash; -use crate::{store::live_queries::{LQStorageArc, LQStorage, DropLQWatcherMsg}}; +use crate::store::live_queries::{DropLQWatcherMsg, LQStorage, LQStorageArc}; /// Alternative to `my_hash_map.entry(key).or_insert_with(...)`, for when the hashmap is wrapped in a RwLock, and you want 
a "write" lock to only be obtained if a "read" lock is insufficient. (see: https://stackoverflow.com/a/57057033) /// Returns tuple of: @@ -23,72 +41,74 @@ use crate::{store::live_queries::{LQStorageArc, LQStorage, DropLQWatcherMsg}}; /// * 1: `true` if the entry didn't exist and had to be created -- `false` otherwise. /// * 2: The new number of entries in the map. pub async fn rw_locked_hashmap__get_entry_or_insert_with(map: &RwLock>, key: K, insert_func: impl FnOnce() -> V) -> (V, bool, usize) - where K: Sized, K: Hash + Eq +where + K: Sized, + K: Hash + Eq, { - //new_mtx!(mtx, "1", mtx_p); - { - let map_read = map.read().await; - //mtx.section("1.1"); - //println!("1.1, key:{:?}", key); - if let Some(val) = map_read.get(&key) { - let val_clone = val.clone(); - let count = map_read.len(); - return (val_clone, false, count); - } - } - - //mtx.section("2"); - let mut map_write = map.write().await; - //mtx.section("2.1"); - //println!("2.1, key:{:?}", key); - // use entry().or_insert_with() in case another thread inserted the same key while we were unlocked above - let val_clone = map_write.entry(key).or_insert_with(insert_func).clone(); - let count = map_write.len(); - (val_clone, true, count) + //new_mtx!(mtx, "1", mtx_p); + { + let map_read = map.read().await; + //mtx.section("1.1"); + //println!("1.1, key:{:?}", key); + if let Some(val) = map_read.get(&key) { + let val_clone = val.clone(); + let count = map_read.len(); + return (val_clone, false, count); + } + } + + //mtx.section("2"); + let mut map_write = map.write().await; + //mtx.section("2.1"); + //println!("2.1, key:{:?}", key); + // use entry().or_insert_with() in case another thread inserted the same key while we were unlocked above + let val_clone = map_write.entry(key).or_insert_with(insert_func).clone(); + let count = map_write.len(); + (val_clone, true, count) } /*pub fn flurry_hashmap_into_hashmap(map: &flurry::HashMap, guard: Guard<'_>) -> HashMap { - let mut result = HashMap::new(); - for (key, value) in map.iter(&guard) { - result.insert(key.clone(), value.clone()); - } - result + let mut result = HashMap::new(); + for (key, value) in map.iter(&guard) { + result.insert(key.clone(), value.clone()); + } + result } pub fn flurry_hashmap_into_json_map(map: &flurry::HashMap, guard: Guard<'_>, sort: bool) -> Result, serde_json::Error> { - let mut result = Map::new(); - if sort { - for (key, value) in map.iter(&guard).sorted_by_key(|a| a.0) { - result.insert(key.to_string(), serde_json::to_value(value)?); - } - } else { - for (key, value) in map.iter(&guard) { - result.insert(key.to_string(), serde_json::to_value(value)?); - } - } - Ok(result) + let mut result = Map::new(); + if sort { + for (key, value) in map.iter(&guard).sorted_by_key(|a| a.0) { + result.insert(key.to_string(), serde_json::to_value(value)?); + } + } else { + for (key, value) in map.iter(&guard) { + result.insert(key.to_string(), serde_json::to_value(value)?); + } + } + Ok(result) }*/ pub fn match_cond_to_iter(cond_x: bool, iter_y: impl Iterator + 'static, iter_z: impl Iterator + 'static) -> Box> { - match cond_x { - true => Box::new(iter_y), - false => Box::new(iter_z) - } + match cond_x { + true => Box::new(iter_y), + false => Box::new(iter_z), + } } pub struct AtomicF64 { - storage: AtomicU64, + storage: AtomicU64, } impl AtomicF64 { - pub fn new(value: f64) -> Self { - let as_u64 = value.to_bits(); - Self { storage: AtomicU64::new(as_u64) } - } - pub fn store(&self, value: f64, ordering: Ordering) { - let as_u64 = value.to_bits(); - 
self.storage.store(as_u64, ordering) - } - pub fn load(&self, ordering: Ordering) -> f64 { - let as_u64 = self.storage.load(ordering); - f64::from_bits(as_u64) - } -} \ No newline at end of file + pub fn new(value: f64) -> Self { + let as_u64 = value.to_bits(); + Self { storage: AtomicU64::new(as_u64) } + } + pub fn store(&self, value: f64, ordering: Ordering) { + let as_u64 = value.to_bits(); + self.storage.store(as_u64, ordering) + } + pub fn load(&self, ordering: Ordering) -> f64 { + let as_u64 = self.storage.load(ordering); + f64::from_bits(as_u64) + } +} diff --git a/Packages/app-server/src/utils/general/logging.rs b/Packages/app-server/src/utils/general/logging.rs index 666213a1b..56efceb2c 100644 --- a/Packages/app-server/src/utils/general/logging.rs +++ b/Packages/app-server/src/utils/general/logging.rs @@ -1,138 +1,141 @@ use std::collections::HashSet; use std::sync::Mutex; -use std::{fmt, collections::HashMap, ops::Sub}; +use std::{collections::HashMap, fmt, ops::Sub}; -use rust_shared::flume::{Sender, Receiver, TrySendError}; +use rust_shared::flume::{Receiver, Sender, TrySendError}; use rust_shared::futures::executor::block_on; use rust_shared::indexmap::IndexMap; use rust_shared::itertools::Itertools; use rust_shared::links::app_server_to_monitor_backend::{LogEntry, Message_ASToMB}; use rust_shared::once_cell::sync::Lazy; -use rust_shared::{sentry, to_anyhow}; -use rust_shared::utils::time::time_since_epoch_ms; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; -use tracing::{Level, error, Subscriber, Metadata, subscriber::Interest, span, Event, metadata::LevelFilter, field::{Visit, Field}}; -use tracing_subscriber::{filter, Layer, prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt, layer::{Filter, Context}}; +use rust_shared::utils::time::time_since_epoch_ms; +use rust_shared::{sentry, to_anyhow}; +use tracing::{ + error, + field::{Field, Visit}, + metadata::LevelFilter, + span, + subscriber::Interest, + Event, Level, Metadata, Subscriber, +}; +use tracing_subscriber::{ + filter, + layer::{Context, Filter}, + prelude::__tracing_subscriber_SubscriberExt, + util::SubscriberInitExt, + Layer, +}; -use crate::{utils::type_aliases::ABSender, links::monitor_backend_link::{MESSAGE_SENDER_TO_MONITOR_BACKEND}}; +use crate::{links::monitor_backend_link::MESSAGE_SENDER_TO_MONITOR_BACKEND, utils::type_aliases::ABSender}; pub fn does_event_match_conditions(metadata: &Metadata, levels_to_include: &[Level]) -> bool { - if !levels_to_include.contains(metadata.level()) { - return false; - } - true + if !levels_to_include.contains(metadata.level()) { + return false; + } + true } // create index-map in once cell static OBSERVED_TRACING_EVENT_TARGETS: Lazy>> = Lazy::new(|| Mutex::new(Vec::new())); pub fn target_matches(target: &str, module_paths: &[&str]) -> bool { - for module_path in module_paths { - if target == *module_path || target.starts_with(&format!("{}::", module_path)) { - return true; - } - } - false + for module_path in module_paths { + if target == *module_path || target.starts_with(&format!("{}::", module_path)) { + return true; + } + } + false } pub fn should_event_be_printed(metadata: &Metadata) -> bool { - let target = metadata.target(); - - // when you enable this, only do it temporarily, to check the list of tracing targets - let mut cache = OBSERVED_TRACING_EVENT_TARGETS.lock().unwrap(); - if !cache.contains(&target.to_owned()) { - cache.push(target.to_owned()); - 
//println!("Tracing targets observed so far: {}", cache.iter().format(", ")); - println!("Observed new target in tracing: {}", target); - } - - match target { - t if target_matches(t, &["app_server", "rust_shared"]) => { - does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN, Level::INFO]) - //should_event_be_kept_according_to_x(metadata, &[Level::ERROR, Level::WARN, Level::INFO, Level::DEBUG]) - }, - t if target_matches(t, &["async-graphql"]) => { - does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN]) - }, - // temp - //t if target_matches(t, &["hyper"]) => true, - //t if target_matches(t, &["tower_http::trace::on_request", "tower_http::trace::on_response"]) => true, - t if target_matches(t, &["tower_http::"]) => true, - // fallback - _ => false, - } + let target = metadata.target(); + + // when you enable this, only do it temporarily, to check the list of tracing targets + let mut cache = OBSERVED_TRACING_EVENT_TARGETS.lock().unwrap(); + if !cache.contains(&target.to_owned()) { + cache.push(target.to_owned()); + //println!("Tracing targets observed so far: {}", cache.iter().format(", ")); + println!("Observed new target in tracing: {}", target); + } + + match target { + t if target_matches(t, &["app_server", "rust_shared"]) => { + does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN, Level::INFO]) + //should_event_be_kept_according_to_x(metadata, &[Level::ERROR, Level::WARN, Level::INFO, Level::DEBUG]) + }, + t if target_matches(t, &["async-graphql"]) => does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN]), + // temp + //t if target_matches(t, &["hyper"]) => true, + //t if target_matches(t, &["tower_http::trace::on_request", "tower_http::trace::on_response"]) => true, + t if target_matches(t, &["tower_http::"]) => true, + // fallback + _ => false, + } } pub fn should_event_be_sent_to_monitor(metadata: &Metadata) -> bool { - match metadata.target() { - a if a.starts_with("app_server") || a.starts_with("rust_shared") => { - //does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN, Level::INFO, Level::DEBUG, Level::TRACE]) - // don't send TRACE atm, because that's intended for logging that's potentially *very* verbose, and could conceivably cause local network congestion - // (long-term, the plan is to make a way for the monitor tool to request that verbose data for a time-slice the user specifies, if/when needed) - does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN, Level::INFO, Level::DEBUG]) - }, - "async-graphql" => { - does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN]) - }, - _ => false - } + match metadata.target() { + a if a.starts_with("app_server") || a.starts_with("rust_shared") => { + //does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN, Level::INFO, Level::DEBUG, Level::TRACE]) + // don't send TRACE atm, because that's intended for logging that's potentially *very* verbose, and could conceivably cause local network congestion + // (long-term, the plan is to make a way for the monitor tool to request that verbose data for a time-slice the user specifies, if/when needed) + does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN, Level::INFO, Level::DEBUG]) + }, + "async-graphql" => does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN]), + _ => false, + } } pub fn set_up_logging() { - let printing_layer_func = filter::filter_fn(move |metadata| { - should_event_be_printed(metadata) - }); + let printing_layer_func = filter::filter_fn(move |metadata| 
should_event_be_printed(metadata)); - let printing_layer = tracing_subscriber::fmt::layer().with_filter(printing_layer_func); - let sending_layer = Layer_WithIntercept {}; - - // IMPORTANT NOTE: For some reason, calls to `log::warn` and such get logged to the standard-out (probably passing through `printing_layer` above), but NOT to the `sending_layer`. - // So until the source issue is investigated, make sure to always using `tracing::X` instead of `log::X` in the codebase. (else those log-messages won't get sent to monitor-tool) - tracing_subscriber::registry() - .with(sending_layer) - .with(printing_layer) - .with(sentry::integrations::tracing::layer()) - .init(); + let printing_layer = tracing_subscriber::fmt::layer().with_filter(printing_layer_func); + let sending_layer = Layer_WithIntercept {}; + + // IMPORTANT NOTE: For some reason, calls to `log::warn` and such get logged to the standard-out (probably passing through `printing_layer` above), but NOT to the `sending_layer`. + // So until the source issue is investigated, make sure to always using `tracing::X` instead of `log::X` in the codebase. (else those log-messages won't get sent to monitor-tool) + tracing_subscriber::registry().with(sending_layer).with(printing_layer).with(sentry::integrations::tracing::layer()).init(); } pub struct Layer_WithIntercept {} impl Layer for Layer_WithIntercept { - fn on_event(&self, event: &Event<'_>, _ctx: Context<'_, S>) { - let metadata = event.metadata(); - if should_event_be_sent_to_monitor(metadata) { - let mut entry = LogEntry { - time: time_since_epoch_ms(), - level: metadata.level().to_string(), - target: metadata.target().to_owned(), - span_name: metadata.name().to_owned(), - message: "[to be loaded...]".to_owned(), - }; + fn on_event(&self, event: &Event<'_>, _ctx: Context<'_, S>) { + let metadata = event.metadata(); + if should_event_be_sent_to_monitor(metadata) { + let mut entry = LogEntry { + time: time_since_epoch_ms(), + level: metadata.level().to_string(), + target: metadata.target().to_owned(), + span_name: metadata.name().to_owned(), + message: "[to be loaded...]".to_owned(), + }; - let mut visitor = CollectorVisitor::default(); - event.record(&mut visitor); - // todo: make-so this handles all fields - entry.message = visitor.field_values.get("message").map(|a| a.to_owned()).unwrap_or_else(|| "[n/a]".to_string()); + let mut visitor = CollectorVisitor::default(); + event.record(&mut visitor); + // todo: make-so this handles all fields + entry.message = visitor.field_values.get("message").map(|a| a.to_owned()).unwrap_or_else(|| "[n/a]".to_string()); - //let start = std::time::Instant::now(); - block_on(async { - match MESSAGE_SENDER_TO_MONITOR_BACKEND.0.broadcast(Message_ASToMB::LogEntryAdded { entry }).await { - Ok(_) => {}, - // if a send fails (ie. no receivers attached yet), that's fine; just print a message - Err(entry) => println!("Local-only log-entry (since bridge to monitor not yet set up):{entry:?}") - }; - }); - // typical results: 0.01ms (which seems fine; if we're logging so much that 0.01ms is a problem, we're very likely logging too much...) - //println!("Time taken:{}", start.elapsed().as_secs_f64() * 1000f64); - } - } + //let start = std::time::Instant::now(); + block_on(async { + match MESSAGE_SENDER_TO_MONITOR_BACKEND.0.broadcast(Message_ASToMB::LogEntryAdded { entry }).await { + Ok(_) => {}, + // if a send fails (ie. 
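// `target_matches` and `does_event_match_conditions` are used throughout the filter logic above but are
// not part of this hunk; minimal versions consistent with those call-sites might look like the sketch
// below (the names exist in the codebase, but these bodies are assumptions, shown only for clarity):
fn target_matches(target: &str, allowed_prefixes: &[&str]) -> bool {
	// keep the event if its target starts with any of the listed crate/module prefixes
	allowed_prefixes.iter().any(|prefix| target.starts_with(prefix))
}
fn does_event_match_conditions(metadata: &tracing::Metadata<'_>, levels_to_include: &[tracing::Level]) -> bool {
	// keep the event only if its level is in the allow-list chosen for that target group
	levels_to_include.iter().any(|level| metadata.level() == level)
}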
no receivers attached yet), that's fine; just print a message + Err(entry) => println!("Local-only log-entry (since bridge to monitor not yet set up):{entry:?}"), + }; + }); + // typical results: 0.01ms (which seems fine; if we're logging so much that 0.01ms is a problem, we're very likely logging too much...) + //println!("Time taken:{}", start.elapsed().as_secs_f64() * 1000f64); + } + } } #[derive(Default)] pub struct CollectorVisitor { - pub field_values: IndexMap, + pub field_values: IndexMap, } impl Visit for CollectorVisitor { - /// Visit a value implementing `fmt::Debug`. - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - self.field_values.insert(field.name().to_owned(), format!("{:?}", value)); - } -} \ No newline at end of file + /// Visit a value implementing `fmt::Debug`. + fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { + self.field_values.insert(field.name().to_owned(), format!("{:?}", value)); + } +} diff --git a/Packages/app-server/src/utils/general/mem_alloc.rs b/Packages/app-server/src/utils/general/mem_alloc.rs index b9914afd0..4c7c4a30d 100644 --- a/Packages/app-server/src/utils/general/mem_alloc.rs +++ b/Packages/app-server/src/utils/general/mem_alloc.rs @@ -4,25 +4,25 @@ use std::sync::atomic::{AtomicU64, Ordering}; pub struct Trallocator(pub A, AtomicU64); unsafe impl GlobalAlloc for Trallocator { - unsafe fn alloc(&self, l: Layout) -> *mut u8 { - self.1.fetch_add(l.size() as u64, Ordering::SeqCst); - self.0.alloc(l) - } - unsafe fn dealloc(&self, ptr: *mut u8, l: Layout) { - self.0.dealloc(ptr, l); - self.1.fetch_sub(l.size() as u64, Ordering::SeqCst); - } + unsafe fn alloc(&self, l: Layout) -> *mut u8 { + self.1.fetch_add(l.size() as u64, Ordering::SeqCst); + self.0.alloc(l) + } + unsafe fn dealloc(&self, ptr: *mut u8, l: Layout) { + self.0.dealloc(ptr, l); + self.1.fetch_sub(l.size() as u64, Ordering::SeqCst); + } } impl Trallocator { - pub const fn new(a: A) -> Self { - Trallocator(a, AtomicU64::new(0)) - } + pub const fn new(a: A) -> Self { + Trallocator(a, AtomicU64::new(0)) + } - pub fn reset(&self) { - self.1.store(0, Ordering::SeqCst); - } - pub fn get(&self) -> u64 { - self.1.load(Ordering::SeqCst) - } -} \ No newline at end of file + pub fn reset(&self) { + self.1.store(0, Ordering::SeqCst); + } + pub fn get(&self) -> u64 { + self.1.load(Ordering::SeqCst) + } +} diff --git a/Packages/app-server/src/utils/general/order_key.rs b/Packages/app-server/src/utils/general/order_key.rs index ae43aba04..b88230df9 100644 --- a/Packages/app-server/src/utils/general/order_key.rs +++ b/Packages/app-server/src/utils/general/order_key.rs @@ -1,23 +1,24 @@ use std::{ - cmp::Ordering, - fmt::{Debug, Display}, + cmp::Ordering, + fmt::{Debug, Display}, }; -use lexicon_fractional_index::{key_between, float64_approx}; +use lexicon_fractional_index::{float64_approx, key_between}; use rust_shared::{ - anyhow::Error, - async_graphql::{self as async_graphql, InputValueResult, Scalar, ScalarType, Value, InputValueError}, - to_anyhow, utils::general_::extensions::ToOwnedV, + anyhow::Error, + async_graphql::{self as async_graphql, InputValueError, InputValueResult, Scalar, ScalarType, Value}, + to_anyhow, + utils::general_::extensions::ToOwnedV, }; use serde::{Deserialize, Deserializer, Serialize, Serializer}; /*pub fn lexicon_key_between(key1: Option<&LexiconKey>, key2: Option<&LexiconKey>) -> Result { - Ok(match (key1, key2) { - (None, None) => LexiconKey::default(), - (Some(key1), None) => key1.bisect_end().map_err(to_anyhow)?, - (None, 
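// A sketch of how the Trallocator wrapper above is presumably wired up elsewhere in the crate
// (assumption: it wraps the system allocator and is registered as the global allocator):
use std::alloc::System;

#[global_allocator]
static GLOBAL: Trallocator<System> = Trallocator::new(System);

fn log_heap_usage() {
	// get() reports the net bytes currently allocated through the wrapper (incremented on alloc, decremented on dealloc)
	println!("heap allocated: {} bytes", GLOBAL.get());
}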
Some(key2)) => key2.bisect_beginning().map_err(to_anyhow)?, - (Some(key1), Some(key2)) => key1.bisect(key2).map_err(to_anyhow)?, - }) + Ok(match (key1, key2) { + (None, None) => LexiconKey::default(), + (Some(key1), None) => key1.bisect_end().map_err(to_anyhow)?, + (None, Some(key2)) => key2.bisect_beginning().map_err(to_anyhow)?, + (Some(key1), Some(key2)) => key1.bisect(key2).map_err(to_anyhow)?, + }) }*/ // NOTE: The `lexicon_fractional_index` crate can panic for certain invalid/unexpected inputs. (eg. strings containing "^" or "_") @@ -25,70 +26,76 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; //#[derive(SimpleObject, InputObject)] pub struct OrderKey { - pub key: String, + pub key: String, } impl OrderKey { - pub fn mid() -> OrderKey { - OrderKey { key: key_between(&None, &None).unwrap() } - } - pub fn validate(key: &str) -> Result<(), Error> { - //float64_approx(key).map_err(to_anyhow)?; - // the base library's `validate_key` function is private, so pass key to its `key_between` function instead (since it calls `validate_key` internally) - key_between(&Some(key.o()), &None).map_err(to_anyhow)?; - Ok(()) - } + pub fn mid() -> OrderKey { + OrderKey { key: key_between(&None, &None).unwrap() } + } + pub fn validate(key: &str) -> Result<(), Error> { + //float64_approx(key).map_err(to_anyhow)?; + // the base library's `validate_key` function is private, so pass key to its `key_between` function instead (since it calls `validate_key` internally) + key_between(&Some(key.o()), &None).map_err(to_anyhow)?; + Ok(()) + } - pub fn new(str: &str) -> Result { - Self::validate(str)?; - Ok(OrderKey { key: str.o() }) - } - pub fn prev(&self) -> Result { - Ok(OrderKey { key: key_between(&None, &Some(self.key.o())).map_err(to_anyhow)? }) - } - pub fn next(&self) -> Result { - Ok(OrderKey { key: key_between(&Some(self.key.o()), &None).map_err(to_anyhow)? }) - } - pub fn between(&self, other: &OrderKey) -> Result { - // swap order when self is greater than other (base library enforces this restriction) - if self.key > other.key { - return other.between(self); - } - Ok(OrderKey { key: key_between(&Some(self.key.o()), &Some(other.key.o())).map_err(to_anyhow)? }) - } + pub fn new(str: &str) -> Result { + Self::validate(str)?; + Ok(OrderKey { key: str.o() }) + } + pub fn prev(&self) -> Result { + Ok(OrderKey { key: key_between(&None, &Some(self.key.o())).map_err(to_anyhow)? }) + } + pub fn next(&self) -> Result { + Ok(OrderKey { key: key_between(&Some(self.key.o()), &None).map_err(to_anyhow)? }) + } + pub fn between(&self, other: &OrderKey) -> Result { + // swap order when self is greater than other (base library enforces this restriction) + if self.key > other.key { + return other.between(self); + } + Ok(OrderKey { key: key_between(&Some(self.key.o()), &Some(other.key.o())).map_err(to_anyhow)? 
}) + } } // added traits // ========== impl Clone for OrderKey { - fn clone(&self) -> Self { - OrderKey::new(&self.key.to_string()).unwrap() - } + fn clone(&self) -> Self { + OrderKey::new(&self.key.to_string()).unwrap() + } } impl Serialize for OrderKey { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - //serializer.serialize_str(self.inner.to_string().as_str()) - self.key.to_string().serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + //serializer.serialize_str(self.inner.to_string().as_str()) + self.key.to_string().serialize(serializer) + } } impl<'de> Deserialize<'de> for OrderKey { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { - let str_val = String::deserialize(deserializer)?; - Ok(OrderKey::new(&str_val).map_err(|err| serde::de::Error::custom(err.to_string()))?) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let str_val = String::deserialize(deserializer)?; + Ok(OrderKey::new(&str_val).map_err(|err| serde::de::Error::custom(err.to_string()))?) + } } #[Scalar] impl ScalarType for OrderKey { - fn parse(value: Value) -> InputValueResult { - match value { - Value::String(str_val) => Ok(OrderKey::new(&str_val).map_err(|e| InputValueError::custom(e))?), - _ => Err(InputValueError::custom("OrderKey must be a string")), - } - } - fn to_value(&self) -> Value { - Value::String(self.key.to_string()) - } + fn parse(value: Value) -> InputValueResult { + match value { + Value::String(str_val) => Ok(OrderKey::new(&str_val).map_err(|e| InputValueError::custom(e))?), + _ => Err(InputValueError::custom("OrderKey must be a string")), + } + } + fn to_value(&self) -> Value { + Value::String(self.key.to_string()) + } } // pass-through traits @@ -96,14 +103,22 @@ impl ScalarType for OrderKey { impl Eq for OrderKey {} impl PartialEq for OrderKey { - fn eq(&self, other: &OrderKey) -> bool { self.key.eq(&other.key) } + fn eq(&self, other: &OrderKey) -> bool { + self.key.eq(&other.key) + } } impl Ord for OrderKey { - fn cmp(&self, other: &OrderKey) -> Ordering { self.key.cmp(&other.key) } + fn cmp(&self, other: &OrderKey) -> Ordering { + self.key.cmp(&other.key) + } } impl PartialOrd for OrderKey { - fn partial_cmp(&self, other: &OrderKey) -> Option { self.key.partial_cmp(&other.key) } + fn partial_cmp(&self, other: &OrderKey) -> Option { + self.key.partial_cmp(&other.key) + } } impl Display for OrderKey { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { std::fmt::Display::fmt(&self.key, f) } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + std::fmt::Display::fmt(&self.key, f) + } } diff --git a/Packages/app-server/src/utils/http.rs b/Packages/app-server/src/utils/http.rs index 9832c813f..6a4bd9ce6 100644 --- a/Packages/app-server/src/utils/http.rs +++ b/Packages/app-server/src/utils/http.rs @@ -1,41 +1,41 @@ -use rust_shared::{axum::http, utils::net::{body_to_bytes, AxumBody}}; use futures_util::TryStreamExt; -use rust_shared::hyper::{Request}; +use rust_shared::hyper::Request; +use rust_shared::{ + axum::http, + utils::net::{body_to_bytes, AxumBody}, +}; pub async fn clone_request(req: Request) -> (Request, Request) { - let (parts, body) = req.into_parts(); - //clone_request_from_parts(parts, body, "sdf".to_owned()).await - clone_request_from_parts(parts, body).await + let (parts, body) = req.into_parts(); + //clone_request_from_parts(parts, body, "sdf".to_owned()).await + clone_request_from_parts(parts, body).await } pub async fn 
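// Illustrative usage of the OrderKey helpers above (a sketch only; `Error` here is the
// rust_shared::anyhow::Error type that the real methods return):
fn order_key_demo() -> Result<(), rust_shared::anyhow::Error> {
	let a = OrderKey::mid(); // an initial key
	let b = a.next()?; // a key that sorts after `a`
	let mid = a.between(&b)?; // a key that sorts strictly between the two
	assert!(a < mid && mid < b);
	// between() swaps its operands when self > other, so calling it "backwards" still succeeds:
	assert!(b.between(&a)? == mid);
	Ok(())
}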
clone_request_from_parts( - parts: http::request::Parts, body: AxumBody, - // modifications - //new_url: String + parts: http::request::Parts, + body: AxumBody, + // modifications + //new_url: String ) -> (Request, Request) { - let new_url = parts.uri; + let new_url = parts.uri; - /*let entire_body_as_vec = body - .try_fold(Vec::new(), |mut data, chunk| async move { - data.extend_from_slice(&chunk); - Ok(data) - }).await;*/ - let entire_body_as_vec = body_to_bytes(body).await.unwrap().to_vec(); + /*let entire_body_as_vec = body + .try_fold(Vec::new(), |mut data, chunk| async move { + data.extend_from_slice(&chunk); + Ok(data) + }).await;*/ + let entire_body_as_vec = body_to_bytes(body).await.unwrap().to_vec(); - let body_str = String::from_utf8(entire_body_as_vec).expect("response was not valid utf-8"); - let mut request_builder_1 = Request::builder().uri(new_url.clone()).method(parts.method.as_str()); - let mut request_builder_2 = Request::builder().uri(new_url).method(parts.method.as_str()); + let body_str = String::from_utf8(entire_body_as_vec).expect("response was not valid utf-8"); + let mut request_builder_1 = Request::builder().uri(new_url.clone()).method(parts.method.as_str()); + let mut request_builder_2 = Request::builder().uri(new_url).method(parts.method.as_str()); - for (header_name, header_value) in parts.headers.iter() { - request_builder_1 = request_builder_1.header(header_name.as_str(), header_value); - request_builder_2 = request_builder_2.header(header_name.as_str(), header_value); - } + for (header_name, header_value) in parts.headers.iter() { + request_builder_1 = request_builder_1.header(header_name.as_str(), header_value); + request_builder_2 = request_builder_2.header(header_name.as_str(), header_value); + } - let req1 = request_builder_1 - .body(AxumBody::from(body_str.clone())) - .unwrap(); - let req2 = request_builder_2 - .body(AxumBody::from(body_str.clone())) - .unwrap(); + let req1 = request_builder_1.body(AxumBody::from(body_str.clone())).unwrap(); + let req2 = request_builder_2.body(AxumBody::from(body_str.clone())).unwrap(); - (req1, req2) -} \ No newline at end of file + (req1, req2) +} diff --git a/Packages/app-server/src/utils/mod.rs b/Packages/app-server/src/utils/mod.rs index 57ad3a660..f89038ddc 100644 --- a/Packages/app-server/src/utils/mod.rs +++ b/Packages/app-server/src/utils/mod.rs @@ -1,38 +1,38 @@ pub mod axum_logging_layer; pub mod db { - pub mod accessors; - pub mod agql_ext { - pub mod gql_request_storage; - pub mod gql_result_stream; - pub mod gql_utils; - } - pub mod filter; - pub mod generic_handlers { - pub mod queries; - pub mod subscriptions; - } - pub mod sql_fragment; - pub mod pg_stream_parsing; - pub mod pg_row_to_json; - pub mod queries; - pub mod rls { - pub mod rls_applier; - pub mod rls_helpers; - pub mod rls_policies; - } - pub mod sql_ident; - pub mod sql_param; - pub mod transactions; + pub mod accessors; + pub mod agql_ext { + pub mod gql_request_storage; + pub mod gql_result_stream; + pub mod gql_utils; + } + pub mod filter; + pub mod generic_handlers { + pub mod queries; + pub mod subscriptions; + } + pub mod pg_row_to_json; + pub mod pg_stream_parsing; + pub mod queries; + pub mod sql_fragment; + pub mod rls { + pub mod rls_applier; + pub mod rls_helpers; + pub mod rls_policies; + } + pub mod sql_ident; + pub mod sql_param; + pub mod transactions; } pub mod general { - pub mod data_anchor; - pub mod general; - pub mod logging; - pub mod mem_alloc; - pub mod order_key; + pub mod data_anchor; + pub mod general; + pub mod 
logging; + pub mod mem_alloc; + pub mod order_key; } pub mod http; pub mod type_aliases; pub mod quick_tests { - pub mod quick1; -} \ No newline at end of file + pub mod quick1; +} diff --git a/Packages/app-server/src/utils/quick_tests/quick1.rs b/Packages/app-server/src/utils/quick_tests/quick1.rs index 19aa0f01c..dfef656fa 100644 --- a/Packages/app-server/src/utils/quick_tests/quick1.rs +++ b/Packages/app-server/src/utils/quick_tests/quick1.rs @@ -1,2 +1,2 @@ // file to paste code for quick type-checking (eg. for subsequent sharing through https://play.rust-lang.org) -// ========== \ No newline at end of file +// ========== diff --git a/Packages/app-server/src/utils/type_aliases.rs b/Packages/app-server/src/utils/type_aliases.rs index c69b20b6a..19a5993df 100644 --- a/Packages/app-server/src/utils/type_aliases.rs +++ b/Packages/app-server/src/utils/type_aliases.rs @@ -15,4 +15,4 @@ pub type PGClientObject = Object; // channels pub type ABSender = async_broadcast::Sender; -pub type ABReceiver = async_broadcast::Receiver; \ No newline at end of file +pub type ABReceiver = async_broadcast::Receiver; diff --git a/Packages/monitor-backend/src/gql/_general.rs b/Packages/monitor-backend/src/gql/_general.rs index 3cf394061..f5a1fe10f 100644 --- a/Packages/monitor-backend/src/gql/_general.rs +++ b/Packages/monitor-backend/src/gql/_general.rs @@ -1,8 +1,16 @@ -use rust_shared::itertools::Itertools; -use rust_shared::anyhow::{anyhow, Context, Error, bail, ensure}; -use rust_shared::async_graphql::{Object, Result, Schema, Subscription, ID, async_stream, OutputType, scalar, EmptySubscription, SimpleObject, InputObject, self}; +use futures::executor::block_on; +use futures_util::{stream, Future, Stream, StreamExt, TryFutureExt}; +use rust_shared::anyhow::{anyhow, bail, ensure, Context, Error}; +use rust_shared::async_graphql::{self, async_stream, scalar, EmptySubscription, InputObject, Object, OutputType, Result, Schema, SimpleObject, Subscription, ID}; use rust_shared::flume::{Receiver, Sender}; +use rust_shared::hyper::{Method, Request}; +use rust_shared::itertools::Itertools; use rust_shared::links::app_server_to_monitor_backend::LogEntry; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; +use rust_shared::serde_json::json; +use rust_shared::tokio_postgres::Client; use rust_shared::utils::_k8s::{get_k8s_pod_basic_infos, get_or_create_k8s_secret, try_get_k8s_secret}; use rust_shared::utils::futures::make_reliable; use rust_shared::utils::general_::extensions::ToOwnedV; @@ -10,46 +18,38 @@ use rust_shared::utils::k8s::cert_handling::get_reqwest_client_with_k8s_certs; use rust_shared::utils::mtx::mtx::{MtxData, MtxDataForAGQL}; use rust_shared::utils::net::{body_to_str, new_hyper_client_http}; use rust_shared::utils::time::time_since_epoch_ms_i64; -use rust_shared::{futures, axum, tower, tower_http, GQLError, base64, reqwest}; use rust_shared::utils::type_aliases::JSONValue; -use futures::executor::block_on; -use futures_util::{Stream, stream, TryFutureExt, StreamExt, Future}; -use rust_shared::hyper::{Method, Request}; -use rust_shared::rust_macros::wrap_slow_macros; use rust_shared::SubError; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::serde_json::json; -use rust_shared::tokio_postgres::{Client}; -use rust_shared::serde; -use tracing::{error, info}; +use rust_shared::{axum, base64, futures, reqwest, tower, tower_http, GQLError}; use std::fs::File; use std::io::Read; -use std::{env, fs}; use 
std::path::Path; use std::str::FromStr; -use std::{time::Duration, pin::Pin, task::Poll}; +use std::{env, fs}; +use std::{pin::Pin, task::Poll, time::Duration}; +use tracing::{error, info}; -use crate::testing::general::{execute_test_sequence, TestSequence}; -use crate::{GeneralMessage}; use crate::migrations::v2::migrate_db_to_v2; use crate::store::storage::{AppStateArc, LQInstance_Partial}; -use crate::utils::type_aliases::{ABSender, ABReceiver}; +use crate::testing::general::{execute_test_sequence, TestSequence}; +use crate::utils::type_aliases::{ABReceiver, ABSender}; +use crate::GeneralMessage; pub fn admin_key_is_correct(admin_key: String, log_message_if_wrong: bool) -> bool { - let result = admin_key == env::var("MONITOR_BACKEND_ADMIN_KEY").unwrap(); - if !result && log_message_if_wrong { - error!("Admin-key is incorrect! Submitted:{}", admin_key); - } - return result; + let result = admin_key == env::var("MONITOR_BACKEND_ADMIN_KEY").unwrap(); + if !result && log_message_if_wrong { + error!("Admin-key is incorrect! Submitted:{}", admin_key); + } + return result; } pub fn ensure_admin_key_is_correct(admin_key: String, log_message_if_wrong: bool) -> Result<(), Error> { - if !admin_key_is_correct(admin_key, log_message_if_wrong) { - return Err(anyhow!("Admin-key is incorrect!")); - } - Ok(()) + if !admin_key_is_correct(admin_key, log_message_if_wrong) { + return Err(anyhow!("Admin-key is incorrect!")); + } + Ok(()) } -wrap_slow_macros!{ +wrap_slow_macros! { // queries // ========== @@ -58,174 +58,174 @@ wrap_slow_macros!{ pub struct QueryShard_General; #[Object] impl QueryShard_General { - /// async-graphql requires there to be at least one entry under the Query section - async fn empty(&self) -> &str { "" } - - async fn mtxResults(&self, ctx: &async_graphql::Context<'_>, admin_key: String, start_time: f64, end_time: f64) -> Result, GQLError> { - ensure_admin_key_is_correct(admin_key, true)?; - - let app_state = ctx.data::().unwrap(); - let mtx_results = app_state.mtx_results.read().await.to_vec(); - let mtx_results_filtered = mtx_results.into_iter().filter(|mtx| { - for lifetime in mtx.section_lifetimes.values() { - let section_start = lifetime.start_time; - let section_end = match lifetime.duration { - Some(duration) => section_start + duration, - None => f64::MAX, // for this context of filtering, consider a not-yet-ended section to extend to max-time - }; - if section_start < end_time && section_end > start_time { - return true; - } - } - false - }).collect_vec(); - let mtx_results_filtered_transformed = mtx_results_filtered.into_iter().map(|mtx| MtxDataForAGQL::from_base(&mtx)).collect_vec(); - Ok(mtx_results_filtered_transformed) - } - - async fn lqInstances(&self, ctx: &async_graphql::Context<'_>, admin_key: String) -> Result, GQLError> { - ensure_admin_key_is_correct(admin_key, true)?; - - let app_state = ctx.data::().unwrap(); - let lqis: Vec = app_state.lqi_data.read().await.values().map(|a| a.clone()).collect(); - /*let lqis_filtered: Vec = lqis.into_iter().filter(|mtx| { - for lifetime in mtx.section_lifetimes.values() { - let section_start = lifetime.start_time; - let section_end = match lifetime.duration { - Some(duration) => section_start + duration, - None => f64::MAX, // for this context of filtering, consider a not-yet-ended section to extend to max-time - }; - if section_start < end_time && section_end > start_time { - return true; - } - } - false - }).collect();*/ - Ok(lqis) - } - - async fn basicInfo(&self, _ctx: &async_graphql::Context<'_>, admin_key: String) -> 
Result { - ensure_admin_key_is_correct(admin_key, true)?; - - let basic_info = get_basic_info_from_app_server().await?; - Ok(basic_info) - } - - async fn getGrafanaPassword(&self, _ctx: &async_graphql::Context<'_>, admin_key: String) -> Result { - ensure_admin_key_is_correct(admin_key, true)?; - - /*match try_get_k8s_secret("loki-stack-grafana".o(), "monitoring").await? { - Some(secret) => { - let password = secret.data.get("admin-password").ok_or(anyhow!("Field \"admin-password\" missing!"))?.as_str().ok_or(anyhow!("Field \"admin-password\" not a string!"))?; - Ok(password.to_owned()) - }, - None => Err(anyhow!("Could not find the \"loki-stack-grafana\" secret in kubernetes."))?, - }*/ - let secret = get_or_create_k8s_secret("loki-stack-grafana".o(), "monitoring", None).await?; - let password_encoded = secret.data.get("admin-password").ok_or(anyhow!("Field \"admin-password\" missing!"))?.as_str().ok_or(anyhow!("Field \"admin-password\" not a string!"))?; - let password_bytes = base64::decode(password_encoded)?; - //let password_bytes = base64::decode_config(password_encoded, URL_SAFE_NO_PAD)?; - let password = String::from_utf8(password_bytes)?; - Ok(password.to_owned()) - } - - async fn queryLoki(&self, _ctx: &async_graphql::Context<'_>, input: QueryLokiInput) -> Result { - let QueryLokiInput { adminKey, query, startTime, endTime, limit } = input; - ensure_admin_key_is_correct(adminKey, true)?; - - let endTime = endTime.unwrap_or((time_since_epoch_ms_i64() + 10000) * 1_000_000); // add 10s, in case of clock drift - let limit = limit.unwrap_or(10000); - - let log_entries = query_loki(query, startTime, endTime, limit).await?; - Ok(QueryLokiResult { logEntries: log_entries }) - } + /// async-graphql requires there to be at least one entry under the Query section + async fn empty(&self) -> &str { "" } + + async fn mtxResults(&self, ctx: &async_graphql::Context<'_>, admin_key: String, start_time: f64, end_time: f64) -> Result, GQLError> { + ensure_admin_key_is_correct(admin_key, true)?; + + let app_state = ctx.data::().unwrap(); + let mtx_results = app_state.mtx_results.read().await.to_vec(); + let mtx_results_filtered = mtx_results.into_iter().filter(|mtx| { + for lifetime in mtx.section_lifetimes.values() { + let section_start = lifetime.start_time; + let section_end = match lifetime.duration { + Some(duration) => section_start + duration, + None => f64::MAX, // for this context of filtering, consider a not-yet-ended section to extend to max-time + }; + if section_start < end_time && section_end > start_time { + return true; + } + } + false + }).collect_vec(); + let mtx_results_filtered_transformed = mtx_results_filtered.into_iter().map(|mtx| MtxDataForAGQL::from_base(&mtx)).collect_vec(); + Ok(mtx_results_filtered_transformed) + } + + async fn lqInstances(&self, ctx: &async_graphql::Context<'_>, admin_key: String) -> Result, GQLError> { + ensure_admin_key_is_correct(admin_key, true)?; + + let app_state = ctx.data::().unwrap(); + let lqis: Vec = app_state.lqi_data.read().await.values().map(|a| a.clone()).collect(); + /*let lqis_filtered: Vec = lqis.into_iter().filter(|mtx| { + for lifetime in mtx.section_lifetimes.values() { + let section_start = lifetime.start_time; + let section_end = match lifetime.duration { + Some(duration) => section_start + duration, + None => f64::MAX, // for this context of filtering, consider a not-yet-ended section to extend to max-time + }; + if section_start < end_time && section_end > start_time { + return true; + } + } + false + }).collect();*/ + Ok(lqis) + } 
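// The time-window check in mtxResults above is the standard interval-overlap test; pulled out as a
// standalone predicate for clarity (illustrative only, not part of the diff):
fn section_overlaps_window(section_start: f64, section_end: f64, window_start: f64, window_end: f64) -> bool {
	section_start < window_end && section_end > window_start
}
// e.g. a section spanning 5.0..9.0 overlaps a window of 8.0..20.0, while one spanning 1.0..3.0 does not;
// a still-running section uses f64::MAX as its end, so it overlaps every window that ends after the section began.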
+ + async fn basicInfo(&self, _ctx: &async_graphql::Context<'_>, admin_key: String) -> Result { + ensure_admin_key_is_correct(admin_key, true)?; + + let basic_info = get_basic_info_from_app_server().await?; + Ok(basic_info) + } + + async fn getGrafanaPassword(&self, _ctx: &async_graphql::Context<'_>, admin_key: String) -> Result { + ensure_admin_key_is_correct(admin_key, true)?; + + /*match try_get_k8s_secret("loki-stack-grafana".o(), "monitoring").await? { + Some(secret) => { + let password = secret.data.get("admin-password").ok_or(anyhow!("Field \"admin-password\" missing!"))?.as_str().ok_or(anyhow!("Field \"admin-password\" not a string!"))?; + Ok(password.to_owned()) + }, + None => Err(anyhow!("Could not find the \"loki-stack-grafana\" secret in kubernetes."))?, + }*/ + let secret = get_or_create_k8s_secret("loki-stack-grafana".o(), "monitoring", None).await?; + let password_encoded = secret.data.get("admin-password").ok_or(anyhow!("Field \"admin-password\" missing!"))?.as_str().ok_or(anyhow!("Field \"admin-password\" not a string!"))?; + let password_bytes = base64::decode(password_encoded)?; + //let password_bytes = base64::decode_config(password_encoded, URL_SAFE_NO_PAD)?; + let password = String::from_utf8(password_bytes)?; + Ok(password.to_owned()) + } + + async fn queryLoki(&self, _ctx: &async_graphql::Context<'_>, input: QueryLokiInput) -> Result { + let QueryLokiInput { adminKey, query, startTime, endTime, limit } = input; + ensure_admin_key_is_correct(adminKey, true)?; + + let endTime = endTime.unwrap_or((time_since_epoch_ms_i64() + 10000) * 1_000_000); // add 10s, in case of clock drift + let limit = limit.unwrap_or(10000); + + let log_entries = query_loki(query, startTime, endTime, limit).await?; + Ok(QueryLokiResult { logEntries: log_entries }) + } } #[derive(InputObject, Deserialize)] pub struct QueryLokiInput { - adminKey: String, - query: String, - startTime: i64, - endTime: Option, - limit: Option + adminKey: String, + query: String, + startTime: i64, + endTime: Option, + limit: Option } #[derive(SimpleObject, Debug)] struct QueryLokiResult { - logEntries: Vec, + logEntries: Vec, } pub async fn query_loki(query: String, startTime: i64, endTime: i64, limit: i64) -> Result, Error> { - let params_str = rust_shared::url::form_urlencoded::Serializer::new(String::new()) - .append_pair("direction", "BACKWARD") - //.append_pair("direction", "FORWARD") // commented, since makes behavior confusing (seems neither exactly limit-from-start nor limit-from-end) - .append_pair("query", &query) - .append_pair("start", &startTime.to_string()) - .append_pair("end", &endTime.to_string()) - .append_pair("limit", &limit.to_string()) - //.append_pair("step", &30.to_string()) - .finish(); - //info!("Querying loki with params-string:{}", params_str); - let response_as_str = - //reqwest::get(format!("http://loki-stack.monitoring.svc.cluster.local:3100/loki/api/v1/query_range?{params_str}")).await? - reqwest::get(format!("http://http-metrics.tcp.loki-stack.monitoring.svc.cluster.local:3100/loki/api/v1/query_range?{params_str}")).await? - .text().await?; - let res_as_json = JSONValue::from_str(&response_as_str).with_context(|| format!("Response text:{}", response_as_str))?; - //println!("Done! Response:{}", res_as_json); - - let result: Result<_, Error> = try { - let e = || anyhow!("Response json didn't match expected structure. 
@response_str:{}", response_as_str); - let results = res_as_json.get("data").ok_or(e())?.get("result").ok_or(e())?.as_array().ok_or(e())?; - if results.len() > 0 { - let log_entries = results.get(0).ok_or(e())?.get("values").ok_or(e())?.as_array().ok_or(e())?; - log_entries.to_owned() - } else { - vec![] - } - }; - let log_entries = result?; - Ok(log_entries) + let params_str = rust_shared::url::form_urlencoded::Serializer::new(String::new()) + .append_pair("direction", "BACKWARD") + //.append_pair("direction", "FORWARD") // commented, since makes behavior confusing (seems neither exactly limit-from-start nor limit-from-end) + .append_pair("query", &query) + .append_pair("start", &startTime.to_string()) + .append_pair("end", &endTime.to_string()) + .append_pair("limit", &limit.to_string()) + //.append_pair("step", &30.to_string()) + .finish(); + //info!("Querying loki with params-string:{}", params_str); + let response_as_str = + //reqwest::get(format!("http://loki-stack.monitoring.svc.cluster.local:3100/loki/api/v1/query_range?{params_str}")).await? + reqwest::get(format!("http://http-metrics.tcp.loki-stack.monitoring.svc.cluster.local:3100/loki/api/v1/query_range?{params_str}")).await? + .text().await?; + let res_as_json = JSONValue::from_str(&response_as_str).with_context(|| format!("Response text:{}", response_as_str))?; + //println!("Done! Response:{}", res_as_json); + + let result: Result<_, Error> = try { + let e = || anyhow!("Response json didn't match expected structure. @response_str:{}", response_as_str); + let results = res_as_json.get("data").ok_or(e())?.get("result").ok_or(e())?.as_array().ok_or(e())?; + if results.len() > 0 { + let log_entries = results.get(0).ok_or(e())?.get("values").ok_or(e())?.as_array().ok_or(e())?; + log_entries.to_owned() + } else { + vec![] + } + }; + let log_entries = result?; + Ok(log_entries) } pub async fn get_basic_info_from_app_server() -> Result { - let client = new_hyper_client_http(); - let req = rust_shared::hyper::Request::builder() - .method(Method::GET) - .uri("http://dm-app-server.default.svc.cluster.local:5110/basic-info") - .header("Content-Type", "application/json") - .body(json!({}).to_string().into())?; - let res = client.request(req).await?; - let res_as_json_str = body_to_str(res.into_body()).await?; - let res_as_json = JSONValue::from_str(&res_as_json_str)?; - //println!("Done! Response:{}", res_as_json); - - Ok(res_as_json) + let client = new_hyper_client_http(); + let req = rust_shared::hyper::Request::builder() + .method(Method::GET) + .uri("http://dm-app-server.default.svc.cluster.local:5110/basic-info") + .header("Content-Type", "application/json") + .body(json!({}).to_string().into())?; + let res = client.request(req).await?; + let res_as_json_str = body_to_str(res.into_body()).await?; + let res_as_json = JSONValue::from_str(&res_as_json_str)?; + //println!("Done! 
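// query_loki above assumes the shape of Loki's query_range response (sketched roughly below; the field
// names follow Loki's HTTP API, the values are placeholders):
// {
//   "status": "success",
//   "data": {
//     "resultType": "streams",
//     "result": [
//       { "stream": { "app": "..." }, "values": [["<unix-ns timestamp>", "<log line>"], ...] }
//     ]
//   }
// }
// Note that only result[0].values is returned; if the query matches several streams, entries from the
// remaining streams are dropped.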
Response:{}", res_as_json); + + Ok(res_as_json) } pub async fn tell_k8s_to_restart_app_server() -> Result { - info!("Beginning request to restart the app-server."); - let token = fs::read_to_string("/var/run/secrets/kubernetes.io/serviceaccount/token")?; - let k8s_host = env::var("KUBERNETES_SERVICE_HOST")?; - let k8s_port = env::var("KUBERNETES_PORT_443_TCP_PORT")?; - - // supply "true", to ignore pods that are already terminating [edit: this doesn't actually work, because terminating pods still show up as "running"; see below for working fix, through use of creation-time field] - let k8s_pods = get_k8s_pod_basic_infos("default", true).await?; - info!("Got k8s_pods: {:?}", k8s_pods); - let app_server_pod_info = k8s_pods.iter().filter(|a| a.name.starts_with("dm-app-server-")) - .sorted_by_key(|a| &a.creation_time_str).last() // sort by creation-time, then find last (this way we kill the most recent, if multiple pod matches exist) - .ok_or(anyhow!("App-server pod not found in list of active pods."))?.to_owned(); - - let client = get_reqwest_client_with_k8s_certs()?; - let req = client.delete(format!("https://{k8s_host}:{k8s_port}/api/v1/namespaces/default/pods/{}", app_server_pod_info.name)) - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {token}")) - .body(json!({}).to_string()).build()?; - let res = client.execute(req).await?; - - let res_as_json_str = res.text().await?; - //info!("Got response from k8s server, on trying to restart pod \"{app_server_pod_name}\": {}", res_as_json_str); - let res_as_json = JSONValue::from_str(&res_as_json_str)?; - - Ok(res_as_json) + info!("Beginning request to restart the app-server."); + let token = fs::read_to_string("/var/run/secrets/kubernetes.io/serviceaccount/token")?; + let k8s_host = env::var("KUBERNETES_SERVICE_HOST")?; + let k8s_port = env::var("KUBERNETES_PORT_443_TCP_PORT")?; + + // supply "true", to ignore pods that are already terminating [edit: this doesn't actually work, because terminating pods still show up as "running"; see below for working fix, through use of creation-time field] + let k8s_pods = get_k8s_pod_basic_infos("default", true).await?; + info!("Got k8s_pods: {:?}", k8s_pods); + let app_server_pod_info = k8s_pods.iter().filter(|a| a.name.starts_with("dm-app-server-")) + .sorted_by_key(|a| &a.creation_time_str).last() // sort by creation-time, then find last (this way we kill the most recent, if multiple pod matches exist) + .ok_or(anyhow!("App-server pod not found in list of active pods."))?.to_owned(); + + let client = get_reqwest_client_with_k8s_certs()?; + let req = client.delete(format!("https://{k8s_host}:{k8s_port}/api/v1/namespaces/default/pods/{}", app_server_pod_info.name)) + .header("Content-Type", "application/json") + .header("Authorization", format!("Bearer {token}")) + .body(json!({}).to_string()).build()?; + let res = client.execute(req).await?; + + let res_as_json_str = res.text().await?; + //info!("Got response from k8s server, on trying to restart pod \"{app_server_pod_name}\": {}", res_as_json_str); + let res_as_json = JSONValue::from_str(&res_as_json_str)?; + + Ok(res_as_json) } // mutations @@ -233,84 +233,84 @@ pub async fn tell_k8s_to_restart_app_server() -> Result { #[derive(SimpleObject, Deserialize)] struct GenericMutation_Result { - message: String, + message: String, } #[derive(SimpleObject)] struct StartMigration_Result { - #[graphql(name = "migrationID")] - migrationID: String, + #[graphql(name = "migrationID")] + migrationID: String, } #[derive(Default)] 
pub struct MutationShard_General; #[Object] impl MutationShard_General { - /*async fn clearLogEntries(&self, ctx: &async_graphql::Context<'_>, admin_key: String) -> Result { - ensure_admin_key_is_correct(admin_key, true)?; - - let app_state = ctx.data::().unwrap(); - let mut mtx_results = app_state.mtx_results.write().await; - mtx_results.clear(); - - Ok(GenericMutation_Result { - message: "success".to_string(), - }) - }*/ - - async fn restartAppServer(&self, _ctx: &async_graphql::Context<'_>, admin_key: String) -> Result { - ensure_admin_key_is_correct(admin_key, true)?; - - tell_k8s_to_restart_app_server().await?; - - Ok(GenericMutation_Result { - message: "success".to_string(), - }) - } - - async fn clearMtxResults(&self, ctx: &async_graphql::Context<'_>, admin_key: String) -> Result { - ensure_admin_key_is_correct(admin_key, true)?; - - let app_state = ctx.data::().unwrap(); - let mut mtx_results = app_state.mtx_results.write().await; - mtx_results.clear(); - - Ok(GenericMutation_Result { - message: "success".to_string(), - }) - } - - async fn startMigration(&self, ctx: &async_graphql::Context<'_>, admin_key: String, to_version: usize) -> Result { - ensure_admin_key_is_correct(admin_key, true)?; - - let msg_sender = ctx.data::>().unwrap(); - let migration_result = match to_version { - 2 => migrate_db_to_v2(msg_sender.clone()).await, - _ => Err(anyhow!("No migration-code exists for migrating to version {to_version}!")), - }; - if let Err(ref err) = migration_result { - error!("Got error while running migration:{}", err); - } - let migration_id = migration_result?; - - Ok(StartMigration_Result { - migrationID: migration_id, - }) - } - - async fn executeTestSequence(&self, ctx: &async_graphql::Context<'_>, admin_key: String, sequence: TestSequence) -> Result { - ensure_admin_key_is_correct(admin_key.clone(), true)?; - - //let message = execute_test_sequence_on_app_server(admin_key, sequence).await?; - - let msg_sender = ctx.data::>().unwrap(); - execute_test_sequence(sequence, msg_sender.clone()).await?; - /*if let Err(ref err) = migration_result { - error!("Got error while running migration:{}", err); - }*/ - - Ok(GenericMutation_Result { - message: "success".to_owned(), - }) - } + /*async fn clearLogEntries(&self, ctx: &async_graphql::Context<'_>, admin_key: String) -> Result { + ensure_admin_key_is_correct(admin_key, true)?; + + let app_state = ctx.data::().unwrap(); + let mut mtx_results = app_state.mtx_results.write().await; + mtx_results.clear(); + + Ok(GenericMutation_Result { + message: "success".to_string(), + }) + }*/ + + async fn restartAppServer(&self, _ctx: &async_graphql::Context<'_>, admin_key: String) -> Result { + ensure_admin_key_is_correct(admin_key, true)?; + + tell_k8s_to_restart_app_server().await?; + + Ok(GenericMutation_Result { + message: "success".to_string(), + }) + } + + async fn clearMtxResults(&self, ctx: &async_graphql::Context<'_>, admin_key: String) -> Result { + ensure_admin_key_is_correct(admin_key, true)?; + + let app_state = ctx.data::().unwrap(); + let mut mtx_results = app_state.mtx_results.write().await; + mtx_results.clear(); + + Ok(GenericMutation_Result { + message: "success".to_string(), + }) + } + + async fn startMigration(&self, ctx: &async_graphql::Context<'_>, admin_key: String, to_version: usize) -> Result { + ensure_admin_key_is_correct(admin_key, true)?; + + let msg_sender = ctx.data::>().unwrap(); + let migration_result = match to_version { + 2 => migrate_db_to_v2(msg_sender.clone()).await, + _ => Err(anyhow!("No migration-code exists 
for migrating to version {to_version}!")), + }; + if let Err(ref err) = migration_result { + error!("Got error while running migration:{}", err); + } + let migration_id = migration_result?; + + Ok(StartMigration_Result { + migrationID: migration_id, + }) + } + + async fn executeTestSequence(&self, ctx: &async_graphql::Context<'_>, admin_key: String, sequence: TestSequence) -> Result { + ensure_admin_key_is_correct(admin_key.clone(), true)?; + + //let message = execute_test_sequence_on_app_server(admin_key, sequence).await?; + + let msg_sender = ctx.data::>().unwrap(); + execute_test_sequence(sequence, msg_sender.clone()).await?; + /*if let Err(ref err) = migration_result { + error!("Got error while running migration:{}", err); + }*/ + + Ok(GenericMutation_Result { + message: "success".to_owned(), + }) + } } // subscriptions @@ -318,145 +318,145 @@ struct StartMigration_Result { #[derive(SimpleObject)] struct PingResult { - pong: String, - refreshPage: bool, + pong: String, + refreshPage: bool, } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct MigrationLogEntry { - pub text: String, + pub text: String, } #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct TestingLogEntry { - pub text: String, + pub text: String, } //#[derive(Clone, SimpleObject)] pub struct GQLSet_LogEntry { pub nodes: Vec } #[derive(Default)] pub struct SubscriptionShard_General; #[Subscription] impl SubscriptionShard_General { - #[graphql(name = "_ping")] - async fn _ping(&self, _ctx: &async_graphql::Context<'_>) -> impl Stream { - let pong = "pong".to_owned(); - // create the listed file in the app-server pod (eg. using Lens), if you've made an update that you need all clients to refresh for - let refreshPage = Path::new("./refreshPageForAllUsers_enabled").exists(); - - stream::once(async move { PingResult { - pong, - refreshPage, - } }) - } - - async fn logEntries<'a>(&self, ctx: &'a async_graphql::Context<'_>, admin_key: String) -> impl Stream, SubError>> + 'a { - let msg_sender = ctx.data::>().unwrap(); - let mut msg_receiver = msg_sender.new_receiver(); - // msg_receiver.len() includes entries from before its creation, so set the messages_processed variable appropriately - let mut messages_processed = msg_receiver.len(); - - //let result = tokio::spawn(async move { - let base_stream = async_stream::stream! 
{ - if !admin_key_is_correct(admin_key, true) { yield Err(SubError::new(format!("Admin-key is incorrect!"))); return; } - - //yield Ok(LogEntry::default()); - let mut new_entries = vec![]; // use buffer, for more efficient transfer+rerendering - //let mut entries_sent = 0; - loop { - //global_tick_helper().await; - - //let mut msg_receiver = Pin::new(&mut temp); - //use postage::prelude::Stream; - - /*let next_msg = msg_receiver.recv().unwrap(); - match next_msg { - GeneralMessage_Flume::LogEntryAdded(entry) => { - new_entries.push(entry); - }, - }*/ - - //println!("Waiting..."); - //match msg_receiver.recv().await { - //match msg_receiver.next().await { - match make_reliable(msg_receiver.recv(), Duration::from_millis(10)).await { - Err(_err) => break, // channel closed (program must have crashed), end loop - Ok(msg) => { - //println!("Msg#:{messages_processed} @msg:{:?}", msg); - match msg { - GeneralMessage::MigrateLogMessageAdded(_text) => {}, - GeneralMessage::TestingLogMessageAdded(_text) => {}, - GeneralMessage::LogEntryAdded(entry) => { - //entries_sent += 1; - //entry.message = entries_sent.to_string() + " " + &entry.message; - new_entries.push(entry); - }, - } - messages_processed += 1; - } - } - - // if no more messages bufferred up, and we've collected some new log-entries, then send that set of new-entries to the client - //if msg_receiver.is_empty() && !new_entries.is_empty() { - let messages_still_buffered = msg_receiver.len().checked_sub(messages_processed).unwrap_or(0); - //println!("@messages_still_buffered:{messages_still_buffered} @part1:{} @part2:{}", msg_receiver.len(), messages_processed); - if messages_still_buffered == 0 && !new_entries.is_empty() { - yield Ok(new_entries); - new_entries = vec![]; - } - } - }; - base_stream - /*}).await.unwrap(); - result*/ - } - - async fn migrateLogEntries<'a>(&self, ctx: &'a async_graphql::Context<'_>, admin_key: String) -> impl Stream> + 'a { - let msg_sender = ctx.data::>().unwrap(); - let mut msg_receiver = msg_sender.new_receiver(); - - let base_stream = async_stream::stream! { - if !admin_key_is_correct(admin_key, true) { yield Err(SubError::new(format!("Admin-key is incorrect!"))); return; } - - yield Ok(MigrationLogEntry { text: "Stream started...".to_owned() }); - loop { - //use postage::prelude::Stream; - - //let next_msg = msg_receiver.recv().await.unwrap(); - match msg_receiver.recv().await { - //match msg_receiver.next().await { - Err(_err) => break, // channel closed (program must have crashed), end loop - Ok(msg) => match msg { - GeneralMessage::LogEntryAdded(_entry) => {}, - GeneralMessage::TestingLogMessageAdded(_text) => {}, - GeneralMessage::MigrateLogMessageAdded(text) => { - yield Ok(MigrationLogEntry { text }); - }, - } - } - } - }; - base_stream - } - - async fn testingLogEntries<'a>(&self, ctx: &'a async_graphql::Context<'_>, admin_key: String) -> impl Stream> + 'a { - let msg_sender = ctx.data::>().unwrap(); - let mut msg_receiver = msg_sender.new_receiver(); - - let base_stream = async_stream::stream! 
{ - if !admin_key_is_correct(admin_key, true) { yield Err(SubError::new(format!("Admin-key is incorrect!"))); return; } - - yield Ok(TestingLogEntry { text: "Stream started...".to_owned() }); - loop { - match msg_receiver.recv().await { - Err(_err) => break, // channel closed (program must have crashed), end loop - Ok(msg) => match msg { - GeneralMessage::LogEntryAdded(_entry) => {}, - GeneralMessage::MigrateLogMessageAdded(_text) => {}, - GeneralMessage::TestingLogMessageAdded(text) => { - yield Ok(TestingLogEntry { text }); - }, - } - } - } - }; - base_stream - } + #[graphql(name = "_ping")] + async fn _ping(&self, _ctx: &async_graphql::Context<'_>) -> impl Stream { + let pong = "pong".to_owned(); + // create the listed file in the app-server pod (eg. using Lens), if you've made an update that you need all clients to refresh for + let refreshPage = Path::new("./refreshPageForAllUsers_enabled").exists(); + + stream::once(async move { PingResult { + pong, + refreshPage, + } }) + } + + async fn logEntries<'a>(&self, ctx: &'a async_graphql::Context<'_>, admin_key: String) -> impl Stream, SubError>> + 'a { + let msg_sender = ctx.data::>().unwrap(); + let mut msg_receiver = msg_sender.new_receiver(); + // msg_receiver.len() includes entries from before its creation, so set the messages_processed variable appropriately + let mut messages_processed = msg_receiver.len(); + + //let result = tokio::spawn(async move { + let base_stream = async_stream::stream! { + if !admin_key_is_correct(admin_key, true) { yield Err(SubError::new(format!("Admin-key is incorrect!"))); return; } + + //yield Ok(LogEntry::default()); + let mut new_entries = vec![]; // use buffer, for more efficient transfer+rerendering + //let mut entries_sent = 0; + loop { + //global_tick_helper().await; + + //let mut msg_receiver = Pin::new(&mut temp); + //use postage::prelude::Stream; + + /*let next_msg = msg_receiver.recv().unwrap(); + match next_msg { + GeneralMessage_Flume::LogEntryAdded(entry) => { + new_entries.push(entry); + }, + }*/ + + //println!("Waiting..."); + //match msg_receiver.recv().await { + //match msg_receiver.next().await { + match make_reliable(msg_receiver.recv(), Duration::from_millis(10)).await { + Err(_err) => break, // channel closed (program must have crashed), end loop + Ok(msg) => { + //println!("Msg#:{messages_processed} @msg:{:?}", msg); + match msg { + GeneralMessage::MigrateLogMessageAdded(_text) => {}, + GeneralMessage::TestingLogMessageAdded(_text) => {}, + GeneralMessage::LogEntryAdded(entry) => { + //entries_sent += 1; + //entry.message = entries_sent.to_string() + " " + &entry.message; + new_entries.push(entry); + }, + } + messages_processed += 1; + } + } + + // if no more messages bufferred up, and we've collected some new log-entries, then send that set of new-entries to the client + //if msg_receiver.is_empty() && !new_entries.is_empty() { + let messages_still_buffered = msg_receiver.len().checked_sub(messages_processed).unwrap_or(0); + //println!("@messages_still_buffered:{messages_still_buffered} @part1:{} @part2:{}", msg_receiver.len(), messages_processed); + if messages_still_buffered == 0 && !new_entries.is_empty() { + yield Ok(new_entries); + new_entries = vec![]; + } + } + }; + base_stream + /*}).await.unwrap(); + result*/ + } + + async fn migrateLogEntries<'a>(&self, ctx: &'a async_graphql::Context<'_>, admin_key: String) -> impl Stream> + 'a { + let msg_sender = ctx.data::>().unwrap(); + let mut msg_receiver = msg_sender.new_receiver(); + + let base_stream = async_stream::stream! 
{ + if !admin_key_is_correct(admin_key, true) { yield Err(SubError::new(format!("Admin-key is incorrect!"))); return; } + + yield Ok(MigrationLogEntry { text: "Stream started...".to_owned() }); + loop { + //use postage::prelude::Stream; + + //let next_msg = msg_receiver.recv().await.unwrap(); + match msg_receiver.recv().await { + //match msg_receiver.next().await { + Err(_err) => break, // channel closed (program must have crashed), end loop + Ok(msg) => match msg { + GeneralMessage::LogEntryAdded(_entry) => {}, + GeneralMessage::TestingLogMessageAdded(_text) => {}, + GeneralMessage::MigrateLogMessageAdded(text) => { + yield Ok(MigrationLogEntry { text }); + }, + } + } + } + }; + base_stream + } + + async fn testingLogEntries<'a>(&self, ctx: &'a async_graphql::Context<'_>, admin_key: String) -> impl Stream> + 'a { + let msg_sender = ctx.data::>().unwrap(); + let mut msg_receiver = msg_sender.new_receiver(); + + let base_stream = async_stream::stream! { + if !admin_key_is_correct(admin_key, true) { yield Err(SubError::new(format!("Admin-key is incorrect!"))); return; } + + yield Ok(TestingLogEntry { text: "Stream started...".to_owned() }); + loop { + match msg_receiver.recv().await { + Err(_err) => break, // channel closed (program must have crashed), end loop + Ok(msg) => match msg { + GeneralMessage::LogEntryAdded(_entry) => {}, + GeneralMessage::MigrateLogMessageAdded(_text) => {}, + GeneralMessage::TestingLogMessageAdded(text) => { + yield Ok(TestingLogEntry { text }); + }, + } + } + } + }; + base_stream + } } -} \ No newline at end of file +} diff --git a/Packages/monitor-backend/src/gql_.rs b/Packages/monitor-backend/src/gql_.rs index f4ba99470..a171e3d3e 100644 --- a/Packages/monitor-backend/src/gql_.rs +++ b/Packages/monitor-backend/src/gql_.rs @@ -1,3 +1,36 @@ +use crate::gql::_general::{MutationShard_General, QueryShard_General, SubscriptionShard_General}; +use crate::store::storage::AppStateArc; +use crate::utils::type_aliases::{ABReceiver, ABSender}; +use crate::GeneralMessage; +use axum::extract::ws::{CloseFrame, Message}; +use axum::extract::Extension; +use axum::extract::{FromRequest, WebSocketUpgrade}; +use axum::http::header::CONTENT_TYPE; +use axum::http::{self, uri::Uri, Request, Response, StatusCode}; +use axum::http::{HeaderValue, Method}; +use axum::response::{self, IntoResponse}; +use axum::routing::{get, on_service, post, MethodFilter}; +use axum::Error; +use axum::{extract, Router}; +use deadpool_postgres::{Manager, Pool}; +use futures_util::future::{BoxFuture, Ready}; +use futures_util::stream::{SplitSink, SplitStream}; +use futures_util::{future, FutureExt, Sink, SinkExt, StreamExt, TryFutureExt, TryStreamExt}; +use rust_shared::async_graphql::futures_util::task::{Context, Poll}; +use rust_shared::async_graphql::http::{graphiql_source, playground_source, GraphQLPlaygroundConfig}; +use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; +use rust_shared::async_graphql::{self, Data, EmptyMutation, EmptySubscription, MergedObject, MergedSubscription, ObjectType, Result, Schema, SubscriptionType, Variables}; +use rust_shared::async_graphql_axum::{GraphQLBatchRequest, GraphQLProtocol, GraphQLRequest, GraphQLResponse, GraphQLSubscription, GraphQLWebSocket}; +use rust_shared::bytes::Bytes; +use rust_shared::flume::{unbounded, Receiver, Sender}; +use rust_shared::hyper::header::CONTENT_LENGTH; +use rust_shared::rust_macros::{wrap_agql_schema_build, wrap_agql_schema_type, wrap_async_graphql, wrap_slow_macros}; +use 
rust_shared::tokio_postgres::Client; +use rust_shared::url::Url; +use rust_shared::utils::db::agql_ext::gql_general_extension::CustomExtensionCreator; +use rust_shared::utils::net::{body_to_str, AxumBody}; +use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{axum, serde_json, tower, tower_http}; use std::borrow::Cow; use std::collections::HashMap; use std::convert::Infallible; @@ -5,138 +38,101 @@ use std::env; use std::future::Future; use std::str::FromStr; use std::sync::{Arc, Mutex}; -use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig, graphiql_source}; -use rust_shared::async_graphql::{Schema, MergedObject, MergedSubscription, ObjectType, Data, Result, SubscriptionType, EmptyMutation, EmptySubscription, Variables, self}; -use rust_shared::bytes::Bytes; -use deadpool_postgres::{Pool, Manager}; -use rust_shared::hyper::header::CONTENT_LENGTH; -use rust_shared::rust_macros::{wrap_async_graphql, wrap_agql_schema_build, wrap_slow_macros, wrap_agql_schema_type}; -use rust_shared::tokio_postgres::{Client}; -use rust_shared::utils::db::agql_ext::gql_general_extension::CustomExtensionCreator; -use rust_shared::utils::net::{body_to_str, AxumBody}; -use rust_shared::utils::type_aliases::JSONValue; -use tower::make::Shared; -use tower::{Service, ServiceExt, BoxError, service_fn}; -use rust_shared::async_graphql::futures_util::task::{Context, Poll}; -use rust_shared::async_graphql::http::{WebSocketProtocols, WsMessage, ALL_WEBSOCKET_PROTOCOLS}; -use rust_shared::{axum, tower, tower_http, serde_json}; -use axum::http::{Method, HeaderValue}; -use axum::http::header::CONTENT_TYPE; -use axum::response::{self, IntoResponse}; -use axum::routing::{get, post, MethodFilter, on_service}; -use axum::{extract, Router}; -use axum::extract::ws::{CloseFrame, Message}; -use axum::extract::{FromRequest, WebSocketUpgrade}; -use axum::http::{self, uri::Uri, Request, Response, StatusCode}; -use axum::Error; -use axum::{ - extract::Extension, -}; -use rust_shared::url::Url; use std::{convert::TryFrom, net::SocketAddr}; -use futures_util::future::{BoxFuture, Ready}; -use futures_util::stream::{SplitSink, SplitStream}; -use futures_util::{future, Sink, SinkExt, StreamExt, FutureExt, TryFutureExt, TryStreamExt}; -use crate::{GeneralMessage}; -use crate::gql::_general::{MutationShard_General, QueryShard_General, SubscriptionShard_General}; -use crate::store::storage::AppStateArc; -use crate::utils::type_aliases::{ABSender, ABReceiver}; -use rust_shared::async_graphql_axum::{GraphQLRequest, GraphQLResponse, GraphQLSubscription, GraphQLProtocol, GraphQLWebSocket, GraphQLBatchRequest}; -use rust_shared::flume::{Sender, Receiver, unbounded}; +use tower::make::Shared; +use tower::{service_fn, BoxError, Service, ServiceExt}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(MergedObject, Default)] pub struct QueryRoot( - QueryShard_General, + QueryShard_General, ); #[derive(MergedObject, Default)] pub struct MutationRoot( - MutationShard_General, + MutationShard_General, ); #[derive(MergedSubscription, Default)] pub struct SubscriptionRoot( - SubscriptionShard_General, + SubscriptionShard_General, ); } -pub type RootSchema = wrap_agql_schema_type!{ - Schema +pub type RootSchema = wrap_agql_schema_type! 
{ + Schema }; /*async fn graphiql() -> impl IntoResponse { - // use the DEV/PROD value from the "ENVIRONMENT" env-var, to determine what the app-server's URL is (maybe temp) - let app_server_host = if env::var("ENVIRONMENT").unwrap_or("DEV".to_owned()) == "DEV" { "localhost:5100" } else { "debates.app" }; - response::Html(graphiql_source("/graphql", Some(&format!("wss://{app_server_host}/app-server/graphql")))) + // use the DEV/PROD value from the "ENVIRONMENT" env-var, to determine what the app-server's URL is (maybe temp) + let app_server_host = if env::var("ENVIRONMENT").unwrap_or("DEV".to_owned()) == "DEV" { "localhost:5100" } else { "debates.app" }; + response::Html(graphiql_source("/graphql", Some(&format!("wss://{app_server_host}/app-server/graphql")))) }*/ async fn graphql_playground() -> impl IntoResponse { - response::Html(playground_source( - GraphQLPlaygroundConfig::new("/app-server/graphql").subscription_endpoint("/app-server/graphql"), - )) + response::Html(playground_source(GraphQLPlaygroundConfig::new("/app-server/graphql").subscription_endpoint("/app-server/graphql"))) } /*async fn graphql_handler(schema: Extension, req: GraphQLRequest) -> GraphQLResponse { - schema.execute(req.into_inner()).await.into() + schema.execute(req.into_inner()).await.into() }*/ pub async fn have_own_graphql_handle_request(req: Request, schema: RootSchema) -> String { - // read request's body (from frontend) - let req_as_str: String = body_to_str(req.into_body()).await.unwrap(); - let req_as_json = JSONValue::from_str(&req_as_str).unwrap(); - - // prepare request for graphql engine - //let gql_req = async_graphql::Request::new(req_as_str); - let gql_req = async_graphql::Request::new(req_as_json["query"].as_str().unwrap()); - let gql_req = match req_as_json["operationName"].as_str() { - Some(op_name) => gql_req.operation_name(op_name), - None => gql_req, - }; - let gql_req = gql_req.variables(Variables::from_json(req_as_json["variables"].clone())); - - // send request to graphql engine, and read response - let gql_response = schema.execute(gql_req).await; - //let response_body: String = gql_response.data.to_string(); // this doesn't output valid json (eg. no quotes around keys) - let response_str: String = serde_json::to_string(&gql_response).unwrap(); - - response_str + // read request's body (from frontend) + let req_as_str: String = body_to_str(req.into_body()).await.unwrap(); + let req_as_json = JSONValue::from_str(&req_as_str).unwrap(); + + // prepare request for graphql engine + //let gql_req = async_graphql::Request::new(req_as_str); + let gql_req = async_graphql::Request::new(req_as_json["query"].as_str().unwrap()); + let gql_req = match req_as_json["operationName"].as_str() { + Some(op_name) => gql_req.operation_name(op_name), + None => gql_req, + }; + let gql_req = gql_req.variables(Variables::from_json(req_as_json["variables"].clone())); + + // send request to graphql engine, and read response + let gql_response = schema.execute(gql_req).await; + //let response_body: String = gql_response.data.to_string(); // this doesn't output valid json (eg. 
no quotes around keys) + let response_str: String = serde_json::to_string(&gql_response).unwrap(); + + response_str } pub async fn graphql_handler(Extension(schema): Extension, req: Request) -> Response { - let response_str = have_own_graphql_handle_request(req, schema).await; + let response_str = have_own_graphql_handle_request(req, schema).await; - // send response (to frontend) - let mut response = Response::builder().body(axum::body::Body::from(response_str)).unwrap(); - response.headers_mut().append(CONTENT_TYPE, HeaderValue::from_static("content-type: application/json; charset=utf-8")); - return response; + // send response (to frontend) + let mut response = Response::builder().body(axum::body::Body::from(response_str)).unwrap(); + response.headers_mut().append(CONTENT_TYPE, HeaderValue::from_static("content-type: application/json; charset=utf-8")); + return response; } pub async fn extend_router( - app: Router, - msg_sender: ABSender, msg_receiver: ABReceiver, - //msg_sender_test: Sender, msg_receiver_test: Receiver, - app_state: AppStateArc + app: Router, + msg_sender: ABSender, + msg_receiver: ABReceiver, + //msg_sender_test: Sender, msg_receiver_test: Receiver, + app_state: AppStateArc, ) -> Router { - let schema = - wrap_agql_schema_build!{ - Schema::build(QueryRoot::default(), MutationRoot::default(), SubscriptionRoot::default()) - } - .data(msg_sender) - .data(msg_receiver) - /*.data(msg_sender_test) - .data(msg_receiver_test)*/ - .data(app_state) - .extension(CustomExtensionCreator) - .finish(); - - let gql_subscription_service = GraphQLSubscription::new(schema.clone()); - - let result = app - //.route("/graphiql", get(graphiql)) - .route("/gql-playground", get(graphql_playground)) - .route("/graphql", on_service(MethodFilter::GET, gql_subscription_service).post(graphql_handler)) - .layer(Extension(schema)); - - result -} \ No newline at end of file + let schema = wrap_agql_schema_build! 
{ + Schema::build(QueryRoot::default(), MutationRoot::default(), SubscriptionRoot::default()) + } + .data(msg_sender) + .data(msg_receiver) + /*.data(msg_sender_test) + .data(msg_receiver_test)*/ + .data(app_state) + .extension(CustomExtensionCreator) + .finish(); + + let gql_subscription_service = GraphQLSubscription::new(schema.clone()); + + let result = app + //.route("/graphiql", get(graphiql)) + .route("/gql-playground", get(graphql_playground)) + .route("/graphql", on_service(MethodFilter::GET, gql_subscription_service).post(graphql_handler)) + .layer(Extension(schema)); + + result +} diff --git a/Packages/monitor-backend/src/links/app_server_link.rs b/Packages/monitor-backend/src/links/app_server_link.rs index 16bfc7c7f..23242e5c2 100644 --- a/Packages/monitor-backend/src/links/app_server_link.rs +++ b/Packages/monitor-backend/src/links/app_server_link.rs @@ -1,137 +1,141 @@ - use std::time::Duration; -use rust_shared::async_graphql::{SimpleObject, Json}; -use rust_shared::flume::Sender; use futures_util::StreamExt; +use rust_shared::async_graphql::{Json, SimpleObject}; +use rust_shared::flume::Sender; use rust_shared::indexmap::IndexMap; use rust_shared::links::app_server_to_monitor_backend::Message_ASToMB; use rust_shared::rust_macros::wrap_slow_macros; use rust_shared::serde::{Deserialize, Serialize}; -use rust_shared::serde_json::{json, self}; -use rust_shared::tokio::{time, self}; -use rust_shared::utils::type_aliases::JSONValue; -use tracing::{debug, error, info, trace}; +use rust_shared::serde_json::{self, json}; +use rust_shared::tokio::{self, time}; +use rust_shared::tokio_tungstenite::{ + connect_async, + tungstenite::{connect, Message}, +}; use rust_shared::url::Url; -use rust_shared::tokio_tungstenite::{tungstenite::{connect, Message}, connect_async}; +use rust_shared::utils::type_aliases::JSONValue; use rust_shared::uuid::Uuid; +use tracing::{debug, error, info, trace}; -use crate::{GeneralMessage, utils::type_aliases::{ABSender}, store::storage::{AppStateArc, LQInstance_Partial}}; +use crate::{ + store::storage::{AppStateArc, LQInstance_Partial}, + utils::type_aliases::ABSender, + GeneralMessage, +}; pub async fn connect_to_app_server(app_state: AppStateArc, sender: ABSender) { - loop { - tokio::time::sleep(Duration::from_secs(5)).await; + loop { + tokio::time::sleep(Duration::from_secs(5)).await; - let url = Url::parse("ws://dm-app-server.default.svc.cluster.local:5110/monitor-backend-link").unwrap(); - let connect_attempt_fut = connect_async(url); - let (mut socket, response) = match time::timeout(Duration::from_secs(3), connect_attempt_fut).await { - // if timeout happens, just ignore (there might have been local network glitch or something) - Err(_err) => { - error!("Timed out trying to connect to app-server..."); - continue; - }, - Ok(connect_result) => { - match connect_result { - Ok(a) => a, - Err(err) => { - error!("Couldn't connect to app-server websocket endpoint:{}", err); - continue; - } - } - }, - }; - info!("Connection made with app-server websocket endpoint. 
@response:{response:?}"); + let url = Url::parse("ws://dm-app-server.default.svc.cluster.local:5110/monitor-backend-link").unwrap(); + let connect_attempt_fut = connect_async(url); + let (mut socket, response) = match time::timeout(Duration::from_secs(3), connect_attempt_fut).await { + // if timeout happens, just ignore (there might have been local network glitch or something) + Err(_err) => { + error!("Timed out trying to connect to app-server..."); + continue; + }, + Ok(connect_result) => match connect_result { + Ok(a) => a, + Err(err) => { + error!("Couldn't connect to app-server websocket endpoint:{}", err); + continue; + }, + }, + }; + info!("Connection made with app-server websocket endpoint. @response:{response:?}"); - /*match socket.write_message(Message::Text(json!({ - "action": "listen", - "data": { - "some": ["data1", "data2"] - } - }).to_string())) { - Ok(_) => {}, - Err(err) => { - debug!("Link with app-server lost:{}", err); - return; - }, - }*/ + /*match socket.write_message(Message::Text(json!({ + "action": "listen", + "data": { + "some": ["data1", "data2"] + } + }).to_string())) { + Ok(_) => {}, + Err(err) => { + debug!("Link with app-server lost:{}", err); + return; + }, + }*/ - loop { - let msg = match socket.next().await { - None => continue, - Some(entry) => match entry { - Ok(msg) => msg, - Err(err) => { - error!("Error reading message from link with app-server:{}", err); - break; - } - }, - }; - let msg_as_str = msg.into_text().unwrap(); - let msg: Message_ASToMB = match serde_json::from_str(&msg_as_str) { - Ok(a) => a, - Err(err) => { - error!("Got error converting message-string into Message_ASToMB. @msg_str:{msg_as_str} @err:{err}"); - continue; - } - }; + loop { + let msg = match socket.next().await { + None => continue, + Some(entry) => match entry { + Ok(msg) => msg, + Err(err) => { + error!("Error reading message from link with app-server:{}", err); + break; + }, + }, + }; + let msg_as_str = msg.into_text().unwrap(); + let msg: Message_ASToMB = match serde_json::from_str(&msg_as_str) { + Ok(a) => a, + Err(err) => { + error!("Got error converting message-string into Message_ASToMB. @msg_str:{msg_as_str} @err:{err}"); + continue; + }, + }; - match msg { - Message_ASToMB::LogEntryAdded { entry } => { - //println!("Received log-entry:{}", msg_as_str); - match sender.broadcast(GeneralMessage::LogEntryAdded(entry)).await { - Ok(_) => { - //println!("Test1:{count}"); - //println!("Test1"); - }, - Err(err) => error!("Cannot send log-entry; all receivers were dropped. @err:{err}"), - } - }, - Message_ASToMB::MtxEntryDone { mtx } => { - trace!("Got mtx-result:{}", serde_json::to_string_pretty(&mtx).unwrap()); + match msg { + Message_ASToMB::LogEntryAdded { entry } => { + //println!("Received log-entry:{}", msg_as_str); + match sender.broadcast(GeneralMessage::LogEntryAdded(entry)).await { + Ok(_) => { + //println!("Test1:{count}"); + //println!("Test1"); + }, + Err(err) => error!("Cannot send log-entry; all receivers were dropped. 
@err:{err}"), + } + }, + Message_ASToMB::MtxEntryDone { mtx } => { + trace!("Got mtx-result:{}", serde_json::to_string_pretty(&mtx).unwrap()); - let mut mtx_results = app_state.mtx_results.write().await; - if let Some(existing_entry) = mtx_results.iter().enumerate().find(|(_, entry)| entry.id == mtx.id) { - let index = existing_entry.0; - mtx_results.remove(index); - } + let mut mtx_results = app_state.mtx_results.write().await; + if let Some(existing_entry) = mtx_results.iter().enumerate().find(|(_, entry)| entry.id == mtx.id) { + let index = existing_entry.0; + mtx_results.remove(index); + } - mtx_results.push(mtx); - if mtx_results.len() > 5000 { - let entries_to_remove = mtx_results.len() - 5000; - mtx_results.drain(0..entries_to_remove); - } - }, - Message_ASToMB::LQInstanceUpdated { table_name, filter, last_entries, watchers_count, deleting } => { - trace!("LQ-instance updated:{}", json!({ - "table_name": table_name, - "filter": filter, - "last_entries": last_entries, - "watchers_count": watchers_count, - "deleting": deleting, - }).to_string()); + mtx_results.push(mtx); + if mtx_results.len() > 5000 { + let entries_to_remove = mtx_results.len() - 5000; + mtx_results.drain(0..entries_to_remove); + } + }, + Message_ASToMB::LQInstanceUpdated { table_name, filter, last_entries, watchers_count, deleting } => { + trace!( + "LQ-instance updated:{}", + json!({ + "table_name": table_name, + "filter": filter, + "last_entries": last_entries, + "watchers_count": watchers_count, + "deleting": deleting, + }) + .to_string() + ); - let mut lqi_data = app_state.lqi_data.write().await; - let key = get_lq_instance_key(&table_name, &filter); - match deleting { - false => lqi_data.insert(key, LQInstance_Partial { - table_name, - filter, - last_entries: Json::from(last_entries), - entry_watcher_count: watchers_count as usize, - }), - true => lqi_data.remove(&key), - }; - } - } - } - } + let mut lqi_data = app_state.lqi_data.write().await; + let key = get_lq_instance_key(&table_name, &filter); + match deleting { + false => lqi_data.insert(key, LQInstance_Partial { table_name, filter, last_entries: Json::from(last_entries), entry_watcher_count: watchers_count as usize }), + true => lqi_data.remove(&key), + }; + }, + } + } + } } // from lq_instance.rs in app-server fn get_lq_instance_key(table_name: &str, filter: &JSONValue) -> String { - //format!("@table:{} @filter:{:?}", table_name, filter) - json!({ - "table": table_name, - "filter": filter, - }).to_string() -} \ No newline at end of file + //format!("@table:{} @filter:{:?}", table_name, filter) + json!({ + "table": table_name, + "filter": filter, + }) + .to_string() +} diff --git a/Packages/monitor-backend/src/links/pod_proxies.rs b/Packages/monitor-backend/src/links/pod_proxies.rs index 89e44db03..a481d1ecd 100644 --- a/Packages/monitor-backend/src/links/pod_proxies.rs +++ b/Packages/monitor-backend/src/links/pod_proxies.rs @@ -1,37 +1,37 @@ -use std::convert::Infallible; -use std::net::{IpAddr, Ipv4Addr}; -use std::str::FromStr; -use rust_shared::anyhow::{anyhow, Error, bail, ensure}; +use async_graphql_axum::{GraphQLRequest, GraphQLResponse, GraphQLSubscription}; +use axum::body::HttpBody; +use axum::extract::{Extension, FromRequest}; +use axum::http::header::CONTENT_TYPE; +use axum::http::{uri::Uri, Request, Response}; +use axum::http::{HeaderValue, Method}; +use axum::response::{self, IntoResponse}; +use axum::routing::{get, on_service, post, MethodFilter}; +use axum::{extract, Json, Router}; +use futures::future::{self, Future}; +use 
rust_shared::anyhow::{anyhow, bail, ensure, Error}; +use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; +use rust_shared::async_graphql::{self, MergedObject, MergedSubscription, Schema, Variables}; use rust_shared::axum::extract::Path; use rust_shared::bytes::Bytes; use rust_shared::domains::DomainsConstants; use rust_shared::http_body_util::Full; +use rust_shared::hyper::service::service_fn; use rust_shared::hyper::{body::Body, StatusCode}; use rust_shared::hyper_util::client::legacy::connect::HttpConnector; use rust_shared::itertools::Itertools; use rust_shared::reqwest::header::SET_COOKIE; +use rust_shared::serde_json::{self, json}; +use rust_shared::url::Url; use rust_shared::utils::general_::extensions::ToOwnedV; use rust_shared::utils::net::{full_body_from_str, hyper_response_to_axum_response, AxumBody, AxumResult, AxumResultE, AxumResultI, HyperClient}; -use rust_shared::{futures, axum, tower, tower_http, async_graphql_axum, base64}; -use axum::body::HttpBody; -use rust_shared::hyper::service::{service_fn}; -use axum::extract::{FromRequest, Extension}; -use axum::http::{Method, HeaderValue}; -use axum::http::header::CONTENT_TYPE; -use axum::response::{self, IntoResponse}; -use axum::routing::{get, post, MethodFilter, on_service}; -use axum::{extract, Router, Json}; -use axum::http::{uri::Uri, Request, Response}; -use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; -use rust_shared::async_graphql::{Schema, MergedObject, MergedSubscription, Variables, self}; -use async_graphql_axum::{GraphQLRequest, GraphQLResponse, GraphQLSubscription}; -use rust_shared::serde_json::{json, self}; use rust_shared::utils::type_aliases::JSONValue; -use tracing::info; -use rust_shared::url::Url; +use rust_shared::{async_graphql_axum, axum, base64, futures, tower, tower_http}; +use std::convert::Infallible; +use std::net::{IpAddr, Ipv4Addr}; +use std::str::FromStr; use std::{convert::TryFrom, net::SocketAddr}; -use tower_http::cors::{CorsLayer}; -use futures::future::{self, Future}; +use tower_http::cors::CorsLayer; +use tracing::info; use crate::gql::_general::ensure_admin_key_is_correct; @@ -41,12 +41,15 @@ pub const ALERTMANAGER_URL: &str = "http://loki-stack-prometheus-alertmanager.mo /// Endpoint needed to workaround cross-domain cookie restrictions, for when monitor-client is served by webpack. /// See CookieTransferHelper.tsx for the client-side handling of the exchange. pub async fn store_admin_key_cookie(_req: Request) -> AxumResultI { - let response_result: Result<_, Error> = try { - if !DomainsConstants::new().on_server_and_dev { Err(anyhow!("Can only use this helper in a dev cluster."))?; } + let response_result: Result<_, Error> = try { + if !DomainsConstants::new().on_server_and_dev { + Err(anyhow!("Can only use this helper in a dev cluster."))?; + } - let response = Response::builder() - .header(CONTENT_TYPE, "text/html; charset=utf-8") - .body(AxumBody::new(r#""#.o())) - .unwrap(); - response - }; - match response_result { - Ok(response) => Ok(response), - Err(err) => { - let response_json = json!({ "error": format!("Error occurred. 
@error:{}", err) }); - let response = Response::builder() - .status(StatusCode::INTERNAL_SERVER_ERROR) - //.header(CONTENT_TYPE, "text/html; charset=utf-8") - .header(CONTENT_TYPE, "application/json; charset=utf-8") - .body(AxumBody::new(response_json.to_string())) - .unwrap(); - Ok(response) - } - } + "# + .o(), + )) + .unwrap(); + response + }; + match response_result { + Ok(response) => Ok(response), + Err(err) => { + let response_json = json!({ "error": format!("Error occurred. @error:{}", err) }); + let response = Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + //.header(CONTENT_TYPE, "text/html; charset=utf-8") + .header(CONTENT_TYPE, "application/json; charset=utf-8") + .body(AxumBody::new(response_json.to_string())) + .unwrap(); + Ok(response) + }, + } } pub fn get_admin_key_from_proxy_request(req: &Request) -> Result { - // use cookies (instead of eg. an "admin-key" header) so the key gets sent with every proxy-request (ie. from the proxied page loading its subresources) - if let Some(cookie_str) = req.headers().get("cookie") { - let cookie_entries = cookie_str.to_str()?.split("; ").collect_vec(); - for cookie_entry in cookie_entries { - let (cookie_name, cookie_value) = cookie_entry.split_once("=").ok_or(anyhow!("Invalid cookie-str"))?; - if cookie_name == "adminKey" { - let admin_key_base64 = cookie_value; - let admin_key = String::from_utf8(base64::decode(admin_key_base64)?)?; - return Ok(admin_key); - } - } - } - bail!("A \"cookie\" header must be provided, with an \"adminKey\" cookie."); + // use cookies (instead of eg. an "admin-key" header) so the key gets sent with every proxy-request (ie. from the proxied page loading its subresources) + if let Some(cookie_str) = req.headers().get("cookie") { + let cookie_entries = cookie_str.to_str()?.split("; ").collect_vec(); + for cookie_entry in cookie_entries { + let (cookie_name, cookie_value) = cookie_entry.split_once("=").ok_or(anyhow!("Invalid cookie-str"))?; + if cookie_name == "adminKey" { + let admin_key_base64 = cookie_value; + let admin_key = String::from_utf8(base64::decode(admin_key_base64)?)?; + return Ok(admin_key); + } + } + } + bail!("A \"cookie\" header must be provided, with an \"adminKey\" cookie."); } pub async fn maybe_proxy_to_prometheus(Extension(client): Extension, req: Request) -> AxumResultI { - let response_result: Result<_, Error> = try { - let admin_key = get_admin_key_from_proxy_request(&req)?; - ensure_admin_key_is_correct(admin_key, true)?; - proxy_to_service_at_port(client, req, PROMETHEUS_URL.to_owned()).await? - }; - finalize_proxy_response(response_result, "prometheus").await + let response_result: Result<_, Error> = try { + let admin_key = get_admin_key_from_proxy_request(&req)?; + ensure_admin_key_is_correct(admin_key, true)?; + proxy_to_service_at_port(client, req, PROMETHEUS_URL.to_owned()).await? + }; + finalize_proxy_response(response_result, "prometheus").await } pub async fn maybe_proxy_to_alertmanager(Extension(client): Extension, req: Request) -> AxumResultI { - let response_result: Result<_, Error> = try { - let admin_key = get_admin_key_from_proxy_request(&req)?; - ensure_admin_key_is_correct(admin_key, true)?; - proxy_to_service_at_port(client, req, ALERTMANAGER_URL.to_owned()).await? 
- }; - finalize_proxy_response(response_result, "alertmanager").await + let response_result: Result<_, Error> = try { + let admin_key = get_admin_key_from_proxy_request(&req)?; + ensure_admin_key_is_correct(admin_key, true)?; + proxy_to_service_at_port(client, req, ALERTMANAGER_URL.to_owned()).await? + }; + finalize_proxy_response(response_result, "alertmanager").await } async fn finalize_proxy_response(response_result: AxumResultE, service_name: &str) -> AxumResultI { - match response_result { - Ok(response) => Ok(response), - Err(err) => { - let response_json = json!({ "error": format!("Error occurred during setup of proxy to {} service. @error:{}", service_name, err) }); - let response = Response::builder() - .status(StatusCode::INTERNAL_SERVER_ERROR) - .header(CONTENT_TYPE, "application/json; charset=utf-8") - .body(AxumBody::new(response_json.to_string())) - .unwrap(); - Ok(response) - } - } + match response_result { + Ok(response) => Ok(response), + Err(err) => { + let response_json = json!({ "error": format!("Error occurred during setup of proxy to {} service. @error:{}", service_name, err) }); + let response = Response::builder().status(StatusCode::INTERNAL_SERVER_ERROR).header(CONTENT_TYPE, "application/json; charset=utf-8").body(AxumBody::new(response_json.to_string())).unwrap(); + Ok(response) + }, + } } pub async fn proxy_to_service_at_port(client: HyperClient, mut req: Request, uri_base: String) -> AxumResultE { - let path = req.uri().path(); - let path_and_query = req - .uri() - .path_and_query() - .map_or(path, |v| v.as_str()); - let path_and_query_fixed = format!("/{}", path_and_query.split("/").skip(3).join("/")); + let path = req.uri().path(); + let path_and_query = req.uri().path_and_query().map_or(path, |v| v.as_str()); + let path_and_query_fixed = format!("/{}", path_and_query.split("/").skip(3).join("/")); - //let uri = format!("http://127.0.0.1:{}{}", port, path_query); - //let uri = format!("{}{}", APP_SERVER_JS_URL, path_query); - let uri = format!("{}{}", uri_base, path_and_query_fixed); - //println!("Sending proxy request to:{}", uri); + //let uri = format!("http://127.0.0.1:{}{}", port, path_query); + //let uri = format!("{}{}", APP_SERVER_JS_URL, path_query); + let uri = format!("{}{}", uri_base, path_and_query_fixed); + //println!("Sending proxy request to:{}", uri); - *req.uri_mut() = Uri::try_from(uri.clone())?; + *req.uri_mut() = Uri::try_from(uri.clone())?; - match client.request(req).await { - Ok(response) => Ok(hyper_response_to_axum_response(response).await), - // one example of why this can fail: if the target pod crashed - Err(err) => { - let json = json!({ "error": format!("Error occurred while trying to send get command to pod at uri \"{}\":{}", uri, err) }); - Ok(Response::builder().status(StatusCode::BAD_GATEWAY) - .header(CONTENT_TYPE, "application/json; charset=utf-8") - .body(AxumBody::new(json.to_string()))?) - }, - } -} \ No newline at end of file + match client.request(req).await { + Ok(response) => Ok(hyper_response_to_axum_response(response).await), + // one example of why this can fail: if the target pod crashed + Err(err) => { + let json = json!({ "error": format!("Error occurred while trying to send get command to pod at uri \"{}\":{}", uri, err) }); + Ok(Response::builder().status(StatusCode::BAD_GATEWAY).header(CONTENT_TYPE, "application/json; charset=utf-8").body(AxumBody::new(json.to_string()))?) 
+ }, + } +} diff --git a/Packages/monitor-backend/src/main.rs b/Packages/monitor-backend/src/main.rs index 6db453e25..21040594e 100644 --- a/Packages/monitor-backend/src/main.rs +++ b/Packages/monitor-backend/src/main.rs @@ -21,104 +21,108 @@ // to avoid false-positives, of certain functions, as well as for [Serialize/Deserialize]_Stub macro-usage (wrt private fields) dead_code, )] +#![feature(stmt_expr_attributes)] // allow attributes on expressions, eg. for disabling rustfmt per-expression +use axum::{ + http::{header::CONTENT_TYPE, Method}, + middleware, + response::{self, Html, IntoResponse}, + routing::{any_service, get, get_service, post}, + Router, +}; use rust_shared::async_graphql::http::{playground_source, GraphQLPlaygroundConfig}; use rust_shared::axum::http::HeaderName; use rust_shared::axum::routing::{on_service, MethodFilter}; use rust_shared::axum::Extension; use rust_shared::bytes::Bytes; +use rust_shared::flume::{unbounded, Receiver, Sender}; use rust_shared::http_body_util::combinators::BoxBody; use rust_shared::http_body_util::Full; use rust_shared::hyper::body::Body; +use rust_shared::hyper::{ + header::{self, FORWARDED}, + Request, Response, StatusCode, Uri, +}; use rust_shared::hyper_util::client::legacy::Client; use rust_shared::hyper_util::rt::TokioExecutor; use rust_shared::itertools::Itertools; use rust_shared::links::app_server_to_monitor_backend::LogEntry; use rust_shared::tokio::net::TcpListener; +use rust_shared::tokio::{ + runtime::Runtime, + sync::{broadcast, Mutex}, +}; use rust_shared::tower_http::cors::AllowOrigin; use rust_shared::utils::general::k8s_env; use rust_shared::utils::net::{body_to_bytes, new_hyper_client_http, AxumBody}; -use rust_shared::{futures, axum, tower, tower_http, tokio}; -use axum::{ - response::{Html, self, IntoResponse}, - routing::{get, any_service, post, get_service}, - Router, http::{ - Method, - header::{CONTENT_TYPE} - }, - middleware, +use rust_shared::utils::type_aliases::{FReceiver, FSender}; +use rust_shared::{axum, futures, tokio, tower, tower_http}; +use std::{ + backtrace::Backtrace, + collections::HashSet, + convert::Infallible, + net::{IpAddr, SocketAddr}, + panic, + str::FromStr, + sync::Arc, + time::Duration, }; -use rust_shared::hyper::{Request, Response, StatusCode, header::{FORWARDED, self}, Uri}; use tower::ServiceExt; -use tower_http::{cors::{CorsLayer}, services::ServeFile}; +use tower_http::services::ServeDir; +use tower_http::{cors::CorsLayer, services::ServeFile}; use tracing::{error, info, Level, Metadata}; -use tracing_subscriber::{Layer, filter, prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt}; -use rust_shared::{utils::type_aliases::{FSender, FReceiver}}; -use std::{ - collections::HashSet, - net::{SocketAddr, IpAddr}, - sync::{Arc}, panic, backtrace::Backtrace, convert::Infallible, str::FromStr, time::Duration, -}; -use rust_shared::tokio::{sync::{broadcast, Mutex}, runtime::Runtime}; -use rust_shared::flume::{Sender, Receiver, unbounded}; -use tower_http::{services::ServeDir}; +use tracing_subscriber::{filter, prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt, Layer}; -use crate::links::pod_proxies::{maybe_proxy_to_prometheus, maybe_proxy_to_alertmanager, store_admin_key_cookie}; -use crate::{store::storage::{AppState, AppStateArc}, links::app_server_link::connect_to_app_server, utils::type_aliases::{ABReceiver, ABSender}}; +use crate::links::pod_proxies::{maybe_proxy_to_alertmanager, maybe_proxy_to_prometheus, store_admin_key_cookie}; +use crate::{ + 
links::app_server_link::connect_to_app_server, + store::storage::{AppState, AppStateArc}, + utils::type_aliases::{ABReceiver, ABSender}, +}; mod gql_; mod gql { - pub mod _general; + pub mod _general; } mod pgclient; mod links { - pub mod app_server_link; - pub mod pod_proxies; + pub mod app_server_link; + pub mod pod_proxies; } mod utils { - //pub mod general; - pub mod type_aliases; + //pub mod general; + pub mod type_aliases; } mod store { - pub mod storage; + pub mod storage; } mod testing { - pub mod general; + pub mod general; } /*mod connections { - pub mod from_app_server; + pub mod from_app_server; }*/ mod migrations { - pub mod v2; + pub mod v2; } pub fn get_cors_layer() -> CorsLayer { - // ref: https://docs.rs/tower-http/latest/tower_http/cors/index.html - CorsLayer::new() - .allow_origin(AllowOrigin::predicate(|_, _| { true })) // must use true (ie. have response's "allowed-origin" always equal the request origin) instead of "*", since we have credential-inclusion enabled - .allow_methods(vec![ - Method::GET, - Method::POST, - Method::PUT, - Method::DELETE, - Method::HEAD, - Method::OPTIONS, - Method::CONNECT, - Method::PATCH, - Method::TRACE, - ]) - .allow_headers(vec![ - CONTENT_TYPE, // needed, because the POST requests include a content-type header (which is not on the approved-by-default list) - HeaderName::from_str("admin-key").unwrap(), - ]) - .allow_credentials(true) + // ref: https://docs.rs/tower-http/latest/tower_http/cors/index.html + CorsLayer::new() + .allow_origin(AllowOrigin::predicate(|_, _| true)) // must use true (ie. have response's "allowed-origin" always equal the request origin) instead of "*", since we have credential-inclusion enabled + .allow_methods(vec![Method::GET, Method::POST, Method::PUT, Method::DELETE, Method::HEAD, Method::OPTIONS, Method::CONNECT, Method::PATCH, Method::TRACE]) + .allow_headers(vec![ + CONTENT_TYPE, // needed, because the POST requests include a content-type header (which is not on the approved-by-default list) + HeaderName::from_str("admin-key").unwrap(), + ]) + .allow_credentials(true) } #[derive(Clone, Debug)] pub enum GeneralMessage { - LogEntryAdded(LogEntry), - MigrateLogMessageAdded(String), - TestingLogMessageAdded(String), + LogEntryAdded(LogEntry), + MigrateLogMessageAdded(String), + TestingLogMessageAdded(String), } // for some very-strange reason, using the tokio::broadcast::[Sender/Receiver] to transmit LogEntry's (from app_server_link.rs to _general.rs) silently fails @@ -127,133 +131,129 @@ pub enum GeneralMessage { // ...but wasn't able to discover the "difference" between MigrateLogMessageAdded and LogEntryAdded pathway that would explain it (and thus suggest a proper solution) /*#[derive(Clone, Debug)] pub enum GeneralMessage_Flume { - LogEntryAdded(LogEntry), + LogEntryAdded(LogEntry), }*/ fn set_up_globals() { - panic::set_hook(Box::new(|info| { - let stacktrace = Backtrace::force_capture(); - error!("Got panic. @info:{}\n@stackTrace:{}", info, stacktrace); - std::process::abort(); - })); + panic::set_hook(Box::new(|info| { + let stacktrace = Backtrace::force_capture(); + error!("Got panic. 
@info:{}\n@stackTrace:{}", info, stacktrace); + std::process::abort(); + })); - //tracing_subscriber::fmt::init(); // install global collector configured based on RUST_LOG env var - let printing_layer = tracing_subscriber::fmt::layer().with_filter(filter::filter_fn(move |metadata| { - should_event_be_printed(metadata) - })); - tracing_subscriber::registry().with(printing_layer).init(); + //tracing_subscriber::fmt::init(); // install global collector configured based on RUST_LOG env var + let printing_layer = tracing_subscriber::fmt::layer().with_filter(filter::filter_fn(move |metadata| should_event_be_printed(metadata))); + tracing_subscriber::registry().with(printing_layer).init(); } pub fn does_event_match_conditions(metadata: &Metadata, levels_to_include: &[Level]) -> bool { - if !levels_to_include.contains(metadata.level()) { - return false; - } - true + if !levels_to_include.contains(metadata.level()) { + return false; + } + true } pub fn should_event_be_printed(metadata: &Metadata) -> bool { - match metadata.target() { - a if a.starts_with("monitor_backend") || a.starts_with("rust_shared") => { - //does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN, Level::INFO]) - does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN, Level::INFO, Level::DEBUG]) - //does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN, Level::INFO, Level::DEBUG, Level::TRACE]) - }, - "async-graphql" => { - does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN]) - }, - _ => false - } + match metadata.target() { + a if a.starts_with("monitor_backend") || a.starts_with("rust_shared") => { + //does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN, Level::INFO]) + does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN, Level::INFO, Level::DEBUG]) + //does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN, Level::INFO, Level::DEBUG, Level::TRACE]) + }, + "async-graphql" => does_event_match_conditions(metadata, &[Level::ERROR, Level::WARN]), + _ => false, + } } #[tokio::main] async fn main() { - set_up_globals(); - info!("Setup of globals completed."); // have one regular print-line, in case logger has issues - - let app_state = AppStateArc::new(AppState::default()); + set_up_globals(); + info!("Setup of globals completed."); // have one regular print-line, in case logger has issues - let app = Router::new() - /*.route("/", get(|| async { Html(r#" -

- This is the URL for the monitor-backend.
- "#) }))*/ - //.route("/send-mtx-results", post(send_mtx_results)) - .route("/storeAdminKeyCookie", get(store_admin_key_cookie)) - .route("/proxy/prometheus", get(maybe_proxy_to_prometheus)) - .route("/proxy/prometheus/*path", get(maybe_proxy_to_prometheus)) - .route("/proxy/alertmanager", get(maybe_proxy_to_alertmanager)) - .route("/proxy/alertmanager/*path", get(maybe_proxy_to_alertmanager)) - .fallback(get(handler)); + let app_state = AppStateArc::new(AppState::default()); - let (mut msg_sender, msg_receiver): (ABSender, ABReceiver) = async_broadcast::broadcast(10000); - msg_sender.set_overflow(true); - tokio::spawn(connect_to_app_server(app_state.clone(), msg_sender.clone())); + let app = Router::new() + /*.route("/", get(|| async { Html(r#" +

This is the URL for the monitor-backend.

+

Navigate to debatemap.app instead. (or localhost:5100/localhost:5101, if running Debate Map locally)

+ "#) }))*/ + //.route("/send-mtx-results", post(send_mtx_results)) + .route("/storeAdminKeyCookie", get(store_admin_key_cookie)) + .route("/proxy/prometheus", get(maybe_proxy_to_prometheus)) + .route("/proxy/prometheus/*path", get(maybe_proxy_to_prometheus)) + .route("/proxy/alertmanager", get(maybe_proxy_to_alertmanager)) + .route("/proxy/alertmanager/*path", get(maybe_proxy_to_alertmanager)) + .fallback(get(handler)); - let app = gql_::extend_router(app, msg_sender, msg_receiver, /*msg_sender_test, msg_receiver_test,*/ app_state.clone()).await; + let (mut msg_sender, msg_receiver): (ABSender, ABReceiver) = async_broadcast::broadcast(10000); + msg_sender.set_overflow(true); + tokio::spawn(connect_to_app_server(app_state.clone(), msg_sender.clone())); - let client_for_proxying = new_hyper_client_http(); - // cors layer apparently must be added after the stuff it needs to apply to - let app = app - .layer(Extension(app_state)) - .layer(Extension(client_for_proxying)) - .layer(get_cors_layer()); + let app = gql_::extend_router(app, msg_sender, msg_receiver, /*msg_sender_test, msg_receiver_test,*/ app_state.clone()).await; - let addr = SocketAddr::from(([0, 0, 0, 0], 5130)); // ip of 0.0.0.0 means it can receive connections from outside this pod (eg. other pods, the load-balancer) - let listener = TcpListener::bind(&addr).await.unwrap(); - let server_fut = axum::serve(listener, app.into_make_service_with_connect_info::()); - info!("Monitor-backend launched. @env:{:?}", k8s_env()); - server_fut.await.unwrap(); + let client_for_proxying = new_hyper_client_http(); + // cors layer apparently must be added after the stuff it needs to apply to + let app = app.layer(Extension(app_state)).layer(Extension(client_for_proxying)).layer(get_cors_layer()); + + let addr = SocketAddr::from(([0, 0, 0, 0], 5130)); // ip of 0.0.0.0 means it can receive connections from outside this pod (eg. other pods, the load-balancer) + let listener = TcpListener::bind(&addr).await.unwrap(); + let server_fut = axum::serve(listener, app.into_make_service_with_connect_info::()); + info!("Monitor-backend launched. 
@env:{:?}", k8s_env()); + server_fut.await.unwrap(); } async fn handler(req: Request) -> Result, (StatusCode, String)> { - let uri = req.uri(); - //info!("BaseURI:{}", uri); - let (scheme, authority, path, _query) = { - let temp = uri.clone().into_parts(); - let scheme = "https"; //temp.scheme.map_or("".to_owned(), |a| a.to_string()); - let authority = "debatemap.app"; //temp.authority.map_or("".to_owned(), |a| a.to_string()); - let path = temp.path_and_query.clone().map_or("".to_owned(), |a| a.path().to_string()); - let query = temp.path_and_query.map_or("".to_owned(), |a| a.query().unwrap_or("").to_owned()); - (scheme, authority, path, query) - }; - - // try resolving path from "/Dist" folder - if let Ok(uri_variant) = Uri::from_str(&format!("{scheme}://{authority}/Dist/{path}")) { - let res = get_static_file(uri_variant.clone()).await?; - if res.status() != StatusCode::NOT_FOUND { return Ok(res); } - } + let uri = req.uri(); + //info!("BaseURI:{}", uri); + let (scheme, authority, path, _query) = { + let temp = uri.clone().into_parts(); + let scheme = "https"; //temp.scheme.map_or("".to_owned(), |a| a.to_string()); + let authority = "debatemap.app"; //temp.authority.map_or("".to_owned(), |a| a.to_string()); + let path = temp.path_and_query.clone().map_or("".to_owned(), |a| a.path().to_string()); + let query = temp.path_and_query.map_or("".to_owned(), |a| a.query().unwrap_or("").to_owned()); + (scheme, authority, path, query) + }; + + // try resolving path from "/Dist" folder + if let Ok(uri_variant) = Uri::from_str(&format!("{scheme}://{authority}/Dist/{path}")) { + let res = get_static_file(uri_variant.clone()).await?; + if res.status() != StatusCode::NOT_FOUND { + return Ok(res); + } + } - // try resolving path from "/Resources" folder - if let Ok(uri_variant) = Uri::from_str(&format!("{scheme}://{authority}/Resources/{path}")) { - let res = get_static_file(uri_variant.clone()).await?; - if res.status() != StatusCode::NOT_FOUND { return Ok(res); } - } + // try resolving path from "/Resources" folder + if let Ok(uri_variant) = Uri::from_str(&format!("{scheme}://{authority}/Resources/{path}")) { + let res = get_static_file(uri_variant.clone()).await?; + if res.status() != StatusCode::NOT_FOUND { + return Ok(res); + } + } - // if all else fails, just resolve to "/Dist/index.html" - //println!("Test:{}", format!("{scheme}://{authority}/Dist/index.html")); - if let Ok(uri_variant) = Uri::from_str(&format!("{scheme}://{authority}/Dist/index.html")) { - let res = get_static_file(uri_variant.clone()).await?; - //println!("Response for index.html: {:?}", res); - if res.status() != StatusCode::NOT_FOUND { return Ok(res); } - } + // if all else fails, just resolve to "/Dist/index.html" + //println!("Test:{}", format!("{scheme}://{authority}/Dist/index.html")); + if let Ok(uri_variant) = Uri::from_str(&format!("{scheme}://{authority}/Dist/index.html")) { + let res = get_static_file(uri_variant.clone()).await?; + //println!("Response for index.html: {:?}", res); + if res.status() != StatusCode::NOT_FOUND { + return Ok(res); + } + } - return Err((StatusCode::INTERNAL_SERVER_ERROR, format!("Something went wrong; failed to resolve URI to a resource."))); + return Err((StatusCode::INTERNAL_SERVER_ERROR, format!("Something went wrong; failed to resolve URI to a resource."))); } async fn get_static_file(uri: Uri) -> Result, (StatusCode, String)> { - //println!("URI:{}", uri); - let req = Request::builder().uri(uri).body(Full::new(Bytes::new())).unwrap(); - let root_resolve_folder = "../monitor-client"; + 
//println!("URI:{}", uri); + let req = Request::builder().uri(uri).body(Full::new(Bytes::new())).unwrap(); + let root_resolve_folder = "../monitor-client"; - // `ServeDir` implements `tower::Service` so we can call it with `tower::ServiceExt::oneshot` - match ServeDir::new(root_resolve_folder).oneshot(req).await { - Ok(res) => { - let (parts, body) = res.into_parts(); - let bytes = body_to_bytes(body).await.unwrap(); - Ok(Response::from_parts(parts, AxumBody::from(bytes))) - }, - Err(err) => Err(( - StatusCode::INTERNAL_SERVER_ERROR, - format!("Something went wrong: {}", err), - )), - } -} \ No newline at end of file + // `ServeDir` implements `tower::Service` so we can call it with `tower::ServiceExt::oneshot` + match ServeDir::new(root_resolve_folder).oneshot(req).await { + Ok(res) => { + let (parts, body) = res.into_parts(); + let bytes = body_to_bytes(body).await.unwrap(); + Ok(Response::from_parts(parts, AxumBody::from(bytes))) + }, + Err(err) => Err((StatusCode::INTERNAL_SERVER_ERROR, format!("Something went wrong: {}", err))), + } +} diff --git a/Packages/monitor-backend/src/migrations/v2.rs b/Packages/monitor-backend/src/migrations/v2.rs index dd8000cba..a6aec6eb6 100644 --- a/Packages/monitor-backend/src/migrations/v2.rs +++ b/Packages/monitor-backend/src/migrations/v2.rs @@ -1,88 +1,104 @@ -use rust_shared::{anyhow::Error, tokio, tokio_postgres, serde_json}; use rust_shared::flume::Sender; -use tracing::{error, info}; use rust_shared::utils::type_aliases::JSONValue; +use rust_shared::{anyhow::Error, serde_json, tokio, tokio_postgres}; +use tracing::{error, info}; -use crate::{GeneralMessage, pgclient::create_client_advanced, utils::type_aliases::{ABSender}}; +use crate::{pgclient::create_client_advanced, utils::type_aliases::ABSender, GeneralMessage}; pub async fn migrate_db_to_v2(msg_sender: ABSender) -> Result { - let (mut client, connection) = create_client_advanced(false).await; - // the connection object performs the actual communication with the database, so spawn it off to run on its own - // (maybe switch to using a shared program-wide pool, to avoid the need for this) - let _handle = tokio::spawn(async move { - if let Err(e) = connection.await { - error!("connection error: {}", e); - } - //return connection; - }); - - let migration_id = "1".to_owned(); - /*let log = |text: &str| { - info!("MigrateLog: {text}"); - //msg_sender.send(GeneralMessage::MigrateLogMessageAdded(text.to_owned())).unwrap(); - match msg_sender.broadcast(GeneralMessage::MigrateLogMessageAdded(text.to_owned())).await { - Ok(_) => {}, - Err(err) => error!("Cannot send migrate-log-entry; all receivers were dropped. @err:{err}"), - } - };*/ - let log = |text: String| { - let msg_sender_clone = msg_sender.clone(); - async move { - info!("MigrateLog: {text}"); - match msg_sender_clone.broadcast(GeneralMessage::MigrateLogMessageAdded(text.to_owned())).await { - Ok(_) => {}, - Err(err) => error!("Cannot send migrate-log-entry; all receivers were dropped. 
@err:{err}"), - } - } - }; + let (mut client, connection) = create_client_advanced(false).await; + // the connection object performs the actual communication with the database, so spawn it off to run on its own + // (maybe switch to using a shared program-wide pool, to avoid the need for this) + let _handle = tokio::spawn(async move { + if let Err(e) = connection.await { + error!("connection error: {}", e); + } + //return connection; + }); + + let migration_id = "1".to_owned(); + /*let log = |text: &str| { + info!("MigrateLog: {text}"); + //msg_sender.send(GeneralMessage::MigrateLogMessageAdded(text.to_owned())).unwrap(); + match msg_sender.broadcast(GeneralMessage::MigrateLogMessageAdded(text.to_owned())).await { + Ok(_) => {}, + Err(err) => error!("Cannot send migrate-log-entry; all receivers were dropped. @err:{err}"), + } + };*/ + let log = |text: String| { + let msg_sender_clone = msg_sender.clone(); + async move { + info!("MigrateLog: {text}"); + match msg_sender_clone.broadcast(GeneralMessage::MigrateLogMessageAdded(text.to_owned())).await { + Ok(_) => {}, + Err(err) => error!("Cannot send migrate-log-entry; all receivers were dropped. @err:{err}"), + } + } + }; - log("Starting migration to version: 2".to_owned()).await; - let tx = client.build_transaction().isolation_level(tokio_postgres::IsolationLevel::Serializable).start().await?; + log("Starting migration to version: 2".to_owned()).await; + let tx = client.build_transaction().isolation_level(tokio_postgres::IsolationLevel::Serializable).start().await?; - log("Adding new column...".to_owned()).await; - tx.execute(r#"ALTER TABLE app."nodeRevisions" ADD attachments jsonb NOT NULL DEFAULT '[]'::json;"#, &[]).await?; + log("Adding new column...".to_owned()).await; + tx.execute(r#"ALTER TABLE app."nodeRevisions" ADD attachments jsonb NOT NULL DEFAULT '[]'::json;"#, &[]).await?; - log("Updating rows...".to_owned()).await; - let rows = tx.query(r#"SELECT * from app."nodeRevisions""#, &[]).await?; - for row in rows { - let id: String = row.get("id"); - let equation_val: JSONValue = row.try_get("equation").unwrap_or(JSONValue::Null); - let references_val: JSONValue = row.try_get("references").unwrap_or(JSONValue::Null); - let quote_val: JSONValue = row.try_get("quote").unwrap_or(JSONValue::Null); - let media_val: JSONValue = row.try_get("media").unwrap_or(JSONValue::Null); - - tx.execute(r#" + log("Updating rows...".to_owned()).await; + let rows = tx.query(r#"SELECT * from app."nodeRevisions""#, &[]).await?; + for row in rows { + let id: String = row.get("id"); + let equation_val: JSONValue = row.try_get("equation").unwrap_or(JSONValue::Null); + let references_val: JSONValue = row.try_get("references").unwrap_or(JSONValue::Null); + let quote_val: JSONValue = row.try_get("quote").unwrap_or(JSONValue::Null); + let media_val: JSONValue = row.try_get("media").unwrap_or(JSONValue::Null); + + tx.execute( + r#" UPDATE app."nodeRevisions" SET "attachments"='[]'::jsonb WHERE id=$1; - "#, &[&id]).await?; - - if !equation_val.is_null() || !references_val.is_null() || !quote_val.is_null() || !media_val.is_null() { - let mut attachment_map = serde_json::Map::new(); - if !equation_val.is_null() { attachment_map.insert("equation".to_owned(), equation_val); } - if !references_val.is_null() { attachment_map.insert("references".to_owned(), references_val); } - if !quote_val.is_null() { attachment_map.insert("quote".to_owned(), quote_val); } - if !media_val.is_null() { attachment_map.insert("media".to_owned(), media_val); } + "#, + &[&id], + ) + .await?; + 
+ if !equation_val.is_null() || !references_val.is_null() || !quote_val.is_null() || !media_val.is_null() { + let mut attachment_map = serde_json::Map::new(); + if !equation_val.is_null() { + attachment_map.insert("equation".to_owned(), equation_val); + } + if !references_val.is_null() { + attachment_map.insert("references".to_owned(), references_val); + } + if !quote_val.is_null() { + attachment_map.insert("quote".to_owned(), quote_val); + } + if !media_val.is_null() { + attachment_map.insert("media".to_owned(), media_val); + } - let attachment = JSONValue::Object(attachment_map); - let attachments_array = JSONValue::Array(vec![attachment]); + let attachment = JSONValue::Object(attachment_map); + let attachments_array = JSONValue::Array(vec![attachment]); - tx.execute(r#" + tx.execute( + r#" UPDATE app."nodeRevisions" SET "attachments"=$1 WHERE id=$2; - "#, &[&attachments_array, &id]).await?; - } - } + "#, + &[&attachments_array, &id], + ) + .await?; + } + } - log("Deleting old columns...".to_owned()).await; - tx.execute(r#"ALTER TABLE app."nodeRevisions" DROP COLUMN equation;"#, &[]).await?; - tx.execute(r#"ALTER TABLE app."nodeRevisions" DROP COLUMN media;"#, &[]).await?; - tx.execute(r#"ALTER TABLE app."nodeRevisions" DROP COLUMN "references";"#, &[]).await?; - tx.execute(r#"ALTER TABLE app."nodeRevisions" DROP COLUMN "quote";"#, &[]).await?; + log("Deleting old columns...".to_owned()).await; + tx.execute(r#"ALTER TABLE app."nodeRevisions" DROP COLUMN equation;"#, &[]).await?; + tx.execute(r#"ALTER TABLE app."nodeRevisions" DROP COLUMN media;"#, &[]).await?; + tx.execute(r#"ALTER TABLE app."nodeRevisions" DROP COLUMN "references";"#, &[]).await?; + tx.execute(r#"ALTER TABLE app."nodeRevisions" DROP COLUMN "quote";"#, &[]).await?; - log("Committing transaction...".to_owned()).await; - tx.commit().await?; - log("Migration complete!".to_owned()).await; - return Ok(migration_id); -} \ No newline at end of file + log("Committing transaction...".to_owned()).await; + tx.commit().await?; + log("Migration complete!".to_owned()).await; + return Ok(migration_id); +} diff --git a/Packages/monitor-backend/src/pgclient.rs b/Packages/monitor-backend/src/pgclient.rs index 0a3d78f95..7a98d3477 100644 --- a/Packages/monitor-backend/src/pgclient.rs +++ b/Packages/monitor-backend/src/pgclient.rs @@ -1,49 +1,53 @@ -use std::{env, time::{SystemTime, UNIX_EPOCH}, task::{Poll}}; +use deadpool_postgres::{Manager, ManagerConfig, Pool, PoolConfig, RecyclingMethod, Runtime}; use rust_shared::bytes::Bytes; -use deadpool_postgres::{Manager, ManagerConfig, Pool, RecyclingMethod, Runtime, PoolConfig}; -use rust_shared::futures::{future, StreamExt, Sink, ready}; +use rust_shared::futures::{future, ready, Sink, StreamExt}; use rust_shared::tokio::join; -use rust_shared::{tokio_postgres, tokio_postgres::{NoTls, Client, SimpleQueryMessage, SimpleQueryRow, tls::NoTlsStream, Socket, Connection}}; -use tracing::info; use rust_shared::utils::type_aliases::JSONValue; - +use rust_shared::{ + tokio_postgres, + tokio_postgres::{tls::NoTlsStream, Client, Connection, NoTls, SimpleQueryMessage, SimpleQueryRow, Socket}, +}; +use std::{ + env, + task::Poll, + time::{SystemTime, UNIX_EPOCH}, +}; +use tracing::info; pub fn get_tokio_postgres_config() -> tokio_postgres::Config { - // get connection info from env-vars - let ev = |name| { env::var(name).unwrap() }; - info!("Postgres connection-info: postgres://{}:@{}:{}/debate-map", ev("DB_USER"), ev("DB_ADDR"), ev("DB_PORT")); - - let mut cfg = tokio_postgres::Config::new(); - 
cfg.user(&ev("DB_USER")); - cfg.password(ev("DB_PASSWORD")); - cfg.host(&ev("DB_ADDR")); - cfg.port(ev("DB_PORT").parse::().unwrap()); - cfg.dbname("debate-map"); - cfg + // get connection info from env-vars + let ev = |name| env::var(name).unwrap(); + info!("Postgres connection-info: postgres://{}:@{}:{}/debate-map", ev("DB_USER"), ev("DB_ADDR"), ev("DB_PORT")); + + let mut cfg = tokio_postgres::Config::new(); + cfg.user(&ev("DB_USER")); + cfg.password(ev("DB_PASSWORD")); + cfg.host(&ev("DB_ADDR")); + cfg.port(ev("DB_PORT").parse::().unwrap()); + cfg.dbname("debate-map"); + cfg } /// Only use this if you need the for_replication option. (everything else should use clients taken from the shared pool) pub async fn create_client_advanced(for_replication: bool) -> (Client, Connection) { - let mut pg_cfg = get_tokio_postgres_config(); - if for_replication { - //db_config += " replication=database"; - //cfg.options(options); - pg_cfg.replication_mode(tokio_postgres::config::ReplicationMode::Logical); - } + let mut pg_cfg = get_tokio_postgres_config(); + if for_replication { + //db_config += " replication=database"; + //cfg.options(options); + pg_cfg.replication_mode(tokio_postgres::config::ReplicationMode::Logical); + } - // connect to the database - let (client, connection) = pg_cfg.connect(NoTls).await.unwrap(); - (client, connection) + // connect to the database + let (client, connection) = pg_cfg.connect(NoTls).await.unwrap(); + (client, connection) } pub fn create_db_pool() -> Pool { - let pg_cfg = get_tokio_postgres_config(); - let mgr_cfg = ManagerConfig { - recycling_method: RecyclingMethod::Fast - }; - let mgr = Manager::from_config(pg_cfg, NoTls, mgr_cfg); - //let pool_size = 1; - let pool_size = 30; - let pool = Pool::builder(mgr).max_size(pool_size).runtime(Runtime::Tokio1).build().unwrap(); - pool -} \ No newline at end of file + let pg_cfg = get_tokio_postgres_config(); + let mgr_cfg = ManagerConfig { recycling_method: RecyclingMethod::Fast }; + let mgr = Manager::from_config(pg_cfg, NoTls, mgr_cfg); + //let pool_size = 1; + let pool_size = 30; + let pool = Pool::builder(mgr).max_size(pool_size).runtime(Runtime::Tokio1).build().unwrap(); + pool +} diff --git a/Packages/monitor-backend/src/store/storage.rs b/Packages/monitor-backend/src/store/storage.rs index 1da1f59d1..777311741 100644 --- a/Packages/monitor-backend/src/store/storage.rs +++ b/Packages/monitor-backend/src/store/storage.rs @@ -1,46 +1,62 @@ -use rust_shared::{async_graphql, async_graphql::{SimpleObject, Json}, utils::{mtx::mtx::MtxData, type_aliases::{JSONValue, RowData}}}; -use rust_shared::{futures, axum, tower, tower_http}; use axum::{ - response::{Html}, - routing::{get, any_service, post, get_service}, - Router, http::{ - Method, - header::{CONTENT_TYPE} - }, + http::{header::CONTENT_TYPE, Method}, + response::Html, + routing::{any_service, get, get_service, post}, + Router, +}; +use rust_shared::flume::{unbounded, Receiver, Sender}; +use rust_shared::hyper::{ + header::{self, FORWARDED}, + service::service_fn, + Request, Response, StatusCode, Uri, }; -use rust_shared::hyper::{service::{service_fn}, Request, Response, StatusCode, header::{FORWARDED, self}, Uri}; use rust_shared::indexmap::IndexMap; use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::serde::{Serialize, Deserialize}; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::Serializer; -use tower::ServiceExt; -use tower_http::{cors::{CorsLayer}, services::ServeFile}; +use rust_shared::tokio::{ + 
runtime::Runtime, + sync::{broadcast, Mutex, RwLock}, +}; use rust_shared::uuid::Uuid; +use rust_shared::{ + async_graphql, + async_graphql::{Json, SimpleObject}, + utils::{ + mtx::mtx::MtxData, + type_aliases::{JSONValue, RowData}, + }, +}; +use rust_shared::{axum, futures, tower, tower_http}; use std::{ - collections::{HashSet, HashMap, BTreeMap}, - net::{SocketAddr, IpAddr}, - sync::{Arc}, panic, backtrace::Backtrace, convert::Infallible, str::FromStr, + backtrace::Backtrace, + collections::{BTreeMap, HashMap, HashSet}, + convert::Infallible, + net::{IpAddr, SocketAddr}, + panic, + str::FromStr, + sync::Arc, }; -use rust_shared::tokio::{sync::{broadcast, Mutex, RwLock}, runtime::Runtime}; -use rust_shared::flume::{Sender, Receiver, unbounded}; -use tower_http::{services::ServeDir}; +use tower::ServiceExt; +use tower_http::services::ServeDir; +use tower_http::{cors::CorsLayer, services::ServeFile}; pub type AppStateArc = Arc; #[derive(Default)] pub struct AppState { - //pub mtx_results: RwLock>, - pub mtx_results: RwLock>, - pub lqi_data: RwLock>, + //pub mtx_results: RwLock>, + pub mtx_results: RwLock>, + pub lqi_data: RwLock>, } #[derive(SimpleObject)] // in monitor-backend only #[derive(Debug, Clone, Serialize, Deserialize)] pub struct LQInstance_Partial { - pub table_name: String, - pub filter: JSONValue, - //pub last_entries: Vec, - pub last_entries: Json>, - pub entry_watcher_count: usize, + pub table_name: String, + pub filter: JSONValue, + //pub last_entries: Vec, + pub last_entries: Json>, + pub entry_watcher_count: usize, } /*wrap_slow_macros!{ @@ -48,12 +64,12 @@ pub struct LQInstance_Partial { // derived from struct in app-server/.../mtx.rs #[derive(SimpleObject, Clone, Serialize, Deserialize)] pub struct Mtx { - //pub id: Arc, - pub id: String, // changed to String, since agql's OutputType is not implemented for Uuid + //pub id: Arc, + pub id: String, // changed to String, since agql's OutputType is not implemented for Uuid - // tell serde to serialize the HashMap using the ordered_map function, which collects the entries into a temporary BTreeMap (which is sorted) - #[serde(serialize_with = "crate::utils::general::ordered_map")] - pub section_lifetimes: HashMap, + // tell serde to serialize the HashMap using the ordered_map function, which collects the entries into a temporary BTreeMap (which is sorted) + #[serde(serialize_with = "crate::utils::general::ordered_map")] + pub section_lifetimes: HashMap, } -}*/ \ No newline at end of file +}*/ diff --git a/Packages/monitor-backend/src/testing/general.rs b/Packages/monitor-backend/src/testing/general.rs index 10d80b4b5..598f5e732 100644 --- a/Packages/monitor-backend/src/testing/general.rs +++ b/Packages/monitor-backend/src/testing/general.rs @@ -1,34 +1,37 @@ use std::env; use std::str::FromStr; -use rust_shared::chrono::{Utc, SecondsFormat}; -use rust_shared::hyper::Method; use rust_shared::anyhow::{anyhow, Error}; +use rust_shared::chrono::{SecondsFormat, Utc}; +use rust_shared::db_constants::{SYSTEM_USER_EMAIL, SYSTEM_USER_ID}; +use rust_shared::flume::Sender; +use rust_shared::hyper::Method; use rust_shared::hyper_util::client::legacy::connect::HttpConnector; use rust_shared::hyper_util::client::legacy::Client; use rust_shared::hyper_util::rt::TokioExecutor; use rust_shared::jwt_simple::prelude::{Claims, MACLike}; +use rust_shared::rust_macros::wrap_slow_macros; +use rust_shared::serde; +use rust_shared::serde::{Deserialize, Serialize}; use rust_shared::serde_json::json; use 
rust_shared::utils::auth::jwt_utils_base::{get_or_create_jwt_key_hs256, UserJWTData}; -use rust_shared::utils::db::uuid::{new_uuid_v4_as_b64_id, new_uuid_v4_as_b64}; -use rust_shared::utils::general::{f64_to_str_rounded, f64_to_percent_str}; +use rust_shared::utils::db::uuid::{new_uuid_v4_as_b64, new_uuid_v4_as_b64_id}; +use rust_shared::utils::general::{f64_to_percent_str, f64_to_str_rounded}; use rust_shared::utils::general_::extensions::ToOwnedV; use rust_shared::utils::net::{body_to_str, new_hyper_client_http}; -use rust_shared::utils::time::{time_since_epoch_ms_i64, tokio_sleep, tokio_sleep_until, time_since_epoch_ms}; +use rust_shared::utils::time::{time_since_epoch_ms, time_since_epoch_ms_i64, tokio_sleep, tokio_sleep_until}; use rust_shared::utils::type_aliases::JWTDuration; -use rust_shared::{async_graphql, async_graphql::{SimpleObject, InputObject}}; -use rust_shared::db_constants::{SYSTEM_USER_ID, SYSTEM_USER_EMAIL}; -use rust_shared::flume::Sender; -use rust_shared::rust_macros::wrap_slow_macros; -use rust_shared::{self as rust_shared, serde_json, tokio, to_sub_err}; -use rust_shared::serde::{Serialize, Deserialize}; -use rust_shared::serde; +use rust_shared::utils::type_aliases::{FReceiver, FSender, JSONValue}; +use rust_shared::{self as rust_shared, serde_json, to_sub_err, tokio}; +use rust_shared::{ + async_graphql, + async_graphql::{InputObject, SimpleObject}, +}; use tracing::{error, info}; -use rust_shared::utils::type_aliases::{JSONValue, FSender, FReceiver}; -use crate::{GeneralMessage, pgclient::create_client_advanced, utils::type_aliases::{ABSender}}; +use crate::{pgclient::create_client_advanced, utils::type_aliases::ABSender, GeneralMessage}; -wrap_slow_macros!{ +wrap_slow_macros! { #[derive(SimpleObject, InputObject, Debug, Clone, Serialize, Deserialize)] //#[serde(crate = "rust_shared::serde")] pub struct TestSequence { @@ -39,8 +42,8 @@ pub struct TestStep { enabled: Option, preWait: Option, postWait: Option, - waitTillComplete: Option, - waitTillDurationX: Option, + waitTillComplete: Option, + waitTillDurationX: Option, stepBatch: Option, //signIn: Option, @@ -49,7 +52,7 @@ pub struct TestStep { #[derive(SimpleObject, InputObject, Debug, Clone, Serialize, Deserialize)] pub struct TS_StepBatch { steps: Vec, - repeatCount: Option, + repeatCount: Option, } /*#[derive(SimpleObject, InputObject, Debug, Clone, Serialize, Deserialize)] pub struct TS_SignIn { @@ -58,230 +61,243 @@ pub struct TS_SignIn { #[derive(SimpleObject, InputObject, Debug, Clone, Serialize, Deserialize)] pub struct TS_AddNodeRevision { nodeID: String, - text: Option, + text: Option, } } fn flatten_steps(steps: Vec) -> Vec { - let mut result: Vec = vec![]; - for step in steps { - if !step.enabled.unwrap_or(true) { continue; } + let mut result: Vec = vec![]; + for step in steps { + if !step.enabled.unwrap_or(true) { + continue; + } + + if let Some(batch) = &step.stepBatch { + let substeps_flat_unrepeated = flatten_steps(batch.steps.clone()); + let mut substeps_final: Vec = vec![]; + let repeat_count = batch.repeatCount.unwrap_or(1); + for _i in 0..repeat_count { + substeps_final.extend(substeps_flat_unrepeated.clone()); + } - if let Some(batch) = &step.stepBatch { - let substeps_flat_unrepeated = flatten_steps(batch.steps.clone()); - let mut substeps_final: Vec = vec![]; - let repeat_count = batch.repeatCount.unwrap_or(1); - for _i in 0..repeat_count { - substeps_final.extend(substeps_flat_unrepeated.clone()); - } - - // only flatten/unwrap this step-batch if it actually has substeps (else the 
wait-values would be lost/ignored) - let substeps_final_len = substeps_final.len(); - if substeps_final_len > 0 { - // add the container-step's wait-values to the first/last steps in its flattened output - if step.preWait.is_some() { - substeps_final[0].preWait = Some(substeps_final[0].preWait.unwrap_or(0) + step.preWait.unwrap()); - } - if step.postWait.is_some() { - substeps_final[substeps_final_len - 1].postWait = Some(substeps_final[substeps_final_len - 1].postWait.unwrap_or(0) + step.postWait.unwrap()); - } + // only flatten/unwrap this step-batch if it actually has substeps (else the wait-values would be lost/ignored) + let substeps_final_len = substeps_final.len(); + if substeps_final_len > 0 { + // add the container-step's wait-values to the first/last steps in its flattened output + if step.preWait.is_some() { + substeps_final[0].preWait = Some(substeps_final[0].preWait.unwrap_or(0) + step.preWait.unwrap()); + } + if step.postWait.is_some() { + substeps_final[substeps_final_len - 1].postWait = Some(substeps_final[substeps_final_len - 1].postWait.unwrap_or(0) + step.postWait.unwrap()); + } - result.extend(substeps_final); - continue; - } - } + result.extend(substeps_final); + continue; + } + } - result.push(step); - } - result + result.push(step); + } + result } pub async fn execute_test_sequence(sequence: TestSequence, msg_sender: ABSender) -> Result<(), Error> { - /*let log = |text: &str| { - let msg_sender_clone = msg_sender.clone(); - let text_clone = text.clone(); - async move { - info!("TestingLog: {text_clone}"); - //msg_sender.send(GeneralMessage::TestingLogMessageAdded(text.to_owned())).unwrap(); - match msg_sender_clone.broadcast(GeneralMessage::TestingLogMessageAdded(text_clone.to_owned())).await { - Ok(_) => {}, - Err(err) => error!("Cannot send testing-log-entry; all receivers were dropped. @err:{err}"), - } - } - };*/ - let log = |text: String| { - let msg_sender_clone = msg_sender.clone(); - async move { - info!("TestingLog: {text}"); - //msg_sender.send(GeneralMessage::TestingLogMessageAdded(text.to_owned())).unwrap(); - match msg_sender_clone.broadcast(GeneralMessage::TestingLogMessageAdded(text.to_owned())).await { - Ok(_) => {}, - Err(err) => error!("Cannot send testing-log-entry; all receivers were dropped. @err:{err}"), - } - } - }; + /*let log = |text: &str| { + let msg_sender_clone = msg_sender.clone(); + let text_clone = text.clone(); + async move { + info!("TestingLog: {text_clone}"); + //msg_sender.send(GeneralMessage::TestingLogMessageAdded(text.to_owned())).unwrap(); + match msg_sender_clone.broadcast(GeneralMessage::TestingLogMessageAdded(text_clone.to_owned())).await { + Ok(_) => {}, + Err(err) => error!("Cannot send testing-log-entry; all receivers were dropped. @err:{err}"), + } + } + };*/ + let log = |text: String| { + let msg_sender_clone = msg_sender.clone(); + async move { + info!("TestingLog: {text}"); + //msg_sender.send(GeneralMessage::TestingLogMessageAdded(text.to_owned())).unwrap(); + match msg_sender_clone.broadcast(GeneralMessage::TestingLogMessageAdded(text.to_owned())).await { + Ok(_) => {}, + Err(err) => error!("Cannot send testing-log-entry; all receivers were dropped. 
@err:{err}"), + } + } + }; - let root_steps_len = sequence.steps.len(); - let flattened_steps = flatten_steps(sequence.steps); - let flattened_steps_len = flattened_steps.len(); + let root_steps_len = sequence.steps.len(); + let flattened_steps = flatten_steps(sequence.steps); + let flattened_steps_len = flattened_steps.len(); - // set up receiver for errors directly encountered/returned during the test-steps' execution - let mut steps_with_errors = 0i64; - let (err_sender, err_receiver): (FSender, FReceiver) = flume::unbounded(); + // set up receiver for errors directly encountered/returned during the test-steps' execution + let mut steps_with_errors = 0i64; + let (err_sender, err_receiver): (FSender, FReceiver) = flume::unbounded(); - // set up receiver for general warnings/errors that app-server notifies us of, during the execution period (some may be unrelated, but still useful metric) - let mut other_warnings = 0i64; - let mut other_errors = 0i64; - let mut msg_receiver = msg_sender.new_receiver(); + // set up receiver for general warnings/errors that app-server notifies us of, during the execution period (some may be unrelated, but still useful metric) + let mut other_warnings = 0i64; + let mut other_errors = 0i64; + let mut msg_receiver = msg_sender.new_receiver(); - log(format!("Starting execution of test-sequence. @steps:{} (root: {})", flattened_steps_len, root_steps_len)).await; - for (i, step) in flattened_steps.into_iter().enumerate() { - log(format!("Executing test-step #{}:{}", i, serde_json::to_string(&step).unwrap())).await; - let start_time = time_since_epoch_ms_i64(); - let pre_wait = step.preWait.unwrap_or(0); - let post_wait = step.postWait.unwrap_or(0); - let min_duration = step.waitTillDurationX.unwrap_or(0); + log(format!("Starting execution of test-sequence. @steps:{} (root: {})", flattened_steps_len, root_steps_len)).await; + for (i, step) in flattened_steps.into_iter().enumerate() { + log(format!("Executing test-step #{}:{}", i, serde_json::to_string(&step).unwrap())).await; + let start_time = time_since_epoch_ms_i64(); + let pre_wait = step.preWait.unwrap_or(0); + let post_wait = step.postWait.unwrap_or(0); + let min_duration = step.waitTillDurationX.unwrap_or(0); - tokio_sleep(pre_wait).await; - execute_test_step(i as i64, step, err_sender.clone()).await; - tokio_sleep(post_wait).await; - - tokio_sleep_until(start_time + min_duration).await; + tokio_sleep(pre_wait).await; + execute_test_step(i as i64, step, err_sender.clone()).await; + tokio_sleep(post_wait).await; - // if this is the last step, wait 1 extra second, so that final "check for error notifications" loops don't miss out on error-notifications that were slightly delayed - if i == flattened_steps_len - 1 { - tokio_sleep(1000).await; - } + tokio_sleep_until(start_time + min_duration).await; - // do these error-checking loops "as we go", so that the entries adding to the testing-log are placed "closer to where/when they happened" - loop { - match err_receiver.try_recv() { - Ok((flattened_step_i, err)) => { - steps_with_errors += 1; - log(format!("Test-step #{flattened_step_i} failed. 
@err:{}", err)).await; - }, - Err(flume::TryRecvError::Empty) => break, - Err(flume::TryRecvError::Disconnected) => break, - } - } - loop { - match msg_receiver.try_recv() { - Ok(msg) => match msg { - GeneralMessage::LogEntryAdded(entry) => { - if entry.level == "WARN" { - other_warnings += 1; - log(format!("Received notification of a (possibly unrelated) warning on app-server, during (or shortly before) execution of step #{i}. @warning:{}", entry.message)).await; - } else if entry.level == "ERROR" { - other_errors += 1; - log(format!("Received notification of a (possibly unrelated) error on app-server, during (or shortly before) execution of step #{i}. @error:{}", entry.message)).await; - } - }, - _ => {}, - }, - Err(_) => break, - } - } - } + // if this is the last step, wait 1 extra second, so that final "check for error notifications" loops don't miss out on error-notifications that were slightly delayed + if i == flattened_steps_len - 1 { + tokio_sleep(1000).await; + } - let x_rel = |x: i64| -> String { f64_to_percent_str(x as f64 / flattened_steps_len as f64, 2) }; - log(format!("Ending execution of test-sequence. @steps:{} (root: {}) @steps_with_errors:{} ({}) @asjs_warnings:{} ({}, as ratio) @asjs_errors:{} ({}, as ratio)", - flattened_steps_len, root_steps_len, steps_with_errors, x_rel(steps_with_errors), other_warnings, x_rel(other_warnings), other_errors, x_rel(other_errors))).await; - return Ok(()); + // do these error-checking loops "as we go", so that the entries adding to the testing-log are placed "closer to where/when they happened" + loop { + match err_receiver.try_recv() { + Ok((flattened_step_i, err)) => { + steps_with_errors += 1; + log(format!("Test-step #{flattened_step_i} failed. @err:{}", err)).await; + }, + Err(flume::TryRecvError::Empty) => break, + Err(flume::TryRecvError::Disconnected) => break, + } + } + loop { + match msg_receiver.try_recv() { + Ok(msg) => match msg { + GeneralMessage::LogEntryAdded(entry) => { + if entry.level == "WARN" { + other_warnings += 1; + log(format!("Received notification of a (possibly unrelated) warning on app-server, during (or shortly before) execution of step #{i}. @warning:{}", entry.message)).await; + } else if entry.level == "ERROR" { + other_errors += 1; + log(format!("Received notification of a (possibly unrelated) error on app-server, during (or shortly before) execution of step #{i}. @error:{}", entry.message)).await; + } + }, + _ => {}, + }, + Err(_) => break, + } + } + } + + let x_rel = |x: i64| -> String { f64_to_percent_str(x as f64 / flattened_steps_len as f64, 2) }; + log(format!( + "Ending execution of test-sequence. 
@steps:{} (root: {}) @steps_with_errors:{} ({}) @asjs_warnings:{} ({}, as ratio) @asjs_errors:{} ({}, as ratio)", + flattened_steps_len, + root_steps_len, + steps_with_errors, + x_rel(steps_with_errors), + other_warnings, + x_rel(other_warnings), + other_errors, + x_rel(other_errors) + )) + .await; + return Ok(()); } type TestStepErrorMessage = (i64, String); -async fn execute_test_step<'a>(flattened_index: i64, step: TestStep, err_sender: FSender) { //-> Result<(), Error> { - //if let Some(comp) = step.signIn {} - let fut = { - if let Some(comp) = step.addNodeRevision { - post_request_to_app_server(json!({ - "variables": { - "input": { - "mapID": "GLOBAL_MAP_00000000001", - "revision": { - "node": comp.nodeID, - "phrasing": { - "terms": [], - "text_base": comp.text - .unwrap_or("ValForTestRevision_At:[datetime-ms]".o()) - // some special values that can be used in the text - .replace("[time]", &time_since_epoch_ms_i64().to_string()) - .replace("[datetime]", Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true).replace("T", " ").replace("Z", "").as_str()) - .replace("[datetime-ms]", Utc::now().to_rfc3339_opts(SecondsFormat::Millis, true).replace("T", " ").replace("Z", "").as_str()) - .replace("[uuid]", &new_uuid_v4_as_b64()) - }, - "attachments": [] - } - } - }, - "query": "mutation($input: AddNodeRevisionInput) { addNodeRevision(input: $input) { id } }" - })) - } else { - unimplemented!(); - } - }; +async fn execute_test_step<'a>(flattened_index: i64, step: TestStep, err_sender: FSender) { + //-> Result<(), Error> { + //if let Some(comp) = step.signIn {} + let fut = { + if let Some(comp) = step.addNodeRevision { + post_request_to_app_server(json!({ + "variables": { + "input": { + "mapID": "GLOBAL_MAP_00000000001", + "revision": { + "node": comp.nodeID, + "phrasing": { + "terms": [], + "text_base": comp.text + .unwrap_or("ValForTestRevision_At:[datetime-ms]".o()) + // some special values that can be used in the text + .replace("[time]", &time_since_epoch_ms_i64().to_string()) + .replace("[datetime]", Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true).replace("T", " ").replace("Z", "").as_str()) + .replace("[datetime-ms]", Utc::now().to_rfc3339_opts(SecondsFormat::Millis, true).replace("T", " ").replace("Z", "").as_str()) + .replace("[uuid]", &new_uuid_v4_as_b64()) + }, + "attachments": [] + } + } + }, + "query": "mutation($input: AddNodeRevisionInput) { addNodeRevision(input: $input) { id } }" + })) + } else { + unimplemented!(); + } + }; - let handle_graphql_response = move |response: JSONValue, err_sender: &FSender| { - let _: Result = try { - let generic_err = || anyhow!("[generic-error, never logged]"); - for err_obj in response.get("errors").ok_or_else(generic_err)?.as_array().ok_or_else(generic_err)? { - let err_message = err_obj.get("message").ok_or_else(generic_err)?.as_str().ok_or_else(generic_err)?; - error!("Got graphql error during awaited execution of test-step #{}:{:?}", flattened_index, err_message); + let handle_graphql_response = move |response: JSONValue, err_sender: &FSender| { + let _: Result = try { + let generic_err = || anyhow!("[generic-error, never logged]"); + for err_obj in response.get("errors").ok_or_else(generic_err)?.as_array().ok_or_else(generic_err)? 
{ + let err_message = err_obj.get("message").ok_or_else(generic_err)?.as_str().ok_or_else(generic_err)?; + error!("Got graphql error during awaited execution of test-step #{}:{:?}", flattened_index, err_message); - // If message fails to send, it means channel is closed; which means this is a late-response (ie. after `execute_test_sequence` function completion) operating within the tokio::spawn block below. - // Ignoring this failed message-send is not ideal, but haven't worked out a better solution atm. (better than panicking anyway) - let _ = err_sender.send((flattened_index, err_message.to_string())); - } - 0 - }; - }; - let handle_rust_error = move |err: Error, err_sender: &FSender| { - error!("Got rust error during awaited execution of test-step #{}:{:?}", flattened_index, err); + // If message fails to send, it means channel is closed; which means this is a late-response (ie. after `execute_test_sequence` function completion) operating within the tokio::spawn block below. + // Ignoring this failed message-send is not ideal, but haven't worked out a better solution atm. (better than panicking anyway) + let _ = err_sender.send((flattened_index, err_message.to_string())); + } + 0 + }; + }; + let handle_rust_error = move |err: Error, err_sender: &FSender| { + error!("Got rust error during awaited execution of test-step #{}:{:?}", flattened_index, err); - // If message fails to send, it means channel is closed; which means this is a late-response (ie. after `execute_test_sequence` function completion) operating within the tokio::spawn block below. - // Ignoring this failed message-send is not ideal, but haven't worked out a better solution atm. (better than panicking anyway) - let _ = err_sender.send((flattened_index, err.to_string())); - }; + // If message fails to send, it means channel is closed; which means this is a late-response (ie. after `execute_test_sequence` function completion) operating within the tokio::spawn block below. + // Ignoring this failed message-send is not ideal, but haven't worked out a better solution atm. 
(better than panicking anyway) + let _ = err_sender.send((flattened_index, err.to_string())); + }; - if step.waitTillComplete.unwrap_or(true) { - match fut.await { - Ok(response) => handle_graphql_response(response, &err_sender), - Err(err) => handle_rust_error(err, &err_sender), - } - } else { - tokio::spawn(async move { - match fut.await { - Ok(response) => handle_graphql_response(response, &err_sender), - Err(err) => handle_rust_error(err, &err_sender), - } - }); - } - //Ok(()) + if step.waitTillComplete.unwrap_or(true) { + match fut.await { + Ok(response) => handle_graphql_response(response, &err_sender), + Err(err) => handle_rust_error(err, &err_sender), + } + } else { + tokio::spawn(async move { + match fut.await { + Ok(response) => handle_graphql_response(response, &err_sender), + Err(err) => handle_rust_error(err, &err_sender), + } + }); + } + //Ok(()) } async fn post_request_to_app_server(message: serde_json::Value) -> Result { - // maybe temp; on every request, create a new JWT, authenticating this request as the system-user (less complicated, albeit not terribly elegant) - let user_data = UserJWTData { id: SYSTEM_USER_ID.o(), email: SYSTEM_USER_EMAIL.o(), readOnly: Some(false) }; - let jwt_duration = 60 * 60 * 24 * 7; // = 604800 seconds = 1 week - let key = get_or_create_jwt_key_hs256().await.map_err(to_sub_err)?; - let claims = Claims::with_custom_claims(user_data, JWTDuration::from_secs(jwt_duration.try_into().map_err(to_sub_err)?)); - let jwt = key.authenticate(claims).map_err(to_sub_err)?; - //info!("Generated dev JWT:{}", jwt); + // maybe temp; on every request, create a new JWT, authenticating this request as the system-user (less complicated, albeit not terribly elegant) + let user_data = UserJWTData { id: SYSTEM_USER_ID.o(), email: SYSTEM_USER_EMAIL.o(), readOnly: Some(false) }; + let jwt_duration = 60 * 60 * 24 * 7; // = 604800 seconds = 1 week + let key = get_or_create_jwt_key_hs256().await.map_err(to_sub_err)?; + let claims = Claims::with_custom_claims(user_data, JWTDuration::from_secs(jwt_duration.try_into().map_err(to_sub_err)?)); + let jwt = key.authenticate(claims).map_err(to_sub_err)?; + //info!("Generated dev JWT:{}", jwt); - let client = new_hyper_client_http(); - let req = rust_shared::hyper::Request::builder() - .method(Method::POST) - .uri("http://dm-app-server.default.svc.cluster.local:5110/graphql") - .header("Content-Type", "application/json") - // temp; use db-password as way to prove this request is from an internal pod, and thus doesn't need to be signed-in - //.header("SecretForRunAsSystem", env::var("DB_PASSWORD").unwrap()) - .header("authorization", format!("Bearer {}", jwt)) - .body(message.to_string().into())?; - let res = client.request(req).await?; - let res_as_json_str = body_to_str(res.into_body()).await?; - let res_as_json = JSONValue::from_str(&res_as_json_str)?; - println!("Done! 
Response:{}", res_as_json); + let client = new_hyper_client_http(); + let req = rust_shared::hyper::Request::builder() + .method(Method::POST) + .uri("http://dm-app-server.default.svc.cluster.local:5110/graphql") + .header("Content-Type", "application/json") + // temp; use db-password as way to prove this request is from an internal pod, and thus doesn't need to be signed-in + //.header("SecretForRunAsSystem", env::var("DB_PASSWORD").unwrap()) + .header("authorization", format!("Bearer {}", jwt)) + .body(message.to_string().into())?; + let res = client.request(req).await?; + let res_as_json_str = body_to_str(res.into_body()).await?; + let res_as_json = JSONValue::from_str(&res_as_json_str)?; + println!("Done! Response:{}", res_as_json); - Ok(res_as_json) -} \ No newline at end of file + Ok(res_as_json) +} diff --git a/Packages/monitor-backend/src/utils/type_aliases.rs b/Packages/monitor-backend/src/utils/type_aliases.rs index 7d40c6c12..0d1924200 100644 --- a/Packages/monitor-backend/src/utils/type_aliases.rs +++ b/Packages/monitor-backend/src/utils/type_aliases.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use deadpool::managed::Object; use deadpool_postgres::Manager; -use rust_shared::serde_json::{Map, self}; +use rust_shared::serde_json::{self, Map}; // sync with type_aliases.rs in app-server // ========== @@ -14,4 +14,4 @@ pub type PGClientObject = Object; // channels pub type ABSender = async_broadcast::Sender; -pub type ABReceiver = async_broadcast::Receiver; \ No newline at end of file +pub type ABReceiver = async_broadcast::Receiver; diff --git a/Packages/rust-macros/src/cached_expand.rs b/Packages/rust-macros/src/cached_expand.rs index 2c9a3a1d9..25581dbca 100644 --- a/Packages/rust-macros/src/cached_expand.rs +++ b/Packages/rust-macros/src/cached_expand.rs @@ -1,118 +1,115 @@ extern crate proc_macro; extern crate syn; -use std::{env, fs}; +use std::io::{BufRead, BufReader}; use std::process::{Command, Stdio}; -use std::io::{BufReader, BufRead}; use std::str::FromStr; +use std::{env, fs}; -use proc_macro2::{TokenStream}; +use proc_macro2::TokenStream; pub fn cached_expand_impl(input: TokenStream) -> TokenStream { - if let Ok(val) = env::var("IN_DOCKER") { - if val == "1" { - println!("Running in Docker, so just returning original input-tokens."); - return input; - } - } - - let _input_str = input.to_string(); - //println!("Input: {:?}", input); - - let mut group_id = "".to_owned(); - let mut exclude_lines_with = "NEVER_MATCHING".to_owned(); - { - let mut past_token_strings: Vec = Vec::new(); - for token in input.clone() { - println!("Got token:{token}"); - let token_str = format!("{token}"); - - /*if token_str.starts_with("CEID_") && last_token_str.is_some() && last_token_str.unwrap() == "struct" { - result = token_str["CEID_".len()..].to_string(); - break; - }*/ - if let Some(token_5_back) = past_token_strings.get((past_token_strings.len() as isize - 5) as usize) { - if token_5_back == "ce_args" { - let mut lines = token_str.split("\n"); - // example line: id = "access_policies" - if let Some(id_line) = lines.find(|a| a.contains("id = ")) { - group_id = id_line.split("\"").collect::>()[1].to_owned(); - } - // example line: excludeLinesWith = "#[graphql(name" - if let Some(exclude_lines_with_0line) = lines.find(|a| a.contains("excludeLinesWith = ")) { - exclude_lines_with = exclude_lines_with_0line.split("\"").collect::>()[1].to_owned(); - } - break; - } - } - - past_token_strings.push(token_str); - /*if past_token_strings.len() > 3 { - past_token_strings.pop_front(); - }*/ - - // if 
we've processed 30 tokens, and still haven't reached a ce_args string, give up (it should be at the very top) - if past_token_strings.len() > 30 { - break; - } - } - } - if group_id == "" { - panic!("Could not find cached_expand args. Provide it using pattern: const ce_args: &str = r#\"id = \"\"\"#;"); - } - println!("Found args. @id:{group_id} @excludeLinesWith:{exclude_lines_with}"); - let cache_folder_path = env::current_dir().unwrap().join("target").join("cached_expand").join("expansions"); - let cache_input_path = cache_folder_path.join(group_id.clone() + "_Input"); - let cache_output_path = cache_folder_path.join(group_id.clone() + "_Output"); - - //if env::var("FOR_CACHED_EXPAND").is_ok_with(|a| a == "1") { - //if let Ok(val) = env::var("FOR_CACHED_EXPAND") && val == "1" { - const SPECIAL_MESSAGE_1: &str = "FOR_CACHED_EXPAND is true, so only adding markers."; - const SPECIAL_MESSAGE_2: &str = "FOR_RUST_ANALYZER is true, so only adding markers."; - let only_add_markers = { - let mut temp = false; - // if this macro is running as part of the "cargo expand" command (of a parent instance of this macro), then... - if let Ok(val) = env::var("FOR_CACHED_EXPAND") { - if val == "1" { - println!("{}", SPECIAL_MESSAGE_1); - temp = true; - } - } - // if this macro is running as part of rust-analyzer, then... - // todo: find alternative (one I didn't hard-code into settings.json) - if let Ok(val) = env::var("FOR_RUST_ANALYZER") { - if val == "1" { - println!("{}", SPECIAL_MESSAGE_2); - temp = true; - } - } - temp - }; - if only_add_markers { - /*let pre_tokens = TokenStream::from(quote! { struct StartMarker {} });*/ - let pre_tokens = TokenStream::from_str(format!("struct StartMarker_{group_id} {{}}").as_str()).unwrap(); - //println!("PreTokens:{}", pre_tokens); - //let post_tokens = TokenStream::from(quote! { struct EndMarker {} }); - let post_tokens = TokenStream::from_str(format!("struct EndMarker_{group_id} {{}}").as_str()).unwrap(); - return pre_tokens.into_iter() - .chain(input) - .chain(post_tokens.into_iter()) - .collect(); - } - - // check for a cache-hit - if cache_input_path.exists() && fs::read_to_string(cache_input_path.clone()).unwrap() == input.to_string() && cache_output_path.exists() { - let cached_output = fs::read_to_string(cache_output_path.clone()).unwrap(); - println!("Cache hit! 
Cached-output length: {}", cached_output.len()); - return TokenStream::from_str(&cached_output).unwrap(); - } - - //println!("Env-vars:{}", env::vars().map(|(var_name, var_value)| format!("{var_name}: {var_value}")).collect::>().join("\n")); - - let working_dir = env::current_dir().unwrap(); - let working_dir_str = working_dir.as_path().display().to_string(); - println!("Working-dir:{}", working_dir_str); - - let mut cmd = + if let Ok(val) = env::var("IN_DOCKER") { + if val == "1" { + println!("Running in Docker, so just returning original input-tokens."); + return input; + } + } + + let _input_str = input.to_string(); + //println!("Input: {:?}", input); + + let mut group_id = "".to_owned(); + let mut exclude_lines_with = "NEVER_MATCHING".to_owned(); + { + let mut past_token_strings: Vec = Vec::new(); + for token in input.clone() { + println!("Got token:{token}"); + let token_str = format!("{token}"); + + /*if token_str.starts_with("CEID_") && last_token_str.is_some() && last_token_str.unwrap() == "struct" { + result = token_str["CEID_".len()..].to_string(); + break; + }*/ + if let Some(token_5_back) = past_token_strings.get((past_token_strings.len() as isize - 5) as usize) { + if token_5_back == "ce_args" { + let mut lines = token_str.split("\n"); + // example line: id = "access_policies" + if let Some(id_line) = lines.find(|a| a.contains("id = ")) { + group_id = id_line.split("\"").collect::>()[1].to_owned(); + } + // example line: excludeLinesWith = "#[graphql(name" + if let Some(exclude_lines_with_0line) = lines.find(|a| a.contains("excludeLinesWith = ")) { + exclude_lines_with = exclude_lines_with_0line.split("\"").collect::>()[1].to_owned(); + } + break; + } + } + + past_token_strings.push(token_str); + /*if past_token_strings.len() > 3 { + past_token_strings.pop_front(); + }*/ + + // if we've processed 30 tokens, and still haven't reached a ce_args string, give up (it should be at the very top) + if past_token_strings.len() > 30 { + break; + } + } + } + if group_id == "" { + panic!("Could not find cached_expand args. Provide it using pattern: const ce_args: &str = r#\"id = \"\"\"#;"); + } + println!("Found args. @id:{group_id} @excludeLinesWith:{exclude_lines_with}"); + let cache_folder_path = env::current_dir().unwrap().join("target").join("cached_expand").join("expansions"); + let cache_input_path = cache_folder_path.join(group_id.clone() + "_Input"); + let cache_output_path = cache_folder_path.join(group_id.clone() + "_Output"); + + //if env::var("FOR_CACHED_EXPAND").is_ok_with(|a| a == "1") { + //if let Ok(val) = env::var("FOR_CACHED_EXPAND") && val == "1" { + const SPECIAL_MESSAGE_1: &str = "FOR_CACHED_EXPAND is true, so only adding markers."; + const SPECIAL_MESSAGE_2: &str = "FOR_RUST_ANALYZER is true, so only adding markers."; + let only_add_markers = { + let mut temp = false; + // if this macro is running as part of the "cargo expand" command (of a parent instance of this macro), then... + if let Ok(val) = env::var("FOR_CACHED_EXPAND") { + if val == "1" { + println!("{}", SPECIAL_MESSAGE_1); + temp = true; + } + } + // if this macro is running as part of rust-analyzer, then... + // todo: find alternative (one I didn't hard-code into settings.json) + if let Ok(val) = env::var("FOR_RUST_ANALYZER") { + if val == "1" { + println!("{}", SPECIAL_MESSAGE_2); + temp = true; + } + } + temp + }; + if only_add_markers { + /*let pre_tokens = TokenStream::from(quote! 
{ struct StartMarker {} });*/ + let pre_tokens = TokenStream::from_str(format!("struct StartMarker_{group_id} {{}}").as_str()).unwrap(); + //println!("PreTokens:{}", pre_tokens); + //let post_tokens = TokenStream::from(quote! { struct EndMarker {} }); + let post_tokens = TokenStream::from_str(format!("struct EndMarker_{group_id} {{}}").as_str()).unwrap(); + return pre_tokens.into_iter().chain(input).chain(post_tokens.into_iter()).collect(); + } + + // check for a cache-hit + if cache_input_path.exists() && fs::read_to_string(cache_input_path.clone()).unwrap() == input.to_string() && cache_output_path.exists() { + let cached_output = fs::read_to_string(cache_output_path.clone()).unwrap(); + println!("Cache hit! Cached-output length: {}", cached_output.len()); + return TokenStream::from_str(&cached_output).unwrap(); + } + + //println!("Env-vars:{}", env::vars().map(|(var_name, var_value)| format!("{var_name}: {var_value}")).collect::>().join("\n")); + + let working_dir = env::current_dir().unwrap(); + let working_dir_str = working_dir.as_path().display().to_string(); + println!("Working-dir:{}", working_dir_str); + + let mut cmd = // todo: fix that the user must call "cargo +nightly build" instead of just "cargo build" (for the initial command to start things off) Command::new("cargo")//.arg("+nightly") .arg("expand") @@ -122,7 +119,7 @@ pub fn cached_expand_impl(input: TokenStream) -> TokenStream { .env("FOR_CACHED_EXPAND", "1") //.current_dir(working_dir) .current_dir(working_dir_str + "/Packages/app-server") - + //.output() .stdout(Stdio::piped()) //.stderr(Stdio::piped()) @@ -130,69 +127,67 @@ pub fn cached_expand_impl(input: TokenStream) -> TokenStream { .unwrap(); - //String::from_utf8_lossy(&expand_command_output.stderr) - let mut expanded_code = "".to_owned(); - { - /*let stderr = cmd.stderr.as_mut().unwrap(); - let stderr_reader = BufReader::new(stderr); - let stderr_lines = stderr_reader.lines(); - for (i, line) in stderr_lines.enumerate() { - let line_str = line.unwrap().to_string(); - println!("Err({i}): {line_str}"); - }*/ - - let stdout = cmd.stdout.as_mut().unwrap(); - let stdout_reader = BufReader::new(stdout); - let stdout_lines = stdout_reader.lines(); - - let mut start_marker_hit = false; - let mut end_marker_hit = false; - for (_i, line) in stdout_lines.enumerate() { - let line_str = line.unwrap().to_string(); - //println!("Read({i}): {line_str}"); - - if line_str.contains(format!("StartMarker_{group_id}").as_str()) { - start_marker_hit = true; - } - - if start_marker_hit && !end_marker_hit && line_str != SPECIAL_MESSAGE_1 && line_str != SPECIAL_MESSAGE_2 { - if line_str.contains(exclude_lines_with.as_str()) { - println!("Ignoring line, based on excludeLinesWith setting:{}", line_str); - } else { - //println!("FoundExpanded({i}): {line_str}"); - expanded_code += &line_str; - } - } - - // run this after, so end-marker is still included - if line_str.contains(format!("EndMarker_{group_id}").as_str()) { - end_marker_hit = true; - } - } - } - - cmd.wait().unwrap(); - - //println!("Expanded code:[[[{}]]]", expanded_code); - println!("Expanded code length:{}", expanded_code.len()); - if expanded_code.is_empty() { - panic!("Expanded-code is empty! 
Terminating..."); - } - - println!("Caching input-code and expanded-code to:{}", cache_folder_path.as_path().display().to_string()); - fs::create_dir_all(cache_folder_path).unwrap(); - fs::write(cache_input_path.clone(), input.to_string()).unwrap(); - fs::write(cache_output_path.clone(), expanded_code.clone()).unwrap(); - println!("Writes done! Proofs:[{}, {}]", - fs::read_to_string(cache_input_path.clone()).unwrap().len(), - fs::read_to_string(cache_output_path.clone()).unwrap().len()); - - return TokenStream::from_str(&expanded_code).unwrap(); + //String::from_utf8_lossy(&expand_command_output.stderr) + let mut expanded_code = "".to_owned(); + { + /*let stderr = cmd.stderr.as_mut().unwrap(); + let stderr_reader = BufReader::new(stderr); + let stderr_lines = stderr_reader.lines(); + for (i, line) in stderr_lines.enumerate() { + let line_str = line.unwrap().to_string(); + println!("Err({i}): {line_str}"); + }*/ + + let stdout = cmd.stdout.as_mut().unwrap(); + let stdout_reader = BufReader::new(stdout); + let stdout_lines = stdout_reader.lines(); + + let mut start_marker_hit = false; + let mut end_marker_hit = false; + for (_i, line) in stdout_lines.enumerate() { + let line_str = line.unwrap().to_string(); + //println!("Read({i}): {line_str}"); + + if line_str.contains(format!("StartMarker_{group_id}").as_str()) { + start_marker_hit = true; + } + + if start_marker_hit && !end_marker_hit && line_str != SPECIAL_MESSAGE_1 && line_str != SPECIAL_MESSAGE_2 { + if line_str.contains(exclude_lines_with.as_str()) { + println!("Ignoring line, based on excludeLinesWith setting:{}", line_str); + } else { + //println!("FoundExpanded({i}): {line_str}"); + expanded_code += &line_str; + } + } + + // run this after, so end-marker is still included + if line_str.contains(format!("EndMarker_{group_id}").as_str()) { + end_marker_hit = true; + } + } + } + + cmd.wait().unwrap(); + + //println!("Expanded code:[[[{}]]]", expanded_code); + println!("Expanded code length:{}", expanded_code.len()); + if expanded_code.is_empty() { + panic!("Expanded-code is empty! Terminating..."); + } + + println!("Caching input-code and expanded-code to:{}", cache_folder_path.as_path().display().to_string()); + fs::create_dir_all(cache_folder_path).unwrap(); + fs::write(cache_input_path.clone(), input.to_string()).unwrap(); + fs::write(cache_output_path.clone(), expanded_code.clone()).unwrap(); + println!("Writes done! 
Proofs:[{}, {}]", fs::read_to_string(cache_input_path.clone()).unwrap().len(), fs::read_to_string(cache_output_path.clone()).unwrap().len()); + + return TokenStream::from_str(&expanded_code).unwrap(); } /*//TokenStream::new() input //input.into_iter().take(1).collect() /*input.to_string().parse::().unwrap().into_iter().chain( - input.to_string().replace("MyStruct", "MyStruct2").parse::().unwrap().into_iter() -).collect()*/ */ \ No newline at end of file + input.to_string().replace("MyStruct", "MyStruct2").parse::().unwrap().into_iter() +).collect()*/ */ diff --git a/Packages/rust-macros/src/lib.rs b/Packages/rust-macros/src/lib.rs index a1b599afd..eded0fdf2 100644 --- a/Packages/rust-macros/src/lib.rs +++ b/Packages/rust-macros/src/lib.rs @@ -1,27 +1,27 @@ -use std::{env}; +use std::env; use std::time::Instant; use cached_expand::cached_expand_impl; use proc_macro2::TokenStream; +use quote::quote; +use syn::{parse_macro_input, DeriveInput}; use wrap_agql_schema_build::{wrap_agql_schema_build_impl, wrap_agql_schema_type_impl}; use wrap_async_graphql::wrap_async_graphql_impl; use wrap_serde_macros::wrap_serde_macros_impl; -use quote::{quote}; -use syn::{parse_macro_input, DeriveInput}; extern crate proc_macro; extern crate syn; -mod utils; mod cached_expand; -mod wrap_async_graphql; +mod utils; mod wrap_agql_schema_build; +mod wrap_async_graphql; mod wrap_serde_macros; #[proc_macro] /// Use this as a way of "commenting" a macro temporarily, without needing to find the end-bracket. pub fn unchanged(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - input + input } // higher level (ie. combining multiple macros into one) @@ -29,31 +29,31 @@ pub fn unchanged(input: proc_macro::TokenStream) -> proc_macro::TokenStream { #[proc_macro] pub fn wrap_slow_macros(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let proceed = { - let mut temp = false; - if let Ok(val) = env::var("FOR_RUST_ANALYZER") { - if val == "1" { - //println!("Macro wrap_slow_macros: Modifying tokens, since FOR_RUST_ANALYZER is true."); - temp = true; - } - } - temp - }; - if !proceed { - return input; - } - - let output = TokenStream::from(input); - let t1 = Instant::now(); - let output = wrap_async_graphql_impl(output, true); - let t2 = Instant::now(); - let output = wrap_serde_macros_impl(output, true); - let t3 = Instant::now(); - - // typical result, in dm-repo: [11ms, 6ms] - println!("Macro wrap_slow_macros: Timings = [{}ms, {}ms]", (t2 - t1).as_millis(), (t3 - t2).as_millis()); - - proc_macro::TokenStream::from(output) + let proceed = { + let mut temp = false; + if let Ok(val) = env::var("FOR_RUST_ANALYZER") { + if val == "1" { + //println!("Macro wrap_slow_macros: Modifying tokens, since FOR_RUST_ANALYZER is true."); + temp = true; + } + } + temp + }; + if !proceed { + return input; + } + + let output = TokenStream::from(input); + let t1 = Instant::now(); + let output = wrap_async_graphql_impl(output, true); + let t2 = Instant::now(); + let output = wrap_serde_macros_impl(output, true); + let t3 = Instant::now(); + + // typical result, in dm-repo: [11ms, 6ms] + println!("Macro wrap_slow_macros: Timings = [{}ms, {}ms]", (t2 - t1).as_millis(), (t3 - t2).as_millis()); + + proc_macro::TokenStream::from(output) } // base macros @@ -61,32 +61,32 @@ pub fn wrap_slow_macros(input: proc_macro::TokenStream) -> proc_macro::TokenStre #[proc_macro] pub fn wrap_agql_schema_type(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let output = wrap_agql_schema_type_impl(TokenStream::from(input), 
false); - proc_macro::TokenStream::from(output) + let output = wrap_agql_schema_type_impl(TokenStream::from(input), false); + proc_macro::TokenStream::from(output) } #[proc_macro] pub fn wrap_agql_schema_build(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let output = wrap_agql_schema_build_impl(TokenStream::from(input), false); - proc_macro::TokenStream::from(output) + let output = wrap_agql_schema_build_impl(TokenStream::from(input), false); + proc_macro::TokenStream::from(output) } #[proc_macro] pub fn cached_expand(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let output = cached_expand_impl(TokenStream::from(input)); - proc_macro::TokenStream::from(output) + let output = cached_expand_impl(TokenStream::from(input)); + proc_macro::TokenStream::from(output) } #[proc_macro] pub fn wrap_async_graphql(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let output = wrap_async_graphql_impl(TokenStream::from(input), false); - proc_macro::TokenStream::from(output) + let output = wrap_async_graphql_impl(TokenStream::from(input), false); + proc_macro::TokenStream::from(output) } #[proc_macro] pub fn wrap_serde_macros(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let output = wrap_serde_macros_impl(TokenStream::from(input), false); - proc_macro::TokenStream::from(output) + let output = wrap_serde_macros_impl(TokenStream::from(input), false); + proc_macro::TokenStream::from(output) } // derive macros @@ -95,67 +95,66 @@ pub fn wrap_serde_macros(input: proc_macro::TokenStream) -> proc_macro::TokenStr #[proc_macro_derive(Serialize_Stub)] pub fn serialize_stub(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let ast = parse_macro_input!(input as DeriveInput); - let struct_name = &ast.ident; - - /*let orig_span = ast.ident.span(); - proc_macro::TokenStream::from(quote_spanned! {orig_span=> - #[automatically_derived] - impl serde::Serialize for #struct_name { - fn serialize<__S>(&self, __serializer: __S) -> serde::__private::Result<__S::Ok, __S::Error> where __S: serde::Serializer { - Err(serde::ser::Error::custom("This is a placeholder generated by the Serialize_Stub macro, for quick resolution during cargo-check. You should not be seeing this at runtime.")) - } - } - })*/ - proc_macro::TokenStream::from(quote! { - #[doc(hidden)] - #[allow(non_upper_case_globals,unused_attributes,unused_qualifications)] - const _: () = { - #[allow(unused_extern_crates,clippy::useless_attribute)] - extern crate serde as _serde; - //extern crate rust_shared::serde as _serde; - //use rust_shared::serde as _serde; - - #[automatically_derived] - impl _serde::Serialize for #struct_name { - fn serialize<__S>(&self, __serializer: __S) -> _serde::__private::Result<__S::Ok, __S::Error> where __S: _serde::Serializer { - Err(_serde::ser::Error::custom("This is a placeholder generated by the Serialize_Stub macro, for quick resolution during cargo-check. You should not be seeing this at runtime.")) - } - } - }; - }) + let ast = parse_macro_input!(input as DeriveInput); + let struct_name = &ast.ident; + + /*let orig_span = ast.ident.span(); + proc_macro::TokenStream::from(quote_spanned! {orig_span=> + #[automatically_derived] + impl serde::Serialize for #struct_name { + fn serialize<__S>(&self, __serializer: __S) -> serde::__private::Result<__S::Ok, __S::Error> where __S: serde::Serializer { + Err(serde::ser::Error::custom("This is a placeholder generated by the Serialize_Stub macro, for quick resolution during cargo-check. 
You should not be seeing this at runtime.")) + } + } + })*/ + proc_macro::TokenStream::from(quote! { + #[doc(hidden)] + #[allow(non_upper_case_globals,unused_attributes,unused_qualifications)] + const _: () = { + #[allow(unused_extern_crates,clippy::useless_attribute)] + extern crate serde as _serde; + //extern crate rust_shared::serde as _serde; + //use rust_shared::serde as _serde; + + #[automatically_derived] + impl _serde::Serialize for #struct_name { + fn serialize<__S>(&self, __serializer: __S) -> _serde::__private::Result<__S::Ok, __S::Error> where __S: _serde::Serializer { + Err(_serde::ser::Error::custom("This is a placeholder generated by the Serialize_Stub macro, for quick resolution during cargo-check. You should not be seeing this at runtime.")) + } + } + }; + }) } - #[proc_macro_derive(Deserialize_Stub)] pub fn deserialize_stub(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let ast = parse_macro_input!(input as DeriveInput); - let struct_name = &ast.ident; - - /*let orig_span = ast.ident.span(); - proc_macro::TokenStream::from(quote_spanned! {orig_span=> - #[automatically_derived] - impl <'de> serde::Deserialize<'de> for #struct_name { - fn deserialize<__D>(__deserializer:__D) -> serde::__private::Result where __D: serde::Deserializer<'de> { - Err(serde::de::Error::custom("This is a placeholder generated by the Deserialize_Stub macro, for quick resolution during cargo-check. You should not be seeing this at runtime.")) - } - } - })*/ - proc_macro::TokenStream::from(quote! { - #[doc(hidden)] - #[allow(non_upper_case_globals,unused_attributes,unused_qualifications)] - const _: () = { - #[allow(unused_extern_crates,clippy::useless_attribute)] - extern crate serde as _serde; - //extern crate rust_shared::serde as _serde; - //use rust_shared::serde as _serde; - - #[automatically_derived] - impl <'de> _serde::Deserialize<'de> for #struct_name { - fn deserialize<__D>(__deserializer:__D) -> _serde::__private::Result where __D: _serde::Deserializer<'de> { - Err(_serde::de::Error::custom("This is a placeholder generated by the Deserialize_Stub macro, for quick resolution during cargo-check. You should not be seeing this at runtime.")) - } - } - }; - }) -} \ No newline at end of file + let ast = parse_macro_input!(input as DeriveInput); + let struct_name = &ast.ident; + + /*let orig_span = ast.ident.span(); + proc_macro::TokenStream::from(quote_spanned! {orig_span=> + #[automatically_derived] + impl <'de> serde::Deserialize<'de> for #struct_name { + fn deserialize<__D>(__deserializer:__D) -> serde::__private::Result where __D: serde::Deserializer<'de> { + Err(serde::de::Error::custom("This is a placeholder generated by the Deserialize_Stub macro, for quick resolution during cargo-check. You should not be seeing this at runtime.")) + } + } + })*/ + proc_macro::TokenStream::from(quote! { + #[doc(hidden)] + #[allow(non_upper_case_globals,unused_attributes,unused_qualifications)] + const _: () = { + #[allow(unused_extern_crates,clippy::useless_attribute)] + extern crate serde as _serde; + //extern crate rust_shared::serde as _serde; + //use rust_shared::serde as _serde; + + #[automatically_derived] + impl <'de> _serde::Deserialize<'de> for #struct_name { + fn deserialize<__D>(__deserializer:__D) -> _serde::__private::Result where __D: _serde::Deserializer<'de> { + Err(_serde::de::Error::custom("This is a placeholder generated by the Deserialize_Stub macro, for quick resolution during cargo-check. 
You should not be seeing this at runtime.")) + } + } + }; + }) +} diff --git a/Packages/rust-macros/src/utils.rs b/Packages/rust-macros/src/utils.rs index fcfdf4a71..58b387723 100644 --- a/Packages/rust-macros/src/utils.rs +++ b/Packages/rust-macros/src/utils.rs @@ -1,128 +1,110 @@ -use std::{collections::HashMap}; -use proc_macro2::{TokenStream, TokenTree, Group, Delimiter}; +use proc_macro2::{Delimiter, Group, TokenStream, TokenTree}; +use std::collections::HashMap; // level-2 helpers // ========== pub fn remove_token_sequences_for_macros(tokens: TokenStream, macros_to_remove: &'static [&'static str]) -> TokenStream { - remove_token_sequences_matching(tokens, get_slot_checks_for_removing_macros(macros_to_remove)) + remove_token_sequences_matching(tokens, get_slot_checks_for_removing_macros(macros_to_remove)) } pub fn get_slot_checks_for_removing_macros(macros_to_remove: &'static [&'static str]) -> Vec { - let is_macro_to_block = Box::new(|token: &TokenTree| { - match token { - TokenTree::Group(data) => { - if data.delimiter() == Delimiter::Bracket { - let children: Vec = data.stream().into_iter().collect(); - if let Some(first_child) = children.get(0) { - if let TokenTree::Ident(data) = first_child { - if macros_to_remove.contains(&data.to_string().as_str()) { - return true; - } - } - } - } - false - }, - _ => false, - } - }); - let is_hash = Box::new(|token: &TokenTree| { - match token { - TokenTree::Punct(data) if data.as_char() == '#' => true, - _ => false, - } - }); - - vec![ - is_hash, - is_macro_to_block, - ] + let is_macro_to_block = Box::new(|token: &TokenTree| match token { + TokenTree::Group(data) => { + if data.delimiter() == Delimiter::Bracket { + let children: Vec = data.stream().into_iter().collect(); + if let Some(first_child) = children.get(0) { + if let TokenTree::Ident(data) = first_child { + if macros_to_remove.contains(&data.to_string().as_str()) { + return true; + } + } + } + } + false + }, + _ => false, + }); + let is_hash = Box::new(|token: &TokenTree| match token { + TokenTree::Punct(data) if data.as_char() == '#' => true, + _ => false, + }); + + vec![is_hash, is_macro_to_block] } pub fn remove_token_sequences_for_derive_macros(tokens: TokenStream, derive_macros_to_remove: &'static [&'static str]) -> TokenStream { - let result = tokens; + let result = tokens; - let is_derive_macro_to_block = Box::new(|token: &TokenTree| { - match token { - TokenTree::Ident(data) if derive_macros_to_remove.contains(&data.to_string().as_str()) => true, - _ => false, - } - }); - let is_comma = Box::new(|token: &TokenTree| { - match token { - TokenTree::Punct(data) if data.as_char() == ',' => true, - _ => false - } - }); + let is_derive_macro_to_block = Box::new(|token: &TokenTree| match token { + TokenTree::Ident(data) if derive_macros_to_remove.contains(&data.to_string().as_str()) => true, + _ => false, + }); + let is_comma = Box::new(|token: &TokenTree| match token { + TokenTree::Punct(data) if data.as_char() == ',' => true, + _ => false, + }); - // first remove any target-macros, matching pattern: `MACRO,` (ie. non-last derive-macro) - let result = remove_token_sequences_matching(result, vec![ - is_derive_macro_to_block.clone(), - is_comma.clone(), - ]); - // then remove any target-macros, matching pattern: `,MACRO` (ie. non-first derive-macro) - let result = remove_token_sequences_matching(result, vec![ - is_comma.clone(), - is_derive_macro_to_block.clone(), - ]); - // then remove any target-macros, matching pattern: `MACRO` (ie. 
standalone derive-macro) - let result = remove_token_sequences_matching(result, vec![ - is_derive_macro_to_block.clone(), - ]); + // first remove any target-macros, matching pattern: `MACRO,` (ie. non-last derive-macro) + let result = remove_token_sequences_matching(result, vec![is_derive_macro_to_block.clone(), is_comma.clone()]); + // then remove any target-macros, matching pattern: `,MACRO` (ie. non-first derive-macro) + let result = remove_token_sequences_matching(result, vec![is_comma.clone(), is_derive_macro_to_block.clone()]); + // then remove any target-macros, matching pattern: `MACRO` (ie. standalone derive-macro) + let result = remove_token_sequences_matching(result, vec![is_derive_macro_to_block.clone()]); - result + result } // level-1 helpers (token-modification functions) // ========== pub fn remove_token_sequences_matching(tokens: TokenStream, mut slot_checks: Vec) -> TokenStream { - let mut slots: Vec = Vec::new(); - for check in slot_checks.drain(0..slot_checks.len()) { - slots.push((check, None)); - } - replace_token_sequences_matching(tokens, &slots) + let mut slots: Vec = Vec::new(); + for check in slot_checks.drain(0..slot_checks.len()) { + slots.push((check, None)); + } + replace_token_sequences_matching(tokens, &slots) } pub type Slot = (SlotCheck, SlotReplacement); pub type SlotCheck = Box bool>; pub type SlotReplacement = Option>; pub fn replace_token_sequences_matching(tokens: TokenStream, slots: &Vec) -> TokenStream { - let mut token_replacements_planned: HashMap = HashMap::new(); - - let mut tokens_so_far = Vec::new(); - for token in tokens { - //println!("Processing token:{}", token.to_string()); - tokens_so_far.push(token); - if tokens_so_far.len() >= slots.len() { - let token_index_for_first_slot = tokens_so_far.len() - slots.len(); - let all_checks_pass = slots.iter().enumerate().all(|(i, slot)| { - let token = &tokens_so_far[token_index_for_first_slot + i]; - let check = &slot.0; - return check(token); - }); - if all_checks_pass { - //println!("Blocking this token, and the {} before it.", tokens_for_slots.len() - 1); - for (i2, slot) in slots.iter().enumerate() { - let index_to_replace = token_index_for_first_slot + i2; - token_replacements_planned.insert(index_to_replace, slot.1.clone()); - } - } - } - } + let mut token_replacements_planned: HashMap = HashMap::new(); - let mut result: Vec = Vec::new(); - for (i, token) in tokens_so_far.into_iter().enumerate() { - if token_replacements_planned.contains_key(&i) { - let replace_with = token_replacements_planned.remove(&i).unwrap(); // remove the entry from the hash-map while retrieving - if let Some(mut replace_with) = replace_with { - result.append(&mut replace_with); - } - continue; - } - result.push(token); - } + let mut tokens_so_far = Vec::new(); + for token in tokens { + //println!("Processing token:{}", token.to_string()); + tokens_so_far.push(token); + if tokens_so_far.len() >= slots.len() { + let token_index_for_first_slot = tokens_so_far.len() - slots.len(); + let all_checks_pass = slots.iter().enumerate().all(|(i, slot)| { + let token = &tokens_so_far[token_index_for_first_slot + i]; + let check = &slot.0; + return check(token); + }); + if all_checks_pass { + //println!("Blocking this token, and the {} before it.", tokens_for_slots.len() - 1); + for (i2, slot) in slots.iter().enumerate() { + let index_to_replace = token_index_for_first_slot + i2; + token_replacements_planned.insert(index_to_replace, slot.1.clone()); + } + } + } + } + + let mut result: Vec = Vec::new(); + for (i, token) in 
tokens_so_far.into_iter().enumerate() { + if token_replacements_planned.contains_key(&i) { + let replace_with = token_replacements_planned.remove(&i).unwrap(); // remove the entry from the hash-map while retrieving + if let Some(mut replace_with) = replace_with { + result.append(&mut replace_with); + } + continue; + } + result.push(token); + } + #[rustfmt::skip] let result_processed: Vec = result.into_iter().map(|token| { match token { TokenTree::Group(data) => { @@ -133,5 +115,5 @@ pub fn replace_token_sequences_matching(tokens: TokenStream, slots: &Vec) } }).collect(); - TokenStream::from_iter(result_processed) -} \ No newline at end of file + TokenStream::from_iter(result_processed) +} diff --git a/Packages/rust-macros/src/wrap_agql_schema_build.rs b/Packages/rust-macros/src/wrap_agql_schema_build.rs index 4e726cfe7..7e30c20a6 100644 --- a/Packages/rust-macros/src/wrap_agql_schema_build.rs +++ b/Packages/rust-macros/src/wrap_agql_schema_build.rs @@ -1,11 +1,11 @@ -use std::{env}; +use proc_macro2::TokenStream; +use std::env; use std::str::FromStr; -use proc_macro2::{TokenStream}; use crate::wrap_async_graphql::SKIP_AGQL_WRAPPING; /// Only needed to be used if you need to have the `Schema` type present. -/// +/// /// Usage example: /// ``` /// pub type RootSchema = wrap_agql_schema_type!{ @@ -13,45 +13,55 @@ use crate::wrap_async_graphql::SKIP_AGQL_WRAPPING; /// }; /// ``` pub fn wrap_agql_schema_type_impl(input: TokenStream, force_proceed: bool) -> TokenStream { - if SKIP_AGQL_WRAPPING { return input; } // can set this flag to true temporarily, to make debugging easier + if SKIP_AGQL_WRAPPING { + return input; + } // can set this flag to true temporarily, to make debugging easier - let proceed = force_proceed || { - let mut temp = false; - if let Ok(val) = env::var("STRIP_ASYNC_GRAPHQL") { - if val == "1" { - println!("Macro wrap_agql_schema_type: Modifying tokens, since STRIP_ASYNC_GRAPHQL is true."); - temp = true; - } - } - temp - }; - if !proceed { - return input; - } - - return TokenStream::from_str(r#" + let proceed = force_proceed || { + let mut temp = false; + if let Ok(val) = env::var("STRIP_ASYNC_GRAPHQL") { + if val == "1" { + println!("Macro wrap_agql_schema_type: Modifying tokens, since STRIP_ASYNC_GRAPHQL is true."); + temp = true; + } + } + temp + }; + if !proceed { + return input; + } + + return TokenStream::from_str( + r#" Schema - "#).unwrap(); + "#, + ) + .unwrap(); } pub fn wrap_agql_schema_build_impl(input: TokenStream, force_proceed: bool) -> TokenStream { - if SKIP_AGQL_WRAPPING { return input; } // can set this flag to true temporarily, to make debugging easier + if SKIP_AGQL_WRAPPING { + return input; + } // can set this flag to true temporarily, to make debugging easier + + let proceed = force_proceed || { + let mut temp = false; + if let Ok(val) = env::var("STRIP_ASYNC_GRAPHQL") { + if val == "1" { + println!("Macro wrap_agql_scheme_build: Modifying tokens, since STRIP_ASYNC_GRAPHQL is true."); + temp = true; + } + } + temp + }; + if !proceed { + return input; + } - let proceed = force_proceed || { - let mut temp = false; - if let Ok(val) = env::var("STRIP_ASYNC_GRAPHQL") { - if val == "1" { - println!("Macro wrap_agql_scheme_build: Modifying tokens, since STRIP_ASYNC_GRAPHQL is true."); - temp = true; - } - } - temp - }; - if !proceed { - return input; - } - - return TokenStream::from_str(r#" + return TokenStream::from_str( + r#" Schema::build(EmptyMutation, EmptyMutation, EmptySubscription) - "#).unwrap(); -} \ No newline at end of file + "#, + ) + 
.unwrap(); +} diff --git a/Packages/rust-macros/src/wrap_async_graphql.rs b/Packages/rust-macros/src/wrap_async_graphql.rs index 036420b7f..e3eb11057 100644 --- a/Packages/rust-macros/src/wrap_async_graphql.rs +++ b/Packages/rust-macros/src/wrap_async_graphql.rs @@ -1,5 +1,9 @@ -use std::{env, str::FromStr, time::{SystemTime, UNIX_EPOCH}}; -use proc_macro2::{TokenStream}; +use proc_macro2::TokenStream; +use std::{ + env, + str::FromStr, + time::{SystemTime, UNIX_EPOCH}, +}; use crate::utils::{remove_token_sequences_for_derive_macros, remove_token_sequences_for_macros}; @@ -11,43 +15,43 @@ pub const SKIP_AGQL_WRAPPING: bool = false; //pub const SKIP_AGQL_WRAPPING: bool = true; pub fn wrap_async_graphql_impl(input: TokenStream, force_proceed: bool) -> TokenStream { - if SKIP_AGQL_WRAPPING { return input; } // can set this flag to true temporarily, to make debugging easier - - let proceed = force_proceed || { - let mut temp = false; - if let Ok(val) = env::var("STRIP_ASYNC_GRAPHQL") { - if val == "1" { - println!("Macro wrap_async_graphql: Modifying tokens, since STRIP_ASYNC_GRAPHQL is true."); - temp = true; - } - } - temp - }; - if !proceed { - return input; - } - - let output = input.clone(); - let output = remove_graphql_tags(output); - //output - - // add unused alias of async_graphql crate (so that if dev forgets "use rust_shared::async_graphql", they'll get a reminder even in cargo-check) - /*let pre_tokens = proc_macro::TokenStream::from(quote! { - use async_graphql as _async_graphql_unused_alias; - });*/ - let pre_tokens = TokenStream::from_str(format!("use async_graphql as _async_graphql_unused_alias_{};", SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos()).as_str()).unwrap(); - pre_tokens.into_iter() - .chain(output) - .collect() + if SKIP_AGQL_WRAPPING { + return input; + } // can set this flag to true temporarily, to make debugging easier + + let proceed = force_proceed || { + let mut temp = false; + if let Ok(val) = env::var("STRIP_ASYNC_GRAPHQL") { + if val == "1" { + println!("Macro wrap_async_graphql: Modifying tokens, since STRIP_ASYNC_GRAPHQL is true."); + temp = true; + } + } + temp + }; + if !proceed { + return input; + } + + let output = input.clone(); + let output = remove_graphql_tags(output); + //output + + // add unused alias of async_graphql crate (so that if dev forgets "use rust_shared::async_graphql", they'll get a reminder even in cargo-check) + /*let pre_tokens = proc_macro::TokenStream::from(quote! 
{ + use async_graphql as _async_graphql_unused_alias; + });*/ + let pre_tokens = TokenStream::from_str(format!("use async_graphql as _async_graphql_unused_alias_{};", SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos()).as_str()).unwrap(); + pre_tokens.into_iter().chain(output).collect() } static MACROS_TO_REMOVE: &'static [&'static str] = &["graphql", "Object", "Subscription"]; static DERIVE_MACROS_TO_REMOVE: &'static [&'static str] = &["SimpleObject", "MergedObject", "MergedSubscription", "InputObject"]; fn remove_graphql_tags(tokens: TokenStream) -> TokenStream { - let mut result = tokens; - result = remove_token_sequences_for_macros(result, MACROS_TO_REMOVE); - result = remove_token_sequences_for_derive_macros(result, DERIVE_MACROS_TO_REMOVE); - result + let mut result = tokens; + result = remove_token_sequences_for_macros(result, MACROS_TO_REMOVE); + result = remove_token_sequences_for_derive_macros(result, DERIVE_MACROS_TO_REMOVE); + result } // tests (run these with "cargo test -- --nocapture" to see log output) @@ -55,14 +59,15 @@ fn remove_graphql_tags(tokens: TokenStream) -> TokenStream { #[cfg(test)] mod tests { - use std::str::FromStr; - use proc_macro2::TokenStream; + use proc_macro2::TokenStream; + use std::str::FromStr; - use crate::wrap_async_graphql::remove_graphql_tags; + use crate::wrap_async_graphql::remove_graphql_tags; - #[test] - fn test1() { - let tokens = TokenStream::from_str(r#" + #[test] + fn test1() { + let tokens = TokenStream::from_str( + r#" #[Object] struct Test1 { #[graphql(name = "public_base")] @@ -79,78 +84,80 @@ mod tests { #[Subscription] impl SubscriptionShard_CommandRun {} - "#); - println!("Tokens:{:#?}", tokens); - /* output: - ========== - Tokens:Ok(TokenStream [ - Punct { char: '#', spacing: Alone }, - Group { delimiter: Bracket, stream: TokenStream [ - Ident { sym: Object }, - ] }, - Ident { sym: struct }, - Ident { sym: Test1 }, - Group { delimiter: Brace, stream: TokenStream [ - Punct { char: '#', spacing: Alone }, - Group { delimiter: Bracket, stream: TokenStream [ - Ident { sym: graphql }, - Group { delimiter: Parenthesis, stream: TokenStream [ - Ident { sym: name }, - Punct { char: '=', spacing: Alone }, - Literal { lit: "public_base" }, - ] }, - ] }, - Ident { sym: public_base }, - Punct { char: ':', spacing: Alone }, - Ident { sym: bool, }, - Punct { char: ';', spacing: Alone }, - ]}, - Punct { char: '#', spacing: Alone }, - Group { delimiter: Bracket, stream: TokenStream [ - Ident { sym: derive }, - Group { delimiter: Parenthesis, stream: TokenStream [ - Ident { sym: MergedObject }, - Punct { char: ',', spacing: Alone }, - Ident { sym: Default }, - ] }, - ] }, - Ident { sym: pub }, - Ident { sym: struct }, - Ident { sym: QueryRoot }, - Punct { char: ';', spacing: Alone }, - Punct { char: '#', spacing: Alone }, - Group { delimiter: Bracket, stream: TokenStream [ - Ident { sym: derive }, - Group { delimiter: Parenthesis, stream: TokenStream [ - Ident { sym: MergedSubscription }, - Punct { char: ',', spacing: Alone }, - Ident { sym: Default }, - ] }, - ] }, - Ident { sym: pub }, - Ident { sym: struct }, - Ident { sym: SubscriptionRoot }, - Punct { char: ';', spacing: Alone }, - Punct { char: '#', spacing: Alone }, - Group { delimiter: Bracket, stream: TokenStream [ - Ident { sym: SimpleObject }, - ] }, - Ident { sym: struct }, - Ident { sym: Test2 }, - Group { delimiter: Brace, stream: TokenStream [] }, - Punct { char: '#', spacing: Alone }, - Group { delimiter: Bracket, stream: TokenStream [ - Ident { sym: Subscription }, - ] 
}, - Ident { sym: impl }, - Ident { sym: SubscriptionShard_CommandRun }, - Group { delimiter: Brace, stream: TokenStream [] }, - ])*/ - } - - #[test] - fn filter_out_graphql() { - let tokens = TokenStream::from_str(r#" + "#, + ); + println!("Tokens:{:#?}", tokens); + /* output: + ========== + Tokens:Ok(TokenStream [ + Punct { char: '#', spacing: Alone }, + Group { delimiter: Bracket, stream: TokenStream [ + Ident { sym: Object }, + ] }, + Ident { sym: struct }, + Ident { sym: Test1 }, + Group { delimiter: Brace, stream: TokenStream [ + Punct { char: '#', spacing: Alone }, + Group { delimiter: Bracket, stream: TokenStream [ + Ident { sym: graphql }, + Group { delimiter: Parenthesis, stream: TokenStream [ + Ident { sym: name }, + Punct { char: '=', spacing: Alone }, + Literal { lit: "public_base" }, + ] }, + ] }, + Ident { sym: public_base }, + Punct { char: ':', spacing: Alone }, + Ident { sym: bool, }, + Punct { char: ';', spacing: Alone }, + ]}, + Punct { char: '#', spacing: Alone }, + Group { delimiter: Bracket, stream: TokenStream [ + Ident { sym: derive }, + Group { delimiter: Parenthesis, stream: TokenStream [ + Ident { sym: MergedObject }, + Punct { char: ',', spacing: Alone }, + Ident { sym: Default }, + ] }, + ] }, + Ident { sym: pub }, + Ident { sym: struct }, + Ident { sym: QueryRoot }, + Punct { char: ';', spacing: Alone }, + Punct { char: '#', spacing: Alone }, + Group { delimiter: Bracket, stream: TokenStream [ + Ident { sym: derive }, + Group { delimiter: Parenthesis, stream: TokenStream [ + Ident { sym: MergedSubscription }, + Punct { char: ',', spacing: Alone }, + Ident { sym: Default }, + ] }, + ] }, + Ident { sym: pub }, + Ident { sym: struct }, + Ident { sym: SubscriptionRoot }, + Punct { char: ';', spacing: Alone }, + Punct { char: '#', spacing: Alone }, + Group { delimiter: Bracket, stream: TokenStream [ + Ident { sym: SimpleObject }, + ] }, + Ident { sym: struct }, + Ident { sym: Test2 }, + Group { delimiter: Brace, stream: TokenStream [] }, + Punct { char: '#', spacing: Alone }, + Group { delimiter: Bracket, stream: TokenStream [ + Ident { sym: Subscription }, + ] }, + Ident { sym: impl }, + Ident { sym: SubscriptionShard_CommandRun }, + Group { delimiter: Brace, stream: TokenStream [] }, + ])*/ + } + + #[test] + fn filter_out_graphql() { + let tokens = TokenStream::from_str( + r#" #[Object] struct Test1 { #[graphql(name = "public_base")] @@ -169,11 +176,13 @@ mod tests { #[Subscription] impl SubscriptionShard_CommandRun {} - "#).unwrap(); - let tokens_filtered = remove_graphql_tags(tokens); - assert_eq!( - tokens_filtered.to_string().chars().filter(|c| !c.is_whitespace()).collect::(), - r##" + "#, + ) + .unwrap(); + let tokens_filtered = remove_graphql_tags(tokens); + assert_eq!( + tokens_filtered.to_string().chars().filter(|c| !c.is_whitespace()).collect::(), + r##" struct Test1 { public_base: bool; #[some_other_macro] @@ -189,7 +198,10 @@ mod tests { struct Test2 {} impl SubscriptionShard_CommandRun {} - "##.chars().filter(|c| !c.is_whitespace()).collect::() - ); - } -} \ No newline at end of file + "## + .chars() + .filter(|c| !c.is_whitespace()) + .collect::() + ); + } +} diff --git a/Packages/rust-macros/src/wrap_serde_macros.rs b/Packages/rust-macros/src/wrap_serde_macros.rs index 97a2b56a3..c20910290 100644 --- a/Packages/rust-macros/src/wrap_serde_macros.rs +++ b/Packages/rust-macros/src/wrap_serde_macros.rs @@ -1,63 +1,61 @@ +use proc_macro2::{Ident, Span, TokenStream, TokenTree}; use std::{env, str::FromStr}; -use proc_macro2::{TokenStream, TokenTree, 
Ident, Span}; -use crate::utils::{replace_token_sequences_matching, Slot, remove_token_sequences_for_macros}; +use crate::utils::{remove_token_sequences_for_macros, replace_token_sequences_matching, Slot}; // test-approach, of just stripping all the async-graphql macros for cargo-check (since presumably not needed at that point) // ========== pub fn wrap_serde_macros_impl(input: TokenStream, force_proceed: bool) -> TokenStream { - let proceed = force_proceed || { - let mut temp = false; - if let Ok(val) = env::var("FOR_RUST_ANALYZER") { - if val == "1" { - println!("Macro wrap_serde_macros: Modifying tokens, since FOR_RUST_ANALYZER is true."); - temp = true; - } - } - temp - }; - if !proceed { - return input; - } - - let output = input.clone(); - let output = replace_serde_macros(output); - output + let proceed = force_proceed || { + let mut temp = false; + if let Ok(val) = env::var("FOR_RUST_ANALYZER") { + if val == "1" { + println!("Macro wrap_serde_macros: Modifying tokens, since FOR_RUST_ANALYZER is true."); + temp = true; + } + } + temp + }; + if !proceed { + return input; + } + + let output = input.clone(); + let output = replace_serde_macros(output); + output } static MACROS_TO_REMOVE: &'static [&'static str] = &["serde"]; static DERIVE_MACRO_REPLACEMENTS_FROM: &'static [&'static str] = &["Serialize", "Deserialize"]; static DERIVE_MACRO_REPLACEMENTS_TO: &'static [&'static str] = &["rust_shared::rust_macros::Serialize_Stub", "rust_shared::rust_macros::Deserialize_Stub"]; fn replace_serde_macros(tokens: TokenStream) -> TokenStream { - let mut result = tokens; - - result = remove_token_sequences_for_macros(result, MACROS_TO_REMOVE); - - for (i, from_macro) in DERIVE_MACRO_REPLACEMENTS_FROM.iter().enumerate() { - let mut slots: Vec = Vec::new(); - let check = Box::new(|token: &TokenTree| { - match token { - TokenTree::Ident(data) if data.to_string() == from_macro.to_owned() => { - //println!("Found match!:{}", token); - true - }, - _ => false, - } - }); - - let replacement_str = DERIVE_MACRO_REPLACEMENTS_TO[i]; - let replacement_tokens: Vec = if replacement_str.contains("::") { - TokenStream::from_str(replacement_str).unwrap().into_iter().collect() - } else { - vec![ - TokenTree::Ident(Ident::new(replacement_str, Span::call_site())) - ] - }; - slots.push((check, Some(replacement_tokens))); - - result = replace_token_sequences_matching(result, &slots); - } - - result -} \ No newline at end of file + let mut result = tokens; + + result = remove_token_sequences_for_macros(result, MACROS_TO_REMOVE); + + for (i, from_macro) in DERIVE_MACRO_REPLACEMENTS_FROM.iter().enumerate() { + let mut slots: Vec = Vec::new(); + let check = Box::new(|token: &TokenTree| { + match token { + TokenTree::Ident(data) if data.to_string() == from_macro.to_owned() => { + //println!("Found match!:{}", token); + true + }, + _ => false, + } + }); + + let replacement_str = DERIVE_MACRO_REPLACEMENTS_TO[i]; + let replacement_tokens: Vec = if replacement_str.contains("::") { + TokenStream::from_str(replacement_str).unwrap().into_iter().collect() + } else { + vec![TokenTree::Ident(Ident::new(replacement_str, Span::call_site()))] + }; + slots.push((check, Some(replacement_tokens))); + + result = replace_token_sequences_matching(result, &slots); + } + + result +} diff --git a/Packages/rust-shared/src/db_constants.rs b/Packages/rust-shared/src/db_constants.rs index 2670552ee..ec3d028ec 100644 --- a/Packages/rust-shared/src/db_constants.rs +++ b/Packages/rust-shared/src/db_constants.rs @@ -9,4 +9,4 @@ pub const 
SYSTEM_POLICY_PUBLIC_GOVERNED_NAME: &'static str = "Public, governed ( pub const SYSTEM_POLICY_PRIVATE_GOVERNED_NAME: &'static str = "Private, governed (standard)"; pub const GLOBAL_MAP_ID: &'static str = "GLOBAL_MAP_00000000001"; -pub const GLOBAL_ROOT_NODE_ID: &'static str = "GLOBAL_ROOT_0000000001"; \ No newline at end of file +pub const GLOBAL_ROOT_NODE_ID: &'static str = "GLOBAL_ROOT_0000000001"; diff --git a/Packages/rust-shared/src/domains.rs b/Packages/rust-shared/src/domains.rs index 72898e97e..cdc847117 100644 --- a/Packages/rust-shared/src/domains.rs +++ b/Packages/rust-shared/src/domains.rs @@ -6,75 +6,83 @@ use reqwest::Url; use crate::utils::general_::extensions::ToOwnedV; pub fn get_env() -> String { - env::var("ENVIRONMENT").unwrap_or("".to_string()) + env::var("ENVIRONMENT").unwrap_or("".to_string()) +} +pub fn is_dev() -> bool { + get_env() == "dev" +} +pub fn is_prod() -> bool { + get_env() == "prod" } -pub fn is_dev() -> bool { get_env() == "dev" } -pub fn is_prod() -> bool { get_env() == "prod" } // sync:js // ========== pub struct DomainsConstants { - pub prod_domain: &'static str, - pub recognized_web_server_hosts: &'static [&'static str], - pub on_server_and_dev: bool, - pub on_server_and_prod: bool, + pub prod_domain: &'static str, + pub recognized_web_server_hosts: &'static [&'static str], + pub on_server_and_dev: bool, + pub on_server_and_prod: bool, } impl DomainsConstants { - pub fn new() -> Self { - //let ON_SERVER = env::var("ENVIRONMENT").is_some(); - let ON_SERVER = true; - let ENV = get_env(); - Self { - prod_domain: "debatemap.app", - //prod_domain: "debates.app", // temp - recognized_web_server_hosts: &[ - "localhost:5100", "localhost:5200", // load-balancer service (exposes web-server, plus other services, eg. /grafana) - "localhost:5101", // local webpack (alternative to web-server pod, when doing local development) - "localhost:5130", // monitor (serves both frontend and backend requests) - "localhost:5131", // monitor, local webpack (alternative to monitor pod's frontend-serving, when doing local development) - "localhost:5150", "localhost:5250", // pyroscope - // direct to server - "9m2x1z.nodes.c1.or1.k8s.ovh.us", - "debatemap.societylibrary.org", - // through cloudflare - "debatemap.app", - "debates.app", - "debating.app", - ], - on_server_and_dev: ON_SERVER && ENV == "dev", - on_server_and_prod: ON_SERVER && ENV == "prod", - } - } + pub fn new() -> Self { + //let ON_SERVER = env::var("ENVIRONMENT").is_some(); + let ON_SERVER = true; + let ENV = get_env(); + Self { + prod_domain: "debatemap.app", + //prod_domain: "debates.app", // temp + recognized_web_server_hosts: &[ + "localhost:5100", // load-balancer service (exposes web-server, plus other services, eg. 
/grafana) + "localhost:5200", // + "localhost:5101", // local webpack (alternative to web-server pod, when doing local development) + "localhost:5130", // monitor (serves both frontend and backend requests) + "localhost:5131", // monitor, local webpack (alternative to monitor pod's frontend-serving, when doing local development) + "localhost:5150", // pyroscope + "localhost:5250", // + // direct to server + "9m2x1z.nodes.c1.or1.k8s.ovh.us", + "debatemap.societylibrary.org", + // through cloudflare + "debatemap.app", + "debates.app", + "debating.app", + ], + on_server_and_dev: ON_SERVER && ENV == "dev", + on_server_and_prod: ON_SERVER && ENV == "prod", + } + } } #[derive(PartialEq, Eq)] pub enum ServerPod { - WebServer, - AppServer, - Monitor, - Grafana, - Pyroscope, + WebServer, + AppServer, + Monitor, + Grafana, + Pyroscope, } pub struct GetServerURL_Options { - pub claimed_client_url: Option, - pub restrict_to_recognized_hosts: bool, - - pub force_localhost: bool, - pub force_https: bool, + pub claimed_client_url: Option, + pub restrict_to_recognized_hosts: bool, + + pub force_localhost: bool, + pub force_https: bool, } pub fn get_server_url(server_pod: ServerPod, subpath: &str, opts: GetServerURL_Options) -> Result { - let DomainsConstants { prod_domain, recognized_web_server_hosts, on_server_and_dev, on_server_and_prod: _ } = DomainsConstants::new(); + let DomainsConstants { prod_domain, recognized_web_server_hosts, on_server_and_dev, on_server_and_prod: _ } = DomainsConstants::new(); - // process claimed-client-url + // process claimed-client-url println!("GetServerURL_claimedClientURL: {:?}", opts.claimed_client_url); let claimed_client_url = opts.claimed_client_url.map(|str| Url::parse(&str).unwrap()); - let should_trust_claimed_client_url = if let Some(client_url) = &claimed_client_url { - !opts.restrict_to_recognized_hosts || recognized_web_server_hosts.contains(&client_url.host_str().unwrap()) || on_server_and_dev - } else { false }; - let claimed_client_url_trusted = if should_trust_claimed_client_url { claimed_client_url.clone() } else { None }; + let should_trust_claimed_client_url = if let Some(client_url) = &claimed_client_url { + !opts.restrict_to_recognized_hosts || recognized_web_server_hosts.contains(&client_url.host_str().unwrap()) || on_server_and_dev + } else { + false + }; + let claimed_client_url_trusted = if should_trust_claimed_client_url { claimed_client_url.clone() } else { None }; let mut server_url: Url; @@ -83,7 +91,7 @@ pub fn get_server_url(server_pod: ServerPod, subpath: &str, opts: GetServerURL_O if let Some(client_url) = claimed_client_url_trusted.as_ref() { let port_str = if let Some(port) = client_url.port() { format!(":{}", port) } else { "".o() }; - server_url = Url::parse(&format!("{}//{}{}", client_url.scheme(), client_url.host_str().unwrap(), port_str)).unwrap(); + server_url = Url::parse(&format!("{}//{}{}", client_url.scheme(), client_url.host_str().unwrap(), port_str)).unwrap(); } else { //Assert(webServerHosts.includes(referrerURL.host), `Client sent invalid referrer host (${referrerURL.host}).`); let guessed_to_be_local = opts.force_localhost || on_server_and_dev; @@ -91,65 +99,62 @@ pub fn get_server_url(server_pod: ServerPod, subpath: &str, opts: GetServerURL_O server_url = Url::parse("http://localhost:5100").unwrap(); // standard local-k8s entry-point } else { server_url = Url::parse(&format!("https://{}", prod_domain))?; - } - } - - let backend_is_remote = !opts.force_localhost && ( - claimed_client_url_trusted.map(|a| 
a.query_pairs().any(|b| b.0 == "db" && b.1 == "prod")).unwrap_or(false) - || server_url.host_str() == Some(prod_domain) - ); - - // section 2: set subdomain/port - // ========== - - match server_pod { - ServerPod::WebServer => { - // for simply deciding between localhost:5100 and localhost:5101, we don't need the claimed-client-url to be "trusted" - if claimed_client_url.map(|a| a.port()) == Some(Some(5101)) { - server_url.set_port(Some(5101)).unwrap(); - } - }, - ServerPod::Pyroscope => { - server_url.set_host(Some("localhost")).unwrap(); // pyroscope only accessible using port-forwards atm - server_url.set_port(Some(if backend_is_remote { 5250 } else { 5150 })).unwrap(); - }, - _ => {}, - } - - // section 3: set path - // ========== + } + } + + let backend_is_remote = !opts.force_localhost && (claimed_client_url_trusted.map(|a| a.query_pairs().any(|b| b.0 == "db" && b.1 == "prod")).unwrap_or(false) || server_url.host_str() == Some(prod_domain)); + + // section 2: set subdomain/port + // ========== + + match server_pod { + ServerPod::WebServer => { + // for simply deciding between localhost:5100 and localhost:5101, we don't need the claimed-client-url to be "trusted" + if claimed_client_url.map(|a| a.port()) == Some(Some(5101)) { + server_url.set_port(Some(5101)).unwrap(); + } + }, + ServerPod::Pyroscope => { + server_url.set_host(Some("localhost")).unwrap(); // pyroscope only accessible using port-forwards atm + server_url.set_port(Some(if backend_is_remote { 5250 } else { 5150 })).unwrap(); + }, + _ => {}, + } + + // section 3: set path + // ========== ensure!(subpath.starts_with("/"), "Subpath must start with a forward-slash."); - let mut subpath_final = subpath.to_string(); - match server_pod { - ServerPod::WebServer => {}, - ServerPod::AppServer => { - subpath_final = format!("/app-server{}", subpath_final); - }, - ServerPod::Monitor => { - subpath_final = format!("/monitor{}", subpath_final); - }, - ServerPod::Grafana => { - subpath_final = format!("/grafana{}", subpath_final); - }, - ServerPod::Pyroscope => {}, - } - server_url.set_path(&subpath_final); - - // section 4: special-case handling - // ========== - - // if this app-server is PROD, but we have a "localhost" host, user must be using the "?db=prod" flag - /*if on_server_and_prod && (claimed_client_url.as_ref().unwrap().host_str().unwrap() == "localhost:5100" || claimed_client_url.as_ref().unwrap().host_str().unwrap() == "localhost:5101") { - if subpath == "/auth/google/callback" { - subpath = "/auth/google/callback_returnToLocalhost"; - server_url.set_path(subpath); - } - }*/ - - if opts.force_https { - server_url.set_scheme("https").unwrap(); - } - - Ok(server_url.to_string()) -} \ No newline at end of file + let mut subpath_final = subpath.to_string(); + match server_pod { + ServerPod::WebServer => {}, + ServerPod::AppServer => { + subpath_final = format!("/app-server{}", subpath_final); + }, + ServerPod::Monitor => { + subpath_final = format!("/monitor{}", subpath_final); + }, + ServerPod::Grafana => { + subpath_final = format!("/grafana{}", subpath_final); + }, + ServerPod::Pyroscope => {}, + } + server_url.set_path(&subpath_final); + + // section 4: special-case handling + // ========== + + // if this app-server is PROD, but we have a "localhost" host, user must be using the "?db=prod" flag + /*if on_server_and_prod && (claimed_client_url.as_ref().unwrap().host_str().unwrap() == "localhost:5100" || claimed_client_url.as_ref().unwrap().host_str().unwrap() == "localhost:5101") { + if subpath == "/auth/google/callback" { 
+ subpath = "/auth/google/callback_returnToLocalhost"; + server_url.set_path(subpath); + } + }*/ + + if opts.force_https { + server_url.set_scheme("https").unwrap(); + } + + Ok(server_url.to_string()) +} diff --git a/Packages/rust-shared/src/lib.rs b/Packages/rust-shared/src/lib.rs index 1d61c6aa3..048062da5 100644 --- a/Packages/rust-shared/src/lib.rs +++ b/Packages/rust-shared/src/lib.rs @@ -2,12 +2,10 @@ #![feature(iterator_try_collect)] #![feature(try_trait_v2)] #![feature(try_trait_v2_residual)] - // for lock-chain checks #![allow(incomplete_features)] #![feature(adt_const_params)] #![feature(generic_const_exprs)] - // sync among all rust crates #![warn(clippy::all, clippy::pedantic, clippy::cargo)] #![allow( @@ -28,90 +26,91 @@ // to avoid false-positives, of certain functions, as well as for [Serialize/Deserialize]_Stub macro-usage (wrt private fields) dead_code, )] +#![feature(stmt_expr_attributes)] // allow attributes on expressions, eg. for disabling rustfmt per-expression -use std::time::{UNIX_EPOCH, SystemTime, Duration}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; // subcrate re-exports (todo: probably replace with "pub use ? as ?;" syntax, as seen here: https://www.reddit.com/r/rust/comments/ayibls/comment/ei0ypg3) -pub extern crate rust_macros; pub extern crate anyhow; pub extern crate async_graphql; pub extern crate async_graphql_axum; pub extern crate axum; +pub extern crate base64; pub extern crate bytes; -pub extern crate indoc; -pub extern crate itertools; -pub extern crate regex; -pub extern crate once_cell; -pub extern crate uuid; -pub extern crate url; +pub extern crate chrono; +pub extern crate flume; pub extern crate futures; pub extern crate http_body_util; pub extern crate hyper; pub extern crate hyper_util; +pub extern crate indexmap; +pub extern crate indoc; +pub extern crate itertools; +pub extern crate jwt_simple; +pub extern crate once_cell; pub extern crate postgres_protocol; +pub extern crate regex; pub extern crate reqwest; +pub extern crate rust_macros; +pub extern crate sentry; pub extern crate serde; pub extern crate serde_json; pub extern crate thiserror; pub extern crate tokio; pub extern crate tokio_postgres; +pub extern crate tokio_tungstenite; pub extern crate tower; pub extern crate tower_http; pub extern crate tower_service; -pub extern crate jwt_simple; -pub extern crate chrono; -pub extern crate flume; -pub extern crate indexmap; -pub extern crate tokio_tungstenite; -pub extern crate base64; -pub extern crate sentry; +pub extern crate url; +pub extern crate uuid; // this crate's modules pub mod db_constants; pub mod domains; pub mod links { - pub mod app_server_to_monitor_backend; + pub mod app_server_to_monitor_backend; } pub mod utils { - pub mod auth { - pub mod jwt_utils_base; - } - pub mod db { - pub mod uuid; - pub mod agql_ext { - pub mod gql_general_extension; - } - } - pub mod errors; - pub mod errors_ { - pub mod backtrace_simplifier; - } - pub mod futures; - pub mod general; - pub mod general_ { - pub mod extensions; - pub mod func_types; - pub mod serde; - } - pub mod _k8s; - pub mod k8s { - pub mod cert_handling; - pub mod k8s_structs; - pub mod k8s_client; - pub mod upgrade; - } - pub mod locks { - pub mod check_lock_order; - pub mod rwlock_tracked; - } - pub mod mtx { - pub mod mtx; - } - pub mod net; - pub mod time; - pub mod type_aliases; + pub mod auth { + pub mod jwt_utils_base; + } + pub mod db { + pub mod uuid; + pub mod agql_ext { + pub mod gql_general_extension; + } + } + pub mod errors; + pub mod errors_ { + pub 
mod backtrace_simplifier; + } + pub mod futures; + pub mod general; + pub mod general_ { + pub mod extensions; + pub mod func_types; + pub mod serde; + } + pub mod _k8s; + pub mod k8s { + pub mod cert_handling; + pub mod k8s_client; + pub mod k8s_structs; + pub mod upgrade; + } + pub mod locks { + pub mod check_lock_order; + pub mod rwlock_tracked; + } + pub mod mtx { + pub mod mtx; + } + pub mod net; + pub mod time; + pub mod type_aliases; } pub use utils::errors::*; pub use utils::locks::check_lock_order::*; -pub use utils::locks::rwlock_tracked::*; \ No newline at end of file +pub use utils::locks::rwlock_tracked::*; diff --git a/Packages/rust-shared/src/links/app_server_to_monitor_backend.rs b/Packages/rust-shared/src/links/app_server_to_monitor_backend.rs index 3c1788c7c..e761f242c 100644 --- a/Packages/rust-shared/src/links/app_server_to_monitor_backend.rs +++ b/Packages/rust-shared/src/links/app_server_to_monitor_backend.rs @@ -3,9 +3,12 @@ use async_graphql::SimpleObject; use rust_macros::wrap_slow_macros; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; -use crate::{utils::{mtx::mtx::{MtxData, MtxDataWithExtraInfo}, type_aliases::{JSONValue, RowData}}}; +use crate::utils::{ + mtx::mtx::{MtxData, MtxDataWithExtraInfo}, + type_aliases::{JSONValue, RowData}, +}; // this alias is needed, since `wrap_serde_macros.rs` inserts refs to, eg. `rust_shared::rust_macros::Serialize_Stub` use crate as rust_shared; @@ -15,38 +18,38 @@ wrap_slow_macros! { // This struct is basically only used by logging.rs in app-server, but is placed here, since we also use it in the Message_ASToMB struct. #[derive(SimpleObject, Clone, Serialize, Deserialize, Debug, Default)] pub struct LogEntry { - pub time: f64, - /// Logging levels: (as interpreted in the debate-map codebase) - /// * ERROR: Indicates some flaw in the codebase that should be fixed, or an issue in the user/externally supplied data serious enough that the given operation did not proceed. - /// * WARN: Indicates some unexpected state that *might* be pointing toward an error/thing-to-fix, but could also just be something unusual. - /// * INFO: Something significant enough that it should show in the process' standard output. - /// * DEBUG: For low-level information that's fine to stream to the monitor-backend. - /// * TRACE: For low-level information that's not fine to stream to the monitor-backend. (eg. due to the expected trigger-rate being too high, to where it might congest the local network, or other layer of processing) - pub level: String, - pub target: String, - pub span_name: String, - pub message: String, + pub time: f64, + /// Logging levels: (as interpreted in the debate-map codebase) + /// * ERROR: Indicates some flaw in the codebase that should be fixed, or an issue in the user/externally supplied data serious enough that the given operation did not proceed. + /// * WARN: Indicates some unexpected state that *might* be pointing toward an error/thing-to-fix, but could also just be something unusual. + /// * INFO: Something significant enough that it should show in the process' standard output. + /// * DEBUG: For low-level information that's fine to stream to the monitor-backend. + /// * TRACE: For low-level information that's not fine to stream to the monitor-backend. (eg. 
due to the expected trigger-rate being too high, to where it might congest the local network, or other layer of processing) + pub level: String, + pub target: String, + pub span_name: String, + pub message: String, } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum Message_MBToAS { - //TODO, + //TODO, } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum Message_ASToMB { - LogEntryAdded { entry: LogEntry }, - MtxEntryDone { mtx: MtxData }, - LQInstanceUpdated { - //key: String, - // we don't want to place LQKey struct in rust-shared, so pass generic string and JSONValue instead - table_name: String, - filter: JSONValue, - - last_entries: Vec, - watchers_count: u32, - deleting: bool, - }, + LogEntryAdded { entry: LogEntry }, + MtxEntryDone { mtx: MtxData }, + LQInstanceUpdated { + //key: String, + // we don't want to place LQKey struct in rust-shared, so pass generic string and JSONValue instead + table_name: String, + filter: JSONValue, + + last_entries: Vec, + watchers_count: u32, + deleting: bool, + }, } -} \ No newline at end of file +} diff --git a/Packages/rust-shared/src/utils/_k8s.rs b/Packages/rust-shared/src/utils/_k8s.rs index d9e78033c..af3b3ceb7 100644 --- a/Packages/rust-shared/src/utils/_k8s.rs +++ b/Packages/rust-shared/src/utils/_k8s.rs @@ -1,192 +1,206 @@ -use std::{env, fs::{self, File}, str::FromStr, sync::Arc, io::Read, time::SystemTime}; - -use anyhow::{Context, anyhow, Error, bail, ensure}; +use std::{ + env, + fs::{self, File}, + io::Read, + str::FromStr, + sync::Arc, + time::SystemTime, +}; + +use super::{k8s::cert_handling::get_reqwest_client_with_k8s_certs, type_aliases::JSONValue}; +use anyhow::{anyhow, bail, ensure, Context, Error}; use axum::http; use bytes::Bytes; use futures::StreamExt; use http_body_util::{Empty, Full}; -use hyper::{upgrade, body::Body}; +use hyper::{body::Body, upgrade}; use hyper_rustls::HttpsConnector; use itertools::Itertools; -use reqwest::{Url, Body as ReqwestBody}; +use reqwest::{Body as ReqwestBody, Url}; use rustls::ClientConfig; -use serde_json::{json, self}; -use tokio_tungstenite::{WebSocketStream, connect_async, connect_async_tls_with_config, tungstenite::{self, Message, protocol::WebSocketConfig}}; +use serde_json::{self, json}; +use tokio_tungstenite::{ + connect_async, connect_async_tls_with_config, + tungstenite::{self, protocol::WebSocketConfig, Message}, + WebSocketStream, +}; use tower::ServiceBuilder; -use super::{type_aliases::JSONValue, k8s::cert_handling::get_reqwest_client_with_k8s_certs}; -use tracing::{info, error, instrument::WithSubscriber, warn}; +use tracing::{error, info, instrument::WithSubscriber, warn}; -use crate::{domains::{get_server_url, DomainsConstants}, utils::k8s::{cert_handling::{get_hyper_client_with_k8s_certs, get_rustls_config_that_accepts_k8s_certs}, k8s_client::{upgrade_to_websocket}, k8s_structs::K8sSecret}}; +use crate::{ + domains::{get_server_url, DomainsConstants}, + utils::k8s::{ + cert_handling::{get_hyper_client_with_k8s_certs, get_rustls_config_that_accepts_k8s_certs}, + k8s_client::upgrade_to_websocket, + k8s_structs::K8sSecret, + }, +}; #[derive(Debug)] pub struct K8sPodBasicInfo { - pub name: String, - //pub creation_time: i64, - pub creation_time_str: String, + pub name: String, + //pub creation_time: i64, + pub creation_time_str: String, } pub async fn get_k8s_pod_basic_infos(namespace: &str, filter_to_running_pods: bool) -> Result, Error> { - let token = fs::read_to_string("/var/run/secrets/kubernetes.io/serviceaccount/token")?; - let k8s_host = 
env::var("KUBERNETES_SERVICE_HOST")?; - let k8s_port = env::var("KUBERNETES_PORT_443_TCP_PORT")?; - - let client = get_reqwest_client_with_k8s_certs()?; - let pod_filters_str = if filter_to_running_pods { "?fieldSelector=status.phase=Running" } else { "" }; - let req = client.get(format!("https://{k8s_host}:{k8s_port}/api/v1/namespaces/{namespace}/pods{pod_filters_str}")) - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {token}")) - .body(json!({}).to_string()).build()?; - let res = client.execute(req).await?; - - let res_as_json_str = res.text().await?; - //info!("Got list of k8s pods (in namespace \"{namespace}\"): {}", res_as_json_str); - let res_as_json = JSONValue::from_str(&res_as_json_str)?; - - let pod_infos = (|| { - let mut pod_infos: Vec = vec![]; - for pod_info_json in res_as_json.as_object()?.get("items")?.as_array()? { - let metadata = pod_info_json.as_object()?.get("metadata")?.as_object()?; - let pod_name = metadata.get("name")?.as_str()?; - let creation_time_str = metadata.get("creationTimestamp")?.as_str()?; - //let creation_time = chrono::DateTime::parse_from_rfc3339(creation_time_str)?; - pod_infos.push({ - K8sPodBasicInfo { name: pod_name.to_owned(), creation_time_str: creation_time_str.to_owned() } - }); - } - Some(pod_infos) - })().ok_or_else(|| anyhow!("Response from kubernetes API is malformed:{res_as_json_str}"))?; - - Ok(pod_infos) + let token = fs::read_to_string("/var/run/secrets/kubernetes.io/serviceaccount/token")?; + let k8s_host = env::var("KUBERNETES_SERVICE_HOST")?; + let k8s_port = env::var("KUBERNETES_PORT_443_TCP_PORT")?; + + let client = get_reqwest_client_with_k8s_certs()?; + let pod_filters_str = if filter_to_running_pods { "?fieldSelector=status.phase=Running" } else { "" }; + let req = client.get(format!("https://{k8s_host}:{k8s_port}/api/v1/namespaces/{namespace}/pods{pod_filters_str}")).header("Content-Type", "application/json").header("Authorization", format!("Bearer {token}")).body(json!({}).to_string()).build()?; + let res = client.execute(req).await?; + + let res_as_json_str = res.text().await?; + //info!("Got list of k8s pods (in namespace \"{namespace}\"): {}", res_as_json_str); + let res_as_json = JSONValue::from_str(&res_as_json_str)?; + + let pod_infos = (|| { + let mut pod_infos: Vec = vec![]; + for pod_info_json in res_as_json.as_object()?.get("items")?.as_array()? 
{ + let metadata = pod_info_json.as_object()?.get("metadata")?.as_object()?; + let pod_name = metadata.get("name")?.as_str()?; + let creation_time_str = metadata.get("creationTimestamp")?.as_str()?; + //let creation_time = chrono::DateTime::parse_from_rfc3339(creation_time_str)?; + pod_infos.push(K8sPodBasicInfo { name: pod_name.to_owned(), creation_time_str: creation_time_str.to_owned() }); + } + Some(pod_infos) + })() + .ok_or_else(|| anyhow!("Response from kubernetes API is malformed:{res_as_json_str}"))?; + + Ok(pod_infos) } pub async fn try_get_k8s_secret(name: String, namespace: &str) -> Result, Error> { - match get_or_create_k8s_secret(name, namespace, None).await { - Ok(secret) => Ok(Some(secret)), - Err(err) => { - if err.to_string().contains("No k8s secret found named ") { - Ok(None) - } else { - Err(err) - } - } - } + match get_or_create_k8s_secret(name, namespace, None).await { + Ok(secret) => Ok(Some(secret)), + Err(err) => { + if err.to_string().contains("No k8s secret found named ") { + Ok(None) + } else { + Err(err) + } + }, + } } pub async fn get_or_create_k8s_secret(name: String, namespace: &str, new_data_if_missing: Option) -> Result { - info!("Beginning request to get/create the k8s-secret named \"{name}\"."); - let token = fs::read_to_string("/var/run/secrets/kubernetes.io/serviceaccount/token")?; - let k8s_host = env::var("KUBERNETES_SERVICE_HOST")?; - let k8s_port = env::var("KUBERNETES_PORT_443_TCP_PORT")?; - - let client = get_reqwest_client_with_k8s_certs()?; - - let req = client.get(format!("https://{k8s_host}:{k8s_port}/api/v1/namespaces/{namespace}/secrets/{name}")) - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {token}")) - .body(json!({}).to_string()).build()?; - let res = client.execute(req).await?; - let res_as_json = res.json::().await?; - //if DomainsConstants::new().on_server_and_dev { info!("Got response from k8s server, on trying to get secret \"{name}\": {}", res_as_json); } - // check for failure by checking for a "code" field in the response (if it succeeded, the response json will simply be the secret's json-data) - if res_as_json["code"].is_null() { - let secret: K8sSecret = serde_json::from_value(res_as_json)?; - return Ok(secret); - } - - if let Some(new_data_if_missing) = new_data_if_missing { - let new_secret = K8sSecret { - apiVersion: "v1".to_owned(), - data: new_data_if_missing, - metadata: json!({ - "name": name, - "namespace": namespace, - }), - kind: "Secret".to_owned(), - r#type: "Opaque".to_owned() - }; - let new_secret_json = serde_json::to_string(&new_secret)?; - - let req = client.post(format!("https://{k8s_host}:{k8s_port}/api/v1/namespaces/{namespace}/secrets")) - //let req = client.put(format!("https://{k8s_host}:{k8s_port}/api/v1/namespaces/{namespace}/secrets/{name}")) - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {token}")) - .body(new_secret_json).build()?; - let res = client.execute(req).await?; - - let res_as_str = res.text().await?; - info!("Got response from k8s server, on trying to create secret \"{name}\": {}", res_as_str); - - Ok(new_secret) - } else { - bail!("No k8s secret found named \"{}\". Since new_data_if_missing was None, returning this error to indicate no matching secret found. 
@retrieval_attempt_response:{}", name, res_as_json); - } + info!("Beginning request to get/create the k8s-secret named \"{name}\"."); + let token = fs::read_to_string("/var/run/secrets/kubernetes.io/serviceaccount/token")?; + let k8s_host = env::var("KUBERNETES_SERVICE_HOST")?; + let k8s_port = env::var("KUBERNETES_PORT_443_TCP_PORT")?; + + let client = get_reqwest_client_with_k8s_certs()?; + + let req = client.get(format!("https://{k8s_host}:{k8s_port}/api/v1/namespaces/{namespace}/secrets/{name}")).header("Content-Type", "application/json").header("Authorization", format!("Bearer {token}")).body(json!({}).to_string()).build()?; + let res = client.execute(req).await?; + let res_as_json = res.json::().await?; + //if DomainsConstants::new().on_server_and_dev { info!("Got response from k8s server, on trying to get secret \"{name}\": {}", res_as_json); } + // check for failure by checking for a "code" field in the response (if it succeeded, the response json will simply be the secret's json-data) + if res_as_json["code"].is_null() { + let secret: K8sSecret = serde_json::from_value(res_as_json)?; + return Ok(secret); + } + + if let Some(new_data_if_missing) = new_data_if_missing { + let new_secret = K8sSecret { + apiVersion: "v1".to_owned(), + data: new_data_if_missing, + metadata: json!({ + "name": name, + "namespace": namespace, + }), + kind: "Secret".to_owned(), + r#type: "Opaque".to_owned(), + }; + let new_secret_json = serde_json::to_string(&new_secret)?; + + let req = client + .post(format!("https://{k8s_host}:{k8s_port}/api/v1/namespaces/{namespace}/secrets")) + //let req = client.put(format!("https://{k8s_host}:{k8s_port}/api/v1/namespaces/{namespace}/secrets/{name}")) + .header("Content-Type", "application/json") + .header("Authorization", format!("Bearer {token}")) + .body(new_secret_json) + .build()?; + let res = client.execute(req).await?; + + let res_as_str = res.text().await?; + info!("Got response from k8s server, on trying to create secret \"{name}\": {}", res_as_str); + + Ok(new_secret) + } else { + bail!("No k8s secret found named \"{}\". Since new_data_if_missing was None, returning this error to indicate no matching secret found. @retrieval_attempt_response:{}", name, res_as_json); + } } pub async fn exec_command_in_another_pod(pod_namespace: &str, pod_name: &str, container: Option<&str>, command_name: &str, command_args: Vec, allow_utf8_lossy: bool) -> Result { - info!("Beginning request to run command in another pod. 
@target_pod:{} @command_name:{} @command_args:{:?}", pod_name, command_name, command_args); - let token = fs::read_to_string("/var/run/secrets/kubernetes.io/serviceaccount/token").context("Failed to retrieve k8s service-account token.")?; - /*let k8s_host = env::var("KUBERNETES_SERVICE_HOST")?; - let k8s_port = env::var("KUBERNETES_PORT_443_TCP_PORT")?;*/ - - let mut query_str = format!("?command={}", command_name); - for arg in &command_args { - query_str.push_str(&format!("&command={}", arg)); - } - if let Some(container) = container { - query_str.push_str(&format!("&container={}", container)); - } - query_str.push_str("&stdin=true&stderr=true&stdout=true&tty=true"); - - // using hyper - let client = get_hyper_client_with_k8s_certs().context("Failed to create hyper client with k8s certs.")?; - let req = hyper::Request::builder().uri(format!("https://kubernetes.default.svc.cluster.local/api/v1/namespaces/{}/pods/{}/exec{}", pod_namespace, pod_name, query_str)) - .method("GET") - .header("Authorization", format!("Bearer {token}")) - //.body(Full::new(Bytes::new())) - .body(Empty::::new()) - .unwrap(); - let response = upgrade_to_websocket(client, req).await.context("Failed to upgrade to websocket.")?; - - let mut res_as_str = String::new(); - let mut response_remaining = response; - loop { - let (next_item, rest_of_response) = response_remaining.into_future().await; - response_remaining = rest_of_response; - match next_item { - Some(Ok(item)) => { - let item_into_text = match item { - // so far anyway, all of the asked-for content has been returned within `Message::Binary` chunks - Message::Binary(data) => match allow_utf8_lossy { - true => String::from_utf8_lossy(&data).to_string(), - false => String::from_utf8(data).map_err(|_| anyhow!("Got non-utf8 data from k8s exec endpoint, and allow_utf8_lossy was false."))?, - }, - // but we'll keep processing text chunks as well, in case they are used in some cases - Message::Text(string) => { - warn!("Got unexpected text-chunk. @len:{} @string:{}", string.len(), string); - string - }, - msg => { - println!("Ignoring web-socket message:{:?}", msg); - continue; - }, - }; - let item_as_str = item_into_text.as_str(); - let item_as_chars = item_as_str.chars().collect_vec(); - let pos_of_first_soh = item_as_chars.iter().position(|ch| *ch == char::from_u32(0x0001).unwrap()); - //println!("Got item. @len:{} @sohChar1st:{:?} @sohCharLast:{:?}", item_as_str.len(), pos_of_first_soh); - - // ignore items without the `0x0001` char (SOH control character) at start; these are just the k8s metadata chunks output at end (should only be 2 of these) - if pos_of_first_soh != Some(0) { - continue; - } - - // chop off the `0x0001` char (SOH control character) at start of each "actual data" chunk - let item_as_str_cleaned = item_as_chars[1..].iter().cloned().collect::(); - res_as_str.push_str(&item_as_str_cleaned); - } - Some(Err(e)) => return Err(e.into()), - None => break, - } - } - - info!("Got response from k8s server, on trying to run command using exec. @command:\"{} {}\" @response_len: {}", command_name, command_args.join(" "), res_as_str.len()); - Ok(res_as_str) -} \ No newline at end of file + info!("Beginning request to run command in another pod. 
@target_pod:{} @command_name:{} @command_args:{:?}", pod_name, command_name, command_args); + let token = fs::read_to_string("/var/run/secrets/kubernetes.io/serviceaccount/token").context("Failed to retrieve k8s service-account token.")?; + /*let k8s_host = env::var("KUBERNETES_SERVICE_HOST")?; + let k8s_port = env::var("KUBERNETES_PORT_443_TCP_PORT")?;*/ + + let mut query_str = format!("?command={}", command_name); + for arg in &command_args { + query_str.push_str(&format!("&command={}", arg)); + } + if let Some(container) = container { + query_str.push_str(&format!("&container={}", container)); + } + query_str.push_str("&stdin=true&stderr=true&stdout=true&tty=true"); + + // using hyper + let client = get_hyper_client_with_k8s_certs().context("Failed to create hyper client with k8s certs.")?; + let req = hyper::Request::builder() + .uri(format!("https://kubernetes.default.svc.cluster.local/api/v1/namespaces/{}/pods/{}/exec{}", pod_namespace, pod_name, query_str)) + .method("GET") + .header("Authorization", format!("Bearer {token}")) + //.body(Full::new(Bytes::new())) + .body(Empty::::new()) + .unwrap(); + let response = upgrade_to_websocket(client, req).await.context("Failed to upgrade to websocket.")?; + + let mut res_as_str = String::new(); + let mut response_remaining = response; + loop { + let (next_item, rest_of_response) = response_remaining.into_future().await; + response_remaining = rest_of_response; + match next_item { + Some(Ok(item)) => { + let item_into_text = match item { + // so far anyway, all of the asked-for content has been returned within `Message::Binary` chunks + Message::Binary(data) => match allow_utf8_lossy { + true => String::from_utf8_lossy(&data).to_string(), + false => String::from_utf8(data).map_err(|_| anyhow!("Got non-utf8 data from k8s exec endpoint, and allow_utf8_lossy was false."))?, + }, + // but we'll keep processing text chunks as well, in case they are used in some cases + Message::Text(string) => { + warn!("Got unexpected text-chunk. @len:{} @string:{}", string.len(), string); + string + }, + msg => { + println!("Ignoring web-socket message:{:?}", msg); + continue; + }, + }; + let item_as_str = item_into_text.as_str(); + let item_as_chars = item_as_str.chars().collect_vec(); + let pos_of_first_soh = item_as_chars.iter().position(|ch| *ch == char::from_u32(0x0001).unwrap()); + //println!("Got item. @len:{} @sohChar1st:{:?} @sohCharLast:{:?}", item_as_str.len(), pos_of_first_soh); + + // ignore items without the `0x0001` char (SOH control character) at start; these are just the k8s metadata chunks output at end (should only be 2 of these) + if pos_of_first_soh != Some(0) { + continue; + } + + // chop off the `0x0001` char (SOH control character) at start of each "actual data" chunk + let item_as_str_cleaned = item_as_chars[1..].iter().cloned().collect::(); + res_as_str.push_str(&item_as_str_cleaned); + }, + Some(Err(e)) => return Err(e.into()), + None => break, + } + } + + info!("Got response from k8s server, on trying to run command using exec. 
@command:\"{} {}\" @response_len: {}", command_name, command_args.join(" "), res_as_str.len()); + Ok(res_as_str) +} diff --git a/Packages/rust-shared/src/utils/auth/jwt_utils_base.rs b/Packages/rust-shared/src/utils/auth/jwt_utils_base.rs index b334a519d..eae01dc04 100644 --- a/Packages/rust-shared/src/utils/auth/jwt_utils_base.rs +++ b/Packages/rust-shared/src/utils/auth/jwt_utils_base.rs @@ -2,64 +2,64 @@ use std::collections::{HashMap, HashSet}; use std::env; use std::time::Duration; +use crate::db_constants::SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME; +use crate::utils::_k8s::get_or_create_k8s_secret; +use crate::utils::db::uuid::{new_uuid_v4_as_b64, new_uuid_v4_as_b64_id}; +use crate::utils::futures::make_reliable; +use crate::utils::general::get_uri_params; +use crate::utils::time::time_since_epoch_ms_i64; +use crate::utils::type_aliases::JSONValue; +use crate::{async_graphql, serde_json, to_anyhow, to_sub_err, to_sub_err_in_stream, SubError}; use anyhow::{anyhow, Error}; -use axum::response::IntoResponse; -use axum::{Router, response}; use axum::extract::{Extension, Path}; +use axum::response::IntoResponse; use axum::routing::get; +use axum::{response, Router}; +use indoc::indoc; +use jwt_simple::prelude::{Claims, HS256Key, MACLike, VerificationOptions}; use once_cell::sync::OnceCell; use rust_macros::wrap_slow_macros; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use serde_json::json; -use crate::utils::db::uuid::{new_uuid_v4_as_b64, new_uuid_v4_as_b64_id}; -use crate::db_constants::SYSTEM_POLICY_PUBLIC_UNGOVERNED_NAME; -use crate::utils::futures::make_reliable; -use crate::utils::general::get_uri_params; -use indoc::indoc; -use crate::utils::time::time_since_epoch_ms_i64; -use crate::utils::type_aliases::JSONValue; -use crate::utils::_k8s::{get_or_create_k8s_secret}; -use crate::{async_graphql, serde_json, SubError, to_sub_err, to_sub_err_in_stream, to_anyhow}; -use tracing::{info, error, warn}; -use jwt_simple::prelude::{HS256Key, Claims, MACLike, VerificationOptions}; +use tracing::{error, info, warn}; /// Rather than baking the permissions and such into the jwt, we store only the id and email, which are unchanging fields. (well that and the `readOnly` flag, letting the user restrict the JWT's capabilities) /// We later use that minimal info to retrieve the full user-data from the database. (this way it's up-to-date if the user's username, permissions, etc. change) #[derive(Clone, Serialize, Deserialize)] pub struct UserJWTData { - pub id: String, - pub email: String, - pub readOnly: Option, + pub id: String, + pub email: String, + pub readOnly: Option, } pub async fn get_or_create_jwt_key_hs256() -> Result { - let key_str = get_or_create_jwt_key_hs256_str().await?; - let key_str_bytes = base64::decode(key_str)?; - let key = HS256Key::from_bytes(&key_str_bytes); - Ok(key) + let key_str = get_or_create_jwt_key_hs256_str().await?; + let key_str_bytes = base64::decode(key_str)?; + let key = HS256Key::from_bytes(&key_str_bytes); + Ok(key) } static JWT_KEY_HS256_STR: OnceCell = OnceCell::new(); /// Retrieves and/or creates the hs256 secret-key for use in generating JWTs. /// Why do retrieval manually rather than having k8s import it as an environment-variable at startup? /// Because k8s converts the base64 string into a utf8 string, which makes conversion complicated. 
(we want to decode it simply as a raw byte-array, for passing to HS256Key::from_bytes) pub async fn get_or_create_jwt_key_hs256_str() -> Result { - // first, try to read the key from a global variable (in case this func has already been run) - if let Some(key_as_base64_str) = JWT_KEY_HS256_STR.get() { - //info!("Retrieved secret key from global-var:{:?}", key_as_base64_str); - return Ok(key_as_base64_str.to_owned()); - } - - // create a new key, and try to store it as a k8s secret - let new_secret_data_if_missing = json!({ - "key": base64::encode(HS256Key::generate().to_bytes()), - }); - let secret = get_or_create_k8s_secret("dm-jwt-secret-hs256".to_owned(), "default", Some(new_secret_data_if_missing)).await?; - let key_as_base64_str = secret.data["key"].as_str().ok_or(anyhow!("The \"key\" field is missing!"))?; - //info!("Read/created secret key through k8s api:{:?}", key_as_base64_str); + // first, try to read the key from a global variable (in case this func has already been run) + if let Some(key_as_base64_str) = JWT_KEY_HS256_STR.get() { + //info!("Retrieved secret key from global-var:{:?}", key_as_base64_str); + return Ok(key_as_base64_str.to_owned()); + } + + // create a new key, and try to store it as a k8s secret + let new_secret_data_if_missing = json!({ + "key": base64::encode(HS256Key::generate().to_bytes()), + }); + let secret = get_or_create_k8s_secret("dm-jwt-secret-hs256".to_owned(), "default", Some(new_secret_data_if_missing)).await?; + let key_as_base64_str = secret.data["key"].as_str().ok_or(anyhow!("The \"key\" field is missing!"))?; + //info!("Read/created secret key through k8s api:{:?}", key_as_base64_str); - // now that we have the key, store it in global-var for faster retrieval (use get_or_init for safe handling in case this func was called by two threads concurrently) - let result = JWT_KEY_HS256_STR.get_or_init(|| key_as_base64_str.to_owned()); - - //Ok(key_as_base64_str.to_owned()) - Ok(result.to_owned()) -} \ No newline at end of file + // now that we have the key, store it in global-var for faster retrieval (use get_or_init for safe handling in case this func was called by two threads concurrently) + let result = JWT_KEY_HS256_STR.get_or_init(|| key_as_base64_str.to_owned()); + + //Ok(key_as_base64_str.to_owned()) + Ok(result.to_owned()) +} diff --git a/Packages/rust-shared/src/utils/db/agql_ext/gql_general_extension.rs b/Packages/rust-shared/src/utils/db/agql_ext/gql_general_extension.rs index 3f49dd3dd..d56c89504 100644 --- a/Packages/rust-shared/src/utils/db/agql_ext/gql_general_extension.rs +++ b/Packages/rust-shared/src/utils/db/agql_ext/gql_general_extension.rs @@ -1,45 +1,52 @@ use std::fmt::Write; use std::sync::Arc; -use async_graphql::extensions::{ - Extension, ExtensionContext, ExtensionFactory, NextExecute, NextParseQuery, NextSubscribe, NextPrepareRequest, NextRequest, -}; -use async_graphql::parser::types::{ExecutableDocument, OperationType, Selection}; -use async_graphql::{PathSegment, Response, ServerResult, Variables, ServerError, Request}; -use async_graphql::futures_util::stream::BoxStream; -use indoc::{indoc, formatdoc}; use crate::utils::errors_::backtrace_simplifier::simplify_backtrace_str; use crate::utils::general_::extensions::{indent_all_lines, ToOwnedV}; -use tracing::{warn, info}; +use async_graphql::extensions::{Extension, ExtensionContext, ExtensionFactory, NextExecute, NextParseQuery, NextPrepareRequest, NextRequest, NextSubscribe}; +use async_graphql::futures_util::stream::BoxStream; +use 
async_graphql::parser::types::{ExecutableDocument, OperationType, Selection}; +use async_graphql::{PathSegment, Request, Response, ServerError, ServerResult, Variables}; +use indoc::{formatdoc, indoc}; +use tracing::{info, warn}; /// Logger extension #[cfg_attr(docsrs, doc(cfg(feature = "log")))] pub struct CustomExtensionCreator; impl CustomExtensionCreator { - pub fn new() -> Self { - Self {} - } + pub fn new() -> Self { + Self {} + } } impl ExtensionFactory for CustomExtensionCreator { - fn create(&self) -> Arc { - Arc::new(CustomExtension) - } + fn create(&self) -> Arc { + Arc::new(CustomExtension) + } } fn path_to_str(path: &[PathSegment]) -> String { - let mut path_str = String::new(); - for (idx, s) in path.iter().enumerate() { - if idx > 0 { path_str.push('.'); } - match s { - PathSegment::Index(idx) => { let _ = write!(&mut path_str, "{}", idx); } - PathSegment::Field(name) => { let _ = write!(&mut path_str, "{}", name); } - } - } - if path_str.len() == 0 { path_str.push_str("n/a"); } - path_str + let mut path_str = String::new(); + for (idx, s) in path.iter().enumerate() { + if idx > 0 { + path_str.push('.'); + } + match s { + PathSegment::Index(idx) => { + let _ = write!(&mut path_str, "{}", idx); + }, + PathSegment::Field(name) => { + let _ = write!(&mut path_str, "{}", name); + }, + } + } + if path_str.len() == 0 { + path_str.push_str("n/a"); + } + path_str } /// Helper so that that after we've printed the full error-info to the server log, we can remove the stacktrace-info from the response. (so it doesn't get sent to the client) fn strip_stacktraces_from_errors(errors: Vec) -> Vec { + #[rustfmt::skip] errors.into_iter().map(|err| { let message_before_backtrace = err.message.split("Stack backtrace:").next().map(|a| a.trim_end()).unwrap_or(&err.message); ServerError { @@ -52,62 +59,63 @@ fn strip_stacktraces_from_errors(errors: Vec) -> Vec { pub struct CustomExtension; #[async_trait::async_trait] impl Extension for CustomExtension { - /// Called at start of query/mutation request. - async fn request(&self, ctx: &ExtensionContext<'_>, next: NextRequest<'_>) -> Response { - let mut resp = next.run(ctx).await; - for err in &resp.errors { - // todo: find way to have logs for errors here include the query-string and variables as well (helpful for debugging other devs' failed query attempts, as well as catching abuse attempts) - let error_message_cleaned = simplify_backtrace_str(err.message.o(), true); - let error_message_final = indent_all_lines(&error_message_cleaned, 1); - warn!(target: "async-graphql", "[error in gql.request] path={} locations={:?} message={}", path_to_str(&err.path), err.locations, error_message_final); - } - //Response { errors: strip_stacktraces_from_errors(resp.errors), ..resp } - resp.errors = strip_stacktraces_from_errors(resp.errors); - resp - } + /// Called at start of query/mutation request. 
+ async fn request(&self, ctx: &ExtensionContext<'_>, next: NextRequest<'_>) -> Response { + let mut resp = next.run(ctx).await; + for err in &resp.errors { + // todo: find way to have logs for errors here include the query-string and variables as well (helpful for debugging other devs' failed query attempts, as well as catching abuse attempts) + let error_message_cleaned = simplify_backtrace_str(err.message.o(), true); + let error_message_final = indent_all_lines(&error_message_cleaned, 1); + warn!(target: "async-graphql", "[error in gql.request] path={} locations={:?} message={}", path_to_str(&err.path), err.locations, error_message_final); + } + //Response { errors: strip_stacktraces_from_errors(resp.errors), ..resp } + resp.errors = strip_stacktraces_from_errors(resp.errors); + resp + } - // todo: find way to log errors in subscribe-requests here (atm, using line in SubError constructor to accomplish) - //fn subscribe<'s>(&self, ctx: &ExtensionContext<'_>, stream: BoxStream<'s, Response>, next: NextSubscribe<'_>) -> BoxStream<'s, Response> { next.run(ctx, stream) } + // todo: find way to log errors in subscribe-requests here (atm, using line in SubError constructor to accomplish) + //fn subscribe<'s>(&self, ctx: &ExtensionContext<'_>, stream: BoxStream<'s, Response>, next: NextSubscribe<'_>) -> BoxStream<'s, Response> { next.run(ctx, stream) } - async fn parse_query(&self, ctx: &ExtensionContext<'_>, query: &str, variables: &Variables, next: NextParseQuery<'_>) -> ServerResult { - let document = next.run(ctx, query, variables).await?; - // commented; errors in syntax of the graphql-query appear to not actually show up as errors here, so this is not currently known to be helpful (anyway, we want a more universal solution) - // (instead they bubble-up to the `async_graphql::parser::parse_query(query_field)?` line in proxy_to_asjs.rs) - /*let document = next.run(ctx, query, variables).await.map_err(|err| { - // Why log here in addition to in `request`? Because the `query` and `variables` fields are not currently discernible in `request`, and we want that info in the logs. - let message = formatdoc!(r#" - [error in gql.parse_query] - variables={:?} - query= - ---------- - {} - ---------- - error:{:?} - "#, &variables, &query, &err).trim_end().to_owned(); - log::warn!(target: "async-graphql", "{}", message); - - err - })?;*/ + async fn parse_query(&self, ctx: &ExtensionContext<'_>, query: &str, variables: &Variables, next: NextParseQuery<'_>) -> ServerResult { + let document = next.run(ctx, query, variables).await?; + // commented; errors in syntax of the graphql-query appear to not actually show up as errors here, so this is not currently known to be helpful (anyway, we want a more universal solution) + // (instead they bubble-up to the `async_graphql::parser::parse_query(query_field)?` line in proxy_to_asjs.rs) + /*let document = next.run(ctx, query, variables).await.map_err(|err| { + // Why log here in addition to in `request`? Because the `query` and `variables` fields are not currently discernible in `request`, and we want that info in the logs. 
+ let message = formatdoc!(r#" + [error in gql.parse_query] + variables={:?} + query= + ---------- + {} + ---------- + error:{:?} + "#, &variables, &query, &err).trim_end().to_owned(); + log::warn!(target: "async-graphql", "{}", message); + err + })?;*/ + + #[rustfmt::skip] let is_schema = document.operations.iter() .filter(|(_, operation)| operation.node.ty == OperationType::Query) .any(|(_, operation)| operation.node.selection_set.node.items.iter().any(|selection| matches!(&selection.node, Selection::Field(field) if field.node.name.node == "__schema"))); - if !is_schema { - // this isn't really necessary, but can be helpful for debugging in some cases (not visible unless `INFO` logging for "async-graphql" target is enabled in logging.rs) - info!(target: "async-graphql", "[gql.execute] {}", ctx.stringify_execute_doc(&document, variables)); - } - Ok(document) - } + if !is_schema { + // this isn't really necessary, but can be helpful for debugging in some cases (not visible unless `INFO` logging for "async-graphql" target is enabled in logging.rs) + info!(target: "async-graphql", "[gql.execute] {}", ctx.stringify_execute_doc(&document, variables)); + } + Ok(document) + } } /*struct DropListener; impl DropListener { - fn new() -> Self { - Self {} - } + fn new() -> Self { + Self {} + } } impl Drop for DropListener { - fn drop(&mut self) { - println!("DropListener got dropped. @address:{:p}", self); - } -}*/ \ No newline at end of file + fn drop(&mut self) { + println!("DropListener got dropped. @address:{:p}", self); + } +}*/ diff --git a/Packages/rust-shared/src/utils/db/uuid.rs b/Packages/rust-shared/src/utils/db/uuid.rs index 94a1cf91a..249a8b781 100644 --- a/Packages/rust-shared/src/utils/db/uuid.rs +++ b/Packages/rust-shared/src/utils/db/uuid.rs @@ -1,41 +1,41 @@ use anyhow::Error; use async_graphql::ID; use base64::URL_SAFE_NO_PAD; -use uuid::{Uuid, Bytes}; +use uuid::{Bytes, Uuid}; pub fn uuid_to_b64(id: Uuid) -> String { - let bytes = id.as_bytes(); + let bytes = id.as_bytes(); - /*let mut str = base64::encode(bytes); - // remove the "==" at the end - str.pop(); - str.pop();*/ - let str = base64::encode_config(bytes, URL_SAFE_NO_PAD); - - return str; + /*let mut str = base64::encode(bytes); + // remove the "==" at the end + str.pop(); + str.pop();*/ + let str = base64::encode_config(bytes, URL_SAFE_NO_PAD); + + return str; } fn bytes_slice_as_16_length(bytes: &[u8]) -> Result<[u8; 16], Error> { - let bytes_len16: [u8; 16] = bytes.try_into()?; - Ok(bytes_len16) + let bytes_len16: [u8; 16] = bytes.try_into()?; + Ok(bytes_len16) } pub fn uuid_from_b64(str: String) -> Result { - /*let mut str_full = str; - // add the "==" at the end, for it to be a proper base64 string - str_full += "=="; - let bytes = base64::decode(str_full)?;*/ - let bytes = base64::decode_config(str, URL_SAFE_NO_PAD)?; + /*let mut str_full = str; + // add the "==" at the end, for it to be a proper base64 string + str_full += "=="; + let bytes = base64::decode(str_full)?;*/ + let bytes = base64::decode_config(str, URL_SAFE_NO_PAD)?; - let bytes_len16 = bytes_slice_as_16_length(&bytes)?; - return Ok(Uuid::from_bytes(bytes_len16)); + let bytes_len16 = bytes_slice_as_16_length(&bytes)?; + return Ok(Uuid::from_bytes(bytes_len16)); } // higher-level functions // ========== pub fn new_uuid_v4_as_b64() -> String { - let uuid = Uuid::new_v4(); - uuid_to_b64(uuid) + let uuid = Uuid::new_v4(); + uuid_to_b64(uuid) } pub fn new_uuid_v4_as_b64_id() -> ID { - ID(new_uuid_v4_as_b64()) -} \ No newline at end of file + 
ID(new_uuid_v4_as_b64()) +} diff --git a/Packages/rust-shared/src/utils/errors.rs b/Packages/rust-shared/src/utils/errors.rs index c118abcaa..071a229e0 100644 --- a/Packages/rust-shared/src/utils/errors.rs +++ b/Packages/rust-shared/src/utils/errors.rs @@ -1,39 +1,42 @@ -use std::{fmt::{self, Debug, Display, Formatter}}; use anyhow::{anyhow, Error}; +use std::fmt::{self, Debug, Display, Formatter}; use async_graphql::{async_stream, ErrorExtensions}; use futures::Stream; -use tracing::{log::{self, error}, warn}; +use tracing::{ + log::{self, error}, + warn, +}; /// Use this as a "safer alternative" to `option.unwrap()`; it returns an error (as well as immediately `error!(...)` logging it) rather than panicking. pub fn should_be_unreachable() -> anyhow::Error { - let result = anyhow!("This code-path should be unreachable! However, to be safe, we're returning an error (and logging it) rather than panicking."); - error!("{:?}", result); - result + let result = anyhow!("This code-path should be unreachable! However, to be safe, we're returning an error (and logging it) rather than panicking."); + error!("{:?}", result); + result } /*pub fn to_anyhow< - //T: std::error::Error - T: ToString + //T: std::error::Error + T: ToString >(err: T) -> anyhow::Error - where T: Into + Send + Sync + where T: Into + Send + Sync { - anyhow!(err) + anyhow!(err) } pub fn to_anyhow_with_extra< - //T: std::error::Error - T: ToString + //T: std::error::Error + T: ToString >(err: T, extra: String) -> anyhow::Error - where T: Into + Send + Sync + where T: Into + Send + Sync { - anyhow!(err.to_string() + "\n@extra:" + &extra) + anyhow!(err.to_string() + "\n@extra:" + &extra) }*/ pub fn to_anyhow(err: T) -> anyhow::Error { - anyhow!(format!("{:?}", err)) + anyhow!(format!("{:?}", err)) } pub fn to_anyhow_with_extra(err: T, extra: String) -> anyhow::Error { - anyhow!(format!("{:?}", err) + "\n@extra:" + &extra) + anyhow!(format!("{:?}", err) + "\n@extra:" + &extra) } // BasicError @@ -41,23 +44,23 @@ pub fn to_anyhow_with_extra(err: T, extra: String) -> anyhow::Error { #[derive(Debug, Clone)] pub struct BasicError { - message: String, + message: String, } impl BasicError { - pub fn new(message: String) -> Self { - Self { message } - } - pub fn boxed(message: String) -> Box { - Box::new(Self::new(message)) - } + pub fn new(message: String) -> Self { + Self { message } + } + pub fn boxed(message: String) -> Box { + Box::new(Self::new(message)) + } } impl std::error::Error for BasicError {} impl fmt::Display for BasicError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.message) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.message) + } } // SubError (special one for async-graphql subscriptions) @@ -67,22 +70,22 @@ impl fmt::Display for BasicError { // Clone is needed for it to be used under async-graphql's `#[Subscription]` macro #[derive(Debug, Clone)] pub struct SubError { - message: String, + message: String, } impl SubError { - pub fn new(message: String) -> Self { - // When a SubError is constructed, immediately log it to the console, since async-graphql does not print it to the server's log itself (it only sends it to the client). - // (ideally, we would do this in gql_general_extension.subscribe [cleaner, and more universal, eg. 
also catches syntax errors], but I haven't figured out how yet) - log::warn!(target: "async-graphql", "[error in SubError/gql.subscribe] {:?}", anyhow!(message.clone())); // wrap in anyhow!, to get stacktrace + pub fn new(message: String) -> Self { + // When a SubError is constructed, immediately log it to the console, since async-graphql does not print it to the server's log itself (it only sends it to the client). + // (ideally, we would do this in gql_general_extension.subscribe [cleaner, and more universal, eg. also catches syntax errors], but I haven't figured out how yet) + log::warn!(target: "async-graphql", "[error in SubError/gql.subscribe] {:?}", anyhow!(message.clone())); // wrap in anyhow!, to get stacktrace - Self { message } - } + Self { message } + } } impl std::error::Error for SubError {} impl fmt::Display for SubError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "SubError:{}", self.message) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "SubError:{}", self.message) + } } /// Use like this: @@ -90,8 +93,8 @@ impl fmt::Display for SubError { /// some_func().map_err(to_sub_err)?; /// ``` /*pub fn to_sub_err(base_err: anyhow::Error) -> SubError { - //SubError::new(base_err.to_string()) // this only provides the first line (in some cases anyway) - SubError::new(format!("{:?}", base_err)) + //SubError::new(base_err.to_string()) // this only provides the first line (in some cases anyway) + SubError::new(format!("{:?}", base_err)) }*/ /// Use like this: @@ -99,18 +102,18 @@ impl fmt::Display for SubError { /// some_func().map_err(to_sub_err)?; /// ``` pub fn to_sub_err(base_err: T) -> SubError { - //SubError::new(base_err.to_string()) // this only provides the first line (in some cases anyway) - SubError::new(format!("{:?}", base_err)) + //SubError::new(base_err.to_string()) // this only provides the first line (in some cases anyway) + SubError::new(format!("{:?}", base_err)) } // commented for now; current use-cases are better just using with_context() earlier in-chain /*pub fn to_sub_err_with_extra(base_err: T, extra: String) -> SubError { - SubError::new(format!("{:?}", base_err) + "\n@extra:" + &extra) + SubError::new(format!("{:?}", base_err) + "\n@extra:" + &extra) }*/ pub fn to_sub_err_in_stream(base_err: T) -> impl Stream> { - async_stream::stream! { - yield Err(to_sub_err(base_err)) - } + async_stream::stream! 
{ + yield Err(to_sub_err(base_err)) + } } // graphql-error @@ -123,9 +126,9 @@ pub struct GQLError { full_error_info_string: String, } /*impl GQLError { - pub fn new(full_error_info_string: String) -> Self { - Self { full_error_info_string } - } + pub fn new(full_error_info_string: String) -> Self { + Self { full_error_info_string } + } }*/ impl Display for GQLError { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { @@ -133,23 +136,24 @@ impl Display for GQLError { } } /*impl Debug for GQLError { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.full_error_info_string) - } + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.full_error_info_string) + } }*/ -impl From for GQLError where E: Into + Send + Sync + 'static { +impl From for GQLError +where + E: Into + Send + Sync + 'static, +{ fn from(error: E) -> Self { let as_anyhow_error_with_backtrace: anyhow::Error = error.into(); - /*for cause in as_anyhow_error_with_backtrace.chain() { - warn!("New GQLError cause: {:?}", cause); - /*if let Some(ref app_err) = cause.downcast_ref::() { // cast to AppError - error!("New GQLError cause2:{:?}", app_err); - }*/ - } - warn!("New GQLError: {:?}", as_anyhow_error_with_backtrace);*/ - GQLError { - full_error_info_string: format!("{:?}", as_anyhow_error_with_backtrace), + /*for cause in as_anyhow_error_with_backtrace.chain() { + warn!("New GQLError cause: {:?}", cause); + /*if let Some(ref app_err) = cause.downcast_ref::() { // cast to AppError + error!("New GQLError cause2:{:?}", app_err); + }*/ } + warn!("New GQLError: {:?}", as_anyhow_error_with_backtrace);*/ + GQLError { full_error_info_string: format!("{:?}", as_anyhow_error_with_backtrace) } } } @@ -158,9 +162,7 @@ impl From for GQLError where E: Into + Send + Sync + 'stati /// some_func().map_err(to_gql_error)?; /// ``` pub fn to_gql_err(base_err: T) -> GQLError { - //GQLError::new(base_err.to_string()) // this only provides the first line (in some cases anyway) - //GQLError::new(format!("{:?}", base_err)) - GQLError { - full_error_info_string: format!("{:?}", base_err), - } -} \ No newline at end of file + //GQLError::new(base_err.to_string()) // this only provides the first line (in some cases anyway) + //GQLError::new(format!("{:?}", base_err)) + GQLError { full_error_info_string: format!("{:?}", base_err) } +} diff --git a/Packages/rust-shared/src/utils/errors_/backtrace_simplifier.rs b/Packages/rust-shared/src/utils/errors_/backtrace_simplifier.rs index b7743cd1e..c4227683e 100644 --- a/Packages/rust-shared/src/utils/errors_/backtrace_simplifier.rs +++ b/Packages/rust-shared/src/utils/errors_/backtrace_simplifier.rs @@ -1,4 +1,4 @@ -use crate::anyhow::{Error, ensure}; +use crate::anyhow::{ensure, Error}; use crate::itertools::Itertools; use crate::utils::general_::extensions::ToOwnedV; use once_cell::sync::Lazy; @@ -6,70 +6,74 @@ use regex::Regex; #[derive(Clone)] enum StackTraceLine { - // source types - FuncName(String), - CodePath(String), - Other(String), - // types that only exist after simplification - CodePathPlusFuncName(String), + // source types + FuncName(String), + CodePath(String), + Other(String), + // types that only exist after simplification + CodePathPlusFuncName(String), } impl StackTraceLine { - pub fn get_str(&self) -> &str { - match self { - Self::FuncName(str) => str, - Self::CodePath(str) => str, - Self::Other(str) => str, - Self::CodePathPlusFuncName(str) => str, - } - } + pub fn get_str(&self) -> &str { + match self { + Self::FuncName(str) 
=> str, + Self::CodePath(str) => str, + Self::Other(str) => str, + Self::CodePathPlusFuncName(str) => str, + } + } } // this error-handling layer is probably not necessary (the unwraps should all be safe); but using it for now pub fn simplify_backtrace_str(source: String, remove_spammy_trace_lines: bool) -> String { - try_simplify_backtrace_str(&source, remove_spammy_trace_lines).unwrap_or( - format!("[tried but failed to simplify backtrace; stacktrace left unchanged below]\n{}", source) - ) + try_simplify_backtrace_str(&source, remove_spammy_trace_lines).unwrap_or(format!("[tried but failed to simplify backtrace; stacktrace left unchanged below]\n{}", source)) } #[allow(non_upper_case_globals)] pub fn try_simplify_backtrace_str(source: &str, remove_spammy_trace_lines: bool) -> Result { - let lines_raw = source.split("\n"); + let lines_raw = source.split("\n"); - // regexes for general categorization - static regex__func_name: Lazy = Lazy::new(|| Regex::new(r"^ +(\d+):").unwrap()); - static regex__code_path: Lazy = Lazy::new(|| Regex::new(r"^ +at ").unwrap()); - static regex__code_path_for_own_code: Lazy = Lazy::new(|| Regex::new(r"^ +at \./").unwrap()); - // regexes for *simplifying* stack-entry lines - static regex__github_path: Lazy = Lazy::new(|| Regex::new("/usr/local/cargo/registry/src/github.com-([0-9a-f]+)/").unwrap()); - static regex__rustc_path: Lazy = Lazy::new(|| Regex::new("/rustc/([0-9a-f]+)/").unwrap()); - // regexes for *omitting* stack-entry lines - static regex__tokio: Lazy = Lazy::new(|| Regex::new(r"\/tokio-\d").unwrap()); + // regexes for general categorization + static regex__func_name: Lazy = Lazy::new(|| Regex::new(r"^ +(\d+):").unwrap()); + static regex__code_path: Lazy = Lazy::new(|| Regex::new(r"^ +at ").unwrap()); + static regex__code_path_for_own_code: Lazy = Lazy::new(|| Regex::new(r"^ +at \./").unwrap()); + // regexes for *simplifying* stack-entry lines + static regex__github_path: Lazy = Lazy::new(|| Regex::new("/usr/local/cargo/registry/src/github.com-([0-9a-f]+)/").unwrap()); + static regex__rustc_path: Lazy = Lazy::new(|| Regex::new("/rustc/([0-9a-f]+)/").unwrap()); + // regexes for *omitting* stack-entry lines + static regex__tokio: Lazy = Lazy::new(|| Regex::new(r"\/tokio-\d").unwrap()); - let lines = lines_raw.map(|line| { - if regex__func_name.is_match(line) { - let simplified = line.trim_start().to_owned(); - StackTraceLine::FuncName(simplified) - } else if regex__code_path.is_match(line) { - let mut simplified = line.trim_start().to_owned(); - simplified = regex__github_path.replace(&simplified, "[GH]/").into_owned(); - simplified = regex__rustc_path.replace(&simplified, "[RUSTC]/").into_owned(); - // for code-path lines that are for non-local-project files, indent it a bit (making visual distinction easier) - if !regex__code_path_for_own_code.is_match(line) { - simplified.insert_str(0, " "); - } - StackTraceLine::CodePath(simplified) - } else { - StackTraceLine::Other(line.to_owned()) - } - }).collect_vec(); - let indent_for_column_2: usize = lines.iter().filter_map(|line| { - match line { - StackTraceLine::CodePath(str) => Some(str.len()), - _ => None, - } - }).max().unwrap_or(0) + 3; // add a 3-space buffer, for longest-path-line + let lines = lines_raw + .map(|line| { + if regex__func_name.is_match(line) { + let simplified = line.trim_start().to_owned(); + StackTraceLine::FuncName(simplified) + } else if regex__code_path.is_match(line) { + let mut simplified = line.trim_start().to_owned(); + simplified = regex__github_path.replace(&simplified, 
"[GH]/").into_owned(); + simplified = regex__rustc_path.replace(&simplified, "[RUSTC]/").into_owned(); + // for code-path lines that are for non-local-project files, indent it a bit (making visual distinction easier) + if !regex__code_path_for_own_code.is_match(line) { + simplified.insert_str(0, " "); + } + StackTraceLine::CodePath(simplified) + } else { + StackTraceLine::Other(line.to_owned()) + } + }) + .collect_vec(); + let indent_for_column_2: usize = lines + .iter() + .filter_map(|line| match line { + StackTraceLine::CodePath(str) => Some(str.len()), + _ => None, + }) + .max() + .unwrap_or(0) + + 3; // add a 3-space buffer, for longest-path-line - let old_lines = lines.clone(); + let old_lines = lines.clone(); + #[rustfmt::skip] let new_lines = lines.into_iter().enumerate().filter_map(|(i, line)| { match line { StackTraceLine::FuncName(_) => { @@ -87,39 +91,38 @@ pub fn try_simplify_backtrace_str(source: &str, remove_spammy_trace_lines: bool) let path_line_str = old_lines.get(i)?.get_str(); let func_line_str = match &old_lines.get(i - 1) { Some(StackTraceLine::FuncName(str)) => Some(str), _ => None }; - if remove_spammy_trace_lines { - let last_path_line_str = match &old_lines.get(i - 2) { Some(StackTraceLine::CodePath(str)) => Some(str), _ => None }; + if remove_spammy_trace_lines { + let last_path_line_str = match &old_lines.get(i - 2) { + Some(StackTraceLine::CodePath(str)) => Some(str), + _ => None, + }; - // if this stack-entry's path was to a "spammy" tokio-related stack-line, and true for last stack-entry as well, replace with - if regex__tokio.is_match(path_line_str) { - match regex__tokio.is_match(last_path_line_str.unwrap_or(&"".o())) { - true => return None, - false => { - //let new_path_line_str = " ===== [spammy segment of stack-trace; removed by backtrace_simplifier.rs; stack-entry type: tokio] ====="; - let new_path_line_str = " [spammy segment of stack-trace; removed by backtrace_simplifier.rs; stack-entry type: tokio]"; - let spaces_to_reach_c2_indent = indent_for_column_2 - new_path_line_str.len(); - let new_line = StackTraceLine::CodePathPlusFuncName( - new_path_line_str.to_owned() - + &" ".repeat(spaces_to_reach_c2_indent) - + "===== " + &func_line_str.unwrap_or(&"".o()) + " =====" - ); - return Some((i, new_line)); - }, - } - } - } + // if this stack-entry's path was to a "spammy" tokio-related stack-line, and true for last stack-entry as well, replace with + if regex__tokio.is_match(path_line_str) { + match regex__tokio.is_match(last_path_line_str.unwrap_or(&"".o())) { + true => return None, + false => { + //let new_path_line_str = " ===== [spammy segment of stack-trace; removed by backtrace_simplifier.rs; stack-entry type: tokio] ====="; + let new_path_line_str = " [spammy segment of stack-trace; removed by backtrace_simplifier.rs; stack-entry type: tokio]"; + let spaces_to_reach_c2_indent = indent_for_column_2 - new_path_line_str.len(); + let new_line = StackTraceLine::CodePathPlusFuncName(new_path_line_str.to_owned() + &" ".repeat(spaces_to_reach_c2_indent) + "===== " + &func_line_str.unwrap_or(&"".o()) + " ====="); + return Some((i, new_line)); + }, + } + } + } - if let Some(func_line_str) = func_line_str { - let spaces_to_reach_c2_indent = indent_for_column_2 - path_line_str.len(); - let new_line = StackTraceLine::CodePathPlusFuncName(path_line_str.to_owned() + &" ".repeat(spaces_to_reach_c2_indent) + &func_line_str); - return Some((i, new_line)); - } - Some((i, line)) - }, - _ => Some((i, line)), - } - }).map(|a| a.1).collect_vec(); + if let 
Some(func_line_str) = func_line_str { + let spaces_to_reach_c2_indent = indent_for_column_2 - path_line_str.len(); + let new_line = StackTraceLine::CodePathPlusFuncName(path_line_str.to_owned() + &" ".repeat(spaces_to_reach_c2_indent) + &func_line_str); + return Some((i, new_line)); + } + Some((i, line)) + }, + _ => Some((i, line)), + } + }).map(|a| a.1).collect_vec(); - let result = new_lines.iter().map(|a| a.get_str()).collect_vec().join("\n"); - Ok(result) -} \ No newline at end of file + let result = new_lines.iter().map(|a| a.get_str()).collect_vec().join("\n"); + Ok(result) +} diff --git a/Packages/rust-shared/src/utils/futures.rs b/Packages/rust-shared/src/utils/futures.rs index 8764e0b40..0ea3ff2c3 100644 --- a/Packages/rust-shared/src/utils/futures.rs +++ b/Packages/rust-shared/src/utils/futures.rs @@ -1,3 +1,4 @@ +use futures::lock::Mutex; use std::future::Future; use std::rc::Rc; use std::sync::Arc; @@ -5,53 +6,52 @@ use std::task::{Context, Poll, Wake}; use std::thread::{self, Thread}; use std::time::Duration; use tokio; -use futures::lock::Mutex; // attempt 1 // ========== /*lazy_static::lazy_static! { - //static ref GLOBAL_TICK_RECEIVER: ABReceiver = create_global_tick_receiver(); - static ref GLOBAL_TICK_RECEIVER: FReceiver = create_global_tick_receiver(); + //static ref GLOBAL_TICK_RECEIVER: ABReceiver = create_global_tick_receiver(); + static ref GLOBAL_TICK_RECEIVER: FReceiver = create_global_tick_receiver(); } //fn create_global_tick_receiver() -> ABReceiver { fn create_global_tick_receiver() -> FReceiver { - //let (msg_sender, msg_receiver): (ABSender, ABReceiver) = async_broadcast::broadcast(10000); - let (msg_sender, msg_receiver): (FSender, FReceiver) = flume::unbounded(); + //let (msg_sender, msg_receiver): (ABSender, ABReceiver) = async_broadcast::broadcast(10000); + let (msg_sender, msg_receiver): (FSender, FReceiver) = flume::unbounded(); - let sender_clone = msg_sender.clone(); - let receiver_clone = msg_receiver.clone(); - tokio::spawn(async move { - let mut interval = tokio::time::interval(Duration::from_secs_f64(0.1)); - loop { - // first, do a standard tick-wait - interval.tick().await; + let sender_clone = msg_sender.clone(); + let receiver_clone = msg_receiver.clone(); + tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_secs_f64(0.1)); + loop { + // first, do a standard tick-wait + interval.tick().await; - // then, if the buffer is >100 entries, wait until the buffer-size goes down - /*while sender_clone.len() > 100 { - interval.tick().await; - }*/ + // then, if the buffer is >100 entries, wait until the buffer-size goes down + /*while sender_clone.len() > 100 { + interval.tick().await; + }*/ - // if the buffer is >100 entries, consume them ourself into it gets back under 100 - while sender_clone.len() > 100 { - receiver_clone.recv_async().await.unwrap(); - } + // if the buffer is >100 entries, consume them ourself into it gets back under 100 + while sender_clone.len() > 100 { + receiver_clone.recv_async().await.unwrap(); + } - //sender_clone.broadcast(time_since_epoch_ms()).await.unwrap(); - sender_clone.send(time_since_epoch_ms()).unwrap(); - println!("Ticked! @len:{}", sender_clone.len()); - } - }); + //sender_clone.broadcast(time_since_epoch_ms()).await.unwrap(); + sender_clone.send(time_since_epoch_ms()).unwrap(); + println!("Ticked! 
@len:{}", sender_clone.len()); + } + }); - msg_receiver + msg_receiver } pub async fn global_tick_helper() { - /*let temp = GLOBAL_TICK_RECEIVER; - temp.recv().unwrap();*/ + /*let temp = GLOBAL_TICK_RECEIVER; + temp.recv().unwrap();*/ - GLOBAL_TICK_RECEIVER.recv_async().await.unwrap(); + GLOBAL_TICK_RECEIVER.recv_async().await.unwrap(); - //tokio::time::sleep(Duration::from_secs(0)).await + //tokio::time::sleep(Duration::from_secs(0)).await }*/ // attempt 2 @@ -59,33 +59,33 @@ pub async fn global_tick_helper() { /// An ugly but necessary workaround to resolve the issue of some futures not being polled reliably. (eg. SubscriptionShard_General::logEntries) pub async fn make_reliable(fut: impl Future, poll_frequency: Duration) -> T { - // Pin the future so it can be polled. - let mut fut = Box::pin(fut); + // Pin the future so it can be polled. + let mut fut = Box::pin(fut); - // Create a new context to be passed to the future. - let waker = Arc::new(EmptyWaker).into(); + // Create a new context to be passed to the future. + let waker = Arc::new(EmptyWaker).into(); - // maybe temp; have a new Context created at each loop, since (after update) error occurs at await-point otherwise (about some `*mut ()` being non-Send) - //let mut cx = Context::from_waker(&waker); + // maybe temp; have a new Context created at each loop, since (after update) error occurs at await-point otherwise (about some `*mut ()` being non-Send) + //let mut cx = Context::from_waker(&waker); - let mut interval = tokio::time::interval(poll_frequency); - loop { - // The next two lines are where the magic happens: every X interval, poll the future... - // ...even if the future-context "above" this function/future is not getting polled reliably - interval.tick().await; - let mut cx = Context::from_waker(&waker); - match fut.as_mut().poll(&mut cx) { - Poll::Pending => {}, - Poll::Ready(res) => { - return res; - }, - } - } + let mut interval = tokio::time::interval(poll_frequency); + loop { + // The next two lines are where the magic happens: every X interval, poll the future... 
+ // ...even if the future-context "above" this function/future is not getting polled reliably + interval.tick().await; + let mut cx = Context::from_waker(&waker); + match fut.as_mut().poll(&mut cx) { + Poll::Pending => {}, + Poll::Ready(res) => { + return res; + }, + } + } } struct EmptyWaker; impl Wake for EmptyWaker { - fn wake(self: Arc) { - // do nothing; we merely need a context-object to pass to poll() - } -} \ No newline at end of file + fn wake(self: Arc) { + // do nothing; we merely need a context-object to pass to poll() + } +} diff --git a/Packages/rust-shared/src/utils/general.rs b/Packages/rust-shared/src/utils/general.rs index 7a5cc58b3..b4e1d2c07 100644 --- a/Packages/rust-shared/src/utils/general.rs +++ b/Packages/rust-shared/src/utils/general.rs @@ -1,64 +1,62 @@ -use std::{env, collections::HashMap}; use anyhow::{anyhow, Error}; +use std::{collections::HashMap, env}; -use async_graphql::{EnumType, resolver_utils::enum_value}; +use async_graphql::{resolver_utils::enum_value, EnumType}; use axum::http::Uri; pub enum K8sEnv { - Dev, - Prod, + Dev, + Prod, } impl std::fmt::Debug for K8sEnv { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Dev => write!(f, "dev"), - Self::Prod => write!(f, "prod"), - } - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Dev => write!(f, "dev"), + Self::Prod => write!(f, "prod"), + } + } } pub fn k8s_env() -> K8sEnv { - match env::var("ENVIRONMENT").expect("An environment-variable named `ENVIRONMENT` must be provided, with value `dev` or `prod`.").as_str() { - "dev" => K8sEnv::Dev, - "prod" => K8sEnv::Prod, - _ => panic!("The environment-variable named `ENVIRONMENT` must be either `dev` or `prod`."), - } + match env::var("ENVIRONMENT").expect("An environment-variable named `ENVIRONMENT` must be provided, with value `dev` or `prod`.").as_str() { + "dev" => K8sEnv::Dev, + "prod" => K8sEnv::Prod, + _ => panic!("The environment-variable named `ENVIRONMENT` must be either `dev` or `prod`."), + } } pub fn k8s_dev() -> bool { - match k8s_env() { - K8sEnv::Dev => true, - _ => false, - } + match k8s_env() { + K8sEnv::Dev => true, + _ => false, + } } pub fn k8s_prod() -> bool { - match k8s_env() { - K8sEnv::Prod => true, - _ => false, - } + match k8s_env() { + K8sEnv::Prod => true, + _ => false, + } } pub fn get_uri_params(uri: &Uri) -> HashMap { - let params: HashMap = uri.query() - .map(|v| url::form_urlencoded::parse(v.as_bytes()).into_owned().collect()) - .unwrap_or_else(HashMap::new); - params + let params: HashMap = uri.query().map(|v| url::form_urlencoded::parse(v.as_bytes()).into_owned().collect()).unwrap_or_else(HashMap::new); + params } pub fn as_debug_str(obj: &impl std::fmt::Debug) -> String { - format!("{:?}", obj) + format!("{:?}", obj) } pub fn as_json_str(obj: &T) -> Result { - let as_json_value = serde_json::to_value(obj)?; - let as_str = as_json_value.as_str().ok_or(anyhow!("The object did not serialize to a json string!"))?; - Ok(as_str.to_owned()) + let as_json_value = serde_json::to_value(obj)?; + let as_str = as_json_value.as_str().ok_or(anyhow!("The object did not serialize to a json string!"))?; + Ok(as_str.to_owned()) } // project-specific; basically all our enums have derive(Serialize), so use that for serialization pub fn enum_to_string(obj: &T) -> String { - as_json_str(obj).unwrap() + as_json_str(obj).unwrap() } /*pub fn enum_to_string(obj: T) -> String { - enum_value(obj).to_string() + enum_value(obj).to_string() }*/ /*pub fn 
x_is_one_of(x: &T, list: &[&T]) -> Result<(), Error> { @@ -71,34 +69,34 @@ pub fn enum_to_string(obj: &T) -> String { }*/ pub fn average(numbers: &[f64]) -> f64 { - numbers.iter().sum::() as f64 / numbers.len() as f64 + numbers.iter().sum::() as f64 / numbers.len() as f64 } pub fn f64_to_str_rounded(val: f64, fraction_digits: usize) -> String { - // see: https://stackoverflow.com/a/61101531 - format!("{:.1$}", val, fraction_digits) + // see: https://stackoverflow.com/a/61101531 + format!("{:.1$}", val, fraction_digits) } pub fn f64_to_percent_str(f: f64, fraction_digits: usize) -> String { - let val_as_percent = f * 100.0; - format!("{}%", f64_to_str_rounded(val_as_percent, fraction_digits)) + let val_as_percent = f * 100.0; + format!("{}%", f64_to_str_rounded(val_as_percent, fraction_digits)) } /*macro_rules! default( - // Create a new T where T is known. - // let x = default!(Foo, x:1); - ($T:ident, $($k:ident: $v:expr), *) => ( - $T { $($k: $v), *, ..::std::default::Default::default() } - ); + // Create a new T where T is known. + // let x = default!(Foo, x:1); + ($T:ident, $($k:ident: $v:expr), *) => ( + $T { $($k: $v), *, ..::std::default::Default::default() } + ); - // Create a new T where T is known, but with defaults. - // let x = default!(Foo); - ($T:ident) => ( - $T { ..::std::default::Default::default() } - ); + // Create a new T where T is known, but with defaults. + // let x = default!(Foo); + ($T:ident) => ( + $T { ..::std::default::Default::default() } + ); - // Create a new T where T is not known. - // let x: T = default!(); - () => ( - ::std::default::Default::default(); - ); -);*/ \ No newline at end of file + // Create a new T where T is not known. + // let x: T = default!(); + () => ( + ::std::default::Default::default(); + ); +);*/ diff --git a/Packages/rust-shared/src/utils/general_/extensions.rs b/Packages/rust-shared/src/utils/general_/extensions.rs index 35814a43d..25408024e 100644 --- a/Packages/rust-shared/src/utils/general_/extensions.rs +++ b/Packages/rust-shared/src/utils/general_/extensions.rs @@ -1,4 +1,8 @@ -use std::{fmt, num::TryFromIntError, ops::{Residual, Try}}; +use std::{ + fmt, + num::TryFromIntError, + ops::{Residual, Try}, +}; use crate::utils::errors_::backtrace_simplifier::simplify_backtrace_str; @@ -6,112 +10,113 @@ pub type ChangeOutputType = <::Residual as Residual>::TryType // commented in favor of `.o()` (seen below), as that communicates the "narrowness of the intended application" better (ie. for &str->String conversions, not as a general-purpose "to_string" method) /*pub trait ToStringV : ToString { - /// Simply an alias for `.to_string()`. - fn s(&self) -> String { - self.to_string() - } + /// Simply an alias for `.to_string()`. + fn s(&self) -> String { + self.to_string() + } } impl ToStringV for T where T: ToString {}*/ -pub trait ToOwnedV where Self : ToOwned { - /// Simply an alias for `.to_owned()`. - fn o(&self) -> ::Owned { - self.to_owned() - } +pub trait ToOwnedV +where + Self: ToOwned, +{ + /// Simply an alias for `.to_owned()`. 
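For orientation, a minimal usage sketch of the `.o()` alias documented above (illustrative only; `demo_o` is a hypothetical function name, and the import path matches the one already used in _general.rs):

use rust_shared::utils::general_::extensions::ToOwnedV;

fn demo_o() {
    // `.o()` is simply shorthand for `.to_owned()`, intended for terse &str -> String conversion
    let owned: String = "some str".o();
    assert_eq!(owned, "some str".to_owned());
}
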
+ fn o(&self) -> ::Owned { + self.to_owned() + } } impl ToOwnedV for T where T: ToOwned {} -pub trait IteratorV : Iterator { - /// Alias for `core::iter::Iterator::try_collect` (needed for when import of itertools "shadows" that core implementation, which I prefer) - fn try_collect2(&mut self) -> ChangeOutputType - where - Self: Sized, - Self::Item: Try, - ::Residual: Residual, - B: FromIterator<::Output>, - { - core::iter::Iterator::try_collect::(self) - } +pub trait IteratorV: Iterator { + /// Alias for `core::iter::Iterator::try_collect` (needed for when import of itertools "shadows" that core implementation, which I prefer) + fn try_collect2(&mut self) -> ChangeOutputType + where + Self: Sized, + Self::Item: Try, + ::Residual: Residual, + B: FromIterator<::Output>, + { + core::iter::Iterator::try_collect::(self) + } - // figure out the type-definition for this someday, lol - /*fn try_collect_vec(&mut self) -> ChangeOutputType - where - Self: Sized, - Self::Item: Try, - ::Residual: Residual, - B: FromIterator<::Output>, - { - core::iter::Iterator::try_collect::>(self) - }*/ + // figure out the type-definition for this someday, lol + /*fn try_collect_vec(&mut self) -> ChangeOutputType + where + Self: Sized, + Self::Item: Try, + ::Residual: Residual, + B: FromIterator<::Output>, + { + core::iter::Iterator::try_collect::>(self) + }*/ } impl IteratorV for T where T: Iterator {} // this doesn't work, since Result is an enum, not a trait /*pub trait ResultV : Result { - fn expect_lazy String>(self, msg_getter: F) -> T - where - E: fmt::Debug - { - match self { - Ok(t) => t, - Err(err) => { - let e_str = format!("{err:?}"); - let msg = msg_getter(err); - panic!("{}: {}", msg, e_str); - }, - } - } + fn expect_lazy String>(self, msg_getter: F) -> T + where + E: fmt::Debug + { + match self { + Ok(t) => t, + Err(err) => { + let e_str = format!("{err:?}"); + let msg = msg_getter(err); + panic!("{}: {}", msg, e_str); + }, + } + } } impl ResultV for Result { }*/ pub trait ResultV { - fn expect_lazy String>(self, msg_getter: F) -> T - where - E: fmt::Debug; + fn expect_lazy String>(self, msg_getter: F) -> T + where + E: fmt::Debug; } impl ResultV for Result { - fn expect_lazy String>(self, msg_getter: F) -> T - where - E: fmt::Debug - { - match self { - Ok(t) => t, - Err(err) => { - let err_str = format!("{err:?}"); - let err_str_simplified = simplify_backtrace_str(err_str, true); - let msg = msg_getter(err); - panic!("{}\n\t@base_error:{}", msg, indent_all_lines(&err_str_simplified, 1)); - }, - } - } + fn expect_lazy String>(self, msg_getter: F) -> T + where + E: fmt::Debug, + { + match self { + Ok(t) => t, + Err(err) => { + let err_str = format!("{err:?}"); + let err_str_simplified = simplify_backtrace_str(err_str, true); + let msg = msg_getter(err); + panic!("{}\n\t@base_error:{}", msg, indent_all_lines(&err_str_simplified, 1)); + }, + } + } } pub fn indent_all_lines(from_str: &str, indent_amount: usize) -> String { - let lines = from_str.split("\n"); - let lines_indented: Vec = lines.map(|line| { - "\t".repeat(indent_amount) + line - }).collect(); - lines_indented.join("\n") + let lines = from_str.split("\n"); + let lines_indented: Vec = lines.map(|line| "\t".repeat(indent_amount) + line).collect(); + lines_indented.join("\n") } // trait that lets one do myVec.len_u32() to get a u32 (with panic if the length is too big) pub trait VecLenU32 { - fn len_u32(&self) -> u32; - fn len_u64(&self) -> u64; - fn try_len_u32(&self) -> Result; + fn len_u32(&self) -> u32; + fn len_u64(&self) -> u64; + fn 
try_len_u32(&self) -> Result; } impl VecLenU32 for Vec { - fn len_u32(&self) -> u32 { - self.len().try_into().expect("Vec length is too big to convert to u32") - } - // atm this is safe, since usize cannot be larger than u64 (rust doesn't support 128bit+ architectures atm) - fn len_u64(&self) -> u64 { - let len = self.len(); - len as u64 - } - - fn try_len_u32(&self) -> Result { - //self.len().try_into().map_err(anyhow!("Vec length is too big to convert to u32")) - self.len().try_into() - } -} \ No newline at end of file + fn len_u32(&self) -> u32 { + self.len().try_into().expect("Vec length is too big to convert to u32") + } + // atm this is safe, since usize cannot be larger than u64 (rust doesn't support 128bit+ architectures atm) + fn len_u64(&self) -> u64 { + let len = self.len(); + len as u64 + } + + fn try_len_u32(&self) -> Result { + //self.len().try_into().map_err(anyhow!("Vec length is too big to convert to u32")) + self.len().try_into() + } +} diff --git a/Packages/rust-shared/src/utils/general_/func_types.rs b/Packages/rust-shared/src/utils/general_/func_types.rs index d3e466078..82e40b424 100644 --- a/Packages/rust-shared/src/utils/general_/func_types.rs +++ b/Packages/rust-shared/src/utils/general_/func_types.rs @@ -1,33 +1,45 @@ use futures::Future; pub trait AsyncFn_Args0: Fn() -> Self::Future { - type Future: Future; + type Future: Future; } impl AsyncFn_Args0 for F -where F: Fn() -> Fut, Fut: Future { - type Future = Fut; +where + F: Fn() -> Fut, + Fut: Future, +{ + type Future = Fut; } pub trait AsyncFn_Args1: Fn(Arg1) -> Self::Future { - type Future: Future; + type Future: Future; } impl AsyncFn_Args1 for F -where F: Fn(Arg1) -> Fut, Fut: Future { - type Future = Fut; +where + F: Fn(Arg1) -> Fut, + Fut: Future, +{ + type Future = Fut; } pub trait AsyncFn_Args2: Fn(Arg1, Arg2) -> Self::Future { - type Future: Future; + type Future: Future; } impl AsyncFn_Args2 for F -where F: Fn(Arg1, Arg2) -> Fut, Fut: Future { - type Future = Fut; +where + F: Fn(Arg1, Arg2) -> Fut, + Fut: Future, +{ + type Future = Fut; } pub trait AsyncFn_Args3: Fn(Arg1, Arg2, Arg3) -> Self::Future { - type Future: Future; + type Future: Future; } impl AsyncFn_Args3 for F -where F: Fn(Arg1, Arg2, Arg3) -> Fut, Fut: Future { - type Future = Fut; -} \ No newline at end of file +where + F: Fn(Arg1, Arg2, Arg3) -> Fut, + Fut: Future, +{ + type Future = Fut; +} diff --git a/Packages/rust-shared/src/utils/general_/serde.rs b/Packages/rust-shared/src/utils/general_/serde.rs index 20c0bc620..008146aeb 100644 --- a/Packages/rust-shared/src/utils/general_/serde.rs +++ b/Packages/rust-shared/src/utils/general_/serde.rs @@ -1,34 +1,38 @@ -use std::{collections::{HashMap, BTreeMap}, env}; use anyhow::{anyhow, Error}; use serde::Serialize; -use serde_json::{value::Index, Value, Map}; +use serde_json::{value::Index, Map, Value}; use std::fmt::Debug; +use std::{ + collections::{BTreeMap, HashMap}, + env, +}; use crate::utils::type_aliases::JSONValue; pub fn to_json_value_for_borrowed_obj(value: &impl Serialize) -> Result { - let as_str = serde_json::to_string(value)?; - let as_json_value: JSONValue = serde_json::from_str(&as_str)?; - Ok(as_json_value) + let as_str = serde_json::to_string(value)?; + let as_json_value: JSONValue = serde_json::from_str(&as_str)?; + Ok(as_json_value) } // json-value extensions // ========== pub trait JSONValueV { - fn try_get(&self, index: I) -> Result<&Value, Error>; + fn try_get(&self, index: I) -> Result<&Value, Error>; fn try_as_bool(&self) -> Result; fn try_as_u64(&self) -> Result; 
fn try_as_i64(&self) -> Result; fn try_as_f64(&self) -> Result; fn try_as_str(&self) -> Result<&str, Error>; - fn try_as_array(&self) -> Result<&Vec, Error>; + fn try_as_array(&self) -> Result<&Vec, Error>; fn try_as_object(&self) -> Result<&Map, Error>; - // extras + // extras fn as_string(&self) -> Option; fn try_as_string(&self) -> Result; } +#[rustfmt::skip] impl JSONValueV for serde_json::Value { fn try_get(&self, index: I) -> Result<&Value, Error> { let index_str = format!("{:?}", index); @@ -52,20 +56,17 @@ impl JSONValueV for serde_json::Value { /// approach 1 for serializing HashMap with consistently-ordered (alphabetically) keys (from: https://stackoverflow.com/a/42723390) pub fn ordered_map(value: &HashMap, serializer: S) -> Result { - let ordered: BTreeMap<_, _> = value.iter().collect(); - ordered.serialize(serializer) + let ordered: BTreeMap<_, _> = value.iter().collect(); + ordered.serialize(serializer) } /// approach 2 for serializing HashMap (and such) with consistently-ordered (alphabetically) keys (from: https://stackoverflow.com/a/42723390) #[derive(Serialize)] //#[serde(crate = "rust_shared::serde")] -pub struct SortAlphabetically( - #[serde(serialize_with = "sort_alphabetically")] - T -); +pub struct SortAlphabetically(#[serde(serialize_with = "sort_alphabetically")] T); pub fn sort_alphabetically(value: &T, serializer: S) -> Result { - let value = serde_json::to_value(value).map_err(serde::ser::Error::custom)?; - value.serialize(serializer) + let value = serde_json::to_value(value).map_err(serde::ser::Error::custom)?; + value.serialize(serializer) } // approach 3 for serializing IndexMap, with insertion-order preserved -// [see IndexMapAGQL in gql_utils.rs] \ No newline at end of file +// [see IndexMapAGQL in gql_utils.rs] diff --git a/Packages/rust-shared/src/utils/k8s/cert_handling.rs b/Packages/rust-shared/src/utils/k8s/cert_handling.rs index 1a7485181..71a761ee9 100644 --- a/Packages/rust-shared/src/utils/k8s/cert_handling.rs +++ b/Packages/rust-shared/src/utils/k8s/cert_handling.rs @@ -1,11 +1,14 @@ -use std::{sync::Arc, fs::File, io::Read}; +use std::{fs::File, io::Read, sync::Arc}; use anyhow::{ensure, Context, Error}; use bytes::Bytes; use http_body_util::Full; use hyper::body::{self, Body}; use hyper_rustls::HttpsConnector; -use hyper_util::{client::legacy::{connect::HttpConnector, Client}, rt::TokioExecutor}; +use hyper_util::{ + client::legacy::{connect::HttpConnector, Client}, + rt::TokioExecutor, +}; use itertools::Itertools; use rustls::{pki_types::CertificateDer, ClientConfig, RootCertStore}; use tracing::{info, warn}; @@ -13,39 +16,39 @@ use tracing::{info, warn}; use crate::to_anyhow; pub fn get_k8s_certs() -> Result>, Error> { - let cert_file = File::open("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt").unwrap(); - let cert_file_reader = &mut std::io::BufReader::new(cert_file); - let certs: Vec = rustls_pemfile::certs(cert_file_reader).try_collect() - .map_err(|err| to_anyhow(err).context("Failed to parse a certificate from the k8s certificate file."))?; - ensure!(certs.len() > 0, "No certificates were found in the k8s certificate file."); - return Ok(certs); + let cert_file = File::open("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt").unwrap(); + let cert_file_reader = &mut std::io::BufReader::new(cert_file); + let certs: Vec = rustls_pemfile::certs(cert_file_reader).try_collect().map_err(|err| to_anyhow(err).context("Failed to parse a certificate from the k8s certificate file."))?; + ensure!(certs.len() > 0, "No certificates were 
found in the k8s certificate file."); + return Ok(certs); } pub fn get_rustls_config_that_accepts_k8s_certs() -> Result { - let mut store = rustls::RootCertStore::empty(); - for cert in get_k8s_certs()? { - store.add(cert).unwrap(); - } - Ok(ClientConfig::builder() - .with_root_certificates(store) - .with_no_client_auth()) + let mut store = rustls::RootCertStore::empty(); + for cert in get_k8s_certs()? { + store.add(cert).unwrap(); + } + Ok(ClientConfig::builder().with_root_certificates(store).with_no_client_auth()) } // this function was created for use by exec_command_in_another_pod; it may need tweaking to support other use-cases -pub fn get_hyper_client_with_k8s_certs() -> Result, B>, Error> where ::Data: Send { - let https = hyper_rustls::HttpsConnectorBuilder::new() - //.with_native_roots() - .with_tls_config(get_rustls_config_that_accepts_k8s_certs()?) - .https_only() - .enable_http1() - .build(); - Ok(Client::builder(TokioExecutor::new()).build(https)) +pub fn get_hyper_client_with_k8s_certs() -> Result, B>, Error> +where + ::Data: Send, +{ + let https = hyper_rustls::HttpsConnectorBuilder::new() + //.with_native_roots() + .with_tls_config(get_rustls_config_that_accepts_k8s_certs()?) + .https_only() + .enable_http1() + .build(); + Ok(Client::builder(TokioExecutor::new()).build(https)) } pub fn get_reqwest_client_with_k8s_certs() -> Result { - let mut builder = reqwest::ClientBuilder::new(); - for cert in get_k8s_certs()? { - let reqwest_cert = reqwest::Certificate::from_der(&cert)?; - builder = builder.add_root_certificate(reqwest_cert); - } - Ok(builder.build()?) -} \ No newline at end of file + let mut builder = reqwest::ClientBuilder::new(); + for cert in get_k8s_certs()? { + let reqwest_cert = reqwest::Certificate::from_der(&cert)?; + builder = builder.add_root_certificate(reqwest_cert); + } + Ok(builder.build()?) +} diff --git a/Packages/rust-shared/src/utils/k8s/k8s_client.rs b/Packages/rust-shared/src/utils/k8s/k8s_client.rs index 52448436b..337d9cbc2 100644 --- a/Packages/rust-shared/src/utils/k8s/k8s_client.rs +++ b/Packages/rust-shared/src/utils/k8s/k8s_client.rs @@ -6,52 +6,52 @@ use std::task::Poll; use anyhow::{anyhow, bail, Context, Error}; use bytes::Bytes; use http_body_util::{BodyExt, Empty, Full}; +use hyper::body::Body; use hyper::http::HeaderValue; use hyper::rt::{Read, ReadBuf, ReadBufCursor, Write}; use hyper::upgrade::Upgraded; use hyper::{http, Method, Request, Uri, Version}; +use hyper_rustls::HttpsConnector; use hyper_util::client::legacy::connect::{Connect, HttpConnector}; use hyper_util::client::legacy::Client; -use hyper_rustls::HttpsConnector; use hyper_util::rt::TokioIo; +use pin_project_lite::pin_project; use tokio::net::TcpStream; use tokio_tungstenite::tungstenite::protocol::WebSocketConfig; use tokio_tungstenite::{MaybeTlsStream, WebSocketStream}; -use hyper::body::Body; -use pin_project_lite::pin_project; use crate::utils::k8s::upgrade; /// Initiates an HTTPS connection to the URI specified within `request`, then immediately upgrades it to a WebSocket connection. /// This is different from simply connecting to a websocket endpoint, and it's necessary to connect to certain k8s endpoints. (eg. 
`exec` for pods) pub async fn upgrade_to_websocket(client: Client, B>, mut request: hyper::Request) -> Result>, Error> - where - ::Data: std::marker::Send, - ::Error: Into> +where + ::Data: std::marker::Send, + ::Error: Into>, { - // add various headers that are required by the websocket-upgrade process: https://stackoverflow.com/a/42334717 - //let uri = request.uri().clone(); - let headers = request.headers_mut(); - //headers.insert(http::header::HOST, uri.host().unwrap().parse().expect("valid header value")); // commented; link above says it's necessary, but doesn't seem to be (and not sure if this value is correct) - headers.insert(http::header::CONNECTION, HeaderValue::from_static("Upgrade")); - headers.insert(http::header::UPGRADE, HeaderValue::from_static("websocket")); - headers.insert(http::header::SEC_WEBSOCKET_VERSION, HeaderValue::from_static("13")); - let key = upgrade::sec_websocket_key(); - headers.insert(http::header::SEC_WEBSOCKET_KEY, key.parse().expect("valid header value")); - // Use the binary subprotocol v4, to get JSON `Status` object in `error` channel (3). - // There's no official documentation about this protocol, but it's described in [`k8s.io/apiserver/pkg/util/wsstream/conn.go`](https://git.io/JLQED). - // There's a comment about v4 and `Status` object in [`kublet/cri/streaming/remotecommand/httpstream.go`](https://git.io/JLQEh). - headers.insert(http::header::SEC_WEBSOCKET_PROTOCOL, HeaderValue::from_static(upgrade::WS_PROTOCOL)); + // add various headers that are required by the websocket-upgrade process: https://stackoverflow.com/a/42334717 + //let uri = request.uri().clone(); + let headers = request.headers_mut(); + //headers.insert(http::header::HOST, uri.host().unwrap().parse().expect("valid header value")); // commented; link above says it's necessary, but doesn't seem to be (and not sure if this value is correct) + headers.insert(http::header::CONNECTION, HeaderValue::from_static("Upgrade")); + headers.insert(http::header::UPGRADE, HeaderValue::from_static("websocket")); + headers.insert(http::header::SEC_WEBSOCKET_VERSION, HeaderValue::from_static("13")); + let key = upgrade::sec_websocket_key(); + headers.insert(http::header::SEC_WEBSOCKET_KEY, key.parse().expect("valid header value")); + // Use the binary subprotocol v4, to get JSON `Status` object in `error` channel (3). + // There's no official documentation about this protocol, but it's described in [`k8s.io/apiserver/pkg/util/wsstream/conn.go`](https://git.io/JLQED). + // There's a comment about v4 and `Status` object in [`kublet/cri/streaming/remotecommand/httpstream.go`](https://git.io/JLQEh). 
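For context, a hedged call-site sketch of how the cert and websocket-upgrade helpers in these two files are meant to be combined; the namespace, pod name, and exec query-string are hypothetical, `exec_ls_in_pod` is an illustrative name, and the elided generic bounds on `get_hyper_client_with_k8s_certs` are assumed from its return type:

use anyhow::Error;
use bytes::Bytes;
use http_body_util::Full;
use hyper::{Method, Request};

async fn exec_ls_in_pod() -> Result<(), Error> {
    // hyper client that trusts the in-cluster k8s CA (see cert_handling.rs above)
    let client = get_hyper_client_with_k8s_certs::<Full<Bytes>>()?;
    // hypothetical pod-exec endpoint; upgrade_to_websocket adds the Upgrade/Sec-WebSocket-* headers itself
    let request = Request::builder()
        .method(Method::GET)
        .uri("https://kubernetes.default.svc/api/v1/namespaces/default/pods/my-pod/exec?command=ls&stdout=1")
        .body(Full::new(Bytes::new()))?;
    let _ws_stream = upgrade_to_websocket(client, request).await?;
    Ok(())
}
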
+ headers.insert(http::header::SEC_WEBSOCKET_PROTOCOL, HeaderValue::from_static(upgrade::WS_PROTOCOL)); - let res = client.request(request).await.context("Failed in client.request.")?; + let res = client.request(request).await.context("Failed in client.request.")?; - upgrade::verify_response(&res, &key).context("Failed to verify response.")?; //.map_err(Error::UpgradeConnection)?; - match hyper::upgrade::on(res).await { - Ok(upgraded) => { - let upgraded_wrapped = TokioIo::new(upgraded); - Ok(WebSocketStream::from_raw_socket(upgraded_wrapped, tokio_tungstenite::tungstenite::protocol::Role::Client, None).await) - } - /*Err(e) => Err(Error::UpgradeConnection(UpgradeConnectionError::GetPendingUpgrade(e))),*/ - Err(e) => bail!("Hit error: {:?}", e) - } -} \ No newline at end of file + upgrade::verify_response(&res, &key).context("Failed to verify response.")?; //.map_err(Error::UpgradeConnection)?; + match hyper::upgrade::on(res).await { + Ok(upgraded) => { + let upgraded_wrapped = TokioIo::new(upgraded); + Ok(WebSocketStream::from_raw_socket(upgraded_wrapped, tokio_tungstenite::tungstenite::protocol::Role::Client, None).await) + }, + /*Err(e) => Err(Error::UpgradeConnection(UpgradeConnectionError::GetPendingUpgrade(e))),*/ + Err(e) => bail!("Hit error: {:?}", e), + } +} diff --git a/Packages/rust-shared/src/utils/k8s/k8s_structs.rs b/Packages/rust-shared/src/utils/k8s/k8s_structs.rs index 26bfff6a3..a968e7d0e 100644 --- a/Packages/rust-shared/src/utils/k8s/k8s_structs.rs +++ b/Packages/rust-shared/src/utils/k8s/k8s_structs.rs @@ -1,11 +1,11 @@ use super::super::type_aliases::JSONValue; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize)] pub struct K8sSecret { - pub apiVersion: String, - pub data: JSONValue, - pub metadata: JSONValue, - pub kind: String, - pub r#type: String, -} \ No newline at end of file + pub apiVersion: String, + pub data: JSONValue, + pub metadata: JSONValue, + pub kind: String, + pub r#type: String, +} diff --git a/Packages/rust-shared/src/utils/k8s/upgrade.rs b/Packages/rust-shared/src/utils/k8s/upgrade.rs index 53b0e6961..f756e29ef 100644 --- a/Packages/rust-shared/src/utils/k8s/upgrade.rs +++ b/Packages/rust-shared/src/utils/k8s/upgrade.rs @@ -1,6 +1,9 @@ use bytes::Bytes; use http_body_util::Full; -use hyper::{body::Body, http::{self, Response, StatusCode}}; +use hyper::{ + body::Body, + http::{self, Response, StatusCode}, +}; use jwt_simple::reexports::rand; use thiserror::Error; use tokio_tungstenite::tungstenite as ws; @@ -14,66 +17,65 @@ pub const WS_PROTOCOL: &str = "v4.channel.k8s.io"; /// Generate a random key for the `Sec-WebSocket-Key` header. /// This must be nonce consisting of a randomly selected 16-byte value in base64. pub fn sec_websocket_key() -> String { - let r: [u8; 16] = rand::random(); - base64::encode(r) + let r: [u8; 16] = rand::random(); + base64::encode(r) } /// Possible errors from upgrading to a WebSocket connection #[derive(Debug, Error)] pub enum UpgradeConnectionError { - /// The server did not respond with [`SWITCHING_PROTOCOLS`] status when upgrading the - /// connection. - /// - /// [`SWITCHING_PROTOCOLS`]: http::status::StatusCode::SWITCHING_PROTOCOLS - #[error("failed to switch protocol: {0}")] - ProtocolSwitch(http::status::StatusCode), + /// The server did not respond with [`SWITCHING_PROTOCOLS`] status when upgrading the + /// connection. 
+ /// + /// [`SWITCHING_PROTOCOLS`]: http::status::StatusCode::SWITCHING_PROTOCOLS + #[error("failed to switch protocol: {0}")] + ProtocolSwitch(http::status::StatusCode), - /// `Upgrade` header was not set to `websocket` (case insensitive) - #[error("upgrade header was not set to websocket")] - MissingUpgradeWebSocketHeader, + /// `Upgrade` header was not set to `websocket` (case insensitive) + #[error("upgrade header was not set to websocket")] + MissingUpgradeWebSocketHeader, - /// `Connection` header was not set to `Upgrade` (case insensitive) - #[error("connection header was not set to Upgrade")] - MissingConnectionUpgradeHeader, + /// `Connection` header was not set to `Upgrade` (case insensitive) + #[error("connection header was not set to Upgrade")] + MissingConnectionUpgradeHeader, - /// `Sec-WebSocket-Accept` key mismatched. - #[error("Sec-WebSocket-Accept key mismatched")] - SecWebSocketAcceptKeyMismatch, + /// `Sec-WebSocket-Accept` key mismatched. + #[error("Sec-WebSocket-Accept key mismatched")] + SecWebSocketAcceptKeyMismatch, - /// `Sec-WebSocket-Protocol` mismatched. - #[error("Sec-WebSocket-Protocol mismatched")] - SecWebSocketProtocolMismatch, + /// `Sec-WebSocket-Protocol` mismatched. + #[error("Sec-WebSocket-Protocol mismatched")] + SecWebSocketProtocolMismatch, - /// Failed to get pending HTTP upgrade. - #[error("failed to get pending HTTP upgrade: {0}")] - GetPendingUpgrade(#[source] hyper::Error), + /// Failed to get pending HTTP upgrade. + #[error("failed to get pending HTTP upgrade: {0}")] + GetPendingUpgrade(#[source] hyper::Error), } - /// Verify upgrade response according to RFC6455. /// Based on `tungstenite` and added subprotocol verification. pub fn verify_response(res: &Response, key: &str) -> Result<(), UpgradeConnectionError> { - if res.status() != StatusCode::SWITCHING_PROTOCOLS { - return Err(UpgradeConnectionError::ProtocolSwitch(res.status())); - } + if res.status() != StatusCode::SWITCHING_PROTOCOLS { + return Err(UpgradeConnectionError::ProtocolSwitch(res.status())); + } - let headers = res.headers(); - if !headers.get(http::header::UPGRADE).and_then(|h| h.to_str().ok()).map(|h| h.eq_ignore_ascii_case("websocket")).unwrap_or(false) { - return Err(UpgradeConnectionError::MissingUpgradeWebSocketHeader); - } + let headers = res.headers(); + if !headers.get(http::header::UPGRADE).and_then(|h| h.to_str().ok()).map(|h| h.eq_ignore_ascii_case("websocket")).unwrap_or(false) { + return Err(UpgradeConnectionError::MissingUpgradeWebSocketHeader); + } - if !headers.get(http::header::CONNECTION).and_then(|h| h.to_str().ok()).map(|h| h.eq_ignore_ascii_case("Upgrade")).unwrap_or(false) { - return Err(UpgradeConnectionError::MissingConnectionUpgradeHeader); - } + if !headers.get(http::header::CONNECTION).and_then(|h| h.to_str().ok()).map(|h| h.eq_ignore_ascii_case("Upgrade")).unwrap_or(false) { + return Err(UpgradeConnectionError::MissingConnectionUpgradeHeader); + } - let accept_key = ws::handshake::derive_accept_key(key.as_ref()); - if !headers.get(http::header::SEC_WEBSOCKET_ACCEPT).map(|h| h == &accept_key).unwrap_or(false) { - return Err(UpgradeConnectionError::SecWebSocketAcceptKeyMismatch); - } - // Make sure that the server returned the correct subprotocol. 
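As a side-note on the accept-key comparison just above: a small self-contained sketch of the RFC 6455 derivation that `derive_accept_key` performs; the nonce/accept pair is the worked example from RFC 6455 itself, and `demo_accept_key` is just an illustrative name:

use tokio_tungstenite::tungstenite as ws;

fn demo_accept_key() {
    // RFC 6455 example nonce; the accept value is base64(SHA-1(nonce + fixed GUID))
    let client_key = "dGhlIHNhbXBsZSBub25jZQ==";
    let accept = ws::handshake::derive_accept_key(client_key.as_bytes());
    assert_eq!(accept, "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=");
}
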
- if !headers.get(http::header::SEC_WEBSOCKET_PROTOCOL).map(|h| h == WS_PROTOCOL).unwrap_or(false) { - return Err(UpgradeConnectionError::SecWebSocketProtocolMismatch); - } + let accept_key = ws::handshake::derive_accept_key(key.as_ref()); + if !headers.get(http::header::SEC_WEBSOCKET_ACCEPT).map(|h| h == &accept_key).unwrap_or(false) { + return Err(UpgradeConnectionError::SecWebSocketAcceptKeyMismatch); + } + // Make sure that the server returned the correct subprotocol. + if !headers.get(http::header::SEC_WEBSOCKET_PROTOCOL).map(|h| h == WS_PROTOCOL).unwrap_or(false) { + return Err(UpgradeConnectionError::SecWebSocketProtocolMismatch); + } - Ok(()) -} \ No newline at end of file + Ok(()) +} diff --git a/Packages/rust-shared/src/utils/locks/check_lock_order.rs b/Packages/rust-shared/src/utils/locks/check_lock_order.rs index 0298a7775..cc09ef3a8 100644 --- a/Packages/rust-shared/src/utils/locks/check_lock_order.rs +++ b/Packages/rust-shared/src/utils/locks/check_lock_order.rs @@ -2,11 +2,11 @@ use std::marker::ConstParamTy; #[derive(PartialEq, Eq, ConstParamTy)] pub enum Lock { - unknown_prior = 0, - //LQGroup_groups_x = 1, - LQGroup_batches_x = 1, - LQInstance_entry_watchers = 2, - LQInstance_last_entries = 3, // sync this value with macro below + unknown_prior = 0, + //LQGroup_groups_x = 1, + LQGroup_batches_x = 1, + LQInstance_entry_watchers = 2, + LQInstance_last_entries = 3, // sync this value with macro below } // these macros are needed for cases where comparisons are done in the "where" clause of a function (where Lock::X cannot be used) #[macro_export] @@ -20,10 +20,12 @@ impl IsTrue for Assert {} /// * Usage: Whenever a given scope already holds a guard/lock of a type listed in the `Lock` enum ("T1"), and is about to acquire another ("T2"), call this function with T1 and T2 as const-parameters. /// * Effect: The Rust compiler checks whether the "order value" of T1 is lower than that of T2 (as determined by the usize values in the Lock enum); if not, a compile-time error is thrown. pub fn check_lock_order() - where Assert::<{(T1 as usize) < (T2 as usize)}>: IsTrue, -{} +where + Assert<{ (T1 as usize) < (T2 as usize) }>: IsTrue, +{ +} /*pub fn check_lock_order_usize() - where Assert::<{T1 < T2}>: IsTrue, + where Assert::<{T1 < T2}>: IsTrue, {}*/ #[macro_export] @@ -41,4 +43,4 @@ macro_rules! check_lock_chain { $crate::utils::locks::rwlock_tracked::check_lock_order::<$lt3, $lt4>(); }; } -pub(crate) use check_lock_chain; \ No newline at end of file +pub(crate) use check_lock_chain; diff --git a/Packages/rust-shared/src/utils/locks/rwlock_tracked.rs b/Packages/rust-shared/src/utils/locks/rwlock_tracked.rs index a8426b68a..d14b5d1b9 100644 --- a/Packages/rust-shared/src/utils/locks/rwlock_tracked.rs +++ b/Packages/rust-shared/src/utils/locks/rwlock_tracked.rs @@ -1,5 +1,11 @@ -use std::{sync::{Mutex, Arc, atomic::{Ordering, AtomicI64}}, ops::{Deref, DerefMut}}; use anyhow::{anyhow, bail, Error}; +use std::{ + ops::{Deref, DerefMut}, + sync::{ + atomic::{AtomicI64, Ordering}, + Arc, Mutex, + }, +}; use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; /// Wrapper around RwLock, which: @@ -8,129 +14,125 @@ use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; /// 3) Provides `read_checked` and `write_checked` functions; these allow you to get a read-lock, drop it, then get a write-lock later, while confirming /// that no other threads have obtained a write-lock during the "lock-free" period. 
(if they have, then the `write_checked` function will return an error) pub struct RwLock_Tracked { - l: RwLock, - live_guards: Arc>>, - write_guards_acquired: AtomicI64, + l: RwLock, + live_guards: Arc>>, + write_guards_acquired: AtomicI64, } impl RwLock_Tracked { - pub fn new(lock_value: T) -> Self { - Self { - l: RwLock::new(lock_value), - live_guards: Arc::new(Mutex::new(vec![])), - write_guards_acquired: AtomicI64::new(0), - } - } + pub fn new(lock_value: T) -> Self { + Self { l: RwLock::new(lock_value), live_guards: Arc::new(Mutex::new(vec![])), write_guards_acquired: AtomicI64::new(0) } + } - pub async fn read(&self, caller: &str) -> RwLockReadGuard_Tracked<'_, T> { - let base_guard = self.l.read().await; - RwLockReadGuard_Tracked::new(base_guard, self.live_guards.clone(), caller.to_owned()) - } - pub async fn write(&self, caller: &str) -> RwLockWriteGuard_Tracked<'_, T> { - let base_guard = self.l.write().await; - // on orderings, see: https://stackoverflow.com/a/33293463 and https://reddit.com/r/rust/comments/p9a740 - self.write_guards_acquired.fetch_add(1, Ordering::Acquire); - RwLockWriteGuard_Tracked::new(base_guard, self.live_guards.clone(), caller.to_owned()) - } + pub async fn read(&self, caller: &str) -> RwLockReadGuard_Tracked<'_, T> { + let base_guard = self.l.read().await; + RwLockReadGuard_Tracked::new(base_guard, self.live_guards.clone(), caller.to_owned()) + } + pub async fn write(&self, caller: &str) -> RwLockWriteGuard_Tracked<'_, T> { + let base_guard = self.l.write().await; + // on orderings, see: https://stackoverflow.com/a/33293463 and https://reddit.com/r/rust/comments/p9a740 + self.write_guards_acquired.fetch_add(1, Ordering::Acquire); + RwLockWriteGuard_Tracked::new(base_guard, self.live_guards.clone(), caller.to_owned()) + } - /// Same as `read`, except also returns a "prior write-lock count", which can later be provided to `write_checked`, to - /// confirm that no other threads have obtained a write-lock during the "lock-free" period. (if they have, then the `write_checked` function will return an error) - pub async fn read_checked(&self, caller: &str) -> (RwLockReadGuard_Tracked<'_, T>, i64) { - //let prior_write_lock_count = self.live_guards.lock().unwrap().iter().filter(|a| a.ends_with(" [write]")).count() as i64; - // on orderings, see: https://stackoverflow.com/a/33293463 and https://reddit.com/r/rust/comments/p9a740 - let prior_write_lock_count = self.write_guards_acquired.load(Ordering::Acquire); - - let guard = self.read(caller).await; - (guard, prior_write_lock_count) - } - /// Same as `write`, except also checks whether other write-locks have been acquired since the time that the associated `read_checked` call as made. (ie. during this thread's "lock-free" period) - /// If other write-locks were acquired during that period, then this `write_checked` function will return an error. (meant to be used for call-paths that are "retry-capable") - pub async fn write_checked(&self, caller: &str, old_write_lock_count: i64) -> Result, Error> { - let guard = self.write(caller).await; + /// Same as `read`, except also returns a "prior write-lock count", which can later be provided to `write_checked`, to + /// confirm that no other threads have obtained a write-lock during the "lock-free" period. 
(if they have, then the `write_checked` function will return an error) + pub async fn read_checked(&self, caller: &str) -> (RwLockReadGuard_Tracked<'_, T>, i64) { + //let prior_write_lock_count = self.live_guards.lock().unwrap().iter().filter(|a| a.ends_with(" [write]")).count() as i64; + // on orderings, see: https://stackoverflow.com/a/33293463 and https://reddit.com/r/rust/comments/p9a740 + let prior_write_lock_count = self.write_guards_acquired.load(Ordering::Acquire); - // check whether any other write-locks were acquired during the "lock-free" period + let guard = self.read(caller).await; + (guard, prior_write_lock_count) + } + /// Same as `write`, except also checks whether other write-locks have been acquired since the time that the associated `read_checked` call as made. (ie. during this thread's "lock-free" period) + /// If other write-locks were acquired during that period, then this `write_checked` function will return an error. (meant to be used for call-paths that are "retry-capable") + pub async fn write_checked(&self, caller: &str, old_write_lock_count: i64) -> Result, Error> { + let guard = self.write(caller).await; - //let new_write_lock_count = self.live_guards.lock().unwrap().iter().filter(|a| a.ends_with(" [write]")).count() as i64; - // on orderings, see: https://stackoverflow.com/a/33293463 and https://reddit.com/r/rust/comments/p9a740 - let new_write_lock_count = self.write_guards_acquired.load(Ordering::Acquire); + // check whether any other write-locks were acquired during the "lock-free" period - let other_write_locks_acquired = (new_write_lock_count - old_write_lock_count) - 1; - if other_write_locks_acquired > 0 { - bail!("write_checked failed: {other_write_locks_acquired} other write-lock(s) were acquired between the calling of `read_checked` and `write_checked`."); - } - Ok(guard) - } + //let new_write_lock_count = self.live_guards.lock().unwrap().iter().filter(|a| a.ends_with(" [write]")).count() as i64; + // on orderings, see: https://stackoverflow.com/a/33293463 and https://reddit.com/r/rust/comments/p9a740 + let new_write_lock_count = self.write_guards_acquired.load(Ordering::Acquire); - pub fn get_live_guards(&self) -> Vec { - let live_guards = self.live_guards.lock().unwrap(); - live_guards.iter().map(|a| a.clone()).collect::>() - } - pub fn get_live_guards_str(&self) -> String { - let live_guards = self.live_guards.lock().unwrap(); - if live_guards.len() == 0 { - return "[none]".to_owned(); - } - live_guards.iter().map(|a| a.clone()).collect::>().join(" ; ") - } + let other_write_locks_acquired = (new_write_lock_count - old_write_lock_count) - 1; + if other_write_locks_acquired > 0 { + bail!("write_checked failed: {other_write_locks_acquired} other write-lock(s) were acquired between the calling of `read_checked` and `write_checked`."); + } + Ok(guard) + } + + pub fn get_live_guards(&self) -> Vec { + let live_guards = self.live_guards.lock().unwrap(); + live_guards.iter().map(|a| a.clone()).collect::>() + } + pub fn get_live_guards_str(&self) -> String { + let live_guards = self.live_guards.lock().unwrap(); + if live_guards.len() == 0 { + return "[none]".to_owned(); + } + live_guards.iter().map(|a| a.clone()).collect::>().join(" ; ") + } } pub struct RwLockReadGuard_Tracked<'a, T> { - base: RwLockReadGuard<'a, T>, - live_guards: Arc>>, - access_key: String, + base: RwLockReadGuard<'a, T>, + live_guards: Arc>>, + access_key: String, } impl<'a, T> RwLockReadGuard_Tracked<'a, T> { - pub fn new(base_guard: RwLockReadGuard<'a, T>, live_guards: Arc>>, 
caller: String) -> Self { - let access_key = caller + " [read]"; - live_guards.lock().unwrap().push(access_key.clone()); - Self { base: base_guard, live_guards, access_key } - } + pub fn new(base_guard: RwLockReadGuard<'a, T>, live_guards: Arc>>, caller: String) -> Self { + let access_key = caller + " [read]"; + live_guards.lock().unwrap().push(access_key.clone()); + Self { base: base_guard, live_guards, access_key } + } } impl<'a, T> Deref for RwLockReadGuard_Tracked<'a, T> { - type Target = RwLockReadGuard<'a, T>; - fn deref(&self) -> &Self::Target { - &self.base - } + type Target = RwLockReadGuard<'a, T>; + fn deref(&self) -> &Self::Target { + &self.base + } } impl<'a, T> DerefMut for RwLockReadGuard_Tracked<'a, T> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.base - } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.base + } } impl<'a, T> Drop for RwLockReadGuard_Tracked<'a, T> { - fn drop(&mut self) { - let mut live_guards = self.live_guards.lock().unwrap(); - let index = live_guards.iter().position(|a| *a == self.access_key).unwrap(); - live_guards.remove(index); - } + fn drop(&mut self) { + let mut live_guards = self.live_guards.lock().unwrap(); + let index = live_guards.iter().position(|a| *a == self.access_key).unwrap(); + live_guards.remove(index); + } } pub struct RwLockWriteGuard_Tracked<'a, T> { - base: RwLockWriteGuard<'a, T>, - live_guards: Arc>>, - access_key: String, + base: RwLockWriteGuard<'a, T>, + live_guards: Arc>>, + access_key: String, } impl<'a, T> RwLockWriteGuard_Tracked<'a, T> { - pub fn new(base_guard: RwLockWriteGuard<'a, T>, live_guards: Arc>>, caller: String) -> Self { - let access_key = caller + " [write]"; - live_guards.lock().unwrap().push(access_key.clone()); - Self { base: base_guard, live_guards, access_key } - } + pub fn new(base_guard: RwLockWriteGuard<'a, T>, live_guards: Arc>>, caller: String) -> Self { + let access_key = caller + " [write]"; + live_guards.lock().unwrap().push(access_key.clone()); + Self { base: base_guard, live_guards, access_key } + } } impl<'a, T> Deref for RwLockWriteGuard_Tracked<'a, T> { - type Target = RwLockWriteGuard<'a, T>; - fn deref(&self) -> &Self::Target { - &self.base - } + type Target = RwLockWriteGuard<'a, T>; + fn deref(&self) -> &Self::Target { + &self.base + } } impl<'a, T> DerefMut for RwLockWriteGuard_Tracked<'a, T> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.base - } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.base + } } impl<'a, T> Drop for RwLockWriteGuard_Tracked<'a, T> { - fn drop(&mut self) { - let mut live_guards = self.live_guards.lock().unwrap(); - let index = live_guards.iter().position(|a| *a == self.access_key).unwrap(); - live_guards.remove(index); - } -} \ No newline at end of file + fn drop(&mut self) { + let mut live_guards = self.live_guards.lock().unwrap(); + let index = live_guards.iter().position(|a| *a == self.access_key).unwrap(); + live_guards.remove(index); + } +} diff --git a/Packages/rust-shared/src/utils/mtx/mtx.rs b/Packages/rust-shared/src/utils/mtx/mtx.rs index b8d1de535..3a2d592b3 100644 --- a/Packages/rust-shared/src/utils/mtx/mtx.rs +++ b/Packages/rust-shared/src/utils/mtx/mtx.rs @@ -2,12 +2,8 @@ // (note: this compilation-flag is active/"debug" when opt-level is exactly 0; see: https://github.com/rust-lang/rust/blob/44d679b9021f03a79133021b94e6d23e9b55b3ab/compiler/rustc_session/src/config.rs#L2519) // ========== -#[cfg(debug_assertions)] -mod mtx_real; -#[cfg(debug_assertions)] -pub use mtx_real::*; 
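As a usage illustration of the `read_checked`/`write_checked` pair on `RwLock_Tracked` above, here is a minimal sketch of the optimistic read-then-write pattern it enables; the function name and import paths are hypothetical, not part of this diff.

use anyhow::Error; // assumed re-export; rust-shared normally routes this through rust_shared::anyhow
use rust_shared::utils::locks::rwlock_tracked::RwLock_Tracked; // assumed module path

async fn bump_if_unchanged(lock: &RwLock_Tracked<i64>) -> Result<(), Error> {
	loop {
		// Take a read-lock, and record the count of write-locks acquired so far.
		let (read_guard, prior_write_count) = lock.read_checked("bump_if_unchanged").await;
		let next = **read_guard + 1; // double-deref: tracked guard -> tokio guard -> i64
		drop(read_guard); // the "lock-free" period starts here

		// Re-acquire as a write-lock; this errors if any other writer got in during the gap.
		match lock.write_checked("bump_if_unchanged", prior_write_count).await {
			Ok(mut write_guard) => {
				**write_guard = next;
				return Ok(());
			},
			Err(_) => continue, // another writer intervened; recompute and retry
		}
	}
}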
+#[cfg(debug_assertions)] mod mtx_real; +#[cfg(debug_assertions)] pub use mtx_real::*; -#[cfg(not(debug_assertions))] -mod mtx_stub; -#[cfg(not(debug_assertions))] -pub use mtx_stub::*; \ No newline at end of file +#[cfg(not(debug_assertions))] mod mtx_stub; +#[cfg(not(debug_assertions))] pub use mtx_stub::*; diff --git a/Packages/rust-shared/src/utils/mtx/mtx/mtx_real.rs b/Packages/rust-shared/src/utils/mtx/mtx/mtx_real.rs index 11fdfaeff..e581b1ec5 100644 --- a/Packages/rust-shared/src/utils/mtx/mtx/mtx_real.rs +++ b/Packages/rust-shared/src/utils/mtx/mtx/mtx_real.rs @@ -1,354 +1,350 @@ -use std::{sync::{Arc, RwLock, RwLockWriteGuard}, cell::RefCell, time::{Instant, Duration}, borrow::Cow, rc::Rc, collections::HashMap}; - -use crate::utils::type_aliases::{JSONValue, FSender, FReceiver}; +use std::{ + borrow::Cow, + cell::RefCell, + collections::HashMap, + rc::Rc, + sync::{Arc, RwLock, RwLockWriteGuard}, + time::{Duration, Instant}, +}; + +use crate::utils::type_aliases::{FReceiver, FSender, JSONValue}; use anyhow::Error; use async_graphql::SimpleObject; -use once_cell::sync::{OnceCell, Lazy}; +use flume::{Receiver, Sender}; +use once_cell::sync::{Lazy, OnceCell}; use serde_json; use tokio; -use flume::{Sender, Receiver}; use crate::hyper::{Method, Request}; -use indexmap::IndexMap; use crate::rust_macros::wrap_slow_macros; -use crate::utils::time::time_since_epoch_ms; -use crate::serde::{Serialize, Deserialize}; +use crate::serde::{Deserialize, Serialize}; use crate::serde_json::{json, Map}; -use crate::tokio::{time}; -use tracing::{trace, error, info, warn}; +use crate::tokio::time; +use crate::utils::time::time_since_epoch_ms; use crate::uuid::Uuid; +use indexmap::IndexMap; +use tracing::{error, info, trace, warn}; #[macro_export] macro_rules! fn_name { - () => {{ - fn f() {} - fn type_name_of(_: T) -> &'static str { - std::any::type_name::() - } - let name = type_name_of(f); - let mut result = &name[..name.len() - 3]; - - // trim "::{{closure}}" from the end, if present (such is present for async functions) - result = result.trim_end_matches("::{{closure}}"); - - // trim path to function from func-name (eg: trims "my::path::my_func" to "my_func") - if let Some(pos) = &result.rfind(':') { - result = &result[pos + 1..]; - } - - result - }} + () => {{ + fn f() {} + fn type_name_of(_: T) -> &'static str { + std::any::type_name::() + } + let name = type_name_of(f); + let mut result = &name[..name.len() - 3]; + + // trim "::{{closure}}" from the end, if present (such is present for async functions) + result = result.trim_end_matches("::{{closure}}"); + + // trim path to function from func-name (eg: trims "my::path::my_func" to "my_func") + if let Some(pos) = &result.rfind(':') { + result = &result[pos + 1..]; + } + + result + }}; } pub use fn_name; #[macro_export] macro_rules! 
new_mtx { - ($mtx:ident, $first_section_name:expr) => { - $crate::utils::mtx::mtx::new_mtx!($mtx, $first_section_name, None); - }; - ($mtx:ident, $first_section_name:expr, $parent_mtx:expr) => { - $crate::utils::mtx::mtx::new_mtx!($mtx, $first_section_name, $parent_mtx, None); - }; - ($mtx:ident, $first_section_name:expr, $parent_mtx:expr, $extra_info:expr) => { - let parent_mtx: Option<&$crate::utils::mtx::mtx::Mtx> = $parent_mtx; - #[allow(unused_mut)] - let mut $mtx = $crate::utils::mtx::mtx::Mtx::new($crate::utils::mtx::mtx::fn_name!(), $first_section_name, parent_mtx, $extra_info); - }; + ($mtx:ident, $first_section_name:expr) => { + $crate::utils::mtx::mtx::new_mtx!($mtx, $first_section_name, None); + }; + ($mtx:ident, $first_section_name:expr, $parent_mtx:expr) => { + $crate::utils::mtx::mtx::new_mtx!($mtx, $first_section_name, $parent_mtx, None); + }; + ($mtx:ident, $first_section_name:expr, $parent_mtx:expr, $extra_info:expr) => { + let parent_mtx: Option<&$crate::utils::mtx::mtx::Mtx> = $parent_mtx; + #[allow(unused_mut)] + let mut $mtx = $crate::utils::mtx::mtx::Mtx::new($crate::utils::mtx::mtx::fn_name!(), $first_section_name, parent_mtx, $extra_info); + }; } pub use new_mtx; /*pub fn new_mtx_impl<'a>(fn_name_final: &str, first_section_name: &str, parent_mtx: Option<&'a mut Mtx>) -> (Option, Option<&'a mut Mtx>) { - let mut mtx = Mtx::new(fn_name_final); - mtx.section(first_section_name); - if let Some(parent_mtx) = parent_mtx { - let mtx_borrow = parent_mtx.add_sub(mtx); - return (None, Some(mtx_borrow)); - } else { - return (Some(mtx), None); - /*let mtx_borrow = &mut mtx; - return (None, Some(mtx_borrow));*/ - } + let mut mtx = Mtx::new(fn_name_final); + mtx.section(first_section_name); + if let Some(parent_mtx) = parent_mtx { + let mtx_borrow = parent_mtx.add_sub(mtx); + return (None, Some(mtx_borrow)); + } else { + return (Some(mtx), None); + /*let mtx_borrow = &mut mtx; + return (None, Some(mtx_borrow));*/ + } }*/ /*pub fn new_mtx_impl<'a>(fn_name_final: &str, first_section_name: &str, parent_mtx: &'a mut Mtx) -> &'a mut Mtx { - let mut mtx = Mtx::new(fn_name_final); - mtx.section(first_section_name); - let mtx_borrow = parent_mtx.add_sub(mtx); - return mtx_borrow; + let mut mtx = Mtx::new(fn_name_final); + mtx.section(first_section_name); + let mtx_borrow = parent_mtx.add_sub(mtx); + return mtx_borrow; }*/ /*pub fn new_mtx_impl<'a>(fn_name_final: &str, first_section_name: &str) -> Mtx { - let mut mtx = Mtx::new(fn_name_final); - mtx.section(first_section_name); - mtx + let mut mtx = Mtx::new(fn_name_final); + mtx.section(first_section_name); + mtx }*/ pub enum MtxMessage { - /// tuple.0 is the section's path and time (see section_lifetimes description); tuple.1 is the SectionLifetime struct, with times as ms-since-epoch - UpdateSectionLifetime(String, MtxSection), + /// tuple.0 is the section's path and time (see section_lifetimes description); tuple.1 is the SectionLifetime struct, with times as ms-since-epoch + UpdateSectionLifetime(String, MtxSection), } impl MtxMessage { - pub fn apply_messages_to_mtx_data( - mtx_section_lifetimes: &Arc>>, - //mtx_section_lifetimes: &Arc>, - messages: impl Iterator - ) { - let mut section_lifetimes = mtx_section_lifetimes.write().unwrap(); - /*let guard = mtx_section_lifetimes.guard(); - let section_lifetimes = mtx_section_lifetimes.with_guard(&guard);*/ - for msg in messages { - msg.apply_to_mtx_data(&mut section_lifetimes); - } - } - pub fn apply_to_mtx_data(self, - section_lifetimes_ref: &mut RwLockWriteGuard> - 
//section_lifetimes_ref: &flurry::HashMapRef - ) { - match self { - MtxMessage::UpdateSectionLifetime(path_and_time, lifetime) => { - section_lifetimes_ref.insert(path_and_time, lifetime); - } - } - } + pub fn apply_messages_to_mtx_data( + mtx_section_lifetimes: &Arc>>, + //mtx_section_lifetimes: &Arc>, + messages: impl Iterator, + ) { + let mut section_lifetimes = mtx_section_lifetimes.write().unwrap(); + /*let guard = mtx_section_lifetimes.guard(); + let section_lifetimes = mtx_section_lifetimes.with_guard(&guard);*/ + for msg in messages { + msg.apply_to_mtx_data(&mut section_lifetimes); + } + } + pub fn apply_to_mtx_data( + self, + section_lifetimes_ref: &mut RwLockWriteGuard>, //section_lifetimes_ref: &flurry::HashMapRef + ) { + match self { + MtxMessage::UpdateSectionLifetime(path_and_time, lifetime) => { + section_lifetimes_ref.insert(path_and_time, lifetime); + }, + } + } } //#[derive(Serialize)] #[derive(Debug)] pub struct Mtx { - pub id: Arc, - pub func_name: String, - /// Note that the "path_from_root_mtx" may not be unique! (eg. if root-func calls the same nested-func twice, in same section) - pub path_from_root_mtx: String, - //pub extra_info: String, - - pub current_section: MtxSection, - /// This field holds the timings of all sections in the root mtx-enabled function, as well as any mtx-enabled functions called underneath it (where the root mtx is passed). - /// Entry's key is the "path" to the section + ";" + the section's start-time, eg: root_func/part1/other_func/part3;1649321920506.4802 - /// Entry's value is `SectionLifetime` struct, containing the start-time and duration of the section (stored as fractional milliseconds), etc. - pub section_lifetimes: Arc>>, - //pub section_lifetimes: Arc>, - - // communication helpers - pub msg_sender: Sender, - pub msg_receiver: Receiver, - - //pub parent: Option<&'a mut Mtx<'b, 'b>>, - //pub parent: Option<&'a RefCell>, - //#[serde(skip)] pub parent_sender: Option>, - pub root_mtx_sender: Sender, - - // todo: probably clean up how this is implemented - pub root_mtx_id_arc_for_keepalive: Arc, + pub id: Arc, + pub func_name: String, + /// Note that the "path_from_root_mtx" may not be unique! (eg. if root-func calls the same nested-func twice, in same section) + pub path_from_root_mtx: String, + //pub extra_info: String, + pub current_section: MtxSection, + /// This field holds the timings of all sections in the root mtx-enabled function, as well as any mtx-enabled functions called underneath it (where the root mtx is passed). + /// Entry's key is the "path" to the section + ";" + the section's start-time, eg: root_func/part1/other_func/part3;1649321920506.4802 + /// Entry's value is `SectionLifetime` struct, containing the start-time and duration of the section (stored as fractional milliseconds), etc. 
+ pub section_lifetimes: Arc>>, + //pub section_lifetimes: Arc>, + + // communication helpers + pub msg_sender: Sender, + pub msg_receiver: Receiver, + + //pub parent: Option<&'a mut Mtx<'b, 'b>>, + //pub parent: Option<&'a RefCell>, + //#[serde(skip)] pub parent_sender: Option>, + pub root_mtx_sender: Sender, + + // todo: probably clean up how this is implemented + pub root_mtx_id_arc_for_keepalive: Arc, } //pub static mtx_none: Arc = Arc::new(Mtx::new("n/a")); impl Mtx { - pub fn new(func_name: &str, first_section_name: impl Into>, parent: Option<&Mtx>, extra_info: Option) -> Self { - let (msg_sender, msg_receiver): (Sender, Receiver) = flume::unbounded(); - let root_mtx_sender = match parent { - Some(parent) => parent.root_mtx_sender.clone(), - None => msg_sender.clone(), - }; - let path_from_root_mtx = match parent { - Some(parent) => format!("{}/{}", parent.current_section.path, func_name), - None => func_name.to_owned(), - }; - - let id_arc = Arc::new(Uuid::new_v4()); - let id_arc_first_clone = id_arc.clone(); - let mut new_self = Self { - id: id_arc, - func_name: func_name.to_owned(), - path_from_root_mtx, - //extra_info, - - // the value of this doesn't matter; it gets overwritten by start_new_section below - current_section: MtxSection { path: "[temp placeholder]".to_string(), start_time: 0f64, extra_info: None, duration: None }, - - section_lifetimes: Arc::new(RwLock::new(IndexMap::new())), - //section_lifetimes: Arc::new(flurry::HashMap::new()), - msg_sender, - msg_receiver, - //parent: None, - /*parent_sender: match parent { - Some(parent) => Some(parent.msg_sender.clone()), - None => None, - },*/ - root_mtx_sender, - - // fix for issue of root-mtx update-receiving-loop (see tokio::spawn block below) being dropped while proxy was still sending more data to it - root_mtx_id_arc_for_keepalive: match parent { - Some(parent) => parent.root_mtx_id_arc_for_keepalive.clone(), - None => id_arc_first_clone, - } - }; - new_self.start_new_section(&first_section_name.into(), extra_info, time_since_epoch_ms()); - - if new_self.is_root_mtx() && !cfg!(test) { - // start a timer that, once per second (while the mtx-instance is active/in-scope), sends its data to the backend - let (id_clone, section_lifetimes_clone, msg_receiver_clone) = (new_self.id.clone(), new_self.section_lifetimes.clone(), new_self.msg_receiver.clone()); - tokio::spawn(async move { - let mut interval = time::interval(time::Duration::from_secs(1)); - let mut last_data_as_str: Option = None; - loop { - interval.tick().await; - - // if this is the first iteration, wait a bit - // (this wait appears to give time for the mtx-instance to be bound to a scope or something, such that the strong_count() call sees the remove strong-reference we expect) - if last_data_as_str.is_none() { - time::sleep(Duration::from_millis(1000)).await; - } - - // process any messages that have buffered up - MtxMessage::apply_messages_to_mtx_data(§ion_lifetimes_clone, msg_receiver_clone.drain()); - - // package data and send to channel (which user-project can decide what to do with) - let data_as_str = package_up_mtx_data_and_send_to_channel(id_clone.clone(), section_lifetimes_clone.clone(), last_data_as_str).await; - - last_data_as_str = data_as_str; - //println!("Sent partial results for mtx entry..."); // temp - - // if we're the last place holding a reference to the root-mtx's id-arc, the entire tree of mtx-instances must have been dropped by now (since each mtx-instance holds a reference to its tree's root id-arc); - // that means it has already 
sent its final results to the monitor-backend (through the `Drop::drop` implementation), so we can end this loop - if Arc::strong_count(&id_clone) <= 1 { - if Arc::strong_count(§ion_lifetimes_clone) > 1 { - warn!("Despite all refs to root-mtx's id-arc (other than receiver loop) having dropped, the section-lifetimes-arc still has other refs! This is unexpected. Receiver loop is proceeding with shutdown anyway, but worth investigating."); - } - //println!("Stopping mtx-data-sending timer, since mtx instance has been dropped."); // temp - break; - } - } - }); - } - - new_self - } - - /// Use this when you want to collect mtx data from some async function (or otherwise uncontrolled call-path). Basically, it's made for cases where you can neither: - /// 1) ..."move" the mtx to pass as an argument (due to there being local mtx-sections after the relevant call) - /// 2) ...nor can you "borrow" it to pass by reference (since the call-tree is uncontrolled/async, so rust's borrow-checker can't verify the lifetimes) - /// Usage example: - /// ``` - /// new_mtx!(mtx, "1:section one", None); - /// uncontrolled_call_path(Some(mtx.proxy())).await; - /// mtx.section("2:section two"); - /// something_else(); - /// ``` - pub fn proxy(&self, /*keep_parent_alive: bool*/) -> Mtx { - Mtx::new("", "", Some(&self), None) - } - - pub fn is_root_mtx(&self) -> bool { - !self.path_from_root_mtx.contains("/") - } - /*pub fn current_section_path(&self) -> String { - format!("{}/{}", self.path_from_root_mtx, self.current_section_name) - }*/ - pub fn section(&mut self, name: impl Into>) { - self.section_2(name, None); - } - pub fn section_2(&mut self, name: impl Into>, extra_info: Option) { - let old_section_end_time = self.end_old_section(); - self.start_new_section(&name.into(), extra_info, old_section_end_time); - } - fn end_old_section(&mut self) -> f64 { - let old_section = &mut self.current_section; - let section_end_time = time_since_epoch_ms(); - old_section.duration = Some(section_end_time - old_section.start_time); - - { - let mut section_lifetimes = self.section_lifetimes.write().unwrap(); - /*let guard = self.section_lifetimes.guard(); - let section_lifetimes = self.section_lifetimes.with_guard(&guard);*/ - section_lifetimes.insert(old_section.get_key(), old_section.clone()); - } - // ignore send-error; this just means we're within a "proxy subtree", and the root mtx has already been dropped - // todo: probably have caller decide what to do in this situation, as an argument to `proxy()` (since in some cases this might be unexpected) - let _ = self.root_mtx_sender.send(MtxMessage::UpdateSectionLifetime(old_section.get_key(), old_section.clone())); - section_end_time - } - fn start_new_section(&mut self, name: &str, extra_info: Option, old_section_end_time: f64) { - let new_section = MtxSection { - path: format!("{}/{}", self.path_from_root_mtx, name), - start_time: old_section_end_time, - extra_info, - duration: None, - }; - trace!("Section started:{new_section:?}"); - self.start_new_section_2(new_section, name != MTX_FINAL_SECTION_NAME); - } - fn start_new_section_2(&mut self, new_section: MtxSection, send_to_root_mtx: bool) { - self.current_section = new_section.clone(); - // store partial-data for new section - if send_to_root_mtx { - let msg = MtxMessage::UpdateSectionLifetime(new_section.get_key(), new_section); - /*if self.is_root_mtx() { - MtxMessage::apply_messages_to_mtx_data(&self.section_lifetimes, vec![msg].into_iter()); - } else {*/ - // ignore send-error; this just means we're within a "proxy 
subtree", and the root mtx has already been dropped - // todo: probably have caller decide what to do in this situation, as an argument to `proxy()` (since in some cases this might be unexpected) - let _ = self.root_mtx_sender.send(msg); - } - } - - /*pub fn send_to_monitor_backend(&mut self) { - // sort section_lifetimes collection (makes log-based inspection a lot easier) - self.section_lifetimes.sort_keys(); - - //send_mtx_tree_to_monitor_backend(self).await; - let self_as_str = json_obj_1field("mtx", self).map(|a| a.to_string()).unwrap_or("failed to serialize mtx-instance".to_string()); - tokio::spawn(async move { - let result = send_mtx_tree_to_monitor_backend(self_as_str).await; - result.expect("Got error while sending mtx-tree to monitor-backend..."); - }); - }*/ - - /// Helper function to make an `info!(...)` log-call, with the basic info like function-name. (avoids need to add custom message for logging of key function-calls) - pub fn log_call(&self, temp_extra_info: Option) { - let current_section_extra_info_str = self.current_section.extra_info.as_ref().map_or("".to_owned(), |a| format!(" {}", a)); - let temp_extra_info_str = temp_extra_info.map_or("".to_owned(), |a| format!(" {}", a)); - info!("Called:{}{}{}", self.func_name, current_section_extra_info_str, temp_extra_info_str); - } + pub fn new(func_name: &str, first_section_name: impl Into>, parent: Option<&Mtx>, extra_info: Option) -> Self { + let (msg_sender, msg_receiver): (Sender, Receiver) = flume::unbounded(); + let root_mtx_sender = match parent { + Some(parent) => parent.root_mtx_sender.clone(), + None => msg_sender.clone(), + }; + let path_from_root_mtx = match parent { + Some(parent) => format!("{}/{}", parent.current_section.path, func_name), + None => func_name.to_owned(), + }; + + let id_arc = Arc::new(Uuid::new_v4()); + let id_arc_first_clone = id_arc.clone(); + let mut new_self = Self { + id: id_arc, + func_name: func_name.to_owned(), + path_from_root_mtx, + //extra_info, + + // the value of this doesn't matter; it gets overwritten by start_new_section below + current_section: MtxSection { path: "[temp placeholder]".to_string(), start_time: 0f64, extra_info: None, duration: None }, + + section_lifetimes: Arc::new(RwLock::new(IndexMap::new())), + //section_lifetimes: Arc::new(flurry::HashMap::new()), + msg_sender, + msg_receiver, + //parent: None, + /*parent_sender: match parent { + Some(parent) => Some(parent.msg_sender.clone()), + None => None, + },*/ + root_mtx_sender, + + // fix for issue of root-mtx update-receiving-loop (see tokio::spawn block below) being dropped while proxy was still sending more data to it + root_mtx_id_arc_for_keepalive: match parent { + Some(parent) => parent.root_mtx_id_arc_for_keepalive.clone(), + None => id_arc_first_clone, + }, + }; + new_self.start_new_section(&first_section_name.into(), extra_info, time_since_epoch_ms()); + + if new_self.is_root_mtx() && !cfg!(test) { + // start a timer that, once per second (while the mtx-instance is active/in-scope), sends its data to the backend + let (id_clone, section_lifetimes_clone, msg_receiver_clone) = (new_self.id.clone(), new_self.section_lifetimes.clone(), new_self.msg_receiver.clone()); + tokio::spawn(async move { + let mut interval = time::interval(time::Duration::from_secs(1)); + let mut last_data_as_str: Option = None; + loop { + interval.tick().await; + + // if this is the first iteration, wait a bit + // (this wait appears to give time for the mtx-instance to be bound to a scope or something, such that the strong_count() call 
sees the remove strong-reference we expect) + if last_data_as_str.is_none() { + time::sleep(Duration::from_millis(1000)).await; + } + + // process any messages that have buffered up + MtxMessage::apply_messages_to_mtx_data(§ion_lifetimes_clone, msg_receiver_clone.drain()); + + // package data and send to channel (which user-project can decide what to do with) + let data_as_str = package_up_mtx_data_and_send_to_channel(id_clone.clone(), section_lifetimes_clone.clone(), last_data_as_str).await; + + last_data_as_str = data_as_str; + //println!("Sent partial results for mtx entry..."); // temp + + // if we're the last place holding a reference to the root-mtx's id-arc, the entire tree of mtx-instances must have been dropped by now (since each mtx-instance holds a reference to its tree's root id-arc); + // that means it has already sent its final results to the monitor-backend (through the `Drop::drop` implementation), so we can end this loop + if Arc::strong_count(&id_clone) <= 1 { + if Arc::strong_count(§ion_lifetimes_clone) > 1 { + warn!("Despite all refs to root-mtx's id-arc (other than receiver loop) having dropped, the section-lifetimes-arc still has other refs! This is unexpected. Receiver loop is proceeding with shutdown anyway, but worth investigating."); + } + //println!("Stopping mtx-data-sending timer, since mtx instance has been dropped."); // temp + break; + } + } + }); + } + + new_self + } + + /// Use this when you want to collect mtx data from some async function (or otherwise uncontrolled call-path). Basically, it's made for cases where you can neither: + /// 1) ..."move" the mtx to pass as an argument (due to there being local mtx-sections after the relevant call) + /// 2) ...nor can you "borrow" it to pass by reference (since the call-tree is uncontrolled/async, so rust's borrow-checker can't verify the lifetimes) + /// Usage example: + /// ``` + /// new_mtx!(mtx, "1:section one", None); + /// uncontrolled_call_path(Some(mtx.proxy())).await; + /// mtx.section("2:section two"); + /// something_else(); + /// ``` + pub fn proxy(&self /*keep_parent_alive: bool*/) -> Mtx { + Mtx::new("", "", Some(&self), None) + } + + pub fn is_root_mtx(&self) -> bool { + !self.path_from_root_mtx.contains("/") + } + /*pub fn current_section_path(&self) -> String { + format!("{}/{}", self.path_from_root_mtx, self.current_section_name) + }*/ + pub fn section(&mut self, name: impl Into>) { + self.section_2(name, None); + } + pub fn section_2(&mut self, name: impl Into>, extra_info: Option) { + let old_section_end_time = self.end_old_section(); + self.start_new_section(&name.into(), extra_info, old_section_end_time); + } + fn end_old_section(&mut self) -> f64 { + let old_section = &mut self.current_section; + let section_end_time = time_since_epoch_ms(); + old_section.duration = Some(section_end_time - old_section.start_time); + + { + let mut section_lifetimes = self.section_lifetimes.write().unwrap(); + /*let guard = self.section_lifetimes.guard(); + let section_lifetimes = self.section_lifetimes.with_guard(&guard);*/ + section_lifetimes.insert(old_section.get_key(), old_section.clone()); + } + // ignore send-error; this just means we're within a "proxy subtree", and the root mtx has already been dropped + // todo: probably have caller decide what to do in this situation, as an argument to `proxy()` (since in some cases this might be unexpected) + let _ = self.root_mtx_sender.send(MtxMessage::UpdateSectionLifetime(old_section.get_key(), old_section.clone())); + section_end_time + } + fn 
start_new_section(&mut self, name: &str, extra_info: Option, old_section_end_time: f64) { + let new_section = MtxSection { path: format!("{}/{}", self.path_from_root_mtx, name), start_time: old_section_end_time, extra_info, duration: None }; + trace!("Section started:{new_section:?}"); + self.start_new_section_2(new_section, name != MTX_FINAL_SECTION_NAME); + } + fn start_new_section_2(&mut self, new_section: MtxSection, send_to_root_mtx: bool) { + self.current_section = new_section.clone(); + // store partial-data for new section + if send_to_root_mtx { + let msg = MtxMessage::UpdateSectionLifetime(new_section.get_key(), new_section); + /*if self.is_root_mtx() { + MtxMessage::apply_messages_to_mtx_data(&self.section_lifetimes, vec![msg].into_iter()); + } else {*/ + // ignore send-error; this just means we're within a "proxy subtree", and the root mtx has already been dropped + // todo: probably have caller decide what to do in this situation, as an argument to `proxy()` (since in some cases this might be unexpected) + let _ = self.root_mtx_sender.send(msg); + } + } + + /*pub fn send_to_monitor_backend(&mut self) { + // sort section_lifetimes collection (makes log-based inspection a lot easier) + self.section_lifetimes.sort_keys(); + + //send_mtx_tree_to_monitor_backend(self).await; + let self_as_str = json_obj_1field("mtx", self).map(|a| a.to_string()).unwrap_or("failed to serialize mtx-instance".to_string()); + tokio::spawn(async move { + let result = send_mtx_tree_to_monitor_backend(self_as_str).await; + result.expect("Got error while sending mtx-tree to monitor-backend..."); + }); + }*/ + + /// Helper function to make an `info!(...)` log-call, with the basic info like function-name. (avoids need to add custom message for logging of key function-calls) + pub fn log_call(&self, temp_extra_info: Option) { + let current_section_extra_info_str = self.current_section.extra_info.as_ref().map_or("".to_owned(), |a| format!(" {}", a)); + let temp_extra_info_str = temp_extra_info.map_or("".to_owned(), |a| format!(" {}", a)); + info!("Called:{}{}{}", self.func_name, current_section_extra_info_str, temp_extra_info_str); + } } impl Drop for Mtx { - fn drop(&mut self) { - //println!("Drop called. @current_section:{:?} @lifetimes:{:?}", self.current_section.get_key(), /*self.section_lifetimes*/ "[snip]"); - self.section(MTX_FINAL_SECTION_NAME); // called simply to mark end of prior section - if self.is_root_mtx() && !cfg!(test) { - MtxMessage::apply_messages_to_mtx_data(&self.section_lifetimes, self.msg_receiver.drain()); - - let (id_clone, section_lifetimes_clone) = (self.id.clone(), self.section_lifetimes.clone()); - tokio::spawn(async move { - // sending `None` since we don't have an easy way to access the `last_data_as_str` here (it's in the tokio loop within `Mtx::new`) - package_up_mtx_data_and_send_to_channel(id_clone, section_lifetimes_clone, None).await; - }); - } - } + fn drop(&mut self) { + //println!("Drop called. 
@current_section:{:?} @lifetimes:{:?}", self.current_section.get_key(), /*self.section_lifetimes*/ "[snip]"); + self.section(MTX_FINAL_SECTION_NAME); // called simply to mark end of prior section + if self.is_root_mtx() && !cfg!(test) { + MtxMessage::apply_messages_to_mtx_data(&self.section_lifetimes, self.msg_receiver.drain()); + + let (id_clone, section_lifetimes_clone) = (self.id.clone(), self.section_lifetimes.clone()); + tokio::spawn(async move { + // sending `None` since we don't have an easy way to access the `last_data_as_str` here (it's in the tokio loop within `Mtx::new`) + package_up_mtx_data_and_send_to_channel(id_clone, section_lifetimes_clone, None).await; + }); + } + } } pub async fn package_up_mtx_data_and_send_to_channel(id: Arc, section_lifetimes: Arc>>, last_data_as_str: Option) -> Option { - let mtx_data = MtxData::from(id, section_lifetimes).await; - - // we stringify the data an additional time here, as a convenience field for user-project (so it can check if action needs to be taken) - let data_as_str = serde_json::to_string(&mtx_data).unwrap(); - - let (msg_sender, _msg_receiver) = MTX_GLOBAL_MESSAGE_SENDER_AND_RECEIVER.get().expect("MTX_GLOBAL_MESSAGE_SENDER_AND_RECEIVER not initialized!"); - - // ignore error; if channel is full, we don't care (user-project must just not care to be reading them all) - let _ = msg_sender.send_timeout(MtxGlobalMsg::NotifyMtxDataPossiblyChanged(MtxDataWithExtraInfo { - id: mtx_data.id, - section_lifetimes: mtx_data.section_lifetimes, - data_as_str: data_as_str.clone(), - last_data_as_str: last_data_as_str.clone(), - }), Duration::from_millis(500)); // we pick 500ms so it's certain to complete within the 1s interval of the tokio loop within `Mtx::new` - - Some(data_as_str) + let mtx_data = MtxData::from(id, section_lifetimes).await; + + // we stringify the data an additional time here, as a convenience field for user-project (so it can check if action needs to be taken) + let data_as_str = serde_json::to_string(&mtx_data).unwrap(); + + let (msg_sender, _msg_receiver) = MTX_GLOBAL_MESSAGE_SENDER_AND_RECEIVER.get().expect("MTX_GLOBAL_MESSAGE_SENDER_AND_RECEIVER not initialized!"); + + // ignore error; if channel is full, we don't care (user-project must just not care to be reading them all) + let _ = msg_sender.send_timeout(MtxGlobalMsg::NotifyMtxDataPossiblyChanged(MtxDataWithExtraInfo { id: mtx_data.id, section_lifetimes: mtx_data.section_lifetimes, data_as_str: data_as_str.clone(), last_data_as_str: last_data_as_str.clone() }), Duration::from_millis(500)); // we pick 500ms so it's certain to complete within the 1s interval of the tokio loop within `Mtx::new` + + Some(data_as_str) } pub enum MtxGlobalMsg { - NotifyMtxDataPossiblyChanged(MtxDataWithExtraInfo), + NotifyMtxDataPossiblyChanged(MtxDataWithExtraInfo), } /*pub static MTX_GLOBAL_MESSAGE_SENDER_AND_RECEIVER: Lazy<(FSender, FReceiver)> = Lazy::new(|| { - // limit to 10k messages (eg. in case user-project is not reading them, we don't want the queue's memory-usage to just endlessly grow) - let (msg_sender, msg_receiver): (FSender, FReceiver) = flume::bounded(10000); - (msg_sender, msg_receiver) + // limit to 10k messages (eg. 
in case user-project is not reading them, we don't want the queue's memory-usage to just endlessly grow) + let (msg_sender, msg_receiver): (FSender, FReceiver) = flume::bounded(10000); + (msg_sender, msg_receiver) });*/ /// It is up to the user-project to initialize this channel at program startup; until that is done, any data-updates from Mtx instances will just be lost/ignored. pub static MTX_GLOBAL_MESSAGE_SENDER_AND_RECEIVER: OnceCell<(FSender, FReceiver)> = OnceCell::new(); @@ -358,29 +354,29 @@ pub static MTX_GLOBAL_MESSAGE_SENDER_AND_RECEIVER: OnceCell<(FSender { - mtx: &'a Mtx, + mtx: &'a Mtx, } //}*/ pub fn json_obj_1field(field_name: &str, field_value: T) -> Result { - let mut obj = serde_json::Map::new(); - //obj[field_name] = field_value; - obj.insert(field_name.to_string(), serde_json::to_value(field_value)?); - return Ok(JSONValue::Object(obj)); + let mut obj = serde_json::Map::new(); + //obj[field_name] = field_value; + obj.insert(field_name.to_string(), serde_json::to_value(field_value)?); + return Ok(JSONValue::Object(obj)); } /*pub fn get_host_of_other_pod(service_name: &str, namespace: &str, port: &str) -> String { - format!("{service_name}.{namespace}.svc.cluster.local:{port}") + format!("{service_name}.{namespace}.svc.cluster.local:{port}") }*/ pub const MTX_FINAL_SECTION_NAME: &'static str = "$end-marker$"; impl MtxSection { - pub fn get_key(&self) -> String { - // use "#" as the separator, so that it sorts earlier than "/" (such that children show up after the parent, when sorting by path-plus-time) - let new_section_path_plus_time = format!("{}#{}", self.path, self.start_time); - new_section_path_plus_time - } + pub fn get_key(&self) -> String { + // use "#" as the separator, so that it sorts earlier than "/" (such that children show up after the parent, when sorting by path-plus-time) + let new_section_path_plus_time = format!("{}#{}", self.path, self.start_time); + new_section_path_plus_time + } } // this alias is needed, since `wrap_serde_macros.rs` inserts refs to, eg. `rust_shared::rust_macros::Serialize_Stub` @@ -390,58 +386,58 @@ wrap_slow_macros! { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MtxSection { - pub path: String, - pub extra_info: Option, - pub start_time: f64, - pub duration: Option, + pub path: String, + pub extra_info: Option, + pub start_time: f64, + pub duration: Option, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MtxData { - pub id: String, - pub section_lifetimes: IndexMap, + pub id: String, + pub section_lifetimes: IndexMap, } impl MtxData { - async fn from( - id: Arc, - section_lifetimes: Arc>>, - //section_lifetimes: Arc>, - ) -> MtxData { - let section_lifetimes = section_lifetimes.read().unwrap(); - MtxData { - //id: (*id).clone(), - id: (*id).to_string(), - section_lifetimes: (*section_lifetimes).clone(), - } - } + async fn from( + id: Arc, + section_lifetimes: Arc>>, + //section_lifetimes: Arc>, + ) -> MtxData { + let section_lifetimes = section_lifetimes.read().unwrap(); + MtxData { + //id: (*id).clone(), + id: (*id).to_string(), + section_lifetimes: (*section_lifetimes).clone(), + } + } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MtxDataWithExtraInfo { - pub id: String, - pub section_lifetimes: IndexMap, - //pub section_lifetimes: Arc>, - - // extra info (eg. so user-project can know if action needs to be taken) - pub data_as_str: String, - pub last_data_as_str: Option, + pub id: String, + pub section_lifetimes: IndexMap, + //pub section_lifetimes: Arc>, + + // extra info (eg. 
so user-project can know if action needs to be taken) + pub data_as_str: String, + pub last_data_as_str: Option, } #[derive(SimpleObject)] // added for AGQL variant only #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MtxDataForAGQL { - pub id: String, // changed to String in MtxData structs, for more universality (eg. easier usage with gql in monitor-backend -- agql's OutputType isn't implemented for Uuid) + pub id: String, // changed to String in MtxData structs, for more universality (eg. easier usage with gql in monitor-backend -- agql's OutputType isn't implemented for Uuid) - // use HashMap (since agql has OutputType implemented for it), but have serde use the ordered_map function (which enforced a consistent order, of alphabetical, through use of a temporary BTreeMap) - #[serde(serialize_with = "crate::utils::general_::serde::ordered_map")] - pub section_lifetimes: HashMap, + // use HashMap (since agql has OutputType implemented for it), but have serde use the ordered_map function (which enforced a consistent order, of alphabetical, through use of a temporary BTreeMap) + #[serde(serialize_with = "crate::utils::general_::serde::ordered_map")] + pub section_lifetimes: HashMap, } impl MtxDataForAGQL { - pub fn from_base(mtx_data: &MtxData) -> MtxDataForAGQL { - let mtx_data_as_str = serde_json::to_string(&mtx_data).unwrap(); - let mtx_data_for_agql: MtxDataForAGQL = serde_json::from_str(&mtx_data_as_str).unwrap(); - mtx_data_for_agql - } + pub fn from_base(mtx_data: &MtxData) -> MtxDataForAGQL { + let mtx_data_as_str = serde_json::to_string(&mtx_data).unwrap(); + let mtx_data_for_agql: MtxDataForAGQL = serde_json::from_str(&mtx_data_as_str).unwrap(); + mtx_data_for_agql + } } -} \ No newline at end of file +} diff --git a/Packages/rust-shared/src/utils/mtx/mtx/mtx_stub.rs b/Packages/rust-shared/src/utils/mtx/mtx/mtx_stub.rs index b573612e3..d7d34656b 100644 --- a/Packages/rust-shared/src/utils/mtx/mtx/mtx_stub.rs +++ b/Packages/rust-shared/src/utils/mtx/mtx/mtx_stub.rs @@ -1,125 +1,134 @@ -use std::{sync::{Arc, RwLock, RwLockWriteGuard}, cell::RefCell, time::{Instant, Duration}, borrow::Cow, rc::Rc, collections::HashMap}; - -use crate::utils::{general_::extensions::ToOwnedV, type_aliases::{FReceiver, FSender, JSONValue}}; +use std::{ + borrow::Cow, + cell::RefCell, + collections::HashMap, + rc::Rc, + sync::{Arc, RwLock, RwLockWriteGuard}, + time::{Duration, Instant}, +}; + +use crate::utils::{ + general_::extensions::ToOwnedV, + type_aliases::{FReceiver, FSender, JSONValue}, +}; use anyhow::Error; use async_graphql::SimpleObject; -use once_cell::sync::{OnceCell, Lazy}; +use flume::{Receiver, Sender}; +use once_cell::sync::{Lazy, OnceCell}; use serde_json; use tokio; -use flume::{Sender, Receiver}; use crate::hyper::{Method, Request}; -use indexmap::IndexMap; use crate::rust_macros::wrap_slow_macros; -use crate::utils::time::time_since_epoch_ms; -use crate::serde::{Serialize, Deserialize}; +use crate::serde::{Deserialize, Serialize}; use crate::serde_json::{json, Map}; -use crate::tokio::{time}; -use tracing::{trace, error, info, warn}; +use crate::tokio::time; +use crate::utils::time::time_since_epoch_ms; use crate::uuid::Uuid; +use indexmap::IndexMap; +use tracing::{error, info, trace, warn}; #[macro_export] macro_rules! 
new_mtx { - ($mtx:ident, $first_section_name:expr) => { - $crate::utils::mtx::mtx::new_mtx!($mtx, $first_section_name, None); - }; - ($mtx:ident, $first_section_name:expr, $parent_mtx:expr) => { - $crate::utils::mtx::mtx::new_mtx!($mtx, $first_section_name, $parent_mtx, None); - }; - ($mtx:ident, $first_section_name:expr, $parent_mtx:expr, $extra_info:expr) => { - let parent_mtx: Option<&$crate::utils::mtx::mtx::Mtx> = $parent_mtx; - #[allow(unused_mut)] - let mut $mtx = $crate::utils::mtx::mtx::Mtx::new("", $first_section_name, parent_mtx, $extra_info); - }; + ($mtx:ident, $first_section_name:expr) => { + $crate::utils::mtx::mtx::new_mtx!($mtx, $first_section_name, None); + }; + ($mtx:ident, $first_section_name:expr, $parent_mtx:expr) => { + $crate::utils::mtx::mtx::new_mtx!($mtx, $first_section_name, $parent_mtx, None); + }; + ($mtx:ident, $first_section_name:expr, $parent_mtx:expr, $extra_info:expr) => { + let parent_mtx: Option<&$crate::utils::mtx::mtx::Mtx> = $parent_mtx; + #[allow(unused_mut)] + let mut $mtx = $crate::utils::mtx::mtx::Mtx::new("", $first_section_name, parent_mtx, $extra_info); + }; } pub use new_mtx; pub enum MtxMessage { - UpdateSectionLifetime(String, MtxSection), + UpdateSectionLifetime(String, MtxSection), } impl MtxMessage { - pub fn apply_messages_to_mtx_data( - mtx_section_lifetimes: &Arc>>, - messages: impl Iterator - ) { - let mut section_lifetimes = mtx_section_lifetimes.write().unwrap(); - for msg in messages { - msg.apply_to_mtx_data(&mut section_lifetimes); - } - } - pub fn apply_to_mtx_data(self, - section_lifetimes_ref: &mut RwLockWriteGuard> - ) { - match self { - MtxMessage::UpdateSectionLifetime(path_and_time, lifetime) => { - section_lifetimes_ref.insert(path_and_time, lifetime); - } - } - } + pub fn apply_messages_to_mtx_data(mtx_section_lifetimes: &Arc>>, messages: impl Iterator) { + let mut section_lifetimes = mtx_section_lifetimes.write().unwrap(); + for msg in messages { + msg.apply_to_mtx_data(&mut section_lifetimes); + } + } + pub fn apply_to_mtx_data(self, section_lifetimes_ref: &mut RwLockWriteGuard>) { + match self { + MtxMessage::UpdateSectionLifetime(path_and_time, lifetime) => { + section_lifetimes_ref.insert(path_and_time, lifetime); + }, + } + } } #[derive(Debug)] pub struct Mtx { - pub id: Arc, - pub func_name: String, - pub path_from_root_mtx: String, - pub current_section: MtxSection, - pub section_lifetimes: Arc>>, - pub msg_sender: Sender, - pub msg_receiver: Receiver, - pub root_mtx_sender: Sender, - pub root_mtx_id_arc_for_keepalive: Arc, + pub id: Arc, + pub func_name: String, + pub path_from_root_mtx: String, + pub current_section: MtxSection, + pub section_lifetimes: Arc>>, + pub msg_sender: Sender, + pub msg_receiver: Receiver, + pub root_mtx_sender: Sender, + pub root_mtx_id_arc_for_keepalive: Arc, } //pub static mtx_none: Arc = Arc::new(Mtx::new("n/a")); impl Mtx { - pub fn new(func_name: &str, _first_section_name: impl Into>, parent: Option<&Mtx>, _extra_info: Option) -> Self { - let (msg_sender, msg_receiver): (Sender, Receiver) = flume::unbounded(); - let root_mtx_sender = match parent { - Some(parent) => parent.root_mtx_sender.clone(), - None => msg_sender.clone(), - }; - let id_arc = Arc::new(Uuid::nil()); // nil() avoids call to getrandom::getrandom, which shows up in profiling - let id_arc_first_clone = id_arc.clone(); - let new_self = Self { - id: id_arc, - func_name: func_name.to_owned(), - path_from_root_mtx: "".o(), - current_section: MtxSection { path: "[temp placeholder]".to_string(), start_time: 0f64, 
extra_info: None, duration: None }, - section_lifetimes: Arc::new(RwLock::new(IndexMap::new())), - msg_sender, - msg_receiver, - root_mtx_sender, - // fix for issue of root-mtx update-receiving-loop (see tokio::spawn block below) being dropped while proxy was still sending more data to it - root_mtx_id_arc_for_keepalive: match parent { - Some(parent) => parent.root_mtx_id_arc_for_keepalive.clone(), - None => id_arc_first_clone, - } - }; - - new_self - } - - pub fn proxy(&self, /*keep_parent_alive: bool*/) -> Mtx { - Mtx::new("", "", Some(&self), None) - } - pub fn is_root_mtx(&self) -> bool { true } - pub fn section(&mut self, _name: impl Into>) {} - pub fn section_2(&mut self, _name: impl Into>, _extra_info: Option) {} - pub fn log_call(&self, _temp_extra_info: Option) {} + pub fn new(func_name: &str, _first_section_name: impl Into>, parent: Option<&Mtx>, _extra_info: Option) -> Self { + let (msg_sender, msg_receiver): (Sender, Receiver) = flume::unbounded(); + let root_mtx_sender = match parent { + Some(parent) => parent.root_mtx_sender.clone(), + None => msg_sender.clone(), + }; + let id_arc = Arc::new(Uuid::nil()); // nil() avoids call to getrandom::getrandom, which shows up in profiling + let id_arc_first_clone = id_arc.clone(); + let new_self = Self { + id: id_arc, + func_name: func_name.to_owned(), + path_from_root_mtx: "".o(), + current_section: MtxSection { path: "[temp placeholder]".to_string(), start_time: 0f64, extra_info: None, duration: None }, + section_lifetimes: Arc::new(RwLock::new(IndexMap::new())), + msg_sender, + msg_receiver, + root_mtx_sender, + // fix for issue of root-mtx update-receiving-loop (see tokio::spawn block below) being dropped while proxy was still sending more data to it + root_mtx_id_arc_for_keepalive: match parent { + Some(parent) => parent.root_mtx_id_arc_for_keepalive.clone(), + None => id_arc_first_clone, + }, + }; + + new_self + } + + pub fn proxy(&self /*keep_parent_alive: bool*/) -> Mtx { + Mtx::new("", "", Some(&self), None) + } + pub fn is_root_mtx(&self) -> bool { + true + } + pub fn section(&mut self, _name: impl Into>) {} + pub fn section_2(&mut self, _name: impl Into>, _extra_info: Option) {} + pub fn log_call(&self, _temp_extra_info: Option) {} } pub async fn package_up_mtx_data_and_send_to_channel(_id: Arc, _section_lifetimes: Arc>>, _last_data_as_str: Option) -> Option { - Some("".o()) + Some("".o()) } pub enum MtxGlobalMsg { - NotifyMtxDataPossiblyChanged(MtxDataWithExtraInfo), + NotifyMtxDataPossiblyChanged(MtxDataWithExtraInfo), } pub static MTX_GLOBAL_MESSAGE_SENDER_AND_RECEIVER: OnceCell<(FSender, FReceiver)> = OnceCell::new(); pub const MTX_FINAL_SECTION_NAME: &'static str = "$end-marker$"; impl MtxSection { - pub fn get_key(&self) -> String { "".o() } + pub fn get_key(&self) -> String { + "".o() + } } // this alias is needed, since `wrap_serde_macros.rs` inserts refs to, eg. `rust_shared::rust_macros::Serialize_Stub` @@ -129,51 +138,51 @@ wrap_slow_macros! 
{ #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MtxSection { - pub path: String, - pub extra_info: Option, - pub start_time: f64, - pub duration: Option, + pub path: String, + pub extra_info: Option, + pub start_time: f64, + pub duration: Option, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MtxData { - pub id: String, - pub section_lifetimes: IndexMap, + pub id: String, + pub section_lifetimes: IndexMap, } impl MtxData { - async fn from( - id: Arc, - section_lifetimes: Arc>>, - ) -> MtxData { - let section_lifetimes = section_lifetimes.read().unwrap(); - MtxData { - id: (*id).to_string(), - section_lifetimes: (*section_lifetimes).clone(), - } - } + async fn from( + id: Arc, + section_lifetimes: Arc>>, + ) -> MtxData { + let section_lifetimes = section_lifetimes.read().unwrap(); + MtxData { + id: (*id).to_string(), + section_lifetimes: (*section_lifetimes).clone(), + } + } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MtxDataWithExtraInfo { - pub id: String, - pub section_lifetimes: IndexMap, - pub data_as_str: String, - pub last_data_as_str: Option, + pub id: String, + pub section_lifetimes: IndexMap, + pub data_as_str: String, + pub last_data_as_str: Option, } #[derive(SimpleObject)] // added for AGQL variant only #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MtxDataForAGQL { - pub id: String, - #[serde(serialize_with = "crate::utils::general_::serde::ordered_map")] - pub section_lifetimes: HashMap, + pub id: String, + #[serde(serialize_with = "crate::utils::general_::serde::ordered_map")] + pub section_lifetimes: HashMap, } impl MtxDataForAGQL { - pub fn from_base(mtx_data: &MtxData) -> MtxDataForAGQL { - let mtx_data_as_str = serde_json::to_string(&mtx_data).unwrap(); - let mtx_data_for_agql: MtxDataForAGQL = serde_json::from_str(&mtx_data_as_str).unwrap(); - mtx_data_for_agql - } + pub fn from_base(mtx_data: &MtxData) -> MtxDataForAGQL { + let mtx_data_as_str = serde_json::to_string(&mtx_data).unwrap(); + let mtx_data_for_agql: MtxDataForAGQL = serde_json::from_str(&mtx_data_as_str).unwrap(); + mtx_data_for_agql + } } -} \ No newline at end of file +} diff --git a/Packages/rust-shared/src/utils/net.rs b/Packages/rust-shared/src/utils/net.rs index 4d0e5f55e..cadce4fdc 100644 --- a/Packages/rust-shared/src/utils/net.rs +++ b/Packages/rust-shared/src/utils/net.rs @@ -1,6 +1,6 @@ use std::borrow::Cow; +use std::collections::{BTreeMap, HashMap}; use std::convert::Infallible; -use std::{collections::{BTreeMap, HashMap}}; use axum::response::ErrorResponse; use bytes::Bytes; @@ -10,9 +10,13 @@ use hyper::body::Incoming; use hyper_util::client::legacy::connect::HttpConnector; use hyper_util::rt::TokioExecutor; -use crate::{anyhow::{anyhow, bail, Error}, hyper::body::Body, serde, serde_json, to_anyhow}; -use crate::serde::Serialize; use crate::http_body_util::BodyExt; +use crate::serde::Serialize; +use crate::{ + anyhow::{anyhow, bail, Error}, + hyper::body::Body, + serde, serde_json, to_anyhow, +}; //pub type HyperClient = rust_shared::hyper_util::client::legacy::Client>; //pub type HyperClient = rust_shared::hyper_util::client::legacy::Client; @@ -20,7 +24,7 @@ use crate::http_body_util::BodyExt; pub type HyperClient = crate::hyper_util::client::legacy::Client; pub use crate::hyper_util::client::legacy::Client as HyperClient_; // to access the static functions, use this alias pub fn new_hyper_client_http() -> HyperClient { - HyperClient_::builder(TokioExecutor::new()).build_http::() + 
HyperClient_::builder(TokioExecutor::new()).build_http::() } pub type AxumResult = axum::response::Result; @@ -29,36 +33,43 @@ pub type AxumResultE = axum::response::Result pub type AxumResultI = axum::response::Result, Infallible>; pub type AxumBody = axum::body::Body; -pub async fn body_to_bytes(body: B) -> Result where ::Error: std::fmt::Debug { - let body_collected = match body.collect().await { Ok(a) => a, - Err(e) => bail!("Error while converting body to bytes: {:?}", e) - }; - let bytes = body_collected.to_bytes(); - Ok(bytes) +pub async fn body_to_bytes(body: B) -> Result +where + ::Error: std::fmt::Debug, +{ + let body_collected = match body.collect().await { + Ok(a) => a, + Err(e) => bail!("Error while converting body to bytes: {:?}", e), + }; + let bytes = body_collected.to_bytes(); + Ok(bytes) } -pub async fn body_to_str(body: B) -> Result where ::Error: std::fmt::Debug { - let bytes = body_to_bytes(body).await?; - let str: String = String::from_utf8_lossy(&bytes).as_ref().to_owned(); - Ok(str) +pub async fn body_to_str(body: B) -> Result +where + ::Error: std::fmt::Debug, +{ + let bytes = body_to_bytes(body).await?; + let str: String = String::from_utf8_lossy(&bytes).as_ref().to_owned(); + Ok(str) } pub fn full_body_from_str(into_str_cow: impl Into>) -> Full { - let str_cow: Cow<'static, str> = into_str_cow.into(); - let str = str_cow.into_owned(); - let bytes = Bytes::from(str); - Full::new(bytes) + let str_cow: Cow<'static, str> = into_str_cow.into(); + let str = str_cow.into_owned(); + let bytes = Bytes::from(str); + Full::new(bytes) } /// This currently doesn't return until the response's data has been completely collected. (ie. this is a blocking operation atm) pub async fn hyper_response_to_axum_response(hyper_response: crate::hyper::Response) -> axum::http::Response { - let (parts, body) = hyper_response.into_parts(); + let (parts, body) = hyper_response.into_parts(); - /*let body_as_stream = body.frame().map(|f| Ok::(f.unwrap().unwrap().into_data().unwrap())).into_stream(); - let axum_body = axum::body::Body::from_stream(body_as_stream);*/ + /*let body_as_stream = body.frame().map(|f| Ok::(f.unwrap().unwrap().into_data().unwrap())).into_stream(); + let axum_body = axum::body::Body::from_stream(body_as_stream);*/ - let bytes = body_to_bytes(body).await.unwrap(); - let axum_body = AxumBody::from(bytes); + let bytes = body_to_bytes(body).await.unwrap(); + let axum_body = AxumBody::from(bytes); - let axum_response = axum::http::Response::from_parts(parts, axum_body); - axum_response -} \ No newline at end of file + let axum_response = axum::http::Response::from_parts(parts, axum_body); + axum_response +} diff --git a/Packages/rust-shared/src/utils/time.rs b/Packages/rust-shared/src/utils/time.rs index 1cc212193..e7c3d37c5 100644 --- a/Packages/rust-shared/src/utils/time.rs +++ b/Packages/rust-shared/src/utils/time.rs @@ -1,22 +1,24 @@ -use std::time::{SystemTime, Duration, UNIX_EPOCH}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; pub fn time_since_epoch() -> Duration { - SystemTime::now().duration_since(UNIX_EPOCH).unwrap() + SystemTime::now().duration_since(UNIX_EPOCH).unwrap() } /*pub fn time_since_epoch_ms_u128() -> u128 { - time_since_epoch().as_millis() + time_since_epoch().as_millis() }*/ pub fn time_since_epoch_ms() -> f64 { - time_since_epoch().as_secs_f64() * 1000f64 + time_since_epoch().as_secs_f64() * 1000f64 } pub fn time_since_epoch_ms_i64() -> i64 { - //time_since_epoch().as_millis() - time_since_epoch_ms() as i64 + //time_since_epoch().as_millis() + 
time_since_epoch_ms() as i64 } pub async fn tokio_sleep(sleep_time_in_ms: i64) { - if sleep_time_in_ms <= 0 { return; } - tokio::time::sleep(std::time::Duration::from_millis(sleep_time_in_ms as u64)).await; + if sleep_time_in_ms <= 0 { + return; + } + tokio::time::sleep(std::time::Duration::from_millis(sleep_time_in_ms as u64)).await; } pub async fn tokio_sleep_until(wake_time_as_ms_since_epoch: i64) { - tokio_sleep(wake_time_as_ms_since_epoch - time_since_epoch_ms_i64()).await; -} \ No newline at end of file + tokio_sleep(wake_time_as_ms_since_epoch - time_since_epoch_ms_i64()).await; +} diff --git a/Packages/rust-shared/src/utils/type_aliases.rs b/Packages/rust-shared/src/utils/type_aliases.rs index 6fcc481bf..d46ca080f 100644 --- a/Packages/rust-shared/src/utils/type_aliases.rs +++ b/Packages/rust-shared/src/utils/type_aliases.rs @@ -7,4 +7,4 @@ pub type JWTDuration = jwt_simple::prelude::Duration; // channels pub type FSender = flume::Sender; -pub type FReceiver = flume::Receiver; \ No newline at end of file +pub type FReceiver = flume::Receiver; diff --git a/Packages/web-server/src/main.rs b/Packages/web-server/src/main.rs index 1ea579281..435ae9cd6 100644 --- a/Packages/web-server/src/main.rs +++ b/Packages/web-server/src/main.rs @@ -18,6 +18,7 @@ // to avoid false-positives, of certain functions, as well as for [Serialize/Deserialize]_Stub macro-usage (wrt private fields) dead_code, )] +#![feature(stmt_expr_attributes)] // allow attributes on expressions, eg. for disabling rustfmt per-expression use rust_shared::anyhow::Error; use static_web_server::Settings; @@ -27,18 +28,18 @@ use std::path::PathBuf; //use axum_server::main_axum; //mod axum_server; -static STATIC_DIR_PATH: &'static str = "../client/Dist"; +static STATIC_DIR_PATH: &'static str = "../client/Dist"; fn main() -> Result<(), Error> { - //return main_axum(); + //return main_axum(); - let mut opts = Settings::get(true)?; - opts.general.port = 5100; - opts.general.root = PathBuf::from(STATIC_DIR_PATH); - opts.general.health = true; - opts.general.compression_static = true; - opts.general.page_fallback = format!("{STATIC_DIR_PATH}/index.html").into(); - static_web_server::Server::new(opts)?.run_standalone(None)?; + let mut opts = Settings::get(true)?; + opts.general.port = 5100; + opts.general.root = PathBuf::from(STATIC_DIR_PATH); + opts.general.health = true; + opts.general.compression_static = true; + opts.general.page_fallback = format!("{STATIC_DIR_PATH}/index.html").into(); + static_web_server::Server::new(opts)?.run_standalone(None)?; - Ok(()) -} \ No newline at end of file + Ok(()) +} diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000..4c86cad97 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,15 @@ +hard_tabs = true +fn_params_layout = "Compressed" +match_block_trailing_comma = true +newline_style = "Unix" +use_field_init_shorthand = true +overflow_delimited_expr = true + +inline_attribute_width = 50 # this doesn't apply for in-function code-lines with the #[cfg(...)] attribute unfortunately + +max_width = 290 +use_small_heuristics = "Max" +# overrides for fields that (otherwise) default to max_width (keep these as multiples of 50) +single_line_if_else_max_width = 100 +struct_lit_width = 150 +struct_variant_width = 150 \ No newline at end of file
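For reference, a small sketch (not from the repo) of the shape this rustfmt.toml produces: tab indentation from hard_tabs, small struct literals kept on one line via use_small_heuristics = "Max" with struct_lit_width = 150, and a trailing comma after block-bodied match arms from match_block_trailing_comma. Note that several of the listed options (e.g. overflow_delimited_expr, inline_attribute_width) are currently unstable rustfmt settings.

struct Counter {
	value: i64,
	step: i64,
}

fn describe(c: &Counter) -> String {
	// small struct literal stays on one line (fits within struct_lit_width = 150)
	let start = Counter { value: 0, step: 1 };
	match c.value {
		0 => format!("at start (default step {})", start.step),
		v => {
			// match_block_trailing_comma = true: note the comma after this arm's closing brace
			format!("at {} (step {})", v, c.step)
		},
	}
}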

Navigate to debatemap.app instead. (or localhost:5100/localhost:5101, if running Debate Map locally)