diff --git a/Cargo.toml b/Cargo.toml
index e3723e0c..e9c39da9 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -15,8 +15,8 @@ publish = false # cargo dist --> Avoid publishing to crates.io
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 [dependencies]
-mesa = "0.16.5"
-# mesa = { path = "../mesa" } # Only for development purposes
+# mesa = "0.16.5"
+mesa = { path = "../mesa" } # Only for development purposes
 chrono = "0.4.31"
 anyhow = "1.0.44"
 reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls", "socks"] }
diff --git a/build.rs b/build.rs
index c2d3d475..de0df37b 100644
--- a/build.rs
+++ b/build.rs
@@ -9,7 +9,7 @@ fn main() -> Result<(), Error> {
         Some(outdir) => outdir,
     };
 
-    let mut cmd = build_cli(None, Vec::new());
+    let mut cmd = build_cli(None, &Vec::new());
     let path = generate_to(
         Bash,
         &mut cmd,     // We need to specify what generator to use
         "manta",      // We need to specify the bin name manually
diff --git a/src/cli/build.rs b/src/cli/build.rs
index 22cb461b..f915e4b4 100644
--- a/src/cli/build.rs
+++ b/src/cli/build.rs
@@ -2,7 +2,7 @@ use clap::{arg, value_parser, ArgAction, ArgGroup, Command};
 
 use std::path::PathBuf;
 
-pub fn build_cli(hsm_group: Option<&String>, hsm_available_vec: Vec<String>) -> Command {
+pub fn build_cli(hsm_group: Option<&String>, hsm_available_vec: &[String]) -> Command {
     Command::new(env!("CARGO_PKG_NAME"))
         .term_width(100)
         .version(env!("CARGO_PKG_VERSION"))
@@ -78,7 +78,7 @@ pub fn build_cli(hsm_group: Option<&String>, hsm_available_vec: Vec<String>) -> Command {
         .subcommand(subcommand_config(hsm_available_vec))
 }
 
-pub fn subcommand_config(hsm_available_opt: Vec<String>) -> Command {
+pub fn subcommand_config(hsm_available_opt: &[String]) -> Command {
     // Enforce the user choosing a HSM group if the hsm_available config param is not empty. This is to
     // make sure tenants like PSI won't unset parameter hsm_group and take over all HSM groups.
     // NOTE: by default 'manta config set hsm' will unset the hsm_group config value and the user
diff --git a/src/cli/commands/config_set.rs b/src/cli/commands/config_set.rs
index eaca731b..5d58189b 100644
--- a/src/cli/commands/config_set.rs
+++ b/src/cli/commands/config_set.rs
@@ -8,6 +8,7 @@ pub async fn exec(
     shasta_base_url: &str,
     shasta_root_cert: &[u8],
     new_hsm_opt: Option<&String>,
+    all_hsm_available_vec: &[String],
 ) {
     // Read configuration file
 
@@ -35,33 +36,14 @@ pub async fn exec(
         .expect("ERROR: could not parse configuration file to TOML");
 
     // VALIDATION
-    let hsm_available_vec;
-    if doc.get("hsm_available").is_some()
-        && doc["hsm_available"].as_array().is_some()
-        && !doc["hsm_available"].as_array().unwrap().is_empty()
-    {
+    if !all_hsm_available_vec.is_empty() {
         // If hsm_available config param has values, then a tenant is running manta ==> enforce
        // config param 'hsm_group' has a value from 'hsm_available' because tenants can't unset
        // 'hsm_group', otherwise they would be able to operate on any HSM group in the system.
        // Note: tenants can't modify the configuration file directly because manta runs as the
        // manta user using the sticky bit
-        hsm_available_vec = doc["hsm_available"]
-            .as_array()
-            .unwrap()
-            .iter()
-            .map(|hsm_group_value| hsm_group_value.as_str().unwrap().to_string())
-            .collect::<Vec<String>>();
-
-        /* if new_hsm_opt.is_none() {
-            println!("new hsm is empty!");
-            eprintln!(
-                "Please provide one of the following HSM values {:?}",
-                hsm_available_vec
-            );
-            std::process::exit(1);
-        } */
-
-        validate_hsm_group_and_hsm_available_config_params(new_hsm_opt.unwrap(), hsm_available_vec);
+
+        validate_hsm_group_and_hsm_available_config_params(new_hsm_opt.unwrap(), all_hsm_available_vec);
 
         // All good, we are safe to update 'hsm_group' config param
         log::info!(
@@ -74,13 +56,13 @@ pub async fn exec(
         // 'hsm_available' config param is empty or does not exist, then an admin user is running
         // manta and 'hsm_group' config param is empty or does not exist, then it is safe to remove
         // this param from the config file
-        log::info!("New HSM value not provided. Unset 'hsm_group' config param");
-        doc.remove("hsm_group");
+        //
+        // NOTHING TO DO
     } else {
        // 'hsm_available' config param is empty or does not exist (an admin user is running manta)
        // and 'hsm_group' has a value, then we fetch all HSM groups from CSM and check the user is
        // asking to put a valid HSM group in the configuration file
-        hsm_available_vec = mesa::shasta::hsm::http_client::get_all_hsm_groups(
+        let all_hsm_available_vec = mesa::shasta::hsm::http_client::get_all_hsm_groups(
            shasta_token,
            shasta_base_url,
            shasta_root_cert,
        )
        .await
        .unwrap()
        .iter()
        .map(|hsm_group_value| hsm_group_value["label"].as_str().unwrap().to_string())
        .collect::<Vec<String>>();
 
-        validate_hsm_group_and_hsm_available_config_params(new_hsm_opt.unwrap(), hsm_available_vec);
+        validate_hsm_group_and_hsm_available_config_params(new_hsm_opt.unwrap(), &all_hsm_available_vec);
 
         // All good, we are safe to update 'hsm_group' config param
         log::info!(
@@ -125,7 +107,7 @@ pub async fn exec(
 
 pub fn validate_hsm_group_and_hsm_available_config_params(
     hsm_group: &String,
-    hsm_available_vec: Vec<String>,
+    hsm_available_vec: &[String],
 ) {
     if !hsm_available_vec.contains(hsm_group) {
         eprintln!(
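Reviewer note: the `Vec<String>` -> `&[String]` changes above rely on deref coercion — a borrowed `Vec<String>` coerces to `&[String]`, which is why the `build_cli(None, &Vec::new())` call in build.rs works unchanged and why `validate_hsm_group_and_hsm_available_config_params` no longer needs to take ownership. A minimal, self-contained sketch of that borrowing behaviour (the names here are illustrative, not from this codebase):

    fn validate(hsm_group: &String, hsm_available_vec: &[String]) -> bool {
        // Slice `contains` compares via `PartialEq`, so a `&String` needle works directly
        hsm_available_vec.contains(hsm_group)
    }

    fn main() {
        let hsm_available_vec = vec!["psi".to_string(), "zinal".to_string()];
        // A borrowed Vec<String> coerces to &[String]...
        assert!(validate(&"psi".to_string(), &hsm_available_vec));
        // ...and so does a borrowed empty temporary, as in build.rs
        assert!(!validate(&"psi".to_string(), &Vec::new()));
    }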
diff --git a/src/cli/commands/config_show.rs b/src/cli/commands/config_show.rs
index 3f616362..8c5235b4 100644
--- a/src/cli/commands/config_show.rs
+++ b/src/cli/commands/config_show.rs
@@ -1,4 +1,4 @@
-use crate::common::config_ops;
+use crate::common::{config_ops, jwt_ops};
 
 /// Prints Manta's configuration on screen
 pub async fn exec(shasta_token: &str, shasta_base_url: &str, shasta_root_cert: &[u8]) {
@@ -14,15 +14,28 @@ pub async fn exec(shasta_token: &str, shasta_base_url: &str, shasta_root_cert: &[u8]) {
     let k8s_api_url = settings.get_string("k8s_api_url").unwrap();
     let log_level = settings.get_string("log").unwrap_or("error".to_string()); */
 
     let settings_hsm_group = settings.get_string("hsm_group").unwrap_or("".to_string());
-    let settings_hsm_group_available_value_rslt = settings.get_array("hsm_available");
+    // let settings_hsm_group_available_value_rslt = settings.get_array("hsm_available");
+
+    let mut realm_access_role_vec = jwt_ops::get_claims_from_jwt_token(&shasta_token)
+        .unwrap()
+        .pointer("/realm_access/roles")
+        .unwrap()
+        .as_array()
+        .unwrap_or(&Vec::new())
+        .iter()
+        .map(|role_value| role_value.as_str().unwrap().to_string())
+        .collect::<Vec<String>>();
+
+    realm_access_role_vec
+        .retain(|role| !role.eq("offline_access") && !role.eq("uma_authorization"));
+
+    // println!("JWT token realm_access:\n{:?}", realm_access_role_vec);
+
+    let settings_hsm_available_vec = realm_access_role_vec;
 
     let hsm_group_available: String =
-        if let Ok(hsm_group_available_value) = settings_hsm_group_available_value_rslt {
-            hsm_group_available_value
-                .into_iter()
-                .map(|hsm_group| hsm_group.into_string().unwrap())
-                .collect::<Vec<String>>()
-                .join(", ")
+        if !settings_hsm_available_vec.is_empty() {
+            settings_hsm_available_vec.join(", ")
         } else {
             mesa::shasta::hsm::http_client::get_all_hsm_groups(
                 shasta_token,
                 shasta_base_url,
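Reviewer note: the role-to-HSM-group derivation added above is duplicated in main.rs (last hunk of this patch). A standalone sketch of the shared idea, assuming the JWT claims have already been parsed into a serde_json::Value (which is what mesa's jwt_ops appears to hand back); the helper name is hypothetical:

    use serde_json::{json, Value};

    fn hsm_groups_from_claims(claims: &Value) -> Vec<String> {
        let mut role_vec: Vec<String> = claims
            .pointer("/realm_access/roles")
            .and_then(Value::as_array)
            .unwrap_or(&Vec::new())
            .iter()
            .filter_map(|role_value| role_value.as_str().map(str::to_string))
            .collect();

        // Keycloak attaches these default realm roles to every token; they are not HSM groups
        role_vec.retain(|role| role != "offline_access" && role != "uma_authorization");

        role_vec
    }

    fn main() {
        // Hypothetical claims for a tenant that owns HSM group 'psi'
        let claims = json!({
            "realm_access": { "roles": ["offline_access", "uma_authorization", "psi"] }
        });
        assert_eq!(hsm_groups_from_claims(&claims), vec!["psi".to_string()]);
    }

Extracting a helper like this would also reconcile the two copies, which currently diverge on the missing-claim path (config_show.rs falls back to an empty list, main.rs unwraps).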
diff --git a/src/cli/process.rs b/src/cli/process.rs
index acbb21c4..6dea8c55 100644
--- a/src/cli/process.rs
+++ b/src/cli/process.rs
@@ -32,884 +32,904 @@ pub async fn process_cli(
     gitea_token: &str,
     gitea_base_url: &str,
     hsm_group: Option<&String>,
+    hsm_available_vec: &[String],
     // base_image_id: &str,
     k8s_api_url: &str,
 ) -> core::result::Result<(), Box<dyn std::error::Error>> {
-    if let Some(cli_get) = cli_root.subcommand_matches("get") {
-        if let Some(cli_get_configuration) = cli_get.subcommand_matches("configuration") {
-            /* let hsm_group_name = match hsm_group {
-                // ref: https://stackoverflow.com/a/32487173/1918003
-                None => cli_get_configuration.get_one::<String>("hsm-group"),
-                Some(hsm_group_val) => Some(hsm_group_val),
-            }; */
-            get_configuration::exec(
-                gitea_base_url,
-                gitea_token,
-                shasta_token,
-                shasta_base_url,
-                shasta_root_cert,
-                cli_get_configuration.get_one::<String>("name"),
-                // hsm_group_name,
-                cli_get_configuration
-                    .get_one::<bool>("most-recent")
-                    .cloned(),
-                cli_get_configuration.get_one::<u8>("limit"),
-            )
-            .await;
-        } else if let Some(cli_get_session) = cli_get.subcommand_matches("session") {
-            let session_name = cli_get_session.get_one::<String>("name");
-
-            let hsm_group_name = match hsm_group {
-                // ref: https://stackoverflow.com/a/32487173/1918003
-                None => cli_get_session.get_one::<String>("hsm-group"),
-                Some(hsm_group_val) => Some(hsm_group_val),
-            };
-
-            let most_recent = cli_get_session.get_one::<bool>("most-recent");
-
-            let limit_number = if let Some(true) = most_recent {
-                Some(&1)
-            } else if let Some(false) = most_recent {
-                cli_get_session.get_one::<u8>("limit")
-            } else {
-                None
-            };
-
-            get_session::exec(
-                shasta_token,
-                shasta_base_url,
-                shasta_root_cert,
-                hsm_group_name,
-                session_name,
-                limit_number,
-                cli_get_session.get_one("output"),
-            )
-            .await;
-        } else if let Some(cli_get_template) = cli_get.subcommand_matches("template") {
-            let hsm_group_name = match hsm_group {
-                None => cli_get_template.get_one::<String>("hsm-group"),
-                Some(hsm_group_val) => Some(hsm_group_val),
-            };
-            get_template::exec(
-                // hsm_group,
-                shasta_token,
-                shasta_base_url,
-                shasta_root_cert,
-                hsm_group_name,
-                cli_get_template.get_one::<String>("name"),
-                cli_get_template.get_one::<bool>("most-recent").cloned(),
-                cli_get_template.get_one::<u8>("limit"),
-            )
-            .await;
-        } else if let Some(cli_get_node) = cli_get.subcommand_matches("nodes") {
-            // Check HSM group name provided and configuration file
-            let hsm_group_name = match hsm_group {
-                None => cli_get_node.get_one::<String>("HSM_GROUP_NAME"),
-                Some(_) => hsm_group,
-            };
-            get_nodes::exec(
-                shasta_token,
-                shasta_base_url,
-                shasta_root_cert,
-                hsm_group_name,
-                *cli_get_node
-                    .get_one::<bool>("nids-only-one-line")
-                    .unwrap_or(&false),
-                *cli_get_node
-                    .get_one::<bool>("xnames-only-one-line")
-                    .unwrap_or(&false),
-                cli_get_node.get_one::<String>("output"),
-            )
-            .await;
-        } else if let Some(cli_get_hsm_groups) = cli_get.subcommand_matches("hsm-groups") {
-            let hsm_group_name = match hsm_group {
-                None => cli_get_hsm_groups
-                    .get_one::<String>("HSM_GROUP_NAME")
-                    .unwrap(),
-                Some(hsm_group_name_value) => hsm_group_name_value,
-            };
-            get_hsm::exec(
-                shasta_token,
-                shasta_base_url,
-                shasta_root_cert,
-                hsm_group_name,
-            )
-            .await;
-        } else if let Some(cli_get_images) = cli_get.subcommand_matches("images") {
-            let hsm_group_name = match hsm_group {
-                // ref: https://stackoverflow.com/a/32487173/1918003
-                None => cli_get_images.get_one::<String>("hsm-group"),
-                Some(hsm_group_val) => Some(hsm_group_val),
-            };
-            get_images::exec(
-                shasta_token,
-                shasta_base_url,
-                shasta_root_cert,
-                hsm_group_name,
-                cli_get_images.get_one::<u8>("limit"),
-            )
-            .await;
+    if let Some(cli_config) = cli_root.subcommand_matches("config") {
+        if let Some(_cli_config_show) = cli_config.subcommand_matches("show") {
+            config_show::exec(shasta_token, shasta_base_url, shasta_root_cert).await;
+        } else if let Some(cli_config_set) = cli_config.subcommand_matches("set") {
+            if let Some(cli_config_set_hsm) = cli_config_set.subcommand_matches("hsm") {
+                config_set::exec(
+                    shasta_token,
+                    shasta_base_url,
+                    shasta_root_cert,
+                    cli_config_set_hsm.get_one::<String>("HSM_GROUP_NAME"),
+                    hsm_available_vec,
+                )
+                .await;
+            }
+        }
+    } else {
+        println!("hsm_group: {:?}", hsm_group);
+        println!("hsm group available: {:?}", hsm_available_vec);
+        // Validate hsm_available and hsm_group
+        if hsm_group.is_none() && !hsm_available_vec.is_empty() {
+            eprintln!("HSM group not defined. Please use 'manta config set hsm <HSM_GROUP_NAME>' to set the HSM group to use in your requests. Exit");
+            std::process::exit(1);
         }
-    } else if let Some(cli_apply) = cli_root.subcommand_matches("apply") {
-        /* if let Some(cli_apply_configuration) = cli_apply.subcommand_matches("configuration") {
-            let timestamp = chrono::Utc::now().format("%Y%m%d%H%M%S").to_string();
-            apply_configuration::exec(
-                cli_apply_configuration.get_one("file").unwrap(),
-                shasta_token,
-                shasta_base_url,
-                &timestamp,
-            )
-            .await;
-        } else */
-        if let Some(cli_apply_session) = cli_apply.subcommand_matches("session") {
-            let hsm_group_name = match hsm_group {
-                // ref: https://stackoverflow.com/a/32487173/1918003
-                None => cli_apply_session.get_one::<String>("hsm-group"),
-                Some(hsm_group_val) => Some(hsm_group_val),
-            };
-            apply_session::exec(
-                gitea_token,
-                gitea_base_url,
-                vault_base_url,
-                vault_secret_path,
-                vault_role_id,
-                shasta_token,
-                shasta_base_url,
-                shasta_root_cert,
-                k8s_api_url,
-                cli_apply_session.get_one::<String>("name").cloned(),
-                hsm_group_name,
-                cli_apply_session
-                    .get_many("repo-path")
-                    .unwrap()
-                    .cloned()
-                    .collect(),
-                cli_apply_session
-                    .get_one::<String>("ansible-limit")
-                    .cloned(),
-                cli_apply_session
-                    .get_one::<u8>("ansible-verbosity")
-                    .cloned(),
-                cli_apply_session
-                    .get_one::<String>("ansible-passthrough")
-                    .cloned(),
-                *cli_apply_session
-                    .get_one::<bool>("watch-logs")
-                    .unwrap_or(&false),
-            )
-            .await;
-        } else if let Some(cli_apply_image) = cli_apply.subcommand_matches("image") {
-            let tag = if let Some(input_tag) = cli_apply_image.get_one::<String>("tag") {
-                input_tag.clone()
-            } else {
-                chrono::Utc::now().format("%Y%m%d%H%M%S").to_string()
-            };
-            apply_image::exec(
-                vault_base_url,
-                vault_secret_path,
-                vault_role_id,
-                cli_apply_image.get_one("file").unwrap(),
-                shasta_token,
-                shasta_base_url,
-                shasta_root_cert,
-                // base_image_id,
-                cli_apply_image.get_one::<u8>("ansible-verbosity"),
-                cli_apply_image.get_one::<String>("ansible-passthrough"),
-                cli_apply_image.get_one::<bool>("watch-logs"),
-                &tag,
-                hsm_group,
-                k8s_api_url,
-                cli_apply_image.get_one::<String>("output"),
-            )
-            .await;
-        } else if let Some(cli_apply_cluster) = cli_apply.subcommand_matches("cluster") {
-            let tag = if let Some(input_tag) = 
cli_apply_cluster.get_one::("tag") { - input_tag.clone() - } else { - chrono::Utc::now().format("%Y%m%d%H%M%S").to_string() - }; - apply_cluster::exec( - vault_base_url, - vault_secret_path, - vault_role_id, - // cli_apply_cluster, - shasta_token, - shasta_base_url, - shasta_root_cert, - cli_apply_cluster.get_one("file").unwrap(), - // base_image_id, - hsm_group, - cli_apply_cluster.get_one::("ansible-verbosity"), - cli_apply_cluster.get_one::("ansible-passthrough"), - k8s_api_url, - tag, - cli_apply_cluster.get_one::("output"), - ) - .await; - } else if let Some(cli_apply_node) = cli_apply.subcommand_matches("node") { - if let Some(cli_apply_node_on) = cli_apply_node.subcommand_matches("on") { - apply_node_on::exec( - hsm_group, + if let Some(cli_get) = cli_root.subcommand_matches("get") { + if let Some(cli_get_configuration) = cli_get.subcommand_matches("configuration") { + /* let hsm_group_name = match hsm_group { + // ref: https://stackoverflow.com/a/32487173/1918003 + None => cli_get_configuration.get_one::("hsm-group"), + Some(hsm_group_val) => Some(hsm_group_val), + }; */ + get_configuration::exec( + gitea_base_url, + gitea_token, shasta_token, shasta_base_url, shasta_root_cert, - cli_apply_node_on - .get_one::("XNAMES") - .unwrap() - .split(',') - .map(|xname| xname.trim()) - .collect(), - cli_apply_node_on.get_one::("reason").cloned(), + cli_get_configuration.get_one::("name"), + // hsm_group_name, + cli_get_configuration + .get_one::("most-recent") + .cloned(), + cli_get_configuration.get_one::("limit"), ) .await; - } else if let Some(cli_apply_node_off) = cli_apply_node.subcommand_matches("off") { - apply_node_off::exec( - hsm_group, + } else if let Some(cli_get_session) = cli_get.subcommand_matches("session") { + let session_name = cli_get_session.get_one::("name"); + + let hsm_group_name = match hsm_group { + // ref: https://stackoverflow.com/a/32487173/1918003 + None => cli_get_session.get_one::("hsm-group"), + Some(hsm_group_val) => Some(hsm_group_val), + }; + + let most_recent = cli_get_session.get_one::("most-recent"); + + let limit_number = if let Some(true) = most_recent { + Some(&1) + } else if let Some(false) = most_recent { + cli_get_session.get_one::("limit") + } else { + None + }; + + get_session::exec( shasta_token, shasta_base_url, shasta_root_cert, - cli_apply_node_off - .get_one::("XNAMES") + hsm_group_name, + session_name, + limit_number, + cli_get_session.get_one("output"), + ) + .await; + } else if let Some(cli_get_template) = cli_get.subcommand_matches("template") { + let hsm_group_name = match hsm_group { + None => cli_get_template.get_one::("hsm-group"), + Some(hsm_group_val) => Some(hsm_group_val), + }; + get_template::exec( + // hsm_group, + shasta_token, + shasta_base_url, + shasta_root_cert, + hsm_group_name, + cli_get_template.get_one::("name"), + cli_get_template.get_one::("most-recent").cloned(), + cli_get_template.get_one::("limit"), + ) + .await; + } else if let Some(cli_get_node) = cli_get.subcommand_matches("nodes") { + // Check HSM group name provided and configuration file + let hsm_group_name = match hsm_group { + None => cli_get_node.get_one::("HSM_GROUP_NAME"), + Some(_) => hsm_group, + }; + get_nodes::exec( + shasta_token, + shasta_base_url, + shasta_root_cert, + hsm_group_name, + *cli_get_node + .get_one::("nids-only-one-line") + .unwrap_or(&false), + *cli_get_node + .get_one::("xnames-only-one-line") + .unwrap_or(&false), + cli_get_node.get_one::("output"), + ) + .await; + } else if let Some(cli_get_hsm_groups) = 
cli_get.subcommand_matches("hsm-groups") { + let hsm_group_name = match hsm_group { + None => cli_get_hsm_groups + .get_one::("HSM_GROUP_NAME") + .unwrap(), + Some(hsm_group_name_value) => hsm_group_name_value, + }; + get_hsm::exec( + shasta_token, + shasta_base_url, + shasta_root_cert, + hsm_group_name, + ) + .await; + } else if let Some(cli_get_images) = cli_get.subcommand_matches("images") { + let hsm_group_name = match hsm_group { + // ref: https://stackoverflow.com/a/32487173/1918003 + None => cli_get_images.get_one::("hsm-group"), + Some(hsm_group_val) => Some(hsm_group_val), + }; + get_images::exec( + shasta_token, + shasta_base_url, + shasta_root_cert, + hsm_group_name, + cli_get_images.get_one::("limit"), + ) + .await; + } + } else if let Some(cli_apply) = cli_root.subcommand_matches("apply") { + /* if let Some(cli_apply_configuration) = cli_apply.subcommand_matches("configuration") { + let timestamp = chrono::Utc::now().format("%Y%m%d%H%M%S").to_string(); + apply_configuration::exec( + cli_apply_configuration.get_one("file").unwrap(), + shasta_token, + shasta_base_url, + ×tamp, + ) + .await; + } else */ + if let Some(cli_apply_session) = cli_apply.subcommand_matches("session") { + let hsm_group_name = match hsm_group { + // ref: https://stackoverflow.com/a/32487173/1918003 + None => cli_apply_session.get_one::("hsm-group"), + Some(hsm_group_val) => Some(hsm_group_val), + }; + apply_session::exec( + gitea_token, + gitea_base_url, + vault_base_url, + vault_secret_path, + vault_role_id, + shasta_token, + shasta_base_url, + shasta_root_cert, + k8s_api_url, + cli_apply_session.get_one::("name").cloned(), + hsm_group_name, + cli_apply_session + .get_many("repo-path") .unwrap() - .split(',') - .map(|xname| xname.trim()) + .cloned() .collect(), - cli_apply_node_off.get_one::("reason").cloned(), - *cli_apply_node_off.get_one::("force").unwrap(), + cli_apply_session + .get_one::("ansible-limit") + .cloned(), + cli_apply_session + .get_one::("ansible-verbosity") + .cloned(), + cli_apply_session + .get_one::("ansible-passthrough") + .cloned(), + *cli_apply_session + .get_one::("watch-logs") + .unwrap_or(&false), ) .await; - } else if let Some(cli_apply_node_reset) = cli_apply_node.subcommand_matches("reset") { - apply_node_reset::exec( + } else if let Some(cli_apply_image) = cli_apply.subcommand_matches("image") { + let tag = if let Some(input_tag) = cli_apply_image.get_one::("tag") { + input_tag.clone() + } else { + chrono::Utc::now().format("%Y%m%d%H%M%S").to_string() + }; + + apply_image::exec( + vault_base_url, + vault_secret_path, + vault_role_id, + cli_apply_image.get_one("file").unwrap(), + shasta_token, + shasta_base_url, + shasta_root_cert, + // base_image_id, + cli_apply_image.get_one::("ansible-verbosity"), + cli_apply_image.get_one::("ansible-passthrough"), + cli_apply_image.get_one::("watch-logs"), + &tag, hsm_group, + k8s_api_url, + cli_apply_image.get_one::("output"), + ) + .await; + } else if let Some(cli_apply_cluster) = cli_apply.subcommand_matches("cluster") { + let tag = if let Some(input_tag) = cli_apply_cluster.get_one::("tag") { + input_tag.clone() + } else { + chrono::Utc::now().format("%Y%m%d%H%M%S").to_string() + }; + apply_cluster::exec( + vault_base_url, + vault_secret_path, + vault_role_id, + // cli_apply_cluster, shasta_token, shasta_base_url, shasta_root_cert, - cli_apply_node_reset + cli_apply_cluster.get_one("file").unwrap(), + // base_image_id, + hsm_group, + cli_apply_cluster.get_one::("ansible-verbosity"), + 
cli_apply_cluster.get_one::("ansible-passthrough"), + k8s_api_url, + tag, + cli_apply_cluster.get_one::("output"), + ) + .await; + } else if let Some(cli_apply_node) = cli_apply.subcommand_matches("node") { + if let Some(cli_apply_node_on) = cli_apply_node.subcommand_matches("on") { + apply_node_on::exec( + hsm_group, + shasta_token, + shasta_base_url, + shasta_root_cert, + cli_apply_node_on + .get_one::("XNAMES") + .unwrap() + .split(',') + .map(|xname| xname.trim()) + .collect(), + cli_apply_node_on.get_one::("reason").cloned(), + ) + .await; + } else if let Some(cli_apply_node_off) = cli_apply_node.subcommand_matches("off") { + apply_node_off::exec( + hsm_group, + shasta_token, + shasta_base_url, + shasta_root_cert, + cli_apply_node_off + .get_one::("XNAMES") + .unwrap() + .split(',') + .map(|xname| xname.trim()) + .collect(), + cli_apply_node_off.get_one::("reason").cloned(), + *cli_apply_node_off.get_one::("force").unwrap(), + ) + .await; + } else if let Some(cli_apply_node_reset) = + cli_apply_node.subcommand_matches("reset") + { + apply_node_reset::exec( + hsm_group, + shasta_token, + shasta_base_url, + shasta_root_cert, + cli_apply_node_reset + .get_one::("XNAMES") + .unwrap() + .split(',') + .map(|xname| xname.trim()) + .collect(), + cli_apply_node_reset.get_one::("reason"), + *cli_apply_node_reset + .get_one::("force") + .unwrap_or(&false), + ) + .await; + } + } else if let Some(cli_apply_ephemeral_environment) = + cli_apply.subcommand_matches("ephemeral-environment") + { + if !std::io::stdout().is_terminal() { + eprintln!("This command needs to run in interactive mode. Exit"); + std::process::exit(1); + } + + apply_ephemeral_env::exec( + shasta_token, + shasta_base_url, + shasta_root_cert, + // cli_apply_ephemeral_environment + // .get_one::("block") + // .copied(), + cli_apply_ephemeral_environment + .get_one::("image-id") + .unwrap(), + ) + .await; + } + } else if let Some(cli_update) = cli_root.subcommand_matches("update") { + if let Some(cli_update_node) = cli_update.subcommand_matches("nodes") { + let hsm_group_name = if hsm_group.is_none() { + cli_update_node.get_one::("HSM_GROUP_NAME") + } else { + hsm_group + }; + update_node::exec( + shasta_token, + shasta_base_url, + shasta_root_cert, + hsm_group_name, + cli_update_node.get_one::("boot-image"), + cli_update_node.get_one::("desired-configuration"), + cli_update_node .get_one::("XNAMES") .unwrap() .split(',') .map(|xname| xname.trim()) .collect(), - cli_apply_node_reset.get_one::("reason"), - *cli_apply_node_reset - .get_one::("force") - .unwrap_or(&false), + ) + .await; + } else if let Some(cli_update_hsm_group) = cli_update.subcommand_matches("hsm-group") { + let hsm_group_name = if hsm_group.is_none() { + cli_update_hsm_group.get_one::("HSM_GROUP_NAME") + } else { + hsm_group + }; + update_hsm_group::exec( + shasta_token, + shasta_base_url, + shasta_root_cert, + cli_update_hsm_group.get_one::("boot-image"), + cli_update_hsm_group.get_one::("desired-configuration"), + hsm_group_name.unwrap(), ) .await; } - } else if let Some(cli_apply_ephemeral_environment) = - cli_apply.subcommand_matches("ephemeral-environment") - { - if !std::io::stdout().is_terminal() { - eprintln!("This command needs to run in interactive mode. 
Exit"); - std::process::exit(1); - } - - apply_ephemeral_env::exec( + } else if let Some(cli_log) = cli_root.subcommand_matches("log") { + commands::log::exec( + // cli_log, shasta_token, shasta_base_url, shasta_root_cert, - // cli_apply_ephemeral_environment - // .get_one::("block") - // .copied(), - cli_apply_ephemeral_environment - .get_one::("image-id") - .unwrap(), - ) - .await; - } - } else if let Some(cli_update) = cli_root.subcommand_matches("update") { - if let Some(cli_update_node) = cli_update.subcommand_matches("nodes") { - let hsm_group_name = if hsm_group.is_none() { - cli_update_node.get_one::("HSM_GROUP_NAME") - } else { - hsm_group - }; - update_node::exec( - shasta_token, - shasta_base_url, - shasta_root_cert, - hsm_group_name, - cli_update_node.get_one::("boot-image"), - cli_update_node.get_one::("desired-configuration"), - cli_update_node - .get_one::("XNAMES") - .unwrap() - .split(',') - .map(|xname| xname.trim()) - .collect(), - ) - .await; - } else if let Some(cli_update_hsm_group) = cli_update.subcommand_matches("hsm-group") { - let hsm_group_name = if hsm_group.is_none() { - cli_update_hsm_group.get_one::("HSM_GROUP_NAME") - } else { - hsm_group - }; - update_hsm_group::exec( - shasta_token, - shasta_base_url, - shasta_root_cert, - cli_update_hsm_group.get_one::("boot-image"), - cli_update_hsm_group.get_one::("desired-configuration"), - hsm_group_name.unwrap(), + vault_base_url, + vault_secret_path, + vault_role_id, + k8s_api_url, + None, + cli_log.get_one::("SESSION_NAME"), + // cli_log.get_one::("layer-id"), + hsm_group, ) .await; - } - } else if let Some(cli_log) = cli_root.subcommand_matches("log") { - commands::log::exec( - // cli_log, + /* } else if let Some(cli_console) = cli_root.subcommand_matches("console") { + console_node::exec( + hsm_group, + // cli_console, shasta_token, shasta_base_url, - shasta_root_cert, vault_base_url, vault_secret_path, vault_role_id, k8s_api_url, - None, - cli_log.get_one::("SESSION_NAME"), - // cli_log.get_one::("layer-id"), - hsm_group, + cli_console.get_one::("XNAME").unwrap(), ) - .await; - /* } else if let Some(cli_console) = cli_root.subcommand_matches("console") { - console_node::exec( - hsm_group, - // cli_console, - shasta_token, - shasta_base_url, - vault_base_url, - vault_secret_path, - vault_role_id, - k8s_api_url, - cli_console.get_one::("XNAME").unwrap(), - ) - .await; */ - } else if let Some(cli_console) = cli_root.subcommand_matches("console") { - if let Some(cli_console_node) = cli_console.subcommand_matches("node") { - if !std::io::stdout().is_terminal() { - eprintln!("This command needs to run in interactive mode. Exit"); - std::process::exit(1); - } + .await; */ + } else if let Some(cli_console) = cli_root.subcommand_matches("console") { + if let Some(cli_console_node) = cli_console.subcommand_matches("node") { + if !std::io::stdout().is_terminal() { + eprintln!("This command needs to run in interactive mode. Exit"); + std::process::exit(1); + } - console_node::exec( - hsm_group, - // cli_console, - shasta_token, - shasta_base_url, - shasta_root_cert, - vault_base_url, - vault_secret_path, - vault_role_id, - k8s_api_url, - cli_console_node.get_one::("XNAME").unwrap(), - ) - .await; - } else if let Some(cli_console_target_ansible) = - cli_console.subcommand_matches("target-ansible") - { - if !std::io::stdout().is_terminal() { - eprintln!("This command needs to run in interactive mode. 
Exit"); - std::process::exit(1); - } + console_node::exec( + hsm_group, + // cli_console, + shasta_token, + shasta_base_url, + shasta_root_cert, + vault_base_url, + vault_secret_path, + vault_role_id, + k8s_api_url, + cli_console_node.get_one::("XNAME").unwrap(), + ) + .await; + } else if let Some(cli_console_target_ansible) = + cli_console.subcommand_matches("target-ansible") + { + if !std::io::stdout().is_terminal() { + eprintln!("This command needs to run in interactive mode. Exit"); + std::process::exit(1); + } - console_cfs_session_image_target_ansible::exec( - hsm_group, - // cli_console, - shasta_token, - shasta_base_url, - shasta_root_cert, - vault_base_url, - vault_secret_path, - vault_role_id, - k8s_api_url, - cli_console_target_ansible - .get_one::("SESSION_NAME") - .unwrap(), - ) - .await; - } - } else if let Some(cli_config) = cli_root.subcommand_matches("config") { - if let Some(_cli_config_show) = cli_config.subcommand_matches("show") { - config_show::exec(shasta_token, shasta_base_url, shasta_root_cert).await; - } else if let Some(cli_config_set) = cli_config.subcommand_matches("set") { - if let Some(cli_config_set_hsm) = cli_config_set.subcommand_matches("hsm") { - config_set::exec( + console_cfs_session_image_target_ansible::exec( + hsm_group, + // cli_console, shasta_token, shasta_base_url, shasta_root_cert, - cli_config_set_hsm.get_one::("HSM_GROUP_NAME"), + vault_base_url, + vault_secret_path, + vault_role_id, + k8s_api_url, + cli_console_target_ansible + .get_one::("SESSION_NAME") + .unwrap(), ) .await; } - } - } else if let Some(cli_delete) = cli_root.subcommand_matches("delete") { - let since_opt = if let Some(since) = cli_delete.get_one::("since") { - let date_time = chrono::NaiveDateTime::parse_from_str( - &(since.to_string() + "T00:00:00"), - "%Y-%m-%dT%H:%M:%S", - ) - .unwrap(); - Some(date_time) - } else { - None - }; - - let until_opt = if let Some(until) = cli_delete.get_one::("until") { - let date_time = chrono::NaiveDateTime::parse_from_str( - &(until.to_string() + "T00:00:00"), - "%Y-%m-%dT%H:%M:%S", - ) - .unwrap(); - Some(date_time) - } else { - None - }; - - let cfs_configuration_name_opt = cli_delete.get_one::("configuration-name"); - - let hsm_group_name_opt = if hsm_group.is_some() { - hsm_group - } else { - cli_delete.get_one::("hsm-group") - }; - - // INPUT VALIDATION - Check since date is prior until date - if since_opt.is_some() && until_opt.is_some() && since_opt.unwrap() > until_opt.unwrap() { - println!("since date can't be after until date. 
Exit"); - std::process::exit(1); - } + } else if let Some(cli_delete) = cli_root.subcommand_matches("delete") { + let since_opt = if let Some(since) = cli_delete.get_one::("since") { + let date_time = chrono::NaiveDateTime::parse_from_str( + &(since.to_string() + "T00:00:00"), + "%Y-%m-%dT%H:%M:%S", + ) + .unwrap(); + Some(date_time) + } else { + None + }; - // COLLECT SITE WIDE DATA FOR VALIDATION - // + let until_opt = if let Some(until) = cli_delete.get_one::("until") { + let date_time = chrono::NaiveDateTime::parse_from_str( + &(until.to_string() + "T00:00:00"), + "%Y-%m-%dT%H:%M:%S", + ) + .unwrap(); + Some(date_time) + } else { + None + }; - // Check dessired configuration not using any CFS configuration to delete: Get all CFS components in CSM - let cfs_components = mesa::shasta::cfs::component::http_client::get_multiple_components( - shasta_token, - shasta_base_url, - shasta_root_cert, - None, - None, - ) - .await - .unwrap(); + let cfs_configuration_name_opt = cli_delete.get_one::("configuration-name"); - // Check images related to CFS configurations to delete are not used to boot nodes. For - // this we need to get images from both CFS session and BOS sessiontemplate because CSCS staff - // Get all BSS boot params - let boot_param_vec = mesa::shasta::bss::http_client::get_boot_params( - shasta_token, - shasta_base_url, - shasta_root_cert, - &[], - ) - .await - .unwrap(); + let hsm_group_name_opt = if hsm_group.is_some() { + hsm_group + } else { + cli_delete.get_one::("hsm-group") + }; - let mut cfs_configuration_value_vec = mesa::shasta::cfs::configuration::http_client::get( - shasta_token, - shasta_base_url, - shasta_root_cert, - cfs_configuration_name_opt, - None, - ) - .await - .unwrap(); + // INPUT VALIDATION - Check since date is prior until date + if since_opt.is_some() && until_opt.is_some() && since_opt.unwrap() > until_opt.unwrap() + { + println!("since date can't be after until date. Exit"); + std::process::exit(1); + } - // Filter CFS configurations based on user input - if since_opt.is_some() && until_opt.is_some() { - cfs_configuration_value_vec.retain(|cfs_configuration_value| { - let date = chrono::DateTime::parse_from_rfc3339( - cfs_configuration_value["lastUpdated"].as_str().unwrap(), - ) - .unwrap() - .naive_utc(); + // COLLECT SITE WIDE DATA FOR VALIDATION + // - since_opt.unwrap() <= date && date < until_opt.unwrap() - }); - } else if cfs_configuration_name_opt.is_some() { - cfs_configuration_value_vec.retain(|cfs_configuration_value| { - cfs_configuration_value["name"] - .as_str() - .unwrap() - .eq_ignore_ascii_case(cfs_configuration_name_opt.unwrap()) - }); - } + // Check dessired configuration not using any CFS configuration to delete: Get all CFS components in CSM + let cfs_components = + mesa::shasta::cfs::component::http_client::get_multiple_components( + shasta_token, + shasta_base_url, + shasta_root_cert, + None, + None, + ) + .await + .unwrap(); - // Get list CFS configuration names - let mut cfs_configuration_name_vec = cfs_configuration_value_vec - .iter() - .map(|configuration_value| configuration_value["name"].as_str().unwrap()) - .collect::>(); + // Check images related to CFS configurations to delete are not used to boot nodes. 
For + // this we need to get images from both CFS session and BOS sessiontemplate because CSCS staff + // Get all BSS boot params + let boot_param_vec = mesa::shasta::bss::http_client::get_boot_params( + shasta_token, + shasta_base_url, + shasta_root_cert, + &[], + ) + .await + .unwrap(); - // Check images related to CFS configurations to delete are not used to boot nodes. For - // this we need to get images from both CFS session and BOS sessiontemplate because CSCS staff - // Get all BOS session templates - let mut bos_sessiontemplate_value_vec = mesa::shasta::bos::template::http_client::get( - shasta_token, - shasta_base_url, - shasta_root_cert, - hsm_group_name_opt, - None, - None, - ) - .await - .unwrap(); - - // TODO: change to iter so we can later on get its image ids without having to copy memory - // to create new Vec - bos_sessiontemplate_value_vec.retain(|bos_sessiontemplate_value| { - cfs_configuration_name_vec.contains( - &bos_sessiontemplate_value - .pointer("/cfs/configuration") + let mut cfs_configuration_value_vec = + mesa::shasta::cfs::configuration::http_client::get( + shasta_token, + shasta_base_url, + shasta_root_cert, + cfs_configuration_name_opt, + None, + ) + .await + .unwrap(); + + // Filter CFS configurations based on user input + if since_opt.is_some() && until_opt.is_some() { + cfs_configuration_value_vec.retain(|cfs_configuration_value| { + let date = chrono::DateTime::parse_from_rfc3339( + cfs_configuration_value["lastUpdated"].as_str().unwrap(), + ) .unwrap() - .as_str() - .unwrap(), - ) - }); + .naive_utc(); - let cfs_configuration_name_from_bos_sessiontemplate_value_iter = - bos_sessiontemplate_value_vec - .iter() - .map(|bos_sessiontemplate_value| { - bos_sessiontemplate_value - .pointer("/cfs/configuration") - .unwrap() + since_opt.unwrap() <= date && date < until_opt.unwrap() + }); + } else if cfs_configuration_name_opt.is_some() { + cfs_configuration_value_vec.retain(|cfs_configuration_value| { + cfs_configuration_value["name"] .as_str() .unwrap() + .eq_ignore_ascii_case(cfs_configuration_name_opt.unwrap()) }); + } - // Check images related to CFS configurations to delete are not used to boot nodes. For - // this we need to get images from both CFS session and BOS sessiontemplate because CSCS staff - // Get all CFS sessions - let mut cfs_session_value_vec = mesa::shasta::cfs::session::http_client::get( - shasta_token, - shasta_base_url, - shasta_root_cert, - hsm_group_name_opt, - None, - None, - None, - ) - .await - .unwrap(); - - // TODO: change to iter so we can later on get its image ids without having to copy memory - // to create new Vec - cfs_session_value_vec.retain(|cfs_session_value| { - cfs_configuration_name_vec.contains( - &cfs_session_value - .pointer("/configuration/name") - .unwrap() - .as_str() - .unwrap(), + // Get list CFS configuration names + let mut cfs_configuration_name_vec = cfs_configuration_value_vec + .iter() + .map(|configuration_value| configuration_value["name"].as_str().unwrap()) + .collect::>(); + + // Check images related to CFS configurations to delete are not used to boot nodes. 
For + // this we need to get images from both CFS session and BOS sessiontemplate because CSCS staff + // Get all BOS session templates + let mut bos_sessiontemplate_value_vec = mesa::shasta::bos::template::http_client::get( + shasta_token, + shasta_base_url, + shasta_root_cert, + hsm_group_name_opt, + None, + None, ) - }); + .await + .unwrap(); - let cfs_configuration_name_from_cfs_sessions = - cfs_session_value_vec.iter().map(|cfs_session_value| { - cfs_session_value - .pointer("/configuration/name") - .unwrap() - .as_str() - .unwrap() + // TODO: change to iter so we can later on get its image ids without having to copy memory + // to create new Vec + bos_sessiontemplate_value_vec.retain(|bos_sessiontemplate_value| { + cfs_configuration_name_vec.contains( + &bos_sessiontemplate_value + .pointer("/cfs/configuration") + .unwrap() + .as_str() + .unwrap(), + ) }); - // Get list of CFS configuration names related to CFS sessions and BOS sessiontemplates - cfs_configuration_name_vec = cfs_configuration_name_from_bos_sessiontemplate_value_iter - .chain(cfs_configuration_name_from_cfs_sessions) - .collect::>(); - cfs_configuration_name_vec.sort(); - cfs_configuration_name_vec.dedup(); - - // Get final list of CFS configuration serde values related to CFS sessions and BOS - // sessiontemplates - cfs_configuration_value_vec.retain(|cfs_configuration_value| { - cfs_configuration_name_vec.contains(&cfs_configuration_value["name"].as_str().unwrap()) - }); - - // Get image ids from CFS sessions and BOS sessiontemplate related to CFS configuration to delete - let image_id_from_cfs_session_vec = - get_image_id_from_cfs_session_related_to_cfs_configuration(&cfs_session_value_vec); - - // Get image ids from BOS session template related to CFS configuration to delete - let image_id_from_bos_sessiontemplate_vec = - get_image_id_from_bos_sessiontemplate_related_to_cfs_configuration( - &bos_sessiontemplate_value_vec, - ); + let cfs_configuration_name_from_bos_sessiontemplate_value_iter = + bos_sessiontemplate_value_vec + .iter() + .map(|bos_sessiontemplate_value| { + bos_sessiontemplate_value + .pointer("/cfs/configuration") + .unwrap() + .as_str() + .unwrap() + }); - // Combine image ids from CFS session and BOS session template - let mut image_id_related_from_cfs_session_bos_sessiontemplate_vec = [ - image_id_from_cfs_session_vec, - image_id_from_bos_sessiontemplate_vec, - ] - .concat(); - - image_id_related_from_cfs_session_bos_sessiontemplate_vec.sort(); - image_id_related_from_cfs_session_bos_sessiontemplate_vec.dedup(); - - // Filter list of image ids by removing the ones that does not exists. This is because we - // currently image id list contains the values from CFS session and BOS sessiontemplate - // which does not means the image still exists (the image perse could have been deleted - // previously and the CFS session and BOS sessiontemplate not being cleared) - let mut image_id_vec = Vec::new(); - for image_id in &image_id_related_from_cfs_session_bos_sessiontemplate_vec { - if mesa::shasta::ims::image::http_client::get( + // Check images related to CFS configurations to delete are not used to boot nodes. 
For + // this we need to get images from both CFS session and BOS sessiontemplate because CSCS staff + // Get all CFS sessions + let mut cfs_session_value_vec = mesa::shasta::cfs::session::http_client::get( shasta_token, shasta_base_url, shasta_root_cert, hsm_group_name_opt, - Some(image_id), + None, + None, None, ) .await - .is_ok() - { - log::info!("Image ID {} exists", image_id); - image_id_vec.push(image_id.to_string()); - } - } + .unwrap(); - log::info!( - "Image id related to CFS sessions and/or BOS sessiontemplate: {:?}", - image_id_related_from_cfs_session_bos_sessiontemplate_vec - ); - log::info!("Image ids to delete: {:?}", image_id_vec); - - // Get list of CFS session name, CFS configuration name and image id - let cfs_session_cfs_configuration_image_id_tuple_iter = - cfs_session_value_vec.iter().map(|cfs_session_value| { - ( - cfs_session_value["name"].as_str().unwrap(), - cfs_session_value + // TODO: change to iter so we can later on get its image ids without having to copy memory + // to create new Vec + cfs_session_value_vec.retain(|cfs_session_value| { + cfs_configuration_name_vec.contains( + &cfs_session_value .pointer("/configuration/name") .unwrap() .as_str() .unwrap(), - cfs_session_value - .pointer("/status/artifacts/0/result_id") - .and_then(|result_id| result_id.as_str()) - .unwrap_or(""), ) }); - // Get list of BOS sessiontemplate name, CFS configuration name and image ids for compute nodes - let bos_sessiontemplate_cfs_configuration_compute_image_id_tuple_iter = - bos_sessiontemplate_value_vec - .iter() - .map(|bos_sessiontemplate_value| { - let cfs_session_name = bos_sessiontemplate_value["name"].as_str().unwrap(); - let cfs_configuration_name = bos_sessiontemplate_value - .pointer("/cfs/configuration") + let cfs_configuration_name_from_cfs_sessions = + cfs_session_value_vec.iter().map(|cfs_session_value| { + cfs_session_value + .pointer("/configuration/name") .unwrap() .as_str() - .unwrap(); - let image_id = if let Some(image_path_value) = - bos_sessiontemplate_value.pointer("/boot_sets/compute/path") - { - image_path_value - .as_str() - .unwrap() - .strip_prefix("s3://boot-images/") - .unwrap() - .strip_suffix("/manifest.json") - .unwrap() - } else { - "" - }; - (cfs_session_name, cfs_configuration_name, image_id) - }); - - // Get list of BOS sessiontemplate name, CFS configuration name and image ids for uan nodes - let bos_sessiontemplate_cfs_configuration_uan_image_id_tuple_iter = - bos_sessiontemplate_value_vec - .iter() - .map(|bos_sessiontemplate_value| { - let bos_sessiontemplate_name = - bos_sessiontemplate_value["name"].as_str().unwrap(); - let cfs_configuration_name = bos_sessiontemplate_value - .pointer("/cfs/configuration") .unwrap() - .as_str() - .unwrap(); - let image_id = if let Some(image_path_value) = - bos_sessiontemplate_value.pointer("/boot_sets/uan/path") - { - image_path_value - .as_str() - .unwrap() - .strip_prefix("s3://boot-images/") - .unwrap() - .strip_suffix("/manifest.json") - .unwrap() - } else { - "" - }; - (bos_sessiontemplate_name, cfs_configuration_name, image_id) }); - // Get final list of CFS configurations to delete. 
NOTE this list won't include CFS configurations with neither BOS sessiontemplate nor CFS session related, the reason is must filter data to delete by HSM group and CFS configurations by default are not related to any HSM group - let bos_sessiontemplate_cfs_configuration_image_id_tuple_iter = - bos_sessiontemplate_cfs_configuration_compute_image_id_tuple_iter - .chain(bos_sessiontemplate_cfs_configuration_uan_image_id_tuple_iter) - .collect::>(); - - // EVALUATE IF NEED TO CONTINUE. EXIT IF THERE IS NO DATA TO DELETE - // - if cfs_configuration_name_vec.is_empty() - && image_id_vec.is_empty() - && cfs_session_value_vec.is_empty() - && bos_sessiontemplate_value_vec.is_empty() - { - println!("Nothing to delete. Exit"); - std::process::exit(0); - } + // Get list of CFS configuration names related to CFS sessions and BOS sessiontemplates + cfs_configuration_name_vec = cfs_configuration_name_from_bos_sessiontemplate_value_iter + .chain(cfs_configuration_name_from_cfs_sessions) + .collect::>(); + cfs_configuration_name_vec.sort(); + cfs_configuration_name_vec.dedup(); - // PRINT SUMMARY/DATA TO DELETE - // - println!("CFS sessions to delete:"); + // Get final list of CFS configuration serde values related to CFS sessions and BOS + // sessiontemplates + cfs_configuration_value_vec.retain(|cfs_configuration_value| { + cfs_configuration_name_vec + .contains(&cfs_configuration_value["name"].as_str().unwrap()) + }); - let mut cfs_session_table = Table::new(); + // Get image ids from CFS sessions and BOS sessiontemplate related to CFS configuration to delete + let image_id_from_cfs_session_vec = + get_image_id_from_cfs_session_related_to_cfs_configuration(&cfs_session_value_vec); - cfs_session_table.set_header(vec!["Name", "Configuration", "Image ID"]); + // Get image ids from BOS session template related to CFS configuration to delete + let image_id_from_bos_sessiontemplate_vec = + get_image_id_from_bos_sessiontemplate_related_to_cfs_configuration( + &bos_sessiontemplate_value_vec, + ); - for cfs_session_tuple in cfs_session_cfs_configuration_image_id_tuple_iter { - cfs_session_table.add_row(vec![ - cfs_session_tuple.0, - cfs_session_tuple.1, - cfs_session_tuple.2, - ]); - } + // Combine image ids from CFS session and BOS session template + let mut image_id_related_from_cfs_session_bos_sessiontemplate_vec = [ + image_id_from_cfs_session_vec, + image_id_from_bos_sessiontemplate_vec, + ] + .concat(); + + image_id_related_from_cfs_session_bos_sessiontemplate_vec.sort(); + image_id_related_from_cfs_session_bos_sessiontemplate_vec.dedup(); + + // Filter list of image ids by removing the ones that does not exists. 
This is because we + // currently image id list contains the values from CFS session and BOS sessiontemplate + // which does not means the image still exists (the image perse could have been deleted + // previously and the CFS session and BOS sessiontemplate not being cleared) + let mut image_id_vec = Vec::new(); + for image_id in &image_id_related_from_cfs_session_bos_sessiontemplate_vec { + if mesa::shasta::ims::image::http_client::get( + shasta_token, + shasta_base_url, + shasta_root_cert, + hsm_group_name_opt, + Some(image_id), + None, + ) + .await + .is_ok() + { + log::info!("Image ID {} exists", image_id); + image_id_vec.push(image_id.to_string()); + } + } - println!("{cfs_session_table}"); + log::info!( + "Image id related to CFS sessions and/or BOS sessiontemplate: {:?}", + image_id_related_from_cfs_session_bos_sessiontemplate_vec + ); + log::info!("Image ids to delete: {:?}", image_id_vec); + + // Get list of CFS session name, CFS configuration name and image id + let cfs_session_cfs_configuration_image_id_tuple_iter = + cfs_session_value_vec.iter().map(|cfs_session_value| { + ( + cfs_session_value["name"].as_str().unwrap(), + cfs_session_value + .pointer("/configuration/name") + .unwrap() + .as_str() + .unwrap(), + cfs_session_value + .pointer("/status/artifacts/0/result_id") + .and_then(|result_id| result_id.as_str()) + .unwrap_or(""), + ) + }); - println!("BOS sessiontemplates to delete:"); + // Get list of BOS sessiontemplate name, CFS configuration name and image ids for compute nodes + let bos_sessiontemplate_cfs_configuration_compute_image_id_tuple_iter = + bos_sessiontemplate_value_vec + .iter() + .map(|bos_sessiontemplate_value| { + let cfs_session_name = bos_sessiontemplate_value["name"].as_str().unwrap(); + let cfs_configuration_name = bos_sessiontemplate_value + .pointer("/cfs/configuration") + .unwrap() + .as_str() + .unwrap(); + let image_id = if let Some(image_path_value) = + bos_sessiontemplate_value.pointer("/boot_sets/compute/path") + { + image_path_value + .as_str() + .unwrap() + .strip_prefix("s3://boot-images/") + .unwrap() + .strip_suffix("/manifest.json") + .unwrap() + } else { + "" + }; + (cfs_session_name, cfs_configuration_name, image_id) + }); + + // Get list of BOS sessiontemplate name, CFS configuration name and image ids for uan nodes + let bos_sessiontemplate_cfs_configuration_uan_image_id_tuple_iter = + bos_sessiontemplate_value_vec + .iter() + .map(|bos_sessiontemplate_value| { + let bos_sessiontemplate_name = + bos_sessiontemplate_value["name"].as_str().unwrap(); + let cfs_configuration_name = bos_sessiontemplate_value + .pointer("/cfs/configuration") + .unwrap() + .as_str() + .unwrap(); + let image_id = if let Some(image_path_value) = + bos_sessiontemplate_value.pointer("/boot_sets/uan/path") + { + image_path_value + .as_str() + .unwrap() + .strip_prefix("s3://boot-images/") + .unwrap() + .strip_suffix("/manifest.json") + .unwrap() + } else { + "" + }; + (bos_sessiontemplate_name, cfs_configuration_name, image_id) + }); + + // Get final list of CFS configurations to delete. 
NOTE this list won't include CFS configurations with neither BOS sessiontemplate nor CFS session related, the reason is must filter data to delete by HSM group and CFS configurations by default are not related to any HSM group + let bos_sessiontemplate_cfs_configuration_image_id_tuple_iter = + bos_sessiontemplate_cfs_configuration_compute_image_id_tuple_iter + .chain(bos_sessiontemplate_cfs_configuration_uan_image_id_tuple_iter) + .collect::>(); + + // EVALUATE IF NEED TO CONTINUE. EXIT IF THERE IS NO DATA TO DELETE + // + if cfs_configuration_name_vec.is_empty() + && image_id_vec.is_empty() + && cfs_session_value_vec.is_empty() + && bos_sessiontemplate_value_vec.is_empty() + { + println!("Nothing to delete. Exit"); + std::process::exit(0); + } - let mut bos_sessiontemplate_table = Table::new(); + // PRINT SUMMARY/DATA TO DELETE + // + println!("CFS sessions to delete:"); - bos_sessiontemplate_table.set_header(vec!["Name", "Configuration", "Image ID"]); + let mut cfs_session_table = Table::new(); - for bos_sessiontemplate_tuple in &bos_sessiontemplate_cfs_configuration_image_id_tuple_iter - { - bos_sessiontemplate_table.add_row(vec![ - bos_sessiontemplate_tuple.0, - bos_sessiontemplate_tuple.1, - bos_sessiontemplate_tuple.2, - ]); - } + cfs_session_table.set_header(vec!["Name", "Configuration", "Image ID"]); - println!("{bos_sessiontemplate_table}"); + for cfs_session_tuple in cfs_session_cfs_configuration_image_id_tuple_iter { + cfs_session_table.add_row(vec![ + cfs_session_tuple.0, + cfs_session_tuple.1, + cfs_session_tuple.2, + ]); + } - println!("CFS configurations to delete:"); + println!("{cfs_session_table}"); - let mut cfs_configuration_table = Table::new(); + println!("BOS sessiontemplates to delete:"); - cfs_configuration_table.set_header(vec!["Name", "Last Update"]); + let mut bos_sessiontemplate_table = Table::new(); - for cfs_configuration_value in &cfs_configuration_value_vec { - cfs_configuration_table.add_row(vec![ - cfs_configuration_value["name"].as_str().unwrap(), - cfs_configuration_value["lastUpdated"].as_str().unwrap(), - ]); - } + bos_sessiontemplate_table.set_header(vec!["Name", "Configuration", "Image ID"]); - println!("{cfs_configuration_table}"); + for bos_sessiontemplate_tuple in + &bos_sessiontemplate_cfs_configuration_image_id_tuple_iter + { + bos_sessiontemplate_table.add_row(vec![ + bos_sessiontemplate_tuple.0, + bos_sessiontemplate_tuple.1, + bos_sessiontemplate_tuple.2, + ]); + } - println!("Images to delete:"); + println!("{bos_sessiontemplate_table}"); - let mut image_id_table = Table::new(); + println!("CFS configurations to delete:"); - image_id_table.set_header(vec!["Image ID"]); + let mut cfs_configuration_table = Table::new(); - for image_id in &image_id_vec { - image_id_table.add_row(vec![image_id]); - } + cfs_configuration_table.set_header(vec!["Name", "Last Update"]); - println!("{image_id_table}"); + for cfs_configuration_value in &cfs_configuration_value_vec { + cfs_configuration_table.add_row(vec![ + cfs_configuration_value["name"].as_str().unwrap(), + cfs_configuration_value["lastUpdated"].as_str().unwrap(), + ]); + } - // VALIDATION - // - // Process CFS configurations to delete one by one - for cfs_configuration_name in &cfs_configuration_name_vec { - // Check dessired configuration not using any CFS configuration to delete - let mut nodes_using_cfs_configuration_as_dessired_configuration_vec = cfs_components - .iter() - .filter(|cfs_component| { - cfs_component["desiredConfig"] - .as_str() - .unwrap() - .eq(*cfs_configuration_name) - }) - 
.map(|cfs_component| cfs_component["id"].as_str().unwrap()) - .collect::>(); + println!("{cfs_configuration_table}"); - nodes_using_cfs_configuration_as_dessired_configuration_vec.sort(); + println!("Images to delete:"); - if !nodes_using_cfs_configuration_as_dessired_configuration_vec.is_empty() { - eprintln!( + let mut image_id_table = Table::new(); + + image_id_table.set_header(vec!["Image ID"]); + + for image_id in &image_id_vec { + image_id_table.add_row(vec![image_id]); + } + + println!("{image_id_table}"); + + // VALIDATION + // + // Process CFS configurations to delete one by one + for cfs_configuration_name in &cfs_configuration_name_vec { + // Check dessired configuration not using any CFS configuration to delete + let mut nodes_using_cfs_configuration_as_dessired_configuration_vec = + cfs_components + .iter() + .filter(|cfs_component| { + cfs_component["desiredConfig"] + .as_str() + .unwrap() + .eq(*cfs_configuration_name) + }) + .map(|cfs_component| cfs_component["id"].as_str().unwrap()) + .collect::>(); + + nodes_using_cfs_configuration_as_dessired_configuration_vec.sort(); + + if !nodes_using_cfs_configuration_as_dessired_configuration_vec.is_empty() { + eprintln!( "CFS configuration {} can't be deleted. Reason:\nCFS configuration {} used as desired configuration for nodes: {}", cfs_configuration_name, cfs_configuration_name, nodes_using_cfs_configuration_as_dessired_configuration_vec.join(", ") ); - std::process::exit(1); + std::process::exit(1); + } } - } - for cfs_configuration_name in &cfs_configuration_name_vec { - // Check images related to CFS configurations to delete are not used to boot nodes. For - // this we need to get images from both CFS session and BOS sessiontemplate because CSCS staff + for cfs_configuration_name in &cfs_configuration_name_vec { + // Check images related to CFS configurations to delete are not used to boot nodes. For + // this we need to get images from both CFS session and BOS sessiontemplate because CSCS staff - // Check images related to CFS configurations to delete are not used to boot nodes. For - // this we need to get images from both CFS session and BOS sessiontemplate because CSCS staff - let mut boot_image_node_vec = Vec::new(); + // Check images related to CFS configurations to delete are not used to boot nodes. For + // this we need to get images from both CFS session and BOS sessiontemplate because CSCS staff + let mut boot_image_node_vec = Vec::new(); - for image_id in &image_id_vec { - let nodes = get_node_vec_booting_image(image_id, &boot_param_vec); + for image_id in &image_id_vec { + let nodes = get_node_vec_booting_image(image_id, &boot_param_vec); - if !nodes.is_empty() { - boot_image_node_vec.push((image_id, nodes)); + if !nodes.is_empty() { + boot_image_node_vec.push((image_id, nodes)); + } } - } - if !boot_image_node_vec.is_empty() { - eprintln!( - "Image based on CFS configuration {} can't be deleted. Reason:", - cfs_configuration_name - ); - for (image_id, node_vec) in boot_image_node_vec { - eprintln!("Image id {} used to boot nodes:\n{:?}", image_id, node_vec); + if !boot_image_node_vec.is_empty() { + eprintln!( + "Image based on CFS configuration {} can't be deleted. 
Reason:", + cfs_configuration_name + ); + for (image_id, node_vec) in boot_image_node_vec { + eprintln!("Image id {} used to boot nodes:\n{:?}", image_id, node_vec); + } + std::process::exit(1); } - std::process::exit(1); } - } - // ASK USER FOR CONFIRMATION - // - if Confirm::with_theme(&ColorfulTheme::default()) - .with_prompt("Please revew the data above and confirm to delete:") - .interact() - .unwrap() - { - println!("Continue"); - } else { - println!("Cancelled by user. Aborting."); - std::process::exit(0); - } + // ASK USER FOR CONFIRMATION + // + if Confirm::with_theme(&ColorfulTheme::default()) + .with_prompt("Please revew the data above and confirm to delete:") + .interact() + .unwrap() + { + println!("Continue"); + } else { + println!("Cancelled by user. Aborting."); + std::process::exit(0); + } - // DELETE DATA - // - delete_data_related_to_cfs_configuration::delete( - shasta_token, - shasta_base_url, - shasta_root_cert, - &cfs_configuration_name_vec, - &image_id_vec, - // &cfs_components, - &cfs_session_value_vec, - &bos_sessiontemplate_value_vec, - // &boot_param_vec, - ) - .await; + // DELETE DATA + // + delete_data_related_to_cfs_configuration::delete( + shasta_token, + shasta_base_url, + shasta_root_cert, + &cfs_configuration_name_vec, + &image_id_vec, + // &cfs_components, + &cfs_session_value_vec, + &bos_sessiontemplate_value_vec, + // &boot_param_vec, + ) + .await; + } } Ok(()) diff --git a/src/main.rs b/src/main.rs index e307b65f..3edcee49 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,6 @@ mod cli; mod common; -use mesa::shasta; +use mesa::{common::jwt_ops, shasta}; use shasta::authentication; @@ -89,12 +89,12 @@ async fn main() -> core::result::Result<(), Box> { } let settings_hsm_group_opt = settings.get_string("hsm_group").ok(); - let settings_hsm_available_vec = settings - .get_array("hsm_available") - .unwrap_or(Vec::new()) - .into_iter() - .map(|hsm_group| hsm_group.into_string().unwrap()) - .collect::>(); + /* let settings_hsm_available_vec = settings + .get_array("hsm_available") + .unwrap_or(Vec::new()) + .into_iter() + .map(|hsm_group| hsm_group.into_string().unwrap()) + .collect::>(); */ let shasta_root_cert = common::config_ops::get_csm_root_cert_content(); @@ -111,12 +111,26 @@ async fn main() -> core::result::Result<(), Box> { Err(_) => None, }; */ - let shasta_token = authentication::get_api_token( - &shasta_base_url, - &shasta_root_cert, - &keycloak_base_url, - ) - .await?; + let shasta_token = + authentication::get_api_token(&shasta_base_url, &shasta_root_cert, &keycloak_base_url) + .await?; + + let mut settings_hsm_available_vec = jwt_ops::get_claims_from_jwt_token(&shasta_token) + .unwrap() + .pointer("/realm_access/roles") + .unwrap() + .as_array() + .unwrap() + .iter() + .map(|role_value| role_value.as_str().unwrap().to_string()) + .collect::>(); + + settings_hsm_available_vec + .retain(|role| !role.eq("offline_access") && !role.eq("uma_authorization")); + + // println!("JWT token resour_access:\n{:?}", realm_access_role_vec); + + // let settings_hsm_available_vec = realm_access_role_vec; let gitea_token = crate::common::vault::http_client::fetch_shasta_vcs_token( &vault_base_url, @@ -128,7 +142,7 @@ async fn main() -> core::result::Result<(), Box> { // Process input params let matches = - crate::cli::build::build_cli(settings_hsm_group_opt.as_ref(), settings_hsm_available_vec) + crate::cli::build::build_cli(settings_hsm_group_opt.as_ref(), &settings_hsm_available_vec) .get_matches(); let cli_result = crate::cli::process::process_cli( 
        matches,
@@ -141,6 +155,7 @@ async fn main() -> core::result::Result<(), Box<dyn std::error::Error>> {
         &gitea_token,
         &gitea_base_url,
         settings_hsm_group_opt.as_ref(),
+        &settings_hsm_available_vec,
         // &base_image_id,
         &k8s_api_url,
     )
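Reviewer note: one detail from the delete subcommand earlier in this patch worth illustrating — the --since/--until values are bare dates promoted to midnight before the hand-rolled ordering check. A standalone sketch of that parsing (same chrono API the patch uses; the helper name is hypothetical):

    use chrono::NaiveDateTime;

    /// Parse a `YYYY-MM-DD` date as midnight, the way the delete path builds its window.
    fn parse_day_start(date: &str) -> Result<NaiveDateTime, chrono::ParseError> {
        NaiveDateTime::parse_from_str(&format!("{}T00:00:00", date), "%Y-%m-%dT%H:%M:%S")
    }

    fn main() {
        let since = parse_day_start("2023-10-01").unwrap();
        let until = parse_day_start("2023-11-01").unwrap();

        // Mirrors the input validation in the delete path: since must not be after until
        if since > until {
            eprintln!("since date can't be after until date. Exit");
            std::process::exit(1);
        }

        println!("Deleting data between {} and {}", since, until);
    }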