From afd47d2a47c754dd711c13a7cda3023e33893fee Mon Sep 17 00:00:00 2001 From: Howard Smith Date: Fri, 6 Dec 2024 20:55:42 +0000 Subject: [PATCH] Feat/alert config (#90) * feat: Add migration to setup alert config tables * chore: Add local dev seed data for a Slack alert * feat: Model alert configs * feat: Add `Error` for invalid alert configs * chore: Fix `seeder` command in `docker-compose.yml` * chore: Rename `docker-compose.yml` -> `compose.yaml` * refactor: `get_connection` out from repos and into free-standing func * feat: Add data model for reading alert configs * feat: `AlertConfigRepository` (only reading methods for now) * refactor: `AlertConfigReadData` -> `AlertConfigData` * test: Add some more test cases for converting `AlertConfigData` to `AlertConfig`s * feat: Support inserting, updating and deleting alert configs in `AlertConfigRepository` * feat: More control over JSON serialisation for `AlertConfig` * refactor: Test seed deletion * chore: Use `.contains_key` rather than `.get` and `.is_some` * test: Add test seeds for alert configs * test: Add integration tests for `AlertConfigRepository` * docs: Update `README` with info on env variables required to seed local DB * chore: Changing warning to note box, and fix formatting * chore: Try running `clippy` with `--verbose` to get more info * chore: No need for `--verbose` with `cargo clippy` * chore: Surpress `clippy` error * test: Add missing coverage * chore: Fix failing test --- .gitignore | 1 + README.md | 10 +- api/src/domain/models/alert_config.rs | 129 ++++++++++ api/src/domain/models/mod.rs | 1 + api/src/errors.rs | 2 + api/src/infrastructure/database.rs | 11 +- api/src/infrastructure/db_schema.rs | 44 +++- api/src/infrastructure/middleware/response.rs | 29 +++ .../down.sql | 3 + .../up.sql | 37 +++ api/src/infrastructure/models/alert_config.rs | 224 ++++++++++++++++++ api/src/infrastructure/models/mod.rs | 1 + .../repositories/alert_config_repo.rs | 171 +++++++++++++ .../repositories/api_key_repo.rs | 22 +- api/src/infrastructure/repositories/mod.rs | 1 + .../repositories/monitor_repo.rs | 22 +- api/src/infrastructure/seeding/seeds.sql | 32 +++ api/tests/alert_config_repo_test.rs | 209 ++++++++++++++++ api/tests/common/infra.rs | 27 ++- api/tests/common/postgres.rs | 62 ++++- api/tests/common/seeds.rs | 56 ++++- api/tests/monitor_repo_test.rs | 1 + docker-compose.yml => compose.yaml | 4 +- 23 files changed, 1045 insertions(+), 54 deletions(-) create mode 100644 api/src/domain/models/alert_config.rs create mode 100644 api/src/infrastructure/migrations/2024-12-02-203820_add-alert-config-tables/down.sql create mode 100644 api/src/infrastructure/migrations/2024-12-02-203820_add-alert-config-tables/up.sql create mode 100644 api/src/infrastructure/models/alert_config.rs create mode 100644 api/src/infrastructure/repositories/alert_config_repo.rs create mode 100644 api/tests/alert_config_repo_test.rs rename docker-compose.yml => compose.yaml (96%) diff --git a/.gitignore b/.gitignore index 2cc08ce6..23acf9c2 100644 --- a/.gitignore +++ b/.gitignore @@ -4,4 +4,5 @@ api/target # Miscellaneous .DS_Store +.env lcov.info diff --git a/README.md b/README.md index 631b29d3..6d700137 100644 --- a/README.md +++ b/README.md @@ -74,17 +74,21 @@ intended to be ran within the application container of development container. 
 Both `Makefile`s have mostly the same commands, with the exception of the following commands that only the root-level `Makefile` has:
 
-- `install`: Builds all application containers, installs the required Node modules in the Vue
-  application and sets up a local PostgreSQL database with test data.
+- `install`: Builds all application containers and sets up a local PostgreSQL database with test data*.
 - `build-containers`: Builds all application containers.
 - `seed`: Remove all data from the local database and insert the test data (this is the same test
-  data that get's written to the local database during `make install`).
+  data that gets written to the local database during `make install`)*.
 - `shell`: Open a `bash` shell on the application container, where you can use the _other_
   `Makefile` to run commands without the overhead of spinning up containers for each command.
 - `delete-postgres-volume`: Remove the Docker volume being used to make PostgreSQL data persist.
   This can be handy is you run into any problems with your local database and you just want to
   trash it and start again. The next time the database container runs this will be recreated
   naturally
 
+> [!NOTE]
+> \* - Seeding the local database with test data requires a `.env` file to be present at the root of the project, containing the following environment variables:
+> - `SLACK_CHANNEL`: A Slack channel to send Slack alerts to (can be empty if not using Slack integration)
+> - `SLACK_TOKEN`: A Slack Bot OAuth token for sending Slack alerts (can be empty if not using Slack integration)
+
 The following commands are present in both `Makefile`s:
 
 - `run`: Run the CronMon API (release build).
diff --git a/api/src/domain/models/alert_config.rs b/api/src/domain/models/alert_config.rs
new file mode 100644
index 00000000..aa20a898
--- /dev/null
+++ b/api/src/domain/models/alert_config.rs
@@ -0,0 +1,129 @@
+use serde::Serialize;
+use uuid::Uuid;
+
+/// A domain model representing user configuration for alerts.
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct AlertConfig {
+    /// The unique identifier for the alert configuration.
+    pub alert_config_id: Uuid,
+    /// The name of the alert configuration.
+    pub name: String,
+    /// The tenant that the alert configuration belongs to.
+    pub tenant: String,
+    /// Whether the alert configuration is active.
+    pub active: bool,
+    /// Whether to send alerts for late jobs.
+    pub on_late: bool,
+    /// Whether to send alerts for errored jobs.
+    pub on_error: bool,
+    /// The type of alert.
+    #[serde(rename = "type")]
+    pub type_: AlertType,
+}
+
+/// The different types of alerts that can be configured.
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub enum AlertType {
+    /// An alert that sends a Slack message.
+    #[serde(rename = "slack")]
+    Slack(SlackAlertConfig),
+}
+
+/// Slack-specific configuration for alerts.
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct SlackAlertConfig {
+    /// The channel to send the alert to.
+    pub channel: String,
+    /// The Slack bot-user OAuth token (for use with chat.postMessage)
+    pub token: String,
+}
+
+impl AlertConfig {
+    /// Create a new `AlertConfig` for Slack.
+    pub fn new_slack_config(
+        name: String,
+        tenant: String,
+        active: bool,
+        on_late: bool,
+        on_error: bool,
+        channel: String,
+        token: String,
+    ) -> Self {
+        Self {
+            alert_config_id: Uuid::new_v4(),
+            name,
+            tenant,
+            active,
+            on_late,
+            on_error,
+            type_: AlertType::Slack(SlackAlertConfig { channel, token }),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use serde_json::json;
+
+    use super::*;
+
+    #[test]
+    fn new_slack_config() {
+        let alert_config = AlertConfig::new_slack_config(
+            "test-name".to_string(),
+            "test-tenant".to_string(),
+            true,
+            true,
+            true,
+            "test-channel".to_string(),
+            "test-token".to_string(),
+        );
+
+        // Cannot check the alert_config_id as it is randomly generated, but we know it'll be a Uuid
+        // because of its type.
+        assert_eq!(&alert_config.name, "test-name");
+        assert_eq!(&alert_config.tenant, "test-tenant");
+        assert!(alert_config.active);
+        assert!(alert_config.on_late);
+        assert!(alert_config.on_error);
+        assert_eq!(
+            alert_config.type_,
+            AlertType::Slack(SlackAlertConfig {
+                channel: "test-channel".to_string(),
+                token: "test-token".to_string(),
+            })
+        );
+    }
+
+    #[test]
+    fn test_serialisation() {
+        let alert_config = AlertConfig::new_slack_config(
+            "test-name".to_string(),
+            "test-tenant".to_string(),
+            true,
+            true,
+            true,
+            "test-channel".to_string(),
+            "test-token".to_string(),
+        );
+
+        let value = serde_json::to_value(&alert_config).unwrap();
+        assert_eq!(
+            value,
+            json!({
+                "alert_config_id": alert_config.alert_config_id.to_string(),
+                "name": "test-name",
+                "tenant": "test-tenant",
+                "active": true,
+                "on_late": true,
+                "on_error": true,
+                "type": {
+                    "slack": {
+                        "channel": "test-channel",
+                        "token": "test-token"
+                    }
+                }
+            })
+        );
+    }
+}
diff --git a/api/src/domain/models/mod.rs b/api/src/domain/models/mod.rs
index 644f85d9..182ef0df 100644
--- a/api/src/domain/models/mod.rs
+++ b/api/src/domain/models/mod.rs
@@ -1,3 +1,4 @@
+pub mod alert_config;
 pub mod api_key;
 pub mod job;
 pub mod monitor;
diff --git a/api/src/errors.rs b/api/src/errors.rs
index 466838a7..dc403d58 100644
--- a/api/src/errors.rs
+++ b/api/src/errors.rs
@@ -12,6 +12,7 @@ pub enum Error {
     JobAlreadyFinished(Uuid),
     InvalidMonitor(String),
     InvalidJob(String),
+    InvalidAlertConfig(String),
     Unauthorized(String),
     AuthenticationError(String),
 }
@@ -37,6 +38,7 @@ impl Display for Error {
             }
             Self::InvalidMonitor(reason) => write!(f, "Invalid Monitor: {reason}"),
             Self::InvalidJob(reason) => write!(f, "Invalid Job: {reason}"),
+            Self::InvalidAlertConfig(reason) => write!(f, "Invalid Alert Configuration: {reason}"),
             Self::Unauthorized(reason) => write!(f, "Unauthorized: {reason}"),
             Self::AuthenticationError(reason) => write!(f, "Authentication error: {reason}"),
         }
diff --git a/api/src/infrastructure/database.rs b/api/src/infrastructure/database.rs
index f2eea0ef..58266784 100644
--- a/api/src/infrastructure/database.rs
+++ b/api/src/infrastructure/database.rs
@@ -2,7 +2,10 @@ use std::env;
 
 use diesel::Connection;
 use diesel::PgConnection;
-use diesel_async::pooled_connection::{deadpool::Pool, AsyncDieselConnectionManager};
+use diesel_async::pooled_connection::{
+    deadpool::{Object, Pool},
+    AsyncDieselConnectionManager,
+};
 use diesel_async::AsyncPgConnection;
 use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
 
@@ -22,6 +25,12 @@ pub fn create_connection_pool() -> Result<DbPool, Error> {
     Ok(pool)
 }
 
+pub async fn get_connection(pool: &DbPool) -> Result<Object<AsyncPgConnection>, Error> {
+    pool.get()
+        .await
+        .map_err(|e| Error::RepositoryError(e.to_string()))
+}
+
 pub fn run_migrations()
{ let mut conn = PgConnection::establish(&get_database_url()) .unwrap_or_else(|_| panic!("Failed to establish DB connection")); diff --git a/api/src/infrastructure/db_schema.rs b/api/src/infrastructure/db_schema.rs index 7fac7cc7..ef71f5c9 100644 --- a/api/src/infrastructure/db_schema.rs +++ b/api/src/infrastructure/db_schema.rs @@ -1,5 +1,20 @@ // @generated automatically by Diesel CLI. +diesel::table! { + alert_config (alert_config_id) { + alert_config_id -> Uuid, + created_at -> Timestamp, + updated_at -> Timestamp, + name -> Varchar, + tenant -> Varchar, + #[sql_name = "type"] + type_ -> Varchar, + active -> Bool, + on_late -> Bool, + on_error -> Bool, + } +} + diesel::table! { api_key (api_key_id) { api_key_id -> Uuid, @@ -43,6 +58,33 @@ diesel::table! { } } +diesel::table! { + monitor_alert_config (alert_config_id, monitor_id) { + alert_config_id -> Uuid, + monitor_id -> Uuid, + } +} + +diesel::table! { + slack_alert_config (alert_config_id) { + alert_config_id -> Uuid, + created_at -> Timestamp, + updated_at -> Timestamp, + slack_channel -> Varchar, + slack_bot_oauth_token -> Varchar, + } +} + diesel::joinable!(job -> monitor (monitor_id)); +diesel::joinable!(monitor_alert_config -> alert_config (alert_config_id)); +diesel::joinable!(monitor_alert_config -> monitor (monitor_id)); +diesel::joinable!(slack_alert_config -> alert_config (alert_config_id)); -diesel::allow_tables_to_appear_in_same_query!(api_key, job, monitor,); +diesel::allow_tables_to_appear_in_same_query!( + alert_config, + api_key, + job, + monitor, + monitor_alert_config, + slack_alert_config, +); diff --git a/api/src/infrastructure/middleware/response.rs b/api/src/infrastructure/middleware/response.rs index 3c56439f..6110136b 100644 --- a/api/src/infrastructure/middleware/response.rs +++ b/api/src/infrastructure/middleware/response.rs @@ -20,6 +20,9 @@ impl<'r> Responder<'r, 'static> for Error { // default to server-side errors. 
             Error::InvalidMonitor(_) => (Status::InternalServerError, "Invalid Monitor"),
             Error::InvalidJob(_) => (Status::InternalServerError, "Invalid Job"),
+            Error::InvalidAlertConfig(_) => {
+                (Status::InternalServerError, "Invalid Alert Configuration")
+            }
             Error::Unauthorized(_) => (Status::Unauthorized, "Unauthorized"),
             Error::AuthenticationError(_) => (Status::InternalServerError, "Authentication Error"),
         };
@@ -91,6 +94,13 @@ mod tests {
         Err(Error::InvalidJob("invalid job".to_string()))
     }
 
+    #[rocket::get("/invalid_alert_config")]
+    fn invalid_alert_config() -> Result<(), Error> {
+        Err(Error::InvalidAlertConfig(
+            "invalid alert config".to_string(),
+        ))
+    }
+
     #[rocket::get("/unauthorized")]
     fn unauthorized() -> Result<(), Error> {
         Err(Error::Unauthorized("insufficient permissions".to_string()))
     }
@@ -115,6 +125,7 @@ mod tests {
                 job_already_finished,
                 invalid_monitor,
                 invalid_job,
+                invalid_alert_config,
                 unauthorized,
                 auth_error
             ],
@@ -253,6 +264,24 @@ mod tests {
         );
     }
 
+    #[rstest]
+    fn test_invalid_alert_config(test_client: Client) {
+        let response = test_client.get("/invalid_alert_config").dispatch();
+
+        assert_eq!(response.status(), Status::InternalServerError);
+        assert_eq!(response.content_type(), Some(ContentType::JSON));
+        assert_eq!(
+            response.into_json::<Value>().unwrap(),
+            json!({
+                "error": {
+                    "code": 500,
+                    "reason": "Invalid Alert Configuration",
+                    "description": "Invalid Alert Configuration: invalid alert config"
+                }
+            })
+        );
+    }
+
     #[rstest]
     fn test_unauthorized(test_client: Client) {
         let response = test_client.get("/unauthorized").dispatch();
diff --git a/api/src/infrastructure/migrations/2024-12-02-203820_add-alert-config-tables/down.sql b/api/src/infrastructure/migrations/2024-12-02-203820_add-alert-config-tables/down.sql
new file mode 100644
index 00000000..36c30a1a
--- /dev/null
+++ b/api/src/infrastructure/migrations/2024-12-02-203820_add-alert-config-tables/down.sql
@@ -0,0 +1,3 @@
+DROP TABLE monitor_alert_config;
+DROP TABLE slack_alert_config;
+DROP TABLE alert_config;
diff --git a/api/src/infrastructure/migrations/2024-12-02-203820_add-alert-config-tables/up.sql b/api/src/infrastructure/migrations/2024-12-02-203820_add-alert-config-tables/up.sql
new file mode 100644
index 00000000..b9212951
--- /dev/null
+++ b/api/src/infrastructure/migrations/2024-12-02-203820_add-alert-config-tables/up.sql
@@ -0,0 +1,37 @@
+CREATE TABLE alert_config (
+    alert_config_id uuid PRIMARY KEY,
+    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+
+    name VARCHAR NOT NULL,
+    tenant VARCHAR NOT NULL,
+    type VARCHAR NOT NULL,
+    active BOOLEAN NOT NULL,
+    on_late BOOLEAN NOT NULL,
+    on_error BOOLEAN NOT NULL
+);
+
+CREATE TABLE slack_alert_config (
+    alert_config_id uuid PRIMARY KEY REFERENCES alert_config ON DELETE CASCADE,
+    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+
+    -- These columns are intentionally overly verbose in how they are named,
+    -- since we're using class table inheritance, and we fetch all alert configs
+    -- in one query using left joins. If we weren't as verbose here, the columns
+    -- could clash (for example if we add support for Discord alerts, would
+    -- 'channel' be for Discord or for Slack?).
+    slack_channel VARCHAR NOT NULL,
+    slack_bot_oauth_token VARCHAR NOT NULL
+);
+
+-- This is an association table between alert_config and monitor.
+CREATE TABLE monitor_alert_config (
+    alert_config_id uuid REFERENCES alert_config ON DELETE CASCADE,
+    monitor_id uuid REFERENCES monitor ON DELETE CASCADE,
+
+    CONSTRAINT pk_monitor_alert_config PRIMARY KEY (alert_config_id, monitor_id)
+);
+
+SELECT diesel_manage_updated_at('alert_config');
+SELECT diesel_manage_updated_at('slack_alert_config');
diff --git a/api/src/infrastructure/models/alert_config.rs b/api/src/infrastructure/models/alert_config.rs
new file mode 100644
index 00000000..580c2d97
--- /dev/null
+++ b/api/src/infrastructure/models/alert_config.rs
@@ -0,0 +1,224 @@
+use diesel::prelude::*;
+use uuid::Uuid;
+
+use crate::domain::models::alert_config::{AlertConfig, AlertType, SlackAlertConfig};
+use crate::errors::Error;
+use crate::infrastructure::db_schema::{alert_config, slack_alert_config};
+
+#[derive(Clone, Queryable)]
+pub struct AlertConfigData {
+    pub alert_config_id: Uuid,
+    pub name: String,
+    pub tenant: String,
+    pub type_: String,
+    pub active: bool,
+    pub on_late: bool,
+    pub on_error: bool,
+    pub slack_channel: Option<String>,
+    pub slack_bot_oauth_token: Option<String>,
+}
+
+#[derive(Identifiable, Insertable, AsChangeset)]
+#[diesel(table_name = alert_config)]
+#[diesel(primary_key(alert_config_id))]
+pub struct NewAlertConfigData {
+    pub alert_config_id: Uuid,
+    pub name: String,
+    pub tenant: String,
+    pub type_: String,
+    pub active: bool,
+    pub on_late: bool,
+    pub on_error: bool,
+}
+
+#[derive(Identifiable, Insertable, AsChangeset)]
+#[diesel(table_name = slack_alert_config)]
+#[diesel(primary_key(alert_config_id))]
+pub struct NewSlackAlertConfigData {
+    pub alert_config_id: Uuid,
+    pub slack_channel: String,
+    pub slack_bot_oauth_token: String,
+}
+
+impl AlertConfigData {
+    pub fn to_model(&self) -> Result<AlertConfig, Error> {
+        Ok(AlertConfig {
+            alert_config_id: self.alert_config_id,
+            name: self.name.clone(),
+            tenant: self.tenant.clone(),
+            active: self.active,
+            on_late: self.on_late,
+            on_error: self.on_error,
+            type_: match self.type_.as_str() {
+                "slack" => {
+                    if let (Some(channel), Some(token)) =
+                        (&self.slack_channel, &self.slack_bot_oauth_token)
+                    {
+                        AlertType::Slack(SlackAlertConfig {
+                            channel: channel.clone(),
+                            token: token.clone(),
+                        })
+                    } else {
+                        return Err(Error::InvalidAlertConfig(
+                            "Slack channel and/ or bot OAuth token is missing".to_owned(),
+                        ));
+                    }
+                }
+                _ => return Err(Error::InvalidAlertConfig("Unknown alert type".to_owned())),
+            },
+        })
+    }
+}
+
+impl NewAlertConfigData {
+    pub fn from_model(alert_config: &AlertConfig) -> (Self, Option<NewSlackAlertConfigData>) {
+        let (type_, specific_data) = match &alert_config.type_ {
+            AlertType::Slack(slack_config) => (
+                "slack".to_string(),
+                Some(NewSlackAlertConfigData {
+                    alert_config_id: alert_config.alert_config_id,
+                    slack_channel: slack_config.channel.clone(),
+                    slack_bot_oauth_token: slack_config.token.clone(),
+                }),
+            ),
+        };
+
+        (
+            NewAlertConfigData {
+                alert_config_id: alert_config.alert_config_id,
+                name: alert_config.name.clone(),
+                tenant: alert_config.tenant.clone(),
+                type_,
+                active: alert_config.active,
+                on_late: alert_config.on_late,
+                on_error: alert_config.on_error,
+            },
+            specific_data,
+        )
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use pretty_assertions::assert_eq;
+    use rstest::rstest;
+
+    use test_utils::gen_uuid;
+
+    use super::*;
+
+    #[test]
+    fn test_converting_db_to_alert_config() {
+        let alert_config_data = AlertConfigData {
+            alert_config_id: gen_uuid("41ebffb4-a188-48e9-8ec1-61380085cde3"),
+            name: "test-slack-alert".to_owned(),
+            tenant: "foo-tenant".to_owned(),
+            type_: "slack".to_owned(),
+            active: true,
+            on_late: true,
+            on_error: false,
+            slack_channel: Some("test-channel".to_owned()),
+            slack_bot_oauth_token: Some("test-token".to_owned()),
+        };
+
+        let alert_config = alert_config_data.to_model().unwrap();
+
+        assert_eq!(
+            alert_config.alert_config_id,
+            gen_uuid("41ebffb4-a188-48e9-8ec1-61380085cde3")
+        );
+        assert_eq!(&alert_config.name, "test-slack-alert");
+        assert_eq!(&alert_config.tenant, "foo-tenant");
+        assert!(alert_config.active);
+        assert!(alert_config.on_late);
+        assert!(!alert_config.on_error);
+        assert_eq!(
+            alert_config.type_,
+            AlertType::Slack(SlackAlertConfig {
+                channel: "test-channel".to_owned(),
+                token: "test-token".to_owned()
+            })
+        );
+    }
+
+    #[rstest]
+    #[case::unknown_type("unknown", None, None, "Unknown alert type")]
+    #[case::missing_channel(
+        "slack",
+        None,
+        Some("test-token".to_owned()),
+        "Slack channel and/ or bot OAuth token is missing"
+    )]
+    #[case::missing_token(
+        "slack",
+        Some("test-channel".to_owned()),
+        None,
+        "Slack channel and/ or bot OAuth token is missing"
+    )]
+    #[case::missing_channel_and_token(
+        "slack",
+        None,
+        None,
+        "Slack channel and/ or bot OAuth token is missing"
+    )]
+    fn test_converting_invalid_db_data_to_model(
+        #[case] type_: &str,
+        #[case] channel: Option<String>,
+        #[case] token: Option<String>,
+        #[case] expected_error: &str,
+    ) {
+        let alert_config_data = AlertConfigData {
+            alert_config_id: gen_uuid("41ebffb4-a188-48e9-8ec1-61380085cde3"),
+            name: "test-slack-alert".to_owned(),
+            tenant: "foo-tenant".to_owned(),
+            type_: type_.to_owned(),
+            active: true,
+            on_late: true,
+            on_error: false,
+            slack_channel: channel,
+            slack_bot_oauth_token: token,
+        };
+
+        let result = alert_config_data.to_model();
+
+        assert_eq!(
+            result,
+            Err(Error::InvalidAlertConfig(expected_error.to_owned()))
+        );
+    }
+
+    #[test]
+    fn test_model_to_db_data() {
+        let alert_config = AlertConfig {
+            alert_config_id: gen_uuid("41ebffb4-a188-48e9-8ec1-61380085cde3"),
+            name: "test-slack-alert".to_owned(),
+            tenant: "foo-tenant".to_owned(),
+            active: true,
+            on_late: true,
+            on_error: false,
+            type_: AlertType::Slack(SlackAlertConfig {
+                channel: "test-channel".to_owned(),
+                token: "test-token".to_owned(),
+            }),
+        };
+
+        let (alert_config_data, slack_data) = NewAlertConfigData::from_model(&alert_config);
+
+        assert_eq!(
+            alert_config_data.alert_config_id,
+            alert_config.alert_config_id
+        );
+        assert_eq!(&alert_config_data.name, "test-slack-alert");
+        assert_eq!(&alert_config_data.tenant, "foo-tenant");
+        assert_eq!(&alert_config_data.type_, "slack");
+        assert!(alert_config_data.active);
+        assert!(alert_config_data.on_late);
+        assert!(!alert_config_data.on_error);
+
+        assert!(slack_data.is_some());
+        let slack_data = slack_data.unwrap();
+        assert_eq!(slack_data.alert_config_id, alert_config.alert_config_id);
+        assert_eq!(&slack_data.slack_channel, "test-channel");
+        assert_eq!(&slack_data.slack_bot_oauth_token, "test-token");
+    }
+}
diff --git a/api/src/infrastructure/models/mod.rs b/api/src/infrastructure/models/mod.rs
index 644f85d9..182ef0df 100644
--- a/api/src/infrastructure/models/mod.rs
+++ b/api/src/infrastructure/models/mod.rs
@@ -1,3 +1,4 @@
+pub mod alert_config;
 pub mod api_key;
 pub mod job;
 pub mod monitor;
diff --git a/api/src/infrastructure/repositories/alert_config_repo.rs b/api/src/infrastructure/repositories/alert_config_repo.rs
new file mode 100644
index 00000000..5ee0bf63
--- /dev/null
+++ b/api/src/infrastructure/repositories/alert_config_repo.rs
@@ -0,0 +1,171 @@
+use std::collections::HashMap;
+
+use async_trait::async_trait;
+use diesel::prelude::*;
+use diesel::result::Error as DieselError;
+use diesel_async::AsyncConnection;
+use diesel_async::RunQueryDsl;
+use uuid::Uuid;
+
+use crate::domain::models::alert_config::AlertConfig;
+use crate::errors::Error;
+use crate::infrastructure::database::{get_connection, DbPool};
+use crate::infrastructure::db_schema::{alert_config, slack_alert_config};
+use crate::infrastructure::models::alert_config::AlertConfigData;
+use crate::infrastructure::models::alert_config::NewAlertConfigData;
+use crate::infrastructure::repositories::Repository;
+
+macro_rules! build_polymorphic_query {
+    () => {{
+        alert_config::table
+            .left_join(
+                slack_alert_config::dsl::slack_alert_config
+                    .on(slack_alert_config::dsl::alert_config_id.eq(alert_config::alert_config_id)),
+            )
+            .select((
+                alert_config::alert_config_id,
+                alert_config::name,
+                alert_config::tenant,
+                alert_config::type_,
+                alert_config::active,
+                alert_config::on_late,
+                alert_config::on_error,
+                slack_alert_config::dsl::slack_channel.nullable(),
+                slack_alert_config::dsl::slack_bot_oauth_token.nullable(),
+            ))
+            .into_boxed()
+    }};
+}
+
+pub struct AlertConfigRepository<'a> {
+    pool: &'a DbPool,
+    data: HashMap<Uuid, AlertConfig>,
+}
+
+impl<'a> AlertConfigRepository<'a> {
+    pub fn new(pool: &'a DbPool) -> Self {
+        Self {
+            pool,
+            data: HashMap::new(),
+        }
+    }
+
+    fn db_to_model(&mut self, alert_config_data: &AlertConfigData) -> Result<AlertConfig, Error> {
+        let alert_config = alert_config_data.to_model()?;
+        self.data
+            .insert(alert_config.alert_config_id, alert_config.clone());
+        Ok(alert_config)
+    }
+}
+
+#[async_trait]
+#[allow(clippy::needless_lifetimes)] // This is needed for the lifetime of the pool
+impl<'a> Repository for AlertConfigRepository<'a> {
+    async fn get(
+        &mut self,
+        alert_config_id: Uuid,
+        tenant: &str,
+    ) -> Result<Option<AlertConfig>, Error> {
+        let mut connection = get_connection(self.pool).await?;
+        let result = connection
+            .transaction::<Option<AlertConfigData>, DieselError, _>(|conn| {
+                Box::pin(async move {
+                    build_polymorphic_query!()
+                        .filter(
+                            alert_config::alert_config_id
+                                .eq(alert_config_id)
+                                .and(alert_config::tenant.eq(tenant)),
+                        )
+                        .first(conn)
+                        .await
+                        .optional()
+                })
+            })
+            .await
+            .map_err(|err| Error::RepositoryError(err.to_string()))?;
+
+        Ok(match result {
+            None => None,
+            Some(alert_config_data) => Some(self.db_to_model(&alert_config_data)?),
+        })
+    }
+
+    async fn all(&mut self, tenant: &str) -> Result<Vec<AlertConfig>, Error> {
+        let mut connection = get_connection(self.pool).await?;
+        let results = connection
+            .transaction::<Vec<AlertConfigData>, DieselError, _>(|conn| {
+                Box::pin(async move {
+                    build_polymorphic_query!()
+                        .filter(alert_config::tenant.eq(tenant))
+                        .load(conn)
+                        .await
+                })
+            })
+            .await
+            .map_err(|err| Error::RepositoryError(err.to_string()))?;
+
+        Ok(results
+            .iter()
+            .map(|data| self.db_to_model(data))
+            .collect::<Result<Vec<AlertConfig>, Error>>()?)
+    }
+
+    async fn save(&mut self, alert_config: &AlertConfig) -> Result<(), Error> {
+        let (alert_config_data, slack_alert_config_data) =
+            NewAlertConfigData::from_model(alert_config);
+
+        // We can do this now as we only support Slack, but when we add more integrations we will
+        // need to handle this differently.
+ let slack_alert_config_data = slack_alert_config_data.unwrap(); + + let mut connection = get_connection(self.pool).await?; + connection + .transaction::<(), DieselError, _>(|conn| { + Box::pin(async { + if self.data.contains_key(&alert_config.alert_config_id) { + diesel::update(&alert_config_data) + .set(&alert_config_data) + .execute(conn) + .await?; + diesel::update(&slack_alert_config_data) + .set(&slack_alert_config_data) + .execute(conn) + .await?; + } else { + diesel::insert_into(alert_config::table) + .values(&alert_config_data) + .execute(conn) + .await?; + + diesel::insert_into(slack_alert_config::table) + .values(&slack_alert_config_data) + .execute(conn) + .await?; + } + + self.data + .insert(alert_config.alert_config_id, alert_config.clone()); + Ok(()) + }) + }) + .await + .map_err(|err| Error::RepositoryError(err.to_string())) + } + + async fn delete(&mut self, alert_config: &AlertConfig) -> Result<(), Error> { + let mut connection = get_connection(self.pool).await?; + + // We only need to delete the alert_config row, as the foreign key constraint will take care + // of deleting the integration specific rows via `ON DELETE CASCADE`. + diesel::delete( + alert_config::table + .filter(alert_config::alert_config_id.eq(alert_config.alert_config_id)), + ) + .execute(&mut connection) + .await + .map_err(|err| Error::RepositoryError(err.to_string()))?; + + self.data.remove(&alert_config.alert_config_id); + Ok(()) + } +} diff --git a/api/src/infrastructure/repositories/api_key_repo.rs b/api/src/infrastructure/repositories/api_key_repo.rs index d4fa92dc..68bdb803 100644 --- a/api/src/infrastructure/repositories/api_key_repo.rs +++ b/api/src/infrastructure/repositories/api_key_repo.rs @@ -3,14 +3,13 @@ use std::collections::HashMap; use async_trait::async_trait; use diesel::prelude::*; use diesel::result::Error as DieselError; -use diesel_async::pooled_connection::deadpool::Object; use diesel_async::AsyncConnection; -use diesel_async::{AsyncPgConnection, RunQueryDsl}; +use diesel_async::RunQueryDsl; use uuid::Uuid; use crate::domain::models::api_key::ApiKey; use crate::errors::Error; -use crate::infrastructure::database::DbPool; +use crate::infrastructure::database::{get_connection, DbPool}; use crate::infrastructure::db_schema::api_key; use crate::infrastructure::models::api_key::ApiKeyData; use crate::infrastructure::repositories::api_keys::GetByKey; @@ -30,13 +29,6 @@ impl<'a> ApiKeyRepository<'a> { } } - async fn get_connection(&mut self) -> Result, Error> { - self.pool - .get() - .await - .map_err(|e| Error::RepositoryError(e.to_string())) - } - fn db_to_key(&mut self, key: &ApiKeyData) -> ApiKey { let api_key = ApiKey::from(key); self.data.insert(key.api_key_id, key.clone()); @@ -48,7 +40,7 @@ impl<'a> ApiKeyRepository<'a> { #[allow(clippy::needless_lifetimes)] // This is needed for the lifetime of the pool impl<'a> GetByKey for ApiKeyRepository<'a> { async fn get_by_key(&mut self, key: &str) -> Result, Error> { - let mut connection = self.get_connection().await?; + let mut connection = get_connection(self.pool).await?; connection .transaction::, DieselError, _>(|conn| { Box::pin(async move { @@ -71,7 +63,7 @@ impl<'a> GetByKey for ApiKeyRepository<'a> { #[allow(clippy::needless_lifetimes)] // This is needed for the lifetime of the pool impl<'a> Repository for ApiKeyRepository<'a> { async fn get(&mut self, api_key_id: Uuid, tenant: &str) -> Result, Error> { - let mut connection = self.get_connection().await?; + let mut connection = get_connection(self.pool).await?; connection 
.transaction::, DieselError, _>(|conn| { Box::pin(async move { @@ -94,7 +86,7 @@ impl<'a> Repository for ApiKeyRepository<'a> { } async fn all(&mut self, tenant: &str) -> Result, Error> { - let mut connection = self.get_connection().await?; + let mut connection = get_connection(self.pool).await?; connection .transaction::, DieselError, _>(|conn| { Box::pin(async move { @@ -112,7 +104,7 @@ impl<'a> Repository for ApiKeyRepository<'a> { } async fn save(&mut self, key: &ApiKey) -> Result<(), Error> { - let mut connection = self.get_connection().await?; + let mut connection = get_connection(self.pool).await?; connection .transaction::<(), DieselError, _>(|conn| { Box::pin(async move { @@ -140,7 +132,7 @@ impl<'a> Repository for ApiKeyRepository<'a> { async fn delete(&mut self, key: &ApiKey) -> Result<(), Error> { let api_key_data = ApiKeyData::from(key); - let mut connection = self.get_connection().await?; + let mut connection = get_connection(self.pool).await?; diesel::delete(&api_key_data) .execute(&mut connection) .await diff --git a/api/src/infrastructure/repositories/mod.rs b/api/src/infrastructure/repositories/mod.rs index eb14a2b9..3066089a 100644 --- a/api/src/infrastructure/repositories/mod.rs +++ b/api/src/infrastructure/repositories/mod.rs @@ -1,3 +1,4 @@ +pub mod alert_config_repo; pub mod api_key_repo; pub mod api_keys; pub mod monitor; diff --git a/api/src/infrastructure/repositories/monitor_repo.rs b/api/src/infrastructure/repositories/monitor_repo.rs index ae97c242..07a7b5d4 100644 --- a/api/src/infrastructure/repositories/monitor_repo.rs +++ b/api/src/infrastructure/repositories/monitor_repo.rs @@ -4,14 +4,13 @@ use async_trait::async_trait; use diesel::dsl::now; use diesel::prelude::*; use diesel::result::Error as DieselError; -use diesel_async::pooled_connection::deadpool::Object; use diesel_async::AsyncConnection; -use diesel_async::{AsyncPgConnection, RunQueryDsl}; +use diesel_async::RunQueryDsl; use uuid::Uuid; use crate::domain::models::monitor::Monitor; use crate::errors::Error; -use crate::infrastructure::database::DbPool; +use crate::infrastructure::database::{get_connection, DbPool}; use crate::infrastructure::db_schema::job; use crate::infrastructure::db_schema::monitor; use crate::infrastructure::models::job::JobData; @@ -33,13 +32,6 @@ impl<'a> MonitorRepository<'a> { } } - async fn get_connection(&mut self) -> Result, Error> { - self.pool - .get() - .await - .map_err(|e| Error::RepositoryError(e.to_string())) - } - fn db_to_monitor( &mut self, monitor_data: MonitorData, @@ -55,7 +47,7 @@ impl<'a> MonitorRepository<'a> { #[allow(clippy::needless_lifetimes)] // This is needed for the lifetime of the pool impl<'a> GetWithLateJobs for MonitorRepository<'a> { async fn get_with_late_jobs(&mut self) -> Result, Error> { - let mut connection = self.get_connection().await?; + let mut connection = get_connection(self.pool).await?; let (monitor_datas, job_datas) = connection .transaction::<(Vec, Vec), DieselError, _>(|conn| { Box::pin(async move { @@ -103,7 +95,7 @@ impl<'a> GetWithLateJobs for MonitorRepository<'a> { #[allow(clippy::needless_lifetimes)] // This is needed for the lifetime of the pool impl<'a> Repository for MonitorRepository<'a> { async fn get(&mut self, monitor_id: Uuid, tenant: &str) -> Result, Error> { - let mut connection = self.get_connection().await?; + let mut connection = get_connection(self.pool).await?; let result = connection .transaction::)>, DieselError, _>(|conn| { Box::pin(async move { @@ -140,7 +132,7 @@ impl<'a> Repository for 
MonitorRepository<'a> { } async fn all(&mut self, tenant: &str) -> Result, Error> { - let mut connection = self.get_connection().await?; + let mut connection = get_connection(self.pool).await?; let (monitor_datas, job_datas) = connection .transaction::<(Vec, Vec), DieselError, _>(|conn| { Box::pin(async move { @@ -173,7 +165,7 @@ impl<'a> Repository for MonitorRepository<'a> { async fn save(&mut self, monitor: &Monitor) -> Result<(), Error> { let (monitor_data, job_datas) = <(MonitorData, Vec)>::from(monitor); - let mut connection = self.get_connection().await?; + let mut connection = get_connection(self.pool).await?; connection .transaction::<(), DieselError, _>(|conn| { Box::pin(async { @@ -220,7 +212,7 @@ impl<'a> Repository for MonitorRepository<'a> { async fn delete(&mut self, monitor: &Monitor) -> Result<(), Error> { let (monitor_data, _) = <(MonitorData, Vec)>::from(monitor); - let mut connection = self.get_connection().await?; + let mut connection = get_connection(self.pool).await?; diesel::delete(&monitor_data) .execute(&mut connection) .await diff --git a/api/src/infrastructure/seeding/seeds.sql b/api/src/infrastructure/seeding/seeds.sql index d4883398..c4ae3881 100644 --- a/api/src/infrastructure/seeding/seeds.sql +++ b/api/src/infrastructure/seeding/seeds.sql @@ -2,6 +2,9 @@ DELETE FROM job; DELETE FROM monitor; DELETE FROM api_key; +DELETE FROM alert_config; +DELETE FROM slack_alert_config; +DELETE FROM monitor_alert_config; -- Monitors. INSERT INTO monitor @@ -236,3 +239,32 @@ VALUES 'f0b291fe-bd41-4787-bc2d-1329903f7a6a', 'generate-orders.sh' ); + +-- Alert configs. +INSERT INTO alert_config + (alert_config_id, tenant, "name", type, active, on_late, on_error) +VALUES + ( + 'f1b1b1b1-1b1b-1b1b-1b1b-1b1b1b1b1b1b', + 'cron-mon', + 'Slack Alert for lates', + 'slack', + TRUE, + TRUE, + FALSE + ); +INSERT INTO slack_alert_config + (alert_config_id, slack_channel, slack_bot_oauth_token) +VALUES + ( + 'f1b1b1b1-1b1b-1b1b-1b1b-1b1b1b1b1b1b', + :'slack_channel', + :'slack_token' + ); + +-- Monitor alert configs. 
+INSERT INTO monitor_alert_config
+    (alert_config_id, monitor_id)
+VALUES
+    ('f1b1b1b1-1b1b-1b1b-1b1b-1b1b1b1b1b1b', 'c1bf0515-df39-448b-aa95-686360a33b36'),
+    ('f1b1b1b1-1b1b-1b1b-1b1b-1b1b1b1b1b1b', 'f0b291fe-bd41-4787-bc2d-1329903f7a6a');
diff --git a/api/tests/alert_config_repo_test.rs b/api/tests/alert_config_repo_test.rs
new file mode 100644
index 00000000..80842174
--- /dev/null
+++ b/api/tests/alert_config_repo_test.rs
@@ -0,0 +1,209 @@
+pub mod common;
+
+use pretty_assertions::assert_eq;
+use rstest::rstest;
+
+use test_utils::gen_uuid;
+
+use cron_mon_api::domain::models::alert_config::{AlertConfig, AlertType, SlackAlertConfig};
+use cron_mon_api::errors::Error;
+use cron_mon_api::infrastructure::models::alert_config::NewAlertConfigData;
+use cron_mon_api::infrastructure::repositories::alert_config_repo::AlertConfigRepository;
+use cron_mon_api::infrastructure::repositories::Repository;
+
+use common::{infrastructure, Infrastructure};
+
+#[rstest]
+#[tokio::test]
+async fn test_all(#[future] infrastructure: Infrastructure) {
+    let infra = infrastructure.await;
+    let mut repo = AlertConfigRepository::new(&infra.pool);
+
+    let alert_configs = repo.all("foo").await.unwrap();
+
+    let names: Vec<String> = alert_configs
+        .iter()
+        .map(|alert_config| alert_config.name.clone())
+        .collect();
+    assert_eq!(
+        names,
+        vec![
+            "Test Slack alert (for lates)".to_owned(),
+            "Test Slack alert (for errors)".to_owned(),
+            "Test Slack alert (for lates and errors)".to_owned()
+        ]
+    );
+
+    let types: Vec<AlertType> = alert_configs
+        .iter()
+        .map(|alert_config| alert_config.type_.clone())
+        .collect();
+    assert_eq!(
+        types,
+        vec![
+            AlertType::Slack(SlackAlertConfig {
+                channel: "#test-channel".to_owned(),
+                token: "test-token".to_owned()
+            }),
+            AlertType::Slack(SlackAlertConfig {
+                channel: "#test-channel".to_owned(),
+                token: "test-token".to_owned()
+            }),
+            AlertType::Slack(SlackAlertConfig {
+                channel: "#test-channel".to_owned(),
+                token: "test-token".to_owned()
+            })
+        ]
+    );
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_get(#[future] infrastructure: Infrastructure) {
+    let infra = infrastructure.await;
+    let mut repo = AlertConfigRepository::new(&infra.pool);
+
+    let non_existent_alert_config_id = repo
+        .get(gen_uuid("4940ede2-72fc-4e0e-838e-f15f35e3594f"), "foo")
+        .await
+        .unwrap();
+    let wrong_tenant = repo
+        .get(gen_uuid("fadd7266-648b-4102-8f85-c768655f4297"), "bar")
+        .await
+        .unwrap();
+    let should_be_some = repo
+        .get(gen_uuid("fadd7266-648b-4102-8f85-c768655f4297"), "foo")
+        .await
+        .unwrap();
+
+    assert!(non_existent_alert_config_id.is_none());
+    assert!(wrong_tenant.is_none());
+    assert!(should_be_some.is_some());
+
+    let alert_config = should_be_some.unwrap();
+    assert_eq!(alert_config.name, "Test Slack alert (for lates)");
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_save_with_new(#[future] infrastructure: Infrastructure) {
+    let infra = infrastructure.await;
+    let mut repo = AlertConfigRepository::new(&infra.pool);
+
+    let new_alert_config = AlertConfig::new_slack_config(
+        "New config".to_string(),
+        "foo".to_string(),
+        false,
+        false,
+        false,
+        "#new-channel".to_string(),
+        "new-test-token".to_string(),
+    );
+
+    repo.save(&new_alert_config).await.unwrap();
+    assert_eq!(repo.all("foo").await.unwrap().len(), 4);
+
+    let read_new_alert_config = repo
+        .get(new_alert_config.alert_config_id, "foo")
+        .await
+        .unwrap()
+        .unwrap();
+    assert_eq!(
+        new_alert_config.alert_config_id,
+        read_new_alert_config.alert_config_id
+    );
+    assert_eq!(new_alert_config.name, read_new_alert_config.name);
assert_eq!(new_alert_config.active, read_new_alert_config.active); + assert_eq!(new_alert_config.on_late, read_new_alert_config.on_late); + assert_eq!(new_alert_config.on_error, read_new_alert_config.on_error); + assert_eq!(new_alert_config.type_, read_new_alert_config.type_); +} + +#[rstest] +#[tokio::test] +async fn test_save_with_existing(#[future] infrastructure: Infrastructure) { + let infra = infrastructure.await; + let mut repo = AlertConfigRepository::new(&infra.pool); + + let mut alert_config = repo + .get(gen_uuid("fadd7266-648b-4102-8f85-c768655f4297"), "foo") + .await + .unwrap() + .unwrap(); + alert_config.name = "Updated name".to_string(); + alert_config.active = false; + alert_config.on_late = false; + alert_config.on_error = false; + + repo.save(&alert_config).await.unwrap(); + assert_eq!(repo.all("foo").await.unwrap().len(), 3); + + let read_alert_config = repo + .get(alert_config.alert_config_id, "foo") + .await + .unwrap() + .unwrap(); + + assert_eq!(alert_config.name, read_alert_config.name); + assert_eq!(alert_config.active, read_alert_config.active); + assert_eq!(alert_config.on_late, read_alert_config.on_late); + assert_eq!(alert_config.on_error, read_alert_config.on_error); + assert_eq!(alert_config.type_, read_alert_config.type_); +} + +#[rstest] +#[tokio::test] +async fn test_delete(#[future] infrastructure: Infrastructure) { + let infra = infrastructure.await; + let mut repo = AlertConfigRepository::new(&infra.pool); + + let alert_config = repo + .get(gen_uuid("fadd7266-648b-4102-8f85-c768655f4297"), "foo") + .await + .unwrap() + .unwrap(); + + repo.delete(&alert_config).await.unwrap(); + assert!(repo + .get(alert_config.alert_config_id, "foo") + .await + .unwrap() + .is_none()); + assert_eq!(repo.all("foo").await.unwrap().len(), 2); +} + +#[tokio::test] +async fn test_loading_invalid_config() { + let infra = Infrastructure::from_seeds( + vec![], + vec![], + vec![], + ( + vec![NewAlertConfigData { + alert_config_id: gen_uuid("027820c0-ab21-47cd-bff0-bc298b3e6646"), + name: "test-slack-alert".to_owned(), + tenant: "foo".to_owned(), + type_: "slack".to_owned(), + active: true, + on_late: true, + on_error: false, + }], + vec![], + ), + ) + .await; + + // Attempt to retrieve that monitor. + let mut repo = AlertConfigRepository::new(&infra.pool); + let alert_config_result = repo + .get(gen_uuid("027820c0-ab21-47cd-bff0-bc298b3e6646"), "foo") + .await; + + // Ensure that the monitor is not returned. + assert_eq!( + alert_config_result, + Err(Error::InvalidAlertConfig( + "Slack channel and/ or bot OAuth token is missing".to_string() + )) + ); +} diff --git a/api/tests/common/infra.rs b/api/tests/common/infra.rs index 9e319486..aa5f7a36 100644 --- a/api/tests/common/infra.rs +++ b/api/tests/common/infra.rs @@ -4,13 +4,16 @@ use wiremock::MockServer; use cron_mon_api::infrastructure::database::{run_migrations, DbPool}; use cron_mon_api::infrastructure::models::{ - api_key::ApiKeyData, job::JobData, monitor::MonitorData, + alert_config::{NewAlertConfigData, NewSlackAlertConfigData}, + api_key::ApiKeyData, + job::JobData, + monitor::MonitorData, }; use cron_mon_api::rocket; use super::auth::setup_mock_jwks_server; use super::postgres::seed_db; -use super::seeds::{api_key_seeds, job_seeds, monitor_seeds}; +use super::seeds::{alert_config_seeds, api_key_seeds, job_seeds, monitor_seeds}; use super::{postgres_container, PostgresContainer}; #[fixture] @@ -28,7 +31,13 @@ pub struct Infrastructure { impl Infrastructure { /// Create a new, default instance of Infrastructure. 
     pub async fn create() -> Self {
-        Self::new(monitor_seeds(), job_seeds(), api_key_seeds()).await
+        Self::new(
+            monitor_seeds(),
+            job_seeds(),
+            api_key_seeds(),
+            alert_config_seeds(),
+        )
+        .await
     }
 
     /// Create a new instance of Infrastructure with the provided seeds.
@@ -36,21 +45,29 @@ impl Infrastructure {
         monitor_seeds: Vec<MonitorData>,
         job_seeds: Vec<JobData>,
         api_key_seeds: Vec<ApiKeyData>,
+        alert_config_seeds: (Vec<NewAlertConfigData>, Vec<NewSlackAlertConfigData>),
     ) -> Self {
-        Self::new(monitor_seeds, job_seeds, api_key_seeds).await
+        Self::new(monitor_seeds, job_seeds, api_key_seeds, alert_config_seeds).await
     }
 
     async fn new(
         monitor_seeds: Vec<MonitorData>,
        job_seeds: Vec<JobData>,
         api_key_seeds: Vec<ApiKeyData>,
+        alert_config_seeds: (Vec<NewAlertConfigData>, Vec<NewSlackAlertConfigData>),
     ) -> Self {
         let container = postgres_container().await;
 
         run_migrations();
 
         // See data seeds for the expected data (/api/tests/common/mod.rs)
-        let pool = seed_db(&monitor_seeds, &job_seeds, &api_key_seeds).await;
+        let pool = seed_db(
+            &monitor_seeds,
+            &job_seeds,
+            &api_key_seeds,
+            &alert_config_seeds,
+        )
+        .await;
 
         Self {
             _container: container,
diff --git a/api/tests/common/postgres.rs b/api/tests/common/postgres.rs
index 63756834..3f27e41f 100644
--- a/api/tests/common/postgres.rs
+++ b/api/tests/common/postgres.rs
@@ -1,13 +1,18 @@
 use std::env;
 
-use diesel_async::RunQueryDsl;
+use diesel_async::{AsyncPgConnection, RunQueryDsl};
 use testcontainers::{runners::AsyncRunner, ContainerAsync, ImageExt};
 use testcontainers_modules::postgres::Postgres;
 
 use cron_mon_api::infrastructure::database::{create_connection_pool, DbPool};
-use cron_mon_api::infrastructure::db_schema::{api_key, job, monitor};
+use cron_mon_api::infrastructure::db_schema::{
+    alert_config, api_key, job, monitor, slack_alert_config,
+};
 use cron_mon_api::infrastructure::models::{
-    api_key::ApiKeyData, job::JobData, monitor::MonitorData,
+    alert_config::{NewAlertConfigData, NewSlackAlertConfigData},
+    api_key::ApiKeyData,
+    job::JobData,
+    monitor::MonitorData,
 };
 
 pub type PostgresContainer = ContainerAsync<Postgres>;
@@ -39,6 +44,7 @@ pub async fn seed_db(
     monitor_seeds: &Vec<MonitorData>,
     job_seeds: &Vec<JobData>,
     api_key_seeds: &Vec<ApiKeyData>,
+    alert_config_seeds: &(Vec<NewAlertConfigData>, Vec<NewSlackAlertConfigData>),
 ) -> DbPool {
     let pool = create_connection_pool().expect("Failed to setup DB connection pool");
 
@@ -47,15 +53,7 @@ pub async fn seed_db(
         .await
         .expect("Failed to retrieve DB connection from the pool");
 
-    diesel::delete(monitor::table)
-        .execute(&mut conn)
-        .await
-        .expect("Failed to delete existing monitor data");
-
-    diesel::delete(api_key::table)
-        .execute(&mut conn)
-        .await
-        .expect("Failed to delete existing api_key data");
+    delete_existing_data(&mut conn).await;
 
     diesel::insert_into(monitor::table)
         .values(monitor_seeds)
@@ -75,5 +73,45 @@ pub async fn seed_db(
         .await
         .expect("Failed to seed api_keys");
 
+    let (alert_config_seeds, slack_alert_config_seeds) = alert_config_seeds;
+    diesel::insert_into(alert_config::table)
+        .values(alert_config_seeds)
+        .execute(&mut conn)
+        .await
+        .expect("Failed to seed alert_configs");
+
+    diesel::insert_into(slack_alert_config::table)
+        .values(slack_alert_config_seeds)
+        .execute(&mut conn)
+        .await
+        .expect("Failed to seed slack_alert_configs");
+
     pool
 }
+
+async fn delete_existing_data(conn: &mut AsyncPgConnection) {
+    diesel::delete(monitor::table)
+        .execute(conn)
+        .await
+        .expect("Failed to delete existing monitor data");
+
+    diesel::delete(job::table)
+        .execute(conn)
+        .await
+        .expect("Failed to delete existing job data");
+
+    diesel::delete(api_key::table)
+        .execute(conn)
+        .await
+        .expect("Failed to delete existing api_key data");
+
+    diesel::delete(alert_config::table)
+        .execute(conn)
+        .await
+        .expect("Failed to delete existing alert_config data");
+
+    diesel::delete(slack_alert_config::table)
+        .execute(conn)
+        .await
+        .expect("Failed to delete existing slack_alert_config data");
+}
diff --git a/api/tests/common/seeds.rs b/api/tests/common/seeds.rs
index 35727c88..79bec572 100644
--- a/api/tests/common/seeds.rs
+++ b/api/tests/common/seeds.rs
@@ -1,5 +1,8 @@
 use cron_mon_api::infrastructure::models::{
-    api_key::ApiKeyData, job::JobData, monitor::MonitorData,
+    alert_config::{NewAlertConfigData, NewSlackAlertConfigData},
+    api_key::ApiKeyData,
+    job::JobData,
+    monitor::MonitorData,
 };
 
 use test_utils::{gen_datetime, gen_uuid};
@@ -133,3 +136,54 @@ pub fn api_key_seeds() -> Vec<ApiKeyData> {
         },
     ]
 }
+
+pub fn alert_config_seeds() -> (Vec<NewAlertConfigData>, Vec<NewSlackAlertConfigData>) {
+    (
+        vec![
+            NewAlertConfigData {
+                alert_config_id: gen_uuid("fadd7266-648b-4102-8f85-c768655f4297"),
+                name: "Test Slack alert (for lates)".to_owned(),
+                tenant: "foo".to_owned(),
+                type_: "slack".to_owned(),
+                active: true,
+                on_late: true,
+                on_error: false,
+            },
+            NewAlertConfigData {
+                alert_config_id: gen_uuid("3ba21f52-32c9-41dc-924d-d18d4fc0e81c"),
+                name: "Test Slack alert (for errors)".to_owned(),
+                tenant: "foo".to_owned(),
+                type_: "slack".to_owned(),
+                active: true,
+                on_late: false,
+                on_error: true,
+            },
+            NewAlertConfigData {
+                alert_config_id: gen_uuid("8d307d12-4696-4801-bfb6-628f8f640864"),
+                name: "Test Slack alert (for lates and errors)".to_owned(),
+                tenant: "foo".to_owned(),
+                type_: "slack".to_owned(),
+                active: true,
+                on_late: true,
+                on_error: true,
+            },
+        ],
+        vec![
+            NewSlackAlertConfigData {
+                alert_config_id: gen_uuid("fadd7266-648b-4102-8f85-c768655f4297"),
+                slack_channel: "#test-channel".to_owned(),
+                slack_bot_oauth_token: "test-token".to_owned(),
+            },
+            NewSlackAlertConfigData {
+                alert_config_id: gen_uuid("3ba21f52-32c9-41dc-924d-d18d4fc0e81c"),
+                slack_channel: "#test-channel".to_owned(),
+                slack_bot_oauth_token: "test-token".to_owned(),
+            },
+            NewSlackAlertConfigData {
+                alert_config_id: gen_uuid("8d307d12-4696-4801-bfb6-628f8f640864"),
+                slack_channel: "#test-channel".to_owned(),
+                slack_bot_oauth_token: "test-token".to_owned(),
+            },
+        ],
+    )
+}
diff --git a/api/tests/monitor_repo_test.rs b/api/tests/monitor_repo_test.rs
index 58774f22..ed45a291 100644
--- a/api/tests/monitor_repo_test.rs
+++ b/api/tests/monitor_repo_test.rs
@@ -163,6 +163,7 @@ async fn test_loading_invalid_job() {
             error_alert_sent: false,
         }],
         vec![],
+        (vec![], vec![]),
     )
     .await;
 
diff --git a/docker-compose.yml b/compose.yaml
similarity index 96%
rename from docker-compose.yml
rename to compose.yaml
index bf63f517..44dbd8a5 100644
--- a/docker-compose.yml
+++ b/compose.yaml
@@ -96,12 +96,14 @@ services:
       PGUSER: cron-mon-api
       PGPASSWORD: itsasecret
       PGDATABASE: cron-mon
+    env_file:
+      - .env
     volumes:
       - ./api/src/infrastructure/seeding/seeds.sql:/usr/share/seeds.sql
     depends_on:
       db:
         condition: service_healthy
-    command: psql -f /usr/share/seeds.sql
+    command: 'psql --set=slack_channel="$SLACK_CHANNEL" --set=slack_token="$SLACK_TOKEN" -f /usr/share/seeds.sql'
 
   rust-release:
     image: cron-mon/rust-release-image