From 726f1ed13e8d55b352e062da80b556cb82ded8a4 Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Tue, 23 Jul 2024 20:11:53 +0530
Subject: [PATCH 01/44] feat : added tests for increasing coverage

---
 crates/orchestrator/src/constants.rs           |  1 +
 .../src/controllers/jobs_controller.rs         |  4 +-
 .../src/data_storage/aws_s3/config.rs          |  3 ++
 .../src/data_storage/aws_s3/mod.rs             |  9 ++--
 crates/orchestrator/src/lib.rs                 |  4 +-
 .../orchestrator/src/tests/controllers/mod.rs  | 54 +++++++++++++++++++
 .../src/tests/data_storage/mod.rs              | 34 ++++++++++++
 crates/orchestrator/src/tests/mod.rs           |  2 +
 .../src/tests/workers/snos/mod.rs              |  3 +-
 9 files changed, 105 insertions(+), 9 deletions(-)
 create mode 100644 crates/orchestrator/src/tests/controllers/mod.rs
 create mode 100644 crates/orchestrator/src/tests/data_storage/mod.rs

diff --git a/crates/orchestrator/src/constants.rs b/crates/orchestrator/src/constants.rs
index 9361d764..1fd645a9 100644
--- a/crates/orchestrator/src/constants.rs
+++ b/crates/orchestrator/src/constants.rs
@@ -1,2 +1,3 @@
 pub const BLOB_DATA_FILE_NAME: &str = "blob_data.txt";
 pub const SNOS_OUTPUT_FILE_NAME: &str = "snos_output.json";
+pub const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue";
diff --git a/crates/orchestrator/src/controllers/jobs_controller.rs b/crates/orchestrator/src/controllers/jobs_controller.rs
index 43a67652..e5f0daf8 100644
--- a/crates/orchestrator/src/controllers/jobs_controller.rs
+++ b/crates/orchestrator/src/controllers/jobs_controller.rs
@@ -10,11 +10,11 @@ use crate::jobs::types::JobType;
 #[derive(Debug, Deserialize)]
 pub struct CreateJobRequest {
     /// Job type
-    job_type: JobType,
+    pub job_type: JobType,
     /// Internal id must be a way to identify the job. For example
     /// block_no, transaction_hash etc. The (job_type, internal_id)
     /// pair must be unique.
-    internal_id: String,
+    pub internal_id: String,
 }
 
 /// Create a job
diff --git a/crates/orchestrator/src/data_storage/aws_s3/config.rs b/crates/orchestrator/src/data_storage/aws_s3/config.rs
index 7c41f3c6..d4518d85 100644
--- a/crates/orchestrator/src/data_storage/aws_s3/config.rs
+++ b/crates/orchestrator/src/data_storage/aws_s3/config.rs
@@ -12,6 +12,8 @@ pub struct AWSS3Config {
     pub s3_bucket_name: String,
     /// S3 Bucket region
     pub s3_bucket_region: String,
+    /// Endpoint url
+    pub endpoint_url: String,
 }
 
 /// Implementation of `DataStorageConfig` for `AWSS3Config`
@@ -23,6 +25,7 @@ impl DataStorageConfig for AWSS3Config {
             s3_key_secret: get_env_var_or_panic("AWS_SECRET_ACCESS_KEY"),
             s3_bucket_name: get_env_var_or_panic("AWS_S3_BUCKET_NAME"),
             s3_bucket_region: get_env_var_or_panic("AWS_S3_BUCKET_REGION"),
+            endpoint_url: get_env_var_or_panic("AWS_ENDPOINT_URL"),
         }
     }
 }
diff --git a/crates/orchestrator/src/data_storage/aws_s3/mod.rs b/crates/orchestrator/src/data_storage/aws_s3/mod.rs
index 81751929..4e6e908a 100644
--- a/crates/orchestrator/src/data_storage/aws_s3/mod.rs
+++ b/crates/orchestrator/src/data_storage/aws_s3/mod.rs
@@ -1,5 +1,5 @@
 use crate::data_storage::aws_s3::config::AWSS3Config;
-use crate::data_storage::DataStorage;
+use crate::data_storage::{DataStorage};
 use async_trait::async_trait;
 use aws_sdk_s3::config::{Builder, Credentials, Region};
 use aws_sdk_s3::primitives::ByteStream;
@@ -22,7 +22,6 @@ pub struct AWSS3 {
 impl AWSS3 {
     /// Initializes a new AWS S3 client by passing the config
     /// and returning it.
-    #[allow(dead_code)]
     pub async fn new(config: AWSS3Config) -> Self {
         // AWS cred building
         let credentials = Credentials::new(
@@ -33,7 +32,11 @@ impl AWSS3 {
             "loaded_from_custom_env",
         );
         let region = Region::new(config.s3_bucket_region.clone().to_string());
-        let conf_builder = Builder::new().region(region).credentials_provider(credentials);
+        let conf_builder = Builder::new()
+            .region(region)
+            .credentials_provider(credentials)
+            .endpoint_url(config.endpoint_url.clone())
+            .force_path_style(true);
         let conf = conf_builder.build();
 
         // Building AWS S3 config
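The two builder calls added here are the LocalStack-enabling pair: a custom `endpoint_url` points the SDK at the emulator, and `force_path_style(true)` keeps the bucket name in the URL path rather than in a DNS subdomain that a local endpoint cannot resolve. A minimal self-contained sketch of the same wiring, assuming the `aws-sdk-s3` crate used above (the endpoint, region, and credentials are illustrative placeholders, not values from the patch):

```rust
use aws_sdk_s3::config::{Builder, Credentials, Region};
use aws_sdk_s3::Client;

async fn localstack_client() -> Client {
    // Dummy credentials are fine for LocalStack; it does not verify them.
    let credentials = Credentials::new("test", "test", None, None, "local");
    let conf = Builder::new()
        .region(Region::new("us-east-1"))
        .credentials_provider(credentials)
        // Without a custom endpoint the SDK targets real AWS. With one,
        // path-style keeps the bucket in the path (http://host:4566/bucket/key)
        // instead of a virtual-host subdomain the emulator cannot serve.
        .endpoint_url("http://localhost:4566")
        .force_path_style(true)
        .build();
    Client::from_conf(conf)
}
```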
diff --git a/crates/orchestrator/src/lib.rs b/crates/orchestrator/src/lib.rs
index 7e4367ef..3d02378b 100644
--- a/crates/orchestrator/src/lib.rs
+++ b/crates/orchestrator/src/lib.rs
@@ -1,6 +1,6 @@
 /// Config of the service. Contains configurations for DB, Queues and other services.
 pub mod config;
-mod constants;
+pub mod constants;
 /// Controllers for the routes
 pub mod controllers;
 /// Contains the trait that implements the fetching functions
@@ -17,6 +17,6 @@ pub mod queue;
 /// Contains the routes for the service
 pub mod routes;
 #[cfg(test)]
-mod tests;
+pub mod tests;
 /// Contains workers which act like cron jobs
 pub mod workers;
diff --git a/crates/orchestrator/src/tests/controllers/mod.rs b/crates/orchestrator/src/tests/controllers/mod.rs
new file mode 100644
index 00000000..2aae2349
--- /dev/null
+++ b/crates/orchestrator/src/tests/controllers/mod.rs
@@ -0,0 +1,54 @@
+use crate::config::config_force_init;
+use crate::constants::JOB_PROCESSING_QUEUE;
+use crate::controllers::jobs_controller::{create_job, CreateJobRequest};
+use crate::database::MockDatabase;
+use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType};
+use crate::queue::MockQueueProvider;
+use crate::tests::common::init_config;
+use axum::Json;
+use mockall::predicate::eq;
+use rstest::rstest;
+use uuid::Uuid;
+
+#[rstest]
+#[tokio::test]
+async fn test_create_job_jobs_controller() -> color_eyre::Result<()> {
+    let mut db = MockDatabase::new();
+    let mut queue = MockQueueProvider::new();
+
+    // mocking db get function (when creating job it should return no job existing)
+    db.expect_get_last_successful_job_by_type().times(1).with(eq(JobType::SnosRun)).returning(|_| Ok(None));
+    // mocking db get function (when creating job to pre-check if job is not existing : worker module)
+    db.expect_get_job_by_internal_id_and_type()
+        .times(1)
+        .with(eq("1"), eq(JobType::SnosRun))
+        .returning(|_, _| Ok(None));
+    // mocking creation of the job
+    db.expect_create_job().times(1).withf(move |item| item.internal_id == "1".to_string()).returning(move |_| {
+        Ok(JobItem {
+            id: Uuid::new_v4(),
+            internal_id: "1".to_string(),
+            job_type: JobType::SnosRun,
+            status: JobStatus::Created,
+            external_id: ExternalId::Number(0),
+            metadata: Default::default(),
+            version: 0,
+        })
+    });
+    // mocking sending of the job into the queue after the creation
+    queue
+        .expect_send_message_to_queue()
+        .returning(|_, _, _| Ok(()))
+        .withf(|queue, _payload, _delay| queue == JOB_PROCESSING_QUEUE);
+
+    let config = init_config(None, Some(db), Some(queue), None, None, None, None).await;
+    config_force_init(config).await;
+
+    let create_job_request = CreateJobRequest { job_type: JobType::SnosRun, internal_id: "1".to_string() };
+
+    let create_job_call = create_job(Json::from(create_job_request)).await.unwrap();
+    // comparing the output (safety check not really necessary)
+    assert_eq!(create_job_call.0, Json::from(()).0);
+
+    Ok(())
+}
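The new test is built entirely from mockall expectations: `times` pins the expected call count (checked when the mock drops), `with`/`withf` match arguments, and `returning` supplies the canned result. A stripped-down sketch of that anatomy on a toy trait; the trait, methods, and values here are invented for illustration and are not part of the orchestrator:

```rust
use mockall::automock;
use mockall::predicate::eq;

#[automock]
trait JobStore {
    fn fetch(&self, internal_id: &str) -> Option<u64>;
    fn insert(&self, internal_id: &str, version: u64) -> bool;
}

fn main() {
    let mut store = MockJobStore::new();
    // `with` matches arguments through predicates; `times` asserts call count.
    store.expect_fetch().times(1).with(eq("1")).returning(|_| None);
    // `withf` takes a closure when predicate combinators get clumsy.
    store.expect_insert().times(1).withf(|id, _v| id == "1").returning(|_, _| true);

    assert_eq!(store.fetch("1"), None);
    assert!(store.insert("1", 0));
}
```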
diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs
new file mode 100644
index 00000000..6322b3a0
--- /dev/null
+++ b/crates/orchestrator/src/tests/data_storage/mod.rs
@@ -0,0 +1,34 @@
+use bytes::Bytes;
+use dotenvy::dotenv;
+use rstest::rstest;
+use serde_json::json;
+use crate::data_storage::aws_s3::AWSS3;
+use crate::data_storage::aws_s3::config::AWSS3Config;
+use crate::data_storage::{DataStorage, DataStorageConfig};
+
+#[rstest]
+#[tokio::test]
+async fn test_put_and_get_data_s3() -> color_eyre::Result<()> {
+    dotenv().ok();
+    let config = AWSS3Config::new_from_env();
+    let s3_client = AWSS3::new(config).await;
+
+    let mock_data = json!(
+        {
+            "body" : "hello world. hello world."
+        }
+    );
+    let json_bytes = serde_json::to_vec(&mock_data)?;
+    let key = "test_data.txt";
+
+    // putting test data on key : "test_data.txt"
+    s3_client.put_data(Bytes::from(json_bytes), key).await.expect("Unable to put data into the bucket.");
+
+    // getting the data from key : "test_data.txt"
+    let data = s3_client.get_data(key).await.expect("Unable to get the data from the bucket.");
+    let received_json: serde_json::Value = serde_json::from_slice(&data)?;
+
+    assert_eq!(received_json, mock_data);
+
+    Ok(())
+}
\ No newline at end of file
diff --git a/crates/orchestrator/src/tests/mod.rs b/crates/orchestrator/src/tests/mod.rs
index b4b53dd3..142be3cf 100644
--- a/crates/orchestrator/src/tests/mod.rs
+++ b/crates/orchestrator/src/tests/mod.rs
@@ -7,4 +7,6 @@ pub mod server;
 pub mod queue;
 
 pub mod common;
+mod controllers;
 pub mod workers;
+mod data_storage;
diff --git a/crates/orchestrator/src/tests/workers/snos/mod.rs b/crates/orchestrator/src/tests/workers/snos/mod.rs
index fa9a4a1e..c4e95902 100644
--- a/crates/orchestrator/src/tests/workers/snos/mod.rs
+++ b/crates/orchestrator/src/tests/workers/snos/mod.rs
@@ -1,4 +1,5 @@
 use crate::config::config_force_init;
+use crate::constants::JOB_PROCESSING_QUEUE;
 use crate::database::MockDatabase;
 use crate::jobs::types::JobType;
 use crate::queue::MockQueueProvider;
@@ -26,8 +27,6 @@ async fn test_snos_worker(#[case] db_val: bool) -> Result<(), Box<dyn Error>> {
     let start_job_index;
     let block;
 
-    const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue";
-
     // Mocking db function expectations
     if !db_val {
         db.expect_get_last_successful_job_by_type().times(1).with(eq(JobType::SnosRun)).returning(|_| Ok(None));

From b3a80b04003ab942a27ed3b0d85c3e8592413f80 Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Wed, 24 Jul 2024 19:46:28 +0530
Subject: [PATCH 02/44] feat : added mongo db tests and fixtures and updated
 the ci for tests

---
 .env.example                                   |  3 +
 .github/workflows/coverage.yml                 | 38 ++++++++++
 crates/orchestrator/src/config.rs              | 10 +--
 .../src/data_storage/aws_s3/mod.rs             |  2 +-
 .../orchestrator/src/database/mongodb/mod.rs   |  4 ++
 crates/orchestrator/src/tests/common/mod.rs    | 52 +++++++++++++-
 .../orchestrator/src/tests/controllers/mod.rs  | 11 +--
 .../src/tests/data_storage/mod.rs              |  8 +--
 crates/orchestrator/src/tests/database/mod.rs  | 70 ++++++++++++++++++-
 crates/orchestrator/src/tests/mod.rs           |  2 +-
 crates/settlement-clients/ethereum/src/lib.rs  |  2 +-
 11 files changed, 179 insertions(+), 23 deletions(-)

diff --git a/.env.example b/.env.example
index 57635205..37723425 100644
--- a/.env.example
+++ b/.env.example
@@ -28,3 +28,6 @@ AWS_SECRET_ACCESS_KEY=
 # S3
 AWS_S3_BUCKET_NAME=
 AWS_S3_BUCKET_REGION=
+
+# Local Stack
+AWS_ENDPOINT_URL=
\ No newline at end of file
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 29f80c5a..d4f094eb 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -10,9 +10,29 @@ jobs:
     # sadly, for now we have to "rebuild" for the coverage
     runs-on: ubuntu-latest
 
+    services:
+      localstack:
+        image: localstack/localstack
+        env:
+          SERVICES: s3, sqs
+          DEFAULT_REGION: us-east-1
+          AWS_ACCESS_KEY_ID: "AWS_ACCESS_KEY_ID"
+          AWS_SECRET_ACCESS_KEY: "AWS_SECRET_ACCESS_KEY"
+        ports:
+          - 4566:4566
+
     steps:
       - uses: actions/checkout@v3
 
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.x'
+
+      - name: Install dependencies
+        run: |
+          pip install awscli-local
+
       # selecting a toolchain either by action or manual `rustup` calls should happen
       # before the plugin, as the cache uses the current rustc version as its cache key
       - run: rustup show
@@ -25,7 +45,25 @@ jobs:
           cargo llvm-cov clean --workspace
 
       - name: Run llvm-cov
+        env:
+          AWS_ACCESS_KEY_ID: "AWS_ACCESS_KEY_ID"
+          AWS_SECRET_ACCESS_KEY: "AWS_SECRET_ACCESS_KEY"
+          AWS_S3_BUCKET_NAME: "madara-orchestrator-test-bucket"
+          AWS_S3_BUCKET_REGION: "us-east-1"
+          AWS_ENDPOINT_URL: "http://localhost.localstack.cloud:4566"
+          MADARA_RPC_URL: "http://localhost:3000"
+          ETHEREUM_RPC_URL: "http://localhost:3001"
+          MEMORY_PAGES_CONTRACT_ADDRESS: "0x000000000000000000000000000000000001dead"
+          PRIVATE_KEY: "0xdead"
+          ETHEREUM_PRIVATE_KEY: "0x000000000000000000000000000000000000000000000000000000000000beef"
+          STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS: "0x000000000000000000000000000000000002dead"
+          DA_LAYER: "ethereum"
+          PROVER_SERVICE: "sharp"
+          SETTLEMENT_LAYER: "ethereum"
+          DATA_STORAGE: "s3"
+          MONGODB_CONNECTION_STRING: "mongodb://localhost:27017"
         run: |
+          aws --endpoint-url=http://localhost:4566 s3api create-bucket --bucket madara-orchestrator-test-bucket
           cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1
 
       - name: Upload coverage to codecov.io
diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs
index 41ce3824..55761526 100644
--- a/crates/orchestrator/src/config.rs
+++ b/crates/orchestrator/src/config.rs
@@ -148,7 +148,7 @@ pub async fn config_force_init(config: Config) {
 }
 
 /// Builds the DA client based on the environment variable DA_LAYER
-async fn build_da_client() -> Box<dyn DaClient + Send + Sync> {
+pub async fn build_da_client() -> Box<dyn DaClient + Send + Sync> {
     match get_env_var_or_panic("DA_LAYER").as_str() {
         "ethereum" => {
             let config = EthereumDaConfig::new_from_env();
@@ -159,7 +159,7 @@ async fn build_da_client() -> Box<dyn DaClient + Send + Sync> {
 }
 
 /// Builds the prover service based on the environment variable PROVER_SERVICE
-fn build_prover_service(settings_provider: &impl SettingsProvider) -> Box<dyn ProverClient> {
+pub fn build_prover_service(settings_provider: &impl SettingsProvider) -> Box<dyn ProverClient> {
     match get_env_var_or_panic("PROVER_SERVICE").as_str() {
         "sharp" => Box::new(SharpProverService::with_settings(settings_provider)),
         _ => panic!("Unsupported prover service"),
@@ -167,7 +167,9 @@ fn build_prover_service(settings_provider: &impl SettingsProvider) -> Box<dyn ProverClient> {
 }
 
 /// Builds the settlement client depending on the env variable SETTLEMENT_LAYER
-async fn build_settlement_client(settings_provider: &impl SettingsProvider) -> Box<dyn SettlementClient + Send + Sync> {
+pub async fn build_settlement_client(
+    settings_provider: &impl SettingsProvider,
+) -> Box<dyn SettlementClient + Send + Sync> {
     match get_env_var_or_panic("SETTLEMENT_LAYER").as_str() {
         "ethereum" => Box::new(EthereumSettlementClient::with_settings(settings_provider)),
         "starknet" => Box::new(StarknetSettlementClient::with_settings(settings_provider).await),
@@ -175,7 +177,7 @@ async fn build_settlement_client(settings_provider: &impl SettingsProvider) -> Box<dyn SettlementClient + Send + Sync> {
     }
 }
 
-async fn build_storage_client() -> Box<dyn DataStorage + Send + Sync> {
+pub async fn build_storage_client() -> Box<dyn DataStorage + Send + Sync> {
     match get_env_var_or_panic("DATA_STORAGE").as_str() {
         "s3" => Box::new(AWSS3::new(AWSS3Config::new_from_env()).await),
         _ => panic!("Unsupported Storage Client"),
diff --git a/crates/orchestrator/src/data_storage/aws_s3/mod.rs b/crates/orchestrator/src/data_storage/aws_s3/mod.rs
index 4e6e908a..da673eba 100644
--- a/crates/orchestrator/src/data_storage/aws_s3/mod.rs
+++ b/crates/orchestrator/src/data_storage/aws_s3/mod.rs
@@ -1,5 +1,5 @@
 use crate::data_storage::aws_s3::config::AWSS3Config;
-use crate::data_storage::{DataStorage};
+use crate::data_storage::DataStorage;
 use async_trait::async_trait;
 use aws_sdk_s3::config::{Builder, Credentials, Region};
 use aws_sdk_s3::primitives::ByteStream;
diff --git a/crates/orchestrator/src/database/mongodb/mod.rs b/crates/orchestrator/src/database/mongodb/mod.rs
index 5be0cf54..aea5a41a 100644
--- a/crates/orchestrator/src/database/mongodb/mod.rs
+++ b/crates/orchestrator/src/database/mongodb/mod.rs
@@ -40,6 +40,10 @@ impl MongoDb {
         MongoDb { client }
     }
 
+    pub fn client(&self) -> Client {
+        self.client.clone()
+    }
+
     fn get_job_collection(&self) -> Collection<JobItem> {
         self.client.database("orchestrator").collection("jobs")
     }
diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs
index c8a36696..3f57a6f9 100644
--- a/crates/orchestrator/src/tests/common/mod.rs
+++ b/crates/orchestrator/src/tests/common/mod.rs
@@ -6,16 +6,22 @@ use std::sync::Arc;
 use ::uuid::Uuid;
 use constants::*;
 use da_client_interface::MockDaClient;
+use dotenvy::dotenv;
+use mongodb::Client;
 use prover_client_interface::MockProverClient;
 use rstest::*;
 use settlement_client_interface::MockSettlementClient;
 use starknet::providers::jsonrpc::HttpTransport;
 use starknet::providers::JsonRpcClient;
 use url::Url;
+use utils::env_utils::get_env_var_or_panic;
+use utils::settings::default::DefaultSettingsProvider;
 
-use crate::config::Config;
+use crate::config::{config_force_init, Config};
 use crate::data_storage::MockDataStorage;
-use crate::database::MockDatabase;
+use crate::database::mongodb::config::MongoDbConfig;
+use crate::database::mongodb::MongoDb;
+use crate::database::{DatabaseConfig, MockDatabase};
 use crate::jobs::types::JobStatus::Created;
 use crate::jobs::types::JobType::DataSubmission;
 use crate::jobs::types::{ExternalId, JobItem};
@@ -74,3 +80,45 @@ pub fn custom_job_item(default_job_item: JobItem, #[default(String::from("0"))]
 
     job_item
 }
+
+/// For implementation of integration tests
+#[fixture]
+pub async fn build_config() -> color_eyre::Result<()> {
+    dotenv().ok();
+
+    // init starknet client
+    let provider = JsonRpcClient::new(HttpTransport::new(
+        Url::parse(get_env_var_or_panic("MADARA_RPC_URL").as_str()).expect("Failed to parse URL"),
+    ));
+
+    // init database
+    let database = Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await);
+
+    // init the queue
+    let queue = Box::new(crate::queue::sqs::SqsQueue {});
+
+    let da_client = crate::config::build_da_client().await;
+    let settings_provider = DefaultSettingsProvider {};
+    let settlement_client = crate::config::build_settlement_client(&settings_provider).await;
+    let prover_client = crate::config::build_prover_service(&settings_provider);
+    let storage_client = crate::config::build_storage_client().await;
+
+    let config =
+        Config::new(Arc::new(provider), da_client, prover_client, settlement_client, database, queue, storage_client);
+    config_force_init(config).await;
+
+    Ok(())
+}
+
+#[fixture]
+pub async fn get_database_client() -> Client {
+    MongoDb::new(MongoDbConfig::new_from_env()).await.client()
+}
+
+#[fixture]
+pub async fn drop_database() -> color_eyre::Result<()> {
+    let db_client: Client = get_database_client().await;
+    // dropping `jobs` collection.
+    db_client.database("orchestrator").collection::<JobItem>("jobs").drop(None).await?;
+    Ok(())
+}
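Together, `build_config` and `drop_database` give integration tests a clean-slate recipe: wire the real Mongo/S3/queue clients from environment variables, then wipe the `jobs` collection before asserting anything. A sketch of a test leaning on them; the final assertion is illustrative and not taken from the patch:

```rust
use rstest::rstest;

#[rstest]
#[tokio::test]
async fn example_clean_slate_test() -> color_eyre::Result<()> {
    // Build the real config from env vars, then start from an empty collection.
    build_config().await?;
    drop_database().await?;

    let config = crate::config::config().await;
    let database_client = config.database();

    // Illustrative check: a freshly dropped collection contains no jobs.
    let job = database_client
        .get_job_by_internal_id_and_type("1", &crate::jobs::types::JobType::SnosRun)
        .await?;
    assert!(job.is_none());
    Ok(())
}
```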
diff --git a/crates/orchestrator/src/tests/controllers/mod.rs b/crates/orchestrator/src/tests/controllers/mod.rs
index 2aae2349..b5849a86 100644
--- a/crates/orchestrator/src/tests/controllers/mod.rs
+++ b/crates/orchestrator/src/tests/controllers/mod.rs
@@ -19,12 +19,9 @@ async fn test_create_job_jobs_controller() -> color_eyre::Result<()> {
     // mocking db get function (when creating job it should return no job existing)
     db.expect_get_last_successful_job_by_type().times(1).with(eq(JobType::SnosRun)).returning(|_| Ok(None));
     // mocking db get function (when creating job to pre-check if job is not existing : worker module)
-    db.expect_get_job_by_internal_id_and_type()
-        .times(1)
-        .with(eq("1"), eq(JobType::SnosRun))
-        .returning(|_, _| Ok(None));
+    db.expect_get_job_by_internal_id_and_type().times(1).with(eq("1"), eq(JobType::SnosRun)).returning(|_, _| Ok(None));
     // mocking creation of the job
-    db.expect_create_job().times(1).withf(move |item| item.internal_id == "1".to_string()).returning(move |_| {
+    db.expect_create_job().times(1).withf(move |item| item.internal_id == *"1").returning(move |_| {
         Ok(JobItem {
             id: Uuid::new_v4(),
             internal_id: "1".to_string(),
@@ -46,9 +43,7 @@ async fn test_create_job_jobs_controller() -> color_eyre::Result<()> {
 
     let create_job_request = CreateJobRequest { job_type: JobType::SnosRun, internal_id: "1".to_string() };
 
-    let create_job_call = create_job(Json::from(create_job_request)).await.unwrap();
-    // comparing the output (safety check not really necessary)
-    assert_eq!(create_job_call.0, Json::from(()).0);
+    let _ = create_job(Json::from(create_job_request)).await.unwrap();
 
     Ok(())
 }
diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs
index 6322b3a0..8436a808 100644
--- a/crates/orchestrator/src/tests/data_storage/mod.rs
+++ b/crates/orchestrator/src/tests/data_storage/mod.rs
@@ -1,10 +1,10 @@
+use crate::data_storage::aws_s3::config::AWSS3Config;
+use crate::data_storage::aws_s3::AWSS3;
+use crate::data_storage::{DataStorage, DataStorageConfig};
 use bytes::Bytes;
 use dotenvy::dotenv;
 use rstest::rstest;
 use serde_json::json;
-use crate::data_storage::aws_s3::AWSS3;
-use crate::data_storage::aws_s3::config::AWSS3Config;
-use crate::data_storage::{DataStorage, DataStorageConfig};
 
 #[rstest]
 #[tokio::test]
@@ -31,4 +31,4 @@ async fn test_put_and_get_data_s3() -> color_eyre::Result<()> {
     assert_eq!(received_json, mock_data);
 
     Ok(())
-}
\ No newline at end of file
+}
diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs
index c8adec2c..ae9db9df 100644
--- a/crates/orchestrator/src/tests/database/mod.rs
+++ b/crates/orchestrator/src/tests/database/mod.rs
@@ -1,7 +1,73 @@
+use crate::config::config;
+use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType};
+use crate::tests::common::{build_config, drop_database};
+use color_eyre::eyre::eyre;
+use dotenvy::dotenv;
 use rstest::*;
+use uuid::Uuid;
 
 #[rstest]
 #[tokio::test]
-async fn test_database() {
-    // TODO: write test case
+async fn test_database_connection() -> color_eyre::Result<()> {
+    let init_config_error = build_config().await.is_err();
+    if init_config_error {
+        return Err(eyre!("Not able to init config."));
+    }
+
+    Ok(())
+}
+
+/// Tests for `create_job` operation in database trait.
+/// Creates 3 jobs and asserts them.
+#[rstest]
+#[tokio::test]
+async fn test_database_create_job() -> color_eyre::Result<()> {
+    dotenv().ok();
+    let init_config = build_config().await.is_ok();
+    if !init_config {
+        return Err(eyre!("Not able to init config."));
+    }
+
+    drop_database().await.unwrap();
+
+    let config = config().await;
+    let database_client = config.database();
+
+    let job_vec = [
+        get_random_job_item(JobType::ProofCreation, JobStatus::Created, 1),
+        get_random_job_item(JobType::ProofCreation, JobStatus::Created, 2),
+        get_random_job_item(JobType::ProofCreation, JobStatus::Created, 3),
+    ];
+
+    database_client.create_job(job_vec[0].clone()).await.unwrap();
+    database_client.create_job(job_vec[1].clone()).await.unwrap();
+    database_client.create_job(job_vec[2].clone()).await.unwrap();
+
+    let get_job_1 =
+        database_client.get_job_by_internal_id_and_type("1", &JobType::ProofCreation).await.unwrap().unwrap();
+    let get_job_2 =
+        database_client.get_job_by_internal_id_and_type("2", &JobType::ProofCreation).await.unwrap().unwrap();
+    let get_job_3 =
+        database_client.get_job_by_internal_id_and_type("3", &JobType::ProofCreation).await.unwrap().unwrap();
+
+    assert_eq!(get_job_1, job_vec[0].clone());
+    assert_eq!(get_job_2, job_vec[1].clone());
+    assert_eq!(get_job_3, job_vec[2].clone());
+
+    Ok(())
+}
+
+// Test Util Functions
+// ==========================================
+
+fn get_random_job_item(job_type: JobType, job_status: JobStatus, internal_id: u64) -> JobItem {
+    JobItem {
+        id: Uuid::new_v4(),
+        internal_id: internal_id.to_string(),
+        job_type,
+        status: job_status,
+        external_id: ExternalId::Number(0),
+        metadata: Default::default(),
+        version: 0,
+    }
 }
diff --git a/crates/orchestrator/src/tests/mod.rs b/crates/orchestrator/src/tests/mod.rs
index 142be3cf..348e5c70 100644
--- a/crates/orchestrator/src/tests/mod.rs
+++ b/crates/orchestrator/src/tests/mod.rs
@@ -8,5 +8,5 @@ pub mod queue;
 
 pub mod common;
 mod controllers;
-pub mod workers;
 mod data_storage;
+pub mod workers;
diff --git a/crates/settlement-clients/ethereum/src/lib.rs b/crates/settlement-clients/ethereum/src/lib.rs
index 534edf48..cdf2788b 100644
--- a/crates/settlement-clients/ethereum/src/lib.rs
+++ b/crates/settlement-clients/ethereum/src/lib.rs
@@ -69,7 +69,7 @@ impl EthereumSettlementClient {
             ProviderBuilder::new().with_recommended_fillers().wallet(wallet.clone()).on_http(settlement_cfg.rpc_url),
         );
         let core_contract_client = StarknetValidityContractClient::new(
-            Address::from_slice(settlement_cfg.core_contract_address.as_bytes()).0.into(),
+            Address::from_str(&settlement_cfg.core_contract_address).unwrap().0.into(),
             provider.clone(),
         );
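The one-line settlement fix above is easy to miss but substantive: `core_contract_address` is a hex *string*, so `as_bytes()` yields its 42 UTF-8 text bytes rather than the 20 address bytes, and `from_slice` panics on the length mismatch. A small sketch of the difference, assuming the alloy `Address` primitive the client uses:

```rust
use std::str::FromStr;
use alloy_primitives::Address;

fn main() {
    let cfg_value = "0x000000000000000000000000000000000002dead";

    // Correct: parse the hex string into its underlying 20-byte address.
    let parsed = Address::from_str(cfg_value).unwrap();
    assert_eq!(parsed.to_string().to_lowercase(), cfg_value);

    // Wrong: `as_bytes()` gives the 42 UTF-8 bytes of the *text*, which is
    // not a 20-byte address, so `from_slice` would panic at runtime:
    // let bogus = Address::from_slice(cfg_value.as_bytes());
}
```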
From 5826d44e685b2028420558d3a596c23b11c440cb Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Wed, 24 Jul 2024 20:04:42 +0530
Subject: [PATCH 03/44] update : removed unwanted fixtures

---
 .github/workflows/coverage.yml              | 3 ++-
 crates/orchestrator/src/tests/common/mod.rs | 8 +-------
 2 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index d4f094eb..ba9e6ed0 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -63,7 +63,8 @@ jobs:
           DATA_STORAGE: "s3"
           MONGODB_CONNECTION_STRING: "mongodb://localhost:27017"
         run: |
-          aws --endpoint-url=http://localhost:4566 s3api create-bucket --bucket madara-orchestrator-test-bucket
+          awslocal s3api create-bucket --bucket madara-orchestrator-test-bucket
+          awslocal s3 ls
           cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1
 
       - name: Upload coverage to codecov.io
diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs
index 3f57a6f9..4265a707 100644
--- a/crates/orchestrator/src/tests/common/mod.rs
+++ b/crates/orchestrator/src/tests/common/mod.rs
@@ -110,14 +110,8 @@ pub async fn build_config() -> color_eyre::Result<()> {
     Ok(())
 }
 
-#[fixture]
-pub async fn get_database_client() -> Client {
-    MongoDb::new(MongoDbConfig::new_from_env()).await.client()
-}
-
-#[fixture]
 pub async fn drop_database() -> color_eyre::Result<()> {
-    let db_client: Client = get_database_client().await;
+    let db_client: Client = MongoDb::new(MongoDbConfig::new_from_env()).await.client();
     // dropping `jobs` collection.
     db_client.database("orchestrator").collection::<JobItem>("jobs").drop(None).await?;
     Ok(())

From 0e70fff85ab96d4f248967027006da49039194ba Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Wed, 24 Jul 2024 20:05:39 +0530
Subject: [PATCH 04/44] update : removed unwanted fixtures

---
 .github/workflows/coverage.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index ba9e6ed0..ef9d662b 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -27,7 +27,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v2
         with:
-          python-version: '3.x'
+          python-version: "3.x"
 
       - name: Install dependencies
         run: |

From 97f1f210242357a68e318fa9388cd3be4223ef2f Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Wed, 24 Jul 2024 20:11:09 +0530
Subject: [PATCH 05/44] update : added mongo db runner in ci

---
 .github/workflows/coverage.yml | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index ef9d662b..2ce12339 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -20,6 +20,10 @@ jobs:
           AWS_SECRET_ACCESS_KEY: "AWS_SECRET_ACCESS_KEY"
         ports:
           - 4566:4566
+      mongodb:
+        image: mongo:latest
+        ports:
+          - 27017:27017
 
     steps:
       - uses: actions/checkout@v3
@@ -29,9 +33,20 @@ jobs:
         with:
           python-version: "3.x"
 
+      - name: Set up Node.js
+        uses: actions/setup-node@v2
+        with:
+          node-version: "14"
+
       - name: Install dependencies
         run: |
           pip install awscli-local
+          npm ci
+
+      - name: Verify MongoDB connection
+        run: |
+          sudo apt-get install -y mongodb-clients
+          mongosh --eval "db.runCommand({ping:1})"
 
       # selecting a toolchain either by action or manual `rustup` calls should happen
       # before the plugin, as the cache uses the current rustc version as its cache key

From a64613cefeb39bc99020917dc1b9a43131cfc3e0 Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Wed, 24 Jul 2024 20:15:06 +0530
Subject: [PATCH 06/44] update : added mongo db runner in ci

---
 .github/workflows/coverage.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 2ce12339..d0b842ad 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -41,7 +41,6 @@ jobs:
       - name: Install dependencies
         run: |
           pip install awscli-local
-          npm ci
 
       - name: Verify MongoDB connection
         run: |

From db472d538a4cc7f3ff5bf3e5769b1f571f940ad4 Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Wed, 24 Jul 2024 20:18:53 +0530
Subject: [PATCH 07/44] update : added mongo db runner in ci

---
 .github/workflows/coverage.yml | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index d0b842ad..c97218b0 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -33,20 +33,10 @@ jobs:
         with:
           python-version: "3.x"
 
-      - name: Set up Node.js
-        uses: actions/setup-node@v2
-        with:
-          node-version: "14"
-
       - name: Install dependencies
         run: |
           pip install awscli-local
 
-      - name: Verify MongoDB connection
-        run: |
-          sudo apt-get install -y mongodb-clients
-          mongosh --eval "db.runCommand({ping:1})"
-
       # selecting a toolchain either by action or manual `rustup` calls should happen
       # before the plugin, as the cache uses the current rustc version as its cache key
       - run: rustup show

From 15541ef49dcb53ef579457f7e6bd3498f1635d36 Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Fri, 26 Jul 2024 01:08:26 +0530
Subject: [PATCH 08/44] update : updated with new changes and ci

---
 .env.example                                   |  3 --
 .env.test.example                              | 33 +++++++++++++
 .github/workflows/coverage.yml                 | 44 ++++++-----------
 .gitignore                                     |  1 +
 CHANGELOG.md                                   |  3 ++
 crates/orchestrator/src/constants.rs           |  1 -
 .../src/controllers/jobs_controller.rs         | 24 ---------
 crates/orchestrator/src/controllers/mod.rs     |  3 --
 .../src/data_storage/aws_s3/config.rs          |  2 +
 .../src/data_storage/aws_s3/mod.rs             | 20 ++++++--
 crates/orchestrator/src/data_storage/mod.rs    |  2 +
 crates/orchestrator/src/queue/job_queue.rs     |  4 +-
 crates/orchestrator/src/routes.rs              | 14 +-----
 crates/orchestrator/src/tests/common/mod.rs    | 11 +++--
 .../orchestrator/src/tests/controllers/mod.rs  | 49 -------------------
 .../src/tests/data_storage/mod.rs              |  4 +-
 crates/orchestrator/src/tests/database/mod.rs  | 10 ++--
 crates/orchestrator/src/tests/mod.rs           |  1 -
 .../src/tests/workers/snos/mod.rs              |  2 +-
 19 files changed, 90 insertions(+), 141 deletions(-)
 create mode 100644 .env.test.example
 delete mode 100644 crates/orchestrator/src/controllers/jobs_controller.rs
 delete mode 100644 crates/orchestrator/src/tests/controllers/mod.rs

diff --git a/.env.example b/.env.example
index 37723425..57635205 100644
--- a/.env.example
+++ b/.env.example
@@ -28,6 +28,3 @@ AWS_SECRET_ACCESS_KEY=
 # S3
 AWS_S3_BUCKET_NAME=
 AWS_S3_BUCKET_REGION=
-
-# Local Stack
-AWS_ENDPOINT_URL=
\ No newline at end of file
diff --git a/.env.test.example b/.env.test.example
new file mode 100644
index 00000000..e55b97fc
--- /dev/null
+++ b/.env.test.example
@@ -0,0 +1,33 @@
+HOST=
+PORT=
+DATABASE_URL=
+MADARA_RPC_URL=
+DA_LAYER=
+SETTLEMENT_LAYER=
+
+# Ethereum
+ETHEREUM_PRIVATE_KEY=
+ETHEREUM_RPC_URL=
+MEMORY_PAGES_CONTRACT_ADDRESS=
+STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS=
+
+
+# Starknet
+STARKNET_PUBLIC_KEY=
+STARNET_PRIVATE_KEY=
+STARKNET_RPC_URL=
+STARKNET_CAIRO_CORE_CONTRACT_ADDRESS=
+
+# MongoDB connection string
+MONGODB_CONNECTION_STRING=
+
+# SQS
+AWS_ACCESS_KEY_ID=
+AWS_SECRET_ACCESS_KEY=
+
+# S3
+AWS_S3_BUCKET_NAME=
+AWS_S3_BUCKET_REGION=
+
+# AWS Local Stack URL
+AWS_ENDPOINT_URL=
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index c97218b0..d5d10b8d 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -28,15 +28,6 @@ jobs:
     steps:
       - uses: actions/checkout@v3
 
-      - name: Set up Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: "3.x"
-
-      - name: Install dependencies
-        run: |
-          pip install awscli-local
-
       # selecting a toolchain either by action or manual `rustup` calls should happen
       # before the plugin, as the cache uses the current rustc version as its cache key
       - run: rustup show
@@ -49,26 +40,23 @@ jobs:
           cargo llvm-cov clean --workspace
 
       - name: Run llvm-cov
-        env:
-          AWS_ACCESS_KEY_ID: "AWS_ACCESS_KEY_ID"
-          AWS_SECRET_ACCESS_KEY: "AWS_SECRET_ACCESS_KEY"
-          AWS_S3_BUCKET_NAME: "madara-orchestrator-test-bucket"
-          AWS_S3_BUCKET_REGION: "us-east-1"
-          AWS_ENDPOINT_URL: "http://localhost.localstack.cloud:4566"
-          MADARA_RPC_URL: "http://localhost:3000"
-          ETHEREUM_RPC_URL: "http://localhost:3001"
-          MEMORY_PAGES_CONTRACT_ADDRESS: "0x000000000000000000000000000000000001dead"
-          PRIVATE_KEY: "0xdead"
-          ETHEREUM_PRIVATE_KEY: "0x000000000000000000000000000000000000000000000000000000000000beef"
-          STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS: "0x000000000000000000000000000000000002dead"
-          DA_LAYER: "ethereum"
-          PROVER_SERVICE: "sharp"
-          SETTLEMENT_LAYER: "ethereum"
-          DATA_STORAGE: "s3"
-          MONGODB_CONNECTION_STRING: "mongodb://localhost:27017"
         run: |
-          awslocal s3api create-bucket --bucket madara-orchestrator-test-bucket
-          awslocal s3 ls
+          echo 'AWS_ACCESS_KEY_ID="AWS_ACCESS_KEY_ID"' >> .env.test
+          echo 'AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY"' >> .env.test
+          echo 'AWS_S3_BUCKET_NAME="madara-orchestrator-test-bucket"' >> .env.test
+          echo 'AWS_S3_BUCKET_REGION="us-east-1"' >> .env.test
+          echo 'AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566"' >> .env.test
+          echo 'MADARA_RPC_URL="http://localhost:3000"' >> .env.test
+          echo 'ETHEREUM_RPC_URL="http://localhost:3001"' >> .env.test
+          echo 'MEMORY_PAGES_CONTRACT_ADDRESS="0x000000000000000000000000000000000001dead"' >> .env.test
+          echo 'PRIVATE_KEY="0xdead"' >> .env.test
+          echo 'ETHEREUM_PRIVATE_KEY="0x000000000000000000000000000000000000000000000000000000000000beef"' >> .env.test
+          echo 'STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS="0x000000000000000000000000000000000002dead"' >> .env.test
+          echo 'DA_LAYER="ethereum"' >> .env.test
+          echo 'PROVER_SERVICE="sharp"' >> .env.test
+          echo 'SETTLEMENT_LAYER="ethereum"' >> .env.test
+          echo 'DATA_STORAGE="s3"' >> .env.test
+          echo 'MONGODB_CONNECTION_STRING="mongodb://localhost:27017"' >> .env.test
           cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1
 
       - name: Upload coverage to codecov.io
diff --git a/.gitignore b/.gitignore
index cc424bf3..76cd1131 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
 .env
 .idea
 .DS_Store
+.env.test
 
 *.code-workspace
 .vscode
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 40fd1c8d..70c41f6d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,9 +9,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 - Function to calculate the kzg proof of x_0.
 - Tests for updating the state.
 - Function to update the state and publish blob on ethereum in state update job.
+- Fixtures for testing.
 
 ## Changed
 
+- GitHub's coverage CI yml file for localstack and db testing.
+
 ## Removed
 
 - `fetch_from_test` argument
diff --git a/crates/orchestrator/src/constants.rs b/crates/orchestrator/src/constants.rs
index 1fd645a9..9361d764 100644
--- a/crates/orchestrator/src/constants.rs
+++ b/crates/orchestrator/src/constants.rs
@@ -1,3 +1,2 @@
 pub const BLOB_DATA_FILE_NAME: &str = "blob_data.txt";
 pub const SNOS_OUTPUT_FILE_NAME: &str = "snos_output.json";
-pub const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue";
diff --git a/crates/orchestrator/src/controllers/jobs_controller.rs b/crates/orchestrator/src/controllers/jobs_controller.rs
deleted file mode 100644
index e5f0daf8..00000000
--- a/crates/orchestrator/src/controllers/jobs_controller.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-use std::collections::HashMap;
-
-use axum::extract::Json;
-use serde::Deserialize;
-
-use crate::controllers::errors::AppError;
-use crate::jobs::types::JobType;
-
-/// Client request to create a job
-#[derive(Debug, Deserialize)]
-pub struct CreateJobRequest {
-    /// Job type
-    pub job_type: JobType,
-    /// Internal id must be a way to identify the job. For example
-    /// block_no, transaction_hash etc. The (job_type, internal_id)
-    /// pair must be unique.
-    pub internal_id: String,
-}
-
-/// Create a job
-pub async fn create_job(Json(payload): Json<CreateJobRequest>) -> Result<Json<()>, AppError> {
-    crate::jobs::create_job(payload.job_type, payload.internal_id, HashMap::new()).await?;
-    Ok(Json::from(()))
-}
diff --git a/crates/orchestrator/src/controllers/mod.rs b/crates/orchestrator/src/controllers/mod.rs
index aadb38b7..8575ccdc 100644
--- a/crates/orchestrator/src/controllers/mod.rs
+++ b/crates/orchestrator/src/controllers/mod.rs
@@ -1,5 +1,2 @@
 /// Errors
 mod errors;
-
-/// Job controllers
-pub mod jobs_controller;
diff --git a/crates/orchestrator/src/data_storage/aws_s3/config.rs b/crates/orchestrator/src/data_storage/aws_s3/config.rs
index d4518d85..06eeaff8 100644
--- a/crates/orchestrator/src/data_storage/aws_s3/config.rs
+++ b/crates/orchestrator/src/data_storage/aws_s3/config.rs
@@ -13,6 +13,7 @@ pub struct AWSS3Config {
     /// S3 Bucket region
     pub s3_bucket_region: String,
     /// Endpoint url
+    #[cfg(test)]
     pub endpoint_url: String,
 }
 
@@ -26,6 +27,7 @@ impl DataStorageConfig for AWSS3Config {
             s3_key_secret: get_env_var_or_panic("AWS_SECRET_ACCESS_KEY"),
             s3_bucket_name: get_env_var_or_panic("AWS_S3_BUCKET_NAME"),
             s3_bucket_region: get_env_var_or_panic("AWS_S3_BUCKET_REGION"),
+            #[cfg(test)]
             endpoint_url: get_env_var_or_panic("AWS_ENDPOINT_URL"),
         }
     }
diff --git a/crates/orchestrator/src/data_storage/aws_s3/mod.rs b/crates/orchestrator/src/data_storage/aws_s3/mod.rs
index da673eba..50ae6e3e 100644
--- a/crates/orchestrator/src/data_storage/aws_s3/mod.rs
+++ b/crates/orchestrator/src/data_storage/aws_s3/mod.rs
@@ -32,11 +32,15 @@ impl AWSS3 {
             "loaded_from_custom_env",
         );
         let region = Region::new(config.s3_bucket_region.clone().to_string());
-        let conf_builder = Builder::new()
-            .region(region)
-            .credentials_provider(credentials)
-            .endpoint_url(config.endpoint_url.clone())
-            .force_path_style(true);
+
+        #[allow(unused_mut)]
+        let mut conf_builder = Builder::new().region(region).credentials_provider(credentials).force_path_style(true);
+
+        #[cfg(test)]
+        {
+            conf_builder = conf_builder.endpoint_url(config.endpoint_url.clone().to_string());
+        }
+
         let conf = conf_builder.build();
 
         // Building AWS S3 config
@@ -72,4 +76,10 @@ impl DataStorage for AWSS3 {
 
         Ok(())
     }
+
+    #[cfg(test)]
+    async fn build_test_bucket(&self, bucket_name: &str) -> Result<()> {
+        self.client.create_bucket().bucket(bucket_name).send().await?;
+        Ok(())
+    }
 }
diff --git a/crates/orchestrator/src/data_storage/mod.rs b/crates/orchestrator/src/data_storage/mod.rs
index f259bf61..b3ff74fd 100644
--- a/crates/orchestrator/src/data_storage/mod.rs
+++ b/crates/orchestrator/src/data_storage/mod.rs
@@ -17,6 +17,8 @@ use mockall::automock;
 pub trait DataStorage: Send + Sync {
     async fn get_data(&self, key: &str) -> Result<Bytes>;
     async fn put_data(&self, data: Bytes, key: &str) -> Result<()>;
+    #[cfg(test)]
+    async fn build_test_bucket(&self, bucket_name: &str) -> Result<()>;
 }
 
 /// **DataStorageConfig** : Trait method to represent the config struct needed for
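Gating both the config field and the trait method behind `#[cfg(test)]` keeps the emulator plumbing out of release binaries entirely: the items simply do not exist outside test builds, so production code cannot reference the endpoint override by accident. A condensed sketch of the pattern with placeholder names (`StorageConfig`, `BUCKET`, and `ENDPOINT_URL` are illustrative, not from the patch):

```rust
// The field and its initializer only compile under `cargo test`; the
// attribute is valid on struct fields and on struct-literal fields alike.
pub struct StorageConfig {
    pub bucket: String,
    #[cfg(test)]
    pub endpoint_url: String,
}

impl StorageConfig {
    pub fn from_env() -> Self {
        Self {
            bucket: std::env::var("BUCKET").expect("BUCKET must be set"),
            #[cfg(test)]
            endpoint_url: std::env::var("ENDPOINT_URL").expect("ENDPOINT_URL must be set"),
        }
    }
}
```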
diff --git a/crates/orchestrator/src/queue/job_queue.rs b/crates/orchestrator/src/queue/job_queue.rs
index 9432276f..aebba4bf 100644
--- a/crates/orchestrator/src/queue/job_queue.rs
+++ b/crates/orchestrator/src/queue/job_queue.rs
@@ -12,8 +12,8 @@ use uuid::Uuid;
 use crate::config::config;
 use crate::jobs::{process_job, verify_job};
 
-const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue";
-const JOB_VERIFICATION_QUEUE: &str = "madara_orchestrator_job_verification_queue";
+pub const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue";
+pub const JOB_VERIFICATION_QUEUE: &str = "madara_orchestrator_job_verification_queue";
 
 #[derive(Debug, Serialize, Deserialize)]
 pub struct JobQueueMessage {
diff --git a/crates/orchestrator/src/routes.rs b/crates/orchestrator/src/routes.rs
index 39d8f3d4..877e5b88 100644
--- a/crates/orchestrator/src/routes.rs
+++ b/crates/orchestrator/src/routes.rs
@@ -1,16 +1,10 @@
 use axum::http::StatusCode;
 use axum::response::IntoResponse;
-use axum::routing::{get, post};
+use axum::routing::get;
 use axum::Router;
 
-use crate::controllers::jobs_controller;
-
 pub fn app_router() -> Router {
-    Router::new()
-        .route("/health", get(root))
-        .nest("/v1/dev", dev_routes())
-        .nest("/v1/job", job_routes())
-        .fallback(handler_404)
+    Router::new().route("/health", get(root)).nest("/v1/dev", dev_routes()).fallback(handler_404)
 }
 
 async fn root() -> &'static str {
@@ -21,10 +15,6 @@ async fn handler_404() -> impl IntoResponse {
     (StatusCode::NOT_FOUND, "The requested resource was not found")
 }
 
-fn job_routes() -> Router {
-    Router::new().route("/create_job", post(jobs_controller::create_job))
-}
-
 fn dev_routes() -> Router {
     Router::new()
 }
diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs
index 4265a707..0a97fa33 100644
--- a/crates/orchestrator/src/tests/common/mod.rs
+++ b/crates/orchestrator/src/tests/common/mod.rs
@@ -6,7 +6,6 @@ use std::sync::Arc;
 use ::uuid::Uuid;
 use constants::*;
 use da_client_interface::MockDaClient;
-use dotenvy::dotenv;
 use mongodb::Client;
 use prover_client_interface::MockProverClient;
 use rstest::*;
@@ -17,7 +16,7 @@ use url::Url;
 use utils::env_utils::get_env_var_or_panic;
 use utils::settings::default::DefaultSettingsProvider;
 
-use crate::config::{config_force_init, Config};
+use crate::config::{build_storage_client, config_force_init, Config};
 use crate::data_storage::MockDataStorage;
 use crate::database::mongodb::config::MongoDbConfig;
 use crate::database::mongodb::MongoDb;
@@ -83,7 +82,8 @@ pub fn custom_job_item(default_job_item: JobItem, #[default(String::from("0"))]
 /// For implementation of integration tests
 #[fixture]
 pub async fn build_config() -> color_eyre::Result<()> {
-    dotenv().ok();
+    // Getting .env.test variables
+    dotenvy::from_filename("../.env.test")?;
 
     // init starknet client
     let provider = JsonRpcClient::new(HttpTransport::new(
         Url::parse(get_env_var_or_panic("MADARA_RPC_URL").as_str()).expect("Failed to parse URL"),
     ));
@@ -101,7 +101,10 @@ pub async fn build_config() -> color_eyre::Result<()> {
     let settings_provider = DefaultSettingsProvider {};
     let settlement_client = crate::config::build_settlement_client(&settings_provider).await;
     let prover_client = crate::config::build_prover_service(&settings_provider);
-    let storage_client = crate::config::build_storage_client().await;
+    let storage_client = build_storage_client().await;
+
+    // building a test bucket :
+    storage_client.build_test_bucket(&get_env_var_or_panic("AWS_S3_BUCKET_NAME")).await?;
 
     let config =
         Config::new(Arc::new(provider), da_client, prover_client, settlement_client, database, queue, storage_client);
diff --git a/crates/orchestrator/src/tests/controllers/mod.rs b/crates/orchestrator/src/tests/controllers/mod.rs
deleted file mode 100644
index b5849a86..00000000
--- a/crates/orchestrator/src/tests/controllers/mod.rs
+++ /dev/null
@@ -1,49 +0,0 @@
-use crate::config::config_force_init;
-use crate::constants::JOB_PROCESSING_QUEUE;
-use crate::controllers::jobs_controller::{create_job, CreateJobRequest};
-use crate::database::MockDatabase;
-use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType};
-use crate::queue::MockQueueProvider;
-use crate::tests::common::init_config;
-use axum::Json;
-use mockall::predicate::eq;
-use rstest::rstest;
-use uuid::Uuid;
-
-#[rstest]
-#[tokio::test]
-async fn test_create_job_jobs_controller() -> color_eyre::Result<()> {
-    let mut db = MockDatabase::new();
-    let mut queue = MockQueueProvider::new();
-
-    // mocking db get function (when creating job it should return no job existing)
-    db.expect_get_last_successful_job_by_type().times(1).with(eq(JobType::SnosRun)).returning(|_| Ok(None));
-    // mocking db get function (when creating job to pre-check if job is not existing : worker module)
-    db.expect_get_job_by_internal_id_and_type().times(1).with(eq("1"), eq(JobType::SnosRun)).returning(|_, _| Ok(None));
-    // mocking creation of the job
-    db.expect_create_job().times(1).withf(move |item| item.internal_id == *"1").returning(move |_| {
-        Ok(JobItem {
-            id: Uuid::new_v4(),
-            internal_id: "1".to_string(),
-            job_type: JobType::SnosRun,
-            status: JobStatus::Created,
-            external_id: ExternalId::Number(0),
-            metadata: Default::default(),
-            version: 0,
-        })
-    });
-    // mocking sending of the job into the queue after the creation
-    queue
-        .expect_send_message_to_queue()
-        .returning(|_, _, _| Ok(()))
-        .withf(|queue, _payload, _delay| queue == JOB_PROCESSING_QUEUE);
-
-    let config = init_config(None, Some(db), Some(queue), None, None, None, None).await;
-    config_force_init(config).await;
-
-    let create_job_request = CreateJobRequest { job_type: JobType::SnosRun, internal_id: "1".to_string() };
-
-    let _ = create_job(Json::from(create_job_request)).await.unwrap();
-
-    Ok(())
-}
diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs
index 8436a808..f007a457 100644
--- a/crates/orchestrator/src/tests/data_storage/mod.rs
+++ b/crates/orchestrator/src/tests/data_storage/mod.rs
@@ -2,14 +2,14 @@ use crate::data_storage::aws_s3::config::AWSS3Config;
 use crate::data_storage::aws_s3::AWSS3;
 use crate::data_storage::{DataStorage, DataStorageConfig};
 use bytes::Bytes;
-use dotenvy::dotenv;
 use rstest::rstest;
 use serde_json::json;
 
 #[rstest]
 #[tokio::test]
 async fn test_put_and_get_data_s3() -> color_eyre::Result<()> {
-    dotenv().ok();
+    dotenvy::from_filename("../.env.test")?;
+
     let config = AWSS3Config::new_from_env();
     let s3_client = AWSS3::new(config).await;
 
diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs
index ae9db9df..5e8a651c 100644
--- a/crates/orchestrator/src/tests/database/mod.rs
+++ b/crates/orchestrator/src/tests/database/mod.rs
@@ -2,7 +2,6 @@ use crate::config::config;
 use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType};
 use crate::tests::common::{build_config, drop_database};
 use color_eyre::eyre::eyre;
-use dotenvy::dotenv;
 use rstest::*;
 use uuid::Uuid;
 
@@ -22,7 +21,6 @@ async fn test_database_connection() -> color_eyre::Result<()> {
 #[rstest]
 #[tokio::test]
 async fn test_database_create_job() -> color_eyre::Result<()> {
-    dotenv().ok();
     let init_config = build_config().await.is_ok();
     if !init_config {
         return Err(eyre!("Not able to init config."));
@@ -34,9 +32,9 @@ async fn test_database_create_job() -> color_eyre::Result<()> {
     let database_client = config.database();
 
     let job_vec = [
-        get_random_job_item(JobType::ProofCreation, JobStatus::Created, 1),
-        get_random_job_item(JobType::ProofCreation, JobStatus::Created, 2),
-        get_random_job_item(JobType::ProofCreation, JobStatus::Created, 3),
+        build_job_item(JobType::ProofCreation, JobStatus::Created, 1),
+        build_job_item(JobType::ProofCreation, JobStatus::Created, 2),
+        build_job_item(JobType::ProofCreation, JobStatus::Created, 3),
     ];
 
     database_client.create_job(job_vec[0].clone()).await.unwrap();
@@ -60,7 +58,7 @@ async fn test_database_create_job() -> color_eyre::Result<()> {
 // Test Util Functions
 // ==========================================
 
-fn get_random_job_item(job_type: JobType, job_status: JobStatus, internal_id: u64) -> JobItem {
+fn build_job_item(job_type: JobType, job_status: JobStatus, internal_id: u64) -> JobItem {
     JobItem {
         id: Uuid::new_v4(),
         internal_id: internal_id.to_string(),
diff --git a/crates/orchestrator/src/tests/mod.rs b/crates/orchestrator/src/tests/mod.rs
index 348e5c70..83dfc04c 100644
--- a/crates/orchestrator/src/tests/mod.rs
+++ b/crates/orchestrator/src/tests/mod.rs
@@ -7,6 +7,5 @@ pub mod server;
 pub mod queue;
 
 pub mod common;
-mod controllers;
 mod data_storage;
 pub mod workers;
diff --git a/crates/orchestrator/src/tests/workers/snos/mod.rs b/crates/orchestrator/src/tests/workers/snos/mod.rs
index c4e95902..7799ffb2 100644
--- a/crates/orchestrator/src/tests/workers/snos/mod.rs
+++ b/crates/orchestrator/src/tests/workers/snos/mod.rs
@@ -1,7 +1,7 @@
 use crate::config::config_force_init;
-use crate::constants::JOB_PROCESSING_QUEUE;
 use crate::database::MockDatabase;
 use crate::jobs::types::JobType;
+use crate::queue::job_queue::JOB_PROCESSING_QUEUE;
 use crate::queue::MockQueueProvider;
 use crate::tests::common::init_config;
 use crate::tests::workers::utils::get_job_item_mock_by_id;

From fc7d4938bc477707fd38d47d083035da43efd157 Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Fri, 26 Jul 2024 14:53:42 +0530
Subject: [PATCH 09/44] update : updated test cases for s3 client

---
 crates/orchestrator/src/tests/data_storage/mod.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs
index f007a457..ef1dd0ec 100644
--- a/crates/orchestrator/src/tests/data_storage/mod.rs
+++ b/crates/orchestrator/src/tests/data_storage/mod.rs
@@ -1,6 +1,7 @@
 use crate::data_storage::aws_s3::config::AWSS3Config;
 use crate::data_storage::aws_s3::AWSS3;
 use crate::data_storage::{DataStorage, DataStorageConfig};
+use crate::tests::common::build_config;
 use bytes::Bytes;
 use rstest::rstest;
 use serde_json::json;
@@ -8,6 +9,7 @@ use serde_json::json;
 #[rstest]
 #[tokio::test]
 async fn test_put_and_get_data_s3() -> color_eyre::Result<()> {
+    build_config().await?;
     dotenvy::from_filename("../.env.test")?;
 
     let config = AWSS3Config::new_from_env();

From 9a101ba76614366d76f05baeb71b4b4f749fc193 Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Fri, 26 Jul 2024 14:56:44 +0530
Subject: [PATCH 10/44] update : added .env.test file in the commit

---
 .env.test                      | 24 ++++++++++++++++++++++++
 .env.test.example              | 33 ---------------------------------
 .github/workflows/coverage.yml | 16 ----------------
 .gitignore                     |  1 -
 4 files changed, 24 insertions(+), 50 deletions(-)
 create mode 100644 .env.test
 delete mode 100644 .env.test.example

diff --git a/.env.test b/.env.test
new file mode 100644
index 00000000..75cfea32
--- /dev/null
+++ b/.env.test
@@ -0,0 +1,24 @@
+##### AWS config #####
+
+AWS_ACCESS_KEY_ID="AWS_ACCESS_KEY_ID"
+AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY"
+AWS_S3_BUCKET_NAME="madara-orchestrator-test-bucket"
+AWS_S3_BUCKET_REGION="us-east-1"
+AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566"
+
+##### On chain config #####
+
+MADARA_RPC_URL="http://localhost:3000"
+ETHEREUM_RPC_URL="http://localhost:3001"
+MEMORY_PAGES_CONTRACT_ADDRESS="0x000000000000000000000000000000000001dead"
+PRIVATE_KEY="0xdead"
+ETHEREUM_PRIVATE_KEY="0x000000000000000000000000000000000000000000000000000000000000beef"
+STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS="0x000000000000000000000000000000000002dead"
+
+##### Config URLs #####
+
+DA_LAYER="ethereum"
+PROVER_SERVICE="sharp"
+SETTLEMENT_LAYER="ethereum"
+DATA_STORAGE="s3"
+MONGODB_CONNECTION_STRING="mongodb://localhost:27017"
diff --git a/.env.test.example b/.env.test.example
deleted file mode 100644
index e55b97fc..00000000
--- a/.env.test.example
+++ /dev/null
@@ -1,33 +0,0 @@
-HOST=
-PORT=
-DATABASE_URL=
-MADARA_RPC_URL=
-DA_LAYER=
-SETTLEMENT_LAYER=
-
-# Ethereum
-ETHEREUM_PRIVATE_KEY=
-ETHEREUM_RPC_URL=
-MEMORY_PAGES_CONTRACT_ADDRESS=
-STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS=
-
-
-# Starknet
-STARKNET_PUBLIC_KEY=
-STARNET_PRIVATE_KEY=
-STARKNET_RPC_URL=
-STARKNET_CAIRO_CORE_CONTRACT_ADDRESS=
-
-# MongoDB connection string
-MONGODB_CONNECTION_STRING=
-
-# SQS
-AWS_ACCESS_KEY_ID=
-AWS_SECRET_ACCESS_KEY=
-
-# S3
-AWS_S3_BUCKET_NAME=
-AWS_S3_BUCKET_REGION=
-
-# AWS Local Stack URL
-AWS_ENDPOINT_URL=
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index d5d10b8d..a55082e6 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -41,22 +41,6 @@ jobs:
 
       - name: Run llvm-cov
         run: |
-          echo 'AWS_ACCESS_KEY_ID="AWS_ACCESS_KEY_ID"' >> .env.test
-          echo 'AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY"' >> .env.test
-          echo 'AWS_S3_BUCKET_NAME="madara-orchestrator-test-bucket"' >> .env.test
-          echo 'AWS_S3_BUCKET_REGION="us-east-1"' >> .env.test
-          echo 'AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566"' >> .env.test
-          echo 'MADARA_RPC_URL="http://localhost:3000"' >> .env.test
-          echo 'ETHEREUM_RPC_URL="http://localhost:3001"' >> .env.test
-          echo 'MEMORY_PAGES_CONTRACT_ADDRESS="0x000000000000000000000000000000000001dead"' >> .env.test
-          echo 'PRIVATE_KEY="0xdead"' >> .env.test
-          echo 'ETHEREUM_PRIVATE_KEY="0x000000000000000000000000000000000000000000000000000000000000beef"' >> .env.test
-          echo 'STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS="0x000000000000000000000000000000000002dead"' >> .env.test
-          echo 'DA_LAYER="ethereum"' >> .env.test
-          echo 'PROVER_SERVICE="sharp"' >> .env.test
-          echo 'SETTLEMENT_LAYER="ethereum"' >> .env.test
-          echo 'DATA_STORAGE="s3"' >> .env.test
-          echo 'MONGODB_CONNECTION_STRING="mongodb://localhost:27017"' >> .env.test
           cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1
 
       - name: Upload coverage to codecov.io
diff --git a/.gitignore b/.gitignore
index 76cd1131..cc424bf3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,7 +2,6 @@
 .env
 .idea
 .DS_Store
-.env.test
 
 *.code-workspace
 .vscode
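With `.env.test` committed, the test suite no longer depends on CI exporting every variable; each test loads the file explicitly. A sketch of that loading step, assuming the same relative path the fixtures use (one directory above the crate, at the workspace root):

```rust
// Minimal sketch: load the committed .env.test, then read a variable from it.
// The relative path assumes tests run from the crate directory, one level
// below the workspace root where .env.test lives.
fn load_test_env() -> color_eyre::Result<()> {
    dotenvy::from_filename("../.env.test")?;
    let bucket = std::env::var("AWS_S3_BUCKET_NAME")?;
    assert_eq!(bucket, "madara-orchestrator-test-bucket");
    Ok(())
}
```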
- echo 'PROVER_SERVICE="sharp"' >> .env.test - echo 'SETTLEMENT_LAYER="ethereum"' >> .env.test - echo 'DATA_STORAGE="s3"' >> .env.test - echo 'MONGODB_CONNECTION_STRING="mongodb://localhost:27017"' >> .env.test cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1 - name: Upload coverage to codecov.io diff --git a/.gitignore b/.gitignore index 76cd1131..cc424bf3 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,6 @@ .env .idea .DS_Store -.env.test *.code-workspace .vscode From 906a1eb2efa7706d593d0d065ed42132cf406465 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 26 Jul 2024 15:57:21 +0530 Subject: [PATCH 11/44] feat : added database necessary tests --- crates/orchestrator/src/tests/database/mod.rs | 128 ++++++++++++++++++ 1 file changed, 128 insertions(+) diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index 5e8a651c..29ce7847 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -55,6 +55,134 @@ async fn test_database_create_job() -> color_eyre::Result<()> { Ok(()) } +/// Test for `get_jobs_without_successor` operation in database trait. +/// Creates jobs in the following sequence : +/// +/// - Creates 3 snos run jobs with completed status +/// +/// - Creates 2 proof creation jobs with succession of the 2 snos jobs +/// +/// - Should return one snos job without the successor job of proof creation +#[rstest] +#[tokio::test] +async fn test_database_get_jobs_without_successor() -> color_eyre::Result<()> { + let init_config = build_config().await.is_ok(); + if !init_config { + return Err(eyre!("Not able to init config.")); + } + + drop_database().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + let job_vec = [ + build_job_item(JobType::SnosRun, JobStatus::Completed, 1), + build_job_item(JobType::SnosRun, JobStatus::Completed, 2), + build_job_item(JobType::SnosRun, JobStatus::Completed, 3), + build_job_item(JobType::ProofCreation, JobStatus::Created, 1), + build_job_item(JobType::ProofCreation, JobStatus::Created, 3), + ]; + + database_client.create_job(job_vec[0].clone()).await.unwrap(); + database_client.create_job(job_vec[1].clone()).await.unwrap(); + database_client.create_job(job_vec[2].clone()).await.unwrap(); + database_client.create_job(job_vec[3].clone()).await.unwrap(); + database_client.create_job(job_vec[4].clone()).await.unwrap(); + + let jobs_without_successor = database_client + .get_jobs_without_successor(JobType::SnosRun, JobStatus::Completed, JobType::ProofCreation) + .await + .unwrap(); + + assert_eq!(jobs_without_successor.len(), 1, "Expected number of jobs assertion failed."); + assert_eq!(jobs_without_successor[0], job_vec[1], "Expected job assertion failed."); + + Ok(()) +} + +/// Test for `get_last_successful_job_by_type` operation in database trait. +/// Creates the jobs in following sequence : +/// +/// - Creates 3 successful jobs. 
+/// +/// - Should return the last successful job +#[rstest] +#[tokio::test] +async fn test_database_get_last_successful_job_by_type() -> color_eyre::Result<()> { + let init_config = build_config().await.is_ok(); + if !init_config { + return Err(eyre!("Not able to init config.")); + } + + drop_database().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + let job_vec = [ + build_job_item(JobType::SnosRun, JobStatus::Completed, 1), + build_job_item(JobType::SnosRun, JobStatus::Completed, 2), + build_job_item(JobType::SnosRun, JobStatus::Completed, 3), + ]; + + database_client.create_job(job_vec[0].clone()).await.unwrap(); + database_client.create_job(job_vec[1].clone()).await.unwrap(); + database_client.create_job(job_vec[2].clone()).await.unwrap(); + + let last_successful_job = database_client.get_last_successful_job_by_type(JobType::SnosRun).await.unwrap(); + + assert_eq!(last_successful_job.unwrap(), job_vec[2], "Expected job assertion failed"); + + Ok(()) +} + +/// Test for `get_jobs_after_internal_id_by_job_type` operation in database trait. +/// Creates the jobs in following sequence : +/// +/// - Creates 5 successful jobs. +/// +/// - Should return the jobs after internal id +#[rstest] +#[tokio::test] +async fn test_database_get_jobs_after_internal_id_by_job_type() -> color_eyre::Result<()> { + let init_config = build_config().await.is_ok(); + if !init_config { + return Err(eyre!("Not able to init config.")); + } + + drop_database().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + let job_vec = [ + build_job_item(JobType::SnosRun, JobStatus::Completed, 1), + build_job_item(JobType::SnosRun, JobStatus::Completed, 2), + build_job_item(JobType::SnosRun, JobStatus::Completed, 3), + build_job_item(JobType::SnosRun, JobStatus::Completed, 4), + build_job_item(JobType::SnosRun, JobStatus::Completed, 5), + ]; + + database_client.create_job(job_vec[0].clone()).await.unwrap(); + database_client.create_job(job_vec[1].clone()).await.unwrap(); + database_client.create_job(job_vec[2].clone()).await.unwrap(); + database_client.create_job(job_vec[3].clone()).await.unwrap(); + database_client.create_job(job_vec[4].clone()).await.unwrap(); + + let jobs_after_internal_id = database_client + .get_jobs_after_internal_id_by_job_type(JobType::SnosRun, JobStatus::Completed, "2".to_string()) + .await + .unwrap(); + + assert_eq!(jobs_after_internal_id.len(), 3, "Number of jobs assertion failed"); + assert_eq!(jobs_after_internal_id[0], job_vec[2]); + assert_eq!(jobs_after_internal_id[1], job_vec[3]); + assert_eq!(jobs_after_internal_id[0], job_vec[4]); + + Ok(()) +} + // Test Util Functions // ========================================== From 65d66e6a621f05d2151a131f19bb0fc9725aaa0b Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 26 Jul 2024 15:58:14 +0530 Subject: [PATCH 12/44] feat : added database necessary tests --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 70c41f6d..9a22504e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - Tests for updating the state. - Function to update the state and publish blob on ethereum in state update job. - Fixtures for testing. +- Tests for database client. 
## Changed From 20d82c3c470a9d7bc9574c93db86ef273d20967a Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 26 Jul 2024 16:07:02 +0530 Subject: [PATCH 13/44] Revert "feat : added database necessary tests" This reverts commit 65d66e6a621f05d2151a131f19bb0fc9725aaa0b. --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a22504e..70c41f6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - Tests for updating the state. - Function to update the state and publish blob on ethereum in state update job. - Fixtures for testing. -- Tests for database client. ## Changed From 63bc8460e9fa6544756429973e57d9c110e504e0 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 26 Jul 2024 16:10:05 +0530 Subject: [PATCH 14/44] Revert "feat : added database necessary tests" This reverts commit 906a1eb2efa7706d593d0d065ed42132cf406465. --- crates/orchestrator/src/tests/database/mod.rs | 128 ------------------ 1 file changed, 128 deletions(-) diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index 29ce7847..5e8a651c 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -55,134 +55,6 @@ async fn test_database_create_job() -> color_eyre::Result<()> { Ok(()) } -/// Test for `get_jobs_without_successor` operation in database trait. -/// Creates jobs in the following sequence : -/// -/// - Creates 3 snos run jobs with completed status -/// -/// - Creates 2 proof creation jobs with succession of the 2 snos jobs -/// -/// - Should return one snos job without the successor job of proof creation -#[rstest] -#[tokio::test] -async fn test_database_get_jobs_without_successor() -> color_eyre::Result<()> { - let init_config = build_config().await.is_ok(); - if !init_config { - return Err(eyre!("Not able to init config.")); - } - - drop_database().await.unwrap(); - - let config = config().await; - let database_client = config.database(); - - let job_vec = [ - build_job_item(JobType::SnosRun, JobStatus::Completed, 1), - build_job_item(JobType::SnosRun, JobStatus::Completed, 2), - build_job_item(JobType::SnosRun, JobStatus::Completed, 3), - build_job_item(JobType::ProofCreation, JobStatus::Created, 1), - build_job_item(JobType::ProofCreation, JobStatus::Created, 3), - ]; - - database_client.create_job(job_vec[0].clone()).await.unwrap(); - database_client.create_job(job_vec[1].clone()).await.unwrap(); - database_client.create_job(job_vec[2].clone()).await.unwrap(); - database_client.create_job(job_vec[3].clone()).await.unwrap(); - database_client.create_job(job_vec[4].clone()).await.unwrap(); - - let jobs_without_successor = database_client - .get_jobs_without_successor(JobType::SnosRun, JobStatus::Completed, JobType::ProofCreation) - .await - .unwrap(); - - assert_eq!(jobs_without_successor.len(), 1, "Expected number of jobs assertion failed."); - assert_eq!(jobs_without_successor[0], job_vec[1], "Expected job assertion failed."); - - Ok(()) -} - -/// Test for `get_last_successful_job_by_type` operation in database trait. -/// Creates the jobs in following sequence : -/// -/// - Creates 3 successful jobs. 
-///
-/// - Should return the last successful job
-#[rstest]
-#[tokio::test]
-async fn test_database_get_last_successful_job_by_type() -> color_eyre::Result<()> {
-    let init_config = build_config().await.is_ok();
-    if !init_config {
-        return Err(eyre!("Not able to init config."));
-    }
-
-    drop_database().await.unwrap();
-
-    let config = config().await;
-    let database_client = config.database();
-
-    let job_vec = [
-        build_job_item(JobType::SnosRun, JobStatus::Completed, 1),
-        build_job_item(JobType::SnosRun, JobStatus::Completed, 2),
-        build_job_item(JobType::SnosRun, JobStatus::Completed, 3),
-    ];
-
-    database_client.create_job(job_vec[0].clone()).await.unwrap();
-    database_client.create_job(job_vec[1].clone()).await.unwrap();
-    database_client.create_job(job_vec[2].clone()).await.unwrap();
-
-    let last_successful_job = database_client.get_last_successful_job_by_type(JobType::SnosRun).await.unwrap();
-
-    assert_eq!(last_successful_job.unwrap(), job_vec[2], "Expected job assertion failed");
-
-    Ok(())
-}
-
-/// Test for `get_jobs_after_internal_id_by_job_type` operation in database trait.
-/// Creates the jobs in following sequence :
-///
-/// - Creates 5 successful jobs.
-///
-/// - Should return the jobs after internal id
-#[rstest]
-#[tokio::test]
-async fn test_database_get_jobs_after_internal_id_by_job_type() -> color_eyre::Result<()> {
-    let init_config = build_config().await.is_ok();
-    if !init_config {
-        return Err(eyre!("Not able to init config."));
-    }
-
-    drop_database().await.unwrap();
-
-    let config = config().await;
-    let database_client = config.database();
-
-    let job_vec = [
-        build_job_item(JobType::SnosRun, JobStatus::Completed, 1),
-        build_job_item(JobType::SnosRun, JobStatus::Completed, 2),
-        build_job_item(JobType::SnosRun, JobStatus::Completed, 3),
-        build_job_item(JobType::SnosRun, JobStatus::Completed, 4),
-        build_job_item(JobType::SnosRun, JobStatus::Completed, 5),
-    ];
-
-    database_client.create_job(job_vec[0].clone()).await.unwrap();
-    database_client.create_job(job_vec[1].clone()).await.unwrap();
-    database_client.create_job(job_vec[2].clone()).await.unwrap();
-    database_client.create_job(job_vec[3].clone()).await.unwrap();
-    database_client.create_job(job_vec[4].clone()).await.unwrap();
-
-    let jobs_after_internal_id = database_client
-        .get_jobs_after_internal_id_by_job_type(JobType::SnosRun, JobStatus::Completed, "2".to_string())
-        .await
-        .unwrap();
-
-    assert_eq!(jobs_after_internal_id.len(), 3, "Number of jobs assertion failed");
-    assert_eq!(jobs_after_internal_id[0], job_vec[2]);
-    assert_eq!(jobs_after_internal_id[1], job_vec[3]);
-    assert_eq!(jobs_after_internal_id[2], job_vec[4]);
-
-    Ok(())
-}
-
 // Test Util Functions
 // ==========================================

From f36b483459c3e8bfa92f7c7cc110c36c5f5647c2 Mon Sep 17 00:00:00 2001
From: Heemank Verma
Date: Fri, 26 Jul 2024 17:20:25 +0530
Subject: [PATCH 15/44] update: Replaced Build_Config Fixture with TestConfigBuilder

---
 crates/orchestrator/src/config.rs             | 115 ++++++++++++++++++
 crates/orchestrator/src/tests/common/mod.rs   |  37 +-----
 .../src/tests/data_storage/mod.rs             |   5 +-
 crates/orchestrator/src/tests/database/mod.rs |  16 +--
 4 files changed, 123 insertions(+), 50 deletions(-)

diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs
index 55761526..828df754 100644
--- a/crates/orchestrator/src/config.rs
+++ b/crates/orchestrator/src/config.rs
@@ -183,3 +183,118 @@ pub async fn build_storage_client() -> Box {
         _ => panic!("Unsupported Storage Client"),
     }
 }
+
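+// NOTE: a hypothetical illustration of what the returned `httpmock::MockServer`
+// enables (the stub below is an assumption for illustration, not part of this
+// patch; real tests register whatever RPC expectations they need):
+//
+// let server = TestConfigBuilder::new().build().await;
+// let rpc_mock = server.mock(|when, then| {
+//     when.path("/");
+//     then.status(200).body("{}");
+// });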
+#[cfg(test)] +use httpmock::MockServer; + +// Inspiration : https://rust-unofficial.github.io/patterns/patterns/creational/builder.html +// TestConfigBuilder allows to heavily customise the global configs based on the test's requirement. +// Eg: We want to mock only the da client and leave rest to be as it is, use mock_da_client. + +// TestBuilder for Config +#[cfg(test)] +pub struct TestConfigBuilder { + /// The starknet client to get data from the node + starknet_client: Option>>, + /// The DA client to interact with the DA layer + da_client: Option>, + /// The service that produces proof and registers it onchain + prover_client: Option>, + /// Settlement client + settlement_client: Option>, + /// The database client + database: Option>, + /// Queue client + queue: Option>, + /// Storage client + storage: Option>, +} + +#[cfg(test)] +impl Default for TestConfigBuilder { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +impl TestConfigBuilder { + /// Create a new config + pub fn new() -> TestConfigBuilder { + TestConfigBuilder { + starknet_client: None, + da_client: None, + prover_client: None, + settlement_client: None, + database: None, + queue: None, + storage: None, + } + } + + pub fn mock_da_client(mut self, da_client: Box) -> TestConfigBuilder { + self.da_client = Some(da_client); + self + } + + pub async fn build(mut self) -> MockServer { + dotenv().ok(); + + // init starknet client + if self.starknet_client.is_none() { + let provider = JsonRpcClient::new(HttpTransport::new( + Url::parse(get_env_var_or_panic("MADARA_RPC_URL").as_str()).expect("Failed to parse URL"), + )); + self.starknet_client = Some(Arc::new(provider)); + } + + // init database + if self.database.is_none() { + self.database = Some(Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await)); + } + + // init queue + if self.queue.is_none() { + self.queue = Some(Box::new(SqsQueue {})); + } + + // init the DA client + if self.da_client.is_none() { + self.da_client = Some(build_da_client().await); + } + + let settings_provider = DefaultSettingsProvider {}; + + // init the Settings client + if self.settlement_client.is_none() { + self.settlement_client = Some(build_settlement_client(&settings_provider).await); + } + + // init the Prover client + if self.prover_client.is_none() { + self.prover_client = Some(build_prover_service(&settings_provider)); + } + + // init the storage client + if self.storage.is_none() { + self.storage = Some(build_storage_client().await); + } + + // return config and server as tuple + let config = Config::new( + self.starknet_client.unwrap(), + self.da_client.unwrap(), + self.prover_client.unwrap(), + self.settlement_client.unwrap(), + self.database.unwrap(), + self.queue.unwrap(), + self.storage.unwrap(), + ); + + config_force_init(config).await; + + let server = MockServer::connect(get_env_var_or_panic("MADARA_RPC_URL").as_str()); + + server + } +} diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index 0a97fa33..96337c68 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -13,10 +13,8 @@ use settlement_client_interface::MockSettlementClient; use starknet::providers::jsonrpc::HttpTransport; use starknet::providers::JsonRpcClient; use url::Url; -use utils::env_utils::get_env_var_or_panic; -use utils::settings::default::DefaultSettingsProvider; -use crate::config::{build_storage_client, config_force_init, Config}; +use crate::config::Config; use 
crate::data_storage::MockDataStorage; use crate::database::mongodb::config::MongoDbConfig; use crate::database::mongodb::MongoDb; @@ -80,39 +78,6 @@ pub fn custom_job_item(default_job_item: JobItem, #[default(String::from("0"))] job_item } -/// For implementation of integration tests -#[fixture] -pub async fn build_config() -> color_eyre::Result<()> { - // Getting .env.test variables - dotenvy::from_filename("../.env.test")?; - - // init starknet client - let provider = JsonRpcClient::new(HttpTransport::new( - Url::parse(get_env_var_or_panic("MADARA_RPC_URL").as_str()).expect("Failed to parse URL"), - )); - - // init database - let database = Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await); - - // init the queue - let queue = Box::new(crate::queue::sqs::SqsQueue {}); - - let da_client = crate::config::build_da_client().await; - let settings_provider = DefaultSettingsProvider {}; - let settlement_client = crate::config::build_settlement_client(&settings_provider).await; - let prover_client = crate::config::build_prover_service(&settings_provider); - let storage_client = build_storage_client().await; - - // building a test bucket : - storage_client.build_test_bucket(&get_env_var_or_panic("AWS_S3_BUCKET_NAME")).await?; - - let config = - Config::new(Arc::new(provider), da_client, prover_client, settlement_client, database, queue, storage_client); - config_force_init(config).await; - - Ok(()) -} - pub async fn drop_database() -> color_eyre::Result<()> { let db_client: Client = MongoDb::new(MongoDbConfig::new_from_env()).await.client(); // dropping `jobs` collection. diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs index ef1dd0ec..1fdb73f3 100644 --- a/crates/orchestrator/src/tests/data_storage/mod.rs +++ b/crates/orchestrator/src/tests/data_storage/mod.rs @@ -1,7 +1,7 @@ +use crate::config::TestConfigBuilder; use crate::data_storage::aws_s3::config::AWSS3Config; use crate::data_storage::aws_s3::AWSS3; use crate::data_storage::{DataStorage, DataStorageConfig}; -use crate::tests::common::build_config; use bytes::Bytes; use rstest::rstest; use serde_json::json; @@ -9,7 +9,8 @@ use serde_json::json; #[rstest] #[tokio::test] async fn test_put_and_get_data_s3() -> color_eyre::Result<()> { - build_config().await?; + TestConfigBuilder::new().build().await; + dotenvy::from_filename("../.env.test")?; let config = AWSS3Config::new_from_env(); diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index 5e8a651c..9e9e3dea 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -1,18 +1,13 @@ -use crate::config::config; +use crate::config::{config, TestConfigBuilder}; use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; -use crate::tests::common::{build_config, drop_database}; -use color_eyre::eyre::eyre; +use crate::tests::common::drop_database; use rstest::*; use uuid::Uuid; #[rstest] #[tokio::test] async fn test_database_connection() -> color_eyre::Result<()> { - let init_config_error = build_config().await.is_err(); - if init_config_error { - return Err(eyre!("Not able to init config.")); - } - + TestConfigBuilder::new().build().await; Ok(()) } @@ -21,10 +16,7 @@ async fn test_database_connection() -> color_eyre::Result<()> { #[rstest] #[tokio::test] async fn test_database_create_job() -> color_eyre::Result<()> { - let init_config = build_config().await.is_ok(); - if !init_config { - return 
Err(eyre!("Not able to init config.")); - } + TestConfigBuilder::new().build().await; drop_database().await.unwrap(); From 8b7e68ffe98cc0c2f4120bfbd332fff82f7595a2 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 26 Jul 2024 18:01:42 +0530 Subject: [PATCH 16/44] update : config update --- crates/orchestrator/src/config.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs index 828df754..3bdfdca2 100644 --- a/crates/orchestrator/src/config.rs +++ b/crates/orchestrator/src/config.rs @@ -238,7 +238,7 @@ impl TestConfigBuilder { } pub async fn build(mut self) -> MockServer { - dotenv().ok(); + dotenvy::from_filename("../.env.test").expect("Failed to load the .env file"); // init starknet client if self.starknet_client.is_none() { @@ -293,8 +293,6 @@ impl TestConfigBuilder { config_force_init(config).await; - let server = MockServer::connect(get_env_var_or_panic("MADARA_RPC_URL").as_str()); - - server + MockServer::start() } } From 74fea521244f50f74dff143fbad5cb21f79a73c0 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 26 Jul 2024 18:26:03 +0530 Subject: [PATCH 17/44] update : test_put_and_get_data_s3 test --- crates/orchestrator/src/tests/data_storage/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs index 1fdb73f3..cf67a001 100644 --- a/crates/orchestrator/src/tests/data_storage/mod.rs +++ b/crates/orchestrator/src/tests/data_storage/mod.rs @@ -5,6 +5,7 @@ use crate::data_storage::{DataStorage, DataStorageConfig}; use bytes::Bytes; use rstest::rstest; use serde_json::json; +use utils::env_utils::get_env_var_or_panic; #[rstest] #[tokio::test] @@ -15,6 +16,7 @@ async fn test_put_and_get_data_s3() -> color_eyre::Result<()> { let config = AWSS3Config::new_from_env(); let s3_client = AWSS3::new(config).await; + s3_client.build_test_bucket(&get_env_var_or_panic("AWS_S3_BUCKET_NAME")).await.unwrap(); let mock_data = json!( { From 385974d3b19f85f1e36c139a187a7c017ef92662 Mon Sep 17 00:00:00 2001 From: Heemank Verma Date: Mon, 29 Jul 2024 17:24:41 +0530 Subject: [PATCH 18/44] update: moved testconfigbuilder to tests/config.rs & added docs , drop all collections not just jobs --- CHANGELOG.md | 1 + crates/orchestrator/src/config.rs | 113 -------------- .../orchestrator/src/database/mongodb/mod.rs | 3 + crates/orchestrator/src/tests/common/mod.rs | 6 +- crates/orchestrator/src/tests/config.rs | 139 ++++++++++++++++++ .../src/tests/data_storage/mod.rs | 6 +- crates/orchestrator/src/tests/database/mod.rs | 3 +- crates/orchestrator/src/tests/mod.rs | 1 + 8 files changed, 155 insertions(+), 117 deletions(-) create mode 100644 crates/orchestrator/src/tests/config.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 70c41f6d..ee8fec8f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). ## Changed - GitHub's coverage CI yml file for localstack and db testing. +- Orchestrator :Moved TestConfigBuilder to `config.rs` in tests folder. 
## Removed diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs index 3bdfdca2..55761526 100644 --- a/crates/orchestrator/src/config.rs +++ b/crates/orchestrator/src/config.rs @@ -183,116 +183,3 @@ pub async fn build_storage_client() -> Box { _ => panic!("Unsupported Storage Client"), } } - -#[cfg(test)] -use httpmock::MockServer; - -// Inspiration : https://rust-unofficial.github.io/patterns/patterns/creational/builder.html -// TestConfigBuilder allows to heavily customise the global configs based on the test's requirement. -// Eg: We want to mock only the da client and leave rest to be as it is, use mock_da_client. - -// TestBuilder for Config -#[cfg(test)] -pub struct TestConfigBuilder { - /// The starknet client to get data from the node - starknet_client: Option>>, - /// The DA client to interact with the DA layer - da_client: Option>, - /// The service that produces proof and registers it onchain - prover_client: Option>, - /// Settlement client - settlement_client: Option>, - /// The database client - database: Option>, - /// Queue client - queue: Option>, - /// Storage client - storage: Option>, -} - -#[cfg(test)] -impl Default for TestConfigBuilder { - fn default() -> Self { - Self::new() - } -} - -#[cfg(test)] -impl TestConfigBuilder { - /// Create a new config - pub fn new() -> TestConfigBuilder { - TestConfigBuilder { - starknet_client: None, - da_client: None, - prover_client: None, - settlement_client: None, - database: None, - queue: None, - storage: None, - } - } - - pub fn mock_da_client(mut self, da_client: Box) -> TestConfigBuilder { - self.da_client = Some(da_client); - self - } - - pub async fn build(mut self) -> MockServer { - dotenvy::from_filename("../.env.test").expect("Failed to load the .env file"); - - // init starknet client - if self.starknet_client.is_none() { - let provider = JsonRpcClient::new(HttpTransport::new( - Url::parse(get_env_var_or_panic("MADARA_RPC_URL").as_str()).expect("Failed to parse URL"), - )); - self.starknet_client = Some(Arc::new(provider)); - } - - // init database - if self.database.is_none() { - self.database = Some(Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await)); - } - - // init queue - if self.queue.is_none() { - self.queue = Some(Box::new(SqsQueue {})); - } - - // init the DA client - if self.da_client.is_none() { - self.da_client = Some(build_da_client().await); - } - - let settings_provider = DefaultSettingsProvider {}; - - // init the Settings client - if self.settlement_client.is_none() { - self.settlement_client = Some(build_settlement_client(&settings_provider).await); - } - - // init the Prover client - if self.prover_client.is_none() { - self.prover_client = Some(build_prover_service(&settings_provider)); - } - - // init the storage client - if self.storage.is_none() { - self.storage = Some(build_storage_client().await); - } - - // return config and server as tuple - let config = Config::new( - self.starknet_client.unwrap(), - self.da_client.unwrap(), - self.prover_client.unwrap(), - self.settlement_client.unwrap(), - self.database.unwrap(), - self.queue.unwrap(), - self.storage.unwrap(), - ); - - config_force_init(config).await; - - MockServer::start() - } -} diff --git a/crates/orchestrator/src/database/mongodb/mod.rs b/crates/orchestrator/src/database/mongodb/mod.rs index aea5a41a..90b6ecc7 100644 --- a/crates/orchestrator/src/database/mongodb/mod.rs +++ b/crates/orchestrator/src/database/mongodb/mod.rs @@ -40,6 +40,9 @@ impl MongoDb { MongoDb { client } } + /// Mongodb client 
uses Arc internally, reducing the cost of clone. + /// Directly using clone is not recommended for libraries not using Arc internally. + /// Dev might want to pass an Arc manually. pub fn client(&self) -> Client { self.client.clone() } diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index 96337c68..858635a9 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -80,7 +80,9 @@ pub fn custom_job_item(default_job_item: JobItem, #[default(String::from("0"))] pub async fn drop_database() -> color_eyre::Result<()> { let db_client: Client = MongoDb::new(MongoDbConfig::new_from_env()).await.client(); - // dropping `jobs` collection. - db_client.database("orchestrator").collection::("jobs").drop(None).await?; + // dropping all the collection. + // use .collection::("") + // if only particular collection is to be dropped + db_client.database("orchestrator").drop(None).await?; Ok(()) } diff --git a/crates/orchestrator/src/tests/config.rs b/crates/orchestrator/src/tests/config.rs new file mode 100644 index 00000000..5191bfc4 --- /dev/null +++ b/crates/orchestrator/src/tests/config.rs @@ -0,0 +1,139 @@ +use std::sync::Arc; + +use crate::config::{ + build_da_client, build_prover_service, build_settlement_client, build_storage_client, config_force_init, Config, +}; +use crate::data_storage::DataStorage; +use da_client_interface::DaClient; +use prover_client_interface::ProverClient; +use settlement_client_interface::SettlementClient; +use starknet::providers::jsonrpc::HttpTransport; +use starknet::providers::{JsonRpcClient, Url}; +use utils::env_utils::get_env_var_or_panic; +use utils::settings::default::DefaultSettingsProvider; + +use crate::database::mongodb::config::MongoDbConfig; +use crate::database::mongodb::MongoDb; +use crate::database::{Database, DatabaseConfig}; +use crate::queue::sqs::SqsQueue; +use crate::queue::QueueProvider; + +use httpmock::MockServer; +// Inspiration : https://rust-unofficial.github.io/patterns/patterns/creational/builder.html +// TestConfigBuilder allows to heavily customise the global configs based on the test's requirement. +// Eg: We want to mock only the da client and leave rest to be as it is, use mock_da_client. 
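+//
+// A minimal usage sketch (hypothetical test; `MockDaClient` comes from
+// `da_client_interface`, and the expectations set on it are up to the test):
+//
+// #[rstest]
+// #[tokio::test]
+// async fn uses_mocked_da_client() {
+//     let da_client = MockDaClient::new();
+//     let _server = TestConfigBuilder::new().mock_da_client(Box::new(da_client)).build().await;
+//     // config() now serves the customised global Config to the code under test.
+// }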
+ +// TestBuilder for Config +pub struct TestConfigBuilder { + /// The starknet client to get data from the node + starknet_client: Option>>, + /// The DA client to interact with the DA layer + da_client: Option>, + /// The service that produces proof and registers it onchain + prover_client: Option>, + /// Settlement client + settlement_client: Option>, + /// The database client + database: Option>, + /// Queue client + queue: Option>, + /// Storage client + storage: Option>, +} + +impl Default for TestConfigBuilder { + fn default() -> Self { + Self::new() + } +} + +impl TestConfigBuilder { + /// Create a new config + pub fn new() -> TestConfigBuilder { + TestConfigBuilder { + starknet_client: None, + da_client: None, + prover_client: None, + settlement_client: None, + database: None, + queue: None, + storage: None, + } + } + + pub fn mock_da_client(mut self, da_client: Box) -> TestConfigBuilder { + self.da_client = Some(da_client); + self + } + + pub async fn build(mut self) -> MockServer { + dotenvy::from_filename("../.env.test").expect("Failed to load the .env file"); + + let server = MockServer::start(); + + // init starknet client + if self.starknet_client.is_none() { + let provider = JsonRpcClient::new(HttpTransport::new( + Url::parse(format!("http://localhost:{}", server.port()).as_str()).expect("Failed to parse URL"), + )); + self.starknet_client = Some(Arc::new(provider)); + } + + // init database + if self.database.is_none() { + self.database = Some(Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await)); + } + + // init queue + if self.queue.is_none() { + self.queue = Some(Box::new(SqsQueue {})); + } + + // init the DA client + if self.da_client.is_none() { + self.da_client = Some(build_da_client().await); + } + + let settings_provider = DefaultSettingsProvider {}; + + // init the Settings client + if self.settlement_client.is_none() { + self.settlement_client = Some(build_settlement_client(&settings_provider).await); + } + + // init the Prover client + if self.prover_client.is_none() { + self.prover_client = Some(build_prover_service(&settings_provider)); + } + + // init the storage client + if self.storage.is_none() { + self.storage = Some(build_storage_client().await); + match get_env_var_or_panic("DATA_STORAGE").as_str() { + "s3" => self + .storage + .as_ref() + .unwrap() + .build_test_bucket(&get_env_var_or_panic("AWS_S3_BUCKET_NAME")) + .await + .unwrap(), + _ => panic!("Unsupported Storage Client"), + } + } + + // return config and server as tuple + let config = Config::new( + self.starknet_client.unwrap(), + self.da_client.unwrap(), + self.prover_client.unwrap(), + self.settlement_client.unwrap(), + self.database.unwrap(), + self.queue.unwrap(), + self.storage.unwrap(), + ); + + config_force_init(config).await; + + server + } +} diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs index cf67a001..8f68d312 100644 --- a/crates/orchestrator/src/tests/data_storage/mod.rs +++ b/crates/orchestrator/src/tests/data_storage/mod.rs @@ -1,12 +1,16 @@ -use crate::config::TestConfigBuilder; use crate::data_storage::aws_s3::config::AWSS3Config; use crate::data_storage::aws_s3::AWSS3; use crate::data_storage::{DataStorage, DataStorageConfig}; +use crate::tests::config::TestConfigBuilder; use bytes::Bytes; use rstest::rstest; use serde_json::json; use utils::env_utils::get_env_var_or_panic; +/// This test checks the ability to put and get data from AWS S3 using `AWSS3`. 
+/// It puts JSON data into a test bucket and retrieves it, verifying the data +/// matches what was originally uploaded. +/// Dependencies: `color_eyre`, `dotenvy`, `rstest`, `tokio`, `serde_json`. #[rstest] #[tokio::test] async fn test_put_and_get_data_s3() -> color_eyre::Result<()> { diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index 9e9e3dea..f7cca727 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -1,6 +1,7 @@ -use crate::config::{config, TestConfigBuilder}; +use crate::config::config; use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; use crate::tests::common::drop_database; +use crate::tests::config::TestConfigBuilder; use rstest::*; use uuid::Uuid; diff --git a/crates/orchestrator/src/tests/mod.rs b/crates/orchestrator/src/tests/mod.rs index 83dfc04c..1dbc21a2 100644 --- a/crates/orchestrator/src/tests/mod.rs +++ b/crates/orchestrator/src/tests/mod.rs @@ -1,3 +1,4 @@ +pub mod config; pub mod database; pub mod jobs; From eb43d49394e5cc0c57e4bbf0cecb245d8d86978d Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Tue, 30 Jul 2024 20:41:54 +0530 Subject: [PATCH 19/44] feat : create job test case error returning --- crates/orchestrator/src/config.rs | 27 ++++++- crates/orchestrator/src/jobs/mod.rs | 14 +++- crates/orchestrator/src/tests/common/mod.rs | 2 + crates/orchestrator/src/tests/config.rs | 78 +++++++++++++++++++++ crates/orchestrator/src/tests/jobs/mod.rs | 60 ++++++++++++++++ 5 files changed, 176 insertions(+), 5 deletions(-) diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs index 55761526..bed1c7b0 100644 --- a/crates/orchestrator/src/config.rs +++ b/crates/orchestrator/src/config.rs @@ -22,6 +22,7 @@ use utils::settings::SettingsProvider; use crate::database::mongodb::config::MongoDbConfig; use crate::database::mongodb::MongoDb; use crate::database::{Database, DatabaseConfig}; +use crate::jobs::MockJob; use crate::queue::sqs::SqsQueue; use crate::queue::QueueProvider; @@ -42,6 +43,9 @@ pub struct Config { queue: Box, /// Storage client storage: Box, + /// Job Handler (to be used during testing only.) + #[cfg(test)] + pub(crate) job_handler: Box, } /// Initializes the app config @@ -67,7 +71,7 @@ pub async fn init_config() -> Config { let storage_client = build_storage_client().await; - Config::new(Arc::new(provider), da_client, prover_client, settlement_client, database, queue, storage_client) + Config::new(Arc::new(provider), da_client, prover_client, settlement_client, database, queue, storage_client, Box::new(MockJob::new())) } impl Config { @@ -80,8 +84,21 @@ impl Config { database: Box, queue: Box, storage: Box, + // to be used in test environment only + #[allow(unused_variables)] + job_handler: Box ) -> Self { - Self { starknet_client, da_client, prover_client, settlement_client, database, queue, storage } + Self { + starknet_client, + da_client, + prover_client, + settlement_client, + database, + queue, + storage, + #[cfg(test)] + job_handler, + } } /// Returns the starknet client @@ -118,6 +135,12 @@ impl Config { pub fn storage(&self) -> &dyn DataStorage { self.storage.as_ref() } + + /// Returns the job handler (used in test environment) + #[cfg(test)] + pub fn job_handler(&self) -> &MockJob { + self.job_handler.as_ref() + } } /// The app config. It can be accessed from anywhere inside the service. 
diff --git a/crates/orchestrator/src/jobs/mod.rs b/crates/orchestrator/src/jobs/mod.rs index b501cade..1e07ca54 100644 --- a/crates/orchestrator/src/jobs/mod.rs +++ b/crates/orchestrator/src/jobs/mod.rs @@ -4,6 +4,7 @@ use std::time::Duration; use async_trait::async_trait; use color_eyre::eyre::eyre; use color_eyre::Result; +use mockall::automock; use tracing::log; use uuid::Uuid; @@ -18,11 +19,10 @@ pub mod proving_job; pub mod register_proof_job; pub mod snos_job; pub mod state_update_job; -pub mod types; - /// The Job trait is used to define the methods that a job /// should implement to be used as a job for the orchestrator. The orchestrator automatically /// handles queueing and processing of jobs as long as they implement the trait. +#[automock] #[async_trait] pub trait Job: Send + Sync { /// Should build a new job item and return it @@ -50,6 +50,8 @@ pub trait Job: Send + Sync { fn verification_polling_delay_seconds(&self) -> u64; } +pub mod types; + /// Creates the job in the DB in the created state and adds it to the process queue pub async fn create_job(job_type: JobType, internal_id: String, metadata: HashMap) -> Result<()> { let config = config().await; @@ -65,6 +67,7 @@ pub async fn create_job(job_type: JobType, internal_id: String, metadata: HashMa let job_handler = get_job_handler(&job_type); let job_item = job_handler.create_job(config.as_ref(), internal_id, metadata).await?; + println!(">>>> job_item : {:?}", job_item); config.database().create_job(job_item.clone()).await?; add_job_to_process_queue(job_item.id).await?; @@ -173,7 +176,12 @@ pub async fn verify_job(id: Uuid) -> Result<()> { Ok(()) } -fn get_job_handler(job_type: &JobType) -> Box { + +async fn get_job_handler(job_type: &JobType) -> Box<&dyn Job> { + #[cfg(test)] + return Box::new(config().await.job_handler.as_ref()); + + #[cfg(not(test))] match job_type { JobType::DataSubmission => Box::new(da_job::DaJob), JobType::SnosRun => Box::new(snos_job::SnosJob), diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index 858635a9..9631b551 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -19,6 +19,7 @@ use crate::data_storage::MockDataStorage; use crate::database::mongodb::config::MongoDbConfig; use crate::database::mongodb::MongoDb; use crate::database::{DatabaseConfig, MockDatabase}; +use crate::jobs::MockJob; use crate::jobs::types::JobStatus::Created; use crate::jobs::types::JobType::DataSubmission; use crate::jobs::types::{ExternalId, JobItem}; @@ -54,6 +55,7 @@ pub async fn init_config( Box::new(database), Box::new(queue), Box::new(storage_client), + Box::new(MockJob::new()) ) } diff --git a/crates/orchestrator/src/tests/config.rs b/crates/orchestrator/src/tests/config.rs index 5191bfc4..38a6c4da 100644 --- a/crates/orchestrator/src/tests/config.rs +++ b/crates/orchestrator/src/tests/config.rs @@ -18,6 +18,7 @@ use crate::database::{Database, DatabaseConfig}; use crate::queue::sqs::SqsQueue; use crate::queue::QueueProvider; +use crate::jobs::MockJob; use httpmock::MockServer; // Inspiration : https://rust-unofficial.github.io/patterns/patterns/creational/builder.html // TestConfigBuilder allows to heavily customise the global configs based on the test's requirement. 
@@ -39,6 +40,9 @@ pub struct TestConfigBuilder { queue: Option>, /// Storage client storage: Option>, + /// Job Handler + #[allow(dead_code)] + job_handler: Option, } impl Default for TestConfigBuilder { @@ -58,6 +62,7 @@ impl TestConfigBuilder { database: None, queue: None, storage: None, + job_handler: None, } } @@ -66,6 +71,78 @@ impl TestConfigBuilder { self } + pub async fn build_with_mock_job(mut self, mock_job: MockJob) -> MockServer { + dotenvy::from_filename("../.env.test").expect("Failed to load the .env file"); + + let server = MockServer::start(); + + // init starknet client + if self.starknet_client.is_none() { + let provider = JsonRpcClient::new(HttpTransport::new( + Url::parse(format!("http://localhost:{}", server.port()).as_str()).expect("Failed to parse URL"), + )); + self.starknet_client = Some(Arc::new(provider)); + } + + // init database + if self.database.is_none() { + self.database = Some(Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await)); + } + + // init queue + if self.queue.is_none() { + self.queue = Some(Box::new(SqsQueue {})); + } + + // init the DA client + if self.da_client.is_none() { + self.da_client = Some(build_da_client().await); + } + + let settings_provider = DefaultSettingsProvider {}; + + // init the Settings client + if self.settlement_client.is_none() { + self.settlement_client = Some(build_settlement_client(&settings_provider).await); + } + + // init the Prover client + if self.prover_client.is_none() { + self.prover_client = Some(build_prover_service(&settings_provider)); + } + + // init the storage client + if self.storage.is_none() { + self.storage = Some(build_storage_client().await); + match get_env_var_or_panic("DATA_STORAGE").as_str() { + "s3" => self + .storage + .as_ref() + .unwrap() + .build_test_bucket(&get_env_var_or_panic("AWS_S3_BUCKET_NAME")) + .await + .unwrap(), + _ => panic!("Unsupported Storage Client"), + } + } + + // return config and server as tuple + let config = Config::new( + self.starknet_client.unwrap(), + self.da_client.unwrap(), + self.prover_client.unwrap(), + self.settlement_client.unwrap(), + self.database.unwrap(), + self.queue.unwrap(), + self.storage.unwrap(), + Box::new(mock_job) + ); + + config_force_init(config).await; + + server + } + pub async fn build(mut self) -> MockServer { dotenvy::from_filename("../.env.test").expect("Failed to load the .env file"); @@ -130,6 +207,7 @@ impl TestConfigBuilder { self.database.unwrap(), self.queue.unwrap(), self.storage.unwrap(), + Box::new(MockJob::new()) ); config_force_init(config).await; diff --git a/crates/orchestrator/src/tests/jobs/mod.rs b/crates/orchestrator/src/tests/jobs/mod.rs index 7a707131..a19bbce7 100644 --- a/crates/orchestrator/src/tests/jobs/mod.rs +++ b/crates/orchestrator/src/tests/jobs/mod.rs @@ -20,3 +20,63 @@ async fn create_job_fails_job_already_exists() { async fn create_job_fails_works_new_job() { // TODO } + +#[cfg(test)] +mod job_handler_tests { + use crate::config::config; + use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; + use crate::jobs::{create_job, MockJob}; + use crate::tests::common::drop_database; + use crate::tests::config::TestConfigBuilder; + use rstest::rstest; + use std::collections::HashMap; + use uuid::Uuid; + + #[rstest] + #[case(JobType::SnosRun, false, true)] + #[case(JobType::ProofCreation, true, true)] + // #[case(JobType::ProofRegistration, false, false)] + #[tokio::test] + async fn test_create_job_handler( + #[case] job_type: JobType, + #[case] job_exists_in_db: bool, + #[case] job_implemented: 
bool, + ) { + // MODIFY : + // If queue needs to be spun up. + // We need to implement it in localstack. + + let job_item = JobItem { + id: Uuid::new_v4(), + internal_id: "0".to_string(), + job_type: job_type.clone(), + status: JobStatus::Created, + external_id: ExternalId::Number(0), + metadata: Default::default(), + version: 0, + }; + + // Expecting for create_job handler for that particular job. + let mut job_handler = MockJob::new(); + + // if job_implemented && !job_exists_in_db { + // job_handler.expect_create_job().times(1); + // } + + TestConfigBuilder::new().build_with_mock_job(job_handler).await; + drop_database().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + if job_exists_in_db { + database_client.create_job(job_item).await.unwrap(); + } + + if job_implemented && !job_exists_in_db { + let _ = create_job(job_type, "0".to_string(), HashMap::new()).await.is_ok(); + } else { + let _ = create_job(job_type, "0".to_string(), HashMap::new()).await.is_err(); + } + } +} From a79b63373f6f6bcdb178974525d73801031e9cfa Mon Sep 17 00:00:00 2001 From: apoorvsadana <95699312+apoorvsadana@users.noreply.github.com> Date: Tue, 30 Jul 2024 21:36:19 +0530 Subject: [PATCH 20/44] mock job handler --- crates/orchestrator/src/config.rs | 23 +++--- crates/orchestrator/src/jobs/mod.rs | 44 ++++++++--- crates/orchestrator/src/tests/common/mod.rs | 4 +- crates/orchestrator/src/tests/config.rs | 82 +++------------------ crates/orchestrator/src/tests/jobs/mod.rs | 2 +- 5 files changed, 60 insertions(+), 95 deletions(-) diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs index bed1c7b0..0691527d 100644 --- a/crates/orchestrator/src/config.rs +++ b/crates/orchestrator/src/config.rs @@ -45,7 +45,7 @@ pub struct Config { storage: Box, /// Job Handler (to be used during testing only.) #[cfg(test)] - pub(crate) job_handler: Box, + pub job_handler: MockJob, } /// Initializes the app config @@ -71,7 +71,17 @@ pub async fn init_config() -> Config { let storage_client = build_storage_client().await; - Config::new(Arc::new(provider), da_client, prover_client, settlement_client, database, queue, storage_client, Box::new(MockJob::new())) + Config::new( + Arc::new(provider), + da_client, + prover_client, + settlement_client, + database, + queue, + storage_client, + #[cfg(test)] + MockJob::new(), + ) } impl Config { @@ -85,8 +95,7 @@ impl Config { queue: Box, storage: Box, // to be used in test environment only - #[allow(unused_variables)] - job_handler: Box + #[cfg(test)] job_handler: MockJob, ) -> Self { Self { starknet_client, @@ -135,12 +144,6 @@ impl Config { pub fn storage(&self) -> &dyn DataStorage { self.storage.as_ref() } - - /// Returns the job handler (used in test environment) - #[cfg(test)] - pub fn job_handler(&self) -> &MockJob { - self.job_handler.as_ref() - } } /// The app config. It can be accessed from anywhere inside the service. diff --git a/crates/orchestrator/src/jobs/mod.rs b/crates/orchestrator/src/jobs/mod.rs index 1e07ca54..3b48176e 100644 --- a/crates/orchestrator/src/jobs/mod.rs +++ b/crates/orchestrator/src/jobs/mod.rs @@ -4,7 +4,7 @@ use std::time::Duration; use async_trait::async_trait; use color_eyre::eyre::eyre; use color_eyre::Result; -use mockall::automock; +use mockall::{automock, mock}; use tracing::log; use uuid::Uuid; @@ -22,7 +22,6 @@ pub mod state_update_job; /// The Job trait is used to define the methods that a job /// should implement to be used as a job for the orchestrator. 
The orchestrator automatically /// handles queueing and processing of jobs as long as they implement the trait. -#[automock] #[async_trait] pub trait Job: Send + Sync { /// Should build a new job item and return it @@ -50,6 +49,28 @@ pub trait Job: Send + Sync { fn verification_polling_delay_seconds(&self) -> u64; } +mock! { + pub Job {} + impl Clone for Job { + fn clone(&self) -> Self; + } + + #[async_trait] + impl Job for Job { + async fn create_job( + &self, + config: &Config, + internal_id: String, + metadata: HashMap, + ) -> Result; + async fn process_job(&self, config: &Config, job: &mut JobItem) -> Result; + async fn verify_job(&self, config: &Config, job: &mut JobItem) -> Result; + fn max_process_attempts(&self) -> u64; + fn max_verification_attempts(&self) -> u64; + fn verification_polling_delay_seconds(&self) -> u64; + } +} + pub mod types; /// Creates the job in the DB in the created state and adds it to the process queue @@ -65,7 +86,7 @@ pub async fn create_job(job_type: JobType, internal_id: String, metadata: HashMa )); } - let job_handler = get_job_handler(&job_type); + let job_handler = get_job_handler(&job_type).await; let job_item = job_handler.create_job(config.as_ref(), internal_id, metadata).await?; println!(">>>> job_item : {:?}", job_item); config.database().create_job(job_item.clone()).await?; @@ -96,7 +117,7 @@ pub async fn process_job(id: Uuid) -> Result<()> { // outdated config.database().update_job_status(&job, JobStatus::LockedForProcessing).await?; - let job_handler = get_job_handler(&job.job_type); + let job_handler = get_job_handler(&job.job_type).await; let external_id = job_handler.process_job(config.as_ref(), &mut job).await?; let metadata = increment_key_in_metadata(&job.metadata, JOB_PROCESS_ATTEMPT_METADATA_KEY)?; @@ -130,7 +151,7 @@ pub async fn verify_job(id: Uuid) -> Result<()> { } } - let job_handler = get_job_handler(&job.job_type); + let job_handler = get_job_handler(&job.job_type).await; let verification_status = job_handler.verify_job(config.as_ref(), &mut job).await?; match verification_status { @@ -176,12 +197,8 @@ pub async fn verify_job(id: Uuid) -> Result<()> { Ok(()) } - -async fn get_job_handler(job_type: &JobType) -> Box<&dyn Job> { - #[cfg(test)] - return Box::new(config().await.job_handler.as_ref()); - - #[cfg(not(test))] +#[cfg(not(test))] +async fn get_job_handler(job_type: &JobType) -> Box { match job_type { JobType::DataSubmission => Box::new(da_job::DaJob), JobType::SnosRun => Box::new(snos_job::SnosJob), @@ -191,6 +208,11 @@ async fn get_job_handler(job_type: &JobType) -> Box<&dyn Job> { } } +#[cfg(test)] +async fn get_job_handler<'a>(_job_type: &JobType) -> Box { + return Box::new(config().await.job_handler.clone()); +} + async fn get_job(id: Uuid) -> Result { let config = config().await; let job = config.database().get_job_by_id(id).await?; diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index 9631b551..0526e72a 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -19,10 +19,10 @@ use crate::data_storage::MockDataStorage; use crate::database::mongodb::config::MongoDbConfig; use crate::database::mongodb::MongoDb; use crate::database::{DatabaseConfig, MockDatabase}; -use crate::jobs::MockJob; use crate::jobs::types::JobStatus::Created; use crate::jobs::types::JobType::DataSubmission; use crate::jobs::types::{ExternalId, JobItem}; +use crate::jobs::MockJob; use crate::queue::MockQueueProvider; pub async fn init_config( 
@@ -55,7 +55,7 @@ pub async fn init_config( Box::new(database), Box::new(queue), Box::new(storage_client), - Box::new(MockJob::new()) + MockJob::new(), ) } diff --git a/crates/orchestrator/src/tests/config.rs b/crates/orchestrator/src/tests/config.rs index 38a6c4da..ef04fde7 100644 --- a/crates/orchestrator/src/tests/config.rs +++ b/crates/orchestrator/src/tests/config.rs @@ -71,76 +71,9 @@ impl TestConfigBuilder { self } - pub async fn build_with_mock_job(mut self, mock_job: MockJob) -> MockServer { - dotenvy::from_filename("../.env.test").expect("Failed to load the .env file"); - - let server = MockServer::start(); - - // init starknet client - if self.starknet_client.is_none() { - let provider = JsonRpcClient::new(HttpTransport::new( - Url::parse(format!("http://localhost:{}", server.port()).as_str()).expect("Failed to parse URL"), - )); - self.starknet_client = Some(Arc::new(provider)); - } - - // init database - if self.database.is_none() { - self.database = Some(Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await)); - } - - // init queue - if self.queue.is_none() { - self.queue = Some(Box::new(SqsQueue {})); - } - - // init the DA client - if self.da_client.is_none() { - self.da_client = Some(build_da_client().await); - } - - let settings_provider = DefaultSettingsProvider {}; - - // init the Settings client - if self.settlement_client.is_none() { - self.settlement_client = Some(build_settlement_client(&settings_provider).await); - } - - // init the Prover client - if self.prover_client.is_none() { - self.prover_client = Some(build_prover_service(&settings_provider)); - } - - // init the storage client - if self.storage.is_none() { - self.storage = Some(build_storage_client().await); - match get_env_var_or_panic("DATA_STORAGE").as_str() { - "s3" => self - .storage - .as_ref() - .unwrap() - .build_test_bucket(&get_env_var_or_panic("AWS_S3_BUCKET_NAME")) - .await - .unwrap(), - _ => panic!("Unsupported Storage Client"), - } - } - - // return config and server as tuple - let config = Config::new( - self.starknet_client.unwrap(), - self.da_client.unwrap(), - self.prover_client.unwrap(), - self.settlement_client.unwrap(), - self.database.unwrap(), - self.queue.unwrap(), - self.storage.unwrap(), - Box::new(mock_job) - ); - - config_force_init(config).await; - - server + pub async fn mock_job_handler(mut self, mock_job: MockJob) -> TestConfigBuilder { + self.job_handler = Some(mock_job); + self } pub async fn build(mut self) -> MockServer { @@ -183,6 +116,13 @@ impl TestConfigBuilder { self.prover_client = Some(build_prover_service(&settings_provider)); } + // init job handler + if self.job_handler.is_none() { + self.job_handler = Some(MockJob::new()); + } + let mut temp_job_handler = MockJob::new(); + temp_job_handler.expect_clone().return_const(self.job_handler.unwrap()); + // init the storage client if self.storage.is_none() { self.storage = Some(build_storage_client().await); @@ -207,7 +147,7 @@ impl TestConfigBuilder { self.database.unwrap(), self.queue.unwrap(), self.storage.unwrap(), - Box::new(MockJob::new()) + MockJob::new(), ); config_force_init(config).await; diff --git a/crates/orchestrator/src/tests/jobs/mod.rs b/crates/orchestrator/src/tests/jobs/mod.rs index a19bbce7..df9e3117 100644 --- a/crates/orchestrator/src/tests/jobs/mod.rs +++ b/crates/orchestrator/src/tests/jobs/mod.rs @@ -63,7 +63,7 @@ mod job_handler_tests { // job_handler.expect_create_job().times(1); // } - TestConfigBuilder::new().build_with_mock_job(job_handler).await; + 
TestConfigBuilder::new().mock_job_handler(job_handler).await; drop_database().await.unwrap(); let config = config().await; From e026d8d62f36de194a31e22ac45557bf1f4310e4 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Thu, 1 Aug 2024 04:55:29 +0530 Subject: [PATCH 21/44] feat : added jobs test and modified worker tests --- CHANGELOG.md | 1 + Cargo.lock | 55 +++++- Cargo.toml | 2 +- crates/da-clients/ethereum/src/config.rs | 4 +- crates/da-clients/ethereum/src/lib.rs | 4 +- crates/orchestrator/Cargo.toml | 3 +- crates/orchestrator/src/config.rs | 36 +--- .../src/data_storage/aws_s3/mod.rs | 5 +- crates/orchestrator/src/data_storage/types.rs | 3 +- crates/orchestrator/src/database/mod.rs | 2 +- .../orchestrator/src/database/mongodb/mod.rs | 13 +- crates/orchestrator/src/jobs/da_job/mod.rs | 19 +- .../src/jobs/job_handler_factory.rs | 65 +++++++ crates/orchestrator/src/jobs/mod.rs | 53 ++---- .../src/jobs/state_update_job/mod.rs | 17 +- .../src/jobs/state_update_job/utils.rs | 3 +- crates/orchestrator/src/tests/common/mod.rs | 2 - crates/orchestrator/src/tests/config.rs | 29 +-- .../src/tests/data_storage/mod.rs | 9 +- crates/orchestrator/src/tests/database/mod.rs | 5 +- .../orchestrator/src/tests/jobs/da_job/mod.rs | 4 +- crates/orchestrator/src/tests/jobs/mod.rs | 165 ++++++++++++++++-- .../src/tests/jobs/proving_job/mod.rs | 2 +- .../src/tests/jobs/state_update_job/mod.rs | 31 ++-- .../src/tests/workers/proving/mod.rs | 45 +++-- .../src/tests/workers/snos/mod.rs | 34 +++- .../src/tests/workers/update_state/mod.rs | 31 +++- .../src/tests/workers/utils/mod.rs | 52 ++++-- crates/orchestrator/src/workers/proving.rs | 6 +- .../orchestrator/src/workers/update_state.rs | 4 +- .../gps-fact-checker/src/fact_node.rs | 4 +- .../clients/interfaces/validity_interface.rs | 16 +- .../ethereum/src/clients/validity.rs | 4 +- .../ethereum/src/conversion.rs | 4 +- crates/settlement-clients/ethereum/src/lib.rs | 40 ++--- .../settlement-clients/ethereum/src/types.rs | 12 +- crates/settlement-clients/starknet/src/lib.rs | 28 ++- rust-toolchain.toml | 2 +- 38 files changed, 518 insertions(+), 296 deletions(-) create mode 100644 crates/orchestrator/src/jobs/job_handler_factory.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 40fd1c8d..50c5285b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - Function to calculate the kzg proof of x_0. - Tests for updating the state. - Function to update the state and publish blob on ethereum in state update job. +- Tests for job handlers in orchestrator/src/jobs/mod.rs. 
## Changed diff --git a/Cargo.lock b/Cargo.lock index 57c82755..76b771e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3489,7 +3489,7 @@ dependencies = [ "async-trait", "axum 0.7.5", "color-eyre", - "mockall", + "mockall 0.12.1", "starknet", ] @@ -4064,7 +4064,7 @@ dependencies = [ "color-eyre", "da-client-interface", "dotenv", - "mockall", + "mockall 0.12.1", "reqwest 0.12.5", "rstest 0.18.2", "serde", @@ -4084,7 +4084,7 @@ dependencies = [ "c-kzg", "color-eyre", "dotenv", - "mockall", + "mockall 0.12.1", "reqwest 0.12.5", "rstest 0.18.2", "serde", @@ -5881,7 +5881,21 @@ dependencies = [ "downcast", "fragile", "lazy_static", - "mockall_derive", + "mockall_derive 0.12.1", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "mockall_derive 0.13.0", "predicates", "predicates-tree", ] @@ -5898,6 +5912,30 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "mockall_derive" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.66", +] + +[[package]] +name = "mockall_double" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1ca96e5ac35256ae3e13536edd39b172b88f41615e1d7b653c8ad24524113e8" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "mongodb" version = "2.8.2" @@ -6301,7 +6339,8 @@ dependencies = [ "log", "majin-blob-core", "majin-blob-types", - "mockall", + "mockall 0.13.0", + "mockall_double", "mongodb", "num", "num-bigint", @@ -7013,7 +7052,7 @@ dependencies = [ "async-trait", "cairo-vm 1.0.0-rc3", "gps-fact-checker", - "mockall", + "mockall 0.12.1", "snos", "thiserror", "utils", @@ -8165,7 +8204,7 @@ dependencies = [ "axum 0.7.5", "c-kzg", "color-eyre", - "mockall", + "mockall 0.12.1", "starknet", ] @@ -8690,7 +8729,7 @@ dependencies = [ "color-eyre", "dotenv", "lazy_static", - "mockall", + "mockall 0.12.1", "reqwest 0.12.5", "rstest 0.18.2", "serde", diff --git a/Cargo.toml b/Cargo.toml index b715e867..594e904d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,4 +87,4 @@ utils = { path = "crates/utils" } prover-client-interface = { path = "crates/prover-services/prover-client-interface" } gps-fact-checker = { path = "crates/prover-services/gps-fact-checker" } sharp-service = { path = "crates/prover-services/sharp-service" } -orchestrator = { path = "crates/orchestrator" } +orchestrator = { path = "crates/orchestrator" } \ No newline at end of file diff --git a/crates/da-clients/ethereum/src/config.rs b/crates/da-clients/ethereum/src/config.rs index b50604b2..34370036 100644 --- a/crates/da-clients/ethereum/src/config.rs +++ b/crates/da-clients/ethereum/src/config.rs @@ -1,6 +1,8 @@ use std::str::FromStr; -use alloy::{network::Ethereum, providers::ProviderBuilder, rpc::client::RpcClient}; +use alloy::network::Ethereum; +use alloy::providers::ProviderBuilder; +use alloy::rpc::client::RpcClient; use async_trait::async_trait; use da_client_interface::DaConfig; use url::Url; diff --git a/crates/da-clients/ethereum/src/lib.rs b/crates/da-clients/ethereum/src/lib.rs index 68c933bd..e48f5ca7 100644 --- a/crates/da-clients/ethereum/src/lib.rs +++ 
b/crates/da-clients/ethereum/src/lib.rs @@ -20,8 +20,8 @@ pub struct EthereumDaClient { #[async_trait] impl DaClient for EthereumDaClient { async fn publish_state_diff(&self, _state_diff: Vec>, _to: &[u8; 32]) -> Result { - // Here in case of ethereum we are not publishing the state diff because we are doing it all together in update_state job. - // So we don't need to send the blob here. + // Here in case of ethereum we are not publishing the state diff because we are doing it all + // together in update_state job. So we don't need to send the blob here. Ok("NA".to_string()) } diff --git a/crates/orchestrator/Cargo.toml b/crates/orchestrator/Cargo.toml index 6ca4c43f..cf4a4ba1 100644 --- a/crates/orchestrator/Cargo.toml +++ b/crates/orchestrator/Cargo.toml @@ -35,7 +35,8 @@ lazy_static = { workspace = true } log = "0.4.21" majin-blob-core = { git = "https://github.com/AbdelStark/majin-blob", branch = "main" } majin-blob-types = { git = "https://github.com/AbdelStark/majin-blob", branch = "main" } -mockall = "0.12.1" +mockall = { version = "0.13.0", features = ["nightly"] } +mockall_double = "0.3.1" mongodb = { workspace = true, features = ["bson-uuid-1"], optional = true } num = { workspace = true } num-bigint = { workspace = true } diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs index 0691527d..76335c19 100644 --- a/crates/orchestrator/src/config.rs +++ b/crates/orchestrator/src/config.rs @@ -1,8 +1,5 @@ use std::sync::Arc; -use crate::data_storage::aws_s3::config::AWSS3Config; -use crate::data_storage::aws_s3::AWSS3; -use crate::data_storage::{DataStorage, DataStorageConfig}; use arc_swap::{ArcSwap, Guard}; use da_client_interface::{DaClient, DaConfig}; use dotenvy::dotenv; @@ -19,10 +16,12 @@ use utils::env_utils::get_env_var_or_panic; use utils::settings::default::DefaultSettingsProvider; use utils::settings::SettingsProvider; +use crate::data_storage::aws_s3::config::AWSS3Config; +use crate::data_storage::aws_s3::AWSS3; +use crate::data_storage::{DataStorage, DataStorageConfig}; use crate::database::mongodb::config::MongoDbConfig; use crate::database::mongodb::MongoDb; use crate::database::{Database, DatabaseConfig}; -use crate::jobs::MockJob; use crate::queue::sqs::SqsQueue; use crate::queue::QueueProvider; @@ -43,9 +42,6 @@ pub struct Config { queue: Box, /// Storage client storage: Box, - /// Job Handler (to be used during testing only.) 
- #[cfg(test)] - pub job_handler: MockJob, } /// Initializes the app config @@ -71,17 +67,7 @@ pub async fn init_config() -> Config { let storage_client = build_storage_client().await; - Config::new( - Arc::new(provider), - da_client, - prover_client, - settlement_client, - database, - queue, - storage_client, - #[cfg(test)] - MockJob::new(), - ) + Config::new(Arc::new(provider), da_client, prover_client, settlement_client, database, queue, storage_client) } impl Config { @@ -94,20 +80,8 @@ impl Config { database: Box, queue: Box, storage: Box, - // to be used in test environment only - #[cfg(test)] job_handler: MockJob, ) -> Self { - Self { - starknet_client, - da_client, - prover_client, - settlement_client, - database, - queue, - storage, - #[cfg(test)] - job_handler, - } + Self { starknet_client, da_client, prover_client, settlement_client, database, queue, storage } } /// Returns the starknet client diff --git a/crates/orchestrator/src/data_storage/aws_s3/mod.rs b/crates/orchestrator/src/data_storage/aws_s3/mod.rs index 50ae6e3e..386c4210 100644 --- a/crates/orchestrator/src/data_storage/aws_s3/mod.rs +++ b/crates/orchestrator/src/data_storage/aws_s3/mod.rs @@ -1,5 +1,3 @@ -use crate::data_storage::aws_s3::config::AWSS3Config; -use crate::data_storage::DataStorage; use async_trait::async_trait; use aws_sdk_s3::config::{Builder, Credentials, Region}; use aws_sdk_s3::primitives::ByteStream; @@ -7,6 +5,9 @@ use aws_sdk_s3::Client; use bytes::Bytes; use color_eyre::Result; +use crate::data_storage::aws_s3::config::AWSS3Config; +use crate::data_storage::DataStorage; + /// Module for AWS S3 config structs and implementations pub mod config; diff --git a/crates/orchestrator/src/data_storage/types.rs b/crates/orchestrator/src/data_storage/types.rs index 0fc1a6fb..31270558 100644 --- a/crates/orchestrator/src/data_storage/types.rs +++ b/crates/orchestrator/src/data_storage/types.rs @@ -1,6 +1,7 @@ +use std::collections::HashMap; + use cairo_vm::Felt252; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; /// This struct represents the contract changes that will be in `StarknetOsOutput` /// as a vector. 
diff --git a/crates/orchestrator/src/database/mod.rs b/crates/orchestrator/src/database/mod.rs
index 387457d5..aacfc49f 100644
--- a/crates/orchestrator/src/database/mod.rs
+++ b/crates/orchestrator/src/database/mod.rs
@@ -1,6 +1,6 @@
-use ::mongodb::bson::doc;
 use std::collections::HashMap;
 
+use ::mongodb::bson::doc;
 use async_trait::async_trait;
 use color_eyre::Result;
 use mockall::automock;
diff --git a/crates/orchestrator/src/database/mongodb/mod.rs b/crates/orchestrator/src/database/mongodb/mod.rs
index 90b6ecc7..adc47f02 100644
--- a/crates/orchestrator/src/database/mongodb/mod.rs
+++ b/crates/orchestrator/src/database/mongodb/mod.rs
@@ -1,17 +1,12 @@
-use async_std::stream::StreamExt;
 use std::collections::HashMap;
 
+use async_std::stream::StreamExt;
 use async_trait::async_trait;
 use color_eyre::eyre::eyre;
 use color_eyre::Result;
-use mongodb::bson::{Bson, Document};
-use mongodb::options::{FindOneOptions, UpdateOptions};
-use mongodb::{
-    bson,
-    bson::doc,
-    options::{ClientOptions, ServerApi, ServerApiVersion},
-    Client, Collection,
-};
+use mongodb::bson::{doc, Bson, Document};
+use mongodb::options::{ClientOptions, FindOneOptions, ServerApi, ServerApiVersion, UpdateOptions};
+use mongodb::{bson, Client, Collection};
 use uuid::Uuid;
 
 use crate::database::mongodb::config::MongoDbConfig;
diff --git a/crates/orchestrator/src/jobs/da_job/mod.rs b/crates/orchestrator/src/jobs/da_job/mod.rs
index 4b21b9c4..f6c0452c 100644
--- a/crates/orchestrator/src/jobs/da_job/mod.rs
+++ b/crates/orchestrator/src/jobs/da_job/mod.rs
@@ -319,7 +319,12 @@ fn da_word(class_flag: bool, nonce_change: Option<FieldElement>, num_changes: u6
 
     // checking for nonce here
     if let Some(_new_nonce) = nonce_change {
-        let bytes: [u8; 32] = nonce_change.expect("Not able to convert the nonce_change var into [u8; 32] type. Possible Error : Improper parameter length.").to_bytes_be();
+        let bytes: [u8; 32] = nonce_change
+            .expect(
+                "Not able to convert the nonce_change var into [u8; 32] type.
Possible Error : Improper parameter \
+                 length.",
+            )
+            .to_bytes_be();
         let biguint = BigUint::from_bytes_be(&bytes);
         let binary_string_local = format!("{:b}", biguint);
         let padded_binary_string = format!("{:0>64}", binary_string_local);
@@ -348,17 +353,17 @@ mod tests {
     use std::io::Read;
 
     use ::serde::{Deserialize, Serialize};
+    use da_client_interface::MockDaClient;
     use httpmock::prelude::*;
    use majin_blob_core::blob;
     use majin_blob_types::serde;
     use majin_blob_types::state_diffs::UnorderedEq;
-    // use majin_blob_types::serde;
-    use crate::data_storage::MockDataStorage;
-    use da_client_interface::MockDaClient;
     use rstest::rstest;
     use serde_json::json;
 
     use super::*;
+    // use majin_blob_types::serde;
+    use crate::data_storage::MockDataStorage;
     use crate::tests::common::init_config;
 
     #[rstest]
@@ -521,10 +526,6 @@ mod tests {
         new_hex_chars = new_hex_chars.trim_start_matches('0').to_string();
 
         // Handle the case where the trimmed string is empty (e.g., data was all zeros)
-        if new_hex_chars.is_empty() {
-            "0x0".to_string()
-        } else {
-            format!("0x{}", new_hex_chars)
-        }
+        if new_hex_chars.is_empty() { "0x0".to_string() } else { format!("0x{}", new_hex_chars) }
     }
 }
diff --git a/crates/orchestrator/src/jobs/job_handler_factory.rs b/crates/orchestrator/src/jobs/job_handler_factory.rs
new file mode 100644
index 00000000..a61a13d8
--- /dev/null
+++ b/crates/orchestrator/src/jobs/job_handler_factory.rs
@@ -0,0 +1,65 @@
+use mockall::automock;
+
+#[automock]
+pub mod factory {
+    use std::sync::Arc;
+
+    #[allow(unused_imports)]
+    use mockall::automock;
+
+    use crate::jobs::types::JobType;
+    use crate::jobs::{da_job, proving_job, snos_job, state_update_job, Job};
+
+    /// To get the job handler
+    //        +-------------------+
+    //        |                   |
+    //        | Arc<Box<dyn Job>> |
+    //        |                   |
+    //        +--------+----------+
+    //                 |
+    //                 |    +----------------+
+    //                 |    |                |
+    //                 +--->|  Box<dyn Job>  |
+    //                 |    |                |
+    //                 |    +----------------+
+    //                 |            |
+    //                 |            |
+    //         +-------v-------+    |
+    //         |               |    |
+    //         |   Closure 1   |    |
+    //         |               |    |
+    //         +---------------+    |
+    //                 |
+    //         +---------------+    |
+    //         |               |    |
+    //         |   Closure x   |    |
+    //         |               |    |
+    //         +---------------+    |
+    //                 |
+    //                 |
+    //                 v
+    //          +--------------+
+    //          |              |
+    //          |   dyn Job    |
+    //          | (job_handler)|
+    //          |              |
+    //          +--------------+
+    /// We are using Arc so that we can call Arc::clone while testing, which will point
+    /// to the same Box<dyn Job>. So when we are mocking the behaviour:
+    ///
+    /// - We create the MockJob
+    ///
+    /// - And clone that to return it from the mocked get_job_handler.
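+    ///
+    /// A rough usage sketch (the same pattern appears in the tests later in
+    /// this patch):
+    ///
+    /// ```ignore
+    /// let handler: Arc<Box<dyn Job>> = Arc::new(Box::new(MockJob::new()));
+    /// let ctx = mock_factory::get_job_handler_context();
+    /// ctx.expect().returning(move |_| Arc::clone(&handler));
+    /// ```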
+    pub async fn get_job_handler(job_type: &JobType) -> Arc<Box<dyn Job>> {
+        // Original implementation
+        let job: Box<dyn Job> = match job_type {
+            JobType::DataSubmission => Box::new(da_job::DaJob),
+            JobType::SnosRun => Box::new(snos_job::SnosJob),
+            JobType::ProofCreation => Box::new(proving_job::ProvingJob),
+            JobType::StateTransition => Box::new(state_update_job::StateUpdateJob),
+            _ => unimplemented!("Job type not implemented yet."),
+        };
+
+        Arc::new(job)
+    }
+}
diff --git a/crates/orchestrator/src/jobs/mod.rs b/crates/orchestrator/src/jobs/mod.rs
index 3b48176e..00d1338b 100644
--- a/crates/orchestrator/src/jobs/mod.rs
+++ b/crates/orchestrator/src/jobs/mod.rs
@@ -4,24 +4,30 @@ use std::time::Duration;
 use async_trait::async_trait;
 use color_eyre::eyre::eyre;
 use color_eyre::Result;
-use mockall::{automock, mock};
+use mockall::automock;
+use mockall_double::double;
 use tracing::log;
 use uuid::Uuid;
 
 use crate::config::{config, Config};
 use crate::jobs::constants::{JOB_PROCESS_ATTEMPT_METADATA_KEY, JOB_VERIFICATION_ATTEMPT_METADATA_KEY};
+#[double]
+use crate::jobs::job_handler_factory::factory;
 use crate::jobs::types::{JobItem, JobStatus, JobType, JobVerificationStatus};
 use crate::queue::job_queue::{add_job_to_process_queue, add_job_to_verification_queue};
 
 pub mod constants;
 pub mod da_job;
+pub mod job_handler_factory;
 pub mod proving_job;
 pub mod register_proof_job;
 pub mod snos_job;
 pub mod state_update_job;
+
 /// The Job trait is used to define the methods that a job
 /// should implement to be used as a job for the orchestrator. The orchestrator automatically
 /// handles queueing and processing of jobs as long as they implement the trait.
+#[automock]
 #[async_trait]
 pub trait Job: Send + Sync {
     /// Should build a new job item and return it
@@ -49,28 +55,6 @@ pub trait Job: Send + Sync {
     fn verification_polling_delay_seconds(&self) -> u64;
 }
 
-mock! {
-    pub Job {}
-    impl Clone for Job {
-        fn clone(&self) -> Self;
-    }
-
-    #[async_trait]
-    impl Job for Job {
-        async fn create_job(
-            &self,
-            config: &Config,
-            internal_id: String,
-            metadata: HashMap<String, String>,
-        ) -> Result<JobItem>;
-        async fn process_job(&self, config: &Config, job: &mut JobItem) -> Result<String>;
-        async fn verify_job(&self, config: &Config, job: &mut JobItem) -> Result<JobVerificationStatus>;
-        fn max_process_attempts(&self) -> u64;
-        fn max_verification_attempts(&self) -> u64;
-        fn verification_polling_delay_seconds(&self) -> u64;
-    }
-}
-
 pub mod types;
 
 /// Creates the job in the DB in the created state and adds it to the process queue
@@ -86,9 +70,8 @@ pub async fn create_job(job_type: JobType, internal_id: String, metadata: HashMa
         ));
     }
 
-    let job_handler = get_job_handler(&job_type).await;
+    let job_handler = factory::get_job_handler(&job_type).await;
     let job_item = job_handler.create_job(config.as_ref(), internal_id, metadata).await?;
-    println!(">>>> job_item : {:?}", job_item);
     config.database().create_job(job_item.clone()).await?;
     add_job_to_process_queue(job_item.id).await?;
 
@@ -117,7 +100,7 @@ pub async fn process_job(id: Uuid) -> Result<()> {
     // outdated
     config.database().update_job_status(&job, JobStatus::LockedForProcessing).await?;
 
-    let job_handler = get_job_handler(&job.job_type).await;
+    let job_handler = factory::get_job_handler(&job.job_type).await;
     let external_id = job_handler.process_job(config.as_ref(), &mut job).await?;
 
     let metadata = increment_key_in_metadata(&job.metadata, JOB_PROCESS_ATTEMPT_METADATA_KEY)?;
@@ -151,7 +134,7 @@ pub async fn verify_job(id: Uuid) -> Result<()> {
         }
     }
 
-    let job_handler = get_job_handler(&job.job_type).await;
+    let job_handler = factory::get_job_handler(&job.job_type).await;
     let verification_status = job_handler.verify_job(config.as_ref(), &mut job).await?;
 
     match verification_status {
@@ -197,22 +180,6 @@ pub async fn verify_job(id: Uuid) -> Result<()> {
     Ok(())
 }
 
-#[cfg(not(test))]
-async fn get_job_handler(job_type: &JobType) -> Box<dyn Job> {
-    match job_type {
-        JobType::DataSubmission => Box::new(da_job::DaJob),
-        JobType::SnosRun => Box::new(snos_job::SnosJob),
-        JobType::ProofCreation => Box::new(proving_job::ProvingJob),
-        JobType::StateTransition => Box::new(state_update_job::StateUpdateJob),
-        _ => unimplemented!("Job type not implemented yet."),
-    }
-}
-
-#[cfg(test)]
-async fn get_job_handler<'a>(_job_type: &JobType) -> Box<dyn Job> {
-    return Box::new(config().await.job_handler.clone());
-}
-
 async fn get_job(id: Uuid) -> Result<JobItem> {
     let config = config().await;
     let job = config.database().get_job_by_id(id).await?;
diff --git a/crates/orchestrator/src/jobs/state_update_job/mod.rs b/crates/orchestrator/src/jobs/state_update_job/mod.rs
index d60c86d1..2e6167d6 100644
--- a/crates/orchestrator/src/jobs/state_update_job/mod.rs
+++ b/crates/orchestrator/src/jobs/state_update_job/mod.rs
@@ -7,16 +7,14 @@ use async_trait::async_trait;
 use cairo_vm::Felt252;
 use color_eyre::eyre::eyre;
 use color_eyre::Result;
+use settlement_client_interface::SettlementVerificationStatus;
 use snos::io::output::StarknetOsOutput;
 use uuid::Uuid;
 
-use settlement_client_interface::SettlementVerificationStatus;
-
 use super::constants::{
     JOB_METADATA_STATE_UPDATE_ATTEMPT_PREFIX, JOB_METADATA_STATE_UPDATE_LAST_FAILED_BLOCK_NO,
     JOB_PROCESS_ATTEMPT_METADATA_KEY,
 };
-
 use crate::config::{config, Config};
 use crate::constants::SNOS_OUTPUT_FILE_NAME;
 use crate::jobs::constants::JOB_METADATA_STATE_UPDATE_BLOCKS_TO_SETTLE_KEY;
@@ -76,13 +74,20 @@ impl Job for StateUpdateJob {
        self.insert_attempts_into_metadata(job, &attempt_no, &sent_tx_hashes);
 
         // external_id returned corresponds to the last block number settled
-        Ok(block_numbers.last().expect("Last number in block_numbers array returned as None. Possible Error : Delay in job processing or Failed job execution.").to_string())
+        Ok(block_numbers
+            .last()
+            .expect(
+                "Last number in block_numbers array returned as None. Possible Error : Delay in job processing or \
+                 Failed job execution.",
+            )
+            .to_string())
     }
 
     /// Returns the status of the passed job.
     /// Status will be verified if:
     /// 1. the last settlement tx hash is successful,
-    /// 2. the expected last settled block from our configuration is indeed the one found in the provider.
+    /// 2. the expected last settled block from our configuration is indeed the one found in the
+    ///    provider.
     async fn verify_job(&self, config: &Config, job: &mut JobItem) -> Result<JobVerificationStatus> {
         let attempt_no =
             job.metadata.get(JOB_PROCESS_ATTEMPT_METADATA_KEY).expect("Could not find current attempt number.").clone();
@@ -115,7 +120,7 @@ impl Job for StateUpdateJob {
                         return Ok(new_status.into());
                     }
                     SettlementVerificationStatus::Pending => {
-                        return Err(eyre!("Tx {tx_hash} should not be pending."))
+                        return Err(eyre!("Tx {tx_hash} should not be pending."));
                     }
                     SettlementVerificationStatus::Verified => {}
                 }
diff --git a/crates/orchestrator/src/jobs/state_update_job/utils.rs b/crates/orchestrator/src/jobs/state_update_job/utils.rs
index 1d92c9a4..c696b06c 100644
--- a/crates/orchestrator/src/jobs/state_update_job/utils.rs
+++ b/crates/orchestrator/src/jobs/state_update_job/utils.rs
@@ -1,6 +1,7 @@
+use color_eyre::eyre::eyre;
+
 use crate::config::config;
 use crate::constants::BLOB_DATA_FILE_NAME;
-use color_eyre::eyre::eyre;
 
 /// Fetching the blob data (stored in remote storage during DA job) for a particular block
 pub async fn fetch_blob_data_for_block(block_number: u64) -> color_eyre::Result<Vec<Vec<u8>>> {
diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs
index 0526e72a..858635a9 100644
--- a/crates/orchestrator/src/tests/common/mod.rs
+++ b/crates/orchestrator/src/tests/common/mod.rs
@@ -22,7 +22,6 @@ use crate::database::{DatabaseConfig, MockDatabase};
 use crate::jobs::types::JobStatus::Created;
 use crate::jobs::types::JobType::DataSubmission;
 use crate::jobs::types::{ExternalId, JobItem};
-use crate::jobs::MockJob;
 use crate::queue::MockQueueProvider;
 
 pub async fn init_config(
@@ -55,7 +54,6 @@ pub async fn init_config(
         Box::new(database),
         Box::new(queue),
         Box::new(storage_client),
-        MockJob::new(),
     )
 }
 
diff --git a/crates/orchestrator/src/tests/config.rs b/crates/orchestrator/src/tests/config.rs
index ef04fde7..a586a07d 100644
--- a/crates/orchestrator/src/tests/config.rs
+++ b/crates/orchestrator/src/tests/config.rs
@@ -1,10 +1,7 @@
 use std::sync::Arc;
 
-use crate::config::{
-    build_da_client, build_prover_service, build_settlement_client, build_storage_client, config_force_init, Config,
-};
-use crate::data_storage::DataStorage;
 use da_client_interface::DaClient;
+use httpmock::MockServer;
 use prover_client_interface::ProverClient;
 use settlement_client_interface::SettlementClient;
 use starknet::providers::jsonrpc::HttpTransport;
@@ -12,14 +9,15 @@ use starknet::providers::{JsonRpcClient, Url};
 use utils::env_utils::get_env_var_or_panic;
 use utils::settings::default::DefaultSettingsProvider;
 
+use crate::config::{
+    build_da_client, build_prover_service, build_settlement_client, build_storage_client, config_force_init, Config,
+};
+use
crate::data_storage::DataStorage;
 use crate::database::mongodb::config::MongoDbConfig;
 use crate::database::mongodb::MongoDb;
 use crate::database::{Database, DatabaseConfig};
 use crate::queue::sqs::SqsQueue;
 use crate::queue::QueueProvider;
-
-use crate::jobs::MockJob;
-use httpmock::MockServer;
 
 // Inspiration : https://rust-unofficial.github.io/patterns/patterns/creational/builder.html
 // TestConfigBuilder allows to heavily customise the global configs based on the test's requirement.
 // Eg: We want to mock only the da client and leave rest to be as it is, use mock_da_client.
@@ -40,9 +38,6 @@ pub struct TestConfigBuilder {
     queue: Option<Box<dyn QueueProvider>>,
     /// Storage client
     storage: Option<Box<dyn DataStorage>>,
-    /// Job Handler
-    #[allow(dead_code)]
-    job_handler: Option<MockJob>,
 }
 
 impl Default for TestConfigBuilder {
@@ -62,7 +57,6 @@ impl TestConfigBuilder {
             database: None,
             queue: None,
             storage: None,
-            job_handler: None,
         }
     }
 
@@ -71,11 +65,6 @@ impl TestConfigBuilder {
         self
     }
 
-    pub async fn mock_job_handler(mut self, mock_job: MockJob) -> TestConfigBuilder {
-        self.job_handler = Some(mock_job);
-        self
-    }
-
     pub async fn build(mut self) -> MockServer {
         dotenvy::from_filename("../.env.test").expect("Failed to load the .env file");
 
@@ -116,13 +105,6 @@ impl TestConfigBuilder {
             self.prover_client = Some(build_prover_service(&settings_provider));
         }
 
-        // init job handler
-        if self.job_handler.is_none() {
-            self.job_handler = Some(MockJob::new());
-        }
-        let mut temp_job_handler = MockJob::new();
-        temp_job_handler.expect_clone().return_const(self.job_handler.unwrap());
-
         // init the storage client
         if self.storage.is_none() {
            self.storage = Some(build_storage_client().await);
@@ -147,7 +129,6 @@ impl TestConfigBuilder {
             self.database.unwrap(),
             self.queue.unwrap(),
             self.storage.unwrap(),
-            MockJob::new(),
         );
 
         config_force_init(config).await;
diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs
index 8f68d312..20ad48c5 100644
--- a/crates/orchestrator/src/tests/data_storage/mod.rs
+++ b/crates/orchestrator/src/tests/data_storage/mod.rs
@@ -1,12 +1,13 @@
-use crate::data_storage::aws_s3::config::AWSS3Config;
-use crate::data_storage::aws_s3::AWSS3;
-use crate::data_storage::{DataStorage, DataStorageConfig};
-use crate::tests::config::TestConfigBuilder;
 use bytes::Bytes;
 use rstest::rstest;
 use serde_json::json;
 use utils::env_utils::get_env_var_or_panic;
 
+use crate::data_storage::aws_s3::config::AWSS3Config;
+use crate::data_storage::aws_s3::AWSS3;
+use crate::data_storage::{DataStorage, DataStorageConfig};
+use crate::tests::config::TestConfigBuilder;
+
 /// This test checks the ability to put and get data from AWS S3 using `AWSS3`.
 /// It puts JSON data into a test bucket and retrieves it, verifying the data
 /// matches what was originally uploaded.
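For reference, a minimal sketch of the put/get roundtrip this test performs, written directly against the aws-sdk-s3 client rather than the `DataStorage` trait; the bucket name and key below are illustrative placeholders, not values from the patch.

    use aws_sdk_s3::primitives::ByteStream;

    // Put a JSON payload into the bucket, read it back, and compare.
    async fn s3_roundtrip(client: &aws_sdk_s3::Client) -> color_eyre::Result<()> {
        let payload = serde_json::json!({ "id": 1 });
        client
            .put_object()
            .bucket("test-bucket")
            .key("data/test.json")
            .body(ByteStream::from(serde_json::to_vec(&payload)?))
            .send()
            .await?;

        let fetched = client.get_object().bucket("test-bucket").key("data/test.json").send().await?;
        let bytes = fetched.body.collect().await?.into_bytes();
        assert_eq!(serde_json::from_slice::<serde_json::Value>(&bytes)?, payload);
        Ok(())
    }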
diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index f7cca727..af5080ef 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -1,9 +1,10 @@ +use rstest::*; +use uuid::Uuid; + use crate::config::config; use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; use crate::tests::common::drop_database; use crate::tests::config::TestConfigBuilder; -use rstest::*; -use uuid::Uuid; #[rstest] #[tokio::test] diff --git a/crates/orchestrator/src/tests/jobs/da_job/mod.rs b/crates/orchestrator/src/tests/jobs/da_job/mod.rs index 56f08375..3dab09e4 100644 --- a/crates/orchestrator/src/tests/jobs/da_job/mod.rs +++ b/crates/orchestrator/src/tests/jobs/da_job/mod.rs @@ -1,7 +1,5 @@ use std::collections::HashMap; -use crate::config::{config, config_force_init}; -use crate::data_storage::MockDataStorage; use da_client_interface::{DaVerificationStatus, MockDaClient}; use httpmock::prelude::*; use rstest::*; @@ -11,6 +9,8 @@ use uuid::Uuid; use super::super::common::constants::{ETHEREUM_MAX_BLOB_PER_TXN, ETHEREUM_MAX_BYTES_PER_BLOB}; use super::super::common::{default_job_item, init_config}; +use crate::config::{config, config_force_init}; +use crate::data_storage::MockDataStorage; use crate::jobs::da_job::DaJob; use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; use crate::jobs::Job; diff --git a/crates/orchestrator/src/tests/jobs/mod.rs b/crates/orchestrator/src/tests/jobs/mod.rs index df9e3117..c092cdc5 100644 --- a/crates/orchestrator/src/tests/jobs/mod.rs +++ b/crates/orchestrator/src/tests/jobs/mod.rs @@ -23,29 +23,32 @@ async fn create_job_fails_works_new_job() { #[cfg(test)] mod job_handler_tests { + use std::collections::HashMap; + use std::sync::Arc; + + use mockall::predicate::eq; + use rstest::rstest; + use uuid::Uuid; + use crate::config::config; - use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; - use crate::jobs::{create_job, MockJob}; + use crate::jobs::constants::{JOB_PROCESS_ATTEMPT_METADATA_KEY, JOB_VERIFICATION_ATTEMPT_METADATA_KEY}; + use crate::jobs::job_handler_factory::mock_factory; + use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType, JobVerificationStatus}; + use crate::jobs::{create_job, process_job, verify_job, Job, MockJob}; use crate::tests::common::drop_database; use crate::tests::config::TestConfigBuilder; - use rstest::rstest; - use std::collections::HashMap; - use uuid::Uuid; #[rstest] #[case(JobType::SnosRun, false, true)] #[case(JobType::ProofCreation, true, true)] - // #[case(JobType::ProofRegistration, false, false)] + #[should_panic] + #[case(JobType::ProofRegistration, false, false)] #[tokio::test] async fn test_create_job_handler( #[case] job_type: JobType, #[case] job_exists_in_db: bool, #[case] job_implemented: bool, ) { - // MODIFY : - // If queue needs to be spun up. - // We need to implement it in localstack. - let job_item = JobItem { id: Uuid::new_v4(), internal_id: "0".to_string(), @@ -56,14 +59,14 @@ mod job_handler_tests { version: 0, }; - // Expecting for create_job handler for that particular job. let mut job_handler = MockJob::new(); + if job_implemented && !job_exists_in_db { + // Expecting for create_job handler for that particular job. 
+            let job_item_clone = job_item.clone();
+            job_handler.expect_create_job().times(1).returning(move |_, _, _| Ok(job_item_clone.clone()));
+        }
 
-        // if job_implemented && !job_exists_in_db {
-        //     job_handler.expect_create_job().times(1);
-        // }
-
-        TestConfigBuilder::new().mock_job_handler(job_handler).await;
+        TestConfigBuilder::new().build().await;
         drop_database().await.unwrap();
 
         let config = config().await;
@@ -74,9 +77,137 @@ mod job_handler_tests {
         }
 
         if job_implemented && !job_exists_in_db {
-            let _ = create_job(job_type, "0".to_string(), HashMap::new()).await.is_ok();
+            let y: Arc<Box<dyn Job>> = Arc::new(Box::new(job_handler));
+            let ctx = mock_factory::get_job_handler_context();
+            // Mocking the `get_job_handler` call in create_job function.
+            ctx.expect().times(1).with(eq(job_type.clone())).returning(move |_| Arc::clone(&y));
+            let _ = create_job(job_type.clone(), "0".to_string(), HashMap::new()).await.is_ok();
         } else {
             let _ = create_job(job_type, "0".to_string(), HashMap::new()).await.is_err();
         }
     }
+
+    #[rstest]
+    #[case(JobType::SnosRun, JobStatus::Created, true)]
+    #[case(JobType::DataSubmission, JobStatus::VerificationFailed("".to_string()), true)]
+    #[case(JobType::SnosRun, JobStatus::VerificationFailed("".to_string()), false)]
+    #[tokio::test]
+    async fn test_process_job_handler(
+        #[case] job_type: JobType,
+        #[case] job_status: JobStatus,
+        #[case] job_exists_in_db: bool,
+    ) {
+        let job_item = get_random_job_item_by_type_and_status(job_type.clone(), job_status.clone(), "1".to_string());
+
+        // building config
+        TestConfigBuilder::new().build().await;
+        drop_database().await.unwrap();
+
+        let config = config().await;
+        let database_client = config.database();
+        let mut job_handler = MockJob::new();
+        if job_exists_in_db {
+            // creating job in database
+            database_client.create_job(job_item.clone()).await.unwrap();
+            // expecting process job function in job processor to return the external ID
+            job_handler.expect_process_job().times(1).returning(move |_, _| Ok("0xbeef".to_string()));
+            job_handler.expect_verification_polling_delay_seconds().return_const(1u64);
+        }
+
+        if job_exists_in_db && is_valid_job_processing_status(job_status) {
+            let y: Arc<Box<dyn Job>> = Arc::new(Box::new(job_handler));
+            let ctx = mock_factory::get_job_handler_context();
+            // Mocking the `get_job_handler` call in create_job function.
+            ctx.expect().times(1).with(eq(job_type.clone())).returning(move |_| Arc::clone(&y));
+
+            let _ = process_job(job_item.id).await.is_ok();
+            // getting the job
+            let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap();
+            // checking if job_status is updated in db
+            assert_eq!(updated_job.status, JobStatus::PendingVerification);
+        } else {
+            let _ = process_job(job_item.id).await.is_err();
+        }
+    }
+
+    #[rstest]
+    #[case(JobType::DataSubmission, JobStatus::PendingVerification, JobVerificationStatus::Verified, true)]
+    #[case(JobType::DataSubmission, JobStatus::PendingVerification, JobVerificationStatus::Rejected("".to_string()), true)]
+    #[case(JobType::DataSubmission, JobStatus::PendingVerification, JobVerificationStatus::Pending, true)]
+    #[case(JobType::SnosRun, JobStatus::Created, JobVerificationStatus::Rejected("".to_string()), false)]
+    #[tokio::test]
+    async fn test_verify_job_handler(
+        #[case] job_type: JobType,
+        #[case] job_status: JobStatus,
+        #[case] verification_status: JobVerificationStatus,
+        #[case] job_exists_in_db: bool,
+    ) {
+        let job_item = get_random_job_item_by_type_and_status(job_type.clone(), job_status.clone(), "1".to_string());
+        let expected_verification_status = verification_status.clone();
+        // building config
+        TestConfigBuilder::new().build().await;
+        drop_database().await.unwrap();
+
+        let config = config().await;
+        let database_client = config.database();
+        let mut job_handler = MockJob::new();
+
+        if job_exists_in_db {
+            // creating job in database
+            database_client.create_job(job_item.clone()).await.unwrap();
+            // expecting process job function in job processor to return the external ID
+            job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(verification_status.clone()));
+            job_handler.expect_max_process_attempts().returning(move || 2u64);
+            job_handler.expect_max_verification_attempts().returning(move || 2u64);
+            job_handler.expect_verification_polling_delay_seconds().returning(move || 2u64);
+        }
+
+        if job_exists_in_db && is_valid_job_verification_status(job_status) {
+            let y: Arc<Box<dyn Job>> = Arc::new(Box::new(job_handler));
+            let ctx = mock_factory::get_job_handler_context();
+            // Mocking the `get_job_handler` call in create_job function.
+            ctx.expect().times(1).with(eq(job_type.clone())).returning(move |_| Arc::clone(&y));
+
+            let _ = verify_job(job_item.id).await.is_ok();
+
+            let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap();
+
+            if expected_verification_status == JobVerificationStatus::Verified {
+                assert_eq!(updated_job.status, JobStatus::Completed);
+            } else if expected_verification_status == JobVerificationStatus::Rejected("".to_string()) {
+                assert_eq!(updated_job.status, JobStatus::VerificationFailed("".to_string()));
+            } else if expected_verification_status == JobVerificationStatus::Pending {
+                assert_eq!(updated_job.metadata.get(JOB_VERIFICATION_ATTEMPT_METADATA_KEY).unwrap(), "1");
+            }
+        } else {
+            let _ = verify_job(job_item.id).await.is_err();
+        }
+    }
+
+    fn is_valid_job_processing_status(job_status: JobStatus) -> bool {
+        matches!(job_status, JobStatus::Created | JobStatus::VerificationFailed(_))
+    }
+
+    fn is_valid_job_verification_status(job_status: JobStatus) -> bool {
+        matches!(job_status, JobStatus::PendingVerification)
+    }
+
+    fn get_random_job_item_by_type_and_status(
+        job_type: JobType,
+        job_status: JobStatus,
+        internal_id: String,
+    ) -> JobItem {
+        let mut hashmap: HashMap<String, String> = HashMap::new();
+        hashmap.insert(JOB_PROCESS_ATTEMPT_METADATA_KEY.to_string(), "0".to_string());
+        hashmap.insert(JOB_VERIFICATION_ATTEMPT_METADATA_KEY.to_string(), "0".to_string());
+        JobItem {
+            id: Uuid::new_v4(),
+            internal_id,
+            job_type,
+            status: job_status,
+            external_id: ExternalId::Number(0),
+            metadata: hashmap,
+            version: 0,
+        }
+    }
}
diff --git a/crates/orchestrator/src/tests/jobs/proving_job/mod.rs b/crates/orchestrator/src/tests/jobs/proving_job/mod.rs
index 6eeaaef7..35ddd7fa 100644
--- a/crates/orchestrator/src/tests/jobs/proving_job/mod.rs
+++ b/crates/orchestrator/src/tests/jobs/proving_job/mod.rs
@@ -1,12 +1,12 @@
 use std::collections::HashMap;
 
-use crate::config::{config, config_force_init};
 use httpmock::prelude::*;
 use prover_client_interface::{MockProverClient, TaskStatus};
 use rstest::*;
 use uuid::Uuid;
 
 use super::super::common::{default_job_item, init_config};
+use crate::config::{config, config_force_init};
 use crate::jobs::constants::JOB_METADATA_CAIRO_PIE_PATH_KEY;
 use crate::jobs::proving_job::ProvingJob;
 use crate::jobs::types::{JobItem, JobStatus, JobType};
diff --git a/crates/orchestrator/src/tests/jobs/state_update_job/mod.rs b/crates/orchestrator/src/tests/jobs/state_update_job/mod.rs
index 082af9ca..cd5d8f85 100644
--- a/crates/orchestrator/src/tests/jobs/state_update_job/mod.rs
+++ b/crates/orchestrator/src/tests/jobs/state_update_job/mod.rs
@@ -1,29 +1,26 @@
+use std::collections::HashMap;
+use std::fs;
+use std::path::PathBuf;
+
+use bytes::Bytes;
+use httpmock::prelude::*;
+use lazy_static::lazy_static;
 use mockall::predicate::eq;
 use rstest::*;
 use settlement_client_interface::MockSettlementClient;
 
-use bytes::Bytes;
-use std::path::PathBuf;
-use std::{collections::HashMap, fs};
-
 use super::super::common::init_config;
-
-use crate::jobs::{
-    constants::{
-        JOB_METADATA_STATE_UPDATE_BLOCKS_TO_SETTLE_KEY, JOB_METADATA_STATE_UPDATE_FETCH_FROM_TESTS,
-        JOB_PROCESS_ATTEMPT_METADATA_KEY,
-    },
-    state_update_job::StateUpdateJob,
-    types::{JobStatus, JobType},
-    Job,
-};
-
 use crate::config::{config, config_force_init};
 use crate::constants::{BLOB_DATA_FILE_NAME, SNOS_OUTPUT_FILE_NAME};
 use crate::data_storage::MockDataStorage;
+use crate::jobs::constants::{
+    JOB_METADATA_STATE_UPDATE_BLOCKS_TO_SETTLE_KEY, JOB_METADATA_STATE_UPDATE_FETCH_FROM_TESTS,
+    JOB_PROCESS_ATTEMPT_METADATA_KEY,
+};
 use crate::jobs::state_update_job::utils::hex_string_to_u8_vec;
-use httpmock::prelude::*;
-use lazy_static::lazy_static;
+use crate::jobs::state_update_job::StateUpdateJob;
+use crate::jobs::types::{JobStatus, JobType};
+use crate::jobs::Job;
 
 lazy_static! {
     pub static ref CURRENT_PATH: PathBuf = std::env::current_dir().unwrap();
diff --git a/crates/orchestrator/src/tests/workers/proving/mod.rs b/crates/orchestrator/src/tests/workers/proving/mod.rs
index 79a076f4..c8ee61f6 100644
--- a/crates/orchestrator/src/tests/workers/proving/mod.rs
+++ b/crates/orchestrator/src/tests/workers/proving/mod.rs
@@ -1,23 +1,26 @@
+use std::error::Error;
+use std::sync::Arc;
+
+use da_client_interface::MockDaClient;
+use httpmock::MockServer;
+use mockall::predicate::eq;
+use prover_client_interface::MockProverClient;
+use rstest::rstest;
+use settlement_client_interface::MockSettlementClient;
+
 use crate::config::config_force_init;
 use crate::database::MockDatabase;
+use crate::jobs::job_handler_factory::mock_factory;
 use crate::jobs::types::{JobItem, JobStatus, JobType};
+use crate::jobs::{Job, MockJob};
 use crate::queue::MockQueueProvider;
 use crate::tests::common::init_config;
 use crate::tests::workers::utils::{db_checks_proving_worker, get_job_by_mock_id_vector};
 use crate::workers::proving::ProvingWorker;
-use crate::workers::Worker;
-use da_client_interface::MockDaClient;
-use httpmock::MockServer;
-use prover_client_interface::MockProverClient;
-use rstest::rstest;
-use settlement_client_interface::MockSettlementClient;
-use std::error::Error;
-use std::time::Duration;
-use tokio::time::sleep;
 
 #[rstest]
-#[case(false)]
 #[case(true)]
+#[case(false)]
 #[tokio::test]
 async fn test_proving_worker(#[case] incomplete_runs: bool) -> Result<(), Box<dyn Error>> {
     let server = MockServer::start();
@@ -27,12 +30,15 @@ async fn test_proving_worker(#[case] incomplete_runs: bool) -> Result<(), Box<dyn Error>> {
+    // Mocking the get_job_handler function.
+    let mut job_handler = MockJob::new();
+
     let job_items: Vec<JobItem> = get_job_by_mock_id_vector(JobType::ProofCreation, JobStatus::Created, 5, 1)
         .into_iter()
@@ -47,7 +53,7 @@ async fn test_proving_worker(#[case] incomplete_runs: bool) -> Result<(), Box<dyn Error>> {
     let num_vec: Vec<i32> = vec![1, 2, 4, 5];
 
     for i in num_vec {
-        db_checks_proving_worker(i, &mut db);
+        db_checks_proving_worker(i, &mut db, &mut job_handler);
     }
 
     prover_client.expect_submit_task().times(4).returning(|_| Ok("task_id".to_string()));
@@ -60,7 +66,7 @@ async fn test_proving_worker(#[case] incomplete_runs: bool) -> Result<(), Box<dyn Error>> {
 
+    let y: Arc<Box<dyn Job>> = Arc::new(Box::new(job_handler));
+    let ctx = mock_factory::get_job_handler_context();
+    // Mocking the `get_job_handler` call in create_job function.
+    if incomplete_runs {
+        ctx.expect().times(4).with(eq(JobType::ProofCreation)).returning(move |_| Arc::clone(&y));
+    } else {
+        ctx.expect().times(5).with(eq(JobType::ProofCreation)).returning(move |_| Arc::clone(&y));
+    }
+
     let proving_worker = ProvingWorker {};
     proving_worker.run_worker().await?;
 
     Ok(())
 }
+
+use crate::workers::Worker;
diff --git a/crates/orchestrator/src/tests/workers/snos/mod.rs b/crates/orchestrator/src/tests/workers/snos/mod.rs
index 7799ffb2..cc6a6401 100644
--- a/crates/orchestrator/src/tests/workers/snos/mod.rs
+++ b/crates/orchestrator/src/tests/workers/snos/mod.rs
@@ -1,19 +1,24 @@
+use std::error::Error;
+use std::sync::Arc;
+
+use da_client_interface::MockDaClient;
+use httpmock::MockServer;
+use mockall::predicate::eq;
+use rstest::rstest;
+use serde_json::json;
+use uuid::Uuid;
+
 use crate::config::config_force_init;
 use crate::database::MockDatabase;
+use crate::jobs::job_handler_factory::mock_factory;
 use crate::jobs::types::JobType;
+use crate::jobs::{Job, MockJob};
 use crate::queue::job_queue::JOB_PROCESSING_QUEUE;
 use crate::queue::MockQueueProvider;
 use crate::tests::common::init_config;
 use crate::tests::workers::utils::get_job_item_mock_by_id;
 use crate::workers::snos::SnosWorker;
 use crate::workers::Worker;
-use da_client_interface::MockDaClient;
-use httpmock::MockServer;
-use mockall::predicate::eq;
-use rstest::rstest;
-use serde_json::json;
-use std::error::Error;
-use uuid::Uuid;
 
 #[rstest]
 #[case(false)]
@@ -27,6 +32,9 @@ async fn test_snos_worker(#[case] db_val: bool) -> Result<(), Box<dyn Error>> {
     let start_job_index;
     let block;
 
+    // Mocking the get_job_handler function.
+    let mut job_handler = MockJob::new();
+
     // Mocking db function expectations
     if !db_val {
         db.expect_get_last_successful_job_by_type().times(1).with(eq(JobType::SnosRun)).returning(|_| Ok(None));
@@ -51,13 +59,23 @@ async fn test_snos_worker(#[case] db_val: bool) -> Result<(), Box<dyn Error>> {
         let uuid = Uuid::new_v4();
 
+        let job_item = get_job_item_mock_by_id(i.clone().to_string(), uuid);
+        let job_item_cloned = job_item.clone();
+
+        job_handler.expect_create_job().times(1).returning(move |_, _, _| Ok(job_item_cloned.clone()));
+
         // creating jobs call expectations
         db.expect_create_job()
             .times(1)
             .withf(move |item| item.internal_id == i.clone().to_string())
-            .returning(move |_| Ok(get_job_item_mock_by_id(i.clone().to_string(), uuid)));
+            .returning(move |_| Ok(job_item.clone()));
     }
 
+    let y: Arc<Box<dyn Job>> = Arc::new(Box::new(job_handler));
+    let ctx = mock_factory::get_job_handler_context();
+    // Mocking the `get_job_handler` call in create_job function.
+    ctx.expect().times(5).with(eq(JobType::SnosRun)).returning(move |_| Arc::clone(&y));
+
     // Queue function call simulations
     queue
         .expect_send_message_to_queue()
diff --git a/crates/orchestrator/src/tests/workers/update_state/mod.rs b/crates/orchestrator/src/tests/workers/update_state/mod.rs
index a5271f9f..a475a57d 100644
--- a/crates/orchestrator/src/tests/workers/update_state/mod.rs
+++ b/crates/orchestrator/src/tests/workers/update_state/mod.rs
@@ -1,6 +1,17 @@
+use std::error::Error;
+use std::sync::Arc;
+
+use da_client_interface::MockDaClient;
+use httpmock::MockServer;
+use mockall::predicate::eq;
+use rstest::rstest;
+use uuid::Uuid;
+
 use crate::config::config_force_init;
 use crate::database::MockDatabase;
+use crate::jobs::job_handler_factory::mock_factory;
 use crate::jobs::types::{JobStatus, JobType};
+use crate::jobs::{Job, MockJob};
 use crate::queue::MockQueueProvider;
 use crate::tests::common::init_config;
 use crate::tests::workers::utils::{
@@ -8,12 +19,6 @@ use crate::tests::workers::utils::{
 };
 use crate::workers::update_state::UpdateStateWorker;
 use crate::workers::Worker;
-use da_client_interface::MockDaClient;
-use httpmock::MockServer;
-use mockall::predicate::eq;
-use rstest::rstest;
-use std::error::Error;
-use uuid::Uuid;
 
 #[rstest]
 #[case(false, 0)]
@@ -30,6 +35,9 @@ async fn test_update_state_worker(
 
     const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue";
 
+    // Mocking the get_job_handler function.
+    let mut job_handler = MockJob::new();
+
     // Mocking db function expectations
     // If no successful state update jobs exist
     if !last_successful_job_exists {
@@ -55,7 +63,8 @@ async fn test_update_state_worker(
         ))
     });
 
-    // mocking getting of the jobs (when there is a safety check for any pre-existing job during job creation)
+    // mocking getting of the jobs (when there is a safety check for any pre-existing job during job
+    // creation)
     let completed_jobs =
         get_job_by_mock_id_vector(JobType::ProofCreation, JobStatus::Completed, number_of_processed_jobs as u64, 2);
     for job in completed_jobs {
@@ -69,9 +78,17 @@ async fn test_update_state_worker(
         db_create_job_expectations_update_state_worker(
             &mut db,
             get_job_by_mock_id_vector(JobType::ProofCreation, JobStatus::Completed, number_of_processed_jobs as u64, 2),
+            &mut job_handler,
         );
     }
 
+    let y: Arc<Box<dyn Job>> = Arc::new(Box::new(job_handler));
+    let ctx = mock_factory::get_job_handler_context();
+    // Mocking the `get_job_handler` call in create_job function.
+    if last_successful_job_exists {
+        ctx.expect().times(5).with(eq(JobType::StateTransition)).returning(move |_| Arc::clone(&y));
+    }
+
     // Queue function call simulations
     queue
         .expect_send_message_to_queue()
diff --git a/crates/orchestrator/src/tests/workers/utils/mod.rs b/crates/orchestrator/src/tests/workers/utils/mod.rs
index 8e776155..c4fcd5da 100644
--- a/crates/orchestrator/src/tests/workers/utils/mod.rs
+++ b/crates/orchestrator/src/tests/workers/utils/mod.rs
@@ -1,9 +1,12 @@
+use std::collections::HashMap;
+
+use mockall::predicate::eq;
+use uuid::Uuid;
+
 use crate::database::MockDatabase;
 use crate::jobs::constants::JOB_METADATA_CAIRO_PIE_PATH_KEY;
 use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType};
-use mockall::predicate::eq;
-use std::collections::HashMap;
-use uuid::Uuid;
+use crate::jobs::MockJob;
 
 pub fn get_job_item_mock_by_id(id: String, uuid: Uuid) -> JobItem {
     JobItem {
@@ -52,24 +55,34 @@ pub fn get_job_by_mock_id_vector(
     jobs_vec
 }
 
-pub fn db_create_job_expectations_update_state_worker(db: &mut MockDatabase, proof_creation_jobs: Vec<JobItem>) {
+pub fn db_create_job_expectations_update_state_worker(
+    db: &mut MockDatabase,
+    proof_creation_jobs: Vec<JobItem>,
+    mock_job: &mut MockJob,
+) {
     for job in proof_creation_jobs {
         let internal_id = job.internal_id.clone();
-        db.expect_create_job().times(1).withf(move |item| item.internal_id == job.internal_id).returning(move |_| {
-            Ok(JobItem {
-                id: Uuid::new_v4(),
-                internal_id: internal_id.clone(),
-                job_type: JobType::StateTransition,
-                status: JobStatus::Created,
-                external_id: ExternalId::Number(0),
-                metadata: get_hashmap(),
-                version: 0,
-            })
-        });
+        let job_item = JobItem {
+            id: Uuid::new_v4(),
+            internal_id: internal_id.clone(),
+            job_type: JobType::StateTransition,
+            status: JobStatus::Created,
+            external_id: ExternalId::Number(0),
+            metadata: get_hashmap(),
+            version: 0,
+        };
+        let job_item_cloned = job_item.clone();
+
+        mock_job.expect_create_job().times(1).returning(move |_, _, _| Ok(job_item.clone()));
+
+        db.expect_create_job()
+            .times(1)
+            .withf(move |item| item.internal_id == job.internal_id)
+            .returning(move |_| Ok(job_item_cloned.clone()));
     }
 }
 
-pub fn db_checks_proving_worker(id: i32, db: &mut MockDatabase) {
+pub fn db_checks_proving_worker(id: i32, db: &mut MockDatabase, mock_job: &mut MockJob) {
     fn get_job_item_mock_by_id(id: i32) -> JobItem {
         let uuid = Uuid::new_v4();
         JobItem {
@@ -88,10 +101,15 @@ pub fn db_checks_proving_worker(id: i32, db: &mut MockDatabase) {
         .with(eq(id.clone().to_string()), eq(JobType::ProofCreation))
         .returning(|_, _| Ok(None));
 
+    let job_item = get_job_item_mock_by_id(id);
+    let job_item_cloned = job_item.clone();
+
+    mock_job.expect_create_job().times(1).returning(move |_, _, _| Ok(job_item.clone()));
+
     db.expect_create_job()
         .times(1)
        .withf(move |item| item.internal_id == id.clone().to_string())
-        .returning(move |_| Ok(get_job_item_mock_by_id(id)));
+        .returning(move |_| Ok(job_item_cloned.clone()));
 }
 
 pub fn get_hashmap() -> HashMap<String, String> {
diff --git a/crates/orchestrator/src/workers/proving.rs b/crates/orchestrator/src/workers/proving.rs
index 4ec85b91..7fcb7194 100644
--- a/crates/orchestrator/src/workers/proving.rs
+++ b/crates/orchestrator/src/workers/proving.rs
@@ -1,9 +1,11 @@
+use std::error::Error;
+
+use async_trait::async_trait;
+
 use crate::config::config;
 use crate::jobs::create_job;
 use crate::jobs::types::{JobStatus, JobType};
 use crate::workers::Worker;
-use async_trait::async_trait;
-use std::error::Error;
 
 pub struct ProvingWorker;
 
diff --git a/crates/orchestrator/src/workers/update_state.rs b/crates/orchestrator/src/workers/update_state.rs
index c100ab0e..80f9b894 100644
--- a/crates/orchestrator/src/workers/update_state.rs
+++ b/crates/orchestrator/src/workers/update_state.rs
@@ -1,10 +1,10 @@
 use std::error::Error;
 
+use async_trait::async_trait;
+
 use crate::config::config;
 use crate::jobs::create_job;
 use crate::jobs::types::{JobStatus, JobType};
-use async_trait::async_trait;
-
 use crate::workers::Worker;
 
 pub struct UpdateStateWorker;
diff --git a/crates/prover-services/gps-fact-checker/src/fact_node.rs b/crates/prover-services/gps-fact-checker/src/fact_node.rs
index 2d66fbac..494c9669 100644
--- a/crates/prover-services/gps-fact-checker/src/fact_node.rs
+++ b/crates/prover-services/gps-fact-checker/src/fact_node.rs
@@ -12,8 +12,8 @@
 //! constructed using a stack of nodes (initialized to an empty stack) by repeating for each pair:
 //! 1. Add #n_pages lead nodes to the stack.
 //! 2. Pop the top #n_nodes, construct a parent node for them, and push it back to the stack.
-//! After applying the steps above, the stack must contain exactly one node, which will
-//! constitute the root of the Merkle tree.
+//!    After applying the steps above, the stack must contain exactly one node, which will
+//!    constitute the root of the Merkle tree.
 //!
 //! For example, [(2, 2)] will create a Merkle tree with a root and two direct children, while
 //! [(3, 2), (0, 2)] will create a Merkle tree with a root whose left child is a leaf and
diff --git a/crates/settlement-clients/ethereum/src/clients/interfaces/validity_interface.rs b/crates/settlement-clients/ethereum/src/clients/interfaces/validity_interface.rs
index 7b8a31a1..37bf76b0 100644
--- a/crates/settlement-clients/ethereum/src/clients/interfaces/validity_interface.rs
+++ b/crates/settlement-clients/ethereum/src/clients/interfaces/validity_interface.rs
@@ -1,16 +1,14 @@
 use std::sync::Arc;
 
+use alloy::network::Ethereum;
+use alloy::primitives::{I256, U256};
+use alloy::providers::Provider;
+use alloy::rpc::types::eth::TransactionReceipt;
+use alloy::sol;
+use alloy::transports::http::Http;
+use alloy::transports::{RpcError, TransportErrorKind};
 use async_trait::async_trait;
 
-use alloy::{
-    network::Ethereum,
-    primitives::{I256, U256},
-    providers::Provider,
-    rpc::types::eth::TransactionReceipt,
-    sol,
-    transports::{http::Http, RpcError, TransportErrorKind},
-};
-
 use crate::types::LocalWalletSignerMiddleware;
 
 // TODO: should be moved to Zaun:
diff --git a/crates/settlement-clients/ethereum/src/clients/validity.rs b/crates/settlement-clients/ethereum/src/clients/validity.rs
index 8ed89c5b..575c6748 100644
--- a/crates/settlement-clients/ethereum/src/clients/validity.rs
+++ b/crates/settlement-clients/ethereum/src/clients/validity.rs
@@ -1,6 +1,8 @@
 use std::sync::Arc;
 
-use alloy::{network::Ethereum, primitives::Address, transports::http::Http};
+use alloy::network::Ethereum;
+use alloy::primitives::Address;
+use alloy::transports::http::Http;
 
 use crate::clients::interfaces::validity_interface::StarknetValidityContract;
 use crate::types::LocalWalletSignerMiddleware;
diff --git a/crates/settlement-clients/ethereum/src/conversion.rs b/crates/settlement-clients/ethereum/src/conversion.rs
index c86eb89a..ea4ab011 100644
--- a/crates/settlement-clients/ethereum/src/conversion.rs
+++ b/crates/settlement-clients/ethereum/src/conversion.rs
@@ -1,7 +1,7 @@
 use alloy::primitives::U256;
 
-/// Converts a `&[Vec<u8>]` to `Vec<U256>`. Each inner slice is expected to be exactly 32 bytes long.
-/// Pads with zeros if any inner slice is shorter than 32 bytes.
+/// Converts a `&[Vec<u8>]` to `Vec<U256>`. Each inner slice is expected to be exactly 32 bytes
+/// long. Pads with zeros if any inner slice is shorter than 32 bytes.
 pub(crate) fn slice_slice_u8_to_vec_u256(slices: &[[u8; 32]]) -> Vec<U256> {
     slices.iter().map(|slice| slice_u8_to_u256(slice)).collect()
 }
diff --git a/crates/settlement-clients/ethereum/src/lib.rs b/crates/settlement-clients/ethereum/src/lib.rs
index cdf2788b..5ffc2254 100644
--- a/crates/settlement-clients/ethereum/src/lib.rs
+++ b/crates/settlement-clients/ethereum/src/lib.rs
@@ -3,35 +3,34 @@ pub mod config;
 pub mod conversion;
 pub mod types;
 
+use std::fmt::Write;
+use std::path::{Path, PathBuf};
+use std::str::FromStr;
+use std::sync::Arc;
+
 use alloy::consensus::{
     BlobTransactionSidecar, SignableTransaction, TxEip4844, TxEip4844Variant, TxEip4844WithSidecar, TxEnvelope,
 };
 use alloy::eips::eip2718::Encodable2718;
 use alloy::eips::eip2930::AccessList;
 use alloy::eips::eip4844::BYTES_PER_BLOB;
-use alloy::primitives::{Bytes, FixedBytes};
-use alloy::{
-    network::EthereumWallet,
-    primitives::{Address, B256, U256},
-    providers::{PendingTransactionConfig, Provider, ProviderBuilder},
-    rpc::types::TransactionReceipt,
-    signers::local::PrivateKeySigner,
-};
+use alloy::network::EthereumWallet;
+use alloy::primitives::{Address, Bytes, FixedBytes, B256, U256};
+use alloy::providers::{PendingTransactionConfig, Provider, ProviderBuilder};
+use alloy::rpc::types::TransactionReceipt;
+use alloy::signers::local::PrivateKeySigner;
 use async_trait::async_trait;
 use c_kzg::{Blob, Bytes32, KzgCommitment, KzgProof, KzgSettings};
 use color_eyre::eyre::eyre;
 use color_eyre::Result;
-use mockall::{automock, lazy_static, predicate::*};
+use mockall::predicate::*;
+use mockall::{automock, lazy_static};
 use rstest::rstest;
-use std::fmt::Write;
-use std::path::{Path, PathBuf};
-use std::str::FromStr;
-use std::sync::Arc;
-
-use crate::clients::interfaces::validity_interface::StarknetValidityContractTrait;
 use settlement_client_interface::{SettlementClient, SettlementVerificationStatus, SETTLEMENT_SETTINGS_NAME};
-use utils::{env_utils::get_env_var_or_panic, settings::SettingsProvider};
+use utils::env_utils::get_env_var_or_panic;
+use utils::settings::SettingsProvider;
 
+use crate::clients::interfaces::validity_interface::StarknetValidityContractTrait;
 use crate::clients::StarknetValidityContractClient;
 use crate::config::EthereumSettlementConfig;
 use crate::conversion::{slice_slice_u8_to_vec_u256, slice_u8_to_u256};
@@ -97,11 +96,7 @@ impl EthereumSettlementClient {
             &KZG_SETTINGS,
         )?;
 
-        if !eval {
-            Err(eyre!("ERROR : Assertion failed, not able to verify the proof."))
-        } else {
-            Ok(kzg_proof)
-        }
+        if !eval { Err(eyre!("ERROR : Assertion failed, not able to verify the proof.")) } else { Ok(kzg_proof) }
     }
 }
 
@@ -246,7 +241,8 @@ async fn prepare_sidecar(
 fn get_txn_input_bytes(program_output: Vec<[u8; 32]>, kzg_proof: [u8; 48]) -> Bytes {
     let program_output_hex_string = vec_u8_32_to_hex_string(program_output);
     let kzg_proof_hex_string = u8_48_to_hex_string(kzg_proof);
-    // cast keccak "updateStateKzgDA(uint256[] calldata programOutput, bytes calldata kzgProof)" | cut -b 1-10
+    // cast keccak "updateStateKzgDA(uint256[] calldata programOutput, bytes calldata kzgProof)" | cut
+    // -b 1-10
     let function_selector = "0x1a790556";
 
     Bytes::from(program_output_hex_string + &kzg_proof_hex_string + function_selector)
diff --git a/crates/settlement-clients/ethereum/src/types.rs
b/crates/settlement-clients/ethereum/src/types.rs index 3415ee0a..6ec5914c 100644 --- a/crates/settlement-clients/ethereum/src/types.rs +++ b/crates/settlement-clients/ethereum/src/types.rs @@ -1,11 +1,7 @@ -use alloy::{ - network::{Ethereum, EthereumWallet}, - providers::{ - fillers::{ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller}, - Identity, RootProvider, - }, - transports::http::{Client, Http}, -}; +use alloy::network::{Ethereum, EthereumWallet}; +use alloy::providers::fillers::{ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller}; +use alloy::providers::{Identity, RootProvider}; +use alloy::transports::http::{Client, Http}; pub type LocalWalletSignerMiddleware = FillProvider< JoinFill< diff --git a/crates/settlement-clients/starknet/src/lib.rs b/crates/settlement-clients/starknet/src/lib.rs index abcd10df..0957ea81 100644 --- a/crates/settlement-clients/starknet/src/lib.rs +++ b/crates/settlement-clients/starknet/src/lib.rs @@ -7,22 +7,18 @@ use async_trait::async_trait; use color_eyre::eyre::eyre; use color_eyre::Result; use lazy_static::lazy_static; -use mockall::{automock, predicate::*}; -use starknet::accounts::ConnectedAccount; -use starknet::core::types::{ExecutionResult, MaybePendingTransactionReceipt}; -use starknet::providers::Provider; -use starknet::{ - accounts::{Account, Call, ExecutionEncoding, SingleOwnerAccount}, - core::{ - types::{BlockId, BlockTag, FieldElement, FunctionCall}, - utils::get_selector_from_name, - }, - providers::{jsonrpc::HttpTransport, JsonRpcClient}, - signers::{LocalWallet, SigningKey}, +use mockall::automock; +use mockall::predicate::*; +use settlement_client_interface::{SettlementClient, SettlementVerificationStatus, SETTLEMENT_SETTINGS_NAME}; +use starknet::accounts::{Account, Call, ConnectedAccount, ExecutionEncoding, SingleOwnerAccount}; +use starknet::core::types::{ + BlockId, BlockTag, ExecutionResult, FieldElement, FunctionCall, MaybePendingTransactionReceipt, }; +use starknet::core::utils::get_selector_from_name; +use starknet::providers::jsonrpc::HttpTransport; +use starknet::providers::{JsonRpcClient, Provider}; +use starknet::signers::{LocalWallet, SigningKey}; use tokio::time::{sleep, Duration}; - -use settlement_client_interface::{SettlementClient, SettlementVerificationStatus, SETTLEMENT_SETTINGS_NAME}; use utils::env_utils::get_env_var_or_panic; use utils::settings::SettingsProvider; @@ -85,8 +81,8 @@ lazy_static! { get_selector_from_name("stateBlockNumber").expect("Invalid update state selector"); } -// TODO: Note that we already have an implementation of the appchain core contract client available here: -// https://github.com/keep-starknet-strange/zaun/tree/main/crates/l3/appchain-core-contract-client +// TODO: Note that we already have an implementation of the appchain core contract client available +// here: https://github.com/keep-starknet-strange/zaun/tree/main/crates/l3/appchain-core-contract-client // However, this implementation uses different FieldElement types, and incorporating all of them // into this repository would introduce unnecessary complexity. 
// Therefore, we will wait for the update of starknet_rs in the Zaun repository before adapting diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 77f06cdb..0c9b1997 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "stable" +channel = "nightly" components = ["clippy", "rustfmt"] targets = ["wasm32-unknown-unknown"] From 661adaf16e33f88b62947377381aa9323116b4f4 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Thu, 1 Aug 2024 18:34:40 +0530 Subject: [PATCH 22/44] feat : added queue checks to tests and revamped some tests --- Cargo.lock | 29 +- crates/orchestrator/Cargo.toml | 3 +- crates/orchestrator/src/jobs/da_job/mod.rs | 6 +- crates/orchestrator/src/jobs/mod.rs | 4 +- crates/orchestrator/src/tests/common/mod.rs | 64 ++ crates/orchestrator/src/tests/config.rs | 5 + crates/orchestrator/src/tests/jobs/mod.rs | 571 ++++++++++++++---- crates/settlement-clients/ethereum/src/lib.rs | 6 +- rust-toolchain.toml | 2 +- 9 files changed, 555 insertions(+), 135 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 76b771e5..fb8b39c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1647,9 +1647,9 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a4a5e448145999d7de17bf44a886900ecb834953408dae8aaf90465ce91c1dd" +checksum = "87c5f920ffd1e0526ec9e70e50bf444db50b204395a0fa7016bbf9e31ea1698f" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -1706,9 +1706,9 @@ dependencies = [ [[package]] name = "aws-sdk-sqs" -version = "1.29.1" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f3fb8da46554d08e63272e56495f7c94908c16dc62d3c7cc8a0fb4d7591726a" +checksum = "3587fbaf540d65337c2356ebf3f78fba160025b3d69634175f1ea3a7895738e9" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1795,9 +1795,9 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31eed8d45759b2c5fe7fd304dd70739060e9e0de509209036eabea14d0720cce" +checksum = "5df1b0fa6be58efe9d4ccc257df0a53b89cd8909e86591a13ca54817c87517be" dependencies = [ "aws-credential-types", "aws-smithy-eventstream", @@ -1867,9 +1867,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.60.8" +version = "0.60.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a7de001a1b9a25601016d8057ea16e31a45fdca3751304c8edf4ad72e706c08" +checksum = "d9cd0ae3d97daa0a2bf377a4d8e8e1362cae590c4a1aad0d40058ebca18eb91e" dependencies = [ "aws-smithy-eventstream", "aws-smithy-runtime-api", @@ -1907,9 +1907,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.6.0" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db83b08939838d18e33b5dbaf1a0f048f28c10bd28071ab7ce6f245451855414" +checksum = "ce87155eba55e11768b8c1afa607f3e864ae82f03caf63258b37455b0ad02537" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -1934,9 +1934,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b570ea39eb95bd32543f6e4032bce172cb6209b9bc8c83c770d08169e875afc" +checksum = "30819352ed0a04ecf6a2f3477e344d2d1ba33d43e0f09ad9047c12e0d923616f" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -1986,9 +1986,9 @@ dependencies 
= [ [[package]] name = "aws-types" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2009a9733865d0ebf428a314440bbe357cc10d0c16d86a8e15d32e9b47c1e80e" +checksum = "5221b91b3e441e6675310829fd8984801b772cb1546ef6c0e54dec9f1ac13fef" dependencies = [ "aws-credential-types", "aws-smithy-async", @@ -6320,6 +6320,7 @@ dependencies = [ "async-trait", "aws-config", "aws-sdk-s3", + "aws-sdk-sqs", "axum 0.7.5", "axum-macros", "bincode 1.3.3", diff --git a/crates/orchestrator/Cargo.toml b/crates/orchestrator/Cargo.toml index cf4a4ba1..76094adb 100644 --- a/crates/orchestrator/Cargo.toml +++ b/crates/orchestrator/Cargo.toml @@ -17,6 +17,7 @@ arc-swap = { workspace = true } async-std = "1.12.0" async-trait = { workspace = true } aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } +aws-sdk-sqs = "1.36.0" aws-sdk-s3 = { version = "1.38.0", features = ["behavior-version-latest"] } axum = { workspace = true, features = ["macros"] } axum-macros = { workspace = true } @@ -35,7 +36,7 @@ lazy_static = { workspace = true } log = "0.4.21" majin-blob-core = { git = "https://github.com/AbdelStark/majin-blob", branch = "main" } majin-blob-types = { git = "https://github.com/AbdelStark/majin-blob", branch = "main" } -mockall = { version = "0.13.0", features = ["nightly"] } +mockall = { version = "0.13.0" } mockall_double = "0.3.1" mongodb = { workspace = true, features = ["bson-uuid-1"], optional = true } num = { workspace = true } diff --git a/crates/orchestrator/src/jobs/da_job/mod.rs b/crates/orchestrator/src/jobs/da_job/mod.rs index f6c0452c..1cd5d33a 100644 --- a/crates/orchestrator/src/jobs/da_job/mod.rs +++ b/crates/orchestrator/src/jobs/da_job/mod.rs @@ -526,6 +526,10 @@ mod tests { new_hex_chars = new_hex_chars.trim_start_matches('0').to_string(); // Handle the case where the trimmed string is empty (e.g., data was all zeros) - if new_hex_chars.is_empty() { "0x0".to_string() } else { format!("0x{}", new_hex_chars) } + if new_hex_chars.is_empty() { + "0x0".to_string() + } else { + format!("0x{}", new_hex_chars) + } } } diff --git a/crates/orchestrator/src/jobs/mod.rs b/crates/orchestrator/src/jobs/mod.rs index 00d1338b..8a0e6803 100644 --- a/crates/orchestrator/src/jobs/mod.rs +++ b/crates/orchestrator/src/jobs/mod.rs @@ -163,7 +163,7 @@ pub async fn verify_job(id: Uuid) -> Result<()> { let verify_attempts = get_u64_from_metadata(&job.metadata, JOB_VERIFICATION_ATTEMPT_METADATA_KEY)?; if verify_attempts >= job_handler.max_verification_attempts() { // TODO: send alert - log::info!("Verification attempts exceeded for job {}. Marking as timedout.", job.id); + log::info!("Verification attempts exceeded for job {}. 
Marking as timed out.", job.id);
         config.database().update_job_status(&job, JobStatus::VerificationTimeout).await?;
         return Ok(());
     }
@@ -192,7 +192,7 @@ async fn get_job(id: Uuid) -> Result<JobItem> {
     }
 }
 
-fn increment_key_in_metadata(metadata: &HashMap<String, String>, key: &str) -> Result<HashMap<String, String>> {
+pub fn increment_key_in_metadata(metadata: &HashMap<String, String>, key: &str) -> Result<HashMap<String, String>> {
     let mut new_metadata = metadata.clone();
     let attempt = get_u64_from_metadata(metadata, key)?;
     let incremented_value = attempt.checked_add(1);
diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs
index 858635a9..09b0b223 100644
--- a/crates/orchestrator/src/tests/common/mod.rs
+++ b/crates/orchestrator/src/tests/common/mod.rs
@@ -4,11 +4,14 @@ use std::collections::HashMap;
 use std::sync::Arc;
 
 use ::uuid::Uuid;
+use aws_config::meta::region::RegionProviderChain;
+use aws_sdk_sqs::types::Message;
 use constants::*;
 use da_client_interface::MockDaClient;
 use mongodb::Client;
 use prover_client_interface::MockProverClient;
 use rstest::*;
+use serde::Deserialize;
 use settlement_client_interface::MockSettlementClient;
 use starknet::providers::jsonrpc::HttpTransport;
 use starknet::providers::JsonRpcClient;
@@ -22,6 +25,7 @@ use crate::database::{DatabaseConfig, MockDatabase};
 use crate::jobs::types::JobStatus::Created;
 use crate::jobs::types::JobType::DataSubmission;
 use crate::jobs::types::{ExternalId, JobItem};
+use crate::queue::job_queue::{JOB_PROCESSING_QUEUE, JOB_VERIFICATION_QUEUE};
 use crate::queue::MockQueueProvider;
 
 pub async fn init_config(
@@ -86,3 +90,63 @@ pub async fn drop_database() -> color_eyre::Result<()> {
     db_client.database("orchestrator").drop(None).await?;
     Ok(())
 }
+
+// SQS structs & functions
+// =============================================================
+
+pub async fn create_sqs_queues() -> color_eyre::Result<()> {
+    let sqs_client = get_sqs_client().await;
+
+    // Dropping sqs queues
+    let list_queues_output = sqs_client.list_queues().send().await?;
+    let queue_urls = list_queues_output.queue_urls();
+    println!("Found {} queues", queue_urls.len());
+    for queue_url in queue_urls {
+        match sqs_client.delete_queue().queue_url(queue_url).send().await {
+            Ok(_) => println!("Successfully deleted queue: {}", queue_url),
+            Err(e) => eprintln!("Error deleting queue {}: {:?}", queue_url, e),
+        }
+    }
+
+    // Creating SQS queues
+    sqs_client.create_queue().queue_name(JOB_PROCESSING_QUEUE).send().await?;
+    sqs_client.create_queue().queue_name(JOB_VERIFICATION_QUEUE).send().await?;
+    Ok(())
+}
+
+pub async fn list_messages_in_queue(queue_name: String) -> color_eyre::Result<Vec<Message>> {
+    let sqs_client = get_sqs_client().await;
+    let mut all_messages = Vec::new();
+    let mut continue_receiving = true;
+
+    while continue_receiving {
+        let receive_message_output = sqs_client
+            .receive_message()
+            .queue_url(format!("http://sqs.ap-south-1.localhost.localstack.cloud:4566/000000000000/{}", queue_name))
+            .max_number_of_messages(10) // SQS allows receiving up to 10 messages at a time
+            .visibility_timeout(30) // Hide received messages for 30 seconds
+            .wait_time_seconds(20) // Enable long polling, wait up to 20 seconds for messages
+            .send()
+            .await?;
+
+        let messages = receive_message_output.messages();
+        if messages.is_empty() {
+            continue_receiving = false;
+        } else {
+            all_messages.extend(messages.iter().cloned());
+        }
+    }
+
+    Ok(all_messages)
+}
+
+async fn get_sqs_client() -> aws_sdk_sqs::Client {
+    let region_provider = RegionProviderChain::default_provider().or_else("ap-south-1");
+    let config =
aws_config::from_env().region(region_provider).load().await; + aws_sdk_sqs::Client::new(&config) +} + +#[derive(Deserialize)] +pub struct MessagePayloadType { + pub(crate) id: Uuid, +} diff --git a/crates/orchestrator/src/tests/config.rs b/crates/orchestrator/src/tests/config.rs index a586a07d..8a787d81 100644 --- a/crates/orchestrator/src/tests/config.rs +++ b/crates/orchestrator/src/tests/config.rs @@ -65,6 +65,11 @@ impl TestConfigBuilder { self } + pub fn mock_db_client(mut self, db_client: Box) -> TestConfigBuilder { + self.database = Some(db_client); + self + } + pub async fn build(mut self) -> MockServer { dotenvy::from_filename("../.env.test").expect("Failed to load the .env file"); diff --git a/crates/orchestrator/src/tests/jobs/mod.rs b/crates/orchestrator/src/tests/jobs/mod.rs index c092cdc5..aa54f165 100644 --- a/crates/orchestrator/src/tests/jobs/mod.rs +++ b/crates/orchestrator/src/tests/jobs/mod.rs @@ -25,178 +25,515 @@ async fn create_job_fails_works_new_job() { mod job_handler_tests { use std::collections::HashMap; use std::sync::Arc; + use std::time::Duration; use mockall::predicate::eq; + use mongodb::bson::doc; use rstest::rstest; + use tokio::time::sleep; use uuid::Uuid; use crate::config::config; + use crate::database::mongodb::config::MongoDbConfig; + use crate::database::mongodb::MongoDb; + use crate::database::{Database, DatabaseConfig, MockDatabase}; use crate::jobs::constants::{JOB_PROCESS_ATTEMPT_METADATA_KEY, JOB_VERIFICATION_ATTEMPT_METADATA_KEY}; use crate::jobs::job_handler_factory::mock_factory; use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType, JobVerificationStatus}; - use crate::jobs::{create_job, process_job, verify_job, Job, MockJob}; - use crate::tests::common::drop_database; + use crate::jobs::{create_job, increment_key_in_metadata, process_job, verify_job, Job, MockJob}; + use crate::queue::job_queue::{JOB_PROCESSING_QUEUE, JOB_VERIFICATION_QUEUE}; + use crate::tests::common::{create_sqs_queues, drop_database, list_messages_in_queue, MessagePayloadType}; use crate::tests::config::TestConfigBuilder; + /// Tests `create_job` function when job is not existing in the db. #[rstest] - #[case(JobType::SnosRun, false, true)] - #[case(JobType::ProofCreation, true, true)] - #[should_panic] - #[case(JobType::ProofRegistration, false, false)] #[tokio::test] - async fn test_create_job_handler( - #[case] job_type: JobType, - #[case] job_exists_in_db: bool, - #[case] job_implemented: bool, - ) { - let job_item = JobItem { - id: Uuid::new_v4(), - internal_id: "0".to_string(), - job_type: job_type.clone(), - status: JobStatus::Created, - external_id: ExternalId::Number(0), - metadata: Default::default(), - version: 0, - }; - + async fn test_create_job_handler_job_does_not_exists_in_db() { + let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "0".to_string()); let mut job_handler = MockJob::new(); - if job_implemented && !job_exists_in_db { - // Expecting for create_job handler for that particular job. - let job_item_clone = job_item.clone(); - job_handler.expect_create_job().times(1).returning(move |_, _, _| Ok(job_item_clone.clone())); - } + + // Adding expectation for creation of new job. + let job_item_clone = job_item.clone(); + job_handler.expect_create_job().times(1).returning(move |_, _, _| Ok(job_item_clone.clone())); TestConfigBuilder::new().build().await; drop_database().await.unwrap(); + create_sqs_queues().await.unwrap(); + + // Mocking the `get_job_handler` call in create_job function. 
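// (mockall exposes the mocked `get_job_handler` through a static context
// guard; the handler is wrapped in an Arc<Box<dyn Job>> because MockJob does
// not implement Clone, so the expectation closure hands out Arc clones of a
// single handler across calls — the factory docs touched later in this
// series spell this out.)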
+ let y: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + ctx.expect().times(1).with(eq(JobType::SnosRun)).return_once(move |_| Arc::clone(&y)); + + let _ = create_job(JobType::SnosRun, "0".to_string(), HashMap::new()).await.is_ok(); + + let messages_in_queue = list_messages_in_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); + assert_eq!(messages_in_queue.len(), 1); + let message_0_body: MessagePayloadType = + serde_json::from_str(&messages_in_queue[0].clone().body.unwrap()).unwrap(); + assert_eq!(message_0_body.id, job_item.id); + } + + /// Tests `create_job` function when job is already existing in the db. + #[rstest] + #[tokio::test] + async fn test_create_job_handler_job_exists_in_db() { + let job_item = build_job_item_by_type_and_status(JobType::ProofCreation, JobStatus::Created, "0".to_string()); + + TestConfigBuilder::new().build().await; + drop_database().await.unwrap(); + create_sqs_queues().await.unwrap(); let config = config().await; let database_client = config.database(); + database_client.create_job(job_item).await.unwrap(); - if job_exists_in_db { - database_client.create_job(job_item).await.unwrap(); - } + let _ = create_job(JobType::ProofCreation, "0".to_string(), HashMap::new()).await.is_err(); - if job_implemented && !job_exists_in_db { - let y: Arc> = Arc::new(Box::new(job_handler)); - let ctx = mock_factory::get_job_handler_context(); - // Mocking the `get_job_handler` call in create_job function. - ctx.expect().times(1).with(eq(job_type.clone())).returning(move |_| Arc::clone(&y)); - let _ = create_job(job_type.clone(), "0".to_string(), HashMap::new()).await.is_ok(); - } else { - let _ = create_job(job_type, "0".to_string(), HashMap::new()).await.is_err(); - } + let messages_in_queue = list_messages_in_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); + assert_eq!(messages_in_queue.len(), 0); } + /// Tests `create_job` function when job handler is not implemented in the `get_job_handler` + /// This test should fail as job handler is not implemented in the `factory.rs` #[rstest] - #[case(JobType::SnosRun, JobStatus::Created, true)] - #[case(JobType::DataSubmission, JobStatus::VerificationFailed("".to_string()), true)] - #[case(JobType::SnosRun, JobStatus::VerificationFailed("".to_string()), false)] + #[should_panic(expected = "Job type not implemented yet.")] #[tokio::test] - async fn test_process_job_handler( + async fn test_create_job_handler_job_handler_is_not_implemented() { + TestConfigBuilder::new().build().await; + drop_database().await.unwrap(); + create_sqs_queues().await.unwrap(); + + // Mocking the `get_job_handler` call in create_job function. + let ctx = mock_factory::get_job_handler_context(); + ctx.expect().times(1).with(eq(JobType::ProofCreation)).returning(|_| panic!("Job type not implemented yet.")); + + let _ = create_job(JobType::ProofCreation, "0".to_string(), HashMap::new()).await.is_err(); + + let messages_in_queue = list_messages_in_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); + assert_eq!(messages_in_queue.len(), 0); + } + + /// Tests `process_job` function when job is already existing in the db and job status is either + /// `Created` or `VerificationFailed`. 
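In spirit, the "valid processing status" gate these cases exercise is the predicate the `is_valid_job_processing_status` helper this diff removes spelled out — a minimal sketch, assuming `JobStatus` is in scope:

fn can_process(status: &JobStatus) -> bool {
    // Only freshly created jobs and jobs whose verification failed
    // are eligible for (re-)processing.
    matches!(status, JobStatus::Created | JobStatus::VerificationFailed(_))
}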
+ #[rstest] + #[case(JobType::SnosRun, JobStatus::Created)] + #[case(JobType::DataSubmission, JobStatus::VerificationFailed("".to_string()))] + #[tokio::test] + async fn test_process_job_handler_job_exists_in_db_and_valid_job_processing_status( #[case] job_type: JobType, #[case] job_status: JobStatus, - #[case] job_exists_in_db: bool, ) { - let job_item = get_random_job_item_by_type_and_status(job_type.clone(), job_status.clone(), "1".to_string()); + let job_item = build_job_item_by_type_and_status(job_type.clone(), job_status.clone(), "1".to_string()); + + // Building config + TestConfigBuilder::new().build().await; + drop_database().await.unwrap(); + create_sqs_queues().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + let mut job_handler = MockJob::new(); + + // Creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + // Expecting process job function in job processor to return the external ID. + job_handler.expect_process_job().times(1).returning(move |_, _| Ok("0xbeef".to_string())); + job_handler.expect_verification_polling_delay_seconds().return_const(1u64); + + // Mocking the `get_job_handler` call in create_job function. + let y: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + ctx.expect().times(1).with(eq(job_type.clone())).returning(move |_| Arc::clone(&y)); + + let _ = process_job(job_item.id).await.is_ok(); + // Getting the updated job. + let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + // checking if job_status is updated in db + assert_eq!(updated_job.status, JobStatus::PendingVerification); + + // Queue checks + let messages_in_queue = list_messages_in_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); + assert_eq!(messages_in_queue.len(), 1); + let message_0_body: MessagePayloadType = + serde_json::from_str(&messages_in_queue[0].clone().body.unwrap()).unwrap(); + assert_eq!(message_0_body.id, job_item.id); + } + + /// Tests `process_job` function when job is already existing in the db and job status is not + /// `Created` or `VerificationFailed`. + #[rstest] + #[tokio::test] + async fn test_process_job_handler_job_exists_in_db_and_invalid_job_processing_status() { + // Creating a job with Completed status which is invalid processing. + let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Completed, "1".to_string()); // building config TestConfigBuilder::new().build().await; drop_database().await.unwrap(); + create_sqs_queues().await.unwrap(); let config = config().await; let database_client = config.database(); + + // creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + + let _ = process_job(job_item.id).await.is_err(); + + let job_in_db = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + // Job should be untouched in db. + assert_eq!(job_in_db.status, JobStatus::Completed); + + let messages_in_queue = list_messages_in_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); + assert_eq!(messages_in_queue.len(), 0); + } + + /// Tests `process_job` function when job is not in the db + /// This test should fail + #[rstest] + #[tokio::test] + async fn test_process_job_handler_job_does_not_exists_in_db() { + // Creating a valid job which is not existing in the db. 
+ let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "1".to_string()); + + // building config + TestConfigBuilder::new().build().await; + drop_database().await.unwrap(); + create_sqs_queues().await.unwrap(); + + let _ = process_job(job_item.id).await.is_err(); + let messages_in_queue = list_messages_in_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); + assert_eq!(messages_in_queue.len(), 0); + } + + /// Tests `process_job` function when 2 workers try to process the same job. + /// This test should fail because once the job is locked for processing on one + /// worker it should not be accessed by another worker and should throw an error + /// when updating the job status. + #[rstest] + #[tokio::test] + async fn test_process_job_two_workers_process_same_job() { + // Loading .env.test to get the db client + dotenvy::from_filename("../.env.test").expect("Failed to load the .env file"); + + drop_database().await.unwrap(); + + // Creating a valid job which is not existing in the db. + let mut job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "1".to_string()); + let job_item_cloned = job_item.clone(); + let mut job_handler = MockJob::new(); - if job_exists_in_db { - // creating job in database - database_client.create_job(job_item.clone()).await.unwrap(); - // expecting process job function in job processor to return the external ID - job_handler.expect_process_job().times(1).returning(move |_, _| Ok("0xbeef".to_string())); - job_handler.expect_verification_polling_delay_seconds().return_const(1u64); - } + // Expecting process job function in job processor to return the external ID. + job_handler.expect_process_job().times(2).returning(move |_, _| Ok("0xbeef".to_string())); + job_handler.expect_verification_polling_delay_seconds().return_const(1u64); - if job_exists_in_db && is_valid_job_processing_status(job_status) { - let y: Arc> = Arc::new(Box::new(job_handler)); - let ctx = mock_factory::get_job_handler_context(); - // Mocking the `get_job_handler` call in create_job function. - ctx.expect().times(1).with(eq(job_type.clone())).returning(move |_| Arc::clone(&y)); - - let _ = process_job(job_item.id).await.is_ok(); - // getting the job - let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - // checking if job_status is updated in db - assert_eq!(updated_job.status, JobStatus::PendingVerification); - } else { - let _ = process_job(job_item.id).await.is_err(); - } + // Creating a new mongo db client for actual database. + let db_client: Box = Box::new(get_mongo_db_client().await); + // Creating the job in actual db instance + db_client.create_job(job_item.clone()).await.unwrap(); + let job_in_db = db_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + + println!("job_in_db : {:?}", job_in_db); + + // Spinning up a new mock database. + let mut database = MockDatabase::new(); + + let metadata = + crate::jobs::increment_key_in_metadata(&job_item.metadata, JOB_PROCESS_ATTEMPT_METADATA_KEY).unwrap(); + job_item.external_id = "0xbeef".to_string().into(); + job_item.status = JobStatus::PendingVerification; + job_item.metadata = metadata; + + let job_in_db_clone = job_in_db.clone(); + + // Adding expectations for mock database. 
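The expectations below stand in for a version-guarded update. Conceptually, the database layer does something like the following — a sketch only; the collection handle, field names, BSON encodings, and driver call shapes are assumptions, not the crate's actual code:

use mongodb::bson::doc;

async fn lock_for_processing_guarded(
    jobs: &mongodb::Collection<JobItem>,
    job: &JobItem,
) -> color_eyre::Result<()> {
    // Match on (internal_id, version) so a writer holding a stale version
    // matches zero documents instead of clobbering the newer state.
    let filter = doc! { "internal_id": job.internal_id.clone(), "version": job.version as i64 };
    let update = doc! { "$set": { "status": "LockedForProcessing", "version": job.version as i64 + 1 } };
    let result = jobs.update_one(filter, update, None).await?;
    if result.modified_count == 0 {
        // Another worker already bumped the version: the optimistic lock lost.
        return Err(color_eyre::eyre::eyre!("version conflict while locking job"));
    }
    Ok(())
}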
+ database + .expect_get_job_by_id() + .times(1) + .with(eq(job_in_db_clone.id)) + .return_once(move |_| Ok(Some(job_in_db.clone()))); + database + .expect_update_job_status() + .with(eq(job_in_db_clone.clone()), eq(JobStatus::LockedForProcessing)) + .times(1) + .returning(|_, _| Ok(())); + database.expect_update_job().with(eq(job_item.clone())).times(1).returning(|_| Ok(())); + + // Mocking the `get_job_handler` call in create_job function. + let y: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + ctx.expect().times(2).with(eq(JobType::SnosRun)).returning(move |_| Arc::clone(&y)); + + // building config + TestConfigBuilder::new().mock_db_client(Box::new(database)).build().await; + + let _ = process_job(job_item.id).await.is_ok(); + // Updating the job in db. + db_client.update_job_status(&job_item, JobStatus::LockedForProcessing).await.unwrap(); + + // Worker 2 Attempt + // =================================================================== + + // Spinning up a new mock database. + let mut database_2 = MockDatabase::new(); + let mut job_item_clone_2 = job_item_cloned.clone(); + + let metadata = + crate::jobs::increment_key_in_metadata(&job_item_clone_2.metadata, JOB_PROCESS_ATTEMPT_METADATA_KEY) + .unwrap(); + job_item_clone_2.external_id = "0xbeef".to_string().into(); + job_item_clone_2.status = JobStatus::PendingVerification; + job_item_clone_2.metadata = metadata; + + // Adding expectations for mock database. + database_2 + .expect_get_job_by_id() + .times(1) + .with(eq(job_in_db_clone.id)) + .returning(move |_| Ok(Some(job_item_cloned.clone()))); + database_2 + .expect_update_job_status() + .with(eq(job_in_db_clone.clone()), eq(JobStatus::LockedForProcessing)) + .times(1) + .returning(|_, _| Ok(())); + database_2.expect_update_job().with(eq(job_item_clone_2.clone())).times(1).returning(|_| Ok(())); + sleep(Duration::from_secs(2)).await; + + // Making new config with database 2 mock + TestConfigBuilder::new().mock_db_client(Box::new(database_2)).build().await; + + let _ = process_job(job_item_clone_2.id).await.is_ok(); + + // This should fail as there would be conflicting versions. + let _ = db_client.update_job_status(&job_item, JobStatus::LockedForProcessing).await.is_err(); } + /// Tests `verify_job` function when job is having expected status + /// and returns a `Verified` verification status. 
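// For orientation: the verify_job tests below cover its three outcomes
// (summarised from the assertions; the actual match lives in jobs/mod.rs):
//   Verified    -> job is marked Completed
//   Rejected(e) -> job is marked VerificationFailed(e) and re-queued for
//                  processing, unless max_process_attempts is reached
//   Pending     -> the verification attempt counter is bumped and the job is
//                  re-queued for verification, unless max_verification_attempts
//                  is reached, in which case it becomes VerificationTimeout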
#[rstest] - #[case(JobType::DataSubmission, JobStatus::PendingVerification, JobVerificationStatus::Verified, true)] - #[case(JobType::DataSubmission, JobStatus::PendingVerification, JobVerificationStatus::Rejected("".to_string()), true)] - #[case(JobType::DataSubmission, JobStatus::PendingVerification, JobVerificationStatus::Pending, true)] - #[case(JobType::SnosRun, JobStatus::Created, JobVerificationStatus::Rejected("".to_string()), false)] #[tokio::test] - async fn test_verify_job_handler( - #[case] job_type: JobType, - #[case] job_status: JobStatus, - #[case] verification_status: JobVerificationStatus, - #[case] job_exists_in_db: bool, - ) { - let job_item = get_random_job_item_by_type_and_status(job_type.clone(), job_status.clone(), "1".to_string()); - let expected_verification_status = verification_status.clone(); + async fn test_verify_job_handler_with_expected_job_status_and_verified_status_return() { + let job_item = + build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); + // building config TestConfigBuilder::new().build().await; drop_database().await.unwrap(); + create_sqs_queues().await.unwrap(); let config = config().await; let database_client = config.database(); let mut job_handler = MockJob::new(); - if job_exists_in_db { - // creating job in database - database_client.create_job(job_item.clone()).await.unwrap(); - // expecting process job function in job processor to return the external ID - job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(verification_status.clone())); - job_handler.expect_max_process_attempts().returning(move || 2u64); - job_handler.expect_max_verification_attempts().returning(move || 2u64); - job_handler.expect_verification_polling_delay_seconds().returning(move || 2u64); - } + // creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + // expecting process job function in job processor to return the external ID + job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Verified)); + job_handler.expect_max_process_attempts().returning(move || 2u64); - if job_exists_in_db && is_valid_job_verification_status(job_status) { - let y: Arc> = Arc::new(Box::new(job_handler)); - let ctx = mock_factory::get_job_handler_context(); - // Mocking the `get_job_handler` call in create_job function. - ctx.expect().times(1).with(eq(job_type.clone())).returning(move |_| Arc::clone(&y)); - - let _ = verify_job(job_item.id).await.is_ok(); - - let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - - if expected_verification_status == JobVerificationStatus::Verified { - assert_eq!(updated_job.status, JobStatus::Completed); - } else if expected_verification_status == JobVerificationStatus::Rejected("".to_string()) { - assert_eq!(updated_job.status, JobStatus::VerificationFailed("".to_string())); - } else if expected_verification_status == JobVerificationStatus::Pending { - assert_eq!(updated_job.metadata.get(JOB_VERIFICATION_ATTEMPT_METADATA_KEY).unwrap(), "1"); - } - } else { - let _ = verify_job(job_item.id).await.is_err(); - } + let y: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + // Mocking the `get_job_handler` call in create_job function. + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&y)); + + let _ = verify_job(job_item.id).await.is_ok(); + + // DB checks. 
+ let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(updated_job.status, JobStatus::Completed); + + // Queue checks. + let messages_in_process_queue = list_messages_in_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); + let messages_in_verification_queue = list_messages_in_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); + assert_eq!(messages_in_process_queue.len(), 0); + assert_eq!(messages_in_verification_queue.len(), 0); } - fn is_valid_job_processing_status(job_status: JobStatus) -> bool { - matches!(job_status, JobStatus::Created | JobStatus::VerificationFailed(_)) + /// Tests `verify_job` function when job is having expected status + /// and returns a `Rejected` verification status. + #[rstest] + #[tokio::test] + async fn test_verify_job_handler_with_expected_job_status_and_rejected_status_return_and_adds_process_to_job_queue() + { + let job_item = + build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); + + // building config + TestConfigBuilder::new().build().await; + drop_database().await.unwrap(); + create_sqs_queues().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + let mut job_handler = MockJob::new(); + + // creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + // expecting process job function in job processor to return the external ID + job_handler + .expect_verify_job() + .times(1) + .returning(move |_, _| Ok(JobVerificationStatus::Rejected("".to_string()))); + job_handler.expect_max_process_attempts().returning(move || 2u64); + + let y: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + // Mocking the `get_job_handler` call in create_job function. + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&y)); + + let _ = verify_job(job_item.id).await.is_ok(); + + // DB checks. + let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(updated_job.status, JobStatus::VerificationFailed("".to_string())); + + // Queue checks. + let messages_in_queue = list_messages_in_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); + assert_eq!(messages_in_queue.len(), 1); + let message_0_body: MessagePayloadType = + serde_json::from_str(&messages_in_queue[0].clone().body.unwrap()).unwrap(); + assert_eq!(message_0_body.id, job_item.id); } - fn is_valid_job_verification_status(job_status: JobStatus) -> bool { - matches!(job_status, JobStatus::PendingVerification) + /// Tests `verify_job` function when job is having expected status + /// and returns a `Rejected` verification status but doesn't add + /// the job to process queue because of maximum attempts reached. + #[rstest] + #[tokio::test] + async fn test_verify_job_handler_with_expected_job_status_and_rejected_status_return() { + let mut job_item = + build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); + + // increasing JOB_VERIFICATION_ATTEMPT_METADATA_KEY to simulate max. attempts reached. 
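Note that the key bumped below is the process-attempt counter, not the verification key named in the comment above: for a `Rejected` outcome the re-queue gate is `max_process_attempts` (mocked to 1 in this test). For reference, a simplified sketch of the helper — the real `increment_key_in_metadata` in jobs/mod.rs parses via `get_u64_from_metadata` and turns overflow into an error through `checked_add`; `HashMap` is assumed in scope:

fn increment_key_sketch(metadata: &HashMap<String, String>, key: &str) -> HashMap<String, String> {
    let mut new_metadata = metadata.clone();
    // A missing or unparsable key counts as 0 in this sketch.
    let attempt: u64 = metadata.get(key).and_then(|v| v.parse().ok()).unwrap_or(0);
    new_metadata.insert(key.to_string(), (attempt + 1).to_string());
    new_metadata
}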
+ let metadata = increment_key_in_metadata(&job_item.metadata, JOB_PROCESS_ATTEMPT_METADATA_KEY).unwrap(); + job_item.metadata = metadata; + + // building config + TestConfigBuilder::new().build().await; + drop_database().await.unwrap(); + create_sqs_queues().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + let mut job_handler = MockJob::new(); + + // creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + // expecting process job function in job processor to return the external ID + job_handler + .expect_verify_job() + .times(1) + .returning(move |_, _| Ok(JobVerificationStatus::Rejected("".to_string()))); + job_handler.expect_max_process_attempts().returning(move || 1u64); + + let y: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + // Mocking the `get_job_handler` call in create_job function. + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&y)); + + let _ = verify_job(job_item.id).await.is_ok(); + + // DB checks. + let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(updated_job.status, JobStatus::VerificationFailed("".to_string())); + + // Queue checks. + let messages_in_queue = list_messages_in_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); + assert_eq!(messages_in_queue.len(), 0); + } + + /// Tests `verify_job` function when job is having expected status + /// and returns a `Pending` verification status. + #[rstest] + #[tokio::test] + async fn test_verify_job_handler_with_expected_job_status_and_pending_status_return_and_adds_job_to_verification_queue( + ) { + let job_item = + build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); + + // building config + TestConfigBuilder::new().build().await; + drop_database().await.unwrap(); + create_sqs_queues().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + let mut job_handler = MockJob::new(); + + // creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + // expecting process job function in job processor to return the external ID + job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Pending)); + job_handler.expect_max_verification_attempts().returning(move || 2u64); + job_handler.expect_verification_polling_delay_seconds().returning(move || 2u64); + + let y: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + // Mocking the `get_job_handler` call in create_job function. + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&y)); + + let _ = verify_job(job_item.id).await.is_ok(); + + // DB checks. + let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(updated_job.metadata.get(JOB_VERIFICATION_ATTEMPT_METADATA_KEY).unwrap(), "1"); + + // Queue checks. 
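// (list_messages_in_queue long-polls and keeps receiving until the queue
// stops returning messages, so the counts asserted below reflect everything
// that was enqueued rather than a single racy poll.)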
+ let messages_in_queue = list_messages_in_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); + assert_eq!(messages_in_queue.len(), 1); + let message_0_body: MessagePayloadType = + serde_json::from_str(&messages_in_queue[0].clone().body.unwrap()).unwrap(); + assert_eq!(message_0_body.id, job_item.id); } - fn get_random_job_item_by_type_and_status( - job_type: JobType, - job_status: JobStatus, - internal_id: String, - ) -> JobItem { + /// Tests `verify_job` function when job is having expected status + /// and returns a `Pending` verification status but doesn't add + /// the job to process queue because of maximum attempts reached. + #[rstest] + #[tokio::test] + async fn test_verify_job_handler_with_expected_job_status_and_pending_status_return() { + let mut job_item = + build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); + + // increasing JOB_VERIFICATION_ATTEMPT_METADATA_KEY to simulate max. attempts reached. + let metadata = increment_key_in_metadata(&job_item.metadata, JOB_VERIFICATION_ATTEMPT_METADATA_KEY).unwrap(); + job_item.metadata = metadata; + + // building config + TestConfigBuilder::new().build().await; + drop_database().await.unwrap(); + create_sqs_queues().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + let mut job_handler = MockJob::new(); + + // creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + // expecting process job function in job processor to return the external ID + job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Pending)); + job_handler.expect_max_verification_attempts().returning(move || 1u64); + job_handler.expect_verification_polling_delay_seconds().returning(move || 2u64); + + let y: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + // Mocking the `get_job_handler` call in create_job function. + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&y)); + + let _ = verify_job(job_item.id).await.is_ok(); + + // DB checks. + let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(updated_job.status, JobStatus::VerificationTimeout); + + // Queue checks. 
+ let messages_in_queue = list_messages_in_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); + assert_eq!(messages_in_queue.len(), 0); + } + + fn build_job_item_by_type_and_status(job_type: JobType, job_status: JobStatus, internal_id: String) -> JobItem { let mut hashmap: HashMap = HashMap::new(); hashmap.insert(JOB_PROCESS_ATTEMPT_METADATA_KEY.to_string(), "0".to_string()); hashmap.insert(JOB_VERIFICATION_ATTEMPT_METADATA_KEY.to_string(), "0".to_string()); @@ -210,4 +547,8 @@ mod job_handler_tests { version: 0, } } + + async fn get_mongo_db_client() -> MongoDb { + MongoDb::new(MongoDbConfig::new_from_env()).await + } } diff --git a/crates/settlement-clients/ethereum/src/lib.rs b/crates/settlement-clients/ethereum/src/lib.rs index 5ffc2254..ff8075b7 100644 --- a/crates/settlement-clients/ethereum/src/lib.rs +++ b/crates/settlement-clients/ethereum/src/lib.rs @@ -96,7 +96,11 @@ impl EthereumSettlementClient { &KZG_SETTINGS, )?; - if !eval { Err(eyre!("ERROR : Assertion failed, not able to verify the proof.")) } else { Ok(kzg_proof) } + if !eval { + Err(eyre!("ERROR : Assertion failed, not able to verify the proof.")) + } else { + Ok(kzg_proof) + } } } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 0c9b1997..77f06cdb 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly" +channel = "stable" components = ["clippy", "rustfmt"] targets = ["wasm32-unknown-unknown"] From 6ecef6005ec67927e807d4e987d276522717aacd Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 2 Aug 2024 16:26:24 +0530 Subject: [PATCH 23/44] feat : updated tests and resolved comments --- CHANGELOG.md | 3 + .../src/jobs/job_handler_factory.rs | 4 +- crates/orchestrator/src/tests/common/mod.rs | 34 +- crates/orchestrator/src/tests/config.rs | 6 + crates/orchestrator/src/tests/jobs/mod.rs | 318 +++++++----------- 5 files changed, 137 insertions(+), 228 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d7b56ebe..9a913a51 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). ## Changed +- GitHub's coverage CI yml file for localstack and db testing. +- Orchestrator :Moved TestConfigBuilder to `config.rs` in tests folder. + ## Removed - `fetch_from_test` argument diff --git a/crates/orchestrator/src/jobs/job_handler_factory.rs b/crates/orchestrator/src/jobs/job_handler_factory.rs index a61a13d8..5a4daef3 100644 --- a/crates/orchestrator/src/jobs/job_handler_factory.rs +++ b/crates/orchestrator/src/jobs/job_handler_factory.rs @@ -48,8 +48,8 @@ pub mod factory { /// to the same Box. So when we are mocking the behaviour : /// /// - We create the MockJob - /// - /// - And clone that to returning in the mock get_job_handler. + /// - We return this mocked job whenever a function calls `get_job_handler` + /// - Making it an Arc allows us to return the same MockJob in multiple calls to `get_job_handler`. 
This is needed because `MockJob` doesn't implement Clone pub async fn get_job_handler(job_type: &JobType) -> Arc> { // Original implementation let job: Box = match job_type { diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index 09b0b223..f7a6e2e2 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -5,7 +5,6 @@ use std::sync::Arc; use ::uuid::Uuid; use aws_config::meta::region::RegionProviderChain; -use aws_sdk_sqs::types::Message; use constants::*; use da_client_interface::MockDaClient; use mongodb::Client; @@ -92,7 +91,6 @@ pub async fn drop_database() -> color_eyre::Result<()> { } // SQS structs & functions -// ============================================================= pub async fn create_sqs_queues() -> color_eyre::Result<()> { let sqs_client = get_sqs_client().await; @@ -100,10 +98,10 @@ pub async fn create_sqs_queues() -> color_eyre::Result<()> { // Dropping sqs queues let list_queues_output = sqs_client.list_queues().send().await?; let queue_urls = list_queues_output.queue_urls(); - println!("Found {} queues", queue_urls.len()); + log::debug!("Found {} queues", queue_urls.len()); for queue_url in queue_urls { match sqs_client.delete_queue().queue_url(queue_url).send().await { - Ok(_) => println!("Successfully deleted queue: {}", queue_url), + Ok(_) => log::debug!("Successfully deleted queue: {}", queue_url), Err(e) => eprintln!("Error deleting queue {}: {:?}", queue_url, e), } } @@ -114,39 +112,13 @@ pub async fn create_sqs_queues() -> color_eyre::Result<()> { Ok(()) } -pub async fn list_messages_in_queue(queue_name: String) -> color_eyre::Result> { - let sqs_client = get_sqs_client().await; - let mut all_messages = Vec::new(); - let mut continue_receiving = true; - - while continue_receiving { - let receive_message_output = sqs_client - .receive_message() - .queue_url(format!("http://sqs.ap-south-1.localhost.localstack.cloud:4566/000000000000/{}", queue_name)) - .max_number_of_messages(10) // SQS allows receiving up to 10 messages at a time - .visibility_timeout(30) // Hide received messages for 30 seconds - .wait_time_seconds(20) // Enable long polling, wait up to 20 seconds for messages - .send() - .await?; - - let messages = receive_message_output.messages(); - if messages.is_empty() { - continue_receiving = false; - } else { - all_messages.extend(messages.iter().cloned()); - } - } - - Ok(all_messages) -} - async fn get_sqs_client() -> aws_sdk_sqs::Client { let region_provider = RegionProviderChain::default_provider().or_else("ap-south-1"); let config = aws_config::from_env().region(region_provider).load().await; aws_sdk_sqs::Client::new(&config) } -#[derive(Deserialize)] +#[derive(Deserialize, Debug)] pub struct MessagePayloadType { pub(crate) id: Uuid, } diff --git a/crates/orchestrator/src/tests/config.rs b/crates/orchestrator/src/tests/config.rs index 8a787d81..d4831a6c 100644 --- a/crates/orchestrator/src/tests/config.rs +++ b/crates/orchestrator/src/tests/config.rs @@ -18,6 +18,7 @@ use crate::database::mongodb::MongoDb; use crate::database::{Database, DatabaseConfig}; use crate::queue::sqs::SqsQueue; use crate::queue::QueueProvider; +use crate::tests::common::{create_sqs_queues, drop_database}; // Inspiration : https://rust-unofficial.github.io/patterns/patterns/creational/builder.html // TestConfigBuilder allows to heavily customise the global configs based on the test's requirement. 
// Eg: We want to mock only the da client and leave rest to be as it is, use mock_da_client. @@ -125,6 +126,11 @@ impl TestConfigBuilder { } } + // Deleting and Creating the queues in sqs. + create_sqs_queues().await.expect("Not able to delete and create the queues."); + // Deleting the database + drop_database().await.expect("Unable to drop the database."); + // return config and server as tuple let config = Config::new( self.starknet_client.unwrap(), diff --git a/crates/orchestrator/src/tests/jobs/mod.rs b/crates/orchestrator/src/tests/jobs/mod.rs index aa54f165..fb07f7dc 100644 --- a/crates/orchestrator/src/tests/jobs/mod.rs +++ b/crates/orchestrator/src/tests/jobs/mod.rs @@ -1,5 +1,3 @@ -use rstest::rstest; - #[cfg(test)] pub mod da_job; @@ -9,19 +7,6 @@ pub mod proving_job; #[cfg(test)] pub mod state_update_job; -#[rstest] -#[tokio::test] -async fn create_job_fails_job_already_exists() { - // TODO -} - -#[rstest] -#[tokio::test] -async fn create_job_fails_works_new_job() { - // TODO -} - -#[cfg(test)] mod job_handler_tests { use std::collections::HashMap; use std::sync::Arc; @@ -29,26 +14,24 @@ mod job_handler_tests { use mockall::predicate::eq; use mongodb::bson::doc; + use omniqueue::QueueError; use rstest::rstest; use tokio::time::sleep; use uuid::Uuid; use crate::config::config; - use crate::database::mongodb::config::MongoDbConfig; - use crate::database::mongodb::MongoDb; - use crate::database::{Database, DatabaseConfig, MockDatabase}; use crate::jobs::constants::{JOB_PROCESS_ATTEMPT_METADATA_KEY, JOB_VERIFICATION_ATTEMPT_METADATA_KEY}; use crate::jobs::job_handler_factory::mock_factory; use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType, JobVerificationStatus}; use crate::jobs::{create_job, increment_key_in_metadata, process_job, verify_job, Job, MockJob}; use crate::queue::job_queue::{JOB_PROCESSING_QUEUE, JOB_VERIFICATION_QUEUE}; - use crate::tests::common::{create_sqs_queues, drop_database, list_messages_in_queue, MessagePayloadType}; + use crate::tests::common::MessagePayloadType; use crate::tests::config::TestConfigBuilder; /// Tests `create_job` function when job is not existing in the db. #[rstest] #[tokio::test] - async fn test_create_job_handler_job_does_not_exists_in_db() { + async fn create_job_handler_job_does_not_exists_in_db() { let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "0".to_string()); let mut job_handler = MockJob::new(); @@ -57,32 +40,36 @@ mod job_handler_tests { job_handler.expect_create_job().times(1).returning(move |_, _, _| Ok(job_item_clone.clone())); TestConfigBuilder::new().build().await; - drop_database().await.unwrap(); - create_sqs_queues().await.unwrap(); + let config = config().await; // Mocking the `get_job_handler` call in create_job function. - let y: Arc> = Arc::new(Box::new(job_handler)); + let job_handler: Arc> = Arc::new(Box::new(job_handler)); let ctx = mock_factory::get_job_handler_context(); - ctx.expect().times(1).with(eq(JobType::SnosRun)).return_once(move |_| Arc::clone(&y)); + ctx.expect().times(1).with(eq(JobType::SnosRun)).return_once(move |_| Arc::clone(&job_handler)); let _ = create_job(JobType::SnosRun, "0".to_string(), HashMap::new()).await.is_ok(); - let messages_in_queue = list_messages_in_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); - assert_eq!(messages_in_queue.len(), 1); - let message_0_body: MessagePayloadType = - serde_json::from_str(&messages_in_queue[0].clone().body.unwrap()).unwrap(); - assert_eq!(message_0_body.id, job_item.id); + // Db checks. 
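// (The job above was written through the real MongoDB client wired in by
// TestConfigBuilder; build() now drops the database first, so this read
// observes only what this test created.)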
+ let job_in_db = config.database().get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(job_in_db.id, job_item.id); + + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; + + // Queue checks. + let consumed_messages = + config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); + let consumed_message_payload: MessagePayloadType = consumed_messages.payload_serde_json().unwrap().unwrap(); + assert_eq!(consumed_message_payload.id, job_item.id); } /// Tests `create_job` function when job is already existing in the db. #[rstest] #[tokio::test] - async fn test_create_job_handler_job_exists_in_db() { + async fn create_job_handler_job_exists_in_db() { let job_item = build_job_item_by_type_and_status(JobType::ProofCreation, JobStatus::Created, "0".to_string()); TestConfigBuilder::new().build().await; - drop_database().await.unwrap(); - create_sqs_queues().await.unwrap(); let config = config().await; let database_client = config.database(); @@ -90,8 +77,10 @@ mod job_handler_tests { let _ = create_job(JobType::ProofCreation, "0".to_string(), HashMap::new()).await.is_err(); - let messages_in_queue = list_messages_in_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); - assert_eq!(messages_in_queue.len(), 0); + // Queue checks. + let consumed_messages = + config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap_err(); + assert!(matches!(consumed_messages, QueueError::NoData)); } /// Tests `create_job` function when job handler is not implemented in the `get_job_handler` @@ -99,19 +88,20 @@ mod job_handler_tests { #[rstest] #[should_panic(expected = "Job type not implemented yet.")] #[tokio::test] - async fn test_create_job_handler_job_handler_is_not_implemented() { + async fn create_job_handler_job_handler_is_not_implemented() { TestConfigBuilder::new().build().await; - drop_database().await.unwrap(); - create_sqs_queues().await.unwrap(); + let config = config().await; // Mocking the `get_job_handler` call in create_job function. let ctx = mock_factory::get_job_handler_context(); - ctx.expect().times(1).with(eq(JobType::ProofCreation)).returning(|_| panic!("Job type not implemented yet.")); + ctx.expect().times(1).returning(|_| panic!("Job type not implemented yet.")); let _ = create_job(JobType::ProofCreation, "0".to_string(), HashMap::new()).await.is_err(); - let messages_in_queue = list_messages_in_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); - assert_eq!(messages_in_queue.len(), 0); + // Queue checks. 
+ let consumed_messages = + config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap_err(); + assert!(matches!(consumed_messages, QueueError::NoData)); } /// Tests `process_job` function when job is already existing in the db and job status is either @@ -120,7 +110,7 @@ mod job_handler_tests { #[case(JobType::SnosRun, JobStatus::Created)] #[case(JobType::DataSubmission, JobStatus::VerificationFailed("".to_string()))] #[tokio::test] - async fn test_process_job_handler_job_exists_in_db_and_valid_job_processing_status( + async fn process_job_handler_job_exists_in_db_and_valid_job_processing_status( #[case] job_type: JobType, #[case] job_status: JobStatus, ) { @@ -128,9 +118,6 @@ mod job_handler_tests { // Building config TestConfigBuilder::new().build().await; - drop_database().await.unwrap(); - create_sqs_queues().await.unwrap(); - let config = config().await; let database_client = config.database(); @@ -143,9 +130,9 @@ mod job_handler_tests { job_handler.expect_verification_polling_delay_seconds().return_const(1u64); // Mocking the `get_job_handler` call in create_job function. - let y: Arc> = Arc::new(Box::new(job_handler)); + let job_handler: Arc> = Arc::new(Box::new(job_handler)); let ctx = mock_factory::get_job_handler_context(); - ctx.expect().times(1).with(eq(job_type.clone())).returning(move |_| Arc::clone(&y)); + ctx.expect().times(1).with(eq(job_type.clone())).returning(move |_| Arc::clone(&job_handler)); let _ = process_job(job_item.id).await.is_ok(); // Getting the updated job. @@ -153,27 +140,27 @@ mod job_handler_tests { // checking if job_status is updated in db assert_eq!(updated_job.status, JobStatus::PendingVerification); + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; + // Queue checks - let messages_in_queue = list_messages_in_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); - assert_eq!(messages_in_queue.len(), 1); - let message_0_body: MessagePayloadType = - serde_json::from_str(&messages_in_queue[0].clone().body.unwrap()).unwrap(); - assert_eq!(message_0_body.id, job_item.id); + let consumed_messages = + config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); + let consumed_message_payload: MessagePayloadType = consumed_messages.payload_serde_json().unwrap().unwrap(); + println!("payload: {:?}", consumed_message_payload); + assert_eq!(consumed_message_payload.id, job_item.id); } /// Tests `process_job` function when job is already existing in the db and job status is not /// `Created` or `VerificationFailed`. #[rstest] #[tokio::test] - async fn test_process_job_handler_job_exists_in_db_and_invalid_job_processing_status() { + async fn process_job_handler_job_exists_in_db_and_invalid_job_processing_status() { // Creating a job with Completed status which is invalid processing. let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Completed, "1".to_string()); // building config TestConfigBuilder::new().build().await; - drop_database().await.unwrap(); - create_sqs_queues().await.unwrap(); - let config = config().await; let database_client = config.database(); @@ -186,26 +173,30 @@ mod job_handler_tests { // Job should be untouched in db. assert_eq!(job_in_db.status, JobStatus::Completed); - let messages_in_queue = list_messages_in_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); - assert_eq!(messages_in_queue.len(), 0); + // Queue checks. 
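// (omniqueue surfaces an empty queue as QueueError::NoData, which is why the
// negative queue checks assert on that variant via unwrap_err + matches!
// instead of counting messages.)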
+ let consumed_messages = + config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap_err(); + assert!(matches!(consumed_messages, QueueError::NoData)); } /// Tests `process_job` function when job is not in the db /// This test should fail #[rstest] #[tokio::test] - async fn test_process_job_handler_job_does_not_exists_in_db() { + async fn process_job_handler_job_does_not_exists_in_db() { // Creating a valid job which is not existing in the db. let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "1".to_string()); // building config TestConfigBuilder::new().build().await; - drop_database().await.unwrap(); - create_sqs_queues().await.unwrap(); + let config = config().await; let _ = process_job(job_item.id).await.is_err(); - let messages_in_queue = list_messages_in_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); - assert_eq!(messages_in_queue.len(), 0); + + // Queue checks. + let consumed_messages = + config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap_err(); + assert!(matches!(consumed_messages, QueueError::NoData)); } /// Tests `process_job` function when 2 workers try to process the same job. @@ -214,114 +205,57 @@ mod job_handler_tests { /// when updating the job status. #[rstest] #[tokio::test] - async fn test_process_job_two_workers_process_same_job() { - // Loading .env.test to get the db client - dotenvy::from_filename("../.env.test").expect("Failed to load the .env file"); - - drop_database().await.unwrap(); - - // Creating a valid job which is not existing in the db. - let mut job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "1".to_string()); - let job_item_cloned = job_item.clone(); - + async fn process_job_two_workers_process_same_job() { let mut job_handler = MockJob::new(); // Expecting process job function in job processor to return the external ID. - job_handler.expect_process_job().times(2).returning(move |_, _| Ok("0xbeef".to_string())); + job_handler.expect_process_job().times(1).returning(move |_, _| Ok("0xbeef".to_string())); job_handler.expect_verification_polling_delay_seconds().return_const(1u64); - // Creating a new mongo db client for actual database. - let db_client: Box = Box::new(get_mongo_db_client().await); - // Creating the job in actual db instance - db_client.create_job(job_item.clone()).await.unwrap(); - let job_in_db = db_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - - println!("job_in_db : {:?}", job_in_db); - - // Spinning up a new mock database. - let mut database = MockDatabase::new(); - - let metadata = - crate::jobs::increment_key_in_metadata(&job_item.metadata, JOB_PROCESS_ATTEMPT_METADATA_KEY).unwrap(); - job_item.external_id = "0xbeef".to_string().into(); - job_item.status = JobStatus::PendingVerification; - job_item.metadata = metadata; - - let job_in_db_clone = job_in_db.clone(); - - // Adding expectations for mock database. - database - .expect_get_job_by_id() - .times(1) - .with(eq(job_in_db_clone.id)) - .return_once(move |_| Ok(Some(job_in_db.clone()))); - database - .expect_update_job_status() - .with(eq(job_in_db_clone.clone()), eq(JobStatus::LockedForProcessing)) - .times(1) - .returning(|_, _| Ok(())); - database.expect_update_job().with(eq(job_item.clone())).times(1).returning(|_| Ok(())); - // Mocking the `get_job_handler` call in create_job function. 
- let y: Arc> = Arc::new(Box::new(job_handler)); + let job_handler: Arc> = Arc::new(Box::new(job_handler)); let ctx = mock_factory::get_job_handler_context(); - ctx.expect().times(2).with(eq(JobType::SnosRun)).returning(move |_| Arc::clone(&y)); + ctx.expect().times(1).with(eq(JobType::SnosRun)).returning(move |_| Arc::clone(&job_handler)); // building config - TestConfigBuilder::new().mock_db_client(Box::new(database)).build().await; + TestConfigBuilder::new().build().await; + let config = config().await; + let db_client = config.database(); - let _ = process_job(job_item.id).await.is_ok(); - // Updating the job in db. - db_client.update_job_status(&job_item, JobStatus::LockedForProcessing).await.unwrap(); - - // Worker 2 Attempt - // =================================================================== - - // Spinning up a new mock database. - let mut database_2 = MockDatabase::new(); - let mut job_item_clone_2 = job_item_cloned.clone(); - - let metadata = - crate::jobs::increment_key_in_metadata(&job_item_clone_2.metadata, JOB_PROCESS_ATTEMPT_METADATA_KEY) - .unwrap(); - job_item_clone_2.external_id = "0xbeef".to_string().into(); - job_item_clone_2.status = JobStatus::PendingVerification; - job_item_clone_2.metadata = metadata; - - // Adding expectations for mock database. - database_2 - .expect_get_job_by_id() - .times(1) - .with(eq(job_in_db_clone.id)) - .returning(move |_| Ok(Some(job_item_cloned.clone()))); - database_2 - .expect_update_job_status() - .with(eq(job_in_db_clone.clone()), eq(JobStatus::LockedForProcessing)) - .times(1) - .returning(|_, _| Ok(())); - database_2.expect_update_job().with(eq(job_item_clone_2.clone())).times(1).returning(|_| Ok(())); - sleep(Duration::from_secs(2)).await; + let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "1".to_string()); - // Making new config with database 2 mock - TestConfigBuilder::new().mock_db_client(Box::new(database_2)).build().await; + // Creating the job in the db + db_client.create_job(job_item.clone()).await.unwrap(); - let _ = process_job(job_item_clone_2.id).await.is_ok(); + // Simulating the two workers + let worker_1 = tokio::spawn(async move { process_job(job_item.id).await }); + let worker_2 = tokio::spawn(async move { process_job(job_item.id).await }); - // This should fail as there would be conflicting versions. - let _ = db_client.update_job_status(&job_item, JobStatus::LockedForProcessing).await.is_err(); + // waiting for workers to complete the processing + let (result_1, result_2) = tokio::join!(worker_1, worker_2); + + assert_ne!( + result_1.unwrap().is_ok(), + result_2.unwrap().is_ok(), + "One worker should succeed and the other should fail" + ); + + // Waiting for 5 secs for job to be updated in the db + sleep(Duration::from_secs(5)).await; + + let final_job_in_db = db_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(final_job_in_db.status, JobStatus::PendingVerification); } /// Tests `verify_job` function when job is having expected status /// and returns a `Verified` verification status. 
#[rstest] #[tokio::test] - async fn test_verify_job_handler_with_expected_job_status_and_verified_status_return() { + async fn verify_job_handler_with_expected_job_status_and_verified_status_return() { let job_item = build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); // building config TestConfigBuilder::new().build().await; - drop_database().await.unwrap(); - create_sqs_queues().await.unwrap(); let config = config().await; let database_client = config.database(); @@ -333,10 +267,10 @@ mod job_handler_tests { job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Verified)); job_handler.expect_max_process_attempts().returning(move || 2u64); - let y: Arc> = Arc::new(Box::new(job_handler)); + let job_handler: Arc> = Arc::new(Box::new(job_handler)); let ctx = mock_factory::get_job_handler_context(); // Mocking the `get_job_handler` call in create_job function. - ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&y)); + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); let _ = verify_job(job_item.id).await.is_ok(); @@ -345,25 +279,24 @@ mod job_handler_tests { assert_eq!(updated_job.status, JobStatus::Completed); // Queue checks. - let messages_in_process_queue = list_messages_in_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); - let messages_in_verification_queue = list_messages_in_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); - assert_eq!(messages_in_process_queue.len(), 0); - assert_eq!(messages_in_verification_queue.len(), 0); + let consumed_messages_verification_queue = + config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap_err(); + assert!(matches!(consumed_messages_verification_queue, QueueError::NoData)); + let consumed_messages_processing_queue = + config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap_err(); + assert!(matches!(consumed_messages_processing_queue, QueueError::NoData)); } /// Tests `verify_job` function when job is having expected status /// and returns a `Rejected` verification status. #[rstest] #[tokio::test] - async fn test_verify_job_handler_with_expected_job_status_and_rejected_status_return_and_adds_process_to_job_queue() - { + async fn verify_job_handler_with_expected_job_status_and_rejected_status_return_and_adds_process_to_job_queue() { let job_item = build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); // building config TestConfigBuilder::new().build().await; - drop_database().await.unwrap(); - create_sqs_queues().await.unwrap(); let config = config().await; let database_client = config.database(); @@ -371,17 +304,16 @@ mod job_handler_tests { // creating job in database database_client.create_job(job_item.clone()).await.unwrap(); - // expecting process job function in job processor to return the external ID job_handler .expect_verify_job() .times(1) .returning(move |_, _| Ok(JobVerificationStatus::Rejected("".to_string()))); job_handler.expect_max_process_attempts().returning(move || 2u64); - let y: Arc> = Arc::new(Box::new(job_handler)); + let job_handler: Arc> = Arc::new(Box::new(job_handler)); let ctx = mock_factory::get_job_handler_context(); // Mocking the `get_job_handler` call in create_job function. 
- ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&y)); + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); let _ = verify_job(job_item.id).await.is_ok(); @@ -389,12 +321,14 @@ mod job_handler_tests { let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); assert_eq!(updated_job.status, JobStatus::VerificationFailed("".to_string())); + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; + // Queue checks. - let messages_in_queue = list_messages_in_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); - assert_eq!(messages_in_queue.len(), 1); - let message_0_body: MessagePayloadType = - serde_json::from_str(&messages_in_queue[0].clone().body.unwrap()).unwrap(); - assert_eq!(message_0_body.id, job_item.id); + let consumed_messages = + config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); + let consumed_message_payload: MessagePayloadType = consumed_messages.payload_serde_json().unwrap().unwrap(); + assert_eq!(consumed_message_payload.id, job_item.id); } /// Tests `verify_job` function when job is having expected status @@ -402,7 +336,7 @@ mod job_handler_tests { /// the job to process queue because of maximum attempts reached. #[rstest] #[tokio::test] - async fn test_verify_job_handler_with_expected_job_status_and_rejected_status_return() { + async fn verify_job_handler_with_expected_job_status_and_rejected_status_return() { let mut job_item = build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); @@ -412,8 +346,6 @@ mod job_handler_tests { // building config TestConfigBuilder::new().build().await; - drop_database().await.unwrap(); - create_sqs_queues().await.unwrap(); let config = config().await; let database_client = config.database(); @@ -428,10 +360,10 @@ mod job_handler_tests { .returning(move |_, _| Ok(JobVerificationStatus::Rejected("".to_string()))); job_handler.expect_max_process_attempts().returning(move || 1u64); - let y: Arc> = Arc::new(Box::new(job_handler)); + let job_handler: Arc> = Arc::new(Box::new(job_handler)); let ctx = mock_factory::get_job_handler_context(); // Mocking the `get_job_handler` call in create_job function. - ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&y)); + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); let _ = verify_job(job_item.id).await.is_ok(); @@ -440,23 +372,22 @@ mod job_handler_tests { assert_eq!(updated_job.status, JobStatus::VerificationFailed("".to_string())); // Queue checks. - let messages_in_queue = list_messages_in_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); - assert_eq!(messages_in_queue.len(), 0); + let consumed_messages_processing_queue = + config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap_err(); + assert!(matches!(consumed_messages_processing_queue, QueueError::NoData)); } /// Tests `verify_job` function when job is having expected status /// and returns a `Pending` verification status. 
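As in the other verify tests, the "expected job status" here is `PendingVerification` — the predicate the removed `is_valid_job_verification_status` helper captured. A minimal sketch, assuming `JobStatus` is in scope:

fn can_verify(status: &JobStatus) -> bool {
    // Only jobs awaiting verification may be verified.
    matches!(status, JobStatus::PendingVerification)
}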
#[rstest] #[tokio::test] - async fn test_verify_job_handler_with_expected_job_status_and_pending_status_return_and_adds_job_to_verification_queue( - ) { + async fn verify_job_handler_with_expected_job_status_and_pending_status_return_and_adds_job_to_verification_queue() + { let job_item = build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); // building config TestConfigBuilder::new().build().await; - drop_database().await.unwrap(); - create_sqs_queues().await.unwrap(); let config = config().await; let database_client = config.database(); @@ -469,10 +400,10 @@ mod job_handler_tests { job_handler.expect_max_verification_attempts().returning(move || 2u64); job_handler.expect_verification_polling_delay_seconds().returning(move || 2u64); - let y: Arc> = Arc::new(Box::new(job_handler)); + let job_handler: Arc> = Arc::new(Box::new(job_handler)); let ctx = mock_factory::get_job_handler_context(); // Mocking the `get_job_handler` call in create_job function. - ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&y)); + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); let _ = verify_job(job_item.id).await.is_ok(); @@ -480,12 +411,14 @@ mod job_handler_tests { let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); assert_eq!(updated_job.metadata.get(JOB_VERIFICATION_ATTEMPT_METADATA_KEY).unwrap(), "1"); - // Queue checks. - let messages_in_queue = list_messages_in_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); - assert_eq!(messages_in_queue.len(), 1); - let message_0_body: MessagePayloadType = - serde_json::from_str(&messages_in_queue[0].clone().body.unwrap()).unwrap(); - assert_eq!(message_0_body.id, job_item.id); + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; + + // Queue checks + let consumed_messages = + config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); + let consumed_message_payload: MessagePayloadType = consumed_messages.payload_serde_json().unwrap().unwrap(); + assert_eq!(consumed_message_payload.id, job_item.id); } /// Tests `verify_job` function when job is having expected status @@ -493,7 +426,7 @@ mod job_handler_tests { /// the job to process queue because of maximum attempts reached. #[rstest] #[tokio::test] - async fn test_verify_job_handler_with_expected_job_status_and_pending_status_return() { + async fn verify_job_handler_with_expected_job_status_and_pending_status_return() { let mut job_item = build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); @@ -503,8 +436,6 @@ mod job_handler_tests { // building config TestConfigBuilder::new().build().await; - drop_database().await.unwrap(); - create_sqs_queues().await.unwrap(); let config = config().await; let database_client = config.database(); @@ -517,10 +448,10 @@ mod job_handler_tests { job_handler.expect_max_verification_attempts().returning(move || 1u64); job_handler.expect_verification_polling_delay_seconds().returning(move || 2u64); - let y: Arc> = Arc::new(Box::new(job_handler)); + let job_handler: Arc> = Arc::new(Box::new(job_handler)); let ctx = mock_factory::get_job_handler_context(); // Mocking the `get_job_handler` call in create_job function. 
- ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&y)); + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); let _ = verify_job(job_item.id).await.is_ok(); @@ -529,8 +460,9 @@ mod job_handler_tests { assert_eq!(updated_job.status, JobStatus::VerificationTimeout); // Queue checks. - let messages_in_queue = list_messages_in_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); - assert_eq!(messages_in_queue.len(), 0); + let consumed_messages_verification_queue = + config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap_err(); + assert!(matches!(consumed_messages_verification_queue, QueueError::NoData)); } fn build_job_item_by_type_and_status(job_type: JobType, job_status: JobStatus, internal_id: String) -> JobItem { @@ -547,8 +479,4 @@ mod job_handler_tests { version: 0, } } - - async fn get_mongo_db_client() -> MongoDb { - MongoDb::new(MongoDbConfig::new_from_env()).await - } } From b31212cd9e28a5718d4188aa793f01e6110baff1 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 2 Aug 2024 18:27:13 +0530 Subject: [PATCH 24/44] feat : updated test config and added config type to aws s3 config --- crates/orchestrator/src/config.rs | 4 +- .../src/data_storage/aws_s3/config.rs | 36 +++++++++- .../src/data_storage/aws_s3/mod.rs | 70 +++++++++++++------ .../orchestrator/src/database/mongodb/mod.rs | 1 - crates/orchestrator/src/tests/common/mod.rs | 8 ++- crates/orchestrator/src/tests/config.rs | 41 ++++------- .../src/tests/data_storage/mod.rs | 6 +- crates/orchestrator/src/tests/database/mod.rs | 3 - .../gps-fact-checker/src/fact_node.rs | 1 + 9 files changed, 108 insertions(+), 62 deletions(-) diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs index 55761526..447b25ec 100644 --- a/crates/orchestrator/src/config.rs +++ b/crates/orchestrator/src/config.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::data_storage::aws_s3::config::AWSS3Config; +use crate::data_storage::aws_s3::config::{AWSS3Config, AWSS3ConfigType}; use crate::data_storage::aws_s3::AWSS3; use crate::data_storage::{DataStorage, DataStorageConfig}; use arc_swap::{ArcSwap, Guard}; @@ -179,7 +179,7 @@ pub async fn build_settlement_client( pub async fn build_storage_client() -> Box { match get_env_var_or_panic("DATA_STORAGE").as_str() { - "s3" => Box::new(AWSS3::new(AWSS3Config::new_from_env()).await), + "s3" => Box::new(AWSS3::new(AWSS3ConfigType::WithoutEndpoint(AWSS3Config::new_from_env())).await), _ => panic!("Unsupported Storage Client"), } } diff --git a/crates/orchestrator/src/data_storage/aws_s3/config.rs b/crates/orchestrator/src/data_storage/aws_s3/config.rs index 06eeaff8..0970d80d 100644 --- a/crates/orchestrator/src/data_storage/aws_s3/config.rs +++ b/crates/orchestrator/src/data_storage/aws_s3/config.rs @@ -2,7 +2,15 @@ use utils::env_utils::get_env_var_or_panic; use crate::data_storage::DataStorageConfig; +/// Represents the type of the config which one wants to pass to create the client +#[derive(Clone)] +pub enum AWSS3ConfigType { + WithEndpoint(S3LocalStackConfig), + WithoutEndpoint(AWSS3Config), +} + /// Represents AWS S3 config struct with all the necessary variables. +#[derive(Clone)] pub struct AWSS3Config { /// AWS ACCESS KEY ID pub s3_key_id: String, @@ -12,8 +20,20 @@ pub struct AWSS3Config { pub s3_bucket_name: String, /// S3 Bucket region pub s3_bucket_region: String, +} + +/// Represents AWS S3 config struct with all the necessary variables. 
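+/// Unlike `AWSS3Config`, this variant targets LocalStack-style S3 deployments and therefore also carries an explicit endpoint url.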
+#[derive(Clone)] +pub struct S3LocalStackConfig { + /// AWS ACCESS KEY ID + pub s3_key_id: String, + /// AWS ACCESS KEY SECRET + pub s3_key_secret: String, + /// S3 Bucket Name + pub s3_bucket_name: String, + /// S3 Bucket region + pub s3_bucket_region: String, /// Endpoint url - #[cfg(test)] pub endpoint_url: String, } @@ -26,7 +46,19 @@ impl DataStorageConfig for AWSS3Config { s3_key_secret: get_env_var_or_panic("AWS_SECRET_ACCESS_KEY"), s3_bucket_name: get_env_var_or_panic("AWS_S3_BUCKET_NAME"), s3_bucket_region: get_env_var_or_panic("AWS_S3_BUCKET_REGION"), - #[cfg(test)] + } + } +} + +/// Implementation of `DataStorageConfig` for `S3LocalStackConfig` +impl DataStorageConfig for S3LocalStackConfig { + /// To return the config struct by creating it from the environment variables. + fn new_from_env() -> Self { + Self { + s3_key_id: get_env_var_or_panic("AWS_ACCESS_KEY_ID"), + s3_key_secret: get_env_var_or_panic("AWS_SECRET_ACCESS_KEY"), + s3_bucket_name: get_env_var_or_panic("AWS_S3_BUCKET_NAME"), + s3_bucket_region: get_env_var_or_panic("AWS_S3_BUCKET_REGION"), endpoint_url: get_env_var_or_panic("AWS_ENDPOINT_URL"), } } diff --git a/crates/orchestrator/src/data_storage/aws_s3/mod.rs b/crates/orchestrator/src/data_storage/aws_s3/mod.rs index 50ae6e3e..48707cc0 100644 --- a/crates/orchestrator/src/data_storage/aws_s3/mod.rs +++ b/crates/orchestrator/src/data_storage/aws_s3/mod.rs @@ -1,4 +1,4 @@ -use crate::data_storage::aws_s3::config::AWSS3Config; +use crate::data_storage::aws_s3::config::AWSS3ConfigType; use crate::data_storage::DataStorage; use async_trait::async_trait; use aws_sdk_s3::config::{Builder, Credentials, Region}; @@ -13,7 +13,7 @@ pub mod config; /// AWSS3 represents AWS S3 client object containing the client and the config itself. pub struct AWSS3 { client: Client, - config: AWSS3Config, + config: AWSS3ConfigType, } /// Implementation for AWS S3 client. Contains the function for : @@ -22,32 +22,56 @@ pub struct AWSS3 { impl AWSS3 { /// Initializes a new AWS S3 client by passing the config /// and returning it. 
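    /// For illustration, a usage sketch built from the two call sites in this patch:
    /// `AWSS3::new(AWSS3ConfigType::WithEndpoint(S3LocalStackConfig::new_from_env())).await` for LocalStack,
    /// or `AWSS3::new(AWSS3ConfigType::WithoutEndpoint(AWSS3Config::new_from_env())).await` against real AWS.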
- pub async fn new(config: AWSS3Config) -> Self { - // AWS cred building - let credentials = Credentials::new( - config.s3_key_id.clone(), - config.s3_key_secret.clone(), - None, - None, - "loaded_from_custom_env", - ); - let region = Region::new(config.s3_bucket_region.clone().to_string()); + pub async fn new(config: AWSS3ConfigType) -> Self { + let (config_builder, config) = match config { + AWSS3ConfigType::WithoutEndpoint(config) => { + let credentials = Credentials::new( + config.s3_key_id.clone(), + config.s3_key_secret.clone(), + None, + None, + "loaded_from_custom_env", + ); + let region = Region::new(config.s3_bucket_region.clone().to_string()); + ( + Builder::new().region(region).credentials_provider(credentials).force_path_style(true), + AWSS3ConfigType::WithoutEndpoint(config), + ) + } + AWSS3ConfigType::WithEndpoint(config) => { + let credentials = Credentials::new( + config.s3_key_id.clone(), + config.s3_key_secret.clone(), + None, + None, + "loaded_from_custom_env", + ); + let region = Region::new(config.s3_bucket_region.clone().to_string()); + ( + Builder::new() + .region(region) + .credentials_provider(credentials) + .force_path_style(true) + .endpoint_url(config.endpoint_url.clone()), + AWSS3ConfigType::WithEndpoint(config), + ) + } + }; - #[allow(unused_mut)] - let mut conf_builder = Builder::new().region(region).credentials_provider(credentials).force_path_style(true); - - #[cfg(test)] - { - conf_builder = conf_builder.endpoint_url(config.endpoint_url.clone().to_string()); - } - - let conf = conf_builder.build(); + let conf = config_builder.build(); // Building AWS S3 config let client = Client::from_conf(conf); Self { client, config } } + + pub fn get_bucket_name(&self) -> String { + match self.config.clone() { + AWSS3ConfigType::WithEndpoint(config) => config.s3_bucket_name, + AWSS3ConfigType::WithoutEndpoint(config) => config.s3_bucket_name, + } + } } /// Implementation of `DataStorage` for `AWSS3` @@ -57,7 +81,7 @@ impl AWSS3 { impl DataStorage for AWSS3 { /// Function to get the data from S3 bucket by Key. async fn get_data(&self, key: &str) -> Result { - let response = self.client.get_object().bucket(self.config.s3_bucket_name.clone()).key(key).send().await?; + let response = self.client.get_object().bucket(self.get_bucket_name()).key(key).send().await?; let data_stream = response.body.collect().await.expect("Failed to convert body into AggregatedBytes."); let data_bytes = data_stream.into_bytes(); Ok(data_bytes) @@ -67,7 +91,7 @@ impl DataStorage for AWSS3 { async fn put_data(&self, data: Bytes, key: &str) -> Result<()> { self.client .put_object() - .bucket(self.config.s3_bucket_name.clone()) + .bucket(self.get_bucket_name()) .key(key) .body(ByteStream::from(data)) .content_type("application/json") diff --git a/crates/orchestrator/src/database/mongodb/mod.rs b/crates/orchestrator/src/database/mongodb/mod.rs index 90b6ecc7..d70ca83c 100644 --- a/crates/orchestrator/src/database/mongodb/mod.rs +++ b/crates/orchestrator/src/database/mongodb/mod.rs @@ -42,7 +42,6 @@ impl MongoDb { /// Mongodb client uses Arc internally, reducing the cost of clone. /// Directly using clone is not recommended for libraries not using Arc internally. - /// Dev might want to pass an Arc manually. 
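    /// A hedged sketch: `MongoDb::new(MongoDbConfig::new_from_env()).await.client()` yields a cheap handle
    /// to the same underlying connection pool, not a new connection.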
pub fn client(&self) -> Client { self.client.clone() } diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index 858635a9..c008aa22 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -15,7 +15,9 @@ use starknet::providers::JsonRpcClient; use url::Url; use crate::config::Config; -use crate::data_storage::MockDataStorage; +use crate::data_storage::aws_s3::config::{AWSS3ConfigType, S3LocalStackConfig}; +use crate::data_storage::aws_s3::AWSS3; +use crate::data_storage::{DataStorage, DataStorageConfig, MockDataStorage}; use crate::database::mongodb::config::MongoDbConfig; use crate::database::mongodb::MongoDb; use crate::database::{DatabaseConfig, MockDatabase}; @@ -86,3 +88,7 @@ pub async fn drop_database() -> color_eyre::Result<()> { db_client.database("orchestrator").drop(None).await?; Ok(()) } + +pub async fn get_storage_client() -> Box { + Box::new(AWSS3::new(AWSS3ConfigType::WithEndpoint(S3LocalStackConfig::new_from_env())).await) +} diff --git a/crates/orchestrator/src/tests/config.rs b/crates/orchestrator/src/tests/config.rs index 5191bfc4..61c156a2 100644 --- a/crates/orchestrator/src/tests/config.rs +++ b/crates/orchestrator/src/tests/config.rs @@ -1,8 +1,6 @@ use std::sync::Arc; -use crate::config::{ - build_da_client, build_prover_service, build_settlement_client, build_storage_client, config_force_init, Config, -}; +use crate::config::{build_da_client, build_prover_service, build_settlement_client, config_force_init, Config}; use crate::data_storage::DataStorage; use da_client_interface::DaClient; use prover_client_interface::ProverClient; @@ -18,6 +16,7 @@ use crate::database::{Database, DatabaseConfig}; use crate::queue::sqs::SqsQueue; use crate::queue::QueueProvider; +use crate::tests::common::{drop_database, get_storage_client}; use httpmock::MockServer; // Inspiration : https://rust-unofficial.github.io/patterns/patterns/creational/builder.html // TestConfigBuilder allows to heavily customise the global configs based on the test's requirement. 
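// For example, `TestConfigBuilder::new().build().await` (as the tests in this patch do) fills every field left
// unset with the defaults wired below, while a test can pre-set any dependency with a mock before building.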
@@ -70,45 +69,26 @@ impl TestConfigBuilder { dotenvy::from_filename("../.env.test").expect("Failed to load the .env file"); let server = MockServer::start(); - - // init starknet client - if self.starknet_client.is_none() { - let provider = JsonRpcClient::new(HttpTransport::new( - Url::parse(format!("http://localhost:{}", server.port()).as_str()).expect("Failed to parse URL"), - )); - self.starknet_client = Some(Arc::new(provider)); - } + let settings_provider = DefaultSettingsProvider {}; // init database if self.database.is_none() { self.database = Some(Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await)); } - // init queue - if self.queue.is_none() { - self.queue = Some(Box::new(SqsQueue {})); - } - // init the DA client if self.da_client.is_none() { self.da_client = Some(build_da_client().await); } - let settings_provider = DefaultSettingsProvider {}; - // init the Settings client if self.settlement_client.is_none() { self.settlement_client = Some(build_settlement_client(&settings_provider).await); } - // init the Prover client - if self.prover_client.is_none() { - self.prover_client = Some(build_prover_service(&settings_provider)); - } - // init the storage client if self.storage.is_none() { - self.storage = Some(build_storage_client().await); + self.storage = Some(get_storage_client().await); match get_env_var_or_panic("DATA_STORAGE").as_str() { "s3" => self .storage @@ -121,14 +101,21 @@ impl TestConfigBuilder { } } + drop_database().await.unwrap(); + // return config and server as tuple let config = Config::new( - self.starknet_client.unwrap(), + self.starknet_client.unwrap_or_else(|| { + let provider = JsonRpcClient::new(HttpTransport::new( + Url::parse(format!("http://localhost:{}", server.port()).as_str()).expect("Failed to parse URL"), + )); + Arc::new(provider) + }), self.da_client.unwrap(), - self.prover_client.unwrap(), + self.prover_client.unwrap_or_else(|| build_prover_service(&settings_provider)), self.settlement_client.unwrap(), self.database.unwrap(), - self.queue.unwrap(), + self.queue.unwrap_or_else(|| Box::new(SqsQueue {})), self.storage.unwrap(), ); diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs index 8f68d312..d127917a 100644 --- a/crates/orchestrator/src/tests/data_storage/mod.rs +++ b/crates/orchestrator/src/tests/data_storage/mod.rs @@ -1,4 +1,4 @@ -use crate::data_storage::aws_s3::config::AWSS3Config; +use crate::data_storage::aws_s3::config::{AWSS3ConfigType, S3LocalStackConfig}; use crate::data_storage::aws_s3::AWSS3; use crate::data_storage::{DataStorage, DataStorageConfig}; use crate::tests::config::TestConfigBuilder; @@ -18,8 +18,8 @@ async fn test_put_and_get_data_s3() -> color_eyre::Result<()> { dotenvy::from_filename("../.env.test")?; - let config = AWSS3Config::new_from_env(); - let s3_client = AWSS3::new(config).await; + let config = S3LocalStackConfig::new_from_env(); + let s3_client = AWSS3::new(AWSS3ConfigType::WithEndpoint(config)).await; s3_client.build_test_bucket(&get_env_var_or_panic("AWS_S3_BUCKET_NAME")).await.unwrap(); let mock_data = json!( diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index f7cca727..343fdf55 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -1,6 +1,5 @@ use crate::config::config; use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; -use crate::tests::common::drop_database; use 
crate::tests::config::TestConfigBuilder; use rstest::*; use uuid::Uuid; @@ -19,8 +18,6 @@ async fn test_database_connection() -> color_eyre::Result<()> { async fn test_database_create_job() -> color_eyre::Result<()> { TestConfigBuilder::new().build().await; - drop_database().await.unwrap(); - let config = config().await; let database_client = config.database(); diff --git a/crates/prover-services/gps-fact-checker/src/fact_node.rs b/crates/prover-services/gps-fact-checker/src/fact_node.rs index 2d66fbac..fbbb2be4 100644 --- a/crates/prover-services/gps-fact-checker/src/fact_node.rs +++ b/crates/prover-services/gps-fact-checker/src/fact_node.rs @@ -12,6 +12,7 @@ //! constructed using a stack of nodes (initialized to an empty stack) by repeating for each pair: //! 1. Add #n_pages lead nodes to the stack. //! 2. Pop the top #n_nodes, construct a parent node for them, and push it back to the stack. +//! //! After applying the steps above, the stack must contain exactly one node, which will //! constitute the root of the Merkle tree. //! From ad585f1d1a28bb4b068f74c10d9c506b76867d10 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 2 Aug 2024 20:04:49 +0530 Subject: [PATCH 25/44] feat : updated tests and test names --- Cargo.lock | 1 + crates/orchestrator/Cargo.toml | 1 + crates/orchestrator/src/tests/jobs/mod.rs | 917 +++++++++--------- .../src/tests/workers/proving/mod.rs | 6 +- .../src/tests/workers/snos/mod.rs | 4 +- 5 files changed, 477 insertions(+), 452 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb8b39c1..6dc64ecd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6316,6 +6316,7 @@ version = "0.1.0" dependencies = [ "alloy 0.1.2", "arc-swap", + "assert_matches", "async-std", "async-trait", "aws-config", diff --git a/crates/orchestrator/Cargo.toml b/crates/orchestrator/Cargo.toml index 76094adb..d4d36695 100644 --- a/crates/orchestrator/Cargo.toml +++ b/crates/orchestrator/Cargo.toml @@ -14,6 +14,7 @@ path = "src/main.rs" [dependencies] alloy = { workspace = true } arc-swap = { workspace = true } +assert_matches = "1.5.0" async-std = "1.12.0" async-trait = { workspace = true } aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } diff --git a/crates/orchestrator/src/tests/jobs/mod.rs b/crates/orchestrator/src/tests/jobs/mod.rs index fb07f7dc..41f870d5 100644 --- a/crates/orchestrator/src/tests/jobs/mod.rs +++ b/crates/orchestrator/src/tests/jobs/mod.rs @@ -7,476 +7,499 @@ pub mod proving_job; #[cfg(test)] pub mod state_update_job; -mod job_handler_tests { - use std::collections::HashMap; - use std::sync::Arc; - use std::time::Duration; - - use mockall::predicate::eq; - use mongodb::bson::doc; - use omniqueue::QueueError; - use rstest::rstest; - use tokio::time::sleep; - use uuid::Uuid; - - use crate::config::config; - use crate::jobs::constants::{JOB_PROCESS_ATTEMPT_METADATA_KEY, JOB_VERIFICATION_ATTEMPT_METADATA_KEY}; - use crate::jobs::job_handler_factory::mock_factory; - use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType, JobVerificationStatus}; - use crate::jobs::{create_job, increment_key_in_metadata, process_job, verify_job, Job, MockJob}; - use crate::queue::job_queue::{JOB_PROCESSING_QUEUE, JOB_VERIFICATION_QUEUE}; - use crate::tests::common::MessagePayloadType; - use crate::tests::config::TestConfigBuilder; - - /// Tests `create_job` function when job is not existing in the db. 
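+/// A fresh row should land in the database and a matching message on the processing queue.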
- #[rstest] - #[tokio::test] - async fn create_job_handler_job_does_not_exists_in_db() { - let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "0".to_string()); - let mut job_handler = MockJob::new(); - - // Adding expectation for creation of new job. - let job_item_clone = job_item.clone(); - job_handler.expect_create_job().times(1).returning(move |_, _, _| Ok(job_item_clone.clone())); - - TestConfigBuilder::new().build().await; - let config = config().await; - - // Mocking the `get_job_handler` call in create_job function. - let job_handler: Arc> = Arc::new(Box::new(job_handler)); - let ctx = mock_factory::get_job_handler_context(); - ctx.expect().times(1).with(eq(JobType::SnosRun)).return_once(move |_| Arc::clone(&job_handler)); - - let _ = create_job(JobType::SnosRun, "0".to_string(), HashMap::new()).await.is_ok(); - - // Db checks. - let job_in_db = config.database().get_job_by_id(job_item.id).await.unwrap().unwrap(); - assert_eq!(job_in_db.id, job_item.id); - - // Waiting for 5 secs for message to be passed into the queue - sleep(Duration::from_secs(5)).await; - - // Queue checks. - let consumed_messages = - config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); - let consumed_message_payload: MessagePayloadType = consumed_messages.payload_serde_json().unwrap().unwrap(); - assert_eq!(consumed_message_payload.id, job_item.id); - } +use assert_matches::assert_matches; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Duration; + +use mockall::predicate::eq; +use mongodb::bson::doc; +use omniqueue::QueueError; +use rstest::rstest; +use tokio::time::sleep; +use uuid::Uuid; + +use crate::config::config; +use crate::jobs::constants::{JOB_PROCESS_ATTEMPT_METADATA_KEY, JOB_VERIFICATION_ATTEMPT_METADATA_KEY}; +use crate::jobs::job_handler_factory::mock_factory; +use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType, JobVerificationStatus}; +use crate::jobs::{create_job, increment_key_in_metadata, process_job, verify_job, Job, MockJob}; +use crate::queue::job_queue::{JOB_PROCESSING_QUEUE, JOB_VERIFICATION_QUEUE}; +use crate::tests::common::MessagePayloadType; +use crate::tests::config::TestConfigBuilder; + +/// Tests `create_job` function when job is not existing in the db. +#[rstest] +#[tokio::test] +async fn create_job_job_does_not_exists_in_db_works() { + let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "0".to_string()); + let mut job_handler = MockJob::new(); + + // Adding expectation for creation of new job. + let job_item_clone = job_item.clone(); + job_handler.expect_create_job().times(1).returning(move |_, _, _| Ok(job_item_clone.clone())); + + TestConfigBuilder::new().build().await; + let config = config().await; + + // Mocking the `get_job_handler` call in create_job function. + let job_handler: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + ctx.expect().times(1).with(eq(JobType::SnosRun)).return_once(move |_| Arc::clone(&job_handler)); + + assert!(create_job(JobType::SnosRun, "0".to_string(), HashMap::new()).await.is_ok()); + + let mut hashmap: HashMap = HashMap::new(); + hashmap.insert(JOB_PROCESS_ATTEMPT_METADATA_KEY.to_string(), "0".to_string()); + hashmap.insert(JOB_VERIFICATION_ATTEMPT_METADATA_KEY.to_string(), "0".to_string()); + + // Db checks. 
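+    // (the freshly created job must round-trip from the db with the default zeroed attempt metadata)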
+ let job_in_db = config.database().get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(job_in_db.id, job_item.id); + assert_eq!(job_in_db.internal_id, job_item.internal_id); + assert_eq!(job_in_db.metadata, hashmap); + + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; + + // Queue checks. + let consumed_messages = config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); + let consumed_message_payload: MessagePayloadType = consumed_messages.payload_serde_json().unwrap().unwrap(); + assert_eq!(consumed_message_payload.id, job_item.id); +} - /// Tests `create_job` function when job is already existing in the db. - #[rstest] - #[tokio::test] - async fn create_job_handler_job_exists_in_db() { - let job_item = build_job_item_by_type_and_status(JobType::ProofCreation, JobStatus::Created, "0".to_string()); +/// Tests `create_job` function when job is already existing in the db. +#[rstest] +#[tokio::test] +async fn create_job_job_exists_in_db_works() { + let job_item = build_job_item_by_type_and_status(JobType::ProofCreation, JobStatus::Created, "0".to_string()); - TestConfigBuilder::new().build().await; + TestConfigBuilder::new().build().await; - let config = config().await; - let database_client = config.database(); - database_client.create_job(job_item).await.unwrap(); + let config = config().await; + let database_client = config.database(); + database_client.create_job(job_item).await.unwrap(); - let _ = create_job(JobType::ProofCreation, "0".to_string(), HashMap::new()).await.is_err(); + assert!(create_job(JobType::ProofCreation, "0".to_string(), HashMap::new()).await.is_err()); - // Queue checks. - let consumed_messages = - config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap_err(); - assert!(matches!(consumed_messages, QueueError::NoData)); - } + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; - /// Tests `create_job` function when job handler is not implemented in the `get_job_handler` - /// This test should fail as job handler is not implemented in the `factory.rs` - #[rstest] - #[should_panic(expected = "Job type not implemented yet.")] - #[tokio::test] - async fn create_job_handler_job_handler_is_not_implemented() { - TestConfigBuilder::new().build().await; - let config = config().await; - - // Mocking the `get_job_handler` call in create_job function. - let ctx = mock_factory::get_job_handler_context(); - ctx.expect().times(1).returning(|_| panic!("Job type not implemented yet.")); - - let _ = create_job(JobType::ProofCreation, "0".to_string(), HashMap::new()).await.is_err(); - - // Queue checks. - let consumed_messages = - config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap_err(); - assert!(matches!(consumed_messages, QueueError::NoData)); - } + // Queue checks. + let consumed_messages = + config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap_err(); + assert_matches!(consumed_messages, QueueError::NoData); +} - /// Tests `process_job` function when job is already existing in the db and job status is either - /// `Created` or `VerificationFailed`. 
- #[rstest] - #[case(JobType::SnosRun, JobStatus::Created)] - #[case(JobType::DataSubmission, JobStatus::VerificationFailed("".to_string()))] - #[tokio::test] - async fn process_job_handler_job_exists_in_db_and_valid_job_processing_status( - #[case] job_type: JobType, - #[case] job_status: JobStatus, - ) { - let job_item = build_job_item_by_type_and_status(job_type.clone(), job_status.clone(), "1".to_string()); - - // Building config - TestConfigBuilder::new().build().await; - let config = config().await; - let database_client = config.database(); - - let mut job_handler = MockJob::new(); - - // Creating job in database - database_client.create_job(job_item.clone()).await.unwrap(); - // Expecting process job function in job processor to return the external ID. - job_handler.expect_process_job().times(1).returning(move |_, _| Ok("0xbeef".to_string())); - job_handler.expect_verification_polling_delay_seconds().return_const(1u64); - - // Mocking the `get_job_handler` call in create_job function. - let job_handler: Arc> = Arc::new(Box::new(job_handler)); - let ctx = mock_factory::get_job_handler_context(); - ctx.expect().times(1).with(eq(job_type.clone())).returning(move |_| Arc::clone(&job_handler)); - - let _ = process_job(job_item.id).await.is_ok(); - // Getting the updated job. - let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - // checking if job_status is updated in db - assert_eq!(updated_job.status, JobStatus::PendingVerification); - - // Waiting for 5 secs for message to be passed into the queue - sleep(Duration::from_secs(5)).await; - - // Queue checks - let consumed_messages = - config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); - let consumed_message_payload: MessagePayloadType = consumed_messages.payload_serde_json().unwrap().unwrap(); - println!("payload: {:?}", consumed_message_payload); - assert_eq!(consumed_message_payload.id, job_item.id); - } +/// Tests `create_job` function when job handler is not implemented in the `get_job_handler` +/// This test should fail as job handler is not implemented in the `factory.rs` +#[rstest] +#[should_panic(expected = "Job type not implemented yet.")] +#[tokio::test] +async fn create_job_job_handler_is_not_implemented_panics() { + TestConfigBuilder::new().build().await; + let config = config().await; - /// Tests `process_job` function when job is already existing in the db and job status is not - /// `Created` or `VerificationFailed`. - #[rstest] - #[tokio::test] - async fn process_job_handler_job_exists_in_db_and_invalid_job_processing_status() { - // Creating a job with Completed status which is invalid processing. - let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Completed, "1".to_string()); + // Mocking the `get_job_handler` call in create_job function. + let ctx = mock_factory::get_job_handler_context(); + ctx.expect().times(1).returning(|_| panic!("Job type not implemented yet.")); - // building config - TestConfigBuilder::new().build().await; - let config = config().await; - let database_client = config.database(); + assert!(create_job(JobType::ProofCreation, "0".to_string(), HashMap::new()).await.is_err()); - // creating job in database - database_client.create_job(job_item.clone()).await.unwrap(); + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; - let _ = process_job(job_item.id).await.is_err(); + // Queue checks. 
+ let consumed_messages = + config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap_err(); + assert_matches!(consumed_messages, QueueError::NoData); +} - let job_in_db = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - // Job should be untouched in db. - assert_eq!(job_in_db.status, JobStatus::Completed); +/// Tests `process_job` function when job is already existing in the db and job status is either +/// `Created` or `VerificationFailed`. +#[rstest] +#[case(JobType::SnosRun, JobStatus::Created)] +#[case(JobType::DataSubmission, JobStatus::VerificationFailed("".to_string()))] +#[tokio::test] +async fn process_job_with_job_exists_in_db_and_valid_job_processing_status_works( + #[case] job_type: JobType, + #[case] job_status: JobStatus, +) { + let job_item = build_job_item_by_type_and_status(job_type.clone(), job_status.clone(), "1".to_string()); + + // Building config + TestConfigBuilder::new().build().await; + let config = config().await; + let database_client = config.database(); + + let mut job_handler = MockJob::new(); + + // Creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + // Expecting process job function in job processor to return the external ID. + job_handler.expect_process_job().times(1).returning(move |_, _| Ok("0xbeef".to_string())); + job_handler.expect_verification_polling_delay_seconds().return_const(1u64); + + // Mocking the `get_job_handler` call in create_job function. + let job_handler: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + ctx.expect().times(1).with(eq(job_type.clone())).returning(move |_| Arc::clone(&job_handler)); + + assert!(process_job(job_item.id).await.is_ok()); + // Getting the updated job. + let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + // checking if job_status is updated in db + assert_eq!(updated_job.status, JobStatus::PendingVerification); + assert_eq!(updated_job.external_id, ExternalId::String(Box::from("0xbeef"))); + assert_eq!(updated_job.metadata.get(JOB_PROCESS_ATTEMPT_METADATA_KEY).unwrap(), "1"); + + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; + + // Queue checks + let consumed_messages = + config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); + let consumed_message_payload: MessagePayloadType = consumed_messages.payload_serde_json().unwrap().unwrap(); + assert_eq!(consumed_message_payload.id, job_item.id); +} - // Queue checks. - let consumed_messages = - config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap_err(); - assert!(matches!(consumed_messages, QueueError::NoData)); - } +/// Tests `process_job` function when job is already existing in the db and job status is not +/// `Created` or `VerificationFailed`. +#[rstest] +#[tokio::test] +async fn process_job_with_job_exists_in_db_with_invalid_job_processing_status_errors() { + // Creating a job with Completed status which is invalid processing. 
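+    // (per the doc above, only `Created` or `VerificationFailed` jobs are eligible for processing)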
+ let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Completed, "1".to_string()); + + // building config + TestConfigBuilder::new().build().await; + let config = config().await; + let database_client = config.database(); + + // creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + + assert!(process_job(job_item.id).await.is_err()); + + let job_in_db = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + // Job should be untouched in db. + assert_eq!(job_in_db.status, JobStatus::Completed); + assert_eq!(job_in_db.version, 0); + assert_eq!(job_in_db.metadata.get(JOB_PROCESS_ATTEMPT_METADATA_KEY).unwrap(), "0"); + + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; + + // Queue checks. + let consumed_messages = + config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap_err(); + assert_matches!(consumed_messages, QueueError::NoData); +} - /// Tests `process_job` function when job is not in the db - /// This test should fail - #[rstest] - #[tokio::test] - async fn process_job_handler_job_does_not_exists_in_db() { - // Creating a valid job which is not existing in the db. - let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "1".to_string()); +/// Tests `process_job` function when job is not in the db +/// This test should fail +#[rstest] +#[tokio::test] +async fn process_job_job_does_not_exists_in_db_works() { + // Creating a valid job which is not existing in the db. + let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "1".to_string()); - // building config - TestConfigBuilder::new().build().await; - let config = config().await; + // building config + TestConfigBuilder::new().build().await; + let config = config().await; - let _ = process_job(job_item.id).await.is_err(); + assert!(process_job(job_item.id).await.is_err()); - // Queue checks. - let consumed_messages = - config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap_err(); - assert!(matches!(consumed_messages, QueueError::NoData)); - } + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; - /// Tests `process_job` function when 2 workers try to process the same job. - /// This test should fail because once the job is locked for processing on one - /// worker it should not be accessed by another worker and should throw an error - /// when updating the job status. - #[rstest] - #[tokio::test] - async fn process_job_two_workers_process_same_job() { - let mut job_handler = MockJob::new(); - // Expecting process job function in job processor to return the external ID. - job_handler.expect_process_job().times(1).returning(move |_, _| Ok("0xbeef".to_string())); - job_handler.expect_verification_polling_delay_seconds().return_const(1u64); - - // Mocking the `get_job_handler` call in create_job function. 
- let job_handler: Arc> = Arc::new(Box::new(job_handler)); - let ctx = mock_factory::get_job_handler_context(); - ctx.expect().times(1).with(eq(JobType::SnosRun)).returning(move |_| Arc::clone(&job_handler)); - - // building config - TestConfigBuilder::new().build().await; - let config = config().await; - let db_client = config.database(); - - let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "1".to_string()); - - // Creating the job in the db - db_client.create_job(job_item.clone()).await.unwrap(); - - // Simulating the two workers - let worker_1 = tokio::spawn(async move { process_job(job_item.id).await }); - let worker_2 = tokio::spawn(async move { process_job(job_item.id).await }); - - // waiting for workers to complete the processing - let (result_1, result_2) = tokio::join!(worker_1, worker_2); - - assert_ne!( - result_1.unwrap().is_ok(), - result_2.unwrap().is_ok(), - "One worker should succeed and the other should fail" - ); - - // Waiting for 5 secs for job to be updated in the db - sleep(Duration::from_secs(5)).await; - - let final_job_in_db = db_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - assert_eq!(final_job_in_db.status, JobStatus::PendingVerification); - } + // Queue checks. + let consumed_messages = + config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap_err(); + assert_matches!(consumed_messages, QueueError::NoData); +} - /// Tests `verify_job` function when job is having expected status - /// and returns a `Verified` verification status. - #[rstest] - #[tokio::test] - async fn verify_job_handler_with_expected_job_status_and_verified_status_return() { - let job_item = - build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); - - // building config - TestConfigBuilder::new().build().await; - - let config = config().await; - let database_client = config.database(); - let mut job_handler = MockJob::new(); - - // creating job in database - database_client.create_job(job_item.clone()).await.unwrap(); - // expecting process job function in job processor to return the external ID - job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Verified)); - job_handler.expect_max_process_attempts().returning(move || 2u64); - - let job_handler: Arc> = Arc::new(Box::new(job_handler)); - let ctx = mock_factory::get_job_handler_context(); - // Mocking the `get_job_handler` call in create_job function. - ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); - - let _ = verify_job(job_item.id).await.is_ok(); - - // DB checks. - let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - assert_eq!(updated_job.status, JobStatus::Completed); - - // Queue checks. - let consumed_messages_verification_queue = - config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap_err(); - assert!(matches!(consumed_messages_verification_queue, QueueError::NoData)); - let consumed_messages_processing_queue = - config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap_err(); - assert!(matches!(consumed_messages_processing_queue, QueueError::NoData)); - } +/// Tests `process_job` function when 2 workers try to process the same job. 
+/// This test should fail because once the job is locked for processing on one +/// worker it should not be accessed by another worker and should throw an error +/// when updating the job status. +#[rstest] +#[tokio::test] +async fn process_job_two_workers_process_same_job_works() { + let mut job_handler = MockJob::new(); + // Expecting process job function in job processor to return the external ID. + job_handler.expect_process_job().times(1).returning(move |_, _| Ok("0xbeef".to_string())); + job_handler.expect_verification_polling_delay_seconds().return_const(1u64); + + // Mocking the `get_job_handler` call in create_job function. + let job_handler: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + ctx.expect().times(1).with(eq(JobType::SnosRun)).returning(move |_| Arc::clone(&job_handler)); + + // building config + TestConfigBuilder::new().build().await; + let config = config().await; + let db_client = config.database(); + + let job_item = build_job_item_by_type_and_status(JobType::SnosRun, JobStatus::Created, "1".to_string()); + + // Creating the job in the db + db_client.create_job(job_item.clone()).await.unwrap(); + + // Simulating the two workers + let worker_1 = tokio::spawn(async move { process_job(job_item.id).await }); + let worker_2 = tokio::spawn(async move { process_job(job_item.id).await }); + + // waiting for workers to complete the processing + let (result_1, result_2) = tokio::join!(worker_1, worker_2); + + assert_ne!( + result_1.unwrap().is_ok(), + result_2.unwrap().is_ok(), + "One worker should succeed and the other should fail" + ); + + // Waiting for 5 secs for job to be updated in the db + sleep(Duration::from_secs(5)).await; + + let final_job_in_db = db_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(final_job_in_db.status, JobStatus::PendingVerification); +} - /// Tests `verify_job` function when job is having expected status - /// and returns a `Rejected` verification status. - #[rstest] - #[tokio::test] - async fn verify_job_handler_with_expected_job_status_and_rejected_status_return_and_adds_process_to_job_queue() { - let job_item = - build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); - - // building config - TestConfigBuilder::new().build().await; - - let config = config().await; - let database_client = config.database(); - let mut job_handler = MockJob::new(); - - // creating job in database - database_client.create_job(job_item.clone()).await.unwrap(); - job_handler - .expect_verify_job() - .times(1) - .returning(move |_, _| Ok(JobVerificationStatus::Rejected("".to_string()))); - job_handler.expect_max_process_attempts().returning(move || 2u64); - - let job_handler: Arc> = Arc::new(Box::new(job_handler)); - let ctx = mock_factory::get_job_handler_context(); - // Mocking the `get_job_handler` call in create_job function. - ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); - - let _ = verify_job(job_item.id).await.is_ok(); - - // DB checks. - let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - assert_eq!(updated_job.status, JobStatus::VerificationFailed("".to_string())); - - // Waiting for 5 secs for message to be passed into the queue - sleep(Duration::from_secs(5)).await; - - // Queue checks. 
- let consumed_messages = - config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); - let consumed_message_payload: MessagePayloadType = consumed_messages.payload_serde_json().unwrap().unwrap(); - assert_eq!(consumed_message_payload.id, job_item.id); - } +/// Tests `verify_job` function when job is having expected status +/// and returns a `Verified` verification status. +#[rstest] +#[tokio::test] +async fn verify_job_with_verified_status_works() { + let job_item = + build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); + + // building config + TestConfigBuilder::new().build().await; + + let config = config().await; + let database_client = config.database(); + let mut job_handler = MockJob::new(); + + // creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + // expecting process job function in job processor to return the external ID + job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Verified)); + job_handler.expect_max_process_attempts().returning(move || 2u64); + + let job_handler: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + // Mocking the `get_job_handler` call in create_job function. + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); + + assert!(verify_job(job_item.id).await.is_ok()); + + // DB checks. + let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(updated_job.status, JobStatus::Completed); + + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; + + // Queue checks. + let consumed_messages_verification_queue = + config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap_err(); + assert_matches!(consumed_messages_verification_queue, QueueError::NoData); + let consumed_messages_processing_queue = + config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap_err(); + assert_matches!(consumed_messages_processing_queue, QueueError::NoData); +} - /// Tests `verify_job` function when job is having expected status - /// and returns a `Rejected` verification status but doesn't add - /// the job to process queue because of maximum attempts reached. - #[rstest] - #[tokio::test] - async fn verify_job_handler_with_expected_job_status_and_rejected_status_return() { - let mut job_item = - build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); - - // increasing JOB_VERIFICATION_ATTEMPT_METADATA_KEY to simulate max. attempts reached. 
- let metadata = increment_key_in_metadata(&job_item.metadata, JOB_PROCESS_ATTEMPT_METADATA_KEY).unwrap(); - job_item.metadata = metadata; - - // building config - TestConfigBuilder::new().build().await; - - let config = config().await; - let database_client = config.database(); - let mut job_handler = MockJob::new(); - - // creating job in database - database_client.create_job(job_item.clone()).await.unwrap(); - // expecting process job function in job processor to return the external ID - job_handler - .expect_verify_job() - .times(1) - .returning(move |_, _| Ok(JobVerificationStatus::Rejected("".to_string()))); - job_handler.expect_max_process_attempts().returning(move || 1u64); - - let job_handler: Arc> = Arc::new(Box::new(job_handler)); - let ctx = mock_factory::get_job_handler_context(); - // Mocking the `get_job_handler` call in create_job function. - ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); - - let _ = verify_job(job_item.id).await.is_ok(); - - // DB checks. - let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - assert_eq!(updated_job.status, JobStatus::VerificationFailed("".to_string())); - - // Queue checks. - let consumed_messages_processing_queue = - config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap_err(); - assert!(matches!(consumed_messages_processing_queue, QueueError::NoData)); - } +/// Tests `verify_job` function when job is having expected status +/// and returns a `Rejected` verification status. +#[rstest] +#[tokio::test] +async fn verify_job_with_rejected_status_adds_to_queue_works() { + let job_item = + build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); - /// Tests `verify_job` function when job is having expected status - /// and returns a `Pending` verification status. - #[rstest] - #[tokio::test] - async fn verify_job_handler_with_expected_job_status_and_pending_status_return_and_adds_job_to_verification_queue() - { - let job_item = - build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); - - // building config - TestConfigBuilder::new().build().await; - - let config = config().await; - let database_client = config.database(); - let mut job_handler = MockJob::new(); - - // creating job in database - database_client.create_job(job_item.clone()).await.unwrap(); - // expecting process job function in job processor to return the external ID - job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Pending)); - job_handler.expect_max_verification_attempts().returning(move || 2u64); - job_handler.expect_verification_polling_delay_seconds().returning(move || 2u64); - - let job_handler: Arc> = Arc::new(Box::new(job_handler)); - let ctx = mock_factory::get_job_handler_context(); - // Mocking the `get_job_handler` call in create_job function. - ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); - - let _ = verify_job(job_item.id).await.is_ok(); - - // DB checks. 
- let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - assert_eq!(updated_job.metadata.get(JOB_VERIFICATION_ATTEMPT_METADATA_KEY).unwrap(), "1"); - - // Waiting for 5 secs for message to be passed into the queue - sleep(Duration::from_secs(5)).await; - - // Queue checks - let consumed_messages = - config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); - let consumed_message_payload: MessagePayloadType = consumed_messages.payload_serde_json().unwrap().unwrap(); - assert_eq!(consumed_message_payload.id, job_item.id); - } + // building config + TestConfigBuilder::new().build().await; - /// Tests `verify_job` function when job is having expected status - /// and returns a `Pending` verification status but doesn't add - /// the job to process queue because of maximum attempts reached. - #[rstest] - #[tokio::test] - async fn verify_job_handler_with_expected_job_status_and_pending_status_return() { - let mut job_item = - build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); - - // increasing JOB_VERIFICATION_ATTEMPT_METADATA_KEY to simulate max. attempts reached. - let metadata = increment_key_in_metadata(&job_item.metadata, JOB_VERIFICATION_ATTEMPT_METADATA_KEY).unwrap(); - job_item.metadata = metadata; - - // building config - TestConfigBuilder::new().build().await; - - let config = config().await; - let database_client = config.database(); - let mut job_handler = MockJob::new(); - - // creating job in database - database_client.create_job(job_item.clone()).await.unwrap(); - // expecting process job function in job processor to return the external ID - job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Pending)); - job_handler.expect_max_verification_attempts().returning(move || 1u64); - job_handler.expect_verification_polling_delay_seconds().returning(move || 2u64); - - let job_handler: Arc> = Arc::new(Box::new(job_handler)); - let ctx = mock_factory::get_job_handler_context(); - // Mocking the `get_job_handler` call in create_job function. - ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); - - let _ = verify_job(job_item.id).await.is_ok(); - - // DB checks. - let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - assert_eq!(updated_job.status, JobStatus::VerificationTimeout); - - // Queue checks. - let consumed_messages_verification_queue = - config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap_err(); - assert!(matches!(consumed_messages_verification_queue, QueueError::NoData)); - } + let config = config().await; + let database_client = config.database(); + let mut job_handler = MockJob::new(); + + // creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Rejected("".to_string()))); + job_handler.expect_max_process_attempts().returning(move || 2u64); + + let job_handler: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + // Mocking the `get_job_handler` call in create_job function. + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); + + assert!(verify_job(job_item.id).await.is_ok()); + + // DB checks. 
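+    // (a `Rejected` verification should flip the job to `VerificationFailed`)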
+ let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(updated_job.status, JobStatus::VerificationFailed("".to_string())); + + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; + + // Queue checks. + let consumed_messages = config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap(); + let consumed_message_payload: MessagePayloadType = consumed_messages.payload_serde_json().unwrap().unwrap(); + assert_eq!(consumed_message_payload.id, job_item.id); +} + +/// Tests `verify_job` function when job is having expected status +/// and returns a `Rejected` verification status but doesn't add +/// the job to process queue because of maximum attempts reached. +#[rstest] +#[tokio::test] +async fn verify_job_with_rejected_status_works() { + let mut job_item = + build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); + + // increasing JOB_VERIFICATION_ATTEMPT_METADATA_KEY to simulate max. attempts reached. + let metadata = increment_key_in_metadata(&job_item.metadata, JOB_PROCESS_ATTEMPT_METADATA_KEY).unwrap(); + job_item.metadata = metadata; + + // building config + TestConfigBuilder::new().build().await; + + let config = config().await; + let database_client = config.database(); + let mut job_handler = MockJob::new(); + + // creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + // expecting process job function in job processor to return the external ID + job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Rejected("".to_string()))); + job_handler.expect_max_process_attempts().returning(move || 1u64); + + let job_handler: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + // Mocking the `get_job_handler` call in create_job function. + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); + + assert!(verify_job(job_item.id).await.is_ok()); + + // DB checks. + let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(updated_job.status, JobStatus::VerificationFailed("".to_string())); + assert_eq!(updated_job.metadata.get(JOB_PROCESS_ATTEMPT_METADATA_KEY).unwrap(), "1"); + + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; + + // Queue checks. + let consumed_messages_processing_queue = + config.queue().consume_message_from_queue(JOB_PROCESSING_QUEUE.to_string()).await.unwrap_err(); + assert_matches!(consumed_messages_processing_queue, QueueError::NoData); +} + +/// Tests `verify_job` function when job is having expected status +/// and returns a `Pending` verification status. 
+#[rstest] +#[tokio::test] +async fn verify_job_with_pending_status_adds_to_queue_works() { + let job_item = + build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); + + // building config + TestConfigBuilder::new().build().await; + + let config = config().await; + let database_client = config.database(); + let mut job_handler = MockJob::new(); + + // creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + // expecting process job function in job processor to return the external ID + job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Pending)); + job_handler.expect_max_verification_attempts().returning(move || 2u64); + job_handler.expect_verification_polling_delay_seconds().returning(move || 2u64); + + let job_handler: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + // Mocking the `get_job_handler` call in create_job function. + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); + + assert!(verify_job(job_item.id).await.is_ok()); + + // DB checks. + let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); + assert_eq!(updated_job.metadata.get(JOB_VERIFICATION_ATTEMPT_METADATA_KEY).unwrap(), "1"); + assert_eq!(updated_job.status, JobStatus::PendingVerification); + + // Waiting for 5 secs for message to be passed into the queue + sleep(Duration::from_secs(5)).await; + + // Queue checks + let consumed_messages = + config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap(); + let consumed_message_payload: MessagePayloadType = consumed_messages.payload_serde_json().unwrap().unwrap(); + assert_eq!(consumed_message_payload.id, job_item.id); +} + +/// Tests `verify_job` function when job is having expected status +/// and returns a `Pending` verification status but doesn't add +/// the job to process queue because of maximum attempts reached. +#[rstest] +#[tokio::test] +async fn verify_job_with_pending_status_works() { + let mut job_item = + build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string()); + + // increasing JOB_VERIFICATION_ATTEMPT_METADATA_KEY to simulate max. attempts reached. + let metadata = increment_key_in_metadata(&job_item.metadata, JOB_VERIFICATION_ATTEMPT_METADATA_KEY).unwrap(); + job_item.metadata = metadata; + + // building config + TestConfigBuilder::new().build().await; + + let config = config().await; + let database_client = config.database(); + let mut job_handler = MockJob::new(); + + // creating job in database + database_client.create_job(job_item.clone()).await.unwrap(); + // expecting process job function in job processor to return the external ID + job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Pending)); + job_handler.expect_max_verification_attempts().returning(move || 1u64); + job_handler.expect_verification_polling_delay_seconds().returning(move || 2u64); + + let job_handler: Arc> = Arc::new(Box::new(job_handler)); + let ctx = mock_factory::get_job_handler_context(); + // Mocking the `get_job_handler` call in create_job function. + ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler)); + + assert!(verify_job(job_item.id).await.is_ok()); + + // DB checks. 
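+    // (max verification attempts were already exhausted, so the job should land in `VerificationTimeout` rather than re-queue)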
+
+/// Tests the `verify_job` function when the job has the expected status
+/// and returns a `Pending` verification status, but does not re-add the
+/// job to the verification queue because the maximum number of attempts has been reached.
+#[rstest]
+#[tokio::test]
+async fn verify_job_with_pending_status_works() {
+    let mut job_item =
+        build_job_item_by_type_and_status(JobType::DataSubmission, JobStatus::PendingVerification, "1".to_string());
+
+    // incrementing JOB_VERIFICATION_ATTEMPT_METADATA_KEY to simulate max. attempts reached
+    let metadata = increment_key_in_metadata(&job_item.metadata, JOB_VERIFICATION_ATTEMPT_METADATA_KEY).unwrap();
+    job_item.metadata = metadata;
+
+    // building config
+    TestConfigBuilder::new().build().await;
+
+    let config = config().await;
+    let database_client = config.database();
+    let mut job_handler = MockJob::new();
+
+    // creating job in database
+    database_client.create_job(job_item.clone()).await.unwrap();
+    // expecting the `verify_job` call on the job handler to return a `Pending` status
+    job_handler.expect_verify_job().times(1).returning(move |_, _| Ok(JobVerificationStatus::Pending));
+    job_handler.expect_max_verification_attempts().returning(move || 1u64);
+    job_handler.expect_verification_polling_delay_seconds().returning(move || 2u64);
+
+    let job_handler: Arc<Box<dyn Job>> = Arc::new(Box::new(job_handler));
+    let ctx = mock_factory::get_job_handler_context();
+    // Mocking the `get_job_handler` call inside the `verify_job` function.
+    ctx.expect().times(1).with(eq(JobType::DataSubmission)).returning(move |_| Arc::clone(&job_handler));
+
+    assert!(verify_job(job_item.id).await.is_ok());
+
+    // DB checks.
+    let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap();
+    assert_eq!(updated_job.status, JobStatus::VerificationTimeout);
+    assert_eq!(updated_job.metadata.get(JOB_VERIFICATION_ATTEMPT_METADATA_KEY).unwrap(), "1");
+
+    // Waiting for 5 secs for message to be passed into the queue
+    sleep(Duration::from_secs(5)).await;
+
+    // Queue checks.
+    let consumed_messages_verification_queue =
+        config.queue().consume_message_from_queue(JOB_VERIFICATION_QUEUE.to_string()).await.unwrap_err();
+    assert_matches!(consumed_messages_verification_queue, QueueError::NoData);
+}
 
-    fn build_job_item_by_type_and_status(job_type: JobType, job_status: JobStatus, internal_id: String) -> JobItem {
-        let mut hashmap: HashMap<String, String> = HashMap::new();
-        hashmap.insert(JOB_PROCESS_ATTEMPT_METADATA_KEY.to_string(), "0".to_string());
-        hashmap.insert(JOB_VERIFICATION_ATTEMPT_METADATA_KEY.to_string(), "0".to_string());
-        JobItem {
-            id: Uuid::new_v4(),
-            internal_id,
-            job_type,
-            status: job_status,
-            external_id: ExternalId::Number(0),
-            metadata: hashmap,
-            version: 0,
-        }
+fn build_job_item_by_type_and_status(job_type: JobType, job_status: JobStatus, internal_id: String) -> JobItem {
+    let mut hashmap: HashMap<String, String> = HashMap::new();
+    hashmap.insert(JOB_PROCESS_ATTEMPT_METADATA_KEY.to_string(), "0".to_string());
+    hashmap.insert(JOB_VERIFICATION_ATTEMPT_METADATA_KEY.to_string(), "0".to_string());
+    JobItem {
+        id: Uuid::new_v4(),
+        internal_id,
+        job_type,
+        status: job_status,
+        external_id: ExternalId::Number(0),
+        metadata: hashmap,
+        version: 0,
     }
 }
diff --git a/crates/orchestrator/src/tests/workers/proving/mod.rs b/crates/orchestrator/src/tests/workers/proving/mod.rs
index c8ee61f6..4bf0ef75 100644
--- a/crates/orchestrator/src/tests/workers/proving/mod.rs
+++ b/crates/orchestrator/src/tests/workers/proving/mod.rs
@@ -97,13 +97,13 @@ async fn test_proving_worker(#[case] incomplete_runs: bool) -> Result<(), Box<dyn Error>> {
         .returning(move |_| Ok(job_item.clone()));
     }
 
-    let y: Arc<Box<dyn Job>> = Arc::new(Box::new(job_handler));
+    let job_handler: Arc<Box<dyn Job>> = Arc::new(Box::new(job_handler));
     let ctx = mock_factory::get_job_handler_context();
     // Mocking the `get_job_handler` call in create_job function.
     if incomplete_runs {
-        ctx.expect().times(4).with(eq(JobType::ProofCreation)).returning(move |_| Arc::clone(&y));
+        ctx.expect().times(4).with(eq(JobType::ProofCreation)).returning(move |_| Arc::clone(&job_handler));
     } else {
-        ctx.expect().times(5).with(eq(JobType::ProofCreation)).returning(move |_| Arc::clone(&y));
+        ctx.expect().times(5).with(eq(JobType::ProofCreation)).returning(move |_| Arc::clone(&job_handler));
     }
 
     let proving_worker = ProvingWorker {};
diff --git a/crates/orchestrator/src/tests/workers/snos/mod.rs b/crates/orchestrator/src/tests/workers/snos/mod.rs
index cc6a6401..905894f3 100644
--- a/crates/orchestrator/src/tests/workers/snos/mod.rs
+++ b/crates/orchestrator/src/tests/workers/snos/mod.rs
@@ -71,10 +71,10 @@ async fn test_snos_worker(#[case] db_val: bool) -> Result<(), Box<dyn Error>> {
         .returning(move |_| Ok(job_item.clone()));
     }
 
-    let y: Arc<Box<dyn Job>> = Arc::new(Box::new(job_handler));
+    let job_handler: Arc<Box<dyn Job>> = Arc::new(Box::new(job_handler));
     let ctx = mock_factory::get_job_handler_context();
     // Mocking the `get_job_handler` call in create_job function.
- ctx.expect().times(5).with(eq(JobType::SnosRun)).returning(move |_| Arc::clone(&y)); + ctx.expect().times(5).with(eq(JobType::SnosRun)).returning(move |_| Arc::clone(&job_handler)); // Queue function call simulations queue From b3de1a23ac7e31cd4d5a527f1630939ed234d389 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Sat, 3 Aug 2024 16:42:48 +0530 Subject: [PATCH 26/44] feat : lint fixes --- Cargo.toml | 2 +- crates/da-clients/ethereum/src/config.rs | 4 +-- crates/da-clients/ethereum/src/lib.rs | 4 +-- crates/orchestrator/Cargo.toml | 2 +- crates/orchestrator/src/config.rs | 3 -- .../src/data_storage/aws_s3/mod.rs | 3 -- .../orchestrator/src/database/mongodb/mod.rs | 11 ++++-- crates/orchestrator/src/jobs/da_job/mod.rs | 7 +--- .../src/jobs/state_update_job/mod.rs | 13 ++----- .../src/jobs/state_update_job/utils.rs | 3 +- crates/orchestrator/src/tests/common/mod.rs | 2 +- crates/orchestrator/src/tests/config.rs | 9 +---- .../src/tests/data_storage/mod.rs | 5 --- crates/orchestrator/src/tests/jobs/mod.rs | 2 +- .../src/tests/workers/utils/mod.rs | 8 ++--- crates/orchestrator/src/workers/proving.rs | 6 ++-- .../clients/interfaces/validity_interface.rs | 16 +++++---- .../ethereum/src/clients/validity.rs | 4 +-- .../ethereum/src/conversion.rs | 4 +-- crates/settlement-clients/ethereum/src/lib.rs | 36 +++++++++---------- .../settlement-clients/ethereum/src/types.rs | 12 ++++--- crates/settlement-clients/starknet/src/lib.rs | 28 ++++++++------- 22 files changed, 80 insertions(+), 104 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 594e904d..b715e867 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,4 +87,4 @@ utils = { path = "crates/utils" } prover-client-interface = { path = "crates/prover-services/prover-client-interface" } gps-fact-checker = { path = "crates/prover-services/gps-fact-checker" } sharp-service = { path = "crates/prover-services/sharp-service" } -orchestrator = { path = "crates/orchestrator" } \ No newline at end of file +orchestrator = { path = "crates/orchestrator" } diff --git a/crates/da-clients/ethereum/src/config.rs b/crates/da-clients/ethereum/src/config.rs index 34370036..b50604b2 100644 --- a/crates/da-clients/ethereum/src/config.rs +++ b/crates/da-clients/ethereum/src/config.rs @@ -1,8 +1,6 @@ use std::str::FromStr; -use alloy::network::Ethereum; -use alloy::providers::ProviderBuilder; -use alloy::rpc::client::RpcClient; +use alloy::{network::Ethereum, providers::ProviderBuilder, rpc::client::RpcClient}; use async_trait::async_trait; use da_client_interface::DaConfig; use url::Url; diff --git a/crates/da-clients/ethereum/src/lib.rs b/crates/da-clients/ethereum/src/lib.rs index e48f5ca7..68c933bd 100644 --- a/crates/da-clients/ethereum/src/lib.rs +++ b/crates/da-clients/ethereum/src/lib.rs @@ -20,8 +20,8 @@ pub struct EthereumDaClient { #[async_trait] impl DaClient for EthereumDaClient { async fn publish_state_diff(&self, _state_diff: Vec>, _to: &[u8; 32]) -> Result { - // Here in case of ethereum we are not publishing the state diff because we are doing it all - // together in update_state job. So we don't need to send the blob here. + // Here in case of ethereum we are not publishing the state diff because we are doing it all together in update_state job. + // So we don't need to send the blob here. 
Ok("NA".to_string()) } diff --git a/crates/orchestrator/Cargo.toml b/crates/orchestrator/Cargo.toml index d4d36695..32e06e5b 100644 --- a/crates/orchestrator/Cargo.toml +++ b/crates/orchestrator/Cargo.toml @@ -18,8 +18,8 @@ assert_matches = "1.5.0" async-std = "1.12.0" async-trait = { workspace = true } aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -aws-sdk-sqs = "1.36.0" aws-sdk-s3 = { version = "1.38.0", features = ["behavior-version-latest"] } +aws-sdk-sqs = "1.36.0" axum = { workspace = true, features = ["macros"] } axum-macros = { workspace = true } bincode = { workspace = true } diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs index 3bc3f9ea..447b25ec 100644 --- a/crates/orchestrator/src/config.rs +++ b/crates/orchestrator/src/config.rs @@ -19,9 +19,6 @@ use utils::env_utils::get_env_var_or_panic; use utils::settings::default::DefaultSettingsProvider; use utils::settings::SettingsProvider; -use crate::data_storage::aws_s3::config::AWSS3Config; -use crate::data_storage::aws_s3::AWSS3; -use crate::data_storage::{DataStorage, DataStorageConfig}; use crate::database::mongodb::config::MongoDbConfig; use crate::database::mongodb::MongoDb; use crate::database::{Database, DatabaseConfig}; diff --git a/crates/orchestrator/src/data_storage/aws_s3/mod.rs b/crates/orchestrator/src/data_storage/aws_s3/mod.rs index a5ef2b0a..48707cc0 100644 --- a/crates/orchestrator/src/data_storage/aws_s3/mod.rs +++ b/crates/orchestrator/src/data_storage/aws_s3/mod.rs @@ -7,9 +7,6 @@ use aws_sdk_s3::Client; use bytes::Bytes; use color_eyre::Result; -use crate::data_storage::aws_s3::config::AWSS3Config; -use crate::data_storage::DataStorage; - /// Module for AWS S3 config structs and implementations pub mod config; diff --git a/crates/orchestrator/src/database/mongodb/mod.rs b/crates/orchestrator/src/database/mongodb/mod.rs index 8cc6e341..3226f12a 100644 --- a/crates/orchestrator/src/database/mongodb/mod.rs +++ b/crates/orchestrator/src/database/mongodb/mod.rs @@ -4,9 +4,14 @@ use async_std::stream::StreamExt; use async_trait::async_trait; use color_eyre::eyre::eyre; use color_eyre::Result; -use mongodb::bson::{doc, Bson, Document}; -use mongodb::options::{ClientOptions, FindOneOptions, ServerApi, ServerApiVersion, UpdateOptions}; -use mongodb::{bson, Client, Collection}; +use mongodb::bson::{Bson, Document}; +use mongodb::options::{FindOneOptions, UpdateOptions}; +use mongodb::{ + bson, + bson::doc, + options::{ClientOptions, ServerApi, ServerApiVersion}, + Client, Collection, +}; use uuid::Uuid; use crate::database::mongodb::config::MongoDbConfig; diff --git a/crates/orchestrator/src/jobs/da_job/mod.rs b/crates/orchestrator/src/jobs/da_job/mod.rs index 1cd5d33a..0581139f 100644 --- a/crates/orchestrator/src/jobs/da_job/mod.rs +++ b/crates/orchestrator/src/jobs/da_job/mod.rs @@ -319,12 +319,7 @@ fn da_word(class_flag: bool, nonce_change: Option, num_changes: u6 // checking for nonce here if let Some(_new_nonce) = nonce_change { - let bytes: [u8; 32] = nonce_change - .expect( - "Not able to convert the nonce_change var into [u8; 32] type. Possible Error : Improper parameter \ - length.", - ) - .to_bytes_be(); + let bytes: [u8; 32] = nonce_change.expect("Not able to convert the nonce_change var into [u8; 32] type. 
Possible Error : Improper parameter length.").to_bytes_be(); let biguint = BigUint::from_bytes_be(&bytes); let binary_string_local = format!("{:b}", biguint); let padded_binary_string = format!("{:0>64}", binary_string_local); diff --git a/crates/orchestrator/src/jobs/state_update_job/mod.rs b/crates/orchestrator/src/jobs/state_update_job/mod.rs index 2e6167d6..dc460d1b 100644 --- a/crates/orchestrator/src/jobs/state_update_job/mod.rs +++ b/crates/orchestrator/src/jobs/state_update_job/mod.rs @@ -74,20 +74,13 @@ impl Job for StateUpdateJob { self.insert_attempts_into_metadata(job, &attempt_no, &sent_tx_hashes); // external_id returned corresponds to the last block number settled - Ok(block_numbers - .last() - .expect( - "Last number in block_numbers array returned as None. Possible Error : Delay in job processing or \ - Failed job execution.", - ) - .to_string()) + Ok(block_numbers.last().expect("Last number in block_numbers array returned as None. Possible Error : Delay in job processing or Failed job execution.").to_string()) } /// Returns the status of the passed job. /// Status will be verified if: /// 1. the last settlement tx hash is successful, - /// 2. the expected last settled block from our configuration is indeed the one found in the - /// provider. + /// 2. the expected last settled block from our configuration is indeed the one found in the provider. async fn verify_job(&self, config: &Config, job: &mut JobItem) -> Result { let attempt_no = job.metadata.get(JOB_PROCESS_ATTEMPT_METADATA_KEY).expect("Could not find current attempt number.").clone(); @@ -120,7 +113,7 @@ impl Job for StateUpdateJob { return Ok(new_status.into()); } SettlementVerificationStatus::Pending => { - return Err(eyre!("Tx {tx_hash} should not be pending.")); + return Err(eyre!("Tx {tx_hash} should not be pending.")) } SettlementVerificationStatus::Verified => {} } diff --git a/crates/orchestrator/src/jobs/state_update_job/utils.rs b/crates/orchestrator/src/jobs/state_update_job/utils.rs index c696b06c..1d92c9a4 100644 --- a/crates/orchestrator/src/jobs/state_update_job/utils.rs +++ b/crates/orchestrator/src/jobs/state_update_job/utils.rs @@ -1,7 +1,6 @@ -use color_eyre::eyre::eyre; - use crate::config::config; use crate::constants::BLOB_DATA_FILE_NAME; +use color_eyre::eyre::eyre; /// Fetching the blob data (stored in remote storage during DA job) for a particular block pub async fn fetch_blob_data_for_block(block_number: u64) -> color_eyre::Result>> { diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index ef0d391c..1bd70d4c 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -123,7 +123,7 @@ async fn get_sqs_client() -> aws_sdk_sqs::Client { #[derive(Deserialize, Debug)] pub struct MessagePayloadType { pub(crate) id: Uuid, - +} pub async fn get_storage_client() -> Box { Box::new(AWSS3::new(AWSS3ConfigType::WithEndpoint(S3LocalStackConfig::new_from_env())).await) } diff --git a/crates/orchestrator/src/tests/config.rs b/crates/orchestrator/src/tests/config.rs index e67a7937..e147d8a6 100644 --- a/crates/orchestrator/src/tests/config.rs +++ b/crates/orchestrator/src/tests/config.rs @@ -11,19 +11,12 @@ use starknet::providers::{JsonRpcClient, Url}; use utils::env_utils::get_env_var_or_panic; use utils::settings::default::DefaultSettingsProvider; -use crate::config::{ - build_da_client, build_prover_service, build_settlement_client, build_storage_client, config_force_init, Config, -}; -use 
crate::data_storage::DataStorage; use crate::database::mongodb::config::MongoDbConfig; use crate::database::mongodb::MongoDb; use crate::database::{Database, DatabaseConfig}; use crate::queue::sqs::SqsQueue; use crate::queue::QueueProvider; -use crate::tests::common::{create_sqs_queues, drop_database}; - -use crate::tests::common::{drop_database, get_storage_client}; -use httpmock::MockServer; +use crate::tests::common::{create_sqs_queues, drop_database, get_storage_client}; // Inspiration : https://rust-unofficial.github.io/patterns/patterns/creational/builder.html // TestConfigBuilder allows to heavily customise the global configs based on the test's requirement. // Eg: We want to mock only the da client and leave rest to be as it is, use mock_da_client. diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs index a2316daa..d127917a 100644 --- a/crates/orchestrator/src/tests/data_storage/mod.rs +++ b/crates/orchestrator/src/tests/data_storage/mod.rs @@ -7,11 +7,6 @@ use rstest::rstest; use serde_json::json; use utils::env_utils::get_env_var_or_panic; -use crate::data_storage::aws_s3::config::AWSS3Config; -use crate::data_storage::aws_s3::AWSS3; -use crate::data_storage::{DataStorage, DataStorageConfig}; -use crate::tests::config::TestConfigBuilder; - /// This test checks the ability to put and get data from AWS S3 using `AWSS3`. /// It puts JSON data into a test bucket and retrieves it, verifying the data /// matches what was originally uploaded. diff --git a/crates/orchestrator/src/tests/jobs/mod.rs b/crates/orchestrator/src/tests/jobs/mod.rs index 41f870d5..86e65dde 100644 --- a/crates/orchestrator/src/tests/jobs/mod.rs +++ b/crates/orchestrator/src/tests/jobs/mod.rs @@ -52,7 +52,7 @@ async fn create_job_job_does_not_exists_in_db_works() { let mut hashmap: HashMap = HashMap::new(); hashmap.insert(JOB_PROCESS_ATTEMPT_METADATA_KEY.to_string(), "0".to_string()); hashmap.insert(JOB_VERIFICATION_ATTEMPT_METADATA_KEY.to_string(), "0".to_string()); - + // Db checks. 
let job_in_db = config.database().get_job_by_id(job_item.id).await.unwrap().unwrap(); assert_eq!(job_in_db.id, job_item.id); diff --git a/crates/orchestrator/src/tests/workers/utils/mod.rs b/crates/orchestrator/src/tests/workers/utils/mod.rs index c4fcd5da..03dd0cd3 100644 --- a/crates/orchestrator/src/tests/workers/utils/mod.rs +++ b/crates/orchestrator/src/tests/workers/utils/mod.rs @@ -1,12 +1,10 @@ -use std::collections::HashMap; - -use mockall::predicate::eq; -use uuid::Uuid; - use crate::database::MockDatabase; use crate::jobs::constants::JOB_METADATA_CAIRO_PIE_PATH_KEY; use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; use crate::jobs::MockJob; +use mockall::predicate::eq; +use std::collections::HashMap; +use uuid::Uuid; pub fn get_job_item_mock_by_id(id: String, uuid: Uuid) -> JobItem { JobItem { diff --git a/crates/orchestrator/src/workers/proving.rs b/crates/orchestrator/src/workers/proving.rs index 7fcb7194..4ec85b91 100644 --- a/crates/orchestrator/src/workers/proving.rs +++ b/crates/orchestrator/src/workers/proving.rs @@ -1,11 +1,9 @@ -use std::error::Error; - -use async_trait::async_trait; - use crate::config::config; use crate::jobs::create_job; use crate::jobs::types::{JobStatus, JobType}; use crate::workers::Worker; +use async_trait::async_trait; +use std::error::Error; pub struct ProvingWorker; diff --git a/crates/settlement-clients/ethereum/src/clients/interfaces/validity_interface.rs b/crates/settlement-clients/ethereum/src/clients/interfaces/validity_interface.rs index 37bf76b0..7b8a31a1 100644 --- a/crates/settlement-clients/ethereum/src/clients/interfaces/validity_interface.rs +++ b/crates/settlement-clients/ethereum/src/clients/interfaces/validity_interface.rs @@ -1,14 +1,16 @@ use std::sync::Arc; -use alloy::network::Ethereum; -use alloy::primitives::{I256, U256}; -use alloy::providers::Provider; -use alloy::rpc::types::eth::TransactionReceipt; -use alloy::sol; -use alloy::transports::http::Http; -use alloy::transports::{RpcError, TransportErrorKind}; use async_trait::async_trait; +use alloy::{ + network::Ethereum, + primitives::{I256, U256}, + providers::Provider, + rpc::types::eth::TransactionReceipt, + sol, + transports::{http::Http, RpcError, TransportErrorKind}, +}; + use crate::types::LocalWalletSignerMiddleware; // TODO: should be moved to Zaun: diff --git a/crates/settlement-clients/ethereum/src/clients/validity.rs b/crates/settlement-clients/ethereum/src/clients/validity.rs index 575c6748..8ed89c5b 100644 --- a/crates/settlement-clients/ethereum/src/clients/validity.rs +++ b/crates/settlement-clients/ethereum/src/clients/validity.rs @@ -1,8 +1,6 @@ use std::sync::Arc; -use alloy::network::Ethereum; -use alloy::primitives::Address; -use alloy::transports::http::Http; +use alloy::{network::Ethereum, primitives::Address, transports::http::Http}; use crate::clients::interfaces::validity_interface::StarknetValidityContract; use crate::types::LocalWalletSignerMiddleware; diff --git a/crates/settlement-clients/ethereum/src/conversion.rs b/crates/settlement-clients/ethereum/src/conversion.rs index ea4ab011..c86eb89a 100644 --- a/crates/settlement-clients/ethereum/src/conversion.rs +++ b/crates/settlement-clients/ethereum/src/conversion.rs @@ -1,7 +1,7 @@ use alloy::primitives::U256; -/// Converts a `&[Vec]` to `Vec`. Each inner slice is expected to be exactly 32 bytes -/// long. Pads with zeros if any inner slice is shorter than 32 bytes. +/// Converts a `&[Vec]` to `Vec`. Each inner slice is expected to be exactly 32 bytes long. 
+/// Pads with zeros if any inner slice is shorter than 32 bytes. pub(crate) fn slice_slice_u8_to_vec_u256(slices: &[[u8; 32]]) -> Vec { slices.iter().map(|slice| slice_u8_to_u256(slice)).collect() } diff --git a/crates/settlement-clients/ethereum/src/lib.rs b/crates/settlement-clients/ethereum/src/lib.rs index ff8075b7..534edf48 100644 --- a/crates/settlement-clients/ethereum/src/lib.rs +++ b/crates/settlement-clients/ethereum/src/lib.rs @@ -3,34 +3,35 @@ pub mod config; pub mod conversion; pub mod types; -use std::fmt::Write; -use std::path::{Path, PathBuf}; -use std::str::FromStr; -use std::sync::Arc; - use alloy::consensus::{ BlobTransactionSidecar, SignableTransaction, TxEip4844, TxEip4844Variant, TxEip4844WithSidecar, TxEnvelope, }; use alloy::eips::eip2718::Encodable2718; use alloy::eips::eip2930::AccessList; use alloy::eips::eip4844::BYTES_PER_BLOB; -use alloy::network::EthereumWallet; -use alloy::primitives::{Address, Bytes, FixedBytes, B256, U256}; -use alloy::providers::{PendingTransactionConfig, Provider, ProviderBuilder}; -use alloy::rpc::types::TransactionReceipt; -use alloy::signers::local::PrivateKeySigner; +use alloy::primitives::{Bytes, FixedBytes}; +use alloy::{ + network::EthereumWallet, + primitives::{Address, B256, U256}, + providers::{PendingTransactionConfig, Provider, ProviderBuilder}, + rpc::types::TransactionReceipt, + signers::local::PrivateKeySigner, +}; use async_trait::async_trait; use c_kzg::{Blob, Bytes32, KzgCommitment, KzgProof, KzgSettings}; use color_eyre::eyre::eyre; use color_eyre::Result; -use mockall::predicate::*; -use mockall::{automock, lazy_static}; +use mockall::{automock, lazy_static, predicate::*}; use rstest::rstest; -use settlement_client_interface::{SettlementClient, SettlementVerificationStatus, SETTLEMENT_SETTINGS_NAME}; -use utils::env_utils::get_env_var_or_panic; -use utils::settings::SettingsProvider; +use std::fmt::Write; +use std::path::{Path, PathBuf}; +use std::str::FromStr; +use std::sync::Arc; use crate::clients::interfaces::validity_interface::StarknetValidityContractTrait; +use settlement_client_interface::{SettlementClient, SettlementVerificationStatus, SETTLEMENT_SETTINGS_NAME}; +use utils::{env_utils::get_env_var_or_panic, settings::SettingsProvider}; + use crate::clients::StarknetValidityContractClient; use crate::config::EthereumSettlementConfig; use crate::conversion::{slice_slice_u8_to_vec_u256, slice_u8_to_u256}; @@ -68,7 +69,7 @@ impl EthereumSettlementClient { ProviderBuilder::new().with_recommended_fillers().wallet(wallet.clone()).on_http(settlement_cfg.rpc_url), ); let core_contract_client = StarknetValidityContractClient::new( - Address::from_str(&settlement_cfg.core_contract_address).unwrap().0.into(), + Address::from_slice(settlement_cfg.core_contract_address.as_bytes()).0.into(), provider.clone(), ); @@ -245,8 +246,7 @@ async fn prepare_sidecar( fn get_txn_input_bytes(program_output: Vec<[u8; 32]>, kzg_proof: [u8; 48]) -> Bytes { let program_output_hex_string = vec_u8_32_to_hex_string(program_output); let kzg_proof_hex_string = u8_48_to_hex_string(kzg_proof); - // cast keccak "updateStateKzgDA(uint256[] calldata programOutput, bytes calldata kzgProof)" | cut - // -b 1-10 + // cast keccak "updateStateKzgDA(uint256[] calldata programOutput, bytes calldata kzgProof)" | cut -b 1-10 let function_selector = "0x1a790556"; Bytes::from(program_output_hex_string + &kzg_proof_hex_string + function_selector) diff --git a/crates/settlement-clients/ethereum/src/types.rs b/crates/settlement-clients/ethereum/src/types.rs index 
6ec5914c..3415ee0a 100644 --- a/crates/settlement-clients/ethereum/src/types.rs +++ b/crates/settlement-clients/ethereum/src/types.rs @@ -1,7 +1,11 @@ -use alloy::network::{Ethereum, EthereumWallet}; -use alloy::providers::fillers::{ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller}; -use alloy::providers::{Identity, RootProvider}; -use alloy::transports::http::{Client, Http}; +use alloy::{ + network::{Ethereum, EthereumWallet}, + providers::{ + fillers::{ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller}, + Identity, RootProvider, + }, + transports::http::{Client, Http}, +}; pub type LocalWalletSignerMiddleware = FillProvider< JoinFill< diff --git a/crates/settlement-clients/starknet/src/lib.rs b/crates/settlement-clients/starknet/src/lib.rs index 0957ea81..abcd10df 100644 --- a/crates/settlement-clients/starknet/src/lib.rs +++ b/crates/settlement-clients/starknet/src/lib.rs @@ -7,18 +7,22 @@ use async_trait::async_trait; use color_eyre::eyre::eyre; use color_eyre::Result; use lazy_static::lazy_static; -use mockall::automock; -use mockall::predicate::*; -use settlement_client_interface::{SettlementClient, SettlementVerificationStatus, SETTLEMENT_SETTINGS_NAME}; -use starknet::accounts::{Account, Call, ConnectedAccount, ExecutionEncoding, SingleOwnerAccount}; -use starknet::core::types::{ - BlockId, BlockTag, ExecutionResult, FieldElement, FunctionCall, MaybePendingTransactionReceipt, +use mockall::{automock, predicate::*}; +use starknet::accounts::ConnectedAccount; +use starknet::core::types::{ExecutionResult, MaybePendingTransactionReceipt}; +use starknet::providers::Provider; +use starknet::{ + accounts::{Account, Call, ExecutionEncoding, SingleOwnerAccount}, + core::{ + types::{BlockId, BlockTag, FieldElement, FunctionCall}, + utils::get_selector_from_name, + }, + providers::{jsonrpc::HttpTransport, JsonRpcClient}, + signers::{LocalWallet, SigningKey}, }; -use starknet::core::utils::get_selector_from_name; -use starknet::providers::jsonrpc::HttpTransport; -use starknet::providers::{JsonRpcClient, Provider}; -use starknet::signers::{LocalWallet, SigningKey}; use tokio::time::{sleep, Duration}; + +use settlement_client_interface::{SettlementClient, SettlementVerificationStatus, SETTLEMENT_SETTINGS_NAME}; use utils::env_utils::get_env_var_or_panic; use utils::settings::SettingsProvider; @@ -81,8 +85,8 @@ lazy_static! { get_selector_from_name("stateBlockNumber").expect("Invalid update state selector"); } -// TODO: Note that we already have an implementation of the appchain core contract client available -// here: https://github.com/keep-starknet-strange/zaun/tree/main/crates/l3/appchain-core-contract-client +// TODO: Note that we already have an implementation of the appchain core contract client available here: +// https://github.com/keep-starknet-strange/zaun/tree/main/crates/l3/appchain-core-contract-client // However, this implementation uses different FieldElement types, and incorporating all of them // into this repository would introduce unnecessary complexity. 
// Therefore, we will wait for the update of starknet_rs in the Zaun repository before adapting From 763379e128dbeb2d0253e16989d37b3b72a3c2b3 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Sat, 3 Aug 2024 17:26:32 +0530 Subject: [PATCH 27/44] feat : lint fixes --- crates/orchestrator/src/data_storage/types.rs | 3 +-- crates/orchestrator/src/database/mongodb/mod.rs | 1 - crates/orchestrator/src/jobs/da_job/mod.rs | 5 +++-- crates/orchestrator/src/jobs/state_update_job/mod.rs | 4 +++- crates/orchestrator/src/tests/database/mod.rs | 5 ++--- crates/orchestrator/src/tests/jobs/da_job/mod.rs | 4 ++-- crates/orchestrator/src/tests/jobs/mod.rs | 6 +++--- crates/orchestrator/src/tests/jobs/proving_job/mod.rs | 2 +- crates/orchestrator/src/tests/workers/snos/mod.rs | 2 +- 9 files changed, 16 insertions(+), 16 deletions(-) diff --git a/crates/orchestrator/src/data_storage/types.rs b/crates/orchestrator/src/data_storage/types.rs index 31270558..0fc1a6fb 100644 --- a/crates/orchestrator/src/data_storage/types.rs +++ b/crates/orchestrator/src/data_storage/types.rs @@ -1,7 +1,6 @@ -use std::collections::HashMap; - use cairo_vm::Felt252; use serde::{Deserialize, Serialize}; +use std::collections::HashMap; /// This struct represents the contract changes that will be in `StarknetOsOutput` /// as a vector. diff --git a/crates/orchestrator/src/database/mongodb/mod.rs b/crates/orchestrator/src/database/mongodb/mod.rs index 8a32b729..1ef99917 100644 --- a/crates/orchestrator/src/database/mongodb/mod.rs +++ b/crates/orchestrator/src/database/mongodb/mod.rs @@ -2,7 +2,6 @@ use async_std::stream::StreamExt; use futures::TryStreamExt; use std::collections::HashMap; -use async_std::stream::StreamExt; use async_trait::async_trait; use color_eyre::eyre::eyre; use color_eyre::Result; diff --git a/crates/orchestrator/src/jobs/da_job/mod.rs b/crates/orchestrator/src/jobs/da_job/mod.rs index 0581139f..7db24a09 100644 --- a/crates/orchestrator/src/jobs/da_job/mod.rs +++ b/crates/orchestrator/src/jobs/da_job/mod.rs @@ -348,17 +348,18 @@ mod tests { use std::io::Read; use ::serde::{Deserialize, Serialize}; - use da_client_interface::MockDaClient; use httpmock::prelude::*; use majin_blob_core::blob; use majin_blob_types::serde; use majin_blob_types::state_diffs::UnorderedEq; + // use majin_blob_types::serde; + use crate::data_storage::MockDataStorage; + use da_client_interface::MockDaClient; use rstest::rstest; use serde_json::json; use super::*; // use majin_blob_types::serde; - use crate::data_storage::MockDataStorage; use crate::tests::common::init_config; #[rstest] diff --git a/crates/orchestrator/src/jobs/state_update_job/mod.rs b/crates/orchestrator/src/jobs/state_update_job/mod.rs index dc460d1b..d60c86d1 100644 --- a/crates/orchestrator/src/jobs/state_update_job/mod.rs +++ b/crates/orchestrator/src/jobs/state_update_job/mod.rs @@ -7,14 +7,16 @@ use async_trait::async_trait; use cairo_vm::Felt252; use color_eyre::eyre::eyre; use color_eyre::Result; -use settlement_client_interface::SettlementVerificationStatus; use snos::io::output::StarknetOsOutput; use uuid::Uuid; +use settlement_client_interface::SettlementVerificationStatus; + use super::constants::{ JOB_METADATA_STATE_UPDATE_ATTEMPT_PREFIX, JOB_METADATA_STATE_UPDATE_LAST_FAILED_BLOCK_NO, JOB_PROCESS_ATTEMPT_METADATA_KEY, }; + use crate::config::{config, Config}; use crate::constants::SNOS_OUTPUT_FILE_NAME; use crate::jobs::constants::JOB_METADATA_STATE_UPDATE_BLOCKS_TO_SETTLE_KEY; diff --git a/crates/orchestrator/src/tests/database/mod.rs 
b/crates/orchestrator/src/tests/database/mod.rs index 3733f662..343fdf55 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -1,9 +1,8 @@ -use rstest::*; -use uuid::Uuid; - use crate::config::config; use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; use crate::tests::config::TestConfigBuilder; +use rstest::*; +use uuid::Uuid; #[rstest] #[tokio::test] diff --git a/crates/orchestrator/src/tests/jobs/da_job/mod.rs b/crates/orchestrator/src/tests/jobs/da_job/mod.rs index 3dab09e4..56f08375 100644 --- a/crates/orchestrator/src/tests/jobs/da_job/mod.rs +++ b/crates/orchestrator/src/tests/jobs/da_job/mod.rs @@ -1,5 +1,7 @@ use std::collections::HashMap; +use crate::config::{config, config_force_init}; +use crate::data_storage::MockDataStorage; use da_client_interface::{DaVerificationStatus, MockDaClient}; use httpmock::prelude::*; use rstest::*; @@ -9,8 +11,6 @@ use uuid::Uuid; use super::super::common::constants::{ETHEREUM_MAX_BLOB_PER_TXN, ETHEREUM_MAX_BYTES_PER_BLOB}; use super::super::common::{default_job_item, init_config}; -use crate::config::{config, config_force_init}; -use crate::data_storage::MockDataStorage; use crate::jobs::da_job::DaJob; use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; use crate::jobs::Job; diff --git a/crates/orchestrator/src/tests/jobs/mod.rs b/crates/orchestrator/src/tests/jobs/mod.rs index 86e65dde..7072af64 100644 --- a/crates/orchestrator/src/tests/jobs/mod.rs +++ b/crates/orchestrator/src/tests/jobs/mod.rs @@ -119,7 +119,7 @@ async fn create_job_job_handler_is_not_implemented_panics() { /// `Created` or `VerificationFailed`. #[rstest] #[case(JobType::SnosRun, JobStatus::Created)] -#[case(JobType::DataSubmission, JobStatus::VerificationFailed("".to_string()))] +#[case(JobType::DataSubmission, JobStatus::VerificationFailed)] #[tokio::test] async fn process_job_with_job_exists_in_db_and_valid_job_processing_status_works( #[case] job_type: JobType, @@ -339,7 +339,7 @@ async fn verify_job_with_rejected_status_adds_to_queue_works() { // DB checks. let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - assert_eq!(updated_job.status, JobStatus::VerificationFailed("".to_string())); + assert_eq!(updated_job.status, JobStatus::VerificationFailed); // Waiting for 5 secs for message to be passed into the queue sleep(Duration::from_secs(5)).await; @@ -385,7 +385,7 @@ async fn verify_job_with_rejected_status_works() { // DB checks. 
let updated_job = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap(); - assert_eq!(updated_job.status, JobStatus::VerificationFailed("".to_string())); + assert_eq!(updated_job.status, JobStatus::VerificationFailed); assert_eq!(updated_job.metadata.get(JOB_PROCESS_ATTEMPT_METADATA_KEY).unwrap(), "1"); // Waiting for 5 secs for message to be passed into the queue diff --git a/crates/orchestrator/src/tests/jobs/proving_job/mod.rs b/crates/orchestrator/src/tests/jobs/proving_job/mod.rs index 35ddd7fa..6eeaaef7 100644 --- a/crates/orchestrator/src/tests/jobs/proving_job/mod.rs +++ b/crates/orchestrator/src/tests/jobs/proving_job/mod.rs @@ -1,12 +1,12 @@ use std::collections::HashMap; +use crate::config::{config, config_force_init}; use httpmock::prelude::*; use prover_client_interface::{MockProverClient, TaskStatus}; use rstest::*; use uuid::Uuid; use super::super::common::{default_job_item, init_config}; -use crate::config::{config, config_force_init}; use crate::jobs::constants::JOB_METADATA_CAIRO_PIE_PATH_KEY; use crate::jobs::proving_job::ProvingJob; use crate::jobs::types::{JobItem, JobStatus, JobType}; diff --git a/crates/orchestrator/src/tests/workers/snos/mod.rs b/crates/orchestrator/src/tests/workers/snos/mod.rs index 7e968a49..45917b30 100644 --- a/crates/orchestrator/src/tests/workers/snos/mod.rs +++ b/crates/orchestrator/src/tests/workers/snos/mod.rs @@ -11,9 +11,9 @@ use uuid::Uuid; use crate::config::config_force_init; use crate::database::MockDatabase; use crate::jobs::job_handler_factory::mock_factory; +use crate::jobs::types::{JobStatus, JobType}; use crate::jobs::{Job, MockJob}; use crate::queue::job_queue::JOB_PROCESSING_QUEUE; -use crate::jobs::types::{JobStatus, JobType}; use crate::queue::MockQueueProvider; use crate::tests::common::init_config; use crate::tests::workers::utils::get_job_item_mock_by_id; From ec61d72120abf4403612585e37ba06bf61f836ad Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Sat, 3 Aug 2024 17:36:04 +0530 Subject: [PATCH 28/44] feat : lint fixes --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b4083e99..a3fb07bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - Function to update the state and publish blob on ethereum in state update job. - Tests for job handlers in orchestrator/src/jobs/mod.rs. - Fixtures for testing. -- Added basic rust-toolchain support. +- Basic rust-toolchain support. 
## Changed From ae2d4d92a3ee3224e74ba7a7a2a364c3fec1dd23 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Sat, 3 Aug 2024 17:58:38 +0530 Subject: [PATCH 29/44] chore: resolved pr comments --- .../src/data_storage/aws_s3/mod.rs | 25 +++++++++++-------- crates/orchestrator/src/tests/config.rs | 1 - crates/settlement-clients/ethereum/src/lib.rs | 5 +++- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/crates/orchestrator/src/data_storage/aws_s3/mod.rs b/crates/orchestrator/src/data_storage/aws_s3/mod.rs index 48707cc0..990a5d0a 100644 --- a/crates/orchestrator/src/data_storage/aws_s3/mod.rs +++ b/crates/orchestrator/src/data_storage/aws_s3/mod.rs @@ -25,28 +25,22 @@ impl AWSS3 { pub async fn new(config: AWSS3ConfigType) -> Self { let (config_builder, config) = match config { AWSS3ConfigType::WithoutEndpoint(config) => { - let credentials = Credentials::new( + let (credentials, region) = get_credentials_and_region_from_config( config.s3_key_id.clone(), config.s3_key_secret.clone(), - None, - None, - "loaded_from_custom_env", + config.s3_bucket_region.clone(), ); - let region = Region::new(config.s3_bucket_region.clone().to_string()); ( Builder::new().region(region).credentials_provider(credentials).force_path_style(true), AWSS3ConfigType::WithoutEndpoint(config), ) } AWSS3ConfigType::WithEndpoint(config) => { - let credentials = Credentials::new( + let (credentials, region) = get_credentials_and_region_from_config( config.s3_key_id.clone(), config.s3_key_secret.clone(), - None, - None, - "loaded_from_custom_env", + config.s3_bucket_region.clone(), ); - let region = Region::new(config.s3_bucket_region.clone().to_string()); ( Builder::new() .region(region) @@ -74,6 +68,17 @@ impl AWSS3 { } } +/// Return the constructed `Credentials` and `Region` +fn get_credentials_and_region_from_config( + s3_key_id: String, + s3_key_secret: String, + s3_bucket_region: String, +) -> (Credentials, Region) { + let credentials = Credentials::new(s3_key_id, s3_key_secret, None, None, "loaded_from_custom_env"); + let region = Region::new(s3_bucket_region); + (credentials, region) +} + /// Implementation of `DataStorage` for `AWSS3` /// contains the function for getting the data and putting the data /// by taking the key as an argument. 
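With the helper in place, the plain-AWS and LocalStack paths of `AWSS3::new` differ only in the endpoint override and the path-style flag. A rough usage sketch of the endpoint variant (placeholder values; `get_credentials_and_region_from_config` is the helper introduced above, the rest is the stock `aws-sdk-s3` builder API, and the snippet assumes it sits inside this module):

fn localstack_client_sketch() -> aws_sdk_s3::Client {
    // Placeholder credentials and region, not project defaults.
    let (credentials, region) = get_credentials_and_region_from_config(
        "test-key-id".to_string(),
        "test-key-secret".to_string(),
        "us-east-1".to_string(),
    );
    let conf = aws_sdk_s3::config::Builder::new()
        .region(region)
        .credentials_provider(credentials)
        // LocalStack needs an explicit endpoint and path-style bucket addressing.
        .endpoint_url("http://localhost.localstack.cloud:4566")
        .force_path_style(true)
        .build();
    aws_sdk_s3::Client::from_conf(conf)
}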
diff --git a/crates/orchestrator/src/tests/config.rs b/crates/orchestrator/src/tests/config.rs index 61c156a2..f64f18aa 100644 --- a/crates/orchestrator/src/tests/config.rs +++ b/crates/orchestrator/src/tests/config.rs @@ -103,7 +103,6 @@ impl TestConfigBuilder { drop_database().await.unwrap(); - // return config and server as tuple let config = Config::new( self.starknet_client.unwrap_or_else(|| { let provider = JsonRpcClient::new(HttpTransport::new( diff --git a/crates/settlement-clients/ethereum/src/lib.rs b/crates/settlement-clients/ethereum/src/lib.rs index cdf2788b..dde57309 100644 --- a/crates/settlement-clients/ethereum/src/lib.rs +++ b/crates/settlement-clients/ethereum/src/lib.rs @@ -69,7 +69,10 @@ impl EthereumSettlementClient { ProviderBuilder::new().with_recommended_fillers().wallet(wallet.clone()).on_http(settlement_cfg.rpc_url), ); let core_contract_client = StarknetValidityContractClient::new( - Address::from_str(&settlement_cfg.core_contract_address).unwrap().0.into(), + Address::from_str(&settlement_cfg.core_contract_address) + .expect("Failed to convert the validity contract address.") + .0 + .into(), provider.clone(), ); From 67141baf4f27b02c01ef52680be445f2c55f0aa0 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Sat, 3 Aug 2024 18:00:17 +0530 Subject: [PATCH 30/44] Update crates/orchestrator/src/tests/database/mod.rs Co-authored-by: 0xevolve --- crates/orchestrator/src/tests/database/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index 343fdf55..9b7b19d2 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -18,7 +18,8 @@ async fn test_database_connection() -> color_eyre::Result<()> { async fn test_database_create_job() -> color_eyre::Result<()> { TestConfigBuilder::new().build().await; - let config = config().await; + #[awt] + async fn test_database_create_job(#[future] config ) -> color_eyre::Result<()> { let database_client = config.database(); let job_vec = [ From 26df776a3245e87f40b8d17ccdc25eadbd7bb99e Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Sat, 3 Aug 2024 18:05:59 +0530 Subject: [PATCH 31/44] chore : lint fixes --- crates/settlement-clients/ethereum/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/settlement-clients/ethereum/src/lib.rs b/crates/settlement-clients/ethereum/src/lib.rs index 534edf48..dde57309 100644 --- a/crates/settlement-clients/ethereum/src/lib.rs +++ b/crates/settlement-clients/ethereum/src/lib.rs @@ -69,7 +69,10 @@ impl EthereumSettlementClient { ProviderBuilder::new().with_recommended_fillers().wallet(wallet.clone()).on_http(settlement_cfg.rpc_url), ); let core_contract_client = StarknetValidityContractClient::new( - Address::from_slice(settlement_cfg.core_contract_address.as_bytes()).0.into(), + Address::from_str(&settlement_cfg.core_contract_address) + .expect("Failed to convert the validity contract address.") + .0 + .into(), provider.clone(), ); From 20a480ee8952a05d1bb5520370d8d318ba5e8f6b Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Sat, 3 Aug 2024 18:22:14 +0530 Subject: [PATCH 32/44] feat : lint fix --- crates/orchestrator/src/tests/database/mod.rs | 17 ++++++++++------- .../orchestrator/src/tests/workers/snos/mod.rs | 2 +- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index 9b7b19d2..fc2edac1 
100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -1,7 +1,9 @@ -use crate::config::config; +use crate::config::{config, Config}; use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; use crate::tests::config::TestConfigBuilder; +use arc_swap::Guard; use rstest::*; +use std::sync::Arc; use uuid::Uuid; #[rstest] @@ -11,15 +13,16 @@ async fn test_database_connection() -> color_eyre::Result<()> { Ok(()) } +#[fixture] +async fn get_config() -> Guard> { + config().await +} + /// Tests for `create_job` operation in database trait. /// Creates 3 jobs and asserts them. #[rstest] -#[tokio::test] -async fn test_database_create_job() -> color_eyre::Result<()> { - TestConfigBuilder::new().build().await; - - #[awt] - async fn test_database_create_job(#[future] config ) -> color_eyre::Result<()> { +async fn test_database_create_job(#[future] get_config: Guard>) -> color_eyre::Result<()> { + let config = get_config.await; let database_client = config.database(); let job_vec = [ diff --git a/crates/orchestrator/src/tests/workers/snos/mod.rs b/crates/orchestrator/src/tests/workers/snos/mod.rs index 856430c7..4579e651 100644 --- a/crates/orchestrator/src/tests/workers/snos/mod.rs +++ b/crates/orchestrator/src/tests/workers/snos/mod.rs @@ -1,7 +1,7 @@ use crate::config::config_force_init; use crate::database::MockDatabase; -use crate::queue::job_queue::JOB_PROCESSING_QUEUE; use crate::jobs::types::{JobStatus, JobType}; +use crate::queue::job_queue::JOB_PROCESSING_QUEUE; use crate::queue::MockQueueProvider; use crate::tests::common::init_config; use crate::tests::workers::utils::get_job_item_mock_by_id; From 7f018958285041997c7beea030ffe9e1bd748da1 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Mon, 5 Aug 2024 11:28:11 +0530 Subject: [PATCH 33/44] fix : coverage tests fix --- crates/orchestrator/src/tests/database/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index fc2edac1..66a731f6 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -15,6 +15,7 @@ async fn test_database_connection() -> color_eyre::Result<()> { #[fixture] async fn get_config() -> Guard> { + TestConfigBuilder::new().build().await; config().await } From 44aecd7c7088377a0167defd2b97d0a1d31fb862 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Mon, 5 Aug 2024 11:58:53 +0530 Subject: [PATCH 34/44] fix : test fix --- crates/orchestrator/src/tests/database/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index 66a731f6..42ae1a0e 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -15,14 +15,15 @@ async fn test_database_connection() -> color_eyre::Result<()> { #[fixture] async fn get_config() -> Guard> { - TestConfigBuilder::new().build().await; config().await } /// Tests for `create_job` operation in database trait. /// Creates 3 jobs and asserts them. 
#[rstest] +#[tokio::test] async fn test_database_create_job(#[future] get_config: Guard>) -> color_eyre::Result<()> { + TestConfigBuilder::new().build().await; let config = get_config.await; let database_client = config.database(); From 2de874dcfe98e0c0f0bfdec71a6a971f9f16c2b0 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Mon, 5 Aug 2024 13:30:57 +0530 Subject: [PATCH 35/44] fix : updated region in localstack .env.test --- .env.test | 2 +- .github/workflows/coverage.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.env.test b/.env.test index 75cfea32..9e1c91cf 100644 --- a/.env.test +++ b/.env.test @@ -3,7 +3,7 @@ AWS_ACCESS_KEY_ID="AWS_ACCESS_KEY_ID" AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY" AWS_S3_BUCKET_NAME="madara-orchestrator-test-bucket" -AWS_S3_BUCKET_REGION="us-east-1" +AWS_S3_BUCKET_REGION="ap-south-1" AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566" ##### On chain config ##### diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index a55082e6..66ffd29b 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -15,7 +15,7 @@ jobs: image: localstack/localstack env: SERVICES: s3, sqs - DEFAULT_REGION: us-east-1 + DEFAULT_REGION: ap-south-1 AWS_ACCESS_KEY_ID: "AWS_ACCESS_KEY_ID" AWS_SECRET_ACCESS_KEY: "AWS_SECRET_ACCESS_KEY" ports: From ea867ae3cf96365386c2951e008ee3dd5c78ef32 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Mon, 5 Aug 2024 13:37:35 +0530 Subject: [PATCH 36/44] feat : updated region --- .env.test | 2 +- .github/workflows/coverage.yml | 2 +- crates/orchestrator/src/tests/common/mod.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.env.test b/.env.test index 9e1c91cf..75cfea32 100644 --- a/.env.test +++ b/.env.test @@ -3,7 +3,7 @@ AWS_ACCESS_KEY_ID="AWS_ACCESS_KEY_ID" AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY" AWS_S3_BUCKET_NAME="madara-orchestrator-test-bucket" -AWS_S3_BUCKET_REGION="ap-south-1" +AWS_S3_BUCKET_REGION="us-east-1" AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566" ##### On chain config ##### diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 66ffd29b..a55082e6 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -15,7 +15,7 @@ jobs: image: localstack/localstack env: SERVICES: s3, sqs - DEFAULT_REGION: ap-south-1 + DEFAULT_REGION: us-east-1 AWS_ACCESS_KEY_ID: "AWS_ACCESS_KEY_ID" AWS_SECRET_ACCESS_KEY: "AWS_SECRET_ACCESS_KEY" ports: diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index 1bd70d4c..ebba8f67 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -115,7 +115,7 @@ pub async fn create_sqs_queues() -> color_eyre::Result<()> { } async fn get_sqs_client() -> aws_sdk_sqs::Client { - let region_provider = RegionProviderChain::default_provider().or_else("ap-south-1"); + let region_provider = RegionProviderChain::default_provider().or_else("us-east-1"); let config = aws_config::from_env().region(region_provider).load().await; aws_sdk_sqs::Client::new(&config) } From 0887de562a98d4399231c659c6b1f74dd07837e1 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Mon, 5 Aug 2024 14:01:59 +0530 Subject: [PATCH 37/44] debug : added debug log to github ci --- .github/workflows/coverage.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index a55082e6..ba49c715 100644 --- 
a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -43,6 +43,10 @@ jobs: run: | cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1 + - name: Temp Check (list queues) + run: | + aws --endpoint-url=http://localhost:4566 sqs list-queues + - name: Upload coverage to codecov.io uses: codecov/codecov-action@v3 with: From d6e31cbd79071ae16795d511a3a1795264b15f3c Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Mon, 5 Aug 2024 15:06:47 +0530 Subject: [PATCH 38/44] feat : updated queue code for test fixes --- .env.example | 2 ++ .env.test | 2 ++ crates/orchestrator/src/queue/sqs/mod.rs | 16 ++++++++++++++-- crates/orchestrator/src/tests/common/mod.rs | 2 +- 4 files changed, 19 insertions(+), 3 deletions(-) diff --git a/.env.example b/.env.example index 57635205..40ff44c6 100644 --- a/.env.example +++ b/.env.example @@ -24,6 +24,8 @@ MONGODB_CONNECTION_STRING= # SQS AWS_ACCESS_KEY_ID= AWS_SECRET_ACCESS_KEY= +SQS_JOB_PROCESSING_QUEUE_URL= +SQS_JOB_VERIFICATION_QUEUE_URL= # S3 AWS_S3_BUCKET_NAME= diff --git a/.env.test b/.env.test index 75cfea32..845dddd6 100644 --- a/.env.test +++ b/.env.test @@ -5,6 +5,8 @@ AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY" AWS_S3_BUCKET_NAME="madara-orchestrator-test-bucket" AWS_S3_BUCKET_REGION="us-east-1" AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566" +SQS_JOB_PROCESSING_QUEUE_URL="http://sqs.ap-south-1.localhost.localstack.cloud:4566/000000000000/madara_orchestrator_job_processing_queue" +SQS_JOB_VERIFICATION_QUEUE_URL="http://sqs.ap-south-1.localhost.localstack.cloud:4566/000000000000/madara_orchestrator_job_verification_queue" ##### On chain config ##### diff --git a/crates/orchestrator/src/queue/sqs/mod.rs b/crates/orchestrator/src/queue/sqs/mod.rs index 0ba901fd..4079080b 100644 --- a/crates/orchestrator/src/queue/sqs/mod.rs +++ b/crates/orchestrator/src/queue/sqs/mod.rs @@ -1,9 +1,11 @@ use std::time::Duration; +use crate::queue::job_queue::JOB_PROCESSING_QUEUE; use async_trait::async_trait; use color_eyre::Result; use omniqueue::backends::{SqsBackend, SqsConfig, SqsConsumer, SqsProducer}; use omniqueue::{Delivery, QueueError}; +use utils::env_utils::get_env_var_or_panic; use crate::queue::QueueProvider; pub struct SqsQueue; @@ -11,7 +13,8 @@ pub struct SqsQueue; #[async_trait] impl QueueProvider for SqsQueue { async fn send_message_to_queue(&self, queue: String, payload: String, delay: Option) -> Result<()> { - let producer = get_producer(queue).await?; + let queue_url = get_queue_url(queue); + let producer = get_producer(queue_url).await?; match delay { Some(d) => producer.send_raw_scheduled(payload.as_str(), d).await?, @@ -22,11 +25,20 @@ impl QueueProvider for SqsQueue { } async fn consume_message_from_queue(&self, queue: String) -> std::result::Result { - let mut consumer = get_consumer(queue).await?; + let queue_url = get_queue_url(queue); + let mut consumer = get_consumer(queue_url).await?; consumer.receive().await } } +fn get_queue_url(queue_name: String) -> String { + if queue_name == JOB_PROCESSING_QUEUE { + get_env_var_or_panic("SQS_JOB_PROCESSING_QUEUE_URL") + } else { + get_env_var_or_panic("SQS_JOB_VERIFICATION_QUEUE_URL") + } +} + // TODO: store the producer and consumer in memory to avoid creating a new one every time async fn get_producer(queue: String) -> Result { let (producer, _) = diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index ebba8f67..1bd70d4c 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ 
b/crates/orchestrator/src/tests/common/mod.rs @@ -115,7 +115,7 @@ pub async fn create_sqs_queues() -> color_eyre::Result<()> { } async fn get_sqs_client() -> aws_sdk_sqs::Client { - let region_provider = RegionProviderChain::default_provider().or_else("us-east-1"); + let region_provider = RegionProviderChain::default_provider().or_else("ap-south-1"); let config = aws_config::from_env().region(region_provider).load().await; aws_sdk_sqs::Client::new(&config) } From a6d92559123266af25de2046ad951d5c84f95a51 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Mon, 5 Aug 2024 16:25:07 +0530 Subject: [PATCH 39/44] fix : sqs region fix --- .env.test | 4 ++-- crates/orchestrator/src/tests/common/mod.rs | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.env.test b/.env.test index 845dddd6..ad6ab2c1 100644 --- a/.env.test +++ b/.env.test @@ -5,8 +5,8 @@ AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY" AWS_S3_BUCKET_NAME="madara-orchestrator-test-bucket" AWS_S3_BUCKET_REGION="us-east-1" AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566" -SQS_JOB_PROCESSING_QUEUE_URL="http://sqs.ap-south-1.localhost.localstack.cloud:4566/000000000000/madara_orchestrator_job_processing_queue" -SQS_JOB_VERIFICATION_QUEUE_URL="http://sqs.ap-south-1.localhost.localstack.cloud:4566/000000000000/madara_orchestrator_job_verification_queue" +SQS_JOB_PROCESSING_QUEUE_URL="http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/madara_orchestrator_job_processing_queue" +SQS_JOB_VERIFICATION_QUEUE_URL="http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/madara_orchestrator_job_verification_queue" ##### On chain config ##### diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index 1bd70d4c..356ffacd 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -5,6 +5,7 @@ use std::sync::Arc; use ::uuid::Uuid; use aws_config::meta::region::RegionProviderChain; +use aws_config::Region; use constants::*; use da_client_interface::MockDaClient; use mongodb::Client; @@ -115,7 +116,7 @@ pub async fn create_sqs_queues() -> color_eyre::Result<()> { } async fn get_sqs_client() -> aws_sdk_sqs::Client { - let region_provider = RegionProviderChain::default_provider().or_else("ap-south-1"); + let region_provider = Region::new("us-east-1"); let config = aws_config::from_env().region(region_provider).load().await; aws_sdk_sqs::Client::new(&config) } From fa91ccb99f24ebd536cb2a195df09fc8a3f844e8 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Mon, 5 Aug 2024 16:57:58 +0530 Subject: [PATCH 40/44] debug : added debug logs for ci debugging --- .github/workflows/coverage.yml | 8 ++++---- CHANGELOG.md | 1 + crates/orchestrator/src/tests/common/mod.rs | 8 +++++--- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index ba49c715..529ea765 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -39,13 +39,13 @@ jobs: run: | cargo llvm-cov clean --workspace - - name: Run llvm-cov + - name: Cargo test temp run: | - cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1 + RUST_LOG=debug cargo test -- create_job_job_does_not_exists_in_db_works --nocapture --test-threads=1 - - name: Temp Check (list queues) + - name: Run llvm-cov run: | - aws --endpoint-url=http://localhost:4566 sqs list-queues + cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1 - name: 
Upload coverage to codecov.io uses: codecov/codecov-action@v3 diff --git a/CHANGELOG.md b/CHANGELOG.md index a3fb07bf..46adcf6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - GitHub's coverage CI yml file for localstack and db testing. - Orchestrator :Moved TestConfigBuilder to `config.rs` in tests folder. +- `.env` file requires two more variables which are queue urls for processing and verification. ## Removed diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index 356ffacd..9a1c5bbb 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -4,7 +4,6 @@ use std::collections::HashMap; use std::sync::Arc; use ::uuid::Uuid; -use aws_config::meta::region::RegionProviderChain; use aws_config::Region; use constants::*; use da_client_interface::MockDaClient; @@ -101,10 +100,10 @@ pub async fn create_sqs_queues() -> color_eyre::Result<()> { // Dropping sqs queues let list_queues_output = sqs_client.list_queues().send().await?; let queue_urls = list_queues_output.queue_urls(); - log::debug!("Found {} queues", queue_urls.len()); + println!("Found {} queues", queue_urls.len()); for queue_url in queue_urls { match sqs_client.delete_queue().queue_url(queue_url).send().await { - Ok(_) => log::debug!("Successfully deleted queue: {}", queue_url), + Ok(_) => println!("Successfully deleted queue: {}", queue_url), Err(e) => eprintln!("Error deleting queue {}: {:?}", queue_url, e), } } @@ -112,10 +111,13 @@ pub async fn create_sqs_queues() -> color_eyre::Result<()> { // Creating SQS queues sqs_client.create_queue().queue_name(JOB_PROCESSING_QUEUE).send().await?; sqs_client.create_queue().queue_name(JOB_VERIFICATION_QUEUE).send().await?; + let list_queues_output = sqs_client.list_queues().send().await?.queue_urls.unwrap(); + println!(">>> list_queues_output: {:?}", list_queues_output); Ok(()) } async fn get_sqs_client() -> aws_sdk_sqs::Client { + // This function is for localstack. So we can hardcode the region for this as of now. let region_provider = Region::new("us-east-1"); let config = aws_config::from_env().region(region_provider).load().await; aws_sdk_sqs::Client::new(&config) From 2f6283e6c618b3c3e8ae24a479be7fd652d9368a Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Mon, 5 Aug 2024 17:17:47 +0530 Subject: [PATCH 41/44] feat : added override endpoint to queue url in producer and consumer --- CHANGELOG.md | 3 ++- crates/orchestrator/src/queue/sqs/mod.rs | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 46adcf6d..50d96a54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - GitHub's coverage CI yml file for localstack and db testing. - Orchestrator :Moved TestConfigBuilder to `config.rs` in tests folder. -- `.env` file requires two more variables which are queue urls for processing and verification. +- `.env` file requires two more variables which are queue urls for processing +and verification. 
 
 ## Removed
 
diff --git a/crates/orchestrator/src/queue/sqs/mod.rs b/crates/orchestrator/src/queue/sqs/mod.rs
index 4079080b..1598189a 100644
--- a/crates/orchestrator/src/queue/sqs/mod.rs
+++ b/crates/orchestrator/src/queue/sqs/mod.rs
@@ -42,12 +42,12 @@ fn get_queue_url(queue_name: String) -> String {
 // TODO: store the producer and consumer in memory to avoid creating a new one every time
 async fn get_producer(queue: String) -> Result<SqsProducer> {
     let (producer, _) =
-        SqsBackend::builder(SqsConfig { queue_dsn: queue, override_endpoint: false }).build_pair().await?;
+        SqsBackend::builder(SqsConfig { queue_dsn: queue, override_endpoint: true }).build_pair().await?;
     Ok(producer)
 }
 
 async fn get_consumer(queue: String) -> std::result::Result<SqsConsumer, QueueError> {
     let (_, consumer) =
-        SqsBackend::builder(SqsConfig { queue_dsn: queue, override_endpoint: false }).build_pair().await?;
+        SqsBackend::builder(SqsConfig { queue_dsn: queue, override_endpoint: true }).build_pair().await?;
     Ok(consumer)
 }

From b1b9c9fa1dd88c66e5c3224537a373f997f7d95c Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Mon, 5 Aug 2024 17:42:09 +0530
Subject: [PATCH 42/44] feat : added override endpoint to queue url in producer and consumer

---
 .env.test    | 1 +
 CHANGELOG.md | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/.env.test b/.env.test
index ad6ab2c1..586dbcbc 100644
--- a/.env.test
+++ b/.env.test
@@ -7,6 +7,7 @@ AWS_S3_BUCKET_REGION="us-east-1"
 AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566"
 SQS_JOB_PROCESSING_QUEUE_URL="http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/madara_orchestrator_job_processing_queue"
 SQS_JOB_VERIFICATION_QUEUE_URL="http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/madara_orchestrator_job_verification_queue"
+AWS_DEFAULT_REGION="localhost"
 
 
 ##### On chain config #####
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 50d96a54..fb87c21a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,8 +18,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 
 - GitHub's coverage CI yml file for localstack and db testing.
 - Orchestrator :Moved TestConfigBuilder to `config.rs` in tests folder.
-- `.env` file requires two more variables which are queue urls for processing
-and verification.
+- `.env` file requires two more variables which are queue urls for processing
+  and verification.
 
 ## Removed
 

From 4ab388fb5b89cfd89b071aa4822d835a3858fe1d Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Mon, 5 Aug 2024 18:09:39 +0530
Subject: [PATCH 43/44] fix : removed logs and refactored the code

---
 .env.example                                | 5 ++++-
 .github/workflows/coverage.yml              | 4 ----
 CHANGELOG.md                                | 1 +
 crates/orchestrator/src/tests/common/mod.rs | 6 ++----
 4 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/.env.example b/.env.example
index 40ff44c6..259562b5 100644
--- a/.env.example
+++ b/.env.example
@@ -21,9 +21,12 @@ STARKNET_CAIRO_CORE_CONTRACT_ADDRESS=
 # MongoDB connection string
 MONGODB_CONNECTION_STRING=
 
-# SQS
+# AWS
 AWS_ACCESS_KEY_ID=
 AWS_SECRET_ACCESS_KEY=
+AWS_DEFAULT_REGION=
+
+# SQS
 SQS_JOB_PROCESSING_QUEUE_URL=
 SQS_JOB_VERIFICATION_QUEUE_URL=
 
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 529ea765..a55082e6 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -39,10 +39,6 @@ jobs:
         run: |
           cargo llvm-cov clean --workspace
 
-      - name: Cargo test temp
-        run: |
-          RUST_LOG=debug cargo test -- create_job_job_does_not_exists_in_db_works --nocapture --test-threads=1
-
       - name: Run llvm-cov
         run: |
           cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fb87c21a..8b6aac95 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@
 - Tests for job handlers in orchestrator/src/jobs/mod.rs.
 - Fixtures for testing.
 - Basic rust-toolchain support.
+- `AWS_DEFAULT_REGION="localhost"` var. in .env.test for omniqueue queue testing.
 
 ## Changed
 
diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs
index 9a1c5bbb..097b4126 100644
--- a/crates/orchestrator/src/tests/common/mod.rs
+++ b/crates/orchestrator/src/tests/common/mod.rs
@@ -100,10 +100,10 @@ pub async fn create_sqs_queues() -> color_eyre::Result<()> {
     // Dropping sqs queues
     let list_queues_output = sqs_client.list_queues().send().await?;
     let queue_urls = list_queues_output.queue_urls();
-    println!("Found {} queues", queue_urls.len());
+    log::debug!("Found {} queues", queue_urls.len());
     for queue_url in queue_urls {
         match sqs_client.delete_queue().queue_url(queue_url).send().await {
-            Ok(_) => println!("Successfully deleted queue: {}", queue_url),
+            Ok(_) => log::debug!("Successfully deleted queue: {}", queue_url),
             Err(e) => eprintln!("Error deleting queue {}: {:?}", queue_url, e),
         }
     }
@@ -111,8 +111,6 @@ pub async fn create_sqs_queues() -> color_eyre::Result<()> {
     // Creating SQS queues
     sqs_client.create_queue().queue_name(JOB_PROCESSING_QUEUE).send().await?;
     sqs_client.create_queue().queue_name(JOB_VERIFICATION_QUEUE).send().await?;
-    let list_queues_output = sqs_client.list_queues().send().await?.queue_urls.unwrap();
-    println!(">>> list_queues_output: {:?}", list_queues_output);
     Ok(())
 }

From c4aef5d1788e63311efffea78c2d72f96b10cdaa Mon Sep 17 00:00:00 2001
From: Arun Jangra
Date: Wed, 7 Aug 2024 00:33:25 +0530
Subject: [PATCH 44/44] chore : refactor code

---
 crates/orchestrator/src/tests/jobs/mod.rs | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/crates/orchestrator/src/tests/jobs/mod.rs b/crates/orchestrator/src/tests/jobs/mod.rs
index 7072af64..b40326aa 100644
--- a/crates/orchestrator/src/tests/jobs/mod.rs
+++ b/crates/orchestrator/src/tests/jobs/mod.rs
@@ -183,9 +183,7 @@ async fn process_job_with_job_exists_in_db_with_invalid_job_processing_status_er
 
     let job_in_db = database_client.get_job_by_id(job_item.id).await.unwrap().unwrap();
     // Job should be untouched in db.
-    assert_eq!(job_in_db.status, JobStatus::Completed);
-    assert_eq!(job_in_db.version, 0);
-    assert_eq!(job_in_db.metadata.get(JOB_PROCESS_ATTEMPT_METADATA_KEY).unwrap(), "0");
+    assert_eq!(job_in_db, job_item);
 
     // Waiting for 5 secs for message to be passed into the queue
     sleep(Duration::from_secs(5)).await;
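
Taken together, patches 39 through 43 make the test suite talk to LocalStack end to end: the SQS client hardcodes a region (LocalStack accepts any), `.env.test` pins `AWS_DEFAULT_REGION` and region-qualified queue URLs, and `override_endpoint: true` tells omniqueue to use the endpoint embedded in the queue DSN instead of deriving the real AWS endpoint from the region. Below is a minimal sketch of how these pieces fit together; it is not code from the patches themselves. It assumes the crates already used in the diffs (`aws-config`, `aws-sdk-sqs`, `omniqueue`, `tokio`, `color-eyre`) and the `SQS_JOB_PROCESSING_QUEUE_URL` variable from `.env.test`; the import path for the omniqueue SQS backend is an assumption, while the individual calls mirror the ones shown in the diffs above.

use aws_config::Region;
// Assumed re-export path; the diffs use SqsBackend/SqsConfig unqualified.
use omniqueue::backends::{SqsBackend, SqsConfig};

#[tokio::main]
async fn main() -> color_eyre::Result<()> {
    // Hardcoded region, as in get_sqs_client(): LocalStack only needs it to
    // match the region segment baked into the queue URLs in .env.test.
    let config = aws_config::from_env().region(Region::new("us-east-1")).load().await;
    let sqs_client = aws_sdk_sqs::Client::new(&config);

    // Recreate the processing queue, as create_sqs_queues() does.
    sqs_client.create_queue().queue_name("madara_orchestrator_job_processing_queue").send().await?;

    // With override_endpoint set to true, omniqueue sends requests to the
    // host named in the queue DSN (the localstack.cloud URL) rather than to
    // the default AWS endpoint for the region.
    let queue_url = std::env::var("SQS_JOB_PROCESSING_QUEUE_URL")?;
    let (_producer, _consumer) =
        SqsBackend::builder(SqsConfig { queue_dsn: queue_url, override_endpoint: true })
            .build_pair()
            .await?;
    Ok(())
}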