From 93810efe27a98384db6874992efc56103b051114 Mon Sep 17 00:00:00 2001 From: Igor Date: Wed, 7 Jun 2023 14:59:32 -0700 Subject: [PATCH] Add realistic_env_load_sweep forge test, to check latency across TPS --- .github/workflows/forge-stable.yaml | 15 ++- .../src/emitter/mod.rs | 14 ++- .../src/emitter/submission_worker.rs | 6 +- testsuite/forge-cli/src/main.rs | 107 ++++++++++++------ testsuite/forge/src/report.rs | 2 + testsuite/forge/src/success_criteria.rs | 63 ++++++++--- .../testcases/src/load_vs_perf_benchmark.rs | 92 ++++++++++----- .../testcases/src/state_sync_performance.rs | 2 +- testsuite/testcases/src/two_traffics_test.rs | 44 +------ 9 files changed, 221 insertions(+), 124 deletions(-) diff --git a/.github/workflows/forge-stable.yaml b/.github/workflows/forge-stable.yaml index 2e5eed5c8d2126..475655abe3a414 100644 --- a/.github/workflows/forge-stable.yaml +++ b/.github/workflows/forge-stable.yaml @@ -293,11 +293,24 @@ jobs: secrets: inherit with: IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} - FORGE_NAMESPACE: forge-land-blocking-new-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + FORGE_NAMESPACE: forge-realistic-env-max-throughput-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} FORGE_RUNNER_DURATION_SECS: 600 FORGE_TEST_SUITE: realistic_env_max_throughput POST_TO_SLACK: true + run-forge-realistic-env-load-sweep: + if: ${{ github.event_name != 'pull_request' }} + needs: determine-test-metadata + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main + secrets: inherit + with: + IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + FORGE_NAMESPACE: forge-realistic-env-load-sweep-${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} + # 5 tests, each 300s + FORGE_RUNNER_DURATION_SECS: 1500 + FORGE_TEST_SUITE: realistic_env_load_sweep + POST_TO_SLACK: true + run-forge-three-region-graceful-overload: if: ${{ github.event_name != 'pull_request' }} needs: determine-test-metadata diff --git a/crates/transaction-emitter-lib/src/emitter/mod.rs b/crates/transaction-emitter-lib/src/emitter/mod.rs index 489de880ab8c31..6a199ccd124570 100644 --- a/crates/transaction-emitter-lib/src/emitter/mod.rs +++ b/crates/transaction-emitter-lib/src/emitter/mod.rs @@ -68,7 +68,7 @@ pub struct EmitModeParams { pub worker_offset_mode: WorkerOffsetMode, pub wait_millis: u64, pub check_account_sequence_only_once_fraction: f32, - pub check_account_sequence_sleep_millis: u64, + pub check_account_sequence_sleep: Duration, } #[derive(Clone, Debug)] @@ -140,6 +140,8 @@ pub struct EmitJobRequest { prompt_before_spending: bool, coordination_delay_between_instances: Duration, + + latency_polling_interval: Duration, } impl Default for EmitJobRequest { @@ -163,6 +165,7 @@ impl Default for EmitJobRequest { expected_gas_per_txn: aptos_global_constants::MAX_GAS_AMOUNT, prompt_before_spending: false, coordination_delay_between_instances: Duration::from_secs(0), + latency_polling_interval: Duration::from_millis(300), } } } @@ -257,6 +260,11 @@ impl EmitJobRequest { self } + pub fn latency_polling_interval(mut self, latency_polling_interval: Duration) -> Self { + self.latency_polling_interval = latency_polling_interval; + self + } + pub fn calculate_mode_params(&self) -> EmitModeParams { let clients_count = self.rest_clients.len(); @@ -294,7 +302,7 @@ impl EmitJobRequest { workers_per_endpoint: num_workers_per_endpoint, endpoints: clients_count, check_account_sequence_only_once_fraction: 0.0, - check_account_sequence_sleep_millis: 300, + 
check_account_sequence_sleep: self.latency_polling_interval,
             }
         },
         EmitJobMode::ConstTps { tps }
@@ -382,7 +390,7 @@ impl EmitJobRequest {
                 workers_per_endpoint: num_workers_per_endpoint,
                 endpoints: clients_count,
                 check_account_sequence_only_once_fraction: 1.0 - sample_latency_fraction,
-                check_account_sequence_sleep_millis: 300,
+                check_account_sequence_sleep: self.latency_polling_interval,
             }
         },
     }
diff --git a/crates/transaction-emitter-lib/src/emitter/submission_worker.rs b/crates/transaction-emitter-lib/src/emitter/submission_worker.rs
index 2215c6834fd551..47903e2cc35ca4 100644
--- a/crates/transaction-emitter-lib/src/emitter/submission_worker.rs
+++ b/crates/transaction-emitter-lib/src/emitter/submission_worker.rs
@@ -159,10 +159,8 @@ impl SubmissionWorker {
             // generally, we should never need to recheck, as we wait enough time
             // before calling here, but in case of shutdown/or client we are talking
             // to being stale (having stale transaction_version), we might need to wait.
-            Duration::from_millis(
-                if self.skip_latency_stats { 10 } else { 1 }
-                    * self.params.check_account_sequence_sleep_millis,
-            ),
+            if self.skip_latency_stats { 10 } else { 1 }
+                * self.params.check_account_sequence_sleep,
             loop_stats,
         )
         .await;
diff --git a/testsuite/forge-cli/src/main.rs b/testsuite/forge-cli/src/main.rs
index 8401b9316c4c96..e51d48039a2a68 100644
--- a/testsuite/forge-cli/src/main.rs
+++ b/testsuite/forge-cli/src/main.rs
@@ -263,7 +269,7 @@ fn main() -> Result<()> {
     match test_cmd {
         TestCommand::LocalSwarm(local_cfg) => {
             // Loosen all criteria for local runs
-            test_suite.get_success_criteria_mut().avg_tps = 400;
+            test_suite.get_success_criteria_mut().min_avg_tps = 400;
             let previous_emit_job = test_suite.get_emit_job().clone();
             let test_suite =
                 test_suite.with_emit_job(previous_emit_job.mode(EmitJobMode::MaxLoad {
@@ -492,6 +498,7 @@ fn single_test_suite(test_name: &str, duration: Duration) -> Result<ForgeConfig> {
        "compat" => compat(),
        "framework_upgrade" => upgrade(),
        // Rest of the tests:
+        "realistic_env_load_sweep" => realistic_env_load_sweep_test(),
        "epoch_changer_performance" => epoch_changer_performance(),
        "state_sync_perf_fullnodes_apply_outputs" => state_sync_perf_fullnodes_apply_outputs(),
        "state_sync_perf_fullnodes_execute_transactions" => {
@@ -591,8 +598,9 @@ fn run_consensus_only_perf_test() -> ForgeConfig {
     config
         .with_initial_validator_count(NonZeroUsize::new(20).unwrap())
         .add_network_test(LoadVsPerfBenchmark {
-            test: &PerformanceBenchmark,
+            test: Box::new(PerformanceBenchmark),
             workloads: Workloads::TPS(&[30000]),
+            criteria: vec![],
         })
         .with_genesis_helm_config_fn(Arc::new(|helm_values| {
             // no epoch change.
@@ -757,15 +765,57 @@ fn consensus_stress_test() -> ForgeConfig { }) } +fn realistic_env_load_sweep_test() -> ForgeConfig { + ForgeConfig::default() + .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) + .with_initial_fullnode_count(10) + .add_network_test(CompositeNetworkTest::new_with_two_wrappers( + MultiRegionNetworkEmulationTest { + override_config: None, + }, + CpuChaosTest { + override_config: None, + }, + LoadVsPerfBenchmark { + test: Box::new(PerformanceBenchmark), + workloads: Workloads::TPS(&[10, 100, 1000, 3000, 5000]), + criteria: [(9, 1.5, 4.), (95, 1.5, 4.), (950, 2., 4.), (2900, 3., 5.), (4900, 3., 5.)].into_iter().map( + |(min_tps, max_lat_p50, max_lat_p99)| + SuccessCriteria::new(min_tps) + .add_latency_threshold(max_lat_p50, LatencyType::P50) + .add_latency_threshold(max_lat_p99, LatencyType::P99) + ).collect(), + }, + )) + // Test inherits the main EmitJobRequest, so update here for more precise latency measurements + .with_emit_job( + EmitJobRequest::default().latency_polling_interval(Duration::from_millis(100)), + ) + .with_genesis_helm_config_fn(Arc::new(|helm_values| { + // no epoch change. + helm_values["chain"]["epoch_duration_secs"] = (24 * 3600).into(); + })) + .with_success_criteria( + SuccessCriteria::new(0) + .add_no_restarts() + .add_wait_for_catchup_s(60) + .add_chain_progress(StateProgressThreshold { + max_no_progress_secs: 30.0, + max_round_gap: 10, + }), + ) +} + fn load_vs_perf_benchmark() -> ForgeConfig { ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) .with_initial_fullnode_count(10) .add_network_test(LoadVsPerfBenchmark { - test: &PerformanceBenchmark, + test: Box::new(PerformanceBenchmark), workloads: Workloads::TPS(&[ 200, 1000, 3000, 5000, 7000, 7500, 8000, 9000, 10000, 12000, 15000, ]), + criteria: Vec::new(), }) .with_genesis_helm_config_fn(Arc::new(|helm_values| { // no epoch change. @@ -794,7 +844,7 @@ fn workload_vs_perf_benchmark() -> ForgeConfig { // mempool_backlog: 10000, // })) .add_network_test(LoadVsPerfBenchmark { - test: &PerformanceBenchmark, + test: Box::new(PerformanceBenchmark), workloads: Workloads::TRANSACTIONS(&[ TransactionWorkload { transaction_type: TransactionTypeArg::NoOp, @@ -837,6 +887,7 @@ fn workload_vs_perf_benchmark() -> ForgeConfig { unique_senders: true, }, ]), + criteria: Vec::new(), }) .with_genesis_helm_config_fn(Arc::new(|helm_values| { // no epoch change. 
@@ -863,15 +914,14 @@ fn graceful_overload() -> ForgeConfig {
         // So having VFNs for all validators
         .with_initial_fullnode_count(10)
         .add_network_test(TwoTrafficsTest {
-            inner_mode: EmitJobMode::ConstTps { tps: 15000 },
-            inner_gas_price: aptos_global_constants::GAS_UNIT_PRICE,
-            inner_init_gas_price_multiplier: 20,
-            inner_transaction_type: TransactionTypeArg::CoinTransfer.materialize_default(),
+            inner_traffic: EmitJobRequest::default()
+                .mode(EmitJobMode::ConstTps { tps: 15000 })
+                .init_gas_price_multiplier(20),
+
             // Additionally - we are not really gracefully handling overlaods,
             // setting limits based on current reality, to make sure they
             // don't regress, but something to investigate
-            avg_tps: 3400,
-            latency_thresholds: &[],
+            inner_success_criteria: SuccessCriteria::new(3400),
         })
         // First start higher gas-fee traffic, to not cause issues with TxnEmitter setup - account creation
         .with_emit_job(
@@ -913,19 +963,13 @@ fn three_region_sim_graceful_overload() -> ForgeConfig {
         .add_network_test(CompositeNetworkTest::new(
             ThreeRegionSameCloudSimulationTest,
             TwoTrafficsTest {
-                inner_mode: EmitJobMode::ConstTps { tps: 15000 },
-                inner_gas_price: aptos_global_constants::GAS_UNIT_PRICE,
-                inner_init_gas_price_multiplier: 20,
-                // Cannot use TransactionTypeArg::materialize, as this needs to be static
-                inner_transaction_type: TransactionType::CoinTransfer {
-                    invalid_transaction_ratio: 0,
-                    sender_use_account_pool: false,
-                },
+                inner_traffic: EmitJobRequest::default()
+                    .mode(EmitJobMode::ConstTps { tps: 15000 })
+                    .init_gas_price_multiplier(20),
                 // Additionally - we are not really gracefully handling overlaods,
                 // setting limits based on current reality, to make sure they
                 // don't regress, but something to investigate
-                avg_tps: 1200,
-                latency_thresholds: &[],
+                inner_success_criteria: SuccessCriteria::new(1200),
             },
         ))
         // First start higher gas-fee traffic, to not cause issues with TxnEmitter setup - account creation
@@ -1333,14 +1377,12 @@ fn realistic_env_max_throughput_test_suite(duration: Duration) -> ForgeConfig {
                 override_config: None,
             },
             TwoTrafficsTest {
-                inner_mode: EmitJobMode::MaxLoad {
-                    mempool_backlog: 40000,
-                },
-                inner_gas_price: aptos_global_constants::GAS_UNIT_PRICE,
-                inner_init_gas_price_multiplier: 20,
-                inner_transaction_type: TransactionTypeArg::CoinTransfer.materialize_default(),
-                avg_tps: 5000,
-                latency_thresholds: &[],
+                inner_traffic: EmitJobRequest::default()
+                    .mode(EmitJobMode::MaxLoad {
+                        mempool_backlog: 40000,
+                    })
+                    .init_gas_price_multiplier(20),
+                inner_success_criteria: SuccessCriteria::new(5000),
             },
         ))
         .with_genesis_helm_config_fn(Arc::new(|helm_values| {
@@ -1351,7 +1393,8 @@ fn realistic_env_max_throughput_test_suite(duration: Duration) -> ForgeConfig {
         .with_emit_job(
             EmitJobRequest::default()
                 .mode(EmitJobMode::ConstTps { tps: 100 })
-                .gas_price(5 * aptos_global_constants::GAS_UNIT_PRICE),
+                .gas_price(5 * aptos_global_constants::GAS_UNIT_PRICE)
+                .latency_polling_interval(Duration::from_millis(100)),
         )
         .with_success_criteria(
             SuccessCriteria::new(95)
@@ -1366,8 +1409,8 @@ fn realistic_env_max_throughput_test_suite(duration: Duration) -> ForgeConfig {
                     // Check that we don't use more than 10 GB of memory for 30% of the time.
                    MetricsThreshold::new(10 * 1024 * 1024 * 1024, 30),
                 ))
-                .add_latency_threshold(4.0, LatencyType::P50)
-                .add_latency_threshold(8.0, LatencyType::P90)
+                .add_latency_threshold(3.0, LatencyType::P50)
+                .add_latency_threshold(5.0, LatencyType::P90)
                 .add_chain_progress(StateProgressThreshold {
                     max_no_progress_secs: 10.0,
                     max_round_gap: 4,
diff --git a/testsuite/forge/src/report.rs b/testsuite/forge/src/report.rs
index b32aa9fb45a6b1..05d73e3e04721e 100644
--- a/testsuite/forge/src/report.rs
+++ b/testsuite/forge/src/report.rs
@@ -5,6 +5,7 @@
 use aptos_transaction_emitter_lib::emitter::stats::TxnStats;
 use serde::Serialize;
 use std::fmt;
+use aptos_logger::info;
 
 #[derive(Default, Debug, Serialize)]
 pub struct TestReport {
@@ -37,6 +38,7 @@ impl TestReport {
             self.text.push('\n');
         }
         self.text.push_str(&text);
+        info!("{}", text);
     }
 
     pub fn report_txn_stats(&mut self, test_name: String, stats: &TxnStats) {
diff --git a/testsuite/forge/src/success_criteria.rs b/testsuite/forge/src/success_criteria.rs
index c843ff8fb0b0a0..0357fe7e0c3e91 100644
--- a/testsuite/forge/src/success_criteria.rs
+++ b/testsuite/forge/src/success_criteria.rs
@@ -24,9 +24,10 @@ pub enum LatencyType {
 
 #[derive(Default, Clone, Debug)]
 pub struct SuccessCriteria {
-    pub avg_tps: usize,
+    pub min_avg_tps: usize,
     latency_thresholds: Vec<(Duration, LatencyType)>,
     check_no_restarts: bool,
+    max_expired_tps: Option<usize>,
     wait_for_all_nodes_to_catchup: Option<Duration>,
     // Maximum amount of CPU cores and memory bytes used by the nodes.
     system_metrics_threshold: Option<SystemMetricsThreshold>,
@@ -34,11 +35,12 @@ pub struct SuccessCriteria {
 }
 
 impl SuccessCriteria {
-    pub fn new(tps: usize) -> Self {
+    pub fn new(min_avg_tps: usize) -> Self {
         Self {
-            avg_tps: tps,
+            min_avg_tps,
             latency_thresholds: Vec::new(),
             check_no_restarts: false,
+            max_expired_tps: None,
             wait_for_all_nodes_to_catchup: None,
             system_metrics_threshold: None,
             chain_progress_check: None,
@@ -50,6 +52,11 @@ impl SuccessCriteria {
         self
     }
 
+    pub fn add_max_expired_tps(mut self, max_expired_tps: usize) -> Self {
+        self.max_expired_tps = Some(max_expired_tps);
+        self
+    }
+
     pub fn add_wait_for_catchup_s(mut self, duration_secs: u64) -> Self {
         self.wait_for_all_nodes_to_catchup = Some(Duration::from_secs(duration_secs));
         self
@@ -75,6 +82,18 @@ impl SuccessCriteria {
 pub struct SuccessCriteriaChecker {}
 
 impl SuccessCriteriaChecker {
+    pub fn check_core_for_success(
+        success_criteria: &SuccessCriteria,
+        _report: &mut TestReport,
+        stats_rate: &TxnStatsRate,
+        traffic_name: Option<String>,
+    ) -> anyhow::Result<()> {
+        let traffic_name_addition = traffic_name.map(|n| format!(" for {}", n)).unwrap_or_else(|| "".to_string());
+        Self::check_tps(success_criteria.min_avg_tps, stats_rate, &traffic_name_addition)?;
+        Self::check_latency(&success_criteria.latency_thresholds, stats_rate, &traffic_name_addition)?;
+        Ok(())
+    }
+
     pub async fn check_for_success(
         success_criteria: &SuccessCriteria,
         swarm: &mut dyn Swarm,
@@ -92,17 +111,9 @@ impl SuccessCriteriaChecker {
             stats.lasted.as_secs()
         );
         let stats_rate = stats.rate();
-        // TODO: Add more success criteria like expired transactions, CPU, memory usage etc
-        let avg_tps = stats_rate.committed;
-        if avg_tps < success_criteria.avg_tps as u64 {
-            bail!(
-                "TPS requirement failed. Average TPS {}, minimum TPS requirement {}",
-                avg_tps,
-                success_criteria.avg_tps,
-            )
-        }
-        Self::check_latency(&success_criteria.latency_thresholds, &stats_rate)?;
+        Self::check_tps(success_criteria.min_avg_tps, &stats_rate, &"".to_string())?;
+        Self::check_latency(&success_criteria.latency_thresholds, &stats_rate, &"".to_string())?;
 
         if let Some(timeout) = success_criteria.wait_for_all_nodes_to_catchup {
             swarm
@@ -243,9 +254,29 @@ impl SuccessCriteriaChecker {
         Ok(())
     }
 
+    pub fn check_tps(min_avg_tps: usize, stats_rate: &TxnStatsRate, traffic_name_addition: &String) -> anyhow::Result<()> {
+        let avg_tps = stats_rate.committed;
+        if avg_tps < min_avg_tps as u64 {
+            bail!(
+                "TPS requirement{} failed. Average TPS {}, minimum TPS requirement {}. Full stats: {}",
+                traffic_name_addition,
+                avg_tps,
+                min_avg_tps,
+                stats_rate,
+            )
+        } else {
+            println!(
+                "TPS is {} and is within limit of {}",
+                stats_rate.committed, min_avg_tps
+            );
+            Ok(())
+        }
+    }
+
     pub fn check_latency(
         latency_thresholds: &[(Duration, LatencyType)],
         stats_rate: &TxnStatsRate,
+        traffic_name_addition: &String,
     ) -> anyhow::Result<()> {
         let mut failures = Vec::new();
         for (latency_threshold, latency_type) in latency_thresholds {
@@ -259,8 +290,9 @@ impl SuccessCriteriaChecker {
             if latency > *latency_threshold {
                 failures.push(
                     format!(
-                        "{:?} latency is {}s and exceeds limit of {}s",
+                        "{:?} latency{} is {}s and exceeds limit of {}s",
                         latency_type,
+                        traffic_name_addition,
                         latency.as_secs_f32(),
                         latency_threshold.as_secs_f32()
                     )
@@ -268,8 +300,9 @@ impl SuccessCriteriaChecker {
                 );
             } else {
                 println!(
-                    "{:?} latency is {}s and is within limit of {}s",
+                    "{:?} latency{} is {}s and is within limit of {}s",
                     latency_type,
+                    traffic_name_addition,
                     latency.as_secs_f32(),
                     latency_threshold.as_secs_f32()
                 );
diff --git a/testsuite/testcases/src/load_vs_perf_benchmark.rs b/testsuite/testcases/src/load_vs_perf_benchmark.rs
index e716713dc69f43..850c596555728f 100644
--- a/testsuite/testcases/src/load_vs_perf_benchmark.rs
+++ b/testsuite/testcases/src/load_vs_perf_benchmark.rs
@@ -4,7 +4,7 @@
 use crate::NetworkLoadTest;
 use aptos_forge::{
     args::TransactionTypeArg, EmitJobMode, EmitJobRequest, NetworkContext, NetworkTest, Result,
-    Test, TxnStats,
+    Test, TxnStats, success_criteria::{SuccessCriteriaChecker, SuccessCriteria},
 };
 use aptos_logger::info;
 use rand::SeedableRng;
@@ -84,8 +84,9 @@ impl Display for TransactionWorkload {
 }
 
 pub struct LoadVsPerfBenchmark {
-    pub test: &'static dyn NetworkLoadTest,
+    pub test: Box<dyn NetworkLoadTest>,
     pub workloads: Workloads,
+    pub criteria: Vec<SuccessCriteria>,
 }
 
 impl Test for LoadVsPerfBenchmark {
@@ -140,6 +141,13 @@ impl LoadVsPerfBenchmark {
 
 impl NetworkTest for LoadVsPerfBenchmark {
     fn run<'t>(&self, ctx: &mut NetworkContext<'t>) -> Result<()> {
+        assert!(
+            self.criteria.is_empty() || self.criteria.len() == self.workloads.len(),
+            "Invalid config, {} criteria and {} workloads given",
+            self.criteria.len(),
+            self.workloads.len(),
+        );
+
         let _runtime = Runtime::new().unwrap();
         let individual_with_buffer = ctx
             .global_duration
@@ -167,38 +175,62 @@ impl NetworkTest for LoadVsPerfBenchmark {
             // let mut aptos_info = ctx.swarm().aptos_public_info();
             // runtime.block_on(aptos_info.reconfig());
 
-            println!(
-                "{: <30} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12}",
-                "workload",
-                "submitted/s",
-                "committed/s",
-                "expired/s",
-                "rejected/s",
-                "chain txn/s",
-                "latency",
-                "p50 lat",
-                "p90 lat",
-                "p99 lat",
-                "actual dur"
-            );
-            for result in &results {
-                let rate = result.stats.rate();
-                println!(
-                    "{: <30} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12}",
-                    result.name,
-                    rate.submitted,
-                    rate.committed,
-                    rate.expired,
-                    rate.failed_submission,
-                    result.ledger_transactions / result.actual_duration.as_secs(),
-                    rate.latency,
-                    rate.p50_latency,
-                    rate.p90_latency,
-                    rate.p99_latency,
-                    result.actual_duration.as_secs()
-                )
+            let table = to_table(&results);
+            for line in table {
+                info!("{}", line);
+            }
+        }
+
+        let table = to_table(&results);
+        for line in table {
+            ctx.report.report_text(line);
+        }
+
+        for (index, result) in results.iter().enumerate() {
+            let rate = result.stats.rate();
+            if let Some(criteria) = self.criteria.get(index) {
+                SuccessCriteriaChecker::check_core_for_success(criteria, ctx.report, &rate, Some(result.name.clone()))?;
             }
         }
         Ok(())
     }
 }
+
+fn to_table(results: &[SingleRunStats]) -> Vec<String> {
+    let mut table = Vec::new();
+    table.push(format!(
+        "{: <30} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12}",
+        "workload",
+        "submitted/s",
+        "committed/s",
+        "expired/s",
+        "rejected/s",
+        "chain txn/s",
+        "latency",
+        "p50 lat",
+        "p90 lat",
+        "p99 lat",
+        "actual dur"
+    ));
+
+    for result in results {
+        let rate = result.stats.rate();
+        table.push(format!(
+            "{: <30} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12} | {: <12}",
+            result.name,
+            rate.submitted,
+            rate.committed,
+            rate.expired,
+            rate.failed_submission,
+            result.ledger_transactions / result.actual_duration.as_secs(),
+            rate.latency,
+            rate.p50_latency,
+            rate.p90_latency,
+            rate.p99_latency,
+            result.actual_duration.as_secs()
+        ));
+    }
+
+    table
+}
diff --git a/testsuite/testcases/src/state_sync_performance.rs b/testsuite/testcases/src/state_sync_performance.rs
index c14af8e4f7cf74..8fbf7716577904 100644
--- a/testsuite/testcases/src/state_sync_performance.rs
+++ b/testsuite/testcases/src/state_sync_performance.rs
@@ -361,7 +361,7 @@ fn ensure_state_sync_transaction_throughput(
     // TODO: we fetch the TPS requirement from the given success criteria.
     // But, we should probably make it more generic to avoid this.
     // Ensure we meet the success criteria.
-    let min_expected_tps = ctx.success_criteria.avg_tps as u64;
+    let min_expected_tps = ctx.success_criteria.min_avg_tps as u64;
     if state_sync_throughput < min_expected_tps {
         let error_message = format!(
             "State sync TPS requirement failed. Average TPS: {}, minimum required TPS: {}",
diff --git a/testsuite/testcases/src/two_traffics_test.rs b/testsuite/testcases/src/two_traffics_test.rs
index 33445364db4734..5f8c1def4008ef 100644
--- a/testsuite/testcases/src/two_traffics_test.rs
+++ b/testsuite/testcases/src/two_traffics_test.rs
@@ -4,26 +4,17 @@
 use crate::{
     create_emitter_and_request, traffic_emitter_runtime, LoadDestination, NetworkLoadTest,
 };
-use anyhow::{bail, Ok};
 use aptos_forge::{
-    success_criteria::{LatencyType, SuccessCriteriaChecker},
-    EmitJobMode, EmitJobRequest, NetworkContext, NetworkTest, Result, Swarm, Test, TestReport,
-    TransactionType,
+    success_criteria::{SuccessCriteriaChecker, SuccessCriteria},
+    EmitJobRequest, NetworkContext, NetworkTest, Result, Swarm, Test, TestReport,
 };
 use aptos_logger::info;
 use rand::{rngs::OsRng, Rng, SeedableRng};
 use std::time::{Duration, Instant};
 
 pub struct TwoTrafficsTest {
-    // cannot have 'static EmitJobRequest, like below, so need to have inner fields
-    // pub inner_emit_job_request: EmitJobRequest,
-    pub inner_mode: EmitJobMode,
-    pub inner_gas_price: u64,
-    pub inner_init_gas_price_multiplier: u64,
-    pub inner_transaction_type: TransactionType,
-
-    pub avg_tps: usize,
-    pub latency_thresholds: &'static [(f32, LatencyType)],
+    pub inner_traffic: EmitJobRequest,
+    pub inner_success_criteria: SuccessCriteria,
 }
 
 impl Test for TwoTrafficsTest {
@@ -49,11 +40,7 @@ impl NetworkLoadTest for TwoTrafficsTest {
 
         let (emitter, emit_job_request) = create_emitter_and_request(
             swarm,
-            EmitJobRequest::default()
-                .mode(self.inner_mode.clone())
-                .gas_price(self.inner_gas_price)
-                .init_gas_price_multiplier(self.inner_init_gas_price_multiplier)
-                .transaction_type(self.inner_transaction_type),
+            self.inner_traffic.clone(),
             &nodes_to_send_load_to,
             rng,
         )?;
@@ -76,29 +63,10 @@ impl NetworkLoadTest for TwoTrafficsTest {
         );
 
         let rate = stats.rate();
-
         info!("Inner traffic: {:?}", rate);
-
-        let avg_tps = rate.committed;
-        if avg_tps < self.avg_tps as u64 {
-            bail!(
-                "TPS requirement for inner traffic failed. Average TPS {}, minimum TPS requirement {}. Full inner stats: {:?}",
-                avg_tps,
-                self.avg_tps,
-                rate,
-            )
-        }
 
         report.report_txn_stats(format!("{}: inner traffic", self.name()), &stats);
 
-        SuccessCriteriaChecker::check_latency(
-            &self
-                .latency_thresholds
-                .iter()
-                .map(|(s, t)| (Duration::from_secs_f32(*s), t.clone()))
-                .collect::<Vec<_>>(),
-            &rate,
-        )?;
-
+        SuccessCriteriaChecker::check_core_for_success(&self.inner_success_criteria, report, &rate, Some("inner traffic".to_string()))?;
         Ok(())
     }
 }
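
For local iteration, the new suite can be selected through the forge-cli arguments used above (args.suite and args.duration_secs). A minimal sketch of such an invocation follows; the package name, flag spellings, and the local-swarm subcommand are assumptions inferred from the Args definitions in testsuite/forge-cli/src/main.rs, so verify them against the CLI's --help output before relying on them:

    # Sketch (assumed crate/flag names): run the sweep locally for 5 x 300s,
    # mirroring FORGE_RUNNER_DURATION_SECS in the forge-stable workflow above.
    cargo run -p aptos-forge-cli -- \
        --suite realistic_env_load_sweep \
        --duration-secs 1500 \
        test local-swarm

Each sweep point receives an equal slice of the total duration (the run splits global_duration across the workloads), so a shorter duration also works for a quick smoke run; the per-workload TPS and latency criteria defined in realistic_env_load_sweep_test() stay the same either way.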