-
Notifications
You must be signed in to change notification settings - Fork 175
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Async/subscription benches #372
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,20 +1,58 @@ | ||
use criterion::*; | ||
use helpers::{SUB_METHOD_NAME, UNSUB_METHOD_NAME}; | ||
use jsonrpsee::{ | ||
http_client::{ | ||
traits::Client, | ||
v2::params::{Id, JsonRpcParams}, | ||
v2::request::JsonRpcCallSer, | ||
HttpClientBuilder, | ||
}, | ||
types::traits::SubscriptionClient, | ||
ws_client::WsClientBuilder, | ||
}; | ||
use std::sync::Arc; | ||
use tokio::runtime::Runtime as TokioRuntime; | ||
|
||
mod helpers; | ||
|
||
criterion_group!(benches, http_requests, batched_http_requests, websocket_requests, jsonrpsee_types_v2); | ||
criterion_main!(benches); | ||
criterion_group!(types_benches, jsonrpsee_types_v2); | ||
criterion_group!( | ||
sync_benches, | ||
SyncBencher::http_requests, | ||
SyncBencher::batched_http_requests, | ||
SyncBencher::websocket_requests | ||
); | ||
criterion_group!( | ||
async_benches, | ||
AsyncBencher::http_requests, | ||
AsyncBencher::batched_http_requests, | ||
AsyncBencher::websocket_requests | ||
); | ||
criterion_group!(subscriptions, AsyncBencher::subscriptions); | ||
criterion_main!(types_benches, sync_benches, async_benches, subscriptions); | ||
|
||
#[derive(Debug, Clone, Copy)] | ||
enum RequestType { | ||
Sync, | ||
Async, | ||
} | ||
|
||
impl RequestType { | ||
fn method_name(self) -> &'static str { | ||
match self { | ||
RequestType::Sync => crate::helpers::SYNC_METHOD_NAME, | ||
RequestType::Async => crate::helpers::ASYNC_METHOD_NAME, | ||
} | ||
} | ||
|
||
fn group_name(self, name: &str) -> String { | ||
let request_type_name = match self { | ||
RequestType::Sync => "sync", | ||
RequestType::Async => "async", | ||
}; | ||
format!("{}/{}", request_type_name, name) | ||
} | ||
} | ||
|
||
fn v2_serialize(req: JsonRpcCallSer<'_>) -> String { | ||
serde_json::to_string(&req).unwrap() | ||
|
@@ -39,53 +77,135 @@ pub fn jsonrpsee_types_v2(crit: &mut Criterion) { | |
}); | ||
} | ||
|
||
pub fn http_requests(crit: &mut Criterion) { | ||
let rt = TokioRuntime::new().unwrap(); | ||
let url = rt.block_on(helpers::http_server()); | ||
let client = Arc::new(HttpClientBuilder::default().build(&url).unwrap()); | ||
run_round_trip(&rt, crit, client.clone(), "http_round_trip"); | ||
run_concurrent_round_trip(&rt, crit, client, "http_concurrent_round_trip"); | ||
} | ||
trait RequestBencher { | ||
const REQUEST_TYPE: RequestType; | ||
|
||
pub fn batched_http_requests(crit: &mut Criterion) { | ||
let rt = TokioRuntime::new().unwrap(); | ||
let url = rt.block_on(helpers::http_server()); | ||
let client = Arc::new(HttpClientBuilder::default().build(&url).unwrap()); | ||
run_round_trip_with_batch(&rt, crit, client, "http batch requests"); | ||
fn http_requests(crit: &mut Criterion) { | ||
let rt = TokioRuntime::new().unwrap(); | ||
let url = rt.block_on(helpers::http_server()); | ||
let client = Arc::new(HttpClientBuilder::default().build(&url).unwrap()); | ||
run_round_trip(&rt, crit, client.clone(), "http_round_trip", Self::REQUEST_TYPE); | ||
run_concurrent_round_trip(&rt, crit, client, "http_concurrent_round_trip", Self::REQUEST_TYPE); | ||
} | ||
|
||
fn batched_http_requests(crit: &mut Criterion) { | ||
let rt = TokioRuntime::new().unwrap(); | ||
let url = rt.block_on(helpers::http_server()); | ||
let client = Arc::new(HttpClientBuilder::default().build(&url).unwrap()); | ||
run_round_trip_with_batch(&rt, crit, client, "http batch requests", Self::REQUEST_TYPE); | ||
} | ||
|
||
fn websocket_requests(crit: &mut Criterion) { | ||
let rt = TokioRuntime::new().unwrap(); | ||
let url = rt.block_on(helpers::ws_server()); | ||
let client = | ||
Arc::new(rt.block_on(WsClientBuilder::default().max_concurrent_requests(1024 * 1024).build(&url)).unwrap()); | ||
run_round_trip(&rt, crit, client.clone(), "ws_round_trip", Self::REQUEST_TYPE); | ||
run_concurrent_round_trip(&rt, crit, client, "ws_concurrent_round_trip", Self::REQUEST_TYPE); | ||
} | ||
|
||
fn batched_ws_requests(crit: &mut Criterion) { | ||
let rt = TokioRuntime::new().unwrap(); | ||
let url = rt.block_on(helpers::ws_server()); | ||
let client = | ||
Arc::new(rt.block_on(WsClientBuilder::default().max_concurrent_requests(1024 * 1024).build(&url)).unwrap()); | ||
run_round_trip_with_batch(&rt, crit, client, "ws batch requests", Self::REQUEST_TYPE); | ||
} | ||
|
||
fn subscriptions(crit: &mut Criterion) { | ||
let rt = TokioRuntime::new().unwrap(); | ||
let url = rt.block_on(helpers::ws_server()); | ||
let client = | ||
Arc::new(rt.block_on(WsClientBuilder::default().max_concurrent_requests(1024 * 1024).build(&url)).unwrap()); | ||
run_sub_round_trip(&rt, crit, client, "subscriptions"); | ||
} | ||
} | ||
|
||
pub fn websocket_requests(crit: &mut Criterion) { | ||
let rt = TokioRuntime::new().unwrap(); | ||
let url = rt.block_on(helpers::ws_server()); | ||
let client = | ||
Arc::new(rt.block_on(WsClientBuilder::default().max_concurrent_requests(1024 * 1024).build(&url)).unwrap()); | ||
run_round_trip(&rt, crit, client.clone(), "ws_round_trip"); | ||
run_concurrent_round_trip(&rt, crit, client, "ws_concurrent_round_trip"); | ||
pub struct SyncBencher; | ||
|
||
impl RequestBencher for SyncBencher { | ||
const REQUEST_TYPE: RequestType = RequestType::Sync; | ||
} | ||
pub struct AsyncBencher; | ||
|
||
pub fn batched_ws_requests(crit: &mut Criterion) { | ||
let rt = TokioRuntime::new().unwrap(); | ||
let url = rt.block_on(helpers::ws_server()); | ||
let client = | ||
Arc::new(rt.block_on(WsClientBuilder::default().max_concurrent_requests(1024 * 1024).build(&url)).unwrap()); | ||
run_round_trip_with_batch(&rt, crit, client, "ws batch requests"); | ||
impl RequestBencher for AsyncBencher { | ||
const REQUEST_TYPE: RequestType = RequestType::Async; | ||
} | ||
|
||
fn run_round_trip(rt: &TokioRuntime, crit: &mut Criterion, client: Arc<impl Client>, name: &str) { | ||
crit.bench_function(name, |b| { | ||
fn run_round_trip(rt: &TokioRuntime, crit: &mut Criterion, client: Arc<impl Client>, name: &str, request: RequestType) { | ||
crit.bench_function(&request.group_name(name), |b| { | ||
b.iter(|| { | ||
rt.block_on(async { | ||
black_box(client.request::<String>("say_hello", JsonRpcParams::NoParams).await.unwrap()); | ||
black_box(client.request::<String>(request.method_name(), JsonRpcParams::NoParams).await.unwrap()); | ||
}) | ||
}) | ||
}); | ||
} | ||
|
||
/// Benchmark http batch requests over batch sizes of 2, 5, 10, 50 and 100 RPCs in each batch. | ||
fn run_round_trip_with_batch(rt: &TokioRuntime, crit: &mut Criterion, client: Arc<impl Client>, name: &str) { | ||
fn run_sub_round_trip(rt: &TokioRuntime, crit: &mut Criterion, client: Arc<impl SubscriptionClient>, name: &str) { | ||
let mut group = crit.benchmark_group(name); | ||
group.bench_function("subscribe", |b| { | ||
b.iter_with_large_drop(|| { | ||
rt.block_on(async { | ||
black_box( | ||
client | ||
.subscribe::<String>(SUB_METHOD_NAME, JsonRpcParams::NoParams, UNSUB_METHOD_NAME) | ||
.await | ||
.unwrap(), | ||
); | ||
}) | ||
}) | ||
}); | ||
group.bench_function("subscribe_response", |b| { | ||
b.iter_with_setup( | ||
|| { | ||
rt.block_on(async { | ||
client | ||
.subscribe::<String>(SUB_METHOD_NAME, JsonRpcParams::NoParams, UNSUB_METHOD_NAME) | ||
.await | ||
.unwrap() | ||
}) | ||
}, | ||
|mut sub| { | ||
rt.block_on(async { black_box(sub.next().await.unwrap()) }); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. OK, and you can't create the subscription outside the bench function because… There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I can, but in that case, it'd be quite difficult to create a representative benchmark. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Fair enough — sounds complicated; makes sense. |
||
// Note that this benchmark will include costs for measuring `drop` for subscription, | ||
// since it's not possible to combine both `iter_with_setup` and `iter_with_large_drop`. | ||
// To estimate pure cost of method, one should subtract the result of `unsub` bench | ||
// from this one. | ||
}, | ||
) | ||
}); | ||
group.bench_function("unsub", |b| { | ||
b.iter_with_setup( | ||
|| { | ||
rt.block_on(async { | ||
client | ||
.subscribe::<String>(SUB_METHOD_NAME, JsonRpcParams::NoParams, UNSUB_METHOD_NAME) | ||
.await | ||
.unwrap() | ||
}) | ||
}, | ||
|sub| { | ||
// Subscription will be closed inside of the drop impl. | ||
// Actually, it just sends a notification about object being closed, | ||
// but it's still important to know that drop impl is not too expensive. | ||
drop(black_box(sub)); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I like this |
||
}, | ||
) | ||
}); | ||
} | ||
|
||
/// Benchmark http batch requests over batch sizes of 2, 5, 10, 50 and 100 RPCs in each batch. | ||
fn run_round_trip_with_batch( | ||
rt: &TokioRuntime, | ||
crit: &mut Criterion, | ||
client: Arc<impl Client>, | ||
name: &str, | ||
request: RequestType, | ||
) { | ||
let mut group = crit.benchmark_group(request.group_name(name)); | ||
for batch_size in [2, 5, 10, 50, 100usize].iter() { | ||
let batch = vec![("say_hello", JsonRpcParams::NoParams); *batch_size]; | ||
let batch = vec![(request.method_name(), JsonRpcParams::NoParams); *batch_size]; | ||
group.throughput(Throughput::Elements(*batch_size as u64)); | ||
group.bench_with_input(BenchmarkId::from_parameter(batch_size), batch_size, |b, _| { | ||
b.iter(|| rt.block_on(async { client.batch_request::<String>(batch.clone()).await.unwrap() })) | ||
|
@@ -99,17 +219,19 @@ fn run_concurrent_round_trip<C: 'static + Client + Send + Sync>( | |
crit: &mut Criterion, | ||
client: Arc<C>, | ||
name: &str, | ||
request: RequestType, | ||
) { | ||
let mut group = crit.benchmark_group(name); | ||
let mut group = crit.benchmark_group(request.group_name(name)); | ||
for num_concurrent_tasks in helpers::concurrent_tasks() { | ||
group.bench_function(format!("{}", num_concurrent_tasks), |b| { | ||
b.iter(|| { | ||
let mut tasks = Vec::new(); | ||
for _ in 0..num_concurrent_tasks { | ||
let client_rc = client.clone(); | ||
let task = rt.spawn(async move { | ||
let _ = | ||
black_box(client_rc.request::<String>("say_hello", JsonRpcParams::NoParams).await.unwrap()); | ||
let _ = black_box( | ||
client_rc.request::<String>(request.method_name(), JsonRpcParams::NoParams).await.unwrap(), | ||
); | ||
}); | ||
tasks.push(task); | ||
} | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
A follow-up could be to subscribe to the "same subscription (by method name)" many times to benchmark how costly it is to access the `Mutex` when the number of subscribers grows. This `Mutex` should only be accessed when creating a new subscription or dropping an existing one, however, so maybe it is not that interesting anymore. Currently we have one `Arc<Mutex>` per registered subscription (by method name).
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think it would be better to write more specific benches for that matter — for example, ones that do not include a real server and client, so that the impact of the mutex is clearer.