diff --git a/Cargo.lock b/Cargo.lock
index 9b185fd2..4bf3a6f3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2326,7 +2326,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
 
 [[package]]
 name = "viceroy"
-version = "0.5.2"
+version = "0.6.0"
 dependencies = [
  "anyhow",
  "clap",
@@ -2348,7 +2348,7 @@ dependencies = [
 
 [[package]]
 name = "viceroy-lib"
-version = "0.5.2"
+version = "0.6.0"
 dependencies = [
  "anyhow",
  "bytes",
diff --git a/Cargo.toml b/Cargo.toml
index 906ba9ad..7eaf1037 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,3 +19,16 @@ default-members = [ "cli" ]
 # Since some of the integration tests involve compiling Wasm, a little optimization goes a long way
 # toward making the test suite not take forever
 opt-level = 1
+
+[workspace.dependencies]
+anyhow = "1.0.31"
+hyper = { version = "=0.14.26", features = ["full"] }
+itertools = "0.10.5"
+serde_json = "1.0.59"
+tokio = { version = "1.21.2", features = ["full"] }
+tracing = "0.1.37"
+tracing-futures = "0.2.5"
+wasi-common = "10.0.0"
+wasmtime = "10.0.0"
+futures = "0.3.24"
+url = "2.3.1"
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index d35c5586..58a574e8 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "viceroy"
 description = "Viceroy is a local testing daemon for Compute@Edge."
-version = "0.5.2"
+version = "0.6.0"
 authors = ["Fastly"]
 readme = "../README.md"
 edition = "2021"
@@ -30,22 +30,22 @@ name = "viceroy"
 path = "src/main.rs"
 
 [dependencies]
-anyhow = "^1.0.31"
-hyper = { version = "=0.14.26", features = ["full"] }
-itertools = "^0.10.5"
-serde_json = "^1.0.59"
+anyhow = { workspace = true }
+hyper = { workspace = true }
+itertools = { workspace = true }
+serde_json = { workspace = true }
 clap = { version = "^4.0.18", features = ["derive"] }
-tokio = { version = "^1.21.2", features = ["full"] }
-tracing = "^0.1.37"
-tracing-futures = "^0.2.5"
+tokio = { workspace = true }
+tracing = { workspace = true }
+tracing-futures = { workspace = true }
 tracing-subscriber = { version = "^0.3.16", features = ["env-filter", "fmt"] }
-viceroy-lib = { path = "../lib", version = "^0.5.2" }
+viceroy-lib = { path = "../lib", version = "0.6.0" }
 wat = "^1.0.38"
-wasi-common = "10.0.0"
-wasmtime = "10.0.0"
+wasi-common = { workspace = true }
+wasmtime = { workspace = true }
 libc = "^0.2.139"
 
 [dev-dependencies]
-anyhow = "^1.0.31"
-futures = "^0.3.24"
-url = "^2.3.1"
+anyhow = { workspace = true }
+futures = { workspace = true }
+url = { workspace = true }
diff --git a/cli/src/main.rs b/cli/src/main.rs
index 675ef2a2..04fa3a53 100644
--- a/cli/src/main.rs
+++ b/cli/src/main.rs
@@ -91,7 +91,12 @@ pub async fn run_wasm_main(run_args: RunArgs) -> Result<(), anyhow::Error> {
         Some(stem) => stem.to_string_lossy(),
         None => panic!("program cannot be a directory"),
     };
-    ctx.run_main(&program_name, run_args.wasm_args()).await
+    ctx.run_main(
+        &program_name,
+        run_args.wasm_args(),
+        run_args.profile_guest(),
+    )
+    .await
 }
 
 fn install_tracing_subscriber(verbosity: u8) {
diff --git a/cli/src/opts.rs b/cli/src/opts.rs
index 5874c2e7..86f5bd61 100644
--- a/cli/src/opts.rs
+++ b/cli/src/opts.rs
@@ -61,6 +61,11 @@ pub struct RunArgs {
     #[command(flatten)]
     shared: SharedArgs,
 
+    /// Whether to profile the wasm guest. Takes an optional filename to save
+    /// the profile to
+    #[arg(long, default_missing_value = "guest-profile.json", num_args=0..=1, require_equals=true)]
+    profile_guest: Option<PathBuf>,
+
     /// Args to pass along to the binary being executed.
     #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
     wasm_args: Vec<String>,
@@ -80,7 +85,7 @@ pub struct SharedArgs {
     /// Whether to treat stderr as a logging endpoint
     #[arg(long = "log-stderr", default_value = "false")]
     log_stderr: bool,
-    // Whether to enable wasmtime's builtin profiler.
+    /// Whether to enable wasmtime's builtin profiler.
     #[arg(long = "profiler", value_parser = check_wasmtime_profiler_mode)]
     profiler: Option<ProfilingStrategy>,
     /// Set of experimental WASI modules to link against.
@@ -116,6 +121,11 @@ impl RunArgs {
     pub fn shared(&self) -> &SharedArgs {
         &self.shared
     }
+
+    /// The path to write a guest profile to
+    pub fn profile_guest(&self) -> Option<&PathBuf> {
+        self.profile_guest.as_ref()
+    }
 }
 
 impl SharedArgs {
diff --git a/cli/tests/trap-test/Cargo.lock b/cli/tests/trap-test/Cargo.lock
index c20ffe6f..1a29d6be 100644
--- a/cli/tests/trap-test/Cargo.lock
+++ b/cli/tests/trap-test/Cargo.lock
@@ -2265,7 +2265,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
 
 [[package]]
 name = "viceroy-lib"
-version = "0.5.2"
+version = "0.6.0"
 dependencies = [
  "anyhow",
  "bytes",
diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 001a4120..31178677 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "viceroy-lib"
-version = "0.5.2"
+version = "0.6.0"
 description = "Viceroy implementation details."
 authors = ["Fastly"]
 edition = "2021"
@@ -23,18 +23,18 @@ include = [
 ]
 
 [dependencies]
-anyhow = "^1.0.31"
+anyhow = { workspace = true }
 bytes = "^1.2.1"
 bytesize = "^1.1.0"
 cfg-if = "^1.0"
 cranelift-entity = "^0.88.1"
 fastly-shared = "^0.9.3"
 flate2 = "^1.0.24"
-futures = "^0.3.24"
+futures = { workspace = true }
 http = "^0.2.8"
 http-body = "^0.4.5"
-hyper = { version = "=0.14.26", features = ["full"] }
-itertools = "^0.10.5"
+hyper = { workspace = true }
+itertools = { workspace = true }
 lazy_static = "^1.4.0"
 regex = "^1.3.9"
 rustls = "^0.19.1"
@@ -42,20 +42,20 @@ rustls-native-certs = "^0.5.0"
 semver = "^0.10.0"
 serde = "^1.0.145"
 serde_derive = "^1.0.114"
-serde_json = "^1.0.59"
+serde_json = { workspace = true }
 thiserror = "^1.0.37"
-tokio = { version = "^1.21.2", features = ["full"] }
+tokio = { workspace = true }
 tokio-rustls = "^0.22.0"
 toml = "^0.5.9"
-tracing = "^0.1.37"
-tracing-futures = "^0.2.5"
-url = "^2.3.1"
-wasi-common = "^10.0.0"
-wasmtime = "^10.0.0"
-wasmtime-wasi = "^10.0.0"
-wasmtime-wasi-nn = "^10.0.0"
+tracing = { workspace = true }
+tracing-futures = { workspace = true }
+url = { workspace = true }
+wasi-common = { workspace = true }
+wasmtime = { workspace = true }
+wasmtime-wasi = "10.0.0"
+wasmtime-wasi-nn = "10.0.0"
 webpki = "^0.21.0"
-wiggle = "^10.0.0"
+wiggle = "10.0.0"
 
 [dev-dependencies]
 tempfile = "3.6.0"
diff --git a/lib/src/execute.rs b/lib/src/execute.rs
index e4125a72..6cccb707 100644
--- a/lib/src/execute.rs
+++ b/lib/src/execute.rs
@@ -1,5 +1,7 @@
 //! Guest code execution.
 
+use wasmtime::GuestProfiler;
+
 use {
     crate::{
         body::Body,
@@ -29,6 +31,7 @@ use {
     wasmtime::{Engine, InstancePre, Linker, Module, ProfilingStrategy},
 };
 
+pub const EPOCH_INTERRUPTION_PERIOD: Duration = Duration::from_micros(50);
 /// Execution context used by a [`ViceroyService`](struct.ViceroyService.html).
 ///
 /// This is all of the state needed to instantiate a module, in order to respond to an HTTP
@@ -40,6 +43,8 @@ pub struct ExecuteCtx {
     engine: Engine,
     /// An almost-linked Instance: each import function is linked, just needs a Store
    instance_pre: Arc<InstancePre<WasmCtx>>,
+    /// The module to run
+    module: Module,
     /// The backends for this execution.
     backends: Arc<Backends>,
     /// The geolocation mappings for this execution.
@@ -80,13 +85,13 @@ impl ExecuteCtx {
         let instance_pre = linker.instantiate_pre(&module)?;
 
         // Create the epoch-increment thread.
-        let epoch_interruption_period = Duration::from_micros(50);
+
         let epoch_increment_stop = Arc::new(AtomicBool::new(false));
         let engine_clone = engine.clone();
         let epoch_increment_stop_clone = epoch_increment_stop.clone();
         let epoch_increment_thread = Some(Arc::new(thread::spawn(move || {
             while !epoch_increment_stop_clone.load(Ordering::Relaxed) {
-                thread::sleep(epoch_interruption_period);
+                thread::sleep(EPOCH_INTERRUPTION_PERIOD);
                 engine_clone.increment_epoch();
             }
         })));
@@ -94,6 +99,7 @@ impl ExecuteCtx {
         Ok(Self {
             engine,
             instance_pre: Arc::new(instance_pre),
+            module,
             backends: Arc::new(Backends::default()),
             geolocation: Arc::new(Geolocation::default()),
             tls_config: TlsConfig::new()?,
@@ -308,7 +314,7 @@ impl ExecuteCtx {
         // due to wasmtime limitations, in particular the fact that `Instance` is not `Send`.
         // However, the fact that the module itself is created within `ExecuteCtx::new`
         // means that the heavy lifting happens only once.
-        let mut store = create_store(&self, session).map_err(ExecutionError::Context)?;
+        let mut store = create_store(&self, session, None).map_err(ExecutionError::Context)?;
 
         let instance = self
             .instance_pre
@@ -361,7 +367,12 @@ impl ExecuteCtx {
         outcome
     }
 
-    pub async fn run_main(self, program_name: &str, args: &[String]) -> Result<(), anyhow::Error> {
+    pub async fn run_main(
+        self,
+        program_name: &str,
+        args: &[String],
+        guest_profile_path: Option<&PathBuf>,
+    ) -> Result<(), anyhow::Error> {
         // placeholders for request, result sender channel, and remote IP
         let req = Request::get("http://example.com/").body(Body::empty())?;
         let req_id = 0;
@@ -382,7 +393,14 @@ impl ExecuteCtx {
             self.secret_stores.clone(),
         );
 
-        let mut store = create_store(&self, session).map_err(ExecutionError::Context)?;
+        let profiler = guest_profile_path.map(|_| {
+            GuestProfiler::new(
+                program_name,
+                EPOCH_INTERRUPTION_PERIOD,
+                vec![(program_name.to_string(), self.module.clone())],
+            )
+        });
+        let mut store = create_store(&self, session, profiler).map_err(ExecutionError::Context)?;
         store.data_mut().wasi().push_arg(program_name)?;
         for arg in args {
             store.data_mut().wasi().push_arg(arg)?;
@@ -403,6 +421,28 @@ impl ExecuteCtx {
         // Invoke the entrypoint function and collect its exit code
         let result = main_func.call_async(&mut store, ()).await;
 
+        // If we collected a profile, write it to the file
+        if let (Some(profile), Some(path)) =
+            (store.data_mut().take_guest_profiler(), guest_profile_path)
+        {
+            if let Err(e) = std::fs::File::create(&path)
+                .map_err(anyhow::Error::new)
+                .and_then(|output| profile.finish(std::io::BufWriter::new(output)))
+            {
+                event!(
+                    Level::ERROR,
+                    "failed writing profile at {}: {e:#}",
+                    path.display()
+                );
+            } else {
+                event!(
+                    Level::INFO,
+                    "\nProfile written to: {}\nView this profile at https://profiler.firefox.com/.",
+                    path.display()
+                );
+            }
+        }
+
         // Ensure the downstream response channel is closed, whether or not a response was
         // sent during execution.
         store.data_mut().close_downstream_response_sender();
diff --git a/lib/src/linking.rs b/lib/src/linking.rs
index 601c72fa..e474f2d4 100644
--- a/lib/src/linking.rs
+++ b/lib/src/linking.rs
@@ -8,7 +8,7 @@ use {
     anyhow::Context,
     std::collections::HashSet,
     wasi_common::{pipe::WritePipe, WasiCtx},
-    wasmtime::{Linker, Store},
+    wasmtime::{GuestProfiler, Linker, Store, UpdateDeadline},
     wasmtime_wasi::WasiCtxBuilder,
     wasmtime_wasi_nn::WasiNnCtx,
 };
@@ -17,6 +17,7 @@ pub struct WasmCtx {
     wasi: WasiCtx,
     wasi_nn: WasiNnCtx,
     session: Session,
+    guest_profiler: Option<Box<GuestProfiler>>,
 }
 
 impl WasmCtx {
@@ -31,6 +32,10 @@ impl WasmCtx {
     pub fn session(&mut self) -> &mut Session {
         &mut self.session
     }
+
+    pub fn take_guest_profiler(&mut self) -> Option<Box<GuestProfiler>> {
+        self.guest_profiler.take()
+    }
 }
 
 impl WasmCtx {
@@ -46,6 +51,7 @@ pub(crate) fn create_store(
     ctx: &ExecuteCtx,
     session: Session,
+    guest_profiler: Option<GuestProfiler>,
 ) -> Result<Store<WasmCtx>, anyhow::Error> {
     let wasi = make_wasi_ctx(ctx, &session).context("creating Wasi context")?;
     let wasi_nn = WasiNnCtx::new().unwrap();
 
@@ -53,10 +59,17 @@ pub(crate) fn create_store(
         wasi,
         wasi_nn,
         session,
+        guest_profiler: guest_profiler.map(Box::new),
     };
     let mut store = Store::new(ctx.engine(), wasm_ctx);
     store.set_epoch_deadline(1);
-    store.epoch_deadline_async_yield_and_update(1);
+    store.epoch_deadline_callback(|mut store| {
+        if let Some(mut prof) = store.data_mut().guest_profiler.take() {
+            prof.sample(&store);
+            store.data_mut().guest_profiler = Some(prof);
+        }
+        Ok(UpdateDeadline::Yield(1))
+    });
 
     Ok(store)
 }
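
A minimal caller sketch of the new `guest_profile_path` argument that `ExecuteCtx::run_main` gains in this patch. The `profile_run` wrapper, the program name, and the assumption that `ExecuteCtx` is re-exported from the viceroy-lib crate root (as the CLI uses it) are illustrative assumptions, not part of the diff.

    // Hypothetical caller of the new run_main signature (not part of this change).
    use std::path::PathBuf;
    use viceroy_lib::ExecuteCtx;

    async fn profile_run(ctx: ExecuteCtx) -> Result<(), anyhow::Error> {
        // Same default file name the new --profile-guest flag falls back to.
        let profile_path = PathBuf::from("guest-profile.json");
        // Some(path) turns on wasmtime's GuestProfiler for this run; None keeps
        // the previous behavior (no sampling in the epoch-deadline callback).
        ctx.run_main("my-program", &[], Some(&profile_path)).await
    }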