Remove test listing and orchestration and just run the _start function
of the given binary, passing along any parameters following `--`
itsrainy committed Jan 17, 2023
1 parent 44a7491 · commit 2348da3
Showing 4 changed files with 70 additions and 245 deletions.
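In other words, with this change `viceroy --run` (or `-r`) executes the module's `_start` export a single time instead of serving requests, and everything after `--` on the host command line is handed to the guest as its arguments, roughly `viceroy -r module.wasm -- --guest-flag value`. As a standalone illustration of that idea only, here is a minimal sketch using plain wasmtime and wasi-common rather than Viceroy's own `ExecuteCtx`; the helper name, crate versions, and builder calls below are assumptions for illustration, not the code this commit adds:

```rust
use anyhow::Result;
use wasmtime::{Engine, Linker, Module, Store};
use wasmtime_wasi::{add_to_linker, WasiCtx, WasiCtxBuilder};

/// Run a WASI command module's `_start` once, passing `guest_args` through
/// as the guest's argv (hypothetical helper, not Viceroy's implementation).
fn run_start_once(wasm_path: &str, guest_args: &[String]) -> Result<()> {
    let engine = Engine::default();
    let module = Module::from_file(&engine, wasm_path)?;

    let mut linker: Linker<WasiCtx> = Linker::new(&engine);
    add_to_linker(&mut linker, |ctx| ctx)?;

    // Forward the arguments that followed `--` on the host command line.
    let wasi = WasiCtxBuilder::new()
        .inherit_stdio()
        .args(guest_args)?
        .build();
    let mut store = Store::new(&engine, wasi);

    // Instantiate the module and call its `_start` entry point exactly once.
    let instance = linker.instantiate(&mut store, &module)?;
    let start = instance.get_typed_func::<(), ()>(&mut store, "_start")?;
    start.call(&mut store, ())?;
    Ok(())
}
```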
137 changes: 49 additions & 88 deletions cli/src/main.rs
@@ -13,9 +13,7 @@
#![cfg_attr(not(debug_assertions), doc(test(attr(allow(dead_code)))))]
#![cfg_attr(not(debug_assertions), doc(test(attr(allow(unused_variables)))))]

use itertools::Itertools;
use std::process::ExitCode;
use viceroy_lib::TestStatus;

mod opts;

@@ -31,9 +29,7 @@ use {
tokio::time::timeout,
tracing::{event, Level, Metadata},
tracing_subscriber::{filter::EnvFilter, fmt::writer::MakeWriter, FmtSubscriber},
viceroy_lib::{
config::FastlyConfig, BackendConnector, Error, ExecuteCtx, TestResult, ViceroyService,
},
viceroy_lib::{config::FastlyConfig, BackendConnector, Error, ExecuteCtx, ViceroyService},
};

/// Starts up a Viceroy server.
@@ -119,11 +115,10 @@ pub async fn serve(opts: Opts) -> Result<(), Error> {
pub async fn main() -> ExitCode {
// Parse the command-line options, exiting if there are any errors
let opts = Opts::parse();

install_tracing_subscriber(&opts);
if opts.test_mode() {
println!("Using Viceroy to run tests...");
match run_wasm_tests(opts).await {
if opts.run_mode() {
// println!("Using Viceroy to run tests...");
match run_wasm_main(opts).await {
Ok(_) => ExitCode::SUCCESS,
Err(_) => ExitCode::FAILURE,
}
@@ -147,86 +142,11 @@ pub async fn main() -> ExitCode {
}
}

const GREEN_OK: &str = "\x1b[32mok\x1b[0m";
const RED_FAILED: &str = "\x1b[31mFAILED\x1b[0m";
const YELLOW_IGNORED: &str = "\x1b[33mignored\x1b[0m";
/// Execute a Wasm program in the Viceroy environment.
pub async fn run_wasm_tests(opts: Opts) -> Result<(), anyhow::Error> {
pub async fn run_wasm_main(opts: Opts) -> Result<(), anyhow::Error> {
// Load the wasm module into an execution context
let ctx = create_execution_context(opts)?;

// Call the wasm module with the `--list` argument to get test names
let tests = ctx.clone().list_test_names(false).await?;
// Call the wasm module with `--list --ignored`to get ignored tests
let ignored_tests = ctx.clone().list_test_names(true).await?;

// Run the tests
println!("running {} tests", tests.len());
let mut results: Vec<TestResult> = Vec::new();
for test in &tests {
if ignored_tests.contains(test) {
// todo: diff these lists more efficiently
println!("test {} ... {YELLOW_IGNORED}", test);
results.push(TestResult::new(
test.clone(),
TestStatus::IGNORED,
String::new(),
String::new(),
));
continue;
}
print!("test {} ... ", test);
let result = ctx.clone().execute_test(&test).await?;
print!(
"{}\n",
if result.status == TestStatus::PASSED {
GREEN_OK
} else {
RED_FAILED
}
);
results.push(result);
}

print_test_results(results);
Ok(())
}

fn print_test_results(results: Vec<TestResult>) {
let counts = results.iter().counts_by(|r| r.status);
let failed = results
.iter()
.filter(|r| r.status == TestStatus::FAILED)
.collect::<Vec<&TestResult>>();

// Get the stderr output for each failing test
let stderr_block = failed
.iter()
.map(|f| format!("---- {} stderr ----\n{}", f.name, f.stderr))
.join("\n");

// Get the list of names of failing tests
let failure_list = failed.iter().map(|f| format!("\t{}", f.name)).join("\n");

let result_summary = format!(
"test result: {}. {} passed; {} failed; {} ignored",
if counts.contains_key(&TestStatus::FAILED) {
RED_FAILED
} else {
GREEN_OK
},
counts.get(&TestStatus::PASSED).unwrap_or(&0),
counts.get(&TestStatus::FAILED).unwrap_or(&0),
counts.get(&TestStatus::IGNORED).unwrap_or(&0)
);

if failed.len() > 0 {
print!("\nfailures:\n\n");
print!("{stderr_block}");
print!("\nfailures:\n");
print!("{failure_list}\n");
}
println!("\n{result_summary}");
let ctx = create_execution_context(&opts).await?;
ctx.run_main(opts.run()).await
}

fn install_tracing_subscriber(opts: &Opts) {
@@ -323,19 +243,24 @@ impl<'a> MakeWriter<'a> for StdWriter {
}
}

fn create_execution_context(opts: Opts) -> Result<ExecuteCtx, anyhow::Error> {
async fn create_execution_context(opts: &Opts) -> Result<ExecuteCtx, anyhow::Error> {
let mut ctx = ExecuteCtx::new(opts.input(), opts.profiling_strategy())?
.with_log_stderr(opts.log_stderr())
.with_log_stdout(opts.log_stdout());

if let Some(config_path) = opts.config_path() {
let config = FastlyConfig::from_file(config_path)?;
let backends = config.backends();
let geolocation = config.geolocation();
let dictionaries = config.dictionaries();
let object_store = config.object_store();
let backend_names = itertools::join(backends.keys(), ", ");

ctx = ctx
.with_backends(backends.clone())
.with_geolocation(geolocation.clone())
.with_dictionaries(dictionaries.clone())
.with_object_store(object_store.clone())
.with_config_path(config_path.into());

if backend_names.is_empty() {
@@ -345,6 +270,42 @@ fn create_execution_context(opts: Opts) -> Result<ExecuteCtx, anyhow::Error> {
config_path.display()
);
}
if !opts.run_mode() {
for (name, backend) in backends.iter() {
let client = Client::builder().build(BackendConnector::new(
backend.clone(),
ctx.tls_config().clone(),
));
let req = Request::get(&backend.uri).body(Body::empty()).unwrap();

event!(Level::INFO, "checking if backend '{}' is up", name);
match timeout(Duration::from_secs(5), client.request(req)).await {
// In the case that we don't time out but we have an error, we
// check that it's specifically a connection error as this is
// the only one that happens if the server is not up.
//
// We can't combine this with the case above due to needing the
// inner error to check if it's a connection error. The type
// checker complains about it.
Ok(Err(ref e)) if e.is_connect() => event!(
Level::WARN,
"backend '{}' on '{}' is not up right now",
name,
backend.uri
),
// In the case we timeout we assume the backend is not up as 5
// seconds to do a simple get should be enough for a healthy
// service
Err(_) => event!(
Level::WARN,
"backend '{}' on '{}' is not up right now",
name,
backend.uri
),
Ok(_) => event!(Level::INFO, "backend '{}' is up", name),
}
}
}
} else {
event!(
Level::WARN,
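The per-backend probe added in the hunk above is easier to read outside the diff. The following is a hypothetical standalone equivalent using a plain hyper client and `tokio::time::timeout`; the real code routes the request through Viceroy's TLS-aware `BackendConnector` and reports the result with `event!` rather than a return value:

```rust
use std::time::Duration;

use hyper::{Body, Client, Request};
use tokio::time::timeout;

/// Return true if something answered at `uri` within five seconds
/// (hypothetical helper mirroring the check in the diff above).
async fn backend_is_up(uri: &str) -> bool {
    // Plain HTTP client; Viceroy builds one over its own backend connector.
    let client = Client::new();
    let req = Request::get(uri).body(Body::empty()).expect("valid request");

    match timeout(Duration::from_secs(5), client.request(req)).await {
        // Five seconds without an answer: assume the backend is not up.
        Err(_elapsed) => false,
        // A connection error is the one failure that means nothing is listening.
        Ok(Err(e)) if e.is_connect() => false,
        // Any other outcome, including an HTTP error status, counts as "up".
        Ok(_) => true,
    }
}
```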
18 changes: 13 additions & 5 deletions cli/src/opts.rs
@@ -29,9 +29,10 @@ pub struct Opts {
/// The path to a TOML file containing `local_server` configuration.
#[arg(short = 'C', long = "config")]
config_path: Option<PathBuf>,
/// Whether to use Viceroy as a test runner for cargo test
#[arg(short = 't', long = "test", default_value = "false")]
test_mode: bool,
/// Use Viceroy to run a module's _start function once, rather than in a
/// web server loop
#[arg(short = 'r', long = "run", default_value = "false")]
run_mode: bool,
/// Whether to treat stdout as a logging endpoint
#[arg(long = "log-stdout", default_value = "false")]
log_stdout: bool,
@@ -46,6 +47,9 @@ pub struct Opts {
// Whether to enable wasmtime's builtin profiler.
#[arg(long = "profiler", value_parser = check_wasmtime_profiler_mode)]
profiler: Option<ProfilingStrategy>,
// Command line to start child process
#[arg(trailing_var_arg = true, allow_hyphen_values = true)]
run: Vec<String>,
}

impl Opts {
@@ -66,8 +70,8 @@ impl Opts {
}

/// Whether to run Viceroy as a test runner
pub fn test_mode(&self) -> bool {
self.test_mode
pub fn run_mode(&self) -> bool {
self.run_mode
}

/// Whether to treat stdout as a logging endpoint
@@ -91,6 +95,10 @@ impl Opts {
pub fn profiling_strategy(&self) -> ProfilingStrategy {
self.profiler.unwrap_or(ProfilingStrategy::None)
}

pub fn run(&self) -> &[String] {
self.run.as_ref()
}
}

/// A parsing function used by [`Opts`][opts] to check that the input is a valid Wasm module in
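For reference, the trailing-argument capture added to `Opts` relies on clap's `trailing_var_arg` and `allow_hyphen_values` attributes, as shown in the hunk above. A trimmed-down, hypothetical slice of just those two fields looks like this; the real struct has many more options, and the module path itself is a separate positional argument:

```rust
use clap::Parser;

/// Hypothetical, stripped-down slice of the CLI surface touched by this commit.
#[derive(Parser, Debug)]
struct DemoOpts {
    /// Run the module's _start function once, rather than in a web server loop.
    #[arg(short = 'r', long = "run", default_value = "false")]
    run_mode: bool,
    /// Everything trailing, including values that start with `-`, is collected
    /// here so it can be forwarded to the guest program.
    #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
    run: Vec<String>,
}

fn main() {
    // e.g. `demo -r -- --guest-flag some-value`
    let opts = DemoOpts::parse();
    println!("run_mode = {}, guest args = {:?}", opts.run_mode, opts.run);
}
```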
