diff --git a/src/bootstrap/Cargo.lock b/src/bootstrap/Cargo.lock
index e861d520c5343..69bb98135ca14 100644
--- a/src/bootstrap/Cargo.lock
+++ b/src/bootstrap/Cargo.lock
@@ -11,6 +11,17 @@ dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "winapi",
+]
+
 [[package]]
 name = "autocfg"
 version = "1.1.0"
@@ -36,6 +47,7 @@ dependencies = [
 name = "bootstrap"
 version = "0.0.0"
 dependencies = [
+ "atty",
  "build_helper",
  "cc",
  "cmake",
@@ -55,6 +67,7 @@ dependencies = [
  "sha2",
  "sysinfo",
  "tar",
+ "termcolor",
  "toml",
  "walkdir",
  "winapi",
@@ -636,6 +649,15 @@ dependencies = [
  "xattr",
 ]
 
+[[package]]
+name = "termcolor"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
+dependencies = [
+ "winapi-util",
+]
+
 [[package]]
 name = "thread_local"
 version = "1.1.4"
diff --git a/src/bootstrap/Cargo.toml b/src/bootstrap/Cargo.toml
index 663987f113c8b..83e63df50147c 100644
--- a/src/bootstrap/Cargo.toml
+++ b/src/bootstrap/Cargo.toml
@@ -30,6 +30,7 @@ path = "bin/sccache-plus-cl.rs"
 test = false
 
 [dependencies]
+atty = "0.2.14"
 build_helper = { path = "../tools/build_helper" }
 cmake = "0.1.38"
 fd-lock = "3.0.8"
@@ -46,6 +47,7 @@ serde_derive = "1.0.137"
 serde_json = "1.0.2"
 sha2 = "0.10"
 tar = "0.4"
+termcolor = "1.2.0"
 toml = "0.5"
 ignore = "0.4.10"
 opener = "0.5"
diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs
index 05e742549499b..bfa237aa14d86 100644
--- a/src/bootstrap/config.rs
+++ b/src/bootstrap/config.rs
@@ -87,6 +87,9 @@ pub struct Config {
     pub patch_binaries_for_nix: bool,
     pub stage0_metadata: Stage0Metadata,
 
+    pub stdout_is_tty: bool,
+    pub stderr_is_tty: bool,
+
     pub on_fail: Option<String>,
     pub stage: u32,
     pub keep_stage: Vec<u32>,
@@ -822,6 +825,9 @@ impl Config {
         config.deny_warnings = true;
         config.bindir = "bin".into();
 
+        config.stdout_is_tty = atty::is(atty::Stream::Stdout);
+        config.stderr_is_tty = atty::is(atty::Stream::Stderr);
+
         // set by build.rs
         config.build = TargetSelection::from_user(&env!("BUILD_TRIPLE"));
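Note on the two new `Config` fields: they are probed once, when the configuration is parsed, and reused for every later color decision. A minimal sketch of the pattern (assuming only the `atty` and `termcolor` dependencies added above): termcolor's `ColorChoice::Auto` honors environment conventions such as `NO_COLOR`, but it does not itself check whether the stream is a terminal, so the caller is expected to downgrade to `Never` for pipes and files.

```rust
use termcolor::{ColorChoice, StandardStream};

// Sketch only, not part of the patch: how a cached tty flag turns into a
// termcolor stream. `auto_stdout` is a hypothetical helper name.
fn auto_stdout(stdout_is_tty: bool) -> StandardStream {
    let choice = if stdout_is_tty {
        ColorChoice::Auto // a real terminal: let termcolor decide
    } else {
        ColorChoice::Never // piped/redirected: never emit escape sequences
    };
    StandardStream::stdout(choice)
}
```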
diff --git a/src/bootstrap/lib.rs b/src/bootstrap/lib.rs
index f4abdf1cc5758..9e1d88a6aefcc 100644
--- a/src/bootstrap/lib.rs
+++ b/src/bootstrap/lib.rs
@@ -55,6 +55,7 @@ mod format;
 mod install;
 mod metadata;
 mod native;
+mod render_tests;
 mod run;
 mod sanity;
 mod setup;
@@ -88,6 +89,7 @@ pub use crate::builder::PathSet;
 use crate::cache::{Interned, INTERNER};
 pub use crate::config::Config;
 pub use crate::flags::Subcommand;
+use termcolor::{ColorChoice, StandardStream, WriteColor};
 
 const LLVM_TOOLS: &[&str] = &[
     "llvm-cov", // used to generate coverage report
@@ -1574,6 +1576,31 @@ to download LLVM rather than building it.
         self.config.ninja_in_file
     }
 
+    pub fn colored_stdout<R, F: FnOnce(&mut dyn WriteColor) -> R>(&self, f: F) -> R {
+        self.colored_stream_inner(StandardStream::stdout, self.config.stdout_is_tty, f)
+    }
+
+    pub fn colored_stderr<R, F: FnOnce(&mut dyn WriteColor) -> R>(&self, f: F) -> R {
+        self.colored_stream_inner(StandardStream::stderr, self.config.stderr_is_tty, f)
+    }
+
+    fn colored_stream_inner<R, F, C>(&self, constructor: C, is_tty: bool, f: F) -> R
+    where
+        C: Fn(ColorChoice) -> StandardStream,
+        F: FnOnce(&mut dyn WriteColor) -> R,
+    {
+        let choice = match self.config.color {
+            flags::Color::Always => ColorChoice::Always,
+            flags::Color::Never => ColorChoice::Never,
+            flags::Color::Auto if !is_tty => ColorChoice::Never,
+            flags::Color::Auto => ColorChoice::Auto,
+        };
+        let mut stream = constructor(choice);
+        let result = f(&mut stream);
+        stream.reset().unwrap();
+        result
+    }
 }
 
 #[cfg(unix)]
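The closure-based API above keeps color handling in one place: the closure receives a `&mut dyn WriteColor`, and `colored_stream_inner` resets the stream after the closure returns, so colored spans cannot leak into later output. A hypothetical call site (the helper name and message are invented for illustration):

```rust
use std::io::Write;
use termcolor::{Color, ColorSpec, WriteColor};

// Hypothetical usage of the colored_stdout helper added above.
fn print_warning(build: &crate::Build, msg: &str) -> std::io::Result<()> {
    build.colored_stdout(|stdout| {
        stdout.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?;
        write!(stdout, "warning:")?;
        stdout.reset()?;
        writeln!(stdout, " {msg}")
    })
}
```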
diff --git a/src/bootstrap/metrics.rs b/src/bootstrap/metrics.rs
index 2e62c950709df..5f254761aa19f 100644
--- a/src/bootstrap/metrics.rs
+++ b/src/bootstrap/metrics.rs
@@ -51,6 +51,7 @@ impl BuildMetrics {
             duration_excluding_children_sec: Duration::ZERO,
 
             children: Vec::new(),
+            tests: Vec::new(),
         });
     }
 
@@ -72,6 +73,16 @@ impl BuildMetrics {
         }
     }
 
+    pub(crate) fn record_test(&self, name: &str, outcome: TestOutcome) {
+        let mut state = self.state.borrow_mut();
+        state
+            .running_steps
+            .last_mut()
+            .unwrap()
+            .tests
+            .push(Test { name: name.to_string(), outcome });
+    }
+
     fn collect_stats(&self, state: &mut MetricsState) {
         let step = state.running_steps.last_mut().unwrap();
 
@@ -125,6 +136,14 @@ impl BuildMetrics {
     }
 
     fn prepare_json_step(&self, step: StepMetrics) -> JsonNode {
+        let mut children = Vec::new();
+        children.extend(step.children.into_iter().map(|child| self.prepare_json_step(child)));
+        children.extend(
+            step.tests
+                .into_iter()
+                .map(|test| JsonNode::Test { name: test.name, outcome: test.outcome }),
+        );
+
         JsonNode::RustbuildStep {
             type_: step.type_,
             debug_repr: step.debug_repr,
@@ -135,11 +154,7 @@ impl BuildMetrics {
                     / step.duration_excluding_children_sec.as_secs_f64(),
             },
 
-            children: step
-                .children
-                .into_iter()
-                .map(|child| self.prepare_json_step(child))
-                .collect(),
+            children,
         }
     }
 }
@@ -161,6 +176,12 @@ struct StepMetrics {
     duration_excluding_children_sec: Duration,
 
     children: Vec<StepMetrics>,
+    tests: Vec<Test>,
+}
+
+struct Test {
+    name: String,
+    outcome: TestOutcome,
 }
 
 #[derive(Serialize, Deserialize)]
@@ -190,6 +211,19 @@ enum JsonNode {
 
         children: Vec<JsonNode>,
     },
+    Test {
+        name: String,
+        #[serde(flatten)]
+        outcome: TestOutcome,
+    },
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(tag = "outcome", rename_all = "snake_case")]
+pub(crate) enum TestOutcome {
+    Passed,
+    Failed,
+    Ignored { ignore_reason: Option<String> },
 }
 
 #[derive(Serialize, Deserialize)]
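With `#[serde(flatten)]` on `JsonNode::Test` and the internally tagged `TestOutcome`, each recorded test serializes as one flat JSON object next to a step's child steps. The renderer added below feeds this via `record_test`; note that the test attaches to the innermost entry of `running_steps`, i.e. the step that actually spawned the test binary. A hypothetical call (only compiled with the `build-metrics` feature):

```rust
use crate::metrics::TestOutcome;

// Hypothetical helper mirroring the call render_tests.rs makes for every
// parsed libtest event; the test name comes from libtest's JSON output.
fn record_ignored(builder: &crate::builder::Builder<'_>, name: &str, reason: Option<String>) {
    builder.metrics.record_test(name, TestOutcome::Ignored { ignore_reason: reason });
}
```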
", test.name); + self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap(); + if let Some(exec_time) = test.exec_time { + print!(" ({exec_time:.2?})"); + } + println!(); + } + + fn render_test_outcome_terse(&mut self, outcome: Outcome<'_>, _: &TestOutcome) { + if self.terse_tests_in_line != 0 && self.terse_tests_in_line % TERSE_TESTS_PER_LINE == 0 { + if let Some(total) = self.tests_count { + let total = total.to_string(); + let executed = format!("{:>width$}", self.executed_tests - 1, width = total.len()); + print!(" {executed}/{total}"); + } + println!(); + self.terse_tests_in_line = 0; + } + + self.terse_tests_in_line += 1; + self.builder.colored_stdout(|stdout| outcome.write_short(stdout)).unwrap(); + let _ = std::io::stdout().flush(); + } + + fn render_suite_outcome(&self, outcome: Outcome<'_>, suite: &SuiteOutcome) { + // The terse output doesn't end with a newline, so we need to add it ourselves. + if !self.builder.config.verbose_tests { + println!(); + } + + if !self.failures.is_empty() { + println!("\nfailures:\n"); + for failure in &self.failures { + if let Some(stdout) = &failure.stdout { + println!("---- {} stdout ----", failure.name); + println!("{stdout}"); + } + } + + println!("\nfailures:"); + for failure in &self.failures { + println!(" {}", failure.name); + } + } + + if !self.benches.is_empty() { + println!("\nbenchmarks:"); + + let mut rows = Vec::new(); + for bench in &self.benches { + rows.push(( + &bench.name, + format!("{:.2?}/iter", Duration::from_nanos(bench.median)), + format!("+/- {:.2?}", Duration::from_nanos(bench.deviation)), + )); + } + + let max_0 = rows.iter().map(|r| r.0.len()).max().unwrap_or(0); + let max_1 = rows.iter().map(|r| r.1.len()).max().unwrap_or(0); + let max_2 = rows.iter().map(|r| r.2.len()).max().unwrap_or(0); + for row in &rows { + println!(" {:max_1$} {:>max_2$}", row.0, row.1, row.2); + } + } + + print!("\ntest result: "); + self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap(); + println!( + ". {} passed; {} failed; {} ignored; {} measured; {} filtered out; \ + finished in {:.2?}\n", + suite.passed, + suite.failed, + suite.ignored, + suite.measured, + suite.filtered_out, + Duration::from_secs_f64(suite.exec_time) + ); + } + + fn render_message(&mut self, message: Message) { + match message { + Message::Suite(SuiteMessage::Started { test_count }) => { + println!("\nrunning {test_count} tests"); + self.executed_tests = 0; + self.terse_tests_in_line = 0; + self.tests_count = Some(test_count); + } + Message::Suite(SuiteMessage::Ok(outcome)) => { + self.render_suite_outcome(Outcome::Ok, &outcome); + } + Message::Suite(SuiteMessage::Failed(outcome)) => { + self.render_suite_outcome(Outcome::Failed, &outcome); + } + Message::Bench(outcome) => { + // The formatting for benchmarks doesn't replicate 1:1 the formatting libtest + // outputs, mostly because libtest's formatting is broken in terse mode, which is + // the default used by our monorepo. We use a different formatting instead: + // successful benchmarks are just showed as "benchmarked"/"b", and the details are + // outputted at the bottom like failures. 
diff --git a/src/bootstrap/test.rs b/src/bootstrap/test.rs
index b4f1506dc8f30..71e83e741e12b 100644
--- a/src/bootstrap/test.rs
+++ b/src/bootstrap/test.rs
@@ -20,6 +20,7 @@ use crate::dist;
 use crate::doc::DocumentationFormat;
 use crate::flags::Subcommand;
 use crate::native;
+use crate::render_tests::add_flags_and_try_run_tests;
 use crate::tool::{self, SourceType, Tool};
 use crate::toolstate::ToolState;
 use crate::util::{self, add_link_lib_path, dylib_path, dylib_path_var, output, t};
@@ -123,7 +124,7 @@ impl Step for CrateJsonDocLint {
             SourceType::InTree,
             &[],
         );
-        try_run(builder, &mut cargo.into());
+        add_flags_and_try_run_tests(builder, &mut cargo.into());
     }
 }
 
@@ -172,7 +173,7 @@ You can skip linkcheck with --exclude src/tools/linkchecker"
             SourceType::InTree,
             &[],
         );
-        try_run(builder, &mut cargo.into());
+        add_flags_and_try_run_tests(builder, &mut cargo.into());
 
         // Build all the default documentation.
         builder.default_doc(&[]);
@@ -333,7 +334,7 @@ impl Step for Cargo {
 
         cargo.env("PATH", &path_for_cargo(builder, compiler));
 
-        try_run(builder, &mut cargo.into());
+        add_flags_and_try_run_tests(builder, &mut cargo.into());
     }
 }
 
@@ -392,7 +393,7 @@ impl Step for RustAnalyzer {
         cargo.add_rustc_lib_path(builder, compiler);
         cargo.arg("--").args(builder.config.cmd.test_args());
 
-        builder.run(&mut cargo.into());
+        add_flags_and_try_run_tests(builder, &mut cargo.into());
     }
 }
 
@@ -445,7 +446,7 @@ impl Step for Rustfmt {
 
         cargo.add_rustc_lib_path(builder, compiler);
 
-        builder.run(&mut cargo.into());
+        add_flags_and_try_run_tests(builder, &mut cargo.into());
     }
 }
 
@@ -496,7 +497,7 @@ impl Step for RustDemangler {
 
         cargo.add_rustc_lib_path(builder, compiler);
 
-        builder.run(&mut cargo.into());
+        add_flags_and_try_run_tests(builder, &mut cargo.into());
     }
 }
 
@@ -637,8 +638,7 @@ impl Step for Miri {
         // Forward test filters.
         cargo.arg("--").args(builder.config.cmd.test_args());
 
-        let mut cargo = Command::from(cargo);
-        builder.run(&mut cargo);
+        add_flags_and_try_run_tests(builder, &mut cargo.into());
 
         // # Run `cargo miri test`.
         // This is just a smoke test (Miri's own CI invokes this in a bunch of different ways and ensures
@@ -711,7 +711,7 @@ impl Step for CompiletestTest {
         );
         cargo.allow_features("test");
 
-        try_run(builder, &mut cargo.into());
+        add_flags_and_try_run_tests(builder, &mut cargo.into());
     }
 }
 
@@ -1189,7 +1189,7 @@ impl Step for TidySelfTest {
             SourceType::InTree,
             &[],
         );
-        try_run(builder, &mut cargo.into());
+        add_flags_and_try_run_tests(builder, &mut cargo.into());
     }
 }
 
@@ -1616,9 +1616,7 @@ note: if you're sure you want to do this, please open an issue as to why. In the
             cmd.arg("--verbose");
         }
 
-        if !builder.config.verbose_tests {
-            cmd.arg("--quiet");
-        }
+        cmd.arg("--json");
 
         let mut llvm_components_passed = false;
         let mut copts_passed = false;
@@ -1769,7 +1767,7 @@ note: if you're sure you want to do this, please open an issue as to why. In the
             suite, mode, &compiler.host, target
         ));
         let _time = util::timeit(&builder);
-        try_run(builder, &mut cmd);
+        crate::render_tests::try_run_tests(builder, &mut cmd);
 
         if let Some(compare_mode) = compare_mode {
             cmd.arg("--compare-mode").arg(compare_mode);
@@ -1778,7 +1776,7 @@ note: if you're sure you want to do this, please open an issue as to why. In the
             suite, mode, compare_mode, &compiler.host, target
         ));
         let _time = util::timeit(&builder);
-        try_run(builder, &mut cmd);
+        crate::render_tests::try_run_tests(builder, &mut cmd);
         }
     }
 }
@@ -2180,9 +2178,8 @@ impl Step for Crate {
             cargo.arg("--");
             cargo.args(&builder.config.cmd.test_args());
 
-            if !builder.config.verbose_tests {
-                cargo.arg("--quiet");
-            }
+            cargo.arg("-Z").arg("unstable-options");
+            cargo.arg("--format").arg("json");
 
             if target.contains("emscripten") {
                 cargo.env(
@@ -2210,7 +2207,7 @@ impl Step for Crate {
             target
         ));
         let _time = util::timeit(&builder);
-        try_run(builder, &mut cargo.into());
+        crate::render_tests::try_run_tests(builder, &mut cargo.into());
     }
 }
 
@@ -2330,7 +2327,7 @@ impl Step for CrateRustdoc {
         ));
         let _time = util::timeit(&builder);
 
-        try_run(builder, &mut cargo.into());
+        add_flags_and_try_run_tests(builder, &mut cargo.into());
     }
 }
 
@@ -2391,17 +2388,13 @@ impl Step for CrateRustdocJsonTypes {
             cargo.arg("'-Ctarget-feature=-crt-static'");
         }
 
-        if !builder.config.verbose_tests {
-            cargo.arg("--quiet");
-        }
-
         builder.info(&format!(
             "{} rustdoc-json-types stage{} ({} -> {})",
             test_kind, compiler.stage, &compiler.host, target
         ));
         let _time = util::timeit(&builder);
-        try_run(builder, &mut cargo.into());
+        add_flags_and_try_run_tests(builder, &mut cargo.into());
     }
 }
 
@@ -2570,7 +2563,7 @@ impl Step for Bootstrap {
         // rustbuild tests are racy on directory creation so just run them one at a time.
         // Since there's not many this shouldn't be a problem.
         cmd.arg("--test-threads=1");
-        try_run(builder, &mut cmd);
+        add_flags_and_try_run_tests(builder, &mut cmd);
     }
 
     fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
@@ -2651,7 +2644,7 @@ impl Step for ReplacePlaceholderTest {
             SourceType::InTree,
             &[],
         );
-        try_run(builder, &mut cargo.into());
+        add_flags_and_try_run_tests(builder, &mut cargo.into());
     }
 
     fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
diff --git a/src/tools/compiletest/src/common.rs b/src/tools/compiletest/src/common.rs
index 7fe2e6257d9e7..7691f5c32b21e 100644
--- a/src/tools/compiletest/src/common.rs
+++ b/src/tools/compiletest/src/common.rs
@@ -9,7 +9,7 @@ use std::str::FromStr;
 use crate::util::{add_dylib_path, PathBufExt};
 use lazycell::LazyCell;
-use test::ColorConfig;
+use test::{ColorConfig, OutputFormat};
 
 #[derive(Clone, Copy, PartialEq, Debug)]
 pub enum Mode {
@@ -337,7 +337,7 @@ pub struct Config {
     pub verbose: bool,
 
     /// Print one character per test instead of one line
-    pub quiet: bool,
+    pub format: OutputFormat,
 
     /// Whether to use colors in test.
     pub color: ColorConfig,
diff --git a/src/tools/compiletest/src/main.rs b/src/tools/compiletest/src/main.rs
index 1760c29ec66b7..cbaa599f79308 100644
--- a/src/tools/compiletest/src/main.rs
+++ b/src/tools/compiletest/src/main.rs
@@ -114,6 +114,7 @@ pub fn parse_config(args: Vec<String>) -> Config {
         )
         .optflag("", "quiet", "print one character per test instead of one line")
         .optopt("", "color", "coloring: auto, always, never", "WHEN")
+        .optflag("", "json", "emit json output instead of plaintext output")
         .optopt("", "logfile", "file to log test execution to", "FILE")
         .optopt("", "target", "the target to build for", "TARGET")
         .optopt("", "host", "the host to build for", "HOST")
@@ -281,7 +282,12 @@ pub fn parse_config(args: Vec<String>) -> Config {
             && !opt_str2(matches.opt_str("adb-test-dir")).is_empty(),
         lldb_python_dir: matches.opt_str("lldb-python-dir"),
         verbose: matches.opt_present("verbose"),
-        quiet: matches.opt_present("quiet"),
+        format: match (matches.opt_present("quiet"), matches.opt_present("json")) {
+            (true, true) => panic!("--quiet and --json are incompatible"),
+            (true, false) => test::OutputFormat::Terse,
+            (false, true) => test::OutputFormat::Json,
+            (false, false) => test::OutputFormat::Pretty,
+        },
         only_modified: matches.opt_present("only-modified"),
         color,
         remote_test_client: matches.opt_str("remote-test-client").map(PathBuf::from),
@@ -339,7 +345,7 @@ pub fn log_config(config: &Config) {
     logv(c, format!("ar: {}", config.ar));
     logv(c, format!("linker: {:?}", config.linker));
     logv(c, format!("verbose: {}", config.verbose));
-    logv(c, format!("quiet: {}", config.quiet));
+    logv(c, format!("format: {:?}", config.format));
     logv(c, "\n".to_string());
 }
 
@@ -416,7 +422,7 @@ pub fn run_tests(config: Config) {
         // easy to miss which tests failed, and as such fail to reproduce
        // the failure locally.
 
-        eprintln!(
+        println!(
            "Some tests failed in compiletest suite={}{} mode={} host={} target={}",
            config.suite,
            config.compare_mode.map(|c| format!(" compare_mode={:?}", c)).unwrap_or_default(),
@@ -501,7 +507,7 @@ pub fn test_opts(config: &Config) -> test::TestOpts {
         filters: config.filters.clone(),
         filter_exact: config.filter_exact,
         run_ignored: if config.run_ignored { test::RunIgnored::Yes } else { test::RunIgnored::No },
-        format: if config.quiet { test::OutputFormat::Terse } else { test::OutputFormat::Pretty },
+        format: config.format,
         logfile: config.logfile.clone(),
         run_tests: true,
         bench_benchmarks: true,
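Taken together: every libtest-based suite now goes through `add_flags_and_try_run_tests` or `try_run_tests`, and compiletest (which drives libtest programmatically) instead gains a `--json` flag that maps onto `test::OutputFormat::Json`. Roughly, an illustrative reconstruction (not code from this patch) of what a crate test invocation looks like after the flags are appended:

```rust
use std::process::Command;

// Illustrative only: the shape of a command after add_flags_and_try_run_tests
// has run. The crate name is a placeholder.
fn example_command() -> Command {
    let mut cmd = Command::new("cargo");
    cmd.args(&["test", "-p", "bootstrap"]);
    cmd.arg("--"); // everything after this reaches libtest, not cargo
    cmd.args(&["-Z", "unstable-options", "--format", "json"]);
    cmd
}
```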