-
Notifications
You must be signed in to change notification settings - Fork 15
/
Copy pathreport.rs
374 lines (333 loc) · 11.3 KB
/
report.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
use std::path::PathBuf;
use serde::Serialize;
use crate::{abis::*, full_test_name, WriteBuffer};
/// These are the builtin test-expectations, edit these if there are new rules!
///
/// Given a test and the two ABI implementations talking to each other, decides
/// how far the test should be run and how its result should be graded. The
/// default is "run everything and it must pass"; the special cases below
/// downgrade that for known-broken or purely-informational configurations.
pub fn get_test_rules(test: &TestKey, caller: &dyn AbiImpl, callee: &dyn AbiImpl) -> TestRules {
    use TestCheckMode::*;
    use TestRunMode::*;

    // By default, require tests to run completely and pass
    let mut result = TestRules {
        run: Check,
        check: Pass(Check),
    };

    // Now apply specific custom expectations for platforms/suites
    let is_c = caller.lang() == "c" || callee.lang() == "c";
    let is_rust = caller.lang() == "rust" || callee.lang() == "rust";
    let is_rust_and_c = is_c && is_rust;

    // llvm and gcc disagree on the u128 ABI everywhere but aarch64 (arm64).
    // This is Bad! Ideally we should check for all clang<->gcc pairs but to start
    // let's mark rust <-> C as disagreeing (because rust also disagrees with clang).
    //
    // Busted(Check): still run to completion, but a mismatch is the expected outcome.
    if !cfg!(target_arch = "aarch64") && test.test_name == "ui128" && is_rust_and_c {
        result.check = Busted(Check);
    }

    // i128 types are fake on windows so this is all random garbage that might
    // not even compile, but that datapoint is a little interesting/useful
    // so let's keep running them and just ignore the result for now.
    //
    // Anyone who cares about this situation more can make the expectations more precise.
    if cfg!(windows) && test.test_name == "ui128" {
        result.check = Random;
    }

    // This test is just for investigation right now, nothing normative
    if test.test_name == "sysv_i128_emulation" {
        result.check = Random;
    }

    //
    //
    // THIS AREA RESERVED FOR VENDORS TO APPLY PATCHES
    // END OF VENDOR RESERVED AREA
    //
    //

    result
}
#[derive(Debug, thiserror::Error)]
pub enum BuildError {
#[error("io error\n{0}")]
Io(#[from] std::io::Error),
#[error("rust compile error \n{} \n{}",
std::str::from_utf8(&.0.stdout).unwrap(),
std::str::from_utf8(&.0.stderr).unwrap())]
RustCompile(std::process::Output),
#[error("c compile errror\n{0}")]
CCompile(#[from] cc::Error),
}
/// A mismatch detected while comparing the bytes the caller wrote against the
/// bytes the callee read (and vice versa for outputs).
///
/// Tuple fields are indices (test number, then arg/field numbers) followed by
/// the mismatched byte buffers from each side. The bare `{}` in several format
/// strings is filled by the extra `ARG_NAMES[...]` / `OUTPUT_NAME` argument.
#[derive(Debug, thiserror::Error)]
pub enum CheckFailure {
    /// One field of one input differed between caller and callee.
    #[error("test {0} {} field {2} mismatch \ncaller: {3:02X?} \ncallee: {4:02X?}", ARG_NAMES[*.1])]
    InputFieldMismatch(usize, usize, usize, Vec<u8>, Vec<u8>),
    /// One field of an output differed between caller and callee.
    #[error(
        "test {0} {} field {2} mismatch \ncaller: {3:02X?} \ncallee: {4:02X?}",
        OUTPUT_NAME
    )]
    OutputFieldMismatch(usize, usize, usize, Vec<u8>, Vec<u8>),
    /// Caller and callee disagreed on how many fields an input had.
    #[error("test {0} {} field count mismatch \ncaller: {2:#02X?} \ncallee: {3:#02X?}", ARG_NAMES[*.1])]
    InputFieldCountMismatch(usize, usize, Vec<Vec<u8>>, Vec<Vec<u8>>),
    /// Caller and callee disagreed on how many fields an output had.
    #[error(
        "test {0} {} field count mismatch \ncaller: {2:#02X?} \ncallee: {3:#02X?}",
        OUTPUT_NAME
    )]
    OutputFieldCountMismatch(usize, usize, Vec<Vec<u8>>, Vec<Vec<u8>>),
    /// Caller and callee disagreed on how many inputs the test had.
    #[error("test {0} input count mismatch \ncaller: {1:#02X?} \ncallee: {2:#02X?}")]
    InputCountMismatch(usize, Vec<Vec<Vec<u8>>>, Vec<Vec<Vec<u8>>>),
    /// Caller and callee disagreed on how many outputs the test had.
    #[error("test {0} output count mismatch \ncaller: {1:#02X?} \ncallee: {2:#02X?}")]
    OutputCountMismatch(usize, Vec<Vec<Vec<u8>>>, Vec<Vec<Vec<u8>>>),
}
/// Errors that can occur while linking the compiled caller/callee together.
#[derive(Debug, thiserror::Error)]
pub enum LinkError {
    /// Generic I/O failure (filesystem access, process spawning).
    #[error("io error\n{0}")]
    Io(#[from] std::io::Error),
    /// The rust linker invocation exited unsuccessfully; stdout/stderr are
    /// rendered into the message.
    #[error("rust link error \n{} \n{}",
        std::str::from_utf8(&.0.stdout).unwrap(),
        std::str::from_utf8(&.0.stderr).unwrap())]
    RustLink(std::process::Output),
}
/// Errors that can occur while loading and running a built test binary.
#[derive(Debug, thiserror::Error)]
pub enum RunError {
    /// The test's dynamic library couldn't be loaded.
    #[error("test loading error (dynamic linking failed)\n{0}")]
    LoadError(#[from] libloading::Error),
    /// The four write-buffers reported different test counts than expected
    /// (expected, caller_in, caller_out, callee_in, callee_out).
    #[error("wrong number of tests reported! \nExpected {0} \nGot (caller_in: {1}, caller_out: {2}, callee_in: {3}, callee_out: {4})")]
    TestCountMismatch(usize, usize, usize, usize, usize),
}
impl Serialize for BuildError {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let string = self.to_string();
serializer.serialize_str(&string)
}
}
impl Serialize for RunError {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let string = self.to_string();
serializer.serialize_str(&string)
}
}
impl Serialize for LinkError {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let string = self.to_string();
serializer.serialize_str(&string)
}
}
impl Serialize for CheckFailure {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let string = self.to_string();
serializer.serialize_str(&string)
}
}
impl Serialize for GenerateError {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let string = self.to_string();
serializer.serialize_str(&string)
}
}
/// The complete output of a test-suite run: summary stats, the configuration
/// used, and a per-test report.
#[derive(Debug, Serialize)]
pub struct FullReport {
    // Aggregate pass/fail/skip counters.
    pub summary: TestSummary,
    // Currently empty placeholder for run configuration.
    pub config: TestConfig,
    // One entry per test that was considered.
    pub tests: Vec<TestReport>,
}
/// Everything recorded about a single test: what it was, what we expected of
/// it, what actually happened, and the final verdict.
#[derive(Debug, Serialize)]
pub struct TestReport {
    // Identifies the test (name, convention, caller/callee impls).
    pub key: TestKey,
    // The expectations this test was graded against.
    pub rules: TestRules,
    // Raw phase-by-phase outputs/errors.
    pub results: TestRunResults,
    // The overall verdict after applying `rules` to `results`.
    pub conclusion: TestConclusion,
}
/// Placeholder for run configuration; currently has no fields.
#[derive(Debug, Serialize)]
pub struct TestConfig {}
/// Aggregate counters over every test in the run.
#[derive(Debug, Serialize)]
pub struct TestSummary {
    pub num_tests: u64,
    pub num_passed: u64,
    // "busted" = known failures that were expected and ignored.
    pub num_busted: u64,
    pub num_failed: u64,
    pub num_skipped: u64,
}
/// Uniquely identifies one test pairing: which test, which calling
/// convention, and which ABI implementation is on each side of the call.
#[derive(Debug, Clone, Serialize)]
pub struct TestKey {
    pub test_name: String,
    pub convention: String,
    pub caller_id: String,
    pub callee_id: String,
}
/// The expectations for a test: how far to run it, and how to grade it.
#[derive(Debug, Clone, Serialize)]
pub struct TestRules {
    // How far the test should be executed.
    pub run: TestRunMode,
    // What result should count as success.
    pub check: TestCheckMode,
}
/// How far the test should be executed
///
/// Each case implies all the previous cases.
///
/// NOTE: the derived `Ord` makes variant declaration order load-bearing —
/// callers compare phases with e.g. `ran_to >= TestRunMode::Check`, so new
/// variants must be inserted in pipeline order.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Serialize)]
#[allow(dead_code)]
pub enum TestRunMode {
    /// Don't run the test at all (marked as skipped)
    Skip,
    /// Just generate the source
    Generate,
    /// Just build the source
    Build,
    /// Just link the source
    Link,
    /// Run the tests, but don't check the results
    Run,
    /// Run the tests, and check the results
    Check,
}
/// To what level of correctness should the test be graded?
///
/// Tests that are Skipped ignore this.
///
/// The wrapped `TestRunMode` names the phase the expectation applies to.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Serialize)]
#[allow(dead_code)]
pub enum TestCheckMode {
    /// The test must successfully complete this phase,
    /// whatever happens after that is gravy.
    Pass(TestRunMode),
    /// The test must fail at this exact phase.
    Fail(TestRunMode),
    /// Same as Fail, but indicates this is a bug/flaw that should eventually
    /// be fixed, and not the desired result.
    Busted(TestRunMode),
    /// The test is flakey and random but we want to run it anyway,
    /// so accept whatever result we get as ok.
    Random,
}
/// The raw result of every phase of a test's pipeline.
///
/// Each phase's field is `None` if that phase was never reached; `ran_to`
/// records the furthest phase attempted.
#[derive(Debug, Serialize)]
pub struct TestRunResults {
    pub ran_to: TestRunMode,
    pub source: Option<Result<GenerateOutput, GenerateError>>,
    pub build: Option<Result<BuildOutput, BuildError>>,
    pub link: Option<Result<LinkOutput, LinkError>>,
    pub run: Option<Result<RunOutput, RunError>>,
    pub check: Option<CheckOutput>,
}
impl Default for TestRunResults {
fn default() -> Self {
Self {
ran_to: TestRunMode::Skip,
source: None,
build: None,
link: None,
run: None,
check: None,
}
}
}
/// Paths to the source files produced by the Generate phase.
#[derive(Debug, Serialize)]
pub struct GenerateOutput {
    pub caller_src: PathBuf,
    pub callee_src: PathBuf,
}
/// Paths to the libraries produced by the Build phase.
#[derive(Debug, Serialize)]
pub struct BuildOutput {
    pub caller_lib: PathBuf,
    pub callee_lib: PathBuf,
}
/// Path to the final binary produced by the Link phase.
#[derive(Debug, Serialize)]
pub struct LinkOutput {
    pub test_bin: PathBuf,
}
/// The value buffers each side recorded while the test binary ran, to be
/// compared pairwise during the Check phase.
#[derive(Debug, Serialize)]
pub struct RunOutput {
    pub caller_inputs: WriteBuffer,
    pub caller_outputs: WriteBuffer,
    pub callee_inputs: WriteBuffer,
    pub callee_outputs: WriteBuffer,
}
/// The result of the Check phase: a verdict per subtest.
///
/// `subtest_names` and `subtest_checks` are parallel vectors (zipped when
/// printing the report).
#[derive(Debug, Serialize)]
pub struct CheckOutput {
    pub all_passed: bool,
    pub subtest_names: Vec<String>,
    pub subtest_checks: Vec<Result<(), CheckFailure>>,
}
/// The final verdict for a test after applying its `TestRules` to its results.
#[derive(Debug, Clone, Serialize)]
pub enum TestConclusion {
    /// The test was never run (rules said Skip).
    Skipped,
    /// The test matched its expectations.
    Passed,
    /// The test did not match its expectations.
    Failed,
    /// A known failure occurred, as expected; ignored for pass/fail purposes.
    Busted,
}
impl FullReport {
    /// Render a human-readable summary of every test result to `f`.
    ///
    /// Each test gets one line with its conclusion (phrased relative to what
    /// the rules *expected* — e.g. a Failed result on a Busted test prints as
    /// "fixed"). Tests that ran all the way to the Check phase also get a
    /// subtest pass count, plus a per-subtest breakdown when any failed.
    /// Ends with the aggregate totals.
    ///
    /// # Errors
    ///
    /// Propagates any I/O error from writing to `f`.
    pub fn print_human(&self, mut f: impl std::io::Write) -> Result<(), std::io::Error> {
        use TestCheckMode::*;
        use TestConclusion::*;
        writeln!(f, "Final Results:")?;

        for test in &self.tests {
            let pretty_test_name = full_test_name(&test.key);
            write!(f, "{pretty_test_name:<40} ")?;
            match (&test.conclusion, &test.rules.check) {
                (Skipped, _) => write!(f, "skipped")?,

                (Passed, Pass(_)) => write!(f, "passed")?,
                (Passed, Random) => write!(f, "passed (random, result ignored)")?,
                (Passed, Fail(_)) => write!(f, "passed (failed as expected)")?,

                (Failed, Pass(_)) => write!(f, "failed")?,
                (Failed, Random) => write!(f, "failed!? (failed but random!?)")?,
                (Failed, Fail(_)) => write!(f, "failed (passed unexpectedly!)")?,
                // A Busted test that "Failed" grading means it passed for real.
                (Failed, TestCheckMode::Busted(_)) => {
                    write!(f, "fixed (test was busted, congrats!)")?
                }

                (TestConclusion::Busted, _) | (Passed, TestCheckMode::Busted(_)) => {
                    write!(f, "busted (known failure, ignored)")?
                }
            }

            let be_detailed = test.results.ran_to >= TestRunMode::Check;
            if !be_detailed {
                writeln!(f)?;
                continue;
            }
            // ran_to >= Check implies the check phase ran, so `check` is Some.
            let check_result = test.results.check.as_ref().unwrap();
            let sub_results = &check_result.subtest_checks;
            let num_passed = sub_results.iter().filter(|r| r.is_ok()).count();

            writeln!(f, " ({num_passed:>3}/{:<3} passed)", sub_results.len())?;
            // If all the subtests pass, don't bother with a breakdown.
            if check_result.all_passed {
                continue;
            }

            // Pad every subtest name to the longest one so the status column
            // lines up. (Idiomatic Iterator::max instead of a manual fold.)
            let max_name_len = check_result
                .subtest_names
                .iter()
                .map(|name| name.len())
                .max()
                .unwrap_or(0);
            for (subtest_name, result) in check_result.subtest_names.iter().zip(sub_results.iter())
            {
                write!(f, " {:width$} ", subtest_name, width = max_name_len)?;
                if result.is_err() {
                    writeln!(f, "failed!")?;
                } else {
                    writeln!(f)?;
                }
            }
            writeln!(f)?;
        }
        writeln!(f)?;
        writeln!(
            f,
            "{} tests run - {} passed, {} busted, {} failed, {} skipped",
            self.summary.num_tests,
            self.summary.num_passed,
            self.summary.num_busted,
            self.summary.num_failed,
            self.summary.num_skipped,
        )?;

        Ok(())
    }

    /// Serialize the full report as pretty-printed JSON to `f`, adapting any
    /// serde error into `std::io::Error`.
    pub fn print_json(&self, f: impl std::io::Write) -> Result<(), std::io::Error> {
        serde_json::to_writer_pretty(f, self)
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
    }

    /// True if any test genuinely failed (busted/skipped tests don't count).
    pub fn failed(&self) -> bool {
        self.summary.num_failed > 0
    }
}