Split schedule output over multiple lines
bkragl committed Apr 10, 2023
1 parent 94b9d8b commit db2a219
Showing 4 changed files with 58 additions and 6 deletions.
2 changes: 1 addition & 1 deletion src/runtime/failure.rs
@@ -72,7 +72,7 @@ fn persist_failure_inner(schedule: &Schedule, message: String, config: &Config)
         }
     }
     format!(
-        "{}\nfailing schedule: \"{}\"\npass that string to `shuttle::replay` to replay the failure",
+        "{}\nfailing schedule:\n{}pass that string to `shuttle::replay` to replay the failure",
         message, serialized_schedule
    )
 }
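For illustration, here is how the new message reads once a schedule spans multiple lines; this is a minimal sketch, and the message and hex digits are placeholders rather than real Shuttle output. Note that `serialize_schedule` now terminates every schedule line with a line feed, which is why the format string needs no separator between the schedule and the final hint.

// Minimal sketch of the new output layout. The message and hex lines are
// placeholders, not real Shuttle output.
fn main() {
    let message = "deadlock! blocked tasks: [...]";
    // Every line of the serialized schedule, including the last, ends in '\n'.
    let serialized_schedule = "9102deadbeef\ncafef00d\n";
    println!(
        "{}\nfailing schedule:\n{}pass that string to `shuttle::replay` to replay the failure",
        message, serialized_schedule
    );
}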
16 changes: 15 additions & 1 deletion src/scheduler/serialization.rs
@@ -86,6 +86,9 @@ mod varint {
 
 const SCHEDULE_MAGIC_V2: u8 = 0x91;
 
+const BYTES_PER_LINE: usize = 50;
+const LINE_FEED: u8 = 0x0A;
+
 pub(crate) fn serialize_schedule(schedule: &Schedule) -> String {
     use self::varint::{space_needed, WriteVarInt};
 
@@ -128,12 +131,23 @@ pub(crate) fn serialize_schedule(schedule: &Schedule) -> String {
     buf.write_u64_varint(schedule.len() as u64).unwrap();
     buf.write_u64_varint(schedule.seed).unwrap();
     buf.extend(encoded.as_raw_slice());
-    hex::encode(buf)
+
+    let num_line_feeds = (buf.len() + BYTES_PER_LINE - 1) / BYTES_PER_LINE;
+    let mut hex_buf = vec![0; buf.len() * 2 + num_line_feeds];
+    let mut offset = 0;
+    for chunk in buf.chunks(BYTES_PER_LINE) {
+        hex::encode_to_slice(chunk, &mut hex_buf[offset..offset + chunk.len() * 2]).unwrap();
+        offset += chunk.len() * 2;
+        hex_buf[offset] = LINE_FEED;
+        offset += 1;
+    }
+    String::from_utf8(hex_buf).unwrap()
 }
 
 pub(crate) fn deserialize_schedule(str: &str) -> Option<Schedule> {
     use self::varint::ReadVarInt;
 
+    let str: String = str.chars().filter(|c| !c.is_whitespace()).collect();
     let bytes = hex::decode(str).ok()?;
 
     let version = bytes[0];
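A standalone sketch of the encode/decode pair, assuming only the `hex` crate; the 50-byte line width mirrors `BYTES_PER_LINE`, and the sample input is made up. It shows the two invariants the change relies on: every output line, including the last, ends in a line feed, and decoding strips all whitespace first, so multi-line and single-line encodings round-trip identically.

// Standalone sketch of the chunked encoding and whitespace-tolerant decoding
// above (uses the `hex` crate; sample input is illustrative).
const BYTES_PER_LINE: usize = 50;

fn encode_multiline(buf: &[u8]) -> String {
    let mut out = String::new();
    for chunk in buf.chunks(BYTES_PER_LINE) {
        out.push_str(&hex::encode(chunk)); // two hex characters per byte
        out.push('\n'); // every line, including the last, ends in a line feed
    }
    out
}

fn main() {
    let buf = vec![0xabu8; 120]; // 120 bytes -> lines of 100, 100, and 40 hex chars
    let encoded = encode_multiline(&buf);
    assert_eq!(encoded.lines().count(), 3);

    // Mirror of the new deserialization step: strip whitespace before decoding.
    let flat: String = encoded.chars().filter(|c| !c.is_whitespace()).collect();
    assert_eq!(hex::decode(flat).unwrap(), buf);
}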
28 changes: 27 additions & 1 deletion tests/basic/replay.rs
@@ -1,5 +1,5 @@
 use crate::{check_replay_roundtrip, check_replay_roundtrip_file, Config, FailurePersistence};
-use shuttle::scheduler::{PctScheduler, ReplayScheduler, Schedule};
+use shuttle::scheduler::{PctScheduler, RandomScheduler, ReplayScheduler, Schedule};
 use shuttle::sync::Mutex;
 use shuttle::{replay, thread, Runner};
 use std::panic;
@@ -136,6 +136,32 @@ fn replay_deadlock3_drop_mutex() {
     runner.run(deadlock_3);
 }
 
+fn long_schedule() {
+    let mut threads = vec![];
+    for _ in 0..100 {
+        threads.push(shuttle::thread::spawn(|| {
+            for _ in 0..100 {
+                shuttle::thread::yield_now();
+            }
+        }));
+    }
+    for t in threads {
+        t.join().unwrap();
+    }
+    // If this were a `panic!`, downcasting the `catch_unwind` error to `String` would fail.
+    assert_eq!(1, 2, "so much work, and all for nothing");
+}
+
+#[test]
+fn replay_long_schedule() {
+    check_replay_roundtrip(long_schedule, RandomScheduler::new(1));
+}
+
+#[test]
+fn replay_long_schedule_file() {
+    check_replay_roundtrip_file(long_schedule, RandomScheduler::new(1));
+}
+
 // Check that FailurePersistence::None does not print a schedule
 #[test]
 fn replay_persist_none() {
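A back-of-the-envelope estimate (an assumption, not something stated in the commit) of why `long_schedule` exercises the multi-line path: the test makes on the order of 100 spawns, 100 * 100 yields, and 100 joins, so the schedule records over ten thousand scheduling decisions, comfortably more than the 50 bytes (100 hex characters) that fit on one line.

// Rough arithmetic only; the exact byte count depends on Shuttle's
// varint/bitvec encoding, but it clearly exceeds one 50-byte line.
fn main() {
    let steps = 100 + 100 * 100 + 100;
    assert!(steps >= 10_200);
    println!("at least {steps} scheduling decisions");
}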
18 changes: 15 additions & 3 deletions tests/mod.rs
@@ -111,8 +111,20 @@ mod parse_schedule {
     }
 
     pub(super) fn from_stdout<S: AsRef<String>>(output: S) -> Option<String> {
-        let string_regex = Regex::new("failing schedule: \"([0-9a-f]+)\"").unwrap();
-        let captures = string_regex.captures(output.as_ref().as_str())?;
-        Some(captures.get(1)?.as_str().to_string())
+        let mut schedule = String::new();
+        let mut lines = output.as_ref().lines();
+        for line in &mut lines {
+            if line.eq("failing schedule:") {
+                break;
+            }
+        }
+        for line in lines {
+            if line.eq("pass that string to `shuttle::replay` to replay the failure") {
+                return Some(schedule);
+            }
+            schedule.push_str(line);
+            schedule.push('\n');
+        }
+        None
     }
 }
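A self-contained sketch of the same two-phase scan, with made-up sample output: skip lines until the `failing schedule:` marker, then accumulate lines (newlines preserved) until the replay hint.

// Sketch of the scan in `from_stdout`: skip to the marker, then collect
// schedule lines until the replay hint. Sample data is made up.
fn parse(output: &str) -> Option<String> {
    let mut lines = output.lines();
    lines.find(|line| *line == "failing schedule:")?;
    let mut schedule = String::new();
    for line in lines {
        if line == "pass that string to `shuttle::replay` to replay the failure" {
            return Some(schedule);
        }
        schedule.push_str(line);
        schedule.push('\n');
    }
    None
}

fn main() {
    let stdout = "boom\nfailing schedule:\n9102ab\ncdef01\npass that string to `shuttle::replay` to replay the failure\n";
    assert_eq!(parse(stdout).as_deref(), Some("9102ab\ncdef01\n"));
}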
