diff --git a/benches/bench.rs b/benches/bench.rs
index 53826ce..4b882b7 100644
--- a/benches/bench.rs
+++ b/benches/bench.rs
@@ -73,3 +73,26 @@ fn u32_fastrand(b: &mut Bencher) {
         sum
     })
 }
+
+#[bench]
+fn fill(b: &mut Bencher) {
+    let rng = fastrand::Rng::new();
+    b.iter(|| {
+        // Pick a size that isn't divisible by 8.
+        let mut bytes = [0u8; 367];
+        rng.fill(&mut bytes);
+        bytes
+    })
+}
+
+#[bench]
+fn fill_naive(b: &mut Bencher) {
+    let rng = fastrand::Rng::new();
+    b.iter(|| {
+        let mut bytes = [0u8; 367];
+        for item in &mut bytes {
+            *item = rng.u8(..);
+        }
+        bytes
+    })
+}
diff --git a/src/lib.rs b/src/lib.rs
index e6f9323..24462e9 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -441,6 +441,22 @@ impl Rng {
         }
     }
 
+    /// Fill a byte slice with random data.
+    #[inline]
+    pub fn fill(&self, slice: &mut [u8]) {
+        // Filling the buffer in chunks of 8 bytes is much faster.
+        let mut chunks = slice.chunks_exact_mut(8);
+        for items in chunks.by_ref() {
+            let r = self.u64(..);
+            items.copy_from_slice(&r.to_le_bytes());
+        }
+
+        let remainder = chunks.into_remainder();
+        for item in remainder {
+            *item = self.u8(..);
+        }
+    }
+
     rng_integer!(
         u8,
         u8,
diff --git a/tests/smoke.rs b/tests/smoke.rs
index 07e5c8e..e6a23eb 100644
--- a/tests/smoke.rs
+++ b/tests/smoke.rs
@@ -81,6 +81,19 @@ fn u128() {
     }
 }
 
+#[test]
+#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+fn fill() {
+    let r = fastrand::Rng::new();
+    let mut a = [0u8; 64];
+    let mut b = [0u8; 64];
+
+    r.fill(&mut a);
+    r.fill(&mut b);
+
+    assert_ne!(a, b);
+}
+
 #[test]
 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
 fn rng() {
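
For context, a minimal usage sketch of the new `Rng::fill` API (not part of the patch; it only assumes the patched crate is available as `fastrand`). Any `&mut [u8]` works, including buffers whose length is not a multiple of 8, which exercise the byte-at-a-time remainder loop:

```rust
fn main() {
    let rng = fastrand::Rng::new();

    // 367 bytes: not divisible by 8, so both the chunked path and the
    // remainder path in Rng::fill are used.
    let mut buf = vec![0u8; 367];
    rng.fill(&mut buf);

    // Overwhelmingly likely for 367 random bytes.
    assert!(buf.iter().any(|&b| b != 0));
}
```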