diff --git a/benches/generators.rs b/benches/generators.rs
index 3f0accd35dd..2d96b8719cb 100644
--- a/benches/generators.rs
+++ b/benches/generators.rs
@@ -12,6 +12,8 @@ use test::{black_box, Bencher};
 use rand::{RngCore, Rng, SeedableRng, NewRng, StdRng, OsRng, JitterRng, EntropyRng};
 use rand::{XorShiftRng, Hc128Rng, IsaacRng, Isaac64Rng, ChaChaRng};
 use rand::reseeding::ReseedingRng;
+use rand::prng::hc128::Hc128Core;
+use rand::thread_rng;
 
 macro_rules! gen_bytes {
     ($fnn:ident, $gen:expr) => {
@@ -150,10 +152,13 @@
 chacha_rounds!(gen_bytes_chacha12, gen_u32_chacha12, gen_u64_chacha12, 12);
 chacha_rounds!(gen_bytes_chacha20, gen_u32_chacha20, gen_u64_chacha20, 20);
 
+const RESEEDING_THRESHOLD: u64 = 1024*1024*1024; // something high enough to get
+                                                 // deterministic measurements
+
 #[bench]
 fn reseeding_hc128_bytes(b: &mut Bencher) {
-    let mut rng = ReseedingRng::new(Hc128Rng::new(),
-                                    128*1024*1024,
+    let mut rng = ReseedingRng::new(Hc128Core::new(),
+                                    RESEEDING_THRESHOLD,
                                     EntropyRng::new());
     let mut buf = [0u8; BYTES_LEN];
     b.iter(|| {
@@ -169,8 +174,8 @@ macro_rules! reseeding_uint {
     ($fnn:ident, $ty:ty) => {
         #[bench]
         fn $fnn(b: &mut Bencher) {
-            let mut rng = ReseedingRng::new(Hc128Rng::new(),
-                                            128*1024*1024,
+            let mut rng = ReseedingRng::new(Hc128Core::new(),
+                                            RESEEDING_THRESHOLD,
                                             EntropyRng::new());
             b.iter(|| {
                 for _ in 0..RAND_BENCH_N {
@@ -184,3 +189,22 @@ macro_rules! reseeding_uint {
 
 reseeding_uint!(reseeding_hc128_u32, u32);
 reseeding_uint!(reseeding_hc128_u64, u64);
+
+
+macro_rules! threadrng_uint {
+    ($fnn:ident, $ty:ty) => {
+        #[bench]
+        fn $fnn(b: &mut Bencher) {
+            let mut rng = thread_rng();
+            b.iter(|| {
+                for _ in 0..RAND_BENCH_N {
+                    black_box(rng.gen::<$ty>());
+                }
+            });
+            b.bytes = size_of::<$ty>() as u64 * RAND_BENCH_N;
+        }
+    }
+}
+
+threadrng_uint!(thread_rng_u32, u32);
+threadrng_uint!(thread_rng_u64, u64);
diff --git a/rand-core/src/impls.rs b/rand-core/src/impls.rs
index 5b35c099d33..e79e3927ab7 100644
--- a/rand-core/src/impls.rs
+++ b/rand-core/src/impls.rs
@@ -21,10 +21,10 @@
 use core::intrinsics::transmute;
 use core::ptr::copy_nonoverlapping;
-use core::slice;
+use core::{fmt, slice};
 use core::cmp::min;
 use core::mem::size_of;
-use RngCore;
+use {RngCore, BlockRngCore, CryptoRng, SeedableRng, Error};
 
 /// Implement `next_u64` via `next_u32`, little-endian order.
 pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
@@ -164,4 +164,172 @@ pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
     impl_uint_from_fill!(rng, u64, 8)
 }
 
+/// Wrapper around PRNGs that implement [`BlockRngCore`] to keep a results
+/// buffer and offer the methods from [`RngCore`].
+///
+/// `BlockRng` has heavily optimized implementations of the [`RngCore`] methods
+/// reading values from the results buffer, as well as
+/// calling `BlockRngCore::generate` directly on the output array when
+/// `fill_bytes` / `try_fill_bytes` is called on a large array. These methods
+/// also handle the bookkeeping of when to generate a new batch of values.
+/// No generated values are ever thrown away.
+///
+/// Currently `BlockRng` only implements `RngCore` for buffers which are slices
+/// of `u32` elements; this may be extended to other types in the future.
+///
+/// For easy initialization `BlockRng` also implements [`SeedableRng`].
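+///
+/// # Example
+///
+/// A minimal sketch of the wiring. The counter core below is of course not
+/// random; all names apart from the `rand_core` items are illustrative.
+///
+/// ```no_run
+/// use rand_core::{RngCore, BlockRngCore};
+/// use rand_core::impls::BlockRng;
+///
+/// struct CounterCore { state: u32 }
+///
+/// impl BlockRngCore<u32> for CounterCore {
+///     type Results = [u32; 16];
+///     fn generate(&mut self, results: &mut Self::Results) {
+///         for r in results.iter_mut() {
+///             self.state = self.state.wrapping_add(1);
+///             *r = self.state;
+///         }
+///     }
+/// }
+///
+/// // `index` equal to the buffer length means "generate on first use".
+/// let mut rng = BlockRng { core: CounterCore { state: 0 },
+///                          results: [0u32; 16], index: 16 };
+/// assert_eq!(rng.next_u32(), 1);
+/// ```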
+///
+/// [`BlockRngCore`]: ../BlockRngCore.t.html
+/// [`RngCore`]: ../RngCore.t.html
+/// [`SeedableRng`]: ../SeedableRng.t.html
+#[derive(Clone)]
+pub struct BlockRng<R: BlockRngCore<u32>> {
+    pub core: R,
+    pub results: R::Results,
+    pub index: usize,
+}
+
+// Custom Debug implementation that does not expose the contents of `results`.
+impl<R: BlockRngCore<u32> + fmt::Debug> fmt::Debug for BlockRng<R> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("BlockRng")
+           .field("core", &self.core)
+           .field("result_len", &self.results.as_ref().len())
+           .field("index", &self.index)
+           .finish()
+    }
+}
+
+impl<R: BlockRngCore<u32>> RngCore for BlockRng<R> {
+    #[inline(always)]
+    fn next_u32(&mut self) -> u32 {
+        if self.index >= self.results.as_ref().len() {
+            self.core.generate(&mut self.results);
+            self.index = 0;
+        }
+
+        let value = self.results.as_ref()[self.index];
+        self.index += 1;
+        value
+    }
+
+    #[inline(always)]
+    fn next_u64(&mut self) -> u64 {
+        let read_u64 = |results: &[u32], index| {
+            if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
+                // requires little-endian CPU supporting unaligned reads:
+                unsafe { *(&results[index] as *const u32 as *const u64) }
+            } else {
+                let x = results[index] as u64;
+                let y = results[index + 1] as u64;
+                (y << 32) | x
+            }
+        };
+
+        let len = self.results.as_ref().len();
+
+        let index = self.index;
+        if index < len-1 {
+            self.index += 2;
+            // Read a u64 from the current index
+            read_u64(self.results.as_ref(), index)
+        } else if index >= len {
+            self.core.generate(&mut self.results);
+            self.index = 2;
+            read_u64(self.results.as_ref(), 0)
+        } else {
+            let x = self.results.as_ref()[len-1] as u64;
+            self.core.generate(&mut self.results);
+            self.index = 1;
+            let y = self.results.as_ref()[0] as u64;
+            (y << 32) | x
+        }
+    }
+
+    // As an optimization we try to write directly into the output buffer.
+    // This is only enabled for little-endian platforms where unaligned writes
+    // are known to be safe and fast.
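+    // For a large `dest` this works in three stages: first drain whatever is
+    // left in `results`, then let `generate` write whole blocks (the length
+    // of `results` times 4 bytes) directly into `dest`, and finally generate
+    // one more block for any remaining tail.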
+    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+    fn fill_bytes(&mut self, dest: &mut [u8]) {
+        let mut filled = 0;
+
+        // Continue filling from the current set of results
+        if self.index < self.results.as_ref().len() {
+            let (consumed_u32, filled_u8) =
+                fill_via_u32_chunks(&self.results.as_ref()[self.index..],
+                                    dest);
+
+            self.index += consumed_u32;
+            filled += filled_u8;
+        }
+
+        let len_remainder =
+            (dest.len() - filled) % (self.results.as_ref().len() * 4);
+        let end_direct = dest.len() - len_remainder;
+
+        while filled < end_direct {
+            let dest_u32: &mut R::Results = unsafe {
+                ::core::mem::transmute(dest[filled..].as_mut_ptr())
+            };
+            self.core.generate(dest_u32);
+            filled += self.results.as_ref().len() * 4;
+        }
+        self.index = self.results.as_ref().len();
+
+        if len_remainder > 0 {
+            self.core.generate(&mut self.results);
+            let (consumed_u32, _) =
+                fill_via_u32_chunks(&mut self.results.as_ref(),
+                                    &mut dest[filled..]);
+
+            self.index = consumed_u32;
+        }
+    }
+
+    #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+    fn fill_bytes(&mut self, dest: &mut [u8]) {
+        let mut read_len = 0;
+        while read_len < dest.len() {
+            if self.index >= self.results.as_ref().len() {
+                self.core.generate(&mut self.results);
+                self.index = 0;
+            }
+            let (consumed_u32, filled_u8) =
+                fill_via_u32_chunks(&self.results.as_ref()[self.index..],
+                                    &mut dest[read_len..]);
+
+            self.index += consumed_u32;
+            read_len += filled_u8;
+        }
+    }
+
+    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+        Ok(self.fill_bytes(dest))
+    }
+}
+
+impl<R: BlockRngCore<u32> + SeedableRng> SeedableRng for BlockRng<R> {
+    type Seed = R::Seed;
+
+    fn from_seed(seed: Self::Seed) -> Self {
+        let results_empty = R::Results::default();
+        Self {
+            core: R::from_seed(seed),
+            index: results_empty.as_ref().len(), // generate on first use
+            results: results_empty,
+        }
+    }
+
+    fn from_rng<RNG: RngCore>(rng: &mut RNG) -> Result<Self, Error> {
+        let results_empty = R::Results::default();
+        Ok(Self {
+            core: R::from_rng(rng)?,
+            index: results_empty.as_ref().len(), // generate on first use
+            results: results_empty,
+        })
+    }
+}
+
+impl<R: BlockRngCore<u32> + CryptoRng> CryptoRng for BlockRng<R> {}
+
 // TODO: implement tests for the above
diff --git a/rand-core/src/lib.rs b/rand-core/src/lib.rs
index c29a00573fd..c373c68f1ab 100644
--- a/rand-core/src/lib.rs
+++ b/rand-core/src/lib.rs
@@ -162,8 +162,55 @@ pub trait RngCore {
     fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error>;
 }
 
-/// A marker trait for an `Rng` which may be considered for use in
-/// cryptography.
+/// A trait for RNGs which do not generate random numbers individually, but in
+/// blocks (typically `[u32; N]`). This technique is commonly used by
+/// cryptographic RNGs to improve performance.
+///
+/// Usage of this trait is optional, but provides two advantages:
+/// implementations only need to concern themselves with generation of the
+/// block, not the various `RngCore` methods (especially `fill_bytes`, where the
+/// optimal implementations are not trivial), and this allows `ReseedingRng` to
+/// perform periodic reseeding with very low overhead.
+///
+/// # Example
+///
+/// ```no_run
+/// use rand_core::{BlockRngCore, SeedableRng};
+/// use rand_core::impls::BlockRng;
+///
+/// struct MyRngCore;
+///
+/// impl BlockRngCore<u32> for MyRngCore {
+///     type Results = [u32; 16];
+///
+///     fn generate(&mut self, results: &mut Self::Results) {
+///         unimplemented!()
+///     }
+/// }
+///
+/// impl SeedableRng for MyRngCore {
+///     type Seed = [u8; 32];
+///     fn from_seed(seed: Self::Seed) -> Self {
+///         unimplemented!()
+///     }
+/// }
+///
+/// // optionally, also implement CryptoRng for MyRngCore
+///
+/// // Final RNG.
+/// type MyRng = BlockRng<MyRngCore>;
+/// ```
+pub trait BlockRngCore<T>: Sized {
+    /// Results type. This is the 'block' an RNG implementing `BlockRngCore`
+    /// generates, which will usually be an array like `[u32; 16]`.
+    type Results: AsRef<[T]> + Default;
+
+    /// Generate a new block of results.
+    fn generate(&mut self, results: &mut Self::Results);
+}
+
+/// A marker trait used to indicate that an `RngCore` or `BlockRngCore`
+/// implementation is supposed to be cryptographically secure.
 ///
 /// *Cryptographically secure generators*, also known as *CSPRNGs*, should
 /// satisfy an additional properties over other generators: given the first
@@ -182,7 +229,7 @@ pub trait RngCore {
 ///
 /// Note also that use of a `CryptoRng` does not protect against other
 /// weaknesses such as seeding from a weak entropy source or leaking state.
-pub trait CryptoRng: RngCore {}
+pub trait CryptoRng {}
 
 /// A random number generator that can be explicitly seeded.
 ///
@@ -263,22 +310,20 @@ pub trait SeedableRng: Sized {
 
 impl<'a, R: RngCore + ?Sized> RngCore for &'a mut R {
-    #[inline]
+    #[inline(always)]
     fn next_u32(&mut self) -> u32 {
         (**self).next_u32()
     }
 
-    #[inline]
+    #[inline(always)]
     fn next_u64(&mut self) -> u64 {
         (**self).next_u64()
     }
 
-    #[inline]
     fn fill_bytes(&mut self, dest: &mut [u8]) {
         (**self).fill_bytes(dest)
     }
-
-    #[inline]
+
     fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
         (**self).try_fill_bytes(dest)
     }
@@ -286,22 +331,20 @@ impl<'a, R: RngCore + ?Sized> RngCore for &'a mut R {
 
 #[cfg(any(feature="std", feature="alloc"))]
 impl<R: RngCore + ?Sized> RngCore for Box<R> {
-    #[inline]
+    #[inline(always)]
     fn next_u32(&mut self) -> u32 {
         (**self).next_u32()
     }
 
-    #[inline]
+    #[inline(always)]
     fn next_u64(&mut self) -> u64 {
         (**self).next_u64()
    }
 
-    #[inline]
     fn fill_bytes(&mut self, dest: &mut [u8]) {
         (**self).fill_bytes(dest)
     }
-
-    #[inline]
+
     fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
         (**self).try_fill_bytes(dest)
     }
diff --git a/src/lib.rs b/src/lib.rs
index 87fe888c0c5..79d56dfd61e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -278,6 +278,7 @@ pub use jitter::JitterRng;
 #[cfg(feature="std")] pub use os::OsRng;
 
 // pseudo rngs
+pub mod prng;
 pub use isaac::{IsaacRng, Isaac64Rng};
 pub use chacha::ChaChaRng;
 pub use prng::XorShiftRng;
@@ -312,7 +313,6 @@ pub mod isaac {
 
 // private modules
 #[cfg(feature="std")] mod entropy_rng;
-mod prng;
 #[cfg(feature="std")] mod thread_rng;
 
@@ -800,10 +800,12 @@ impl<R: SeedableRng> NewRng for R {
 pub struct StdRng(Hc128Rng);
 
 impl RngCore for StdRng {
+    #[inline(always)]
     fn next_u32(&mut self) -> u32 {
         self.0.next_u32()
     }
 
+    #[inline(always)]
     fn next_u64(&mut self) -> u64 {
         self.0.next_u64()
     }
diff --git a/src/prng/chacha.rs b/src/prng/chacha.rs
index 0c20a8a30d9..58603bc4feb 100644
--- a/src/prng/chacha.rs
+++ b/src/prng/chacha.rs
@@ -11,7 +11,8 @@
 //! The ChaCha random number generator.
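+//!
+//! As of this change the ChaCha state transition lives in `ChaChaCore`, a
+//! `BlockRngCore` implementation, while `ChaChaRng` wraps it in a `BlockRng`
+//! that owns the output buffer and provides the `RngCore` methods.
 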
 use core::fmt;
-use rand_core::{RngCore, CryptoRng, SeedableRng, Error, impls, le};
+use rand_core::{BlockRngCore, CryptoRng, RngCore, SeedableRng, Error, le};
+use rand_core::impls::BlockRng;
 
 const SEED_WORDS: usize = 8; // 8 words for the 256-bit key
 const STATE_WORDS: usize = 16;
@@ -61,45 +62,45 @@
 /// http://cr.yp.to/papers.html#xsalsa)
 ///
 /// [`set_rounds`]: #method.set_counter
-#[derive(Clone)]
-pub struct ChaChaRng {
-    buffer: [u32; STATE_WORDS], // Internal buffer of output
-    state:  [u32; STATE_WORDS], // Initial state
-    index:  usize,              // Index into state
-    rounds: usize,
-}
+#[derive(Clone, Debug)]
+pub struct ChaChaRng(BlockRng<ChaChaCore>);
 
-// Custom Debug implementation that does not expose the internal state
-impl fmt::Debug for ChaChaRng {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "ChaChaRng {{}}")
+impl RngCore for ChaChaRng {
+    #[inline]
+    fn next_u32(&mut self) -> u32 {
+        self.0.next_u32()
     }
-}
 
-macro_rules! quarter_round{
-    ($a: expr, $b: expr, $c: expr, $d: expr) => {{
-        $a = $a.wrapping_add($b); $d ^= $a; $d = $d.rotate_left(16);
-        $c = $c.wrapping_add($d); $b ^= $c; $b = $b.rotate_left(12);
-        $a = $a.wrapping_add($b); $d ^= $a; $d = $d.rotate_left( 8);
-        $c = $c.wrapping_add($d); $b ^= $c; $b = $b.rotate_left( 7);
-    }}
+    #[inline]
+    fn next_u64(&mut self) -> u64 {
+        self.0.next_u64()
+    }
+
+    #[inline]
+    fn fill_bytes(&mut self, dest: &mut [u8]) {
+        self.0.fill_bytes(dest)
+    }
+
+    #[inline]
+    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+        self.0.try_fill_bytes(dest)
+    }
 }
 
-macro_rules! double_round{
-    ($x: expr) => {{
-        // Column round
-        quarter_round!($x[ 0], $x[ 4], $x[ 8], $x[12]);
-        quarter_round!($x[ 1], $x[ 5], $x[ 9], $x[13]);
-        quarter_round!($x[ 2], $x[ 6], $x[10], $x[14]);
-        quarter_round!($x[ 3], $x[ 7], $x[11], $x[15]);
-        // Diagonal round
-        quarter_round!($x[ 0], $x[ 5], $x[10], $x[15]);
-        quarter_round!($x[ 1], $x[ 6], $x[11], $x[12]);
-        quarter_round!($x[ 2], $x[ 7], $x[ 8], $x[13]);
-        quarter_round!($x[ 3], $x[ 4], $x[ 9], $x[14]);
-    }}
+impl SeedableRng for ChaChaRng {
+    type Seed = <ChaChaCore as SeedableRng>::Seed;
+
+    fn from_seed(seed: Self::Seed) -> Self {
+        ChaChaRng(BlockRng::<ChaChaCore>::from_seed(seed))
+    }
+
+    fn from_rng<R: RngCore>(rng: &mut R) -> Result<Self, Error> {
+        BlockRng::<ChaChaCore>::from_rng(rng).map(|rng| ChaChaRng(rng))
+    }
 }
 
+impl CryptoRng for ChaChaRng {}
+
 impl ChaChaRng {
     /// Create an ChaCha random number generator using the default
     /// fixed key of 8 zero words.
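The refactor keeps `ChaChaRng`'s public behaviour intact; only the internals move into `ChaChaCore` plus `BlockRng`. A short usage sketch (seed value arbitrary, API as introduced in this PR):

```rust
use rand::{ChaChaRng, RngCore, SeedableRng};

let mut rng = ChaChaRng::from_seed([0u8; 32]);
let _word = rng.next_u32();  // served from the buffered block
let mut buf = [0u8; 64];     // one whole 16-word ChaCha block
rng.fill_bytes(&mut buf);    // drains the buffer, then writes blocks directly
```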
@@ -152,11 +153,8 @@ impl ChaChaRng {
     ///     assert_eq!(rng1.next_u32(), rng2.next_u32());
     /// ```
     pub fn set_counter(&mut self, counter_low: u64, counter_high: u64) {
-        self.state[12] = counter_low as u32;
-        self.state[13] = (counter_low >> 32) as u32;
-        self.state[14] = counter_high as u32;
-        self.state[15] = (counter_high >> 32) as u32;
-        self.index = STATE_WORDS; // force recomputation on next use
+        self.0.core.set_counter(counter_low, counter_high);
+        self.0.index = STATE_WORDS; // force recomputation on next use
     }
 
     /// Sets the number of rounds to run the ChaCha core algorithm per block to
@@ -179,13 +177,52 @@ impl ChaChaRng {
     ///     assert_eq!(rng.next_u32(), 0x2fef003e);
     /// ```
     pub fn set_rounds(&mut self, rounds: usize) {
-        assert!([4usize, 8, 12, 16, 20].iter().any(|x| *x == rounds));
-        self.rounds = rounds;
-        self.index = STATE_WORDS; // force recomputation on next use
+        self.0.core.set_rounds(rounds);
+        self.0.index = STATE_WORDS; // force recomputation on next use
     }
+}
 
-    /// Refill the internal output buffer (`self.buffer`)
-    fn update(&mut self) {
+#[derive(Clone)]
+pub struct ChaChaCore {
+    state:  [u32; STATE_WORDS],
+    rounds: usize,
+}
+
+// Custom Debug implementation that does not expose the internal state
+impl fmt::Debug for ChaChaCore {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "ChaChaCore {{}}")
+    }
+}
+
+macro_rules! quarter_round{
+    ($a: expr, $b: expr, $c: expr, $d: expr) => {{
+        $a = $a.wrapping_add($b); $d ^= $a; $d = $d.rotate_left(16);
+        $c = $c.wrapping_add($d); $b ^= $c; $b = $b.rotate_left(12);
+        $a = $a.wrapping_add($b); $d ^= $a; $d = $d.rotate_left( 8);
+        $c = $c.wrapping_add($d); $b ^= $c; $b = $b.rotate_left( 7);
+    }}
+}
+
+macro_rules! double_round{
+    ($x: expr) => {{
+        // Column round
+        quarter_round!($x[ 0], $x[ 4], $x[ 8], $x[12]);
+        quarter_round!($x[ 1], $x[ 5], $x[ 9], $x[13]);
+        quarter_round!($x[ 2], $x[ 6], $x[10], $x[14]);
+        quarter_round!($x[ 3], $x[ 7], $x[11], $x[15]);
+        // Diagonal round
+        quarter_round!($x[ 0], $x[ 5], $x[10], $x[15]);
+        quarter_round!($x[ 1], $x[ 6], $x[11], $x[12]);
+        quarter_round!($x[ 2], $x[ 7], $x[ 8], $x[13]);
+        quarter_round!($x[ 3], $x[ 4], $x[ 9], $x[14]);
+    }}
+}
+
+impl BlockRngCore<u32> for ChaChaCore {
+    type Results = [u32; STATE_WORDS];
+
+    fn generate(&mut self, results: &mut Self::Results) {
         // For some reason extracting this part into a separate function
         // improves performance by 50%.
         fn core(results: &mut [u32; STATE_WORDS],
@@ -201,79 +238,55 @@ impl ChaChaRng {
             }
         }
 
-        core(&mut self.buffer, &self.state, self.rounds);
-        self.index = 0;
+        core(results, &self.state, self.rounds);
+
         // update 128-bit counter
         self.state[12] = self.state[12].wrapping_add(1);
-        if self.state[12] != 0 { return };
+        if self.state[12] != 0 { return; };
         self.state[13] = self.state[13].wrapping_add(1);
-        if self.state[13] != 0 { return };
+        if self.state[13] != 0 { return; };
        self.state[14] = self.state[14].wrapping_add(1);
-        if self.state[14] != 0 { return };
+        if self.state[14] != 0 { return; };
         self.state[15] = self.state[15].wrapping_add(1);
     }
 }
 
-impl RngCore for ChaChaRng {
-    #[inline]
-    fn next_u32(&mut self) -> u32 {
-        // Using a local variable for `index`, and checking the size avoids a
-        // bounds check later on.
-        let mut index = self.index as usize;
-        if index >= STATE_WORDS {
-            self.update();
-            index = 0;
-        }
-
-        let value = self.buffer[index];
-        self.index += 1;
-        value
-    }
-
-    fn next_u64(&mut self) -> u64 {
-        impls::next_u64_via_u32(self)
-    }
-
-    fn fill_bytes(&mut self, dest: &mut [u8]) {
-        let mut read_len = 0;
-        while read_len < dest.len() {
-            if self.index >= self.buffer.len() {
-                self.update();
-            }
-
-            let (consumed_u32, filled_u8) =
-                impls::fill_via_u32_chunks(&self.buffer[self.index..],
-                                           &mut dest[read_len..]);
-
-            self.index += consumed_u32;
-            read_len += filled_u8;
-        }
+impl ChaChaCore {
+    /// Sets the internal 128-bit ChaCha counter to a user-provided value. This
+    /// permits jumping arbitrarily ahead (or backwards) in the pseudorandom
+    /// stream.
+    pub fn set_counter(&mut self, counter_low: u64, counter_high: u64) {
+        self.state[12] = counter_low as u32;
+        self.state[13] = (counter_low >> 32) as u32;
+        self.state[14] = counter_high as u32;
+        self.state[15] = (counter_high >> 32) as u32;
     }
 
-    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
-        Ok(self.fill_bytes(dest))
+    /// Sets the number of rounds to run the ChaCha core algorithm per block to
+    /// generate.
+    pub fn set_rounds(&mut self, rounds: usize) {
+        assert!([4usize, 8, 12, 16, 20].iter().any(|x| *x == rounds));
+        self.rounds = rounds;
     }
 }
 
-impl CryptoRng for ChaChaRng {}
-
-impl SeedableRng for ChaChaRng {
+impl SeedableRng for ChaChaCore {
     type Seed = [u8; SEED_WORDS*4];
 
     fn from_seed(seed: Self::Seed) -> Self {
         let mut seed_le = [0u32; SEED_WORDS];
         le::read_u32_into(&seed, &mut seed_le);
-        ChaChaRng {
-            buffer: [0; STATE_WORDS],
+        Self {
             state: [0x61707865, 0x3320646E, 0x79622D32, 0x6B206574, // constants
                     seed_le[0], seed_le[1], seed_le[2], seed_le[3], // seed
                     seed_le[4], seed_le[5], seed_le[6], seed_le[7], // seed
                     0, 0, 0, 0], // counter
-            index: STATE_WORDS, // generate on first use
             rounds: 20,
         }
     }
 }
 
+impl CryptoRng for ChaChaCore {}
+
 #[cfg(test)]
 mod test {
     use {RngCore, SeedableRng};
diff --git a/src/prng/hc128.rs b/src/prng/hc128.rs
index 78152a0ca52..d8148fe7bd8 100644
--- a/src/prng/hc128.rs
+++ b/src/prng/hc128.rs
@@ -11,7 +11,8 @@
 //! The HC-128 random number generator.
 
 use core::fmt;
-use rand_core::{RngCore, CryptoRng, SeedableRng, Error, impls, le};
+use rand_core::{BlockRngCore, CryptoRng, RngCore, SeedableRng, Error, le};
+use rand_core::impls::BlockRng;
 
 const SEED_WORDS: usize = 8; // 128 bit key followed by 128 bit iv
@@ -60,89 +61,109 @@
 ///
 /// [5]: Internet Engineering Task Force (Februari 2015),
 ///      ["Prohibiting RC4 Cipher Suites"](https://tools.ietf.org/html/rfc7465).
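+///
+/// # Example
+///
+/// A sketch of explicit seeding (values arbitrary); the 32-byte seed is the
+/// 128-bit `key` followed by the 128-bit `iv`:
+///
+/// ```no_run
+/// use rand::{Hc128Rng, RngCore, SeedableRng};
+///
+/// let mut seed = [0u8; 32];
+/// seed[..16].copy_from_slice(&[1; 16]); // key; the iv stays zero
+/// let mut rng = Hc128Rng::from_seed(seed);
+/// let _word = rng.next_u32();
+/// ```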
-#[derive(Clone)]
-pub struct Hc128Rng {
-    state:   Hc128,
-    results: [u32; 16],
-    index:   usize,
-}
+#[derive(Clone, Debug)]
+pub struct Hc128Rng(BlockRng<Hc128Core>);
 
-#[derive(Copy)]
-struct Hc128 {
-    t: [u32; 1024],
-    counter1024: usize,
+impl RngCore for Hc128Rng {
+    #[inline(always)]
+    fn next_u32(&mut self) -> u32 {
+        self.0.next_u32()
+    }
+
+    #[inline(always)]
+    fn next_u64(&mut self) -> u64 {
+        self.0.next_u64()
+    }
+
+    fn fill_bytes(&mut self, dest: &mut [u8]) {
+        self.0.fill_bytes(dest)
+    }
+
+    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+        self.0.try_fill_bytes(dest)
+    }
 }
 
-// Cannot be derived because [u32; 1024] does not implement Clone in
-// Rust < 1.21.0 (since https://github.com/rust-lang/rust/pull/43690)
-impl Clone for Hc128 {
-    fn clone(&self) -> Hc128 {
-        *self
+impl SeedableRng for Hc128Rng {
+    type Seed = <Hc128Core as SeedableRng>::Seed;
+
+    fn from_seed(seed: Self::Seed) -> Self {
+        Hc128Rng(BlockRng::<Hc128Core>::from_seed(seed))
+    }
+
+    fn from_rng<R: RngCore>(rng: &mut R) -> Result<Self, Error> {
+        BlockRng::<Hc128Core>::from_rng(rng).map(|rng| Hc128Rng(rng))
     }
 }
 
+impl CryptoRng for Hc128Rng {}
+
+/// The core of `Hc128Rng`, used with `BlockRng`.
+#[derive(Clone)]
+pub struct Hc128Core {
+    t: [u32; 1024],
+    counter1024: usize,
+}
+
 // Custom Debug implementation that does not expose the internal state
-impl fmt::Debug for Hc128Rng {
+impl fmt::Debug for Hc128Core {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "Hc128Rng {{}}")
+        write!(f, "Hc128Core {{}}")
     }
 }
 
-impl Hc128Rng {
-    // Initialize an HC-128 random number generator. The seed has to be
-    // 256 bits in length (`[u32; 8]`), matching the 128 bit `key` followed by
-    // 128 bit `iv` when HC-128 where to be used as a stream cipher.
-    fn init(seed: [u32; SEED_WORDS]) -> Self {
-        #[inline]
-        fn f1(x: u32) -> u32 {
-            x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3)
-        }
-
-        #[inline]
-        fn f2(x: u32) -> u32 {
-            x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10)
-        }
+impl BlockRngCore<u32> for Hc128Core {
+    type Results = [u32; 16];
 
-        let mut t = [0u32; 1024];
-
-        // Expand the key and iv into P and Q
-        let (key, iv) = seed.split_at(4);
-        t[..4].copy_from_slice(key);
-        t[4..8].copy_from_slice(key);
-        t[8..12].copy_from_slice(iv);
-        t[12..16].copy_from_slice(iv);
+    fn generate(&mut self, results: &mut Self::Results) {
+        assert!(self.counter1024 % 16 == 0);
 
-        // Generate the 256 intermediate values W[16] ... W[256+16-1], and
-        // copy the last 16 generated values to the start op P.
-        for i in 16..256+16 {
-            t[i] = f2(t[i-2]).wrapping_add(t[i-7]).wrapping_add(f1(t[i-15]))
-                   .wrapping_add(t[i-16]).wrapping_add(i as u32);
-        }
-        {
-            let (p1, p2) = t.split_at_mut(256);
-            p1[0..16].copy_from_slice(&p2[0..16]);
-        }
+        let cc = self.counter1024 % 512;
+        let dd = (cc + 16) % 512;
+        let ee = cc.wrapping_sub(16) % 512;
 
-        // Generate both the P and Q tables
-        for i in 16..1024 {
-            t[i] = f2(t[i-2]).wrapping_add(t[i-7]).wrapping_add(f1(t[i-15]))
-                   .wrapping_add(t[i-16]).wrapping_add(256 + i as u32);
+        if self.counter1024 & 512 == 0 {
+            // P block
+            results[0]  = self.step_p(cc+0,  cc+1,  ee+13, ee+6,  ee+4);
+            results[1]  = self.step_p(cc+1,  cc+2,  ee+14, ee+7,  ee+5);
+            results[2]  = self.step_p(cc+2,  cc+3,  ee+15, ee+8,  ee+6);
+            results[3]  = self.step_p(cc+3,  cc+4,  cc+0,  ee+9,  ee+7);
+            results[4]  = self.step_p(cc+4,  cc+5,  cc+1,  ee+10, ee+8);
+            results[5]  = self.step_p(cc+5,  cc+6,  cc+2,  ee+11, ee+9);
+            results[6]  = self.step_p(cc+6,  cc+7,  cc+3,  ee+12, ee+10);
+            results[7]  = self.step_p(cc+7,  cc+8,  cc+4,  ee+13, ee+11);
+            results[8]  = self.step_p(cc+8,  cc+9,  cc+5,  ee+14, ee+12);
+            results[9]  = self.step_p(cc+9,  cc+10, cc+6,  ee+15, ee+13);
+            results[10] = self.step_p(cc+10, cc+11, cc+7,  cc+0,  ee+14);
+            results[11] = self.step_p(cc+11, cc+12, cc+8,  cc+1,  ee+15);
+            results[12] = self.step_p(cc+12, cc+13, cc+9,  cc+2,  cc+0);
+            results[13] = self.step_p(cc+13, cc+14, cc+10, cc+3,  cc+1);
+            results[14] = self.step_p(cc+14, cc+15, cc+11, cc+4,  cc+2);
+            results[15] = self.step_p(cc+15, dd+0,  cc+12, cc+5,  cc+3);
+        } else {
+            // Q block
+            results[0]  = self.step_q(cc+0,  cc+1,  ee+13, ee+6,  ee+4);
+            results[1]  = self.step_q(cc+1,  cc+2,  ee+14, ee+7,  ee+5);
+            results[2]  = self.step_q(cc+2,  cc+3,  ee+15, ee+8,  ee+6);
+            results[3]  = self.step_q(cc+3,  cc+4,  cc+0,  ee+9,  ee+7);
+            results[4]  = self.step_q(cc+4,  cc+5,  cc+1,  ee+10, ee+8);
+            results[5]  = self.step_q(cc+5,  cc+6,  cc+2,  ee+11, ee+9);
+            results[6]  = self.step_q(cc+6,  cc+7,  cc+3,  ee+12, ee+10);
+            results[7]  = self.step_q(cc+7,  cc+8,  cc+4,  ee+13, ee+11);
+            results[8]  = self.step_q(cc+8,  cc+9,  cc+5,  ee+14, ee+12);
+            results[9]  = self.step_q(cc+9,  cc+10, cc+6,  ee+15, ee+13);
+            results[10] = self.step_q(cc+10, cc+11, cc+7,  cc+0,  ee+14);
+            results[11] = self.step_q(cc+11, cc+12, cc+8,  cc+1,  ee+15);
+            results[12] = self.step_q(cc+12, cc+13, cc+9,  cc+2,  cc+0);
+            results[13] = self.step_q(cc+13, cc+14, cc+10, cc+3,  cc+1);
+            results[14] = self.step_q(cc+14, cc+15, cc+11, cc+4,  cc+2);
+            results[15] = self.step_q(cc+15, dd+0,  cc+12, cc+5,  cc+3);
         }
-
-        let mut state = Hc128Rng {
-            state: Hc128 { t: t, counter1024: 0 },
-            results: [0; 16],
-            index: 16, // generate on first use
-        };
-
-        // run the cipher 1024 steps
-        for _ in 0..64 { state.state.sixteen_steps() };
-        state.state.counter1024 = 0;
-        state
+        self.counter1024 = self.counter1024.wrapping_add(16);
     }
 }
 
-impl Hc128 {
+impl Hc128Core {
     // One step of HC-128, update P and generate 32 bits keystream
     #[inline(always)]
     fn step_p(&mut self, i: usize, i511: usize, i3: usize, i10: usize, i12: usize)
@@ -194,53 +215,6 @@
     }
 
-    fn update(&mut self, results: &mut [u32]) {
-        assert!(self.counter1024 % 16 == 0);
-
-        let cc = self.counter1024 % 512;
-        let dd = (cc + 16) % 512;
-        let ee = cc.wrapping_sub(16) % 512;
-
-        if self.counter1024 & 512 == 0 {
-            // P block
-            results[0]  = self.step_p(cc+0,  cc+1,  ee+13, ee+6,  ee+4);
-            results[1]  = self.step_p(cc+1,  cc+2,  ee+14, ee+7,  ee+5);
-            results[2]  = self.step_p(cc+2,  cc+3,  ee+15, ee+8,  ee+6);
-            results[3]  = self.step_p(cc+3,  cc+4,  cc+0,  ee+9,  ee+7);
-            results[4]  = self.step_p(cc+4,  cc+5,  cc+1,  ee+10, ee+8);
-            results[5]  = self.step_p(cc+5,  cc+6,  cc+2,  ee+11, ee+9);
-            results[6]  = self.step_p(cc+6,  cc+7,  cc+3,  ee+12, ee+10);
-            results[7]  = self.step_p(cc+7,  cc+8,  cc+4,  ee+13, ee+11);
-            results[8]  = self.step_p(cc+8,  cc+9,  cc+5,  ee+14, ee+12);
-            results[9]  = self.step_p(cc+9,  cc+10, cc+6,  ee+15, ee+13);
-            results[10] = self.step_p(cc+10, cc+11, cc+7,  cc+0,  ee+14);
-            results[11] = self.step_p(cc+11, cc+12, cc+8,  cc+1,  ee+15);
-            results[12] = self.step_p(cc+12, cc+13, cc+9,  cc+2,  cc+0);
-            results[13] = self.step_p(cc+13, cc+14, cc+10, cc+3,  cc+1);
-            results[14] = self.step_p(cc+14, cc+15, cc+11, cc+4,  cc+2);
-            results[15] = self.step_p(cc+15, dd+0,  cc+12, cc+5,  cc+3);
-        } else {
-            // Q block
-            results[0]  = self.step_q(cc+0,  cc+1,  ee+13, ee+6,  ee+4);
-            results[1]  = self.step_q(cc+1,  cc+2,  ee+14, ee+7,  ee+5);
-            results[2]  = self.step_q(cc+2,  cc+3,  ee+15, ee+8,  ee+6);
-            results[3]  = self.step_q(cc+3,  cc+4,  cc+0,  ee+9,  ee+7);
-            results[4]  = self.step_q(cc+4,  cc+5,  cc+1,  ee+10, ee+8);
-            results[5]  = self.step_q(cc+5,  cc+6,  cc+2,  ee+11, ee+9);
-            results[6]  = self.step_q(cc+6,  cc+7,  cc+3,  ee+12, ee+10);
-            results[7]  = self.step_q(cc+7,  cc+8,  cc+4,  ee+13, ee+11);
-            results[8]  = self.step_q(cc+8,  cc+9,  cc+5,  ee+14, ee+12);
-            results[9]  = self.step_q(cc+9,  cc+10, cc+6,  ee+15, ee+13);
-            results[10] = self.step_q(cc+10, cc+11, cc+7,  cc+0,  ee+14);
-            results[11] = self.step_q(cc+11, cc+12, cc+8,  cc+1,  ee+15);
-            results[12] = self.step_q(cc+12, cc+13, cc+9,  cc+2,  cc+0);
-            results[13] = self.step_q(cc+13, cc+14, cc+10, cc+3,  cc+1);
-            results[14] = self.step_q(cc+14, cc+15, cc+11, cc+4,  cc+2);
-            results[15] = self.step_q(cc+15, dd+0,  cc+12, cc+5,  cc+3);
-        }
-        self.counter1024 = self.counter1024.wrapping_add(16);
-    }
-
     fn sixteen_steps(&mut self) {
         assert!(self.counter1024 % 16 == 0);
@@ -287,119 +261,57 @@
         }
         self.counter1024 += 16;
     }
-}
 
-impl RngCore for Hc128Rng {
-    #[inline]
-    fn next_u32(&mut self) -> u32 {
-        if self.index >= 16 {
-            self.state.update(&mut self.results);
-            self.index = 0;
+    // Initialize an HC-128 random number generator. The seed has to be
+    // 256 bits in length (`[u32; 8]`), matching the 128 bit `key` followed by
+    // 128 bit `iv` when HC-128 were to be used as a stream cipher.
+    fn init(seed: [u32; SEED_WORDS]) -> Self {
+        #[inline]
+        fn f1(x: u32) -> u32 {
+            x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3)
         }
-        let value = self.results[self.index];
-        self.index += 1;
-        value
-    }
-
-    #[inline]
-    fn next_u64(&mut self) -> u64 {
-        let index = self.index;
-        if index < 15 {
-            self.index += 2;
-            // Read an u64 from the current index
-            if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
-                unsafe { *(&self.results[index] as *const u32 as *const u64) }
-            } else {
-                let x = self.results[index] as u64;
-                let y = self.results[index + 1] as u64;
-                (y << 32) | x
-            }
-        } else if index >= 16 {
-            self.state.update(&mut self.results);
-            self.index = 2;
-            let x = self.results[0] as u64;
-            let y = self.results[1] as u64;
-            (y << 32) | x
-        } else {
-            let x = self.results[15] as u64;
-            self.state.update(&mut self.results);
-            self.index = 1;
-            let y = self.results[0] as u64;
-            (y << 32) | x
+        #[inline]
+        fn f2(x: u32) -> u32 {
+            x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10)
         }
-    }
 
-    // As an optimization we try to write directly into the output buffer.
-    // This is only enabled for platforms where unaligned writes are known to
-    // be safe and fast.
-    // This improves performance by about 12%.
-    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-    fn fill_bytes(&mut self, dest: &mut [u8]) {
-        use core::slice::from_raw_parts_mut;
-        let mut filled = 0;
+        let mut t = [0u32; 1024];
 
-        // Continue filling from the current set of results
-        if self.index < 16 {
-            let (consumed_u32, filled_u8) =
-                impls::fill_via_u32_chunks(&self.results[self.index..],
-                                           dest);
+        // Expand the key and iv into P and Q
+        let (key, iv) = seed.split_at(4);
+        t[..4].copy_from_slice(key);
+        t[4..8].copy_from_slice(key);
+        t[8..12].copy_from_slice(iv);
+        t[12..16].copy_from_slice(iv);
 
-            self.index += consumed_u32;
-            filled += filled_u8;
+        // Generate the 256 intermediate values W[16] ... W[256+16-1], and
+        // copy the last 16 generated values to the start of P.
+        for i in 16..256+16 {
+            t[i] = f2(t[i-2]).wrapping_add(t[i-7]).wrapping_add(f1(t[i-15]))
+                   .wrapping_add(t[i-16]).wrapping_add(i as u32);
         }
-
-        let len_remainder = (dest.len() - filled) % (16 * 4);
-        let len_direct = dest.len() - len_remainder;
-
-        while filled < len_direct {
-            let dest_u32: &mut [u32] = unsafe {
-                from_raw_parts_mut(
-                    dest[filled..].as_mut_ptr() as *mut u8 as *mut u32,
-                    16)
-            };
-            self.state.update(dest_u32);
-            filled += 16 * 4;
+        {
+            let (p1, p2) = t.split_at_mut(256);
+            p1[0..16].copy_from_slice(&p2[0..16]);
         }
-        self.index = 16;
 
-        if len_remainder > 0 {
-            self.state.update(&mut self.results);
-
-            let (consumed_u32, _) =
-                impls::fill_via_u32_chunks(&self.results,
-                                           &mut dest[filled..]);
-
-            self.index = consumed_u32;
+        // Generate both the P and Q tables
+        for i in 16..1024 {
+            t[i] = f2(t[i-2]).wrapping_add(t[i-7]).wrapping_add(f1(t[i-15]))
+                   .wrapping_add(t[i-16]).wrapping_add(256 + i as u32);
         }
-    }
 
-    #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
-    fn fill_bytes(&mut self, dest: &mut [u8]) {
-        let mut read_len = 0;
-        while read_len < dest.len() {
-            if self.index >= 16 {
-                self.state.update(&mut self.results);
-                self.index = 0;
-            }
-
-            let (consumed_u32, filled_u8) =
-                impls::fill_via_u32_chunks(&self.results[self.index..],
-                                           &mut dest[read_len..]);
-
-            self.index += consumed_u32;
-            read_len += filled_u8;
-        }
-    }
+        let mut core = Self { t: t, counter1024: 0 };
 
-    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
-        Ok(self.fill_bytes(dest))
+        // run the cipher 1024 steps
+        for _ in 0..64 { core.sixteen_steps() };
+        core.counter1024 = 0;
+        core
     }
 }
 
-impl CryptoRng for Hc128Rng {}
-
-impl SeedableRng for Hc128Rng {
+impl SeedableRng for Hc128Core {
     type Seed = [u8; SEED_WORDS*4];
 
     /// Create an HC-128 random number generator with a seed. The seed has to be
@@ -408,10 +320,12 @@
     fn from_seed(seed: Self::Seed) -> Self {
         let mut seed_u32 = [0u32; SEED_WORDS];
         le::read_u32_into(&seed, &mut seed_u32);
-        Hc128Rng::init(seed_u32)
+        Self::init(seed_u32)
     }
 }
 
+impl CryptoRng for Hc128Core {}
+
 #[cfg(test)]
 mod test {
     use {RngCore, SeedableRng};
diff --git a/src/prng/mod.rs b/src/prng/mod.rs
index 7eb1dac8e34..0cf0c420f23 100644
--- a/src/prng/mod.rs
+++ b/src/prng/mod.rs
@@ -40,8 +40,8 @@
 //! same algorithm, it is possible that both will yield the same sequence of
 //! values (with some lag).
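The module declarations below change from private to public, which makes the new core types nameable from outside the crate; for example:

```rust
use rand::SeedableRng;
use rand::prng::hc128::Hc128Core;

// A bare core has no buffer and no RngCore methods; it implements only
// BlockRngCore + SeedableRng, which is exactly what ReseedingRng needs.
let _core = Hc128Core::from_seed([0u8; 32]);
```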
-mod chacha;
-mod hc128;
+pub mod chacha;
+pub mod hc128;
 mod isaac;
 mod isaac64;
 mod xorshift;
@@ -53,4 +53,4 @@ pub use self::chacha::ChaChaRng;
 pub use self::hc128::Hc128Rng;
 pub use self::isaac::IsaacRng;
 pub use self::isaac64::Isaac64Rng;
-pub use self::xorshift::XorShiftRng;
\ No newline at end of file
+pub use self::xorshift::XorShiftRng;
diff --git a/src/reseeding.rs b/src/reseeding.rs
index 637a0b4b99b..8a4c54b506b 100644
--- a/src/reseeding.rs
+++ b/src/reseeding.rs
@@ -11,7 +11,11 @@
 //! A wrapper around another PRNG that reseeds it after it
 //! generates a certain number of random bytes.
 
-use {RngCore, SeedableRng, Error, ErrorKind};
+use core::marker::PhantomData;
+use core::mem::size_of;
+
+use rand_core::{RngCore, BlockRngCore, CryptoRng, SeedableRng, Error, ErrorKind};
+use rand_core::impls::BlockRng;
 
 /// A wrapper around any PRNG which reseeds the underlying PRNG after it has
 /// generated a certain number of random bytes.
@@ -39,30 +43,25 @@ use {RngCore, SeedableRng, Error, ErrorKind};
 /// `ReseedingRng` with the ISAAC RNG. That algorithm, although apparently
 /// strong and with no known attack, does not come with any proof of security
 /// and does not meet the current standards for a cryptographically secure
-/// PRNG. By reseeding it frequently (every 32 MiB) it seems safe to assume
+/// PRNG. By reseeding it frequently (every 32 kiB) it seems safe to assume
 /// there is no attack that can operate on the tiny window between reseeds.
 ///
 /// # Error handling
 ///
-/// If reseeding fails, `try_fill_bytes` is the only `Rng` method to report it.
-/// For all other `Rng` methods, `ReseedingRng` will not panic but try to
-/// handle the error intelligently; if handling the source error fails these
-/// methods will continue generating data from the wrapped PRNG without
-/// reseeding.
-///
-/// It is usually best to use the infallible methods `next_u32`, `next_u64` and
-/// `fill_bytes` because they can make use of this error handling strategy.
-/// Use `try_fill_bytes` and possibly `try_reseed` if you want to handle
-/// reseeding errors explicitly.
+/// Although extremely unlikely, reseeding the wrapped PRNG can fail.
+/// `ReseedingRng` will never panic but try to handle the error intelligently
+/// through some combination of retrying and delaying reseeding until later.
+/// If handling the source error fails `ReseedingRng` will continue generating
+/// data from the wrapped PRNG without reseeding.
 #[derive(Debug)]
-pub struct ReseedingRng<R, Rsdr> {
-    rng: R,
-    reseeder: Rsdr,
-    threshold: i64,
-    bytes_until_reseed: i64,
-}
+pub struct ReseedingRng<R, Rsdr>(BlockRng<ReseedingCore<u32, R, Rsdr>>)
+where R: BlockRngCore<u32> + SeedableRng,
+      Rsdr: RngCore;
 
-impl<R: RngCore + SeedableRng, Rsdr: RngCore> ReseedingRng<R, Rsdr> {
+impl<R, Rsdr> ReseedingRng<R, Rsdr>
+where R: BlockRngCore<u32> + SeedableRng,
+      Rsdr: RngCore
+{
     /// Create a new `ReseedingRng` with the given parameters.
     ///
     /// # Arguments
     ///
     /// * `rng`: the random number generator to use.
     /// * `threshold`: the number of generated bytes after which to reseed the RNG.
     /// * `reseeder`: the RNG to use for reseeding.
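+    ///
+    /// # Example
+    ///
+    /// A sketch with illustrative numbers (reseed roughly every 32 kiB):
+    ///
+    /// ```no_run
+    /// use rand::{EntropyRng, SeedableRng};
+    /// use rand::prng::hc128::Hc128Core;
+    /// use rand::reseeding::ReseedingRng;
+    ///
+    /// let mut entropy = EntropyRng::new();
+    /// let core = Hc128Core::from_rng(&mut entropy).unwrap();
+    /// let mut rng = ReseedingRng::new(core, 32 * 1024, EntropyRng::new());
+    /// ```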
-    pub fn new(rng: R, threshold: u64, reseeder: Rsdr) -> ReseedingRng<R, Rsdr> {
+    pub fn new(rng: R, threshold: u64, reseeder: Rsdr)
+        -> ReseedingRng<R, Rsdr>
+    {
         assert!(threshold <= ::core::i64::MAX as u64);
-        ReseedingRng {
-            rng: rng,
-            reseeder: reseeder,
-            threshold: threshold as i64,
-            bytes_until_reseed: threshold as i64,
-        }
+        let results_empty = R::Results::default();
+        ReseedingRng(
+            BlockRng {
+                core: ReseedingCore {
+                    phantom: PhantomData,
+                    inner: rng,
+                    reseeder: reseeder,
+                    threshold: threshold as i64,
+                    bytes_until_reseed: threshold as i64,
+                },
+                index: results_empty.as_ref().len(), // generate on first use
+                results: results_empty,
+            }
+        )
     }
 
     /// Reseed the internal PRNG.
-    ///
-    /// This will try to work around errors in the RNG used for reseeding
-    /// intelligently through some combination of retrying and delaying
-    /// reseeding until later. So long as the internal PRNG doesn't fail, this
-    /// method will not fail, i.e. failures from the reseeding source are not
-    /// fatal.
-    pub fn reseed(&mut self) {
-        // Behaviour is identical to `try_reseed`; we just squelch the error.
-        let _res = self.try_reseed();
-    }
-
-    /// Reseed the internal RNG if the number of bytes that have been
-    /// generated exceed the threshold.
-    ///
-    /// If reseeding fails, return an error with the original cause. Note that
-    /// in case of error we simply delay reseeding, allowing the generator to
-    /// continue its output of random data and try reseeding again later;
-    /// because of this we always return kind `ErrorKind::Transient`.
-    #[inline(never)]
-    pub fn try_reseed(&mut self) -> Result<(), Error> {
-        trace!("Reseeding RNG after {} generated bytes",
-               self.threshold - self.bytes_until_reseed);
-        if let Err(mut e) = R::from_rng(&mut self.reseeder)
-            .map(|result| self.rng = result)
-        {
-            let delay = match e.kind {
-                ErrorKind::Transient => 0,
-                kind @ _ if kind.should_retry() => self.threshold >> 8,
-                _ => self.threshold,
-            };
-            warn!("Reseeding RNG delayed reseeding by {} bytes due to \
-                   error from source: {}", delay, e);
-            self.bytes_until_reseed = delay;
-            e.kind = ErrorKind::Transient;
-            Err(e)
-        } else {
-            self.bytes_until_reseed = self.threshold;
-            Ok(())
-        }
+    pub fn reseed(&mut self) -> Result<(), Error> {
+        self.0.core.reseed()
     }
 }
 
-impl<R: RngCore + SeedableRng, Rsdr: RngCore> RngCore for ReseedingRng<R, Rsdr> {
+// TODO: this should be implemented for any type where the inner type
+// implements RngCore, but we can't specify that because ReseedingCore is private
+impl<R, Rsdr: RngCore> RngCore for ReseedingRng<R, Rsdr>
+where R: BlockRngCore<u32> + SeedableRng
+{
+    #[inline(always)]
     fn next_u32(&mut self) -> u32 {
-        let value = self.rng.next_u32();
-        self.bytes_until_reseed -= 4;
-        if self.bytes_until_reseed <= 0 {
-            self.reseed();
-        }
-        value
+        self.0.next_u32()
     }
 
+    #[inline(always)]
     fn next_u64(&mut self) -> u64 {
-        let value = self.rng.next_u64();
-        self.bytes_until_reseed -= 8;
-        if self.bytes_until_reseed <= 0 {
-            self.reseed();
-        }
-        value
+        self.0.next_u64()
     }
 
     fn fill_bytes(&mut self, dest: &mut [u8]) {
-        self.rng.fill_bytes(dest);
-        self.bytes_until_reseed -= dest.len() as i64;
+        self.0.fill_bytes(dest)
+    }
+
+    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+        self.0.try_fill_bytes(dest)
+    }
+}
+
+impl<R, Rsdr> CryptoRng for ReseedingRng<R, Rsdr>
+where R: BlockRngCore<u32> + SeedableRng + CryptoRng,
+      Rsdr: RngCore + CryptoRng {}
+
+#[derive(Debug)]
+struct ReseedingCore<T, R, Rsdr> {
+    phantom: PhantomData<T>, // associated with R, but not a parameter; used below
+    inner: R,
+    reseeder: Rsdr,
+    threshold: i64,
+    bytes_until_reseed: i64,
+}
+
+impl<T, R, Rsdr> BlockRngCore<T> for ReseedingCore<T, R, Rsdr>
+where R: BlockRngCore<T> + SeedableRng,
+      Rsdr: RngCore
+{
+    type Results = <R as BlockRngCore<T>>::Results;
+
+    fn generate(&mut self, results: &mut Self::Results) {
         if self.bytes_until_reseed <= 0 {
-            self.reseed();
+            // We get better performance by directly returning from a separate,
+            // non-inlined function (`reseed_and_generate`) than by reseeding
+            // here inline and continuing with the rest of this function.
+            return self.reseed_and_generate(results);
         }
+        let num_bytes = results.as_ref().len() * size_of::<T>();
+        self.bytes_until_reseed -= num_bytes as i64;
+        self.inner.generate(results);
     }
+}
 
-    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
-        let res1 = self.rng.try_fill_bytes(dest);
-        self.bytes_until_reseed -= dest.len() as i64;
-        let res2 = if self.bytes_until_reseed <= 0 {
-            self.try_reseed()
-        } else { Ok(()) };
-
-        if let Err(e) = res1 {
-            // In the unlikely event the internal PRNG fails, we don't know
-            // whether this is resolvable; reseed immediately and return
-            // original error kind.
-            self.bytes_until_reseed = 0;
-            Err(e)
+impl<T, R, Rsdr> ReseedingCore<T, R, Rsdr>
+where R: BlockRngCore<T> + SeedableRng,
+      Rsdr: RngCore
+{
+    /// Reseed the internal PRNG.
+    fn reseed(&mut self) -> Result<(), Error> {
+        R::from_rng(&mut self.reseeder).map(|result| {
+            self.bytes_until_reseed = self.threshold;
+            self.inner = result
+        })
+    }
+
+    #[inline(never)]
+    fn reseed_and_generate(&mut self,
+                           results: &mut <R as BlockRngCore<T>>::Results)
+    {
+        trace!("Reseeding RNG after {} generated bytes",
+               self.threshold - self.bytes_until_reseed);
+        let threshold = if let Err(e) = self.reseed() {
+            let delay = match e.kind {
+                ErrorKind::Transient => 0,
+                kind @ _ if kind.should_retry() => self.threshold >> 8,
+                _ => self.threshold,
+            };
+            warn!("Reseeding RNG delayed reseeding by {} bytes due to \
+                   error from source: {}", delay, e);
+            delay
         } else {
-            res2
-        }
+            self.threshold
+        };
+
+        let num_bytes = results.as_ref().len() * size_of::<T>();
+        self.bytes_until_reseed = threshold - num_bytes as i64;
+        self.inner.generate(results);
     }
 }
 
+impl<T, R, Rsdr> CryptoRng for ReseedingCore<T, R, Rsdr>
+where R: BlockRngCore<T> + SeedableRng + CryptoRng,
+      Rsdr: RngCore + CryptoRng {}
+
 #[cfg(test)]
 mod test {
-    use {Rng, SeedableRng, StdRng};
+    use {Rng, SeedableRng};
+    use prng::chacha::ChaChaCore;
     use mock::StepRng;
     use super::ReseedingRng;
 
     #[test]
     fn test_reseeding() {
         let mut zero = StepRng::new(0, 0);
-        let rng = StdRng::from_rng(&mut zero).unwrap();
-        let mut reseeding = ReseedingRng::new(rng, 32, zero);
+        let rng = ChaChaCore::from_rng(&mut zero).unwrap();
+        let mut reseeding = ReseedingRng::new(rng, 32*4, zero);
 
         // Currently we only support for arrays up to length 32.
         // TODO: cannot generate seq via Rng::gen because it uses different alg
-        let mut buf = [0u8; 32];
+        let mut buf = [0u32; 32]; // Needs to be a multiple of the RNGs result
+                                  // size to test exactly.
         reseeding.fill(&mut buf);
         let seq = buf;
         for _ in 0..10 {
diff --git a/src/thread_rng.rs b/src/thread_rng.rs
index 70c21d9d98f..387c74f0624 100644
--- a/src/thread_rng.rs
+++ b/src/thread_rng.rs
@@ -13,8 +13,9 @@
 use std::cell::RefCell;
 use std::rc::Rc;
 
-use {RngCore, CryptoRng, StdRng, SeedableRng, EntropyRng, Error};
-use {Distribution, Rng, Uniform};
+use {RngCore, CryptoRng, SeedableRng, EntropyRng};
+use prng::hc128::Hc128Core;
+use {Distribution, Uniform, Rng, Error};
 use reseeding::ReseedingRng;
 
 // Number of generated bytes after which to reseed `TreadRng`.
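`ThreadRng`'s internal type changes from `ReseedingRng<StdRng, EntropyRng>` to `ReseedingRng<Hc128Core, EntropyRng>`. A standalone sketch of the construction that the `thread_local!` block below performs:

```rust
use rand::{EntropyRng, SeedableRng};
use rand::prng::hc128::Hc128Core;
use rand::reseeding::ReseedingRng;

const THRESHOLD: u64 = 32 * 1024 * 1024; // 32 MiB, as THREAD_RNG_RESEED_THRESHOLD

let mut entropy_source = EntropyRng::new();
let r = Hc128Core::from_rng(&mut entropy_source)
    .expect("could not initialize thread_rng");
// The same entropy source is reused for periodic reseeding.
let rng = ReseedingRng::new(r, THRESHOLD, entropy_source);
```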
@@ -31,13 +32,13 @@ const THREAD_RNG_RESEED_THRESHOLD: u64 = 32*1024*1024; // 32 MiB
 /// [`thread_rng`]: fn.thread_rng.html
 #[derive(Clone, Debug)]
 pub struct ThreadRng {
-    rng: Rc<RefCell<ReseedingRng<StdRng, EntropyRng>>>,
+    rng: Rc<RefCell<ReseedingRng<Hc128Core, EntropyRng>>>,
 }
 
 thread_local!(
-    static THREAD_RNG_KEY: Rc<RefCell<ReseedingRng<StdRng, EntropyRng>>> = {
+    static THREAD_RNG_KEY: Rc<RefCell<ReseedingRng<Hc128Core, EntropyRng>>> = {
         let mut entropy_source = EntropyRng::new();
-        let r = StdRng::from_rng(&mut entropy_source).unwrap_or_else(|err|
+        let r = Hc128Core::from_rng(&mut entropy_source).unwrap_or_else(|err|
                 panic!("could not initialize thread_rng: {}", err));
         let rng = ReseedingRng::new(r,
                                     THREAD_RNG_RESEED_THRESHOLD,
@@ -51,11 +52,12 @@
 /// chaining style, e.g. `thread_rng().gen::<i32>()`, or cached locally, e.g.
 /// `let mut rng = thread_rng();`.
 ///
-/// `ThreadRng` uses [`ReseedingRng`] wrapping a [`StdRng`] which is reseeded
-/// after generating 32 MiB of random data. A single instance is cached per
-/// thread and the returned `ThreadRng` is a reference to this instance — hence
-/// `ThreadRng` is neither `Send` nor `Sync` but is safe to use within a single
-/// thread. This RNG is seeded and reseeded via [`EntropyRng`] as required.
+/// `ThreadRng` uses [`ReseedingRng`] wrapping the same PRNG as [`StdRng`],
+/// which is reseeded after generating 32 MiB of random data. A single instance
+/// is cached per thread and the returned `ThreadRng` is a reference to this
+/// instance — hence `ThreadRng` is neither `Send` nor `Sync` but is safe to use
+/// within a single thread. This RNG is seeded and reseeded via [`EntropyRng`]
+/// as required.
 ///
 /// Note that the reseeding is done as an extra precaution against entropy
 /// leaks and is in theory unnecessary — to predict `thread_rng`'s output, an
@@ -76,22 +78,20 @@ pub fn thread_rng() -> ThreadRng {
 }
 
 impl RngCore for ThreadRng {
-    #[inline]
+    #[inline(always)]
     fn next_u32(&mut self) -> u32 {
         self.rng.borrow_mut().next_u32()
     }
 
-    #[inline]
+    #[inline(always)]
     fn next_u64(&mut self) -> u64 {
         self.rng.borrow_mut().next_u64()
     }
 
-    #[inline]
     fn fill_bytes(&mut self, bytes: &mut [u8]) {
         self.rng.borrow_mut().fill_bytes(bytes)
     }
-
-    #[inline]
+
     fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
         self.rng.borrow_mut().try_fill_bytes(dest)
     }
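Callers are unaffected by the internal switch; the handle is used as before:

```rust
use rand::{thread_rng, Rng};

let mut rng = thread_rng();  // cheap: clones an Rc handle to the thread-local RNG
let _x: u32 = rng.gen();     // borrows the Hc128Core-backed ReseedingRng
let _y: f64 = rng.gen();
```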