Remove automatic (delayed) reseed-on-fork (#1379)

* benches/generators.rs: standardize thread_rng benchmarks
* Remove cfgs from examples
* Remove ReadRng
* Add ThreadRng::reseed and doc to use
* Remove fork protection from ReseedingRng; remove libc dep
* Enable ReseedingRng without std
* Move ReseedingRng up; remove module rand::rngs::adapter
This commit is contained in:
Diggory Hardy 2024-03-18 17:41:15 +00:00 committed by GitHub
parent b45e892084
commit 4cbbb340ad
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
13 changed files with 53 additions and 311 deletions

View File

@ -11,6 +11,10 @@ You may also find the [Upgrade Guide](https://rust-random.github.io/book/update.
## [0.9.1] - unreleased
- Add the `Slice::num_choices` method to the Slice distribution (#1402)
### Generators
- `ReseedingRng::reseed` also resets the random data cache.
- Remove fork-protection from `ReseedingRng` and `ThreadRng`. Instead, it is recommended to call `ThreadRng::reseed` on fork.
## [0.9.0-alpha.0] - 2024-02-18
This is a pre-release. To depend on this version, use `rand = "=0.9.0-alpha.0"` to prevent automatic updates (which can be expected to include breaking changes).

View File

@ -21,7 +21,7 @@ include = ["src/", "LICENSE-*", "README.md", "CHANGELOG.md", "COPYRIGHT"]
# To build locally:
# RUSTDOCFLAGS="--cfg doc_cfg -Zunstable-options --generate-link-to-definition" cargo +nightly doc --all --all-features --no-deps --open
all-features = true
rustdoc-args = ["--cfg", "doc_cfg", "--generate-link-to-definition"]
rustdoc-args = ["--cfg", "doc_cfg", "-Zunstable-options", "--generate-link-to-definition"]
[package.metadata.playground]
features = ["small_rng", "serde1"]
@ -34,7 +34,7 @@ serde1 = ["serde", "rand_core/serde1"]
# Option (enabled by default): without "std" rand uses libcore; this option
# enables functionality expected to be available on a standard platform.
std = ["rand_core/std", "rand_chacha?/std", "alloc", "libc"]
std = ["rand_core/std", "rand_chacha?/std", "alloc"]
# Option: "alloc" enables support for Vec and Box when not using "std"
alloc = ["rand_core/alloc"]
@ -71,10 +71,6 @@ serde = { version = "1.0.103", features = ["derive"], optional = true }
rand_chacha = { path = "rand_chacha", version = "=0.9.0-alpha.0", default-features = false, optional = true }
zerocopy = { version = "=0.8.0-alpha.6", default-features = false, features = ["simd"] }
[target.'cfg(unix)'.dependencies]
# Used for fork protection (reseeding.rs)
libc = { version = "0.2.22", optional = true, default-features = false }
[dev-dependencies]
rand_pcg = { path = "rand_pcg", version = "=0.9.0-alpha.0" }
# Only to test serde1

View File

@ -23,9 +23,6 @@ are expected to provide the following:
For some RNGs, notably `OsRng`, `ThreadRng` and those wrapped by `ReseedingRng`,
we provide limited mitigations against side-channel attacks:
- After a process fork on Unix, there is an upper-bound on the number of bits
output by the RNG before the processes diverge, after which outputs from
each process's RNG are uncorrelated
- After the state (memory) of an RNG is leaked, there is an upper-bound on the
number of bits of output by the RNG before prediction of output by an
observer again becomes computationally-infeasible

View File

@ -18,7 +18,7 @@ use core::mem::size_of;
use test::{black_box, Bencher};
use rand::prelude::*;
use rand::rngs::adapter::ReseedingRng;
use rand::rngs::ReseedingRng;
use rand::rngs::{mock::StepRng, OsRng};
use rand_chacha::{ChaCha12Rng, ChaCha20Core, ChaCha20Rng, ChaCha8Rng};
use rand_pcg::{Pcg32, Pcg64, Pcg64Mcg, Pcg64Dxsm};
@ -52,6 +52,7 @@ gen_bytes!(gen_bytes_std, StdRng::from_entropy());
#[cfg(feature = "small_rng")]
gen_bytes!(gen_bytes_small, SmallRng::from_thread_rng());
gen_bytes!(gen_bytes_os, OsRng);
gen_bytes!(gen_bytes_thread, thread_rng());
macro_rules! gen_uint {
($fnn:ident, $ty:ty, $gen:expr) => {
@ -82,6 +83,7 @@ gen_uint!(gen_u32_std, u32, StdRng::from_entropy());
#[cfg(feature = "small_rng")]
gen_uint!(gen_u32_small, u32, SmallRng::from_thread_rng());
gen_uint!(gen_u32_os, u32, OsRng);
gen_uint!(gen_u32_thread, u32, thread_rng());
gen_uint!(gen_u64_step, u64, StepRng::new(0, 1));
gen_uint!(gen_u64_pcg32, u64, Pcg32::from_entropy());
@ -95,6 +97,7 @@ gen_uint!(gen_u64_std, u64, StdRng::from_entropy());
#[cfg(feature = "small_rng")]
gen_uint!(gen_u64_small, u64, SmallRng::from_thread_rng());
gen_uint!(gen_u64_os, u64, OsRng);
gen_uint!(gen_u64_thread, u64, thread_rng());
macro_rules! init_gen {
($fnn:ident, $gen:ident) => {
@ -141,24 +144,3 @@ reseeding_bytes!(reseeding_chacha20_32k, 32);
reseeding_bytes!(reseeding_chacha20_64k, 64);
reseeding_bytes!(reseeding_chacha20_256k, 256);
reseeding_bytes!(reseeding_chacha20_1M, 1024);
macro_rules! threadrng_uint {
($fnn:ident, $ty:ty) => {
#[bench]
fn $fnn(b: &mut Bencher) {
let mut rng = thread_rng();
b.iter(|| {
let mut accum: $ty = 0;
for _ in 0..RAND_BENCH_N {
accum = accum.wrapping_add(rng.gen::<$ty>());
}
accum
});
b.bytes = size_of::<$ty>() as u64 * RAND_BENCH_N;
}
};
}
threadrng_uint!(thread_rng_u32, u32);
threadrng_uint!(thread_rng_u64, u64);

View File

@ -23,9 +23,6 @@
//! We can use the above fact to estimate the value of π: pick many points in
//! the square at random, calculate the fraction that fall within the circle,
//! and multiply this fraction by 4.
#![cfg(all(feature = "std", feature = "std_rng"))]
use rand::distributions::{Distribution, Uniform};
fn main() {

View File

@ -26,8 +26,6 @@
//!
//! [Monty Hall Problem]: https://en.wikipedia.org/wiki/Monty_Hall_problem
#![cfg(all(feature = "std", feature = "std_rng"))]
use rand::distributions::{Distribution, Uniform};
use rand::Rng;

View File

@ -38,8 +38,6 @@
//! over BATCH_SIZE trials. Manual batching also turns out to be faster
//! for the nondeterministic version of this program.
#![cfg(all(feature = "std", feature = "std_rng"))]
use rand::distributions::{Distribution, Uniform};
use rand_chacha::{rand_core::SeedableRng, ChaCha8Rng};
use rayon::prelude::*;

View File

@ -1,16 +0,0 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Wrappers / adapters forming RNGs
mod read;
mod reseeding;
#[allow(deprecated)]
pub use self::read::{ReadError, ReadRng};
pub use self::reseeding::ReseedingRng;

View File

@ -1,150 +0,0 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A wrapper around any Read to treat it as an RNG.
#![allow(deprecated)]
use std::fmt;
use std::io::Read;
use rand_core::{impls, Error, RngCore};
/// An RNG that reads random bytes straight from any type supporting
/// [`std::io::Read`], for example files.
///
/// This will work best with an infinite reader, but that is not required.
///
/// This can be used with `/dev/urandom` on Unix but it is recommended to use
/// [`OsRng`] instead.
///
/// # Panics
///
/// `ReadRng` uses [`std::io::Read::read_exact`], which retries on interrupts.
/// All other errors from the underlying reader, including when it does not
/// have enough data, will only be reported through [`try_fill_bytes`].
/// The other [`RngCore`] methods will panic in case of an error.
///
/// [`OsRng`]: crate::rngs::OsRng
/// [`try_fill_bytes`]: RngCore::try_fill_bytes
#[derive(Debug)]
#[deprecated(since="0.8.4", note="removal due to lack of usage")]
pub struct ReadRng<R> {
reader: R,
}
impl<R: Read> ReadRng<R> {
/// Create a new `ReadRng` from a `Read`.
pub fn new(r: R) -> ReadRng<R> {
ReadRng { reader: r }
}
}
impl<R: Read> RngCore for ReadRng<R> {
fn next_u32(&mut self) -> u32 {
impls::next_u32_via_fill(self)
}
fn next_u64(&mut self) -> u64 {
impls::next_u64_via_fill(self)
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
self.try_fill_bytes(dest).unwrap_or_else(|err| {
panic!(
"reading random bytes from Read implementation failed; error: {}",
err
)
});
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
if dest.is_empty() {
return Ok(());
}
// Use `std::io::read_exact`, which retries on `ErrorKind::Interrupted`.
self.reader
.read_exact(dest)
.map_err(|e| Error::new(ReadError(e)))
}
}
/// `ReadRng` error type
#[derive(Debug)]
#[deprecated(since="0.8.4")]
pub struct ReadError(std::io::Error);
impl fmt::Display for ReadError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ReadError: {}", self.0)
}
}
impl std::error::Error for ReadError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
Some(&self.0)
}
}
#[cfg(test)]
mod test {
use std::println;
use super::ReadRng;
use crate::RngCore;
#[test]
fn test_reader_rng_u64() {
// transmute from the target to avoid endianness concerns.
#[rustfmt::skip]
let v = [0u8, 0, 0, 0, 0, 0, 0, 1,
0, 4, 0, 0, 3, 0, 0, 2,
5, 0, 0, 0, 0, 0, 0, 0];
let mut rng = ReadRng::new(&v[..]);
assert_eq!(rng.next_u64(), 1 << 56);
assert_eq!(rng.next_u64(), (2 << 56) + (3 << 32) + (4 << 8));
assert_eq!(rng.next_u64(), 5);
}
#[test]
fn test_reader_rng_u32() {
let v = [0u8, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0];
let mut rng = ReadRng::new(&v[..]);
assert_eq!(rng.next_u32(), 1 << 24);
assert_eq!(rng.next_u32(), 2 << 16);
assert_eq!(rng.next_u32(), 3);
}
#[test]
fn test_reader_rng_fill_bytes() {
let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
let mut w = [0u8; 8];
let mut rng = ReadRng::new(&v[..]);
rng.fill_bytes(&mut w);
assert!(v == w);
}
#[test]
fn test_reader_rng_insufficient_bytes() {
let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
let mut w = [0u8; 9];
let mut rng = ReadRng::new(&v[..]);
let result = rng.try_fill_bytes(&mut w);
assert!(result.is_err());
println!("Error: {}", result.unwrap_err());
}
}

View File

@ -96,8 +96,8 @@
//! [`rand_xoshiro`]: https://crates.io/crates/rand_xoshiro
//! [`rng` tag]: https://crates.io/keywords/rng
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg(feature = "std")] pub mod adapter;
mod reseeding;
pub use reseeding::ReseedingRng;
pub mod mock; // Public so we don't export `StepRng` directly, making it a bit
// more clear it is intended for testing.

View File

@ -22,10 +22,6 @@ use rand_core::{CryptoRng, Error, RngCore, SeedableRng};
///
/// - On a manual call to [`reseed()`].
/// - After `clone()`, the clone will be reseeded on first use.
/// - When a process is forked on UNIX, the RNGs in both the parent and child
/// processes will be reseeded just before the next call to
/// [`BlockRngCore::generate`], i.e. "soon". For ChaCha and Hc128, this is a
/// maximum of 63 and 15, respectively, `u32` values before reseeding.
/// - After the PRNG has generated a configurable number of random bytes.
///
/// # When should reseeding after a fixed number of generated bytes be used?
@ -43,12 +39,6 @@ use rand_core::{CryptoRng, Error, RngCore, SeedableRng};
/// Use [`ReseedingRng::new`] with a `threshold` of `0` to disable reseeding
/// after a fixed number of generated bytes.
///
/// # Limitations
///
/// It is recommended that a `ReseedingRng` (including `ThreadRng`) not be used
/// from a fork handler.
/// Use `OsRng` or `getrandom`, or defer your use of the RNG until later.
///
/// # Error handling
///
/// Although unlikely, reseeding the wrapped PRNG can fail. `ReseedingRng` will
@ -67,7 +57,7 @@ use rand_core::{CryptoRng, Error, RngCore, SeedableRng};
/// use rand_chacha::ChaCha20Core; // Internal part of ChaChaRng that
/// // implements BlockRngCore
/// use rand::rngs::OsRng;
/// use rand::rngs::adapter::ReseedingRng;
/// use rand::rngs::ReseedingRng;
///
/// let prng = ChaCha20Core::from_entropy();
/// let mut reseeding_rng = ReseedingRng::new(prng, 0, OsRng);
@ -102,8 +92,11 @@ where
ReseedingRng(BlockRng::new(ReseedingCore::new(rng, threshold, reseeder)))
}
/// Reseed the internal PRNG.
/// Immediately reseed the generator
///
/// This discards any remaining random data in the cache.
pub fn reseed(&mut self) -> Result<(), Error> {
self.0.reset();
self.0.core.reseed()
}
}
@ -158,7 +151,6 @@ struct ReseedingCore<R, Rsdr> {
reseeder: Rsdr,
threshold: i64,
bytes_until_reseed: i64,
fork_counter: usize,
}
impl<R, Rsdr> BlockRngCore for ReseedingCore<R, Rsdr>
@ -170,12 +162,11 @@ where
type Results = <R as BlockRngCore>::Results;
fn generate(&mut self, results: &mut Self::Results) {
let global_fork_counter = fork::get_fork_counter();
if self.bytes_until_reseed <= 0 || self.is_forked(global_fork_counter) {
if self.bytes_until_reseed <= 0 {
// We get better performance not by calling `reseed` here and
// continuing with the rest of the function, but by directly
// returning from a non-inlined function.
return self.reseed_and_generate(results, global_fork_counter);
return self.reseed_and_generate(results);
}
let num_bytes = size_of_val(results.as_ref());
self.bytes_until_reseed -= num_bytes as i64;
@ -191,7 +182,6 @@ where
/// Create a new `ReseedingCore`.
fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
use ::core::i64::MAX;
fork::register_fork_handler();
// Because generating more values than `i64::MAX` takes centuries on
// current hardware, we just clamp to that value.
@ -210,7 +200,6 @@ where
reseeder,
threshold,
bytes_until_reseed: threshold,
fork_counter: 0,
}
}
@ -222,30 +211,9 @@ where
})
}
fn is_forked(&self, global_fork_counter: usize) -> bool {
// In theory, on 32-bit platforms, it is possible for
// `global_fork_counter` to wrap around after ~4e9 forks.
//
// This check will detect a fork in the normal case where
// `fork_counter < global_fork_counter`, and also when the difference
// between both is greater than `isize::MAX` (wrapped around).
//
// It will still fail to detect a fork if there have been more than
// `isize::MAX` forks, without any reseed in between. Seems unlikely
// enough.
(self.fork_counter.wrapping_sub(global_fork_counter) as isize) < 0
}
#[inline(never)]
fn reseed_and_generate(
&mut self, results: &mut <Self as BlockRngCore>::Results, global_fork_counter: usize,
) {
#![allow(clippy::if_same_then_else)] // false positive
if self.is_forked(global_fork_counter) {
info!("Fork detected, reseeding RNG");
} else {
trace!("Reseeding RNG (periodic reseed)");
}
fn reseed_and_generate(&mut self, results: &mut <Self as BlockRngCore>::Results) {
trace!("Reseeding RNG (periodic reseed)");
let num_bytes = size_of_val(results.as_ref());
@ -253,7 +221,6 @@ where
warn!("Reseeding RNG failed: {}", e);
let _ = e;
}
self.fork_counter = global_fork_counter;
self.bytes_until_reseed = self.threshold - num_bytes as i64;
self.inner.generate(results);
@ -271,7 +238,6 @@ where
reseeder: self.reseeder.clone(),
threshold: self.threshold,
bytes_until_reseed: 0, // reseed clone on first use
fork_counter: self.fork_counter,
}
}
}
@ -283,61 +249,6 @@ where
{
}
#[cfg(all(unix, not(target_os = "emscripten")))]
mod fork {
use core::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Once;
// Fork protection
//
// We implement fork protection on Unix using `pthread_atfork`.
// When the process is forked, we increment `RESEEDING_RNG_FORK_COUNTER`.
// Every `ReseedingRng` stores the last known value of the static in
// `fork_counter`. If the cached `fork_counter` is less than
// `RESEEDING_RNG_FORK_COUNTER`, it is time to reseed this RNG.
//
// If reseeding fails, we don't deal with this by setting a delay, but just
// don't update `fork_counter`, so a reseed is attempted as soon as
// possible.
static RESEEDING_RNG_FORK_COUNTER: AtomicUsize = AtomicUsize::new(0);
pub fn get_fork_counter() -> usize {
RESEEDING_RNG_FORK_COUNTER.load(Ordering::Relaxed)
}
extern "C" fn fork_handler() {
// Note: fetch_add is defined to wrap on overflow
// (which is what we want).
RESEEDING_RNG_FORK_COUNTER.fetch_add(1, Ordering::Relaxed);
}
pub fn register_fork_handler() {
static REGISTER: Once = Once::new();
REGISTER.call_once(|| {
// Bump the counter before and after forking (see #1169):
let ret = unsafe { libc::pthread_atfork(
Some(fork_handler),
Some(fork_handler),
Some(fork_handler),
) };
if ret != 0 {
panic!("libc::pthread_atfork failed with code {}", ret);
}
});
}
}
#[cfg(not(all(unix, not(target_os = "emscripten"))))]
mod fork {
pub fn get_fork_counter() -> usize {
0
}
pub fn register_fork_handler() {}
}
#[cfg(feature = "std_rng")]
#[cfg(test)]
mod test {

View File

@ -10,7 +10,7 @@
use crate::{CryptoRng, Error, RngCore, SeedableRng};
#[cfg(feature = "getrandom")]
#[cfg(any(test, feature = "getrandom"))]
pub(crate) use rand_chacha::ChaCha12Core as Core;
use rand_chacha::ChaCha12Rng as Rng;

View File

@ -14,7 +14,7 @@ use std::thread_local;
use std::fmt;
use super::std::Core;
use crate::rngs::adapter::ReseedingRng;
use crate::rngs::ReseedingRng;
use crate::rngs::OsRng;
use crate::{CryptoRng, Error, RngCore, SeedableRng};
@ -42,8 +42,6 @@ const THREAD_RNG_RESEED_THRESHOLD: u64 = 1024 * 64;
///
/// This type is a reference to a lazily-initialized thread-local generator.
/// An instance can be obtained via [`thread_rng`] or via `ThreadRng::default()`.
/// This handle is safe to use everywhere (including thread-local destructors),
/// though it is recommended not to use inside a fork handler.
/// The handle cannot be passed between threads (is not `Send` or `Sync`).
///
/// `ThreadRng` uses the same CSPRNG as [`StdRng`], ChaCha12. As with
@ -51,8 +49,23 @@ const THREAD_RNG_RESEED_THRESHOLD: u64 = 1024 * 64;
/// of security and performance.
///
/// `ThreadRng` is automatically seeded from [`OsRng`] with periodic reseeding
/// (every 64 kiB, as well as "soon" after a fork on Unix — see [`ReseedingRng`]
/// documentation for details).
/// (every 64 kiB — see [`ReseedingRng`] documentation for details).
///
/// `ThreadRng` is not automatically reseeded on fork. It is recommended to
/// explicitly call [`ThreadRng::reseed`] immediately after a fork, for example:
/// ```ignore
/// fn do_fork() {
/// let pid = unsafe { libc::fork() };
/// if pid == 0 {
/// // Reseed ThreadRng in child processes:
/// rand::thread_rng().reseed();
/// }
/// }
/// ```
///
/// Methods on `ThreadRng` are not reentrant-safe and thus should not be called
/// from an interrupt (e.g. a fork handler) unless it can be guaranteed that no
/// other method on the same `ThreadRng` is currently executing.
///
/// Security must be considered relative to a threat model and validation
/// requirements. `ThreadRng` attempts to meet basic security considerations
@ -61,7 +74,7 @@ const THREAD_RNG_RESEED_THRESHOLD: u64 = 1024 * 64;
/// leaking internal secrets e.g. via [`Debug`] implementation or serialization.
/// Memory is not zeroized on drop.
///
/// [`ReseedingRng`]: crate::rngs::adapter::ReseedingRng
/// [`ReseedingRng`]: crate::rngs::ReseedingRng
/// [`StdRng`]: crate::rngs::StdRng
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", feature = "std_rng", feature = "getrandom"))))]
#[derive(Clone)]
@ -70,6 +83,18 @@ pub struct ThreadRng {
rng: Rc<UnsafeCell<ReseedingRng<Core, OsRng>>>,
}
impl ThreadRng {
/// Immediately reseed the generator
///
/// This discards any remaining random data in the cache.
pub fn reseed(&mut self) -> Result<(), Error> {
// SAFETY: We must make sure to stop using `rng` before anyone else
// creates another mutable reference
let rng = unsafe { &mut *self.rng.get() };
rng.reseed()
}
}
/// Debug implementation does not leak internal state
impl fmt::Debug for ThreadRng {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {