2018-08-16 15:38:40 +01:00
|
|
|
// Copyright 2018 Developers of the Rand project.
|
2017-12-15 11:19:40 +00:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
2018-01-09 08:39:55 +02:00
|
|
|
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
|
|
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
|
2017-12-15 11:19:40 +00:00
|
|
|
// option. This file may not be copied, modified, or distributed
|
|
|
|
// except according to those terms.
|
|
|
|
|
2018-01-25 17:34:28 +00:00
|
|
|
//! Helper functions for implementing `RngCore` functions.
|
2018-01-09 08:35:22 +02:00
|
|
|
//!
|
2017-12-15 11:19:40 +00:00
|
|
|
//! For cross-platform reproducibility, these functions all use Little Endian:
|
|
|
|
//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
|
|
|
|
//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
|
|
|
|
//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
|
2018-01-09 08:35:22 +02:00
|
|
|
//!
|
2017-12-15 11:19:40 +00:00
|
|
|
//! Byte-swapping (like the std `to_le` functions) is only needed to convert
|
|
|
|
//! to/from byte sequences, and since its purpose is reproducibility,
|
|
|
|
//! non-reproducible sources (e.g. `OsRng`) need not bother with it.
|
|
|
|
|
2020-01-01 18:01:48 +00:00
|
|
|
use crate::RngCore;
|
2017-12-15 11:19:40 +00:00
|
|
|
use core::cmp::min;
|
2024-05-07 04:38:30 -07:00
|
|
|
use zerocopy::AsBytes;
|
2017-12-15 11:19:40 +00:00
|
|
|
|
|
|
|
/// Implement `next_u64` via `next_u32`, little-endian order.
|
2018-01-25 17:34:28 +00:00
|
|
|
pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
|
2017-12-15 11:19:40 +00:00
|
|
|
// Use LE; we explicitly generate one value before the next.
|
2018-03-27 14:54:05 +02:00
|
|
|
let x = u64::from(rng.next_u32());
|
|
|
|
let y = u64::from(rng.next_u32());
|
2017-12-15 11:19:40 +00:00
|
|
|
(y << 32) | x
|
|
|
|
}
|
|
|
|
|
2018-04-13 13:57:22 +02:00
|
|
|
/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order.
|
|
|
|
///
|
|
|
|
/// The fastest way to fill a slice is usually to work as long as possible with
|
|
|
|
/// integers. That is why this method mostly uses `next_u64`, and only when
|
|
|
|
/// there are 4 or less bytes remaining at the end of the slice it uses
|
|
|
|
/// `next_u32` once.
|
|
|
|
pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
|
|
|
|
let mut left = dest;
|
|
|
|
while left.len() >= 8 {
|
2020-01-02 11:04:23 +00:00
|
|
|
let (l, r) = { left }.split_at_mut(8);
|
2018-04-13 13:57:22 +02:00
|
|
|
left = r;
|
2019-07-09 14:12:35 +02:00
|
|
|
let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
|
2018-04-13 13:57:22 +02:00
|
|
|
l.copy_from_slice(&chunk);
|
|
|
|
}
|
|
|
|
let n = left.len();
|
|
|
|
if n > 4 {
|
2019-07-09 14:12:35 +02:00
|
|
|
let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
|
2018-04-13 13:57:22 +02:00
|
|
|
left.copy_from_slice(&chunk[..n]);
|
|
|
|
} else if n > 0 {
|
2019-07-09 14:12:35 +02:00
|
|
|
let chunk: [u8; 4] = rng.next_u32().to_le_bytes();
|
2018-04-13 13:57:22 +02:00
|
|
|
left.copy_from_slice(&chunk[..n]);
|
|
|
|
}
|
2017-12-15 11:19:40 +00:00
|
|
|
}
|
|
|
|
|
2024-05-07 04:38:30 -07:00
|
|
|
/// Helper trait implemented for the word types (`u32`, `u64`) whose bytes are
/// copied into an output buffer by `fill_via_chunks`.
///
/// The `AsBytes` bound (from `zerocopy`) guarantees the value can be safely
/// viewed as a plain byte slice.
trait Observable: AsBytes + Copy {
    /// Convert to little-endian byte order (identity on little-endian targets).
    fn to_le(self) -> Self;
}
|
2021-09-15 09:27:11 +01:00
|
|
|
impl Observable for u32 {
|
2022-12-07 09:47:45 +00:00
|
|
|
fn to_le(self) -> Self {
|
|
|
|
self.to_le()
|
2021-09-13 09:30:14 +01:00
|
|
|
}
|
|
|
|
}
|
2021-09-15 09:27:11 +01:00
|
|
|
impl Observable for u64 {
|
2022-12-07 09:47:45 +00:00
|
|
|
fn to_le(self) -> Self {
|
|
|
|
self.to_le()
|
2021-09-13 09:30:14 +01:00
|
|
|
}
|
|
|
|
}
|
2020-08-01 20:32:49 +02:00
|
|
|
|
2022-12-07 09:47:45 +00:00
|
|
|
/// Fill dest from src
///
/// Returns `(n, byte_len)`. `src[..n]` is consumed (and possibly mutated),
/// `dest[..byte_len]` is filled. `src[n..]` and `dest[byte_len..]` are left
/// unaltered.
fn fill_via_chunks<T: Observable>(src: &mut [T], dest: &mut [u8]) -> (usize, usize) {
    let size = core::mem::size_of::<T>();
    // Bytes to copy: limited both by the bytes available in `src` and by the
    // space available in `dest`.
    let byte_len = min(core::mem::size_of_val(src), dest.len());
    // Number of whole-or-partial words covering `byte_len` (ceiling division);
    // the final word may be copied only partially.
    let num_chunks = (byte_len + size - 1) / size;

    // Byte-swap for portability of results. This must happen before copying
    // since the size of dest is not guaranteed to be a multiple of T or to be
    // sufficiently aligned.
    if cfg!(target_endian = "big") {
        for x in &mut src[..num_chunks] {
            *x = x.to_le();
        }
    }

    // `AsBytes` gives a safe byte-slice view of the consumed words; copy as
    // many of those bytes as `dest` can hold.
    dest[..byte_len].copy_from_slice(&<[T]>::as_bytes(&src[..num_chunks])[..byte_len]);

    (num_chunks, byte_len)
}
|
|
|
|
|
|
|
|
/// Implement `fill_bytes` by reading chunks from the output buffer of a block
|
|
|
|
/// based RNG.
|
|
|
|
///
|
|
|
|
/// The return values are `(consumed_u32, filled_u8)`.
|
|
|
|
///
|
2022-12-07 09:47:45 +00:00
|
|
|
/// On big-endian systems, endianness of `src[..consumed_u32]` values is
|
|
|
|
/// swapped. No other adjustments to `src` are made.
|
|
|
|
///
|
2017-12-15 11:19:40 +00:00
|
|
|
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
|
|
|
|
/// the length of `dest`.
|
|
|
|
/// `consumed_u32` is the number of words consumed from `src`, which is the same
|
|
|
|
/// as `filled_u8 / 4` rounded up.
|
|
|
|
///
|
|
|
|
/// # Example
|
|
|
|
/// (from `IsaacRng`)
|
|
|
|
///
|
2018-05-11 08:54:14 +02:00
|
|
|
/// ```ignore
|
2017-12-15 11:19:40 +00:00
|
|
|
/// fn fill_bytes(&mut self, dest: &mut [u8]) {
|
|
|
|
/// let mut read_len = 0;
|
|
|
|
/// while read_len < dest.len() {
|
|
|
|
/// if self.index >= self.rsl.len() {
|
|
|
|
/// self.isaac();
|
|
|
|
/// }
|
|
|
|
///
|
|
|
|
/// let (consumed_u32, filled_u8) =
|
|
|
|
/// impls::fill_via_u32_chunks(&mut self.rsl[self.index..],
|
|
|
|
/// &mut dest[read_len..]);
|
|
|
|
///
|
|
|
|
/// self.index += consumed_u32;
|
|
|
|
/// read_len += filled_u8;
|
|
|
|
/// }
|
|
|
|
/// }
|
|
|
|
/// ```
|
2022-12-07 09:47:45 +00:00
|
|
|
pub fn fill_via_u32_chunks(src: &mut [u32], dest: &mut [u8]) -> (usize, usize) {
|
2021-09-13 09:30:14 +01:00
|
|
|
fill_via_chunks(src, dest)
|
2017-12-15 11:19:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Implement `fill_bytes` by reading chunks from the output buffer of a block
|
|
|
|
/// based RNG.
|
|
|
|
///
|
|
|
|
/// The return values are `(consumed_u64, filled_u8)`.
|
2022-12-07 09:47:45 +00:00
|
|
|
///
|
|
|
|
/// On big-endian systems, endianness of `src[..consumed_u64]` values is
|
|
|
|
/// swapped. No other adjustments to `src` are made.
|
|
|
|
///
|
2017-12-15 11:19:40 +00:00
|
|
|
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
|
|
|
|
/// the length of `dest`.
|
|
|
|
/// `consumed_u64` is the number of words consumed from `src`, which is the same
|
|
|
|
/// as `filled_u8 / 8` rounded up.
|
|
|
|
///
|
|
|
|
/// See `fill_via_u32_chunks` for an example.
|
2022-12-07 09:47:45 +00:00
|
|
|
pub fn fill_via_u64_chunks(src: &mut [u64], dest: &mut [u8]) -> (usize, usize) {
|
2021-09-13 09:30:14 +01:00
|
|
|
fill_via_chunks(src, dest)
|
2017-12-15 11:19:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Implement `next_u32` via `fill_bytes`, little-endian order.
|
2018-01-25 17:34:28 +00:00
|
|
|
pub fn next_u32_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u32 {
|
2020-08-01 20:32:49 +02:00
|
|
|
let mut buf = [0; 4];
|
|
|
|
rng.fill_bytes(&mut buf);
|
2020-10-19 10:32:21 +01:00
|
|
|
u32::from_le_bytes(buf)
|
2017-12-15 11:19:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Implement `next_u64` via `fill_bytes`, little-endian order.
|
2018-01-25 17:34:28 +00:00
|
|
|
pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
|
2020-08-01 20:32:49 +02:00
|
|
|
let mut buf = [0; 8];
|
|
|
|
rng.fill_bytes(&mut buf);
|
2020-10-19 10:32:21 +01:00
|
|
|
u64::from_le_bytes(buf)
|
2017-12-15 11:19:40 +00:00
|
|
|
}
|
|
|
|
|
2024-05-08 16:10:32 +03:00
|
|
|
/// Implement [`TryRngCore`] for a type implementing [`RngCore`].
///
/// Ideally, `rand_core` would define blanket impls for this, but they conflict with blanket impls
/// for `&mut R` and `Box<R>`, so until specialization is stabilized, implementer crates
/// have to implement `TryRngCore` directly.
#[macro_export]
macro_rules! impl_try_rng_from_rng_core {
    ($t:ty) => {
        impl $crate::TryRngCore for $t {
            // The wrapped `RngCore` methods cannot fail, so the error type is
            // uninhabited.
            type Error = core::convert::Infallible;

            #[inline]
            fn try_next_u32(&mut self) -> Result<u32, Self::Error> {
                Ok($crate::RngCore::next_u32(self))
            }

            #[inline]
            fn try_next_u64(&mut self) -> Result<u64, Self::Error> {
                Ok($crate::RngCore::next_u64(self))
            }

            #[inline]
            fn try_fill_bytes(&mut self, dst: &mut [u8]) -> Result<(), Self::Error> {
                $crate::RngCore::fill_bytes(self, dst);
                Ok(())
            }
        }
    };
}
|
|
|
|
|
|
|
|
/// Implement [`TryCryptoRng`] and [`TryRngCore`] for a type implementing [`CryptoRng`].
///
/// Ideally, `rand_core` would define blanket impls for this, but they conflict with blanket impls
/// for `&mut R` and `Box<R>`, so until specialization is stabilized, implementer crates
/// have to implement `TryRngCore` and `TryCryptoRng` directly.
#[macro_export]
macro_rules! impl_try_crypto_rng_from_crypto_rng {
    ($t:ty) => {
        $crate::impl_try_rng_from_rng_core!($t);

        impl $crate::TryCryptoRng for $t {}

        /// Check at compile time that `$t` implements `CryptoRng`
        const _: () = {
            const fn check_crypto_rng_impl<T: $crate::CryptoRng>() {}
            check_crypto_rng_impl::<$t>();
        };
    };
}
|
|
|
|
|
2020-08-02 01:13:22 +02:00
|
|
|
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_fill_via_u32_chunks() {
        let src_orig = [1u32, 2, 3];

        // Destination shorter (in bytes) than the source: all three words are
        // consumed, but only 11 of their 12 bytes are written.
        let mut src = src_orig;
        let mut dest = [0u8; 11];
        assert_eq!(fill_via_u32_chunks(&mut src, &mut dest), (3, 11));
        assert_eq!(dest, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0]);

        // Destination longer than the source: all 12 source bytes written,
        // final destination byte untouched.
        let mut src = src_orig;
        let mut dest = [0u8; 13];
        assert_eq!(fill_via_u32_chunks(&mut src, &mut dest), (3, 12));
        assert_eq!(dest, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0]);

        // Destination ends mid-word: the third word is never consumed.
        let mut src = src_orig;
        let mut dest = [0u8; 5];
        assert_eq!(fill_via_u32_chunks(&mut src, &mut dest), (2, 5));
        assert_eq!(dest, [1, 0, 0, 0, 2]);
    }

    #[test]
    fn test_fill_via_u64_chunks() {
        let src_orig = [1u64, 2];

        // Destination shorter (in bytes) than the source: both words consumed,
        // 11 of 16 bytes written.
        let mut src = src_orig;
        let mut dest = [0u8; 11];
        assert_eq!(fill_via_u64_chunks(&mut src, &mut dest), (2, 11));
        assert_eq!(dest, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]);

        // Destination longer than the source: all 16 source bytes written.
        let mut src = src_orig;
        let mut dest = [0u8; 17];
        assert_eq!(fill_via_u64_chunks(&mut src, &mut dest), (2, 16));
        assert_eq!(dest, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0]);

        // Destination ends mid-word: only the first word is consumed.
        let mut src = src_orig;
        let mut dest = [0u8; 5];
        assert_eq!(fill_via_u64_chunks(&mut src, &mut dest), (1, 5));
        assert_eq!(dest, [1, 0, 0, 0, 0]);
    }
}
|