Remove some unsafe, update to zerocopy 0.8.0 (#1502)

Joshua Liebow-Feeser, 2024-10-07 01:29:52 -07:00, committed by GitHub
parent bc3341185e
commit d2eb51bc29
4 changed files with 7 additions and 17 deletions


@@ -70,7 +70,7 @@ rand_core = { path = "rand_core", version = "=0.9.0-alpha.1", default-features =
 log = { version = "0.4.4", optional = true }
 serde = { version = "1.0.103", features = ["derive"], optional = true }
 rand_chacha = { path = "rand_chacha", version = "=0.9.0-alpha.1", default-features = false, optional = true }
-zerocopy = { version = "0.7.33", default-features = false, features = ["simd"] }
+zerocopy = { version = "0.8.0", default-features = false, features = ["simd"] }
 
 [dev-dependencies]
 rand_pcg = { path = "rand_pcg", version = "=0.9.0-alpha.1" }


@@ -32,4 +32,4 @@ serde = ["dep:serde"] # enables serde for BlockRng wrapper
 [dependencies]
 serde = { version = "1", features = ["derive"], optional = true }
 getrandom = { version = "0.2", optional = true }
-zerocopy = { version = "0.7.33", default-features = false }
+zerocopy = { version = "0.8.0", default-features = false }


@@ -19,7 +19,7 @@
 use crate::RngCore;
 use core::cmp::min;
-use zerocopy::AsBytes;
+use zerocopy::{Immutable, IntoBytes};
 
 /// Implement `next_u64` via `next_u32`, little-endian order.
 pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
@@ -53,7 +53,7 @@ pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
     }
 }
 
-trait Observable: AsBytes + Copy {
+trait Observable: IntoBytes + Immutable + Copy {
     fn to_le(self) -> Self;
 }
 impl Observable for u32 {
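
Note on the trait-bound change above: zerocopy 0.8 splits the byte-view functionality that 0.7 exposed through AsBytes. The methods now live on IntoBytes, and the by-reference as_bytes() view additionally requires Immutable (no interior mutability), hence the new IntoBytes + Immutable bound. A minimal standalone sketch of the new bounds, assuming only zerocopy 0.8 as a dependency; the bytes_of helper below is illustrative, not part of this repository:

use zerocopy::{Immutable, IntoBytes};

// Hypothetical free function mirroring the `Observable: IntoBytes + Immutable`
// bound in the hunk above: any such value can be viewed as a plain byte slice.
fn bytes_of<T: IntoBytes + Immutable>(value: &T) -> &[u8] {
    // `as_bytes` is supplied by `IntoBytes`; its `Self: Immutable` requirement
    // rules out interior mutability behind the shared byte view.
    value.as_bytes()
}

fn main() {
    let x: u32 = 0x0102_0304;
    // Prints [4, 3, 2, 1] on any target: `to_le` performs the same
    // normalization that `Observable::to_le` provides in rand_core.
    println!("{:?}", bytes_of(&x.to_le()));
}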


@@ -12,8 +12,8 @@
 use crate::distr::uniform::{SampleRange, SampleUniform};
 use crate::distr::{self, Distribution, Standard};
 use core::num::Wrapping;
-use core::{mem, slice};
 use rand_core::RngCore;
+use zerocopy::IntoBytes;
 
 /// User-level interface for RNGs
 ///
@@ -374,12 +374,7 @@ macro_rules! impl_fill {
             #[inline(never)] // in micro benchmarks, this improves performance
             fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R) {
                 if self.len() > 0 {
-                    rng.fill_bytes(unsafe {
-                        slice::from_raw_parts_mut(self.as_mut_ptr()
-                            as *mut u8,
-                            mem::size_of_val(self)
-                        )
-                    });
+                    rng.fill_bytes(self.as_mut_bytes());
                     for x in self {
                         *x = x.to_le();
                     }
@@ -391,12 +386,7 @@
             #[inline(never)]
             fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R) {
                 if self.len() > 0 {
-                    rng.fill_bytes(unsafe {
-                        slice::from_raw_parts_mut(self.as_mut_ptr()
-                            as *mut u8,
-                            self.len() * mem::size_of::<$t>()
-                        )
-                    });
+                    rng.fill_bytes(self.as_mut_bytes());
                     for x in self {
                         *x = Wrapping(x.0.to_le());
                     }