diff --git a/benches/buffer.rs b/benches/buffer.rs
index 2899a3f7..0063a453 100644
--- a/benches/buffer.rs
+++ b/benches/buffer.rs
@@ -1,7 +1,10 @@
 #![feature(test, maybe_uninit_uninit_array_transpose)]
 extern crate test;
 
-use std::mem::MaybeUninit;
+use std::{
+    mem::{size_of, MaybeUninit},
+    slice,
+};
 
 // Call getrandom on a zero-initialized stack buffer
 #[inline(always)]
@@ -19,6 +22,53 @@ fn bench_fill_uninit() {
     test::black_box(buf);
 }
 
+#[bench]
+pub fn bench_u32(b: &mut test::Bencher) {
+    #[inline(never)]
+    fn inner() -> u32 {
+        getrandom::u32().unwrap()
+    }
+    b.bytes = 4;
+    b.iter(inner);
+}
+#[bench]
+pub fn bench_u32_via_fill(b: &mut test::Bencher) {
+    #[inline(never)]
+    fn inner() -> u32 {
+        let mut res = MaybeUninit::<u32>::uninit();
+        let dst: &mut [MaybeUninit<u8>] =
+            unsafe { slice::from_raw_parts_mut(res.as_mut_ptr().cast(), size_of::<u32>()) };
+        getrandom::fill_uninit(dst).unwrap();
+        unsafe { res.assume_init() }
+    }
+    b.bytes = 4;
+    b.iter(inner);
+}
+
+#[bench]
+pub fn bench_u64(b: &mut test::Bencher) {
+    #[inline(never)]
+    fn inner() -> u64 {
+        getrandom::u64().unwrap()
+    }
+    b.bytes = 8;
+    b.iter(inner);
+}
+
+#[bench]
+pub fn bench_u64_via_fill(b: &mut test::Bencher) {
+    #[inline(never)]
+    fn inner() -> u64 {
+        let mut res = MaybeUninit::<u64>::uninit();
+        let dst: &mut [MaybeUninit<u8>] =
+            unsafe { slice::from_raw_parts_mut(res.as_mut_ptr().cast(), size_of::<u64>()) };
+        getrandom::fill_uninit(dst).unwrap();
+        unsafe { res.assume_init() }
+    }
+    b.bytes = 8;
+    b.iter(inner);
+}
+
 // We benchmark using #[inline(never)] "inner" functions for two reasons:
 // - Avoiding inlining reduces a source of variance when running benchmarks.
 // - It is _much_ easier to get the assembly or IR for the inner loop.
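
Note (not part of the diff): the *_via_fill benchmarks measure the cost of producing an integer through the byte-oriented fill_uninit API, as a comparison point for the dedicated getrandom::u32()/u64() helpers. Below is a minimal standalone sketch of that conversion with the safety reasoning written out as comments; it assumes a getrandom version that exposes fill_uninit, and the helper name u32_via_fill is purely illustrative.

use std::{
    mem::{size_of, MaybeUninit},
    slice,
};

fn u32_via_fill() -> Result<u32, getrandom::Error> {
    let mut res = MaybeUninit::<u32>::uninit();
    // View the uninitialized u32 as a slice of uninitialized bytes.
    // SAFETY: res.as_mut_ptr() is valid for reads/writes of size_of::<u32>()
    // bytes, and MaybeUninit<u8> has no validity requirements on its contents.
    let dst: &mut [MaybeUninit<u8>] =
        unsafe { slice::from_raw_parts_mut(res.as_mut_ptr().cast(), size_of::<u32>()) };
    getrandom::fill_uninit(dst)?;
    // SAFETY: on success, fill_uninit has initialized every byte of res,
    // and any bit pattern is a valid u32.
    Ok(unsafe { res.assume_init() })
}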