Avoid zero-sized allocs in ThinBox if T and H are both ZSTs.

Author: Thom Chiovoloni, 2022-05-02 01:45:21 -07:00
parent 764b8615e9
commit fc109bb6c6
GPG key ID: E2EFD4309E11C8A8
3 changed files with 277 additions and 18 deletions
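
Background: `ThinBox<T>` stores its header immediately before the value in a single allocation. When both the value type `T` and the header `H` are zero-sized, the combined layout has size zero, and passing a zero-sized layout to `alloc::alloc` is undefined behavior. A minimal repro sketch of the affected case (assumes a nightly toolchain, since `ThinBox` is unstable; this is not code from the commit):

    #![feature(thin_box)]
    use std::boxed::ThinBox;

    fn main() {
        // `T = ()` is zero-sized, and the pointee-metadata header for a
        // sized type is also zero-sized, so the combined header+value
        // layout has size zero. Before this commit, `ThinBox::new` passed
        // that zero-sized layout to `alloc::alloc` (UB); with this change
        // it hands out a well-aligned dangling pointer and never touches
        // the allocator.
        let b = ThinBox::new(());
        assert_eq!(*b, ());
    }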

library/alloc/src/boxed/thin.rs

@@ -138,7 +138,11 @@ fn data(&self) -> *mut u8 {
     }
 }
 
-/// A pointer to type-erased data, guaranteed to have a header `H` before the pointed-to location.
+/// A pointer to type-erased data, guaranteed to either be:
+/// 1. `NonNull::dangling()`, in the case where both the pointee (`T`) and
+///    metadata (`H`) are ZSTs.
+/// 2. A pointer to a valid `T` that has a header `H` directly before the
+///    pointed-to location.
 struct WithHeader<H>(NonNull<u8>, PhantomData<H>);
 
 impl<H> WithHeader<H> {
@@ -156,16 +160,27 @@ fn new<T>(header: H, value: T) -> WithHeader<H> {
         };
 
         unsafe {
-            let ptr = alloc::alloc(layout);
+            // Note: It's UB to pass a layout with a zero size to `alloc::alloc`, so
+            // we use `layout.dangling()` for this case, which should have a valid
+            // alignment for both `T` and `H`.
+            let ptr = if layout.size() == 0 {
+                // Some paranoia checking, mostly so that the ThinBox tests are
+                // more able to catch issues.
+                debug_assert!(
+                    value_offset == 0 && mem::size_of::<T>() == 0 && mem::size_of::<H>() == 0
+                );
+                layout.dangling()
+            } else {
+                let ptr = alloc::alloc(layout);
 
-            if ptr.is_null() {
-                alloc::handle_alloc_error(layout);
-            }
-            // Safety:
-            // - The size is at least `aligned_header_size`.
-            let ptr = ptr.add(value_offset) as *mut _;
+                if ptr.is_null() {
+                    alloc::handle_alloc_error(layout);
+                }
+                // Safety:
+                // - The size is at least `aligned_header_size`.
+                let ptr = ptr.add(value_offset) as *mut _;
 
-            let ptr = NonNull::new_unchecked(ptr);
+                NonNull::new_unchecked(ptr)
+            };
 
             let result = WithHeader(ptr, PhantomData);
             ptr::write(result.header(), header);
@@ -175,18 +190,28 @@ fn new<T>(header: H, value: T) -> WithHeader<H> {
         }
     }
 
-    // Safety:
-    // - Assumes that `value` can be dereferenced.
+    // Safety:
+    // - Assumes that either `value` can be dereferenced, or is the
+    //   `NonNull::dangling()` we use when both `T` and `H` are ZSTs.
     unsafe fn drop<T: ?Sized>(&self, value: *mut T) {
         unsafe {
+            let value_layout = Layout::for_value_raw(value);
             // SAFETY: Layout must have been computable if we're in drop
-            let (layout, value_offset) =
-                Self::alloc_layout(Layout::for_value_raw(value)).unwrap_unchecked();
+            let (layout, value_offset) = Self::alloc_layout(value_layout).unwrap_unchecked();
 
-            ptr::drop_in_place::<T>(value);
             // We only drop the value because the Pointee trait requires that the metadata is copy
-            // aka trivially droppable
-            alloc::dealloc(self.0.as_ptr().sub(value_offset), layout);
+            // aka trivially droppable.
+            ptr::drop_in_place::<T>(value);
+
+            // Note: Don't deallocate if the layout size is zero, because the pointer
+            // didn't come from the allocator.
+            if layout.size() != 0 {
+                alloc::dealloc(self.0.as_ptr().sub(value_offset), layout);
+            } else {
+                debug_assert!(
+                    value_offset == 0 && mem::size_of::<H>() == 0 && value_layout.size() == 0
+                );
+            }
         }
     }
@@ -198,7 +223,9 @@ fn header(&self) -> *mut H {
         // needed to align the header. Subtracting the header size from the aligned data pointer
         // will always result in an aligned header pointer, it just may not point to the
         // beginning of the allocation.
-        unsafe { self.0.as_ptr().sub(Self::header_size()) as *mut H }
+        let hp = unsafe { self.0.as_ptr().sub(Self::header_size()) as *mut H };
+        debug_assert!((hp.addr() & (core::mem::align_of::<H>() - 1)) == 0);
+        hp
     }
 
     fn value(&self) -> *mut u8 {
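The allocation and deallocation guards above must agree: a pointer produced for a zero-sized layout never came from the allocator, so `drop` must not pass it to `dealloc`. A self-contained sketch of the paired pattern (hypothetical helper names; `Layout::dangling()` is a private std helper, approximated here by casting the alignment to a pointer):

    use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};

    /// Allocate for `layout`, substituting an aligned dangling pointer when
    /// the size is zero (`alloc` requires a non-zero-sized layout).
    unsafe fn checked_alloc(layout: Layout) -> *mut u8 {
        if layout.size() == 0 {
            // Non-null and well-aligned, but NOT from the allocator: it
            // must never be passed to `dealloc`.
            layout.align() as *mut u8
        } else {
            let ptr = alloc(layout);
            if ptr.is_null() {
                handle_alloc_error(layout);
            }
            ptr
        }
    }

    /// The matching guard: only deallocate pointers that came from `alloc`.
    unsafe fn checked_dealloc(ptr: *mut u8, layout: Layout) {
        if layout.size() != 0 {
            dealloc(ptr, layout);
        }
    }

    fn main() {
        let layout = Layout::new::<()>();
        unsafe {
            let p = checked_alloc(layout);
            assert!(!p.is_null());
            checked_dealloc(p, layout); // no-op for the zero-sized case
        }
    }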

library/alloc/tests/lib.rs

@@ -42,6 +42,9 @@
 #![feature(panic_update_hook)]
 #![feature(slice_flatten)]
 #![feature(thin_box)]
+#![feature(bench_black_box)]
+#![feature(strict_provenance)]
+#![feature(once_cell)]
 
 use std::collections::hash_map::DefaultHasher;
 use std::hash::{Hash, Hasher};
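These gates support the new tests: `strict_provenance` provides `pointer::addr()`, used for the alignment assertions, and `bench_black_box` provides `core::hint::black_box`. A small standalone illustration of the `addr()`-based alignment check (hypothetical helper name; nightly-only at the time of this commit):

    #![feature(strict_provenance)]

    fn is_aligned_to<T>(ptr: *const T, align: usize) -> bool {
        assert!(align.is_power_of_two());
        // `addr()` extracts the address without an `as usize` cast, which
        // strict provenance discourages.
        ptr.addr() & (align - 1) == 0
    }

    fn main() {
        let x: u64 = 7;
        assert!(is_aligned_to(&x, core::mem::align_of::<u64>()));
    }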

library/alloc/tests/thin_box.rs

@@ -1,3 +1,4 @@
+use core::fmt::Debug;
 use core::mem::size_of;
 use std::boxed::ThinBox;
 
@@ -24,3 +25,231 @@ trait Tr {}
     assert!(is_thin::<[i32]>());
     assert!(is_thin::<i32>());
 }
+
+#[track_caller]
+fn verify_aligned<T>(ptr: *const T) {
+    // Use `black_box` to attempt to obscure the fact that we're calling this
+    // function on pointers that come from box/references, which the compiler
+    // would otherwise realize is impossible (because it would mean we've
+    // already executed UB).
+    //
+    // That is, we'd *like* it to be possible for the asserts in this function
+    // to detect brokenness in the ThinBox impl.
+    //
+    // It would probably be better if we instead had these as debug_asserts
+    // inside `ThinBox`, prior to the point where we do the UB. Anyway, in
+    // practice these checks are mostly just smoke-detectors for an extremely
+    // broken `ThinBox` impl, since it's an extremely subtle piece of code.
+    let ptr = core::hint::black_box(ptr);
+    let align = core::mem::align_of::<T>();
+    assert!(
+        (ptr.addr() & (align - 1)) == 0 && !ptr.is_null(),
+        "misaligned ThinBox data; valid pointers to `{}` should be aligned to {align}: {ptr:p}",
+        core::any::type_name::<T>(),
+    );
+}
+
+#[track_caller]
+fn check_thin_sized<T: Debug + PartialEq + Clone>(make: impl FnOnce() -> T) {
+    let value = make();
+    let boxed = ThinBox::new(value.clone());
+    let val = &*boxed;
+    verify_aligned(val as *const T);
+    assert_eq!(val, &value);
+}
+
+#[track_caller]
+fn check_thin_dyn<T: Debug + PartialEq + Clone>(make: impl FnOnce() -> T) {
+    let value = make();
+    let wanted_debug = format!("{value:?}");
+    let boxed: ThinBox<dyn Debug> = ThinBox::new_unsize(value.clone());
+    let val = &*boxed;
+    // wide reference -> wide pointer -> thin pointer
+    verify_aligned(val as *const dyn Debug as *const T);
+    let got_debug = format!("{val:?}");
+    assert_eq!(wanted_debug, got_debug);
+}
+
+macro_rules! define_test {
+    (
+        @test_name: $testname:ident;
+
+        $(#[$m:meta])*
+        struct $Type:ident($inner:ty);
+
+        $($test_stmts:tt)*
+    ) => {
+        #[test]
+        fn $testname() {
+            use core::sync::atomic::{AtomicIsize, Ordering};
+            // Define the type, and implement new/clone/drop in such a way that
+            // the number of live instances will be counted.
+            $(#[$m])*
+            #[derive(Debug, PartialEq)]
+            struct $Type {
+                _priv: $inner,
+            }
+
+            impl Clone for $Type {
+                fn clone(&self) -> Self {
+                    verify_aligned(self);
+                    Self::new(self._priv.clone())
+                }
+            }
+
+            impl Drop for $Type {
+                fn drop(&mut self) {
+                    verify_aligned(self);
+                    Self::modify_live(-1);
+                }
+            }
+
+            impl $Type {
+                fn new(i: $inner) -> Self {
+                    Self::modify_live(1);
+                    Self { _priv: i }
+                }
+
+                fn modify_live(n: isize) -> isize {
+                    static COUNTER: AtomicIsize = AtomicIsize::new(0);
+                    COUNTER.fetch_add(n, Ordering::Relaxed) + n
+                }
+
+                fn live_objects() -> isize {
+                    Self::modify_live(0)
+                }
+            }
+
+            // Run the test statements
+            let _: () = { $($test_stmts)* };
+
+            // Check that we didn't leak anything, or call drop too many times.
+            assert_eq!(
+                $Type::live_objects(), 0,
+                "Wrong number of drops of {}, `initializations - drops` should be 0.",
+                stringify!($Type),
+            );
+        }
+    };
+}
+
+define_test! {
+    @test_name: align1zst;
+    struct Align1Zst(());
+
+    check_thin_sized(|| Align1Zst::new(()));
+    check_thin_dyn(|| Align1Zst::new(()));
+}
+
+define_test! {
+    @test_name: align1small;
+    struct Align1Small(u8);
+
+    check_thin_sized(|| Align1Small::new(50));
+    check_thin_dyn(|| Align1Small::new(50));
+}
+
+define_test! {
+    @test_name: align1_size_not_pow2;
+    struct Align64NotPow2Size([u8; 79]);
+
+    check_thin_sized(|| Align64NotPow2Size::new([100; 79]));
+    check_thin_dyn(|| Align64NotPow2Size::new([100; 79]));
+}
+
+define_test! {
+    @test_name: align1big;
+    struct Align1Big([u8; 256]);
+
+    check_thin_sized(|| Align1Big::new([5u8; 256]));
+    check_thin_dyn(|| Align1Big::new([5u8; 256]));
+}
+
+// Note: `#[repr(align(2))]` is worth testing because
+// - can have pointers which are misaligned, unlike align(1)
+// - is still expected to have an alignment less than the alignment of a vtable.
+define_test! {
+    @test_name: align2zst;
+    #[repr(align(2))]
+    struct Align2Zst(());
+
+    check_thin_sized(|| Align2Zst::new(()));
+    check_thin_dyn(|| Align2Zst::new(()));
+}
+
+define_test! {
+    @test_name: align2small;
+    #[repr(align(2))]
+    struct Align2Small(u8);
+
+    check_thin_sized(|| Align2Small::new(60));
+    check_thin_dyn(|| Align2Small::new(60));
+}
+
+define_test! {
+    @test_name: align2full;
+    #[repr(align(2))]
+    struct Align2Full([u8; 2]);
+
+    check_thin_sized(|| Align2Full::new([3u8; 2]));
+    check_thin_dyn(|| Align2Full::new([3u8; 2]));
+}
+
+define_test! {
+    @test_name: align2_size_not_pow2;
+    #[repr(align(2))]
+    struct Align2NotPower2Size([u8; 6]);
+
+    check_thin_sized(|| Align2NotPower2Size::new([3; 6]));
+    check_thin_dyn(|| Align2NotPower2Size::new([3; 6]));
+}
+
+define_test! {
+    @test_name: align2big;
+    #[repr(align(2))]
+    struct Align2Big([u8; 256]);
+
+    check_thin_sized(|| Align2Big::new([5u8; 256]));
+    check_thin_dyn(|| Align2Big::new([5u8; 256]));
+}
+
+define_test! {
+    @test_name: align64zst;
+    #[repr(align(64))]
+    struct Align64Zst(());
+
+    check_thin_sized(|| Align64Zst::new(()));
+    check_thin_dyn(|| Align64Zst::new(()));
+}
+
+define_test! {
+    @test_name: align64small;
+    #[repr(align(64))]
+    struct Align64Small(u8);
+
+    check_thin_sized(|| Align64Small::new(50));
+    check_thin_dyn(|| Align64Small::new(50));
+}
+
+define_test! {
+    @test_name: align64med;
+    #[repr(align(64))]
+    struct Align64Med([u8; 64]);
+
+    check_thin_sized(|| Align64Med::new([10; 64]));
+    check_thin_dyn(|| Align64Med::new([10; 64]));
+}
+
+define_test! {
+    @test_name: align64_size_not_pow2;
+    #[repr(align(64))]
+    struct Align64NotPow2Size([u8; 192]);
+
+    check_thin_sized(|| Align64NotPow2Size::new([10; 192]));
+    check_thin_dyn(|| Align64NotPow2Size::new([10; 192]));
+}
+
+define_test! {
+    @test_name: align64big;
+    #[repr(align(64))]
+    struct Align64Big([u8; 256]);
+
+    check_thin_sized(|| Align64Big::new([10; 256]));
+    check_thin_dyn(|| Align64Big::new([10; 256]));
+}
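
The `define_test!` macro's leak and double-drop detection reduces to a per-type atomic counter: `new` increments it, `Drop` decrements it, and the test asserts it returns to zero afterwards. A self-contained miniature of that pattern (hypothetical `Counted` type; the real macro scopes the counter inside each generated test function, so every generated type gets its own):

    use std::sync::atomic::{AtomicIsize, Ordering};

    static LIVE: AtomicIsize = AtomicIsize::new(0);

    #[derive(Debug, PartialEq)]
    struct Counted(u8);

    impl Counted {
        fn new(v: u8) -> Self {
            LIVE.fetch_add(1, Ordering::Relaxed);
            Counted(v)
        }
    }

    impl Clone for Counted {
        fn clone(&self) -> Self {
            Counted::new(self.0)
        }
    }

    impl Drop for Counted {
        fn drop(&mut self) {
            LIVE.fetch_sub(1, Ordering::Relaxed);
        }
    }

    fn main() {
        {
            let a = Counted::new(1);
            let _b = a.clone();
        } // both values drop here
        // `initializations - drops` must be zero: a positive value means a
        // leak, a negative one means a double drop.
        assert_eq!(LIVE.load(Ordering::Relaxed), 0);
    }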