From 066fafe2062b17fac343219e0067edbc14b774ae Mon Sep 17 00:00:00 2001 From: "Felix S. Klock II" Date: Tue, 23 May 2017 14:47:09 +0200 Subject: [PATCH 01/10] Add API for `Alloc` trait. Includes `alloc_zeroed` method that `RawVec` has come to depend on. Exposed private `Layout::from_size_align` ctor to be `pub`, and added explicit conditions for when it will panic (namely, when `align` is not power-of-two, or if rounding up `size` to a multiple of `align` overflows). Normalized all `Layout` construction to go through `Layout::from_size_align`. Addressed review feedback regarding `struct Layout` and zero-sized layouts. Restrict specification for `dealloc`, adding additional constraint that the given alignment has to match that used to allocate the block. (This is a maximally conservative constraint on the alignment. An open question to resolve (before stabilization) is whether we can return to a looser constraint such as the one previously specified.) Split `fn realloc_in_place` into separate `fn grow_in_place` and `fn shrink_in_place` methods, which have default impls that check against usable_size for reuse. Make `realloc` default impl try `grow_in_place` or `shrink_in_place` as appropriate before fallback on alloc+copy+dealloc. Drive-by: When reviewing calls to `padding_needed_for`, discovered what I think was an over-conservative choice for its argument alignment. Namely, in `fn extend`, we automatically realign the whole resulting layout to satisfy both old (self) and new alignments. When the old alignment exceeds the new, this means we would insert unnecessary padding. So I changed the code to pass in `next.align` instead of `new_align` to `padding_needed_for`. Replaced ref to `realloc_in_place` with `grow_in_place`/`shrink_in_place`. Revised docs replacing my idiosyncratic style of `fn foo` with just `foo` when referring to the function or method `foo`. (Alpha-renamed `Allocator` to `Alloc`.) 
Post-rebased, added `Debug` derive for `allocator::Excess` to satisfy `missing_debug_implementations`. --- src/liballoc/allocator.rs | 986 ++++++++++++++++++++++++++++++++++++++ src/liballoc/lib.rs | 4 + 2 files changed, 990 insertions(+) create mode 100644 src/liballoc/allocator.rs diff --git a/src/liballoc/allocator.rs b/src/liballoc/allocator.rs new file mode 100644 index 00000000000..89324cf86f6 --- /dev/null +++ b/src/liballoc/allocator.rs @@ -0,0 +1,986 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![unstable(feature = "allocator_api", + reason = "the precise API and guarantees it provides may be tweaked \ + slightly, especially to possibly take into account the \ + types being stored to make room for a future \ + tracing garbage collector", + issue = "27700")] + +use core::cmp; +use core::mem; +use core::usize; +use core::ptr::{self, Unique}; + +/// Represents the combination of a starting address and +/// a total capacity of the returned block. +#[derive(Debug)] +pub struct Excess(pub *mut u8, pub usize); + +fn size_align() -> (usize, usize) { + (mem::size_of::(), mem::align_of::()) +} + +/// Layout of a block of memory. +/// +/// An instance of `Layout` describes a particular layout of memory. +/// You build a `Layout` up as an input to give to an allocator. +/// +/// All layouts have an associated non-negative size and a +/// power-of-two alignment. +/// +/// (Note however that layouts are *not* required to have positive +/// size, even though many allocators require that all memory +/// requeusts have positive size. 
A caller to the `Alloc::alloc` +/// method must either ensure that conditions like this are met, or +/// use specific allocators with looser requirements.) +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Layout { + // size of the requested block of memory, measured in bytes. + size: usize, + + // alignment of the requested block of memory, measured in bytes. + // we ensure that this is always a power-of-two, because API's + // like `posix_memalign` require it and it is a reasonable + // constraint to impose on Layout constructors. + // + // (However, we do not analogously require `align >= sizeof(void*)`, + // even though that is *also* a requirement of `posix_memalign`.) + align: usize, +} + + +// FIXME: audit default implementations for overflow errors, +// (potentially switching to overflowing_add and +// overflowing_mul as necessary). + +impl Layout { + /// Constructs a `Layout` from a given `size` and `align`. + /// + /// # Panics + /// + /// Panics if any of the following conditions are not met: + /// + /// * `align` must be a power of two, + /// + /// * `size`, when rounded up to the nearest multiple of `align`, + /// must not overflow (i.e. the rounded value must be less than + /// `usize::MAX`). + pub fn from_size_align(size: usize, align: usize) -> Layout { + assert!(align.is_power_of_two()); // (this implies align != 0.) + + // Rounded up size is: + // size_rounded_up = (size + align - 1) & !(align - 1); + // + // We know from above that align != 0. If adding (align - 1) + // does not overflow, then rounding up will be fine. + // + // Conversely, &-masking with !(align - 1) will subtract off + // only low-order-bits. Thus if overflow occurs with the sum, + // the &-mask cannot subtract enough to undo that overflow. + // + // Above implies that checking for summation overflow is both + // necessary and sufficient. 
+ assert!(size <= usize::MAX - (align - 1)); + + Layout { size: size, align: align } + } + + /// The minimum size in bytes for a memory block of this layout. + pub fn size(&self) -> usize { self.size } + + /// The minimum byte alignment for a memory block of this layout. + pub fn align(&self) -> usize { self.align } + + /// Constructs a `Layout` suitable for holding a value of type `T`. + pub fn new() -> Self { + let (size, align) = size_align::(); + Layout::from_size_align(size, align) + } + + /// Produces layout describing a record that could be used to + /// allocate backing structure for `T` (which could be a trait + /// or other unsized type like a slice). + pub fn for_value(t: &T) -> Self { + let (size, align) = (mem::size_of_val(t), mem::align_of_val(t)); + Layout::from_size_align(size, align) + } + + /// Creates a layout describing the record that can hold a value + /// of the same layout as `self`, but that also is aligned to + /// alignment `align` (measured in bytes). + /// + /// If `self` already meets the prescribed alignment, then returns + /// `self`. + /// + /// Note that this method does not add any padding to the overall + /// size, regardless of whether the returned layout has a different + /// alignment. In other words, if `K` has size 16, `K.align_to(32)` + /// will *still* have size 16. + /// + /// # Panics + /// + /// Panics if `align` is not a power of two. + pub fn align_to(&self, align: usize) -> Self { + assert!(align.is_power_of_two()); + Layout::from_size_align(self.size, cmp::max(self.align, align)) + } + + /// Returns the amount of padding we must insert after `self` + /// to ensure that the following address will satisfy `align` + /// (measured in bytes). + /// + /// E.g. if `self.size` is 9, then `self.padding_needed_for(4)` + /// returns 3, because that is the minimum number of bytes of + /// padding required to get a 4-aligned address (assuming that the + /// corresponding memory block starts at a 4-aligned address). 
+ /// + /// The return value of this function has no meaning if `align` is + /// not a power-of-two. + /// + /// Note that the utility of the returned value requires `align` + /// to be less than or equal to the alignment of the starting + /// address for the whole allocated block of memory. One way to + /// satisfy this constraint is to ensure `align <= self.align`. + pub fn padding_needed_for(&self, align: usize) -> usize { let len = self.size(); + + // Rounded up value is: + // len_rounded_up = (len + align - 1) & !(align - 1); + // and then we return the padding difference: `len_rounded_up - len`. + // + // We use modular arithmetic throughout: + // + // 1. align is guaranteed to be > 0, so align - 1 is always + // valid. + // + // 2. `len + align - 1` can overflow by at most `align - 1`, + // so the &-mask with `!(align - 1)` will ensure that in the + // case of overflow, `len_rounded_up` will itself be 0. + // Thus the returned padding, when added to `len`, yields 0, + // which trivially satisfies the alignment `align`. + // + // (Of course, attempts to allocate blocks of memory whose + // size and padding overflow in the above manner should cause + // the allocator to yield an error anyway.) + + let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1); + return len_rounded_up.wrapping_sub(len); + } + + /// Creates a layout describing the record for `n` instances of + /// `self`, with a suitable amount of padding between each to + /// ensure that each instance is given its requested size and + /// alignment. On success, returns `(k, offs)` where `k` is the + /// layout of the array and `offs` is the distance between the start + /// of each element in the array. + /// + /// On arithmetic overflow, returns `None`. 
+ pub fn repeat(&self, n: usize) -> Option<(Self, usize)> { let padded_size = match self.size.checked_add(self.padding_needed_for(self.align)) { None => return None, Some(padded_size) => padded_size, }; let alloc_size = match padded_size.checked_mul(n) { None => return None, Some(alloc_size) => alloc_size, }; Some((Layout::from_size_align(alloc_size, self.align), padded_size)) } + + /// Creates a layout describing the record for `self` followed by + /// `next`, including any necessary padding to ensure that `next` + /// will be properly aligned. Note that the result layout will + /// satisfy the alignment properties of both `self` and `next`. + /// + /// Returns `Some((k, offset))`, where `k` is layout of the concatenated + /// record and `offset` is the relative location, in bytes, of the + /// start of the `next` embedded within the concatenated record + /// (assuming that the record itself starts at offset 0). + /// + /// On arithmetic overflow, returns `None`. + pub fn extend(&self, next: Self) -> Option<(Self, usize)> { let new_align = cmp::max(self.align, next.align); let realigned = Layout::from_size_align(self.size, new_align); let pad = realigned.padding_needed_for(next.align); let offset = match self.size.checked_add(pad) { None => return None, Some(offset) => offset, }; let new_size = match offset.checked_add(next.size) { None => return None, Some(new_size) => new_size, }; Some((Layout::from_size_align(new_size, new_align), offset)) } + + /// Creates a layout describing the record for `n` instances of + /// `self`, with no padding between each instance. + /// + /// Note that, unlike `repeat`, `repeat_packed` does not guarantee + /// that the repeated instances of `self` will be properly + /// aligned, even if a given instance of `self` is properly + /// aligned. 
In other words, if the layout returned by + /// `repeat_packed` is used to allocate an array, it is not + /// guaranteed that all elements in the array will be properly + /// aligned. + /// + /// On arithmetic overflow, returns `None`. + pub fn repeat_packed(&self, n: usize) -> Option { let size = match self.size().checked_mul(n) { None => return None, Some(scaled) => scaled, }; Some(Layout::from_size_align(size, self.align)) } + + /// Creates a layout describing the record for `self` followed by + /// `next` with no additional padding between the two. Since no + /// padding is inserted, the alignment of `next` is irrelevant, + /// and is not incorporated *at all* into the resulting layout. + /// + /// Returns `(k, offset)`, where `k` is layout of the concatenated + /// record and `offset` is the relative location, in bytes, of the + /// start of the `next` embedded within the concatenated record + /// (assuming that the record itself starts at offset 0). + /// + /// (The `offset` is always the same as `self.size()`; we use this + /// signature out of convenience in matching the signature of + /// `extend`.) + /// + /// On arithmetic overflow, returns `None`. + pub fn extend_packed(&self, next: Self) -> Option<(Self, usize)> { let new_size = match self.size().checked_add(next.size()) { None => return None, Some(new_size) => new_size, }; Some((Layout::from_size_align(new_size, self.align), self.size())) } + + /// Creates a layout describing the record for a `[T; n]`. + /// + /// On arithmetic overflow, returns `None`. + pub fn array(n: usize) -> Option { Layout::new::() .repeat(n) .map(|(k, offs)| { debug_assert!(offs == mem::size_of::()); k }) } +} + +/// The `AllocErr` error specifies whether an allocation failure is +/// specifically due to resource exhaustion or if it is due to +/// something wrong when combining the given input arguments with this +/// allocator. 
+#[derive(Clone, PartialEq, Eq, Debug)] +pub enum AllocErr { + /// Error due to hitting some resource limit or otherwise running + /// out of memory. This condition strongly implies that *some* + /// series of deallocations would allow a subsequent reissuing of + /// the original allocation request to succeed. + Exhausted { request: Layout }, + + /// Error due to allocator being fundamentally incapable of + /// satisfying the original request. This condition implies that + /// such an allocation request will never succeed on the given + /// allocator, regardless of environment, memory pressure, or + /// other contextual conditions. + /// + /// For example, an allocator that does not support requests for + /// large memory blocks might return this error variant. + Unsupported { details: &'static str }, +} + +impl AllocErr { + pub fn invalid_input(details: &'static str) -> Self { + AllocErr::Unsupported { details: details } + } + pub fn is_memory_exhausted(&self) -> bool { + if let AllocErr::Exhausted { .. } = *self { true } else { false } + } + pub fn is_request_unsupported(&self) -> bool { + if let AllocErr::Unsupported { .. } = *self { true } else { false } + } +} + +/// The `CannotReallocInPlace` error is used when `grow_in_place` or +/// `shrink_in_place` were unable to reuse the given memory block for +/// a requested layout. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct CannotReallocInPlace; + +/// An implementation of `Alloc` can allocate, reallocate, and +/// deallocate arbitrary blocks of data described via `Layout`. +/// +/// Some of the methods require that a memory block be *currently +/// allocated* via an allocator. 
This means that: +/// +/// * the starting address for that memory block was previously +/// returned by a previous call to an allocation method (`alloc`, +/// `alloc_zeroed`, `alloc_excess`, `alloc_one`, `alloc_array`) or +/// reallocation method (`realloc`, `realloc_excess`, or +/// `realloc_array`), and +/// +/// * the memory block has not been subsequently deallocated, where +/// blocks are deallocated either by being passed to a deallocation +/// method (`dealloc`, `dealloc_one`, `dealloc_array`) or by being +/// passed to a reallocation method (see above) that returns `Ok`. +/// +/// A note regarding zero-sized types and zero-sized layouts: many +/// methods in the `Alloc` trait state that allocation requests +/// must be non-zero size, or else undefined behavior can result. +/// +/// * However, some higher-level allocation methods (`alloc_one`, +/// `alloc_array`) are well-defined on zero-sized types and can +/// optionally support them: it is left up to the implementor +/// whether to return `Err`, or to return `Ok` with some pointer. +/// +/// * If an `Alloc` implementation chooses to return `Ok` in this +/// case (i.e. the pointer denotes a zero-sized inaccessible block) +/// then that returned pointer must be considered "currently +/// allocated". On such an allocator, *all* methods that take +/// currently-allocated pointers as inputs must accept these +/// zero-sized pointers, *without* causing undefined behavior. +/// +/// * In other words, if a zero-sized pointer can flow out of an +/// allocator, then that allocator must likewise accept that pointer +/// flowing back into its deallocation and reallocation methods. +/// +/// Some of the methods require that a layout *fit* a memory block. +/// What it means for a layout to "fit" a memory block means (or +/// equivalently, for a memory block to "fit" a layout) is that the +/// following two conditions must hold: +/// +/// 1. The block's starting address must be aligned to `layout.align()`. +/// +/// 2. 
The block's size must fall in the range `[use_min, use_max]`, where: +/// +/// * `use_min` is `self.usable_size(layout).0`, and +/// +/// * `use_max` is the capacity that was (or would have been) +/// returned when (if) the block was allocated via a call to +/// `alloc_excess` or `realloc_excess`. +/// +/// Note that: +/// +/// * the size of the layout most recently used to allocate the block +/// is guaranteed to be in the range `[use_min, use_max]`, and +/// +/// * a lower-bound on `use_max` can be safely approximated by a call to +/// `usable_size`. +/// +/// * if a layout `k` fits a memory block (denoted by `ptr`) +/// currently allocated via an allocator `a`, then it is legal to +/// use that layout to deallocate it, i.e. `a.dealloc(ptr, k);`. +pub unsafe trait Alloc { + + // (Note: existing allocators have unspecified but well-defined + // behavior in response to a zero size allocation request ; + // e.g. in C, `malloc` of 0 will either return a null pointer or a + // unique pointer, but will not have arbitrary undefined + // behavior. Rust should consider revising the alloc::heap crate + // to reflect this reality.) + + /// Returns a pointer meeting the size and alignment guarantees of + /// `layout`. + /// + /// If this method returns an `Ok(addr)`, then the `addr` returned + /// will be non-null address pointing to a block of storage + /// suitable for holding an instance of `layout`. + /// + /// The returned block of storage may or may not have its contents + /// initialized. (Extension subtraits might restrict this + /// behavior, e.g. to ensure initialization to particular sets of + /// bit patterns.) + /// + /// # Unsafety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure that `layout` has non-zero size. + /// + /// (Extension subtraits might provide more specific bounds on + /// behavior, e.g. guarantee a sentinel address or a null pointer + /// in response to a zero-size allocation request.) 
+ /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `layout` does not meet allocator's size or alignment + /// constraints. + /// + /// Implementations are encouraged to return `Err` on memory + /// exhaustion rather than panicking or aborting, but this is not + /// a strict requirement. (Specifically: it is *legal* to + /// implement this trait atop an underlying native allocation + /// library that aborts on memory exhaustion.) + /// + /// Clients wishing to abort computation in response to an + /// allocation error are encouraged to call the allocator's `oom` + /// method, rather than directly invoking `panic!` or similar. + unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr>; + + /// Deallocate the memory referenced by `ptr`. + /// + /// # Unsafety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure all of the following: + /// + /// * `ptr` must denote a block of memory currently allocated via + /// this allocator, + /// + /// * `layout` must *fit* that block of memory, + /// + /// * In addition to fitting the block of memory `layout`, the + /// alignment of the `layout` must match the alignment used + /// to allocate that block of memory. + unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout); + + /// Allocator-specific method for signalling an out-of-memory + /// condition. + /// + /// `oom` aborts the thread or process, optionally performing + /// cleanup or logging diagnostic information before panicking or + /// aborting. + /// + /// `oom` is meant to be used by clients unable to cope with an + /// unsatisfied allocation request (signaled by an error such as + /// `AllocErr::Exhausted`), and wish to abandon computation rather + /// than attempt to recover locally. 
Such clients should pass the + /// signalling error value back into `oom`, where the allocator + /// may incorporate that error value into its diagnostic report + /// before aborting. + /// + /// Implementations of the `oom` method are discouraged from + /// infinitely regressing in nested calls to `oom`. In + /// practice this means implementors should eschew allocating, + /// especially from `self` (directly or indirectly). + /// + /// Implementations of the allocation and reallocation methods + /// (e.g. `alloc`, `alloc_one`, `realloc`) are discouraged from + /// panicking (or aborting) in the event of memory exhaustion; + /// instead they should return an appropriate error from the + /// invoked method, and let the client decide whether to invoke + /// this `oom` method in response. + fn oom(&mut self, _: AllocErr) -> ! { unsafe { ::core::intrinsics::abort() } } + + // == ALLOCATOR-SPECIFIC QUANTITIES AND LIMITS == + // usable_size + + /// Returns bounds on the guaranteed usable size of a successful + /// allocation created with the specified `layout`. + /// + /// In particular, if one has a memory block allocated via a given + /// allocator `a` and layout `k` where `a.usable_size(k)` returns + /// `(l, u)`, then one can pass that block to `a.dealloc()` with a + /// layout in the size range [l, u]. + /// + /// (All implementors of `usable_size` must ensure that + /// `l <= k.size() <= u`) + /// + /// Both the lower- and upper-bounds (`l` and `u` respectively) + /// are provided, because an allocator based on size classes could + /// misbehave if one attempts to deallocate a block without + /// providing a correct value for its size (i.e., one within the + /// range `[l, u]`). + /// + /// Clients who wish to make use of excess capacity are encouraged + /// to use the `alloc_excess` and `realloc_excess` instead, as + /// this method is constrained to report conservative values that + /// serve as valid bounds for *all possible* allocation method + /// calls. 
+ /// + /// However, for clients that do not wish to track the capacity + /// returned by `alloc_excess` locally, this method is likely to + /// produce useful results. + fn usable_size(&self, layout: &Layout) -> (usize, usize) { + (layout.size(), layout.size()) + } + + // == METHODS FOR MEMORY REUSE == + // realloc. alloc_excess, realloc_excess + + /// Returns a pointer suitable for holding data described by + /// `new_layout`, meeting its size and alignment guarantees. To + /// accomplish this, this may extend or shrink the allocation + /// referenced by `ptr` to fit `new_layout`. + /// + /// If this returns `Ok`, then ownership of the memory block + /// referenced by `ptr` has been transferred to this + /// allocator. The memory may or may not have been freed, and + /// should be considered unusable (unless of course it was + /// transferred back to the caller again via the return value of + /// this method). + /// + /// If this method returns `Err`, then ownership of the memory + /// block has not been transferred to this allocator, and the + /// contents of the memory block are unaltered. + /// + /// For best results, `new_layout` should not impose a different + /// alignment constraint than `layout`. (In other words, + /// `new_layout.align()` should equal `layout.align()`.) However, + /// behavior is well-defined (though underspecified) when this + /// constraint is violated; further discussion below. + /// + /// # Unsafety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure all of the following: + /// + /// * `ptr` must be currently allocated via this allocator, + /// + /// * `layout` must *fit* the `ptr` (see above). (The `new_layout` + /// argument need not fit it.) + /// + /// * `new_layout` must have size greater than zero. + /// + /// * the alignment of `new_layout` is non-zero. + /// + /// (Extension subtraits might provide more specific bounds on + /// behavior, e.g. 
guarantee a sentinel address or a null pointer + /// in response to a zero-size allocation request.) + /// + /// # Errors + /// + /// Returns `Err` only if `new_layout` does not match the + /// alignment of `layout`, or does not meet the allocator's size + /// and alignment constraints of the allocator, or if reallocation + /// otherwise fails. + /// + /// (Note the previous sentence did not say "if and only if" -- in + /// particular, an implementation of this method *can* return `Ok` + /// if `new_layout.align() != old_layout.align()`; or it can + /// return `Err` in that scenario, depending on whether this + /// allocator can dynamically adjust the alignment constraint for + /// the block.) + /// + /// Implementations are encouraged to return `Err` on memory + /// exhaustion rather than panicking or aborting, but this is not + /// a strict requirement. (Specifically: it is *legal* to + /// implement this trait atop an underlying native allocation + /// library that aborts on memory exhaustion.) + /// + /// Clients wishing to abort computation in response to an + /// reallocation error are encouraged to call the allocator's `oom` + /// method, rather than directly invoking `panic!` or similar. + unsafe fn realloc(&mut self, + ptr: *mut u8, + layout: Layout, + new_layout: Layout) -> Result<*mut u8, AllocErr> { + let new_size = new_layout.size(); + let old_size = layout.size(); + let aligns_match = layout.align == new_layout.align; + + if new_size >= old_size && aligns_match { + if let Ok(()) = self.grow_in_place(ptr, layout.clone(), new_layout.clone()) { + return Ok(ptr); + } + } else if new_size < old_size && aligns_match { + if let Ok(()) = self.shrink_in_place(ptr, layout.clone(), new_layout.clone()) { + return Ok(ptr); + } + } + + // otherwise, fall back on alloc + copy + dealloc. 
+ let result = self.alloc(new_layout); + if let Ok(new_ptr) = result { + ptr::copy_nonoverlapping(ptr as *const u8, new_ptr, cmp::min(old_size, new_size)); + self.dealloc(ptr, layout); + } + result + } + + /// Behaves like `alloc`, but also ensures that the contents + /// are set to zero before being returned. + /// + /// # Unsafety + /// + /// This function is unsafe for the same reasons that `alloc` is. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `layout` does not meet allocator's size or alignment + /// constraints, just as in `alloc`. + /// + /// Clients wishing to abort computation in response to an + /// allocation error are encouraged to call the allocator's `oom` + /// method, rather than directly invoking `panic!` or similar. + unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { + let size = layout.size(); + let p = self.alloc(layout); + if let Ok(p) = p { + ptr::write_bytes(p, 0, size); + } + p + } + + /// Behaves like `alloc`, but also returns the whole size of + /// the returned block. For some `layout` inputs, like arrays, this + /// may include extra storage usable for additional data. + /// + /// # Unsafety + /// + /// This function is unsafe for the same reasons that `alloc` is. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `layout` does not meet allocator's size or alignment + /// constraints, just as in `alloc`. + /// + /// Clients wishing to abort computation in response to an + /// allocation error are encouraged to call the allocator's `oom` + /// method, rather than directly invoking `panic!` or similar. + unsafe fn alloc_excess(&mut self, layout: Layout) -> Result { + let usable_size = self.usable_size(&layout); + self.alloc(layout).map(|p| Excess(p, usable_size.1)) + } + + /// Behaves like `realloc`, but also returns the whole size of + /// the returned block. 
For some `layout` inputs, like arrays, this + /// may include extra storage usable for additional data. + /// + /// # Unsafety + /// + /// This function is unsafe for the same reasons that `realloc` is. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `layout` does not meet allocator's size or alignment + /// constraints, just as in `realloc`. + /// + /// Clients wishing to abort computation in response to an + /// reallocation error are encouraged to call the allocator's `oom` + /// method, rather than directly invoking `panic!` or similar. + unsafe fn realloc_excess(&mut self, + ptr: *mut u8, + layout: Layout, + new_layout: Layout) -> Result { + let usable_size = self.usable_size(&new_layout); + self.realloc(ptr, layout, new_layout) + .map(|p| Excess(p, usable_size.1)) + } + + /// Attempts to extend the allocation referenced by `ptr` to fit `new_layout`. + /// + /// If this returns `Ok`, then the allocator has asserted that the + /// memory block referenced by `ptr` now fits `new_layout`, and thus can + /// be used to carry data of that layout. (The allocator is allowed to + /// expend effort to accomplish this, such as extending the memory block to + /// include successor blocks, or virtual memory tricks.) + /// + /// Regardless of what this method returns, ownership of the + /// memory block referenced by `ptr` has not been transferred, and + /// the contents of the memory block are unaltered. + /// + /// # Unsafety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure all of the following: + /// + /// * `ptr` must be currently allocated via this allocator, + /// + /// * `layout` must *fit* the `ptr` (see above); note the + /// `new_layout` argument need not fit it, + /// + /// * `new_layout.size()` must not be less than `layout.size()`, + /// + /// * `new_layout.align()` must equal `layout.align()`. 
+ /// + /// # Errors + /// + /// Returns `Err(CannotReallocInPlace)` when the allocator is + /// unable to assert that the memory block referenced by `ptr` + /// could fit `layout`. + /// + /// Note that one cannot pass `CannotReallocInPlace` to the `oom` + /// method; clients are expected either to be able to recover from + /// `grow_in_place` failures without aborting, or to fall back on + /// another reallocation method before resorting to an abort. + unsafe fn grow_in_place(&mut self, + ptr: *mut u8, + layout: Layout, + new_layout: Layout) -> Result<(), CannotReallocInPlace> { + let _ = ptr; // this default implementation doesn't care about the actual address. + debug_assert!(new_layout.size >= layout.size); + debug_assert!(new_layout.align == layout.align); + let (_l, u) = self.usable_size(&layout); + // _l <= layout.size() [guaranteed by usable_size()] + // layout.size() <= new_layout.size() [required by this method] + if new_layout.size <= u { + return Ok(()); + } else { + return Err(CannotReallocInPlace); + } + } + + /// Attempts to shrink the allocation referenced by `ptr` to fit `new_layout`. + /// + /// If this returns `Ok`, then the allocator has asserted that the + /// memory block referenced by `ptr` now fits `new_layout`, and + /// thus can only be used to carry data of that smaller + /// layout. (The allocator is allowed to take advantage of this, + /// carving off portions of the block for reuse elsewhere.) The + /// truncated contents of the block within the smaller layout are + /// unaltered, and ownership of block has not been transferred. + /// + /// If this returns `Err`, then the memory block is considered to + /// still represent the original (larger) `layout`. None of the + /// block has been carved off for reuse elsewhere, ownership of + /// the memory block has not been transferred, and the contents of + /// the memory block are unaltered. 
+ /// + /// # Unsafety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure all of the following: + /// + /// * `ptr` must be currently allocated via this allocator, + /// + /// * `layout` must *fit* the `ptr` (see above); note the + /// `new_layout` argument need not fit it, + /// + /// * `new_layout.size()` must not be greater than `layout.size()` + /// (and must be greater than zero), + /// + /// * `new_layout.align()` must equal `layout.align()`. + /// + /// # Errors + /// + /// Returns `Err(CannotReallocInPlace)` when the allocator is + /// unable to assert that the memory block referenced by `ptr` + /// could fit `layout`. + /// + /// Note that one cannot pass `CannotReallocInPlace` to the `oom` + /// method; clients are expected either to be able to recover from + /// `shrink_in_place` failures without aborting, or to fall back + /// on another reallocation method before resorting to an abort. + unsafe fn shrink_in_place(&mut self, + ptr: *mut u8, + layout: Layout, + new_layout: Layout) -> Result<(), CannotReallocInPlace> { + let _ = ptr; // this default implementation doesn't care about the actual address. + debug_assert!(new_layout.size <= layout.size); + debug_assert!(new_layout.align == layout.align); + let (l, _u) = self.usable_size(&layout); + // layout.size() <= _u [guaranteed by usable_size()] + // new_layout.size() <= layout.size() [required by this method] + if l <= new_layout.size { + return Ok(()); + } else { + return Err(CannotReallocInPlace); + } + } + + + // == COMMON USAGE PATTERNS == + // alloc_one, dealloc_one, alloc_array, realloc_array. dealloc_array + + /// Allocates a block suitable for holding an instance of `T`. + /// + /// Captures a common usage pattern for allocators. + /// + /// The returned block is suitable for passing to the + /// `alloc`/`realloc` methods of this allocator. 
+ /// + /// Note to implementors: If this returns `Ok(ptr)`, then `ptr` + /// must be considered "currently allocated" and must be + /// acceptable input to methods such as `realloc` or `dealloc`, + /// *even if* `T` is a zero-sized type. In other words, if your + /// `Alloc` implementation overrides this method in a manner + /// that can return a zero-sized `ptr`, then all reallocation and + /// deallocation methods need to be similarly overridden to accept + /// such values as input. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `T` does not meet allocator's size or alignment constraints. + /// + /// For zero-sized `T`, may return either of `Ok` or `Err`, but + /// will *not* yield undefined behavior. + /// + /// Clients wishing to abort computation in response to an + /// allocation error are encouraged to call the allocator's `oom` + /// method, rather than directly invoking `panic!` or similar. + fn alloc_one(&mut self) -> Result, AllocErr> + where Self: Sized + { + let k = Layout::new::(); + if k.size() > 0 { + unsafe { self.alloc(k).map(|p|Unique::new(*p as *mut T)) } + } else { + Err(AllocErr::invalid_input("zero-sized type invalid for alloc_one")) + } + } + + /// Deallocates a block suitable for holding an instance of `T`. + /// + /// The given block must have been produced by this allocator, + /// and must be suitable for storing a `T` (in terms of alignment + /// as well as minimum and maximum size); otherwise yields + /// undefined behavior. + /// + /// Captures a common usage pattern for allocators. + /// + /// # Unsafety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure both: + /// + /// * `ptr` must denote a block of memory currently allocated via this allocator + /// + /// * the layout of `T` must *fit* that block of memory. 
+ unsafe fn dealloc_one(&mut self, ptr: Unique) + where Self: Sized + { + let raw_ptr = ptr.as_ptr() as *mut u8; + let k = Layout::new::(); + if k.size() > 0 { + self.dealloc(raw_ptr, k); + } + } + + /// Allocates a block suitable for holding `n` instances of `T`. + /// + /// Captures a common usage pattern for allocators. + /// + /// The returned block is suitable for passing to the + /// `alloc`/`realloc` methods of this allocator. + /// + /// Note to implementors: If this returns `Ok(ptr)`, then `ptr` + /// must be considered "currently allocated" and must be + /// acceptable input to methods such as `realloc` or `dealloc`, + /// *even if* `T` is a zero-sized type. In other words, if your + /// `Alloc` implementation overrides this method in a manner + /// that can return a zero-sized `ptr`, then all reallocation and + /// deallocation methods need to be similarly overridden to accept + /// such values as input. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `[T; n]` does not meet allocator's size or alignment + /// constraints. + /// + /// For zero-sized `T` or `n == 0`, may return either of `Ok` or + /// `Err`, but will *not* yield undefined behavior. + /// + /// Always returns `Err` on arithmetic overflow. + /// + /// Clients wishing to abort computation in response to an + /// allocation error are encouraged to call the allocator's `oom` + /// method, rather than directly invoking `panic!` or similar. + fn alloc_array(&mut self, n: usize) -> Result, AllocErr> + where Self: Sized + { + match Layout::array::(n) { + Some(ref layout) if layout.size() > 0 => { + unsafe { + self.alloc(layout.clone()) + .map(|p| { + Unique::new(p as *mut T) + }) + } + } + _ => Err(AllocErr::invalid_input("invalid layout for alloc_array")), + } + } + + /// Reallocates a block previously suitable for holding `n_old` + /// instances of `T`, returning a block suitable for holding + /// `n_new` instances of `T`. 
+ /// + /// Captures a common usage pattern for allocators. + /// + /// The returned block is suitable for passing to the + /// `alloc`/`realloc` methods of this allocator. + /// + /// # Unsafety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure all of the following: + /// + /// * `ptr` must be currently allocated via this allocator, + /// + /// * the layout of `[T; n_old]` must *fit* that block of memory. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `[T; n_new]` does not meet allocator's size or alignment + /// constraints. + /// + /// For zero-sized `T` or `n_new == 0`, may return either of `Ok` or + /// `Err`, but will *not* yield undefined behavior. + /// + /// Always returns `Err` on arithmetic overflow. + /// + /// Clients wishing to abort computation in response to an + /// reallocation error are encouraged to call the allocator's `oom` + /// method, rather than directly invoking `panic!` or similar. + unsafe fn realloc_array(&mut self, + ptr: Unique, + n_old: usize, + n_new: usize) -> Result, AllocErr> + where Self: Sized + { + match (Layout::array::(n_old), Layout::array::(n_new), ptr.as_ptr()) { + (Some(ref k_old), Some(ref k_new), ptr) if k_old.size() > 0 && k_new.size() > 0 => { + self.realloc(ptr as *mut u8, k_old.clone(), k_new.clone()) + .map(|p|Unique::new(p as *mut T)) + } + _ => { + Err(AllocErr::invalid_input("invalid layout for realloc_array")) + } + } + } + + /// Deallocates a block suitable for holding `n` instances of `T`. + /// + /// Captures a common usage pattern for allocators. + /// + /// # Unsafety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure both: + /// + /// * `ptr` must denote a block of memory currently allocated via this allocator + /// + /// * the layout of `[T; n]` must *fit* that block of memory. 
+ /// + /// # Errors + /// + /// Returning `Err` indicates that either `[T; n]` or the given + /// memory block does not meet allocator's size or alignment + /// constraints. + /// + /// Always returns `Err` on arithmetic overflow. + unsafe fn dealloc_array(&mut self, ptr: Unique, n: usize) -> Result<(), AllocErr> + where Self: Sized + { + let raw_ptr = ptr.as_ptr() as *mut u8; + match Layout::array::(n) { + Some(ref k) if k.size() > 0 => { + Ok(self.dealloc(raw_ptr, k.clone())) + } + _ => { + Err(AllocErr::invalid_input("invalid layout for dealloc_array")) + } + } + } +} diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 5252dabc127..ca52943ea97 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -143,6 +143,10 @@ #[macro_use] mod macros; +// Allocator trait and helper struct definitions + +pub mod allocator; + // Heaps provided for low-level allocation strategies pub mod heap; From 1d3bc4e90fab35d4debe7d6cb0468d299b38354c Mon Sep 17 00:00:00 2001 From: "Felix S. Klock II" Date: Thu, 15 Jun 2017 22:44:56 +0200 Subject: [PATCH 02/10] Changed `Layout::from_size_align` to return `Option`. Added `unwrap` calls in all the places where I can infer that the conditions are met to avoid panic (or when the calling method itself says it will panic in such a case). --- src/liballoc/allocator.rs | 62 +++++++++++++++++++++++++++------------ 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/src/liballoc/allocator.rs b/src/liballoc/allocator.rs index 89324cf86f6..c308d99a72c 100644 --- a/src/liballoc/allocator.rs +++ b/src/liballoc/allocator.rs @@ -63,19 +63,21 @@ pub struct Layout { // overflowing_mul as necessary). impl Layout { - /// Constructs a `Layout` from a given `size` and `align`. 
- /// - /// # Panics - /// - /// Panics if any of the following conditions are not met: + /// Constructs a `Layout` from a given `size` and `align`, + /// or returns `None` if either of the following conditions + /// are not met: /// /// * `align` must be a power of two, /// /// * `size`, when rounded up to the nearest multiple of `align`, /// must not overflow (i.e. the rounded value must be less than /// `usize::MAX`). - pub fn from_size_align(size: usize, align: usize) -> Layout { - assert!(align.is_power_of_two()); // (this implies align != 0.) + pub fn from_size_align(size: usize, align: usize) -> Option { + if !align.is_power_of_two() { + return None; + } + + // (power-of-two implies align != 0.) // Rounded up size is: // size_rounded_up = (size + align - 1) & !(align - 1); @@ -89,9 +91,11 @@ pub fn from_size_align(size: usize, align: usize) -> Layout { // // Above implies that checking for summation overflow is both // necessary and sufficient. - assert!(size <= usize::MAX - (align - 1)); + if size > usize::MAX - (align - 1) { + return None; + } - Layout { size: size, align: align } + Some(Layout { size: size, align: align }) } /// The minimum size in bytes for a memory block of this layout. @@ -103,7 +107,7 @@ pub fn align(&self) -> usize { self.align } /// Constructs a `Layout` suitable for holding a value of type `T`. pub fn new() -> Self { let (size, align) = size_align::(); - Layout::from_size_align(size, align) + Layout::from_size_align(size, align).unwrap() } /// Produces layout describing a record that could be used to @@ -111,7 +115,7 @@ pub fn new() -> Self { /// or other unsized type like a slice). 
pub fn for_value(t: &T) -> Self { let (size, align) = (mem::size_of_val(t), mem::align_of_val(t)); - Layout::from_size_align(size, align) + Layout::from_size_align(size, align).unwrap() } /// Creates a layout describing the record that can hold a value @@ -128,10 +132,10 @@ pub fn for_value(t: &T) -> Self { /// /// # Panics /// - /// Panics if `align` is not a power of two. + /// Panics if the combination of `self.size` and the given `align` + /// violates the conditions listed in `from_size_align`. pub fn align_to(&self, align: usize) -> Self { - assert!(align.is_power_of_two()); - Layout::from_size_align(self.size, cmp::max(self.align, align)) + Layout::from_size_align(self.size, cmp::max(self.align, align)).unwrap() } /// Returns the amount of padding we must insert after `self` @@ -193,7 +197,12 @@ pub fn repeat(&self, n: usize) -> Option<(Self, usize)> { None => return None, Some(alloc_size) => alloc_size, }; - Some((Layout::from_size_align(alloc_size, self.align), padded_size)) + + // We can assume that `self.align` is a power-of-two. + // Furthermore, `alloc_size` has already been rounded up + // to a multiple of `self.align`; therefore, the call + // to `Layout::from_size_align` below should never panic. + Some((Layout::from_size_align(alloc_size, self.align).unwrap(), padded_size)) } /// Creates a layout describing the record for `self` followed by @@ -209,8 +218,13 @@ pub fn repeat(&self, n: usize) -> Option<(Self, usize)> { /// On arithmetic overflow, returns `None`.
pub fn extend(&self, next: Self) -> Option<(Self, usize)> { let new_align = cmp::max(self.align, next.align); - let realigned = Layout::from_size_align(self.size, new_align); + let realigned = match Layout::from_size_align(self.size, new_align) { + None => return None, + Some(l) => l, + }; + let pad = realigned.padding_needed_for(next.align); + let offset = match self.size.checked_add(pad) { None => return None, Some(offset) => offset, @@ -219,7 +233,12 @@ pub fn extend(&self, next: Self) -> Option<(Self, usize)> { None => return None, Some(new_size) => new_size, }; - Some((Layout::from_size_align(new_size, new_align), offset)) + + let layout = match Layout::from_size_align(new_size, new_align) { + None => return None, + Some(l) => l, + }; + Some((layout, offset)) } /// Creates a layout describing the record for `n` instances of @@ -239,7 +258,8 @@ pub fn repeat_packed(&self, n: usize) -> Option { None => return None, Some(scaled) => scaled, }; - Some(Layout::from_size_align(size, self.align)) + + Layout::from_size_align(size, self.align) } /// Creates a layout describing the record for `self` followed by @@ -262,7 +282,11 @@ pub fn extend_packed(&self, next: Self) -> Option<(Self, usize)> { None => return None, Some(new_size) => new_size, }; - Some((Layout::from_size_align(new_size, self.align), self.size())) + let layout = match Layout::from_size_align(new_size, self.align) { + None => return None, + Some(l) => l, + }; + Some((layout, self.size())) } /// Creates a layout describing the record for a `[T; n]`. From 65d02b26c525c07d47686302a15231b28914fda4 Mon Sep 17 00:00:00 2001 From: "Felix S. Klock II" Date: Tue, 23 May 2017 14:47:41 +0200 Subject: [PATCH 03/10] Add impl of `Alloc` for the global rust heap. Alpha-renamed `HeapAllocator` to `HeapAlloc`. `::alloc_zeroed` is hooked up to `heap::allocate_zeroed`. `HeapAlloc::realloc` falls back on alloc+copy+realloc on align mismatch. 
--- src/liballoc/heap.rs | 79 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 78 insertions(+), 1 deletion(-) diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs index 5ff21c86483..d46c6a83ff3 100644 --- a/src/liballoc/heap.rs +++ b/src/liballoc/heap.rs @@ -15,7 +15,8 @@ tracing garbage collector", issue = "27700")] -use core::{isize, usize}; +use allocator::{Alloc, AllocErr, CannotReallocInPlace, Layout}; +use core::{isize, usize, cmp, ptr}; use core::intrinsics::{min_align_of_val, size_of_val}; #[allow(improper_ctypes)] @@ -44,6 +45,82 @@ fn check_size_and_alignment(size: usize, align: usize) { align); } +#[derive(Copy, Clone, Default, Debug)] +pub struct HeapAlloc; + +unsafe impl Alloc for HeapAlloc { + unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { + let addr = allocate(layout.size(), layout.align()); + if addr.is_null() { + Err(AllocErr::Exhausted { request: layout }) + } else { + Ok(addr) + } + } + + unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { + let addr = allocate_zeroed(layout.size(), layout.align()); + if addr.is_null() { + Err(AllocErr::Exhausted { request: layout }) + } else { + Ok(addr) + } + } + + unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { + deallocate(ptr, layout.size(), layout.align()); + } + + fn usable_size(&self, layout: &Layout) -> (usize, usize) { + (layout.size(), usable_size(layout.size(), layout.align())) + } + + unsafe fn realloc(&mut self, + ptr: *mut u8, + layout: Layout, + new_layout: Layout) + -> Result<*mut u8, AllocErr> + { + let old_size = layout.size(); + let new_size = new_layout.size(); + if layout.align() == new_layout.align() { + let new_ptr = reallocate(ptr, old_size, new_size, layout.align()); + if new_ptr.is_null() { + // We assume `reallocate` already tried alloc + copy + + // dealloc fallback; thus pointless to repeat effort + Err(AllocErr::Exhausted { request: new_layout }) + } else { + Ok(new_ptr) + } + } else { + // if 
alignments don't match, fall back on alloc + copy + dealloc + let result = self.alloc(new_layout); + if let Ok(new_ptr) = result { + ptr::copy_nonoverlapping(ptr as *const u8, new_ptr, cmp::min(old_size, new_size)); + self.dealloc(ptr, layout); + } + result + } + } + + unsafe fn grow_in_place(&mut self, + ptr: *mut u8, + layout: Layout, + new_layout: Layout) + -> Result<(), CannotReallocInPlace> + { + // grow_in_place spec requires this, and the spec for reallocate_inplace + // makes it hard to detect failure if it does not hold. + debug_assert!(new_layout.size() >= layout.size()); + + if layout.align() != new_layout.align() { // reallocate_inplace requires this. + return Err(CannotReallocInPlace); + } + let usable = reallocate_inplace(ptr, layout.size(), new_layout.size(), layout.align()); + if usable >= new_layout.size() { Ok(()) } else { Err(CannotReallocInPlace) } + } +} + // FIXME: #13996: mark the `allocate` and `reallocate` return value as `noalias` /// Return a pointer to `size` bytes of memory aligned to `align`. From 23ab50455fa1f2f12602e65edc0b16b595ec05df Mon Sep 17 00:00:00 2001 From: "Felix S. Klock II" Date: Wed, 24 May 2017 18:06:11 +0200 Subject: [PATCH 04/10] Allocator integration in `RawVec`. Includes methods exposing underlying allocator and the deallocation routine. Includes test illustrating a tiny `Alloc` that just bounds the total bytes allocated. Alpha-renamed `Allocator` to `Alloc` (and `HeapAllocator` to `HeapAlloc`). --- src/liballoc/raw_vec.rs | 357 ++++++++++++++++++++++++++-------------- 1 file changed, 230 insertions(+), 127 deletions(-) diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index 34ab0a19d4e..7117c446821 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -8,11 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms.
-use core::ptr::Unique; +use allocator::{Alloc, Layout}; +use core::ptr::{self, Unique}; use core::mem; use core::slice; -use heap; -use super::oom; +use heap::{HeapAlloc}; use super::boxed::Box; use core::ops::Drop; use core::cmp; @@ -45,17 +45,16 @@ /// field. This allows zero-sized types to not be special-cased by consumers of /// this type. #[allow(missing_debug_implementations)] -pub struct RawVec { +pub struct RawVec { ptr: Unique, cap: usize, + a: A, } -impl RawVec { - /// Creates the biggest possible RawVec without allocating. If T has positive - /// size, then this makes a RawVec with capacity 0. If T has 0 size, then it - /// it makes a RawVec with capacity `usize::MAX`. Useful for implementing - /// delayed allocation. - pub fn new() -> Self { +impl RawVec { + /// Like `new` but parameterized over the choice of allocator for + /// the returned RawVec. + pub fn new_in(a: A) -> Self { // !0 is usize::MAX. This branch should be stripped at compile time. let cap = if mem::size_of::() == 0 { !0 } else { 0 }; @@ -63,13 +62,71 @@ pub fn new() -> Self { RawVec { ptr: Unique::empty(), cap: cap, + a: a, } } - /// Creates a RawVec with exactly the capacity and alignment requirements - /// for a `[T; cap]`. This is equivalent to calling RawVec::new when `cap` is 0 - /// or T is zero-sized. Note that if `T` is zero-sized this means you will *not* - /// get a RawVec with the requested capacity! + /// Like `with_capacity` but parameterized over the choice of + /// allocator for the returned RawVec. + #[inline] + pub fn with_capacity_in(cap: usize, a: A) -> Self { + RawVec::allocate_in(cap, false, a) + } + + /// Like `with_capacity_zeroed` but parameterized over the choice + /// of allocator for the returned RawVec. 
+ #[inline] + pub fn with_capacity_zeroed_in(cap: usize, a: A) -> Self { + RawVec::allocate_in(cap, true, a) + } + + fn allocate_in(cap: usize, zeroed: bool, mut a: A) -> Self { + unsafe { + let elem_size = mem::size_of::(); + + let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow"); + alloc_guard(alloc_size); + + // handles ZSTs and `cap = 0` alike + let ptr = if alloc_size == 0 { + mem::align_of::() as *mut u8 + } else { + let align = mem::align_of::(); + let result = if zeroed { + a.alloc_zeroed(Layout::from_size_align(alloc_size, align).unwrap()) + } else { + a.alloc(Layout::from_size_align(alloc_size, align).unwrap()) + }; + match result { + Ok(ptr) => ptr, + Err(err) => a.oom(err), + } + }; + + RawVec { + ptr: Unique::new(ptr as *mut _), + cap: cap, + a: a, + } + } + } +} + +impl RawVec { + /// Creates the biggest possible RawVec (on the system heap) + /// without allocating. If T has positive size, then this makes a + /// RawVec with capacity 0. If T has 0 size, then it makes a + /// RawVec with capacity `usize::MAX`. Useful for implementing + /// delayed allocation. + pub fn new() -> Self { + Self::new_in(HeapAlloc) + } + + /// Creates a RawVec (on the system heap) with exactly the + /// capacity and alignment requirements for a `[T; cap]`. This is + /// equivalent to calling RawVec::new when `cap` is 0 or T is + /// zero-sized. Note that if `T` is zero-sized this means you will + /// *not* get a RawVec with the requested capacity! /// /// # Panics /// @@ -82,56 +139,46 @@ pub fn new() -> Self { /// Aborts on OOM #[inline] pub fn with_capacity(cap: usize) -> Self { - RawVec::allocate(cap, false) + RawVec::allocate_in(cap, false, HeapAlloc) } /// Like `with_capacity` but guarantees the buffer is zeroed.
#[inline] pub fn with_capacity_zeroed(cap: usize) -> Self { - RawVec::allocate(cap, true) + RawVec::allocate_in(cap, true, HeapAlloc) } +} - fn allocate(cap: usize, zeroed: bool) -> Self { - unsafe { - let elem_size = mem::size_of::(); - - let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow"); - alloc_guard(alloc_size); - - // handles ZSTs and `cap = 0` alike - let ptr = if alloc_size == 0 { - mem::align_of::() as *mut u8 - } else { - let align = mem::align_of::(); - let ptr = if zeroed { - heap::allocate_zeroed(alloc_size, align) - } else { - heap::allocate(alloc_size, align) - }; - if ptr.is_null() { - oom() - } - ptr - }; - - RawVec { - ptr: Unique::new(ptr as *mut _), - cap: cap, - } - } - } - - /// Reconstitutes a RawVec from a pointer and capacity. +impl RawVec { + /// Reconstitutes a RawVec from a pointer, capacity, and allocator. /// /// # Undefined Behavior /// - /// The ptr must be allocated, and with the given capacity. The + /// The ptr must be allocated (via the given allocator `a`), and with the given capacity. The + /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems). + /// If the ptr and capacity come from a RawVec created via `a`, then this is guaranteed. + pub unsafe fn from_raw_parts_in(ptr: *mut T, cap: usize, a: A) -> Self { + RawVec { + ptr: Unique::new(ptr), + cap: cap, + a: a, + } + } +} + +impl RawVec { + /// Reconstitutes a RawVec from a pointer, capacity. + /// + /// # Undefined Behavior + /// + /// The ptr must be allocated (on the system heap), and with the given capacity. The /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems). /// If the ptr and capacity come from a RawVec, then this is guaranteed. 
pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self { RawVec { ptr: Unique::new(ptr), cap: cap, + a: HeapAlloc, } } @@ -145,7 +192,7 @@ pub fn from_box(mut slice: Box<[T]>) -> Self { } } -impl RawVec { +impl RawVec { /// Gets a raw pointer to the start of the allocation. Note that this is /// Unique::empty() if `cap = 0` or T is zero-sized. In the former case, you must /// be careful. @@ -165,6 +212,16 @@ pub fn cap(&self) -> usize { } } + /// Returns a shared reference to the allocator backing this RawVec. + pub fn alloc(&self) -> &A { + &self.a + } + + /// Returns a mutable reference to the allocator backing this RawVec. + pub fn alloc_mut(&mut self) -> &mut A { + &mut self.a + } + /// Doubles the size of the type's backing allocation. This is common enough /// to want to do that it's easiest to just have a dedicated method. Slightly /// more efficient logic can be provided for this than the general case. @@ -215,32 +272,28 @@ pub fn double(&mut self) { // 0, getting to here necessarily means the RawVec is overfull. 
assert!(elem_size != 0, "capacity overflow"); - let align = mem::align_of::(); - - let (new_cap, ptr) = if self.cap == 0 { + let (new_cap, ptr_res) = if self.cap == 0 { // skip to 4 because tiny Vec's are dumb; but not if that would cause overflow let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 }; - let ptr = heap::allocate(new_cap * elem_size, align); - (new_cap, ptr) + let ptr_res = self.a.alloc_array::(new_cap); + (new_cap, ptr_res) } else { // Since we guarantee that we never allocate more than isize::MAX bytes, // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow let new_cap = 2 * self.cap; let new_alloc_size = new_cap * elem_size; alloc_guard(new_alloc_size); - let ptr = heap::reallocate(self.ptr() as *mut _, - self.cap * elem_size, - new_alloc_size, - align); - (new_cap, ptr) + let ptr_res = self.a.realloc_array(self.ptr, self.cap, new_cap); + (new_cap, ptr_res) }; // If allocate or reallocate fail, we'll get `null` back - if ptr.is_null() { - oom() - } + let uniq = match ptr_res { + Err(err) => self.a.oom(err), + Ok(uniq) => uniq, + }; - self.ptr = Unique::new(ptr as *mut _); + self.ptr = uniq; self.cap = new_cap; } } @@ -262,7 +315,6 @@ pub fn double(&mut self) { pub fn double_in_place(&mut self) -> bool { unsafe { let elem_size = mem::size_of::(); - let align = mem::align_of::(); // since we set the capacity to usize::MAX when elem_size is // 0, getting to here necessarily means the RawVec is overfull. @@ -274,15 +326,20 @@ pub fn double_in_place(&mut self) -> bool { let new_alloc_size = new_cap * elem_size; alloc_guard(new_alloc_size); - let size = heap::reallocate_inplace(self.ptr() as *mut _, - self.cap * elem_size, - new_alloc_size, - align); - if size >= new_alloc_size { - // We can't directly divide `size`. 
- self.cap = new_cap; + + let ptr = self.ptr() as *mut _; + let old_layout = Layout::new::().repeat(self.cap).unwrap().0; + let new_layout = Layout::new::().repeat(new_cap).unwrap().0; + match self.a.grow_in_place(ptr, old_layout, new_layout) { + Ok(_) => { + // We can't directly divide `size`. + self.cap = new_cap; + true + } + Err(_) => { + false + } } - size >= new_alloc_size } } @@ -309,7 +366,6 @@ pub fn double_in_place(&mut self) -> bool { pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) { unsafe { let elem_size = mem::size_of::(); - let align = mem::align_of::(); // NOTE: we don't early branch on ZSTs here because we want this // to actually catch "asking for more than usize::MAX" in that case. @@ -327,21 +383,19 @@ pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) { let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow"); alloc_guard(new_alloc_size); - let ptr = if self.cap == 0 { - heap::allocate(new_alloc_size, align) + let result = if self.cap == 0 { + self.a.alloc_array::(new_cap) } else { - heap::reallocate(self.ptr() as *mut _, - self.cap * elem_size, - new_alloc_size, - align) + self.a.realloc_array(self.ptr, self.cap, new_cap) }; // If allocate or reallocate fail, we'll get `null` back - if ptr.is_null() { - oom() - } + let uniq = match result { + Err(err) => self.a.oom(err), + Ok(uniq) => uniq, + }; - self.ptr = Unique::new(ptr as *mut _); + self.ptr = uniq; self.cap = new_cap; } } @@ -408,9 +462,6 @@ fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) -> (usize /// ``` pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) { unsafe { - let elem_size = mem::size_of::(); - let align = mem::align_of::(); - // NOTE: we don't early branch on ZSTs here because we want this // to actually catch "asking for more than usize::MAX" in that case. 
// If we make it past the first branch then we are guaranteed to @@ -426,21 +477,18 @@ pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) { // FIXME: may crash and burn on over-reserve alloc_guard(new_alloc_size); - let ptr = if self.cap == 0 { - heap::allocate(new_alloc_size, align) + let result = if self.cap == 0 { + self.a.alloc_array::(new_cap) } else { - heap::reallocate(self.ptr() as *mut _, - self.cap * elem_size, - new_alloc_size, - align) + self.a.realloc_array(self.ptr, self.cap, new_cap) }; - // If allocate or reallocate fail, we'll get `null` back - if ptr.is_null() { - oom() - } + let uniq = match result { + Err(err) => self.a.oom(err), + Ok(uniq) => uniq, + }; - self.ptr = Unique::new(ptr as *mut _); + self.ptr = uniq; self.cap = new_cap; } } @@ -464,9 +512,6 @@ pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) { /// `isize::MAX` bytes. pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool { unsafe { - let elem_size = mem::size_of::(); - let align = mem::align_of::(); - // NOTE: we don't early branch on ZSTs here because we want this // to actually catch "asking for more than usize::MAX" in that case. // If we make it past the first branch then we are guaranteed to @@ -479,18 +524,26 @@ pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> return false; } - let (_, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap); + let (new_cap, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap); // FIXME: may crash and burn on over-reserve alloc_guard(new_alloc_size); - let size = heap::reallocate_inplace(self.ptr() as *mut _, - self.cap * elem_size, - new_alloc_size, - align); - if size >= new_alloc_size { - self.cap = new_alloc_size / elem_size; + // Here, `cap < used_cap + needed_extra_cap <= new_cap` + // (regardless of whether `self.cap - used_cap` wrapped). + // Therefore we can safely call grow_in_place. 
+ + let ptr = self.ptr() as *mut _; + let old_layout = Layout::new::().repeat(self.cap).unwrap().0; + let new_layout = Layout::new::().repeat(new_cap).unwrap().0; + match self.a.grow_in_place(ptr, old_layout, new_layout) { + Ok(_) => { + self.cap = new_cap; + true + } + Err(_) => { + false + } } - size >= new_alloc_size } } @@ -506,7 +559,6 @@ pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> /// Aborts on OOM. pub fn shrink_to_fit(&mut self, amount: usize) { let elem_size = mem::size_of::(); - let align = mem::align_of::(); // Set the `cap` because they might be about to promote to a `Box<[T]>` if elem_size == 0 { @@ -518,24 +570,30 @@ pub fn shrink_to_fit(&mut self, amount: usize) { assert!(self.cap >= amount, "Tried to shrink to a larger capacity"); if amount == 0 { - mem::replace(self, RawVec::new()); + // We want to create a new zero-length vector within the + // same allocator. We use ptr::write to avoid an + // erroneous attempt to drop the contents, and we use + // ptr::read to sidestep condition against destructuring + // types that implement Drop. + + unsafe { + let a = ptr::read(&self.a as *const A); + self.dealloc_buffer(); + ptr::write(self, RawVec::new_in(a)); + } } else if self.cap != amount { unsafe { - // Overflow check is unnecessary as the vector is already at - // least this large. - let ptr = heap::reallocate(self.ptr() as *mut _, - self.cap * elem_size, - amount * elem_size, - align); - if ptr.is_null() { - oom() + match self.a.realloc_array(self.ptr, self.cap, amount) { + Err(err) => self.a.oom(err), + Ok(uniq) => self.ptr = uniq, } - self.ptr = Unique::new(ptr as *mut _); } self.cap = amount; } } +} +impl RawVec { /// Converts the entire buffer into `Box<[T]>`. 
/// /// While it is not *strictly* Undefined Behavior to call @@ -553,21 +611,25 @@ pub unsafe fn into_box(self) -> Box<[T]> { } } -unsafe impl<#[may_dangle] T> Drop for RawVec { +impl RawVec { /// Frees the memory owned by the RawVec *without* trying to Drop its contents. - fn drop(&mut self) { + pub unsafe fn dealloc_buffer(&mut self) { let elem_size = mem::size_of::(); if elem_size != 0 && self.cap != 0 { - let align = mem::align_of::(); - - let num_bytes = elem_size * self.cap; - unsafe { - heap::deallocate(self.ptr() as *mut u8, num_bytes, align); - } + let ptr = self.ptr() as *mut u8; + let layout = Layout::new::().repeat(self.cap).unwrap().0; + self.a.dealloc(ptr, layout); } } } +unsafe impl<#[may_dangle] T, A: Alloc> Drop for RawVec { + /// Frees the memory owned by the RawVec *without* trying to Drop its contents. + fn drop(&mut self) { + unsafe { self.dealloc_buffer(); } + } +} + // We need to guarantee the following: @@ -592,6 +654,46 @@ fn alloc_guard(alloc_size: usize) { mod tests { use super::*; + #[test] + fn allocator_param() { + use allocator::{Alloc, AllocErr}; + + // Writing a test of integration between third-party + // allocators and RawVec is a little tricky because the RawVec + // API does not expose fallible allocation methods, so we + // cannot check what happens when allocator is exhausted + // (beyond detecting a panic). + // + // Instead, this just checks that the RawVec methods do at + // least go through the Allocator API when it reserves + // storage. + + // A dumb allocator that consumes a fixed amount of fuel + // before allocation attempts start failing. 
+ struct BoundedAlloc { fuel: usize } + unsafe impl Alloc for BoundedAlloc { + unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { + let size = layout.size(); + if size > self.fuel { + return Err(AllocErr::Unsupported { details: "fuel exhausted" }); + } + match HeapAlloc.alloc(layout) { + ok @ Ok(_) => { self.fuel -= size; ok } + err @ Err(_) => err, + } + } + unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { + HeapAlloc.dealloc(ptr, layout) + } + } + + let a = BoundedAlloc { fuel: 500 }; + let mut v: RawVec = RawVec::with_capacity_in(50, a); + assert_eq!(v.a.fuel, 450); + v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel) + assert_eq!(v.a.fuel, 250); + } + #[test] fn reserve_does_not_overallocate() { { @@ -624,4 +726,5 @@ fn reserve_does_not_overallocate() { } } + } From 57ab9e7e7c481427a8d59e16b932fbd1e5fe4c02 Mon Sep 17 00:00:00 2001 From: "Felix S. Klock II" Date: Tue, 30 May 2017 17:22:24 +0200 Subject: [PATCH 05/10] placeholder for documentation of `allocator_api` library feature. Alpha-renamed `Allocator` to `Alloc`. --- .../src/library-features/allocator_api.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 src/doc/unstable-book/src/library-features/allocator_api.md diff --git a/src/doc/unstable-book/src/library-features/allocator_api.md b/src/doc/unstable-book/src/library-features/allocator_api.md new file mode 100644 index 00000000000..e3969ace7e9 --- /dev/null +++ b/src/doc/unstable-book/src/library-features/allocator_api.md @@ -0,0 +1,15 @@ +# `allocator_api` + +The tracking issue for this feature is [#32838] + +[#32838]: https://github.com/rust-lang/rust/issues/32838 + +------------------------ + +Sometimes you want the memory for one collection to use a different +allocator than the memory for another collection. In this case, +replacing the global allocator is not a workable option. 
Instead, +you need to pass in an instance of an `Alloc` to each collection +for which you want a custom allocator. + +TBD From 12d4d12fef4f9024ffcc8eb5fef11f1a21074cd7 Mon Sep 17 00:00:00 2001 From: "Felix S. Klock II" Date: Tue, 13 Jun 2017 21:57:49 +0200 Subject: [PATCH 06/10] implement Error trait for error structs added in allocator API. --- src/liballoc/allocator.rs | 28 ++++++++++++++++++++++++++++ src/libstd/error.rs | 19 +++++++++++++++++++ src/libstd/lib.rs | 1 + 3 files changed, 48 insertions(+) diff --git a/src/liballoc/allocator.rs b/src/liballoc/allocator.rs index c308d99a72c..752acbd0b45 100644 --- a/src/liballoc/allocator.rs +++ b/src/liballoc/allocator.rs @@ -16,6 +16,7 @@ issue = "27700")] use core::cmp; +use core::fmt; use core::mem; use core::usize; use core::ptr::{self, Unique}; @@ -335,6 +336,19 @@ pub fn is_memory_exhausted(&self) -> bool { pub fn is_request_unsupported(&self) -> bool { if let AllocErr::Unsupported { .. } = *self { true } else { false } } + pub fn description(&self) -> &str { + match *self { + AllocErr::Exhausted { .. } => "allocator memory exhausted", + AllocErr::Unsupported { .. 
} => "unsupported allocator request", + } + } +} + +// (we need this for downstream impl of trait Error) +impl fmt::Display for AllocErr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } } /// The `CannotReallocInPlace` error is used when `grow_in_place` or @@ -343,6 +357,20 @@ pub fn is_request_unsupported(&self) -> bool { #[derive(Clone, PartialEq, Eq, Debug)] pub struct CannotReallocInPlace; +impl CannotReallocInPlace { + pub fn description(&self) -> &str { + "cannot reallocate allocator's memory in place" + } +} + +// (we need this for downstream impl of trait Error) +impl fmt::Display for CannotReallocInPlace { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} + +/// An implementation of `Allocator` can allocate, reallocate, and /// An implementation of `Alloc` can allocate, reallocate, and /// deallocate arbitrary blocks of data described via `Layout`. /// diff --git a/src/libstd/error.rs b/src/libstd/error.rs index f56e3a5d780..3d203429e7b 100644 --- a/src/libstd/error.rs +++ b/src/libstd/error.rs @@ -51,6 +51,7 @@ // coherence challenge (e.g., specialization, neg impls, etc) we can // reconsider what crate these items belong in. +use alloc::allocator; use any::TypeId; use cell; use char; @@ -221,6 +222,24 @@ impl Error for ! 
{ fn description(&self) -> &str { *self } } +#[unstable(feature = "allocator_api", + reason = "the precise API and guarantees it provides may be tweaked.", + issue = "27700")] +impl Error for allocator::AllocErr { + fn description(&self) -> &str { + allocator::AllocErr::description(self) + } +} + +#[unstable(feature = "allocator_api", + reason = "the precise API and guarantees it provides may be tweaked.", + issue = "27700")] +impl Error for allocator::CannotReallocInPlace { + fn description(&self) -> &str { + allocator::CannotReallocInPlace::description(self) + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl Error for str::ParseBoolError { fn description(&self) -> &str { "failed to parse bool" } diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index f307fbb7c00..6938aefb522 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -245,6 +245,7 @@ // std is implemented with unstable features, many of which are internal // compiler details that will never be stable #![feature(alloc)] +#![feature(allocator_api)] #![feature(allow_internal_unstable)] #![feature(asm)] #![feature(associated_consts)] From 5354b91ce5afd0b42f761b5ec7459612a5d6cadd Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Sat, 17 Jun 2017 11:48:01 -0700 Subject: [PATCH 07/10] Correct location of unstable book docs --- .../src/library-features/{allocator_api.md => allocator-api.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/doc/unstable-book/src/library-features/{allocator_api.md => allocator-api.md} (100%) diff --git a/src/doc/unstable-book/src/library-features/allocator_api.md b/src/doc/unstable-book/src/library-features/allocator-api.md similarity index 100% rename from src/doc/unstable-book/src/library-features/allocator_api.md rename to src/doc/unstable-book/src/library-features/allocator-api.md From 879ec55d2eb89dea357812577eaeea583a6e4926 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 19 Jun 2017 07:49:50 -0700 Subject: [PATCH 08/10] Ignore test for 
not-closed issue Confirmed on IRC that the bug isn't fully fixed, and the "resurgence" here isn't the fault of this PR. --- src/test/run-pass/issue-41696.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/run-pass/issue-41696.rs b/src/test/run-pass/issue-41696.rs index ae57e0cf255..40fbf5ba75c 100644 --- a/src/test/run-pass/issue-41696.rs +++ b/src/test/run-pass/issue-41696.rs @@ -9,6 +9,7 @@ // except according to those terms. // this used to cause exponential code-size blowup during LLVM passes. +// ignore-test FIXME #41696 // min-llvm-version 3.9 #![feature(test)] From 609d43a15a59e7610b138335b3d5589e1a7d6187 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 19 Jun 2017 07:51:00 -0700 Subject: [PATCH 09/10] Minor Allocator doc fix --- src/liballoc/allocator.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/liballoc/allocator.rs b/src/liballoc/allocator.rs index 752acbd0b45..9bddce29957 100644 --- a/src/liballoc/allocator.rs +++ b/src/liballoc/allocator.rs @@ -370,7 +370,6 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { } } -/// An implementation of `Allocator` can allocate, reallocate, and /// An implementation of `Alloc` can allocate, reallocate, and /// deallocate arbitrary blocks of data described via `Layout`. /// From 55a629d496f9393dff5c3a8d4511bf2686bf365b Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 19 Jun 2017 12:40:51 -0700 Subject: [PATCH 10/10] Ignore a spuriously failing test on asmjs Other tests are already ignored for missing `rust_begin_unwind`, let's add another. --- src/test/run-pass/vec-macro-no-std.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/test/run-pass/vec-macro-no-std.rs b/src/test/run-pass/vec-macro-no-std.rs index 5dd551ff513..f21027afac3 100644 --- a/src/test/run-pass/vec-macro-no-std.rs +++ b/src/test/run-pass/vec-macro-no-std.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+// ignore-emscripten missing rust_begin_unwind + #![feature(lang_items, start, libc, alloc)] #![no_std]