Auto merge of #42313 - pnkfelix:allocator-integration, r=alexcrichton

Allocator integration

Let's start getting some feedback on `trait Alloc`.

Here are:
 * the `trait Alloc` itself,
 * the `struct Layout` and `enum AllocErr` that its API relies on
 * a `struct HeapAlloc` that exposes the system allocator as an instance of `Alloc`
 * an integration of `Alloc` with `RawVec`
 * ~~an integration of `Alloc` with `Vec`~~

 TODO
 * [x] split `fn realloc_in_place` into `grow` and `shrink` variants
 * [x] add `# Unsafety` and `# Errors` sections to documentation for all relevant methods
 * [x] remove `Vec` integration with `Allocator`
 * [x] add `allocate_zeroed` impl to `HeapAllocator`
 * [x] remove typedefs e.g. `type Size = usize;`
 * [x] impl `trait Error` for all error types in PR
 * [x] make `Layout::from_size_align` public
 * [x] clarify docs of `fn padding_needed_for`.
 * [x] revise `Layout` constructors to ensure that [size+align combination is valid](https://github.com/rust-lang/rust/pull/42313#issuecomment-306845446)
 * [x] resolve mismatch re requirements of align on dealloc. See [comment](https://github.com/rust-lang/rust/pull/42313#issuecomment-306202489).
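
For a feel of the API surface under discussion, here is a rough sketch (not part of this diff) of allocating and freeing a block through `HeapAlloc`, using `Layout` and `AllocErr` as they appear in this PR. Paths and feature-gate names are assumptions based on the code below and may shift as the remaining TODO items land:

```rust
// Nightly only; gate names are assumptions based on this PR and may be incomplete.
#![feature(alloc, allocator_api)]

extern crate alloc;

use alloc::allocator::{Alloc, Layout};
use alloc::heap::HeapAlloc;

fn main() {
    unsafe {
        let mut a = HeapAlloc;
        // Describe a 1 KiB request with 8-byte alignment; the constructor
        // rejects invalid size/align combinations (unwrapped here for brevity).
        let layout = Layout::from_size_align(1024, 8).unwrap();
        match a.alloc(layout) {
            Ok(ptr) => {
                // ... use the memory, then hand it back with an identical layout ...
                a.dealloc(ptr, Layout::from_size_align(1024, 8).unwrap());
            }
            // On failure the error carries the layout that could not be satisfied;
            // `oom` is the trait's "give up" hook and aborts.
            Err(err) => a.oom(err),
        }
    }
}
```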
Committed by bors on 2017-06-20 05:02:19 +00:00 as commit 1143eb26a2.
9 changed files with 1387 additions and 128 deletions.


@ -0,0 +1,15 @@
# `allocator_api`
The tracking issue for this feature is [#32838]
[#32838]: https://github.com/rust-lang/rust/issues/32838
------------------------
Sometimes you want the memory for one collection to use a different
allocator than the memory for another collection. In this case,
replacing the global allocator is not a workable option. Instead,
you need to pass in an instance of an `Alloc` to each collection
for which you want a custom allocator.
TBD
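
As a rough illustration of that pattern, modeled on the `allocator_param` test added to `raw_vec.rs` later in this diff; the `CountingAlloc` type below is hypothetical, and the paths and feature gates are assumptions rather than something this PR documents:

```rust
// Illustrative only; paths, gates, and `CountingAlloc` are assumptions.
#![feature(alloc, allocator_api)]

extern crate alloc;

use alloc::allocator::{Alloc, AllocErr, Layout};
use alloc::heap::HeapAlloc;
use alloc::raw_vec::RawVec;

/// A toy allocator that counts the bytes it hands out and forwards the
/// real work to the system heap.
struct CountingAlloc {
    allocated: usize,
}

unsafe impl Alloc for CountingAlloc {
    unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
        self.allocated += layout.size();
        HeapAlloc.alloc(layout)
    }

    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
        HeapAlloc.dealloc(ptr, layout)
    }
}

fn main() {
    let a = CountingAlloc { allocated: 0 };
    // The buffer carries its allocator; every allocation it performs goes
    // through `CountingAlloc` instead of the global heap functions.
    let buf: RawVec<u64, CountingAlloc> = RawVec::with_capacity_in(8, a);
    assert_eq!(buf.alloc().allocated, 64); // 8 elements * 8 bytes each
}
```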

src/liballoc/allocator.rs (new file, 1037 lines added)

(File diff suppressed because it is too large.)
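
Since that file's diff is suppressed here, the following is an abbreviated sketch of the shape of `trait Alloc` as it can be inferred from the `HeapAlloc` impl and the `RawVec` call sites below; it is not the actual contents of `allocator.rs`, which also holds the `Layout`, `AllocErr`, and `CannotReallocInPlace` definitions, the defaulted method bodies, and further helpers:

```rust
// Inferred shape only -- not the real allocator.rs.
pub unsafe trait Alloc {
    // The two methods an implementor must provide (the `BoundedAlloc`
    // test allocator later in this diff implements only these).
    unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr>;
    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout);

    // Defaulted methods that `HeapAlloc` overrides and `RawVec` calls:
    unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr>;
    unsafe fn realloc(&mut self, ptr: *mut u8, layout: Layout, new_layout: Layout)
                      -> Result<*mut u8, AllocErr>;
    unsafe fn grow_in_place(&mut self, ptr: *mut u8, layout: Layout, new_layout: Layout)
                            -> Result<(), CannotReallocInPlace>;
    fn usable_size(&self, layout: &Layout) -> (usize, usize);
    fn oom(&mut self, err: AllocErr) -> !;

    // ...plus typed convenience helpers such as `alloc_array::<T>(n)` and
    // `realloc_array::<T>(ptr, old_n, new_n)`, both returning
    // `Result<Unique<T>, AllocErr>`, which `RawVec` uses below.
}
```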

src/liballoc/heap.rs

@ -15,7 +15,8 @@
tracing garbage collector",
issue = "27700")]
use core::{isize, usize};
use allocator::{Alloc, AllocErr, CannotReallocInPlace, Layout};
use core::{isize, usize, cmp, ptr};
use core::intrinsics::{min_align_of_val, size_of_val};
#[allow(improper_ctypes)]
@ -44,6 +45,82 @@ fn check_size_and_alignment(size: usize, align: usize) {
align);
}
#[derive(Copy, Clone, Default, Debug)]
pub struct HeapAlloc;
unsafe impl Alloc for HeapAlloc {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
let addr = allocate(layout.size(), layout.align());
if addr.is_null() {
Err(AllocErr::Exhausted { request: layout })
} else {
Ok(addr)
}
}
unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
let addr = allocate_zeroed(layout.size(), layout.align());
if addr.is_null() {
Err(AllocErr::Exhausted { request: layout })
} else {
Ok(addr)
}
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
deallocate(ptr, layout.size(), layout.align());
}
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
(layout.size(), usable_size(layout.size(), layout.align()))
}
unsafe fn realloc(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout)
-> Result<*mut u8, AllocErr>
{
let old_size = layout.size();
let new_size = new_layout.size();
if layout.align() == new_layout.align() {
let new_ptr = reallocate(ptr, old_size, new_size, layout.align());
if new_ptr.is_null() {
// We assume `reallocate` already tried alloc + copy +
// dealloc fallback; thus pointless to repeat effort
Err(AllocErr::Exhausted { request: new_layout })
} else {
Ok(new_ptr)
}
} else {
// if alignments don't match, fall back on alloc + copy + dealloc
let result = self.alloc(new_layout);
if let Ok(new_ptr) = result {
ptr::copy_nonoverlapping(ptr as *const u8, new_ptr, cmp::min(old_size, new_size));
self.dealloc(ptr, layout);
}
result
}
}
unsafe fn grow_in_place(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout)
-> Result<(), CannotReallocInPlace>
{
// grow_in_place spec requires this, and the spec for reallocate_inplace
// makes it hard to detect failure if it does not hold.
debug_assert!(new_layout.size() >= layout.size());
if layout.align() != new_layout.align() { // reallocate_inplace requires this.
return Err(CannotReallocInPlace);
}
let usable = reallocate_inplace(ptr, layout.size(), new_layout.size(), layout.align());
if usable >= new_layout.size() { Ok(()) } else { Err(CannotReallocInPlace) }
}
}
// FIXME: #13996: mark the `allocate` and `reallocate` return value as `noalias`
/// Return a pointer to `size` bytes of memory aligned to `align`.

src/liballoc/lib.rs

@ -143,6 +143,10 @@
#[macro_use]
mod macros;
// Allocator trait and helper struct definitions
pub mod allocator;
// Heaps provided for low-level allocation strategies
pub mod heap;

src/liballoc/raw_vec.rs

@ -8,11 +8,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::ptr::Unique;
use allocator::{Alloc, Layout};
use core::ptr::{self, Unique};
use core::mem;
use core::slice;
use heap;
use super::oom;
use heap::{HeapAlloc};
use super::boxed::Box;
use core::ops::Drop;
use core::cmp;
@ -45,17 +45,16 @@
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
#[allow(missing_debug_implementations)]
pub struct RawVec<T> {
pub struct RawVec<T, A: Alloc = HeapAlloc> {
ptr: Unique<T>,
cap: usize,
a: A,
}
impl<T> RawVec<T> {
/// Creates the biggest possible RawVec without allocating. If T has positive
/// size, then this makes a RawVec with capacity 0. If T has 0 size, then it
/// makes a RawVec with capacity `usize::MAX`. Useful for implementing
/// delayed allocation.
pub fn new() -> Self {
impl<T, A: Alloc> RawVec<T, A> {
/// Like `new` but parameterized over the choice of allocator for
/// the returned RawVec.
pub fn new_in(a: A) -> Self {
// !0 is usize::MAX. This branch should be stripped at compile time.
let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };
@ -63,13 +62,71 @@ pub fn new() -> Self {
RawVec {
ptr: Unique::empty(),
cap: cap,
a: a,
}
}
/// Creates a RawVec with exactly the capacity and alignment requirements
/// for a `[T; cap]`. This is equivalent to calling RawVec::new when `cap` is 0
/// or T is zero-sized. Note that if `T` is zero-sized this means you will *not*
/// get a RawVec with the requested capacity!
/// Like `with_capacity` but parameterized over the choice of
/// allocator for the returned RawVec.
#[inline]
pub fn with_capacity_in(cap: usize, a: A) -> Self {
RawVec::allocate_in(cap, false, a)
}
/// Like `with_capacity_zeroed` but parameterized over the choice
/// of allocator for the returned RawVec.
#[inline]
pub fn with_capacity_zeroed_in(cap: usize, a: A) -> Self {
RawVec::allocate_in(cap, true, a)
}
fn allocate_in(cap: usize, zeroed: bool, mut a: A) -> Self {
unsafe {
let elem_size = mem::size_of::<T>();
let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow");
alloc_guard(alloc_size);
// handles ZSTs and `cap = 0` alike
let ptr = if alloc_size == 0 {
mem::align_of::<T>() as *mut u8
} else {
let align = mem::align_of::<T>();
let result = if zeroed {
a.alloc_zeroed(Layout::from_size_align(alloc_size, align).unwrap())
} else {
a.alloc(Layout::from_size_align(alloc_size, align).unwrap())
};
match result {
Ok(ptr) => ptr,
Err(err) => a.oom(err),
}
};
RawVec {
ptr: Unique::new(ptr as *mut _),
cap: cap,
a: a,
}
}
}
}
impl<T> RawVec<T, HeapAlloc> {
/// Creates the biggest possible RawVec (on the system heap)
/// without allocating. If T has positive size, then this makes a
/// RawVec with capacity 0. If T has 0 size, then it makes a
/// RawVec with capacity `usize::MAX`. Useful for implementing
/// delayed allocation.
pub fn new() -> Self {
Self::new_in(HeapAlloc)
}
/// Creates a RawVec (on the system heap) with exactly the
/// capacity and alignment requirements for a `[T; cap]`. This is
/// equivalent to calling RawVec::new when `cap` is 0 or T is
/// zero-sized. Note that if `T` is zero-sized this means you will
/// *not* get a RawVec with the requested capacity!
///
/// # Panics
///
@ -82,56 +139,46 @@ pub fn new() -> Self {
/// Aborts on OOM
#[inline]
pub fn with_capacity(cap: usize) -> Self {
RawVec::allocate(cap, false)
RawVec::allocate_in(cap, false, HeapAlloc)
}
/// Like `with_capacity` but guarantees the buffer is zeroed.
#[inline]
pub fn with_capacity_zeroed(cap: usize) -> Self {
RawVec::allocate(cap, true)
RawVec::allocate_in(cap, true, HeapAlloc)
}
}
fn allocate(cap: usize, zeroed: bool) -> Self {
unsafe {
let elem_size = mem::size_of::<T>();
let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow");
alloc_guard(alloc_size);
// handles ZSTs and `cap = 0` alike
let ptr = if alloc_size == 0 {
mem::align_of::<T>() as *mut u8
} else {
let align = mem::align_of::<T>();
let ptr = if zeroed {
heap::allocate_zeroed(alloc_size, align)
} else {
heap::allocate(alloc_size, align)
};
if ptr.is_null() {
oom()
}
ptr
};
RawVec {
ptr: Unique::new(ptr as *mut _),
cap: cap,
}
}
}
/// Reconstitutes a RawVec from a pointer and capacity.
impl<T, A: Alloc> RawVec<T, A> {
/// Reconstitutes a RawVec from a pointer, capacity, and allocator.
///
/// # Undefined Behavior
///
/// The ptr must be allocated, and with the given capacity. The
/// The ptr must be allocated (via the given allocator `a`), and with the given capacity. The
/// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
/// If the ptr and capacity come from a RawVec created via `a`, then this is guaranteed.
pub unsafe fn from_raw_parts_in(ptr: *mut T, cap: usize, a: A) -> Self {
RawVec {
ptr: Unique::new(ptr),
cap: cap,
a: a,
}
}
}
impl<T> RawVec<T, HeapAlloc> {
/// Reconstitutes a RawVec from a pointer and capacity.
///
/// # Undefined Behavior
///
/// The ptr must be allocated (on the system heap), and with the given capacity. The
/// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
/// If the ptr and capacity come from a RawVec, then this is guaranteed.
pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
RawVec {
ptr: Unique::new(ptr),
cap: cap,
a: HeapAlloc,
}
}
@ -145,7 +192,7 @@ pub fn from_box(mut slice: Box<[T]>) -> Self {
}
}
impl<T> RawVec<T> {
impl<T, A: Alloc> RawVec<T, A> {
/// Gets a raw pointer to the start of the allocation. Note that this is
/// Unique::empty() if `cap = 0` or T is zero-sized. In the former case, you must
/// be careful.
@ -165,6 +212,16 @@ pub fn cap(&self) -> usize {
}
}
/// Returns a shared reference to the allocator backing this RawVec.
pub fn alloc(&self) -> &A {
&self.a
}
/// Returns a mutable reference to the allocator backing this RawVec.
pub fn alloc_mut(&mut self) -> &mut A {
&mut self.a
}
/// Doubles the size of the type's backing allocation. This is common enough
/// to want to do that it's easiest to just have a dedicated method. Slightly
/// more efficient logic can be provided for this than the general case.
@ -215,32 +272,28 @@ pub fn double(&mut self) {
// 0, getting to here necessarily means the RawVec is overfull.
assert!(elem_size != 0, "capacity overflow");
let align = mem::align_of::<T>();
let (new_cap, ptr) = if self.cap == 0 {
let (new_cap, ptr_res) = if self.cap == 0 {
// skip to 4 because tiny Vec's are dumb; but not if that would cause overflow
let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
let ptr = heap::allocate(new_cap * elem_size, align);
(new_cap, ptr)
let ptr_res = self.a.alloc_array::<T>(new_cap);
(new_cap, ptr_res)
} else {
// Since we guarantee that we never allocate more than isize::MAX bytes,
// `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow
let new_cap = 2 * self.cap;
let new_alloc_size = new_cap * elem_size;
alloc_guard(new_alloc_size);
let ptr = heap::reallocate(self.ptr() as *mut _,
self.cap * elem_size,
new_alloc_size,
align);
(new_cap, ptr)
let ptr_res = self.a.realloc_array(self.ptr, self.cap, new_cap);
(new_cap, ptr_res)
};
// If allocate or reallocate fail, we'll get `null` back
if ptr.is_null() {
oom()
}
let uniq = match ptr_res {
Err(err) => self.a.oom(err),
Ok(uniq) => uniq,
};
self.ptr = Unique::new(ptr as *mut _);
self.ptr = uniq;
self.cap = new_cap;
}
}
@ -262,7 +315,6 @@ pub fn double(&mut self) {
pub fn double_in_place(&mut self) -> bool {
unsafe {
let elem_size = mem::size_of::<T>();
let align = mem::align_of::<T>();
// since we set the capacity to usize::MAX when elem_size is
// 0, getting to here necessarily means the RawVec is overfull.
@ -274,15 +326,20 @@ pub fn double_in_place(&mut self) -> bool {
let new_alloc_size = new_cap * elem_size;
alloc_guard(new_alloc_size);
let size = heap::reallocate_inplace(self.ptr() as *mut _,
self.cap * elem_size,
new_alloc_size,
align);
if size >= new_alloc_size {
// We can't directly divide `size`.
self.cap = new_cap;
let ptr = self.ptr() as *mut _;
let old_layout = Layout::new::<T>().repeat(self.cap).unwrap().0;
let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
match self.a.grow_in_place(ptr, old_layout, new_layout) {
Ok(_) => {
// We can't directly divide `size`.
self.cap = new_cap;
true
}
Err(_) => {
false
}
}
size >= new_alloc_size
}
}
@ -309,7 +366,6 @@ pub fn double_in_place(&mut self) -> bool {
pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
unsafe {
let elem_size = mem::size_of::<T>();
let align = mem::align_of::<T>();
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
@ -327,21 +383,19 @@ pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
alloc_guard(new_alloc_size);
let ptr = if self.cap == 0 {
heap::allocate(new_alloc_size, align)
let result = if self.cap == 0 {
self.a.alloc_array::<T>(new_cap)
} else {
heap::reallocate(self.ptr() as *mut _,
self.cap * elem_size,
new_alloc_size,
align)
self.a.realloc_array(self.ptr, self.cap, new_cap)
};
// If allocate or reallocate fail, we'll get `null` back
if ptr.is_null() {
oom()
}
let uniq = match result {
Err(err) => self.a.oom(err),
Ok(uniq) => uniq,
};
self.ptr = Unique::new(ptr as *mut _);
self.ptr = uniq;
self.cap = new_cap;
}
}
@ -408,9 +462,6 @@ fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) -> (usize
/// ```
pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
unsafe {
let elem_size = mem::size_of::<T>();
let align = mem::align_of::<T>();
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
@ -426,21 +477,18 @@ pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
// FIXME: may crash and burn on over-reserve
alloc_guard(new_alloc_size);
let ptr = if self.cap == 0 {
heap::allocate(new_alloc_size, align)
let result = if self.cap == 0 {
self.a.alloc_array::<T>(new_cap)
} else {
heap::reallocate(self.ptr() as *mut _,
self.cap * elem_size,
new_alloc_size,
align)
self.a.realloc_array(self.ptr, self.cap, new_cap)
};
// If allocate or reallocate fail, we'll get `null` back
if ptr.is_null() {
oom()
}
let uniq = match result {
Err(err) => self.a.oom(err),
Ok(uniq) => uniq,
};
self.ptr = Unique::new(ptr as *mut _);
self.ptr = uniq;
self.cap = new_cap;
}
}
@ -464,9 +512,6 @@ pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
/// `isize::MAX` bytes.
pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
unsafe {
let elem_size = mem::size_of::<T>();
let align = mem::align_of::<T>();
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
@ -479,18 +524,26 @@ pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) ->
return false;
}
let (_, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
let (new_cap, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
// FIXME: may crash and burn on over-reserve
alloc_guard(new_alloc_size);
let size = heap::reallocate_inplace(self.ptr() as *mut _,
self.cap * elem_size,
new_alloc_size,
align);
if size >= new_alloc_size {
self.cap = new_alloc_size / elem_size;
// Here, `cap < used_cap + needed_extra_cap <= new_cap`
// (regardless of whether `self.cap - used_cap` wrapped).
// Therefore we can safely call grow_in_place.
let ptr = self.ptr() as *mut _;
let old_layout = Layout::new::<T>().repeat(self.cap).unwrap().0;
let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
match self.a.grow_in_place(ptr, old_layout, new_layout) {
Ok(_) => {
self.cap = new_cap;
true
}
Err(_) => {
false
}
}
size >= new_alloc_size
}
}
@ -506,7 +559,6 @@ pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) ->
/// Aborts on OOM.
pub fn shrink_to_fit(&mut self, amount: usize) {
let elem_size = mem::size_of::<T>();
let align = mem::align_of::<T>();
// Set the `cap` because they might be about to promote to a `Box<[T]>`
if elem_size == 0 {
@ -518,24 +570,30 @@ pub fn shrink_to_fit(&mut self, amount: usize) {
assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
if amount == 0 {
mem::replace(self, RawVec::new());
// We want to create a new zero-length vector within the
// same allocator. We use ptr::write to avoid an
// erroneous attempt to drop the contents, and we use
// ptr::read to sidestep the restriction against destructuring
// types that implement Drop.
unsafe {
let a = ptr::read(&self.a as *const A);
self.dealloc_buffer();
ptr::write(self, RawVec::new_in(a));
}
} else if self.cap != amount {
unsafe {
// Overflow check is unnecessary as the vector is already at
// least this large.
let ptr = heap::reallocate(self.ptr() as *mut _,
self.cap * elem_size,
amount * elem_size,
align);
if ptr.is_null() {
oom()
match self.a.realloc_array(self.ptr, self.cap, amount) {
Err(err) => self.a.oom(err),
Ok(uniq) => self.ptr = uniq,
}
self.ptr = Unique::new(ptr as *mut _);
}
self.cap = amount;
}
}
}
impl<T> RawVec<T, HeapAlloc> {
/// Converts the entire buffer into `Box<[T]>`.
///
/// While it is not *strictly* Undefined Behavior to call
@ -553,21 +611,25 @@ pub unsafe fn into_box(self) -> Box<[T]> {
}
}
unsafe impl<#[may_dangle] T> Drop for RawVec<T> {
impl<T, A: Alloc> RawVec<T, A> {
/// Frees the memory owned by the RawVec *without* trying to Drop its contents.
fn drop(&mut self) {
pub unsafe fn dealloc_buffer(&mut self) {
let elem_size = mem::size_of::<T>();
if elem_size != 0 && self.cap != 0 {
let align = mem::align_of::<T>();
let num_bytes = elem_size * self.cap;
unsafe {
heap::deallocate(self.ptr() as *mut u8, num_bytes, align);
}
let ptr = self.ptr() as *mut u8;
let layout = Layout::new::<T>().repeat(self.cap).unwrap().0;
self.a.dealloc(ptr, layout);
}
}
}
unsafe impl<#[may_dangle] T, A: Alloc> Drop for RawVec<T, A> {
/// Frees the memory owned by the RawVec *without* trying to Drop its contents.
fn drop(&mut self) {
unsafe { self.dealloc_buffer(); }
}
}
// We need to guarantee the following:
@ -592,6 +654,46 @@ fn alloc_guard(alloc_size: usize) {
mod tests {
use super::*;
#[test]
fn allocator_param() {
use allocator::{Alloc, AllocErr};
// Writing a test of integration between third-party
// allocators and RawVec is a little tricky because the RawVec
// API does not expose fallible allocation methods, so we
// cannot check what happens when the allocator is exhausted
// (beyond detecting a panic).
//
// Instead, this just checks that the RawVec methods do at
// least go through the Allocator API when they reserve
// storage.
// A dumb allocator that consumes a fixed amount of fuel
// before allocation attempts start failing.
struct BoundedAlloc { fuel: usize }
unsafe impl Alloc for BoundedAlloc {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
let size = layout.size();
if size > self.fuel {
return Err(AllocErr::Unsupported { details: "fuel exhausted" });
}
match HeapAlloc.alloc(layout) {
ok @ Ok(_) => { self.fuel -= size; ok }
err @ Err(_) => err,
}
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
HeapAlloc.dealloc(ptr, layout)
}
}
let a = BoundedAlloc { fuel: 500 };
let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
assert_eq!(v.a.fuel, 450);
v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
assert_eq!(v.a.fuel, 250);
}
#[test]
fn reserve_does_not_overallocate() {
{
@ -624,4 +726,5 @@ fn reserve_does_not_overallocate() {
}
}
}

src/libstd/error.rs

@ -51,6 +51,7 @@
// coherence challenge (e.g., specialization, neg impls, etc) we can
// reconsider what crate these items belong in.
use alloc::allocator;
use any::TypeId;
use cell;
use char;
@ -221,6 +222,24 @@ impl Error for ! {
fn description(&self) -> &str { *self }
}
#[unstable(feature = "allocator_api",
reason = "the precise API and guarantees it provides may be tweaked.",
issue = "27700")]
impl Error for allocator::AllocErr {
fn description(&self) -> &str {
allocator::AllocErr::description(self)
}
}
#[unstable(feature = "allocator_api",
reason = "the precise API and guarantees it provides may be tweaked.",
issue = "27700")]
impl Error for allocator::CannotReallocInPlace {
fn description(&self) -> &str {
allocator::CannotReallocInPlace::description(self)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Error for str::ParseBoolError {
fn description(&self) -> &str { "failed to parse bool" }

src/libstd/lib.rs

@ -245,6 +245,7 @@
// std is implemented with unstable features, many of which are internal
// compiler details that will never be stable
#![feature(alloc)]
#![feature(allocator_api)]
#![feature(allow_internal_unstable)]
#![feature(asm)]
#![feature(associated_consts)]


@ -9,6 +9,7 @@
// except according to those terms.
// this used to cause exponential code-size blowup during LLVM passes.
// ignore-test FIXME #41696
// min-llvm-version 3.9
#![feature(test)]


@ -8,6 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-emscripten missing rust_begin_unwind
#![feature(lang_items, start, libc, alloc)]
#![no_std]