Auto merge of #122053 - erikdesjardins:alloca, r=nikic

Stop using LLVM struct types for alloca

The alloca type has no semantic meaning; only the size (and alignment, but we specify that explicitly) matters. Using `[N x i8]` is a more direct way to request `N` bytes, and it avoids relying on LLVM's struct layout. It is likely that a future LLVM version will move to an untyped alloca representation.
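
As a hedged sketch of the interface change (stand-in types, not the real `rustc_target` items), the codegen builder trait goes from type-based to size-based allocas:

```rust
/// Stand-ins for rustc_target::abi::{Size, Align}, simplified for
/// illustration only.
#[derive(Clone, Copy)]
pub struct Size(pub u64);
#[derive(Clone, Copy)]
pub struct Align(pub u64);

pub trait BuilderSketch {
    type Value;

    /// After this PR: allocate `size` bytes as an `[N x i8]` alloca
    /// with an explicit alignment. Previously this took a backend
    /// `Type`, and the alloca inherited LLVM's layout for that type.
    fn alloca(&mut self, size: Size, align: Align) -> Self::Value;

    /// Renamed from `byte_array_alloca`: a runtime-sized byte alloca.
    fn dynamic_alloca(&mut self, size: Self::Value, align: Align) -> Self::Value;
}
```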

Split out from #121577.

r? `@ghost`
Committed by bors on 2024-04-24 03:00:44 +00:00 as commit 29a56a3b1c.
36 changed files with 234 additions and 226 deletions

View File

@ -898,26 +898,20 @@ fn checked_binop(
self.gcc_checked_binop(oop, typ, lhs, rhs)
}
fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
// FIXME(antoyo): this check that we don't call get_aligned() a second time on a type.
// Ideally, we shouldn't need to do this check.
let aligned_type = if ty == self.cx.u128_type || ty == self.cx.i128_type {
ty
} else {
ty.get_aligned(align.bytes())
};
fn alloca(&mut self, size: Size, align: Align) -> RValue<'gcc> {
let ty = self.cx.type_array(self.cx.type_i8(), size.bytes()).get_aligned(align.bytes());
// TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
self.stack_var_count.set(self.stack_var_count.get() + 1);
self.current_func()
.new_local(
self.location,
aligned_type,
ty,
&format!("stack_var_{}", self.stack_var_count.get()),
)
.get_address(self.location)
}
fn byte_array_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
fn dynamic_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
unimplemented!();
}

View File

@ -531,7 +531,7 @@ fn store(
// We instead thus allocate some scratch space...
let scratch_size = cast.size(bx);
let scratch_align = cast.align(bx);
let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
let llscratch = bx.alloca(scratch_size, scratch_align);
bx.lifetime_start(llscratch, scratch_size);
// ... where we first store the value...

View File

@ -18,7 +18,7 @@
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::{self, Ty};
use rustc_span::{sym, Span, Symbol};
use rustc_target::abi::Align;
use rustc_target::abi::{Align, Size};
use crate::builder::Builder;
#[cfg(not(feature = "master"))]
@ -558,7 +558,7 @@ macro_rules! arith_binary {
let ze = bx.zext(result, bx.type_ix(expected_bytes * 8));
// Convert the integer to a byte array
let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
bx.store(ze, ptr, Align::ONE);
let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));

View File

@ -227,7 +227,7 @@ fn store(
// when passed by value, making it larger.
let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes());
// Allocate some scratch space...
let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
let llscratch = bx.alloca(scratch_size, scratch_align);
bx.lifetime_start(llscratch, scratch_size);
// ...store the value...
bx.store(val, llscratch, scratch_align);
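
For a concrete feel of the sizes involved, here is a hedged worked example matching the `call_twou16s` codegen test further down in this diff (assumed aarch64 numbers; plain integers stand in for the real API):

```rust
fn main() {
    // TwoU16s is 4 bytes in Rust layout; its cast ABI type on aarch64
    // is i64, so the scratch alloca is [8 x i8].
    let scratch_size: u64 = 8; // cast.size(bx)
    let layout_size: u64 = 4;  // self.layout.size
    // Only the meaningful bytes are copied into the scratch space.
    let copy_bytes = std::cmp::min(scratch_size, layout_size);
    assert_eq!(copy_bytes, 4); // matches `memcpy ... i64 4` in the test
}
```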

View File

@ -468,9 +468,10 @@ fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self
val
}
fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
fn alloca(&mut self, size: Size, align: Align) -> &'ll Value {
let mut bx = Builder::with_cx(self.cx);
bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
let ty = self.cx().type_array(self.cx().type_i8(), size.bytes());
unsafe {
let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
@ -478,10 +479,10 @@ fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
}
}
fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
fn dynamic_alloca(&mut self, size: &'ll Value, align: Align) -> &'ll Value {
unsafe {
let alloca =
llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), size, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}

View File

@ -18,7 +18,7 @@
use rustc_middle::ty::{self, GenericArgsRef, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::{sym, Span, Symbol};
use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size};
use rustc_target::spec::{HasTargetSpec, PanicStrategy};
use std::cmp::Ordering;
@ -649,8 +649,9 @@ fn codegen_msvc_try<'ll>(
// }
//
// More information can be found in libstd's seh.rs implementation.
let ptr_size = bx.tcx().data_layout.pointer_size;
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let slot = bx.alloca(bx.type_ptr(), ptr_align);
let slot = bx.alloca(ptr_size, ptr_align);
let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);
@ -920,15 +921,14 @@ fn codegen_emcc_try<'ll>(
// We need to pass two values to catch_func (ptr and is_rust_panic), so
// create an alloca and pass a pointer to that.
let ptr_size = bx.tcx().data_layout.pointer_size;
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let i8_align = bx.tcx().data_layout.i8_align.abi;
let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false);
let catch_data = bx.alloca(catch_data_type, ptr_align);
let catch_data_0 =
bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
bx.store(ptr, catch_data_0, ptr_align);
let catch_data_1 =
bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
// Required in order for there to be no padding between the fields.
assert!(i8_align <= ptr_align);
let catch_data = bx.alloca(2 * ptr_size, ptr_align);
bx.store(ptr, catch_data, ptr_align);
let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
bx.store(is_rust_panic, catch_data_1, i8_align);
let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
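
A hedged sanity check of the replacement layout (assumed 64-bit target; plain integers stand in for backend values):

```rust
fn main() {
    let ptr_size: u64 = 8; // bx.tcx().data_layout.pointer_size on 64-bit
    // catch_data is now a plain byte alloca: an exception pointer at
    // offset 0, then the is_rust_panic flag at offset ptr_size.
    let catch_data_size = 2 * ptr_size;
    let flag_offset = ptr_size; // catch_data_1 = catch_data + ptr_size
    // The assert above (i8_align <= ptr_align) guarantees the flag
    // store at this offset needs no extra padding.
    assert!(flag_offset + 1 <= catch_data_size);
}
```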
@ -1374,7 +1374,7 @@ macro_rules! require_simd {
let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
// Convert the integer to a byte array
let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
bx.store(ze, ptr, Align::ONE);
let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
return Ok(bx.load(array_ty, ptr, Align::ONE));

View File

@ -508,7 +508,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let ptr_size = bx.tcx().data_layout.pointer_size;
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let arg_argc = bx.const_int(cx.type_isize(), 2);
let arg_argv = bx.alloca(cx.type_array(cx.type_ptr(), 2), ptr_align);
let arg_argv = bx.alloca(2 * ptr_size, ptr_align);
bx.store(param_handle, arg_argv, ptr_align);
let arg_argv_el1 = bx.inbounds_ptradd(arg_argv, bx.const_usize(ptr_size.bytes()));
bx.store(param_system_table, arg_argv_el1, ptr_align);

View File

@ -1517,7 +1517,7 @@ fn codegen_argument(
// when passed by value, making it larger.
let copy_bytes = cmp::min(scratch_size.bytes(), arg.layout.size.bytes());
// Allocate some scratch space...
let llscratch = bx.alloca(bx.cast_backend_type(cast), scratch_align);
let llscratch = bx.alloca(scratch_size, scratch_align);
bx.lifetime_start(llscratch, scratch_size);
// ...memcpy the value...
bx.memcpy(

View File

@ -327,7 +327,7 @@ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
let llfield_ty = bx.cx().backend_type(field);
// Can't bitcast an aggregate, so round trip through memory.
let llptr = bx.alloca(llfield_ty, field.align.abi);
let llptr = bx.alloca(field.size, field.align.abi);
bx.store(*llval, llptr, field.align.abi);
*llval = bx.load(llfield_ty, llptr, field.align.abi);
}
@ -470,7 +470,7 @@ pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
let align_minus_1 = bx.sub(align, one);
let size_extra = bx.add(size, align_minus_1);
let min_align = Align::ONE;
let alloca = bx.byte_array_alloca(size_extra, min_align);
let alloca = bx.dynamic_alloca(size_extra, min_align);
let address = bx.ptrtoint(alloca, bx.type_isize());
let neg_address = bx.neg(address);
let offset = bx.and(neg_address, align_minus_1);
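
The context lines above use a classic over-allocate-then-round-up trick: a minimally aligned dynamic alloca of `size + align - 1` bytes always contains an `align`-aligned region of `size` bytes. A hedged standalone sketch of the pointer arithmetic (`u64` in place of backend values):

```rust
/// Round `addr` up to the next multiple of `align` (a power of two).
fn align_up(addr: u64, align: u64) -> u64 {
    let align_minus_1 = align - 1;
    // (-addr) & (align - 1) is the distance to the next aligned address.
    let offset = addr.wrapping_neg() & align_minus_1;
    addr + offset
}

fn main() {
    assert_eq!(align_up(13, 8), 16);
    assert_eq!(align_up(16, 8), 16); // already aligned: offset is 0
}
```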

View File

@ -81,7 +81,7 @@ pub fn alloca_aligned<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
align: Align,
) -> Self {
assert!(layout.is_sized(), "tried to statically allocate unsized place");
let tmp = bx.alloca(bx.cx().backend_type(layout), align);
let tmp = bx.alloca(layout.size, align);
Self::new_sized_aligned(tmp, layout, align)
}

View File

@ -144,8 +144,8 @@ fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::V
}
fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
fn byte_array_alloca(&mut self, len: Self::Value, align: Align) -> Self::Value;
fn alloca(&mut self, size: Size, align: Align) -> Self::Value;
fn dynamic_alloca(&mut self, size: Self::Value, align: Align) -> Self::Value;
fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;

View File

@ -37,23 +37,9 @@ pub fn array_char(f: fn(*const char)) {
f(&b as *const _);
f(&c as *const _);
// Any type of local array variable leads to stack protection with the
// "strong" heuristic. The 'basic' heuristic only adds stack protection to
// functions with local array variables of a byte-sized type, however. Since
// 'char' is 4 bytes in Rust, this function is not protected by the 'basic'
// heuristic
//
// (This test *also* takes the address of the local stack variables. We
// cannot know that this isn't what triggers the `strong` heuristic.
// However, the test strategy of passing the address of a stack array to an
// external function is sufficient to trigger the `basic` heuristic (see
// test `array_u8_large()`). Since the `basic` heuristic only checks for the
// presence of stack-local array variables, we can be confident that this
// test also captures this part of the `strong` heuristic specification.)
// all: __security_check_cookie
// strong: __security_check_cookie
// basic-NOT: __security_check_cookie
// basic: __security_check_cookie
// none-NOT: __security_check_cookie
// missing-NOT: __security_check_cookie
}
@ -231,8 +217,8 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
// Even though the local variable conceptually doesn't have its address
// taken, it's so large that the "move" is implemented with a reference to a
// stack-local variable in the ABI. Consequently, this function *is*
// protected by the `strong` heuristic. This is also the case for
// rvalue-references in C++, regardless of struct size:
// protected. This is also the case for rvalue-references in C++,
// regardless of struct size:
// ```
// cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
// #include <cstdint>
@ -246,7 +232,7 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
// all: __security_check_cookie
// strong: __security_check_cookie
// basic-NOT: __security_check_cookie
// basic: __security_check_cookie
// none-NOT: __security_check_cookie
// missing-NOT: __security_check_cookie
}
@ -259,9 +245,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
// A new instance of `Gigastruct` is passed to `f()`, without any apparent
// connection to this stack frame. Still, since instances of `Gigastruct`
// are sufficiently large, it is allocated in the caller stack frame and
// passed as a pointer. As such, this function is *also* protected by the
// `strong` heuristic, just like `local_large_var_moved`. This is also the
// case for pass-by-value of sufficiently large structs in C++:
// passed as a pointer. As such, this function is *also* protected, just
// like `local_large_var_moved`. This is also the case for pass-by-value
// of sufficiently large structs in C++:
// ```
// cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
// #include <cstdint>
@ -276,7 +262,7 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
// all: __security_check_cookie
// strong: __security_check_cookie
// basic-NOT: __security_check_cookie
// basic: __security_check_cookie
// none-NOT: __security_check_cookie
// missing-NOT: __security_check_cookie
}

View File

@ -37,23 +37,9 @@ pub fn array_char(f: fn(*const char)) {
f(&b as *const _);
f(&c as *const _);
// Any type of local array variable leads to stack protection with the
// "strong" heuristic. The 'basic' heuristic only adds stack protection to
// functions with local array variables of a byte-sized type, however. Since
// 'char' is 4 bytes in Rust, this function is not protected by the 'basic'
// heuristic
//
// (This test *also* takes the address of the local stack variables. We
// cannot know that this isn't what triggers the `strong` heuristic.
// However, the test strategy of passing the address of a stack array to an
// external function is sufficient to trigger the `basic` heuristic (see
// test `array_u8_large()`). Since the `basic` heuristic only checks for the
// presence of stack-local array variables, we can be confident that this
// test also captures this part of the `strong` heuristic specification.)
// all: __security_check_cookie
// strong: __security_check_cookie
// basic-NOT: __security_check_cookie
// basic: __security_check_cookie
// none-NOT: __security_check_cookie
// missing-NOT: __security_check_cookie
}
@ -239,8 +225,8 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
// Even though the local variable conceptually doesn't have its address
// taken, it's so large that the "move" is implemented with a reference to a
// stack-local variable in the ABI. Consequently, this function *is*
// protected by the `strong` heuristic. This is also the case for
// rvalue-references in C++, regardless of struct size:
// protected. This is also the case for rvalue-references in C++,
// regardless of struct size:
// ```
// cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
// #include <cstdint>
@ -254,7 +240,7 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
// all: __security_check_cookie
// strong: __security_check_cookie
// basic-NOT: __security_check_cookie
// basic: __security_check_cookie
// none-NOT: __security_check_cookie
// missing-NOT: __security_check_cookie
}
@ -267,9 +253,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
// A new instance of `Gigastruct` is passed to `f()`, without any apparent
// connection to this stack frame. Still, since instances of `Gigastruct`
// are sufficiently large, it is allocated in the caller stack frame and
// passed as a pointer. As such, this function is *also* protected by the
// `strong` heuristic, just like `local_large_var_moved`. This is also the
// case for pass-by-value of sufficiently large structs in C++:
// passed as a pointer. As such, this function is *also* protected, just
// like `local_large_var_moved`. This is also the case for pass-by-value
// of sufficiently large structs in C++:
// ```
// cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
// #include <cstdint>
@ -284,7 +270,7 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
// all: __security_check_cookie
// strong: __security_check_cookie
// basic-NOT: __security_check_cookie
// basic: __security_check_cookie
// none-NOT: __security_check_cookie
// missing-NOT: __security_check_cookie
}

View File

@ -11,6 +11,11 @@
//@ compile-flags: -C opt-level=2 -Z merge-functions=disabled
//@ min-llvm-version: 17.0.2
// NOTE: the heuristics for stack smash protection inappropriately rely on types in LLVM IR,
// despite those types having no semantic meaning. This means that the `basic` and `strong`
// settings do not behave in a coherent way. This is a known issue in LLVM.
// See comments on https://github.com/rust-lang/rust/issues/114903.
#![crate_type = "lib"]
#![allow(incomplete_features)]
@ -39,23 +44,9 @@ pub fn array_char(f: fn(*const char)) {
f(&b as *const _);
f(&c as *const _);
// Any type of local array variable leads to stack protection with the
// "strong" heuristic. The 'basic' heuristic only adds stack protection to
// functions with local array variables of a byte-sized type, however. Since
// 'char' is 4 bytes in Rust, this function is not protected by the 'basic'
// heuristic
//
// (This test *also* takes the address of the local stack variables. We
// cannot know that this isn't what triggers the `strong` heuristic.
// However, the test strategy of passing the address of a stack array to an
// external function is sufficient to trigger the `basic` heuristic (see
// test `array_u8_large()`). Since the `basic` heuristic only checks for the
// presence of stack-local array variables, we can be confident that this
// test also captures this part of the `strong` heuristic specification.)
// all: __stack_chk_fail
// strong: __stack_chk_fail
// basic-NOT: __stack_chk_fail
// basic: __stack_chk_fail
// none-NOT: __stack_chk_fail
// missing-NOT: __stack_chk_fail
}
@ -163,26 +154,11 @@ pub fn local_string_addr_taken(f: fn(&String)) {
f(&x);
// Taking the address of the local variable `x` leads to stack smash
// protection with the `strong` heuristic, but not with the `basic`
// heuristic. It does not matter that the reference is not mut.
//
// An interesting note is that a similar function in C++ *would* be
// protected by the `basic` heuristic, because `std::string` has a char
// array internally as a small object optimization:
// ```
// cat <<EOF | clang++ -O2 -fstack-protector -S -x c++ - -o - | grep stack_chk
// #include <string>
// void f(void (*g)(const std::string&)) {
// std::string x;
// g(x);
// }
// EOF
// ```
//
// protection. It does not matter that the reference is not mut.
// all: __stack_chk_fail
// strong: __stack_chk_fail
// basic-NOT: __stack_chk_fail
// basic: __stack_chk_fail
// none-NOT: __stack_chk_fail
// missing-NOT: __stack_chk_fail
}
@ -233,8 +209,8 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
// Even though the local variable conceptually doesn't have its address
// taken, it's so large that the "move" is implemented with a reference to a
// stack-local variable in the ABI. Consequently, this function *is*
// protected by the `strong` heuristic. This is also the case for
// rvalue-references in C++, regardless of struct size:
// protected. This is also the case for rvalue-references in C++,
// regardless of struct size:
// ```
// cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
// #include <cstdint>
@ -248,7 +224,7 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
// all: __stack_chk_fail
// strong: __stack_chk_fail
// basic-NOT: __stack_chk_fail
// basic: __stack_chk_fail
// none-NOT: __stack_chk_fail
// missing-NOT: __stack_chk_fail
}
@ -261,9 +237,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
// A new instance of `Gigastruct` is passed to `f()`, without any apparent
// connection to this stack frame. Still, since instances of `Gigastruct`
// are sufficiently large, it is allocated in the caller stack frame and
// passed as a pointer. As such, this function is *also* protected by the
// `strong` heuristic, just like `local_large_var_moved`. This is also the
// case for pass-by-value of sufficiently large structs in C++:
// passed as a pointer. As such, this function is *also* protected, just
// like `local_large_var_moved`. This is also the case for pass-by-value
// of sufficiently large structs in C++:
// ```
// cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
// #include <cstdint>
@ -275,10 +251,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
// EOF
// ```
// all: __stack_chk_fail
// strong: __stack_chk_fail
// basic-NOT: __stack_chk_fail
// basic: __stack_chk_fail
// none-NOT: __stack_chk_fail
// missing-NOT: __stack_chk_fail
}

View File

@ -56,7 +56,7 @@ struct Align16 {
#[no_mangle]
pub unsafe fn rust_to_c_increases_alignment(x: Align1) {
// i686-linux: start:
// i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca %Align1, align 4
// i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 4
// i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 4 {{.*}}[[ALLOCA]], ptr {{.*}}align 1 {{.*}}%x
// i686-linux-NEXT: call void @extern_c_align1({{.+}} [[ALLOCA]])
@ -90,7 +90,7 @@ pub unsafe fn rust_to_c_decreases_alignment(x: Align16) {
#[no_mangle]
pub unsafe extern "C" fn c_to_rust_increases_alignment(x: Align16) {
// i686-linux: start:
// i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca %Align16, align 16
// i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 16
// i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 16 {{.*}}[[ALLOCA]], ptr {{.*}}align 4 {{.*}}%0
// i686-linux-NEXT: call void @extern_rust_align16({{.+}} [[ALLOCA]])
@ -116,7 +116,7 @@ pub unsafe fn rust_to_c_decreases_alignment(x: Align16) {
#[no_mangle]
pub unsafe extern "C" fn c_to_rust_ref_increases_alignment(x: Align16) {
// i686-linux: start:
// i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca %Align16, align 16
// i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 16
// i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 16 {{.*}}[[ALLOCA]], ptr {{.*}}align 4 {{.*}}%0
// i686-linux-NEXT: call void @extern_rust_ref_align16({{.+}} [[ALLOCA]])

View File

@ -106,20 +106,20 @@ pub struct ForceAlign16 {
pub unsafe fn call_na1(x: NaturalAlign1) {
// CHECK: start:
// m68k: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1
// m68k: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 1
// m68k: call void @natural_align_1({{.*}}byval([2 x i8]) align 1{{.*}} [[ALLOCA]])
// wasm: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1
// wasm: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 1
// wasm: call void @natural_align_1({{.*}}byval([2 x i8]) align 1{{.*}} [[ALLOCA]])
// x86_64-linux: call void @natural_align_1(i16
// x86_64-windows: call void @natural_align_1(i16
// i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4
// i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 4
// i686-linux: call void @natural_align_1({{.*}}byval([2 x i8]) align 4{{.*}} [[ALLOCA]])
// i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4
// i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 4
// i686-windows: call void @natural_align_1({{.*}}byval([2 x i8]) align 4{{.*}} [[ALLOCA]])
natural_align_1(x);
}
@ -134,10 +134,10 @@ pub unsafe fn call_na2(x: NaturalAlign2) {
// x86_64-linux-NEXT: call void @natural_align_2
// x86_64-windows-NEXT: call void @natural_align_2
// i686-linux: [[ALLOCA:%[0-9]+]] = alloca %NaturalAlign2, align 4
// i686-linux: [[ALLOCA:%[0-9]+]] = alloca [34 x i8], align 4
// i686-linux: call void @natural_align_2({{.*}}byval([34 x i8]) align 4{{.*}} [[ALLOCA]])
// i686-windows: [[ALLOCA:%[0-9]+]] = alloca %NaturalAlign2, align 4
// i686-windows: [[ALLOCA:%[0-9]+]] = alloca [34 x i8], align 4
// i686-windows: call void @natural_align_2({{.*}}byval([34 x i8]) align 4{{.*}} [[ALLOCA]])
natural_align_2(x);
}

View File

@ -18,7 +18,7 @@ pub struct Nested64 {
// CHECK-LABEL: @align64
#[no_mangle]
pub fn align64(a: u32) -> Align64 {
// CHECK: %a64 = alloca %Align64, align 64
// CHECK: %a64 = alloca [64 x i8], align 64
// CHECK: call void @llvm.memcpy.{{.*}}(ptr align 64 %{{.*}}, ptr align 64 %{{.*}}, i{{[0-9]+}} 64, i1 false)
let a64 = Align64::A(a);
a64
@ -27,7 +27,7 @@ pub fn align64(a: u32) -> Align64 {
// CHECK-LABEL: @nested64
#[no_mangle]
pub fn nested64(a: u8, b: u32, c: u16) -> Nested64 {
// CHECK: %n64 = alloca %Nested64, align 64
// CHECK: %n64 = alloca [128 x i8], align 64
let n64 = Nested64 { a, b: Align64::B(b), c };
n64
}

View File

@ -26,7 +26,7 @@ pub enum Enum64 {
// CHECK-LABEL: @align64
#[no_mangle]
pub fn align64(i : i32) -> Align64 {
// CHECK: %a64 = alloca %Align64, align 64
// CHECK: %a64 = alloca [64 x i8], align 64
// CHECK: call void @llvm.memcpy.{{.*}}(ptr align 64 %{{.*}}, ptr align 64 %{{.*}}, i{{[0-9]+}} 64, i1 false)
let a64 = Align64(i);
a64
@ -44,7 +44,7 @@ pub fn align64_load(a: Align64) -> i32 {
// CHECK-LABEL: @nested64
#[no_mangle]
pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
// CHECK: %n64 = alloca %Nested64, align 64
// CHECK: %n64 = alloca [128 x i8], align 64
let n64 = Nested64 { a, b, c, d };
n64
}
@ -52,7 +52,7 @@ pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
// CHECK-LABEL: @enum4
#[no_mangle]
pub fn enum4(a: i32) -> Enum4 {
// CHECK: %e4 = alloca %Enum4, align 4
// CHECK: %e4 = alloca [8 x i8], align 4
let e4 = Enum4::A(a);
e4
}
@ -60,7 +60,7 @@ pub fn enum4(a: i32) -> Enum4 {
// CHECK-LABEL: @enum64
#[no_mangle]
pub fn enum64(a: Align64) -> Enum64 {
// CHECK: %e64 = alloca %Enum64, align 64
// CHECK: %e64 = alloca [128 x i8], align 64
let e64 = Enum64::A(a);
e64
}

View File

@ -18,7 +18,7 @@
#[no_mangle]
pub fn array_store(a: [u8; 4], p: &mut [u8; 4]) {
// CHECK-NOT: alloca
// CHECK: %[[TEMP:.+]] = alloca i32, [[TEMPALIGN:align [0-9]+]]
// CHECK: %[[TEMP:.+]] = alloca [4 x i8], [[TEMPALIGN:align [0-9]+]]
// CHECK-NOT: alloca
// CHECK: %a = alloca [4 x i8]
// CHECK-NOT: alloca

View File

@ -27,7 +27,7 @@
#[no_mangle]
pub fn long_integer_map(x: [u32; 512]) -> [u32; 512] {
// CHECK: start:
// CHECK-NEXT: alloca [512 x i32]
// CHECK-NEXT: alloca [2048 x i8]
// CHECK-NOT: alloca
// CHECK: mul <{{[0-9]+}} x i32>
// CHECK: add <{{[0-9]+}} x i32>

View File

@ -77,15 +77,20 @@ pub struct DoubleFloat {
// CHECK-LABEL: @call_twou16s
#[no_mangle]
pub unsafe fn call_twou16s() {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
// powerpc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i32]], align [[ABI_ALIGN:4]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// powerpc64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:4]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca %TwoU16s, align [[RUST_ALIGN:2]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
// CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 4, i1 false)
// CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i32]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @receives_twou16s([[ABI_TYPE]] [[ABI_VALUE]])
let x = TwoU16s { a: 1, b: 2 };
receives_twou16s(x);
@ -96,23 +101,23 @@ pub unsafe fn call_twou16s() {
pub unsafe fn return_twou16s() -> TwoU16s {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// powerpc64: [[RETVAL:%.+]] = alloca %TwoU16s, align 2
// powerpc64: [[RETVAL:%.+]] = alloca [4 x i8], align 2
// powerpc64: call void @returns_twou16s(ptr {{.+}} [[RETVAL]])
// The other targets copy the cast ABI type to an alloca.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca %TwoU16s, align [[RUST_ALIGN:2]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca %TwoU16s, align [[RUST_ALIGN:2]]
// sparc64: [[RUST_ALLOCA:%.+]] = alloca %TwoU16s, align [[RUST_ALIGN:2]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
// sparc64: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_twou16s()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_twou16s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_twou16s()
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i64]] @returns_twou16s()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i64]] @returns_twou16s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i64]] @returns_twou16s()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
@ -127,12 +132,12 @@ pub unsafe fn return_twou16s() -> TwoU16s {
// CHECK-LABEL: @call_fiveu16s
#[no_mangle]
pub unsafe fn call_fiveu16s() {
// CHECK: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca %FiveU16s, align 2
// CHECK: [[RUST_ALLOCA:%.+]] = alloca [10 x i8], align 2
// CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 10, i1 false)
// CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @receives_fiveu16s([[ABI_TYPE]] [[ABI_VALUE]])
let x = FiveU16s { a: 1, b: 2, c: 3, d: 4, e: 5 };
receives_fiveu16s(x);
@ -149,13 +154,13 @@ pub unsafe fn return_fiveu16s() -> FiveU16s {
// The other targets copy the cast ABI type to the sret pointer.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_fiveu16s()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_fiveu16s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_fiveu16s()
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
@ -170,15 +175,17 @@ pub unsafe fn return_fiveu16s() -> FiveU16s {
// CHECK-LABEL: @call_doubledouble
#[no_mangle]
pub unsafe fn call_doubledouble() {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x double\]]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, double }]], align [[ABI_ALIGN:8]]
// powerpc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, double }]], align [[ABI_ALIGN:8]]
// CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca %DoubleDouble, align [[RUST_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false)
// CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x double\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @receives_doubledouble([[ABI_TYPE]] [[ABI_VALUE]])
let x = DoubleDouble { f: 1., g: 2. };
receives_doubledouble(x);
@ -189,23 +196,23 @@ pub unsafe fn call_doubledouble() {
pub unsafe fn return_doubledouble() -> DoubleDouble {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// powerpc64: [[RETVAL:%.+]] = alloca %DoubleDouble, align 8
// powerpc64: [[RETVAL:%.+]] = alloca [16 x i8], align 8
// powerpc64: call void @returns_doubledouble(ptr {{.+}} [[RETVAL]])
// The other targets copy the cast ABI type to an alloca.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x double\]]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, double }]], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, double }]], align [[ABI_ALIGN:8]]
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleDouble, align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleDouble, align [[RUST_ALIGN:8]]
// sparc64: [[RUST_ALLOCA:%.+]] = alloca %DoubleDouble, align [[RUST_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// sparc64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doubledouble()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doubledouble()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doubledouble()
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x double\]]] @returns_doubledouble()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, double }]] @returns_doubledouble()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, double }]] @returns_doubledouble()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
@ -224,21 +231,21 @@ pub unsafe fn return_doubledouble() -> DoubleDouble {
// powerpc64-LABEL: @call_doublefloat
#[no_mangle]
pub unsafe fn call_doublefloat() {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, float }]], align [[ABI_ALIGN:8]]
// powerpc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [12 x i8], align [[ABI_ALIGN:8]]
// powerpc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
// powerpc64: [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// powerpc64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 12, i1 false)
// powerpc64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false)
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, float }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]])
// loongarch64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]])
@ -256,20 +263,20 @@ pub unsafe fn call_doublefloat() {
pub unsafe fn return_doublefloat() -> DoubleFloat {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// powerpc64: [[RETVAL:%.+]] = alloca %DoubleFloat, align 8
// powerpc64: [[RETVAL:%.+]] = alloca [16 x i8], align 8
// powerpc64: call void @returns_doublefloat(ptr {{.+}} [[RETVAL]])
// The other targets copy the cast ABI type to an alloca.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, float }]], align [[ABI_ALIGN:8]]
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [12 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doublefloat()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doublefloat()
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_doublefloat()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, float }]] @returns_doublefloat()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]

View File

@ -33,7 +33,7 @@ struct S {
pub fn test() {
let s = S { f1: 1, f2: 2, f3: 3 };
unsafe {
// CHECK: [[ALLOCA:%.+]] = alloca { i64, i32 }, align 8
// CHECK: [[ALLOCA:%.+]] = alloca [12 x i8], align 8
// CHECK: [[LOAD:%.+]] = load { i64, i32 }, ptr [[ALLOCA]], align 8
// CHECK: call void @foo({ i64, i32 } [[LOAD]])
foo(s);

View File

@ -12,7 +12,7 @@ pub fn main() {
foo(0, 1, i32::cmp);
}
// CHECK: %compare.dbg.spill = alloca {}, align 1
// CHECK: %compare.dbg.spill = alloca [0 x i8], align 1
// CHECK: call void @llvm.dbg.declare(metadata ptr %compare.dbg.spill, metadata ![[VAR:.*]], metadata !DIExpression()), !dbg !{{.*}}
// CHECK: ![[TYPE:.*]] = !DIDerivedType(tag: DW_TAG_pointer_type, name: "fn(&i32, &i32) -> core::cmp::Ordering", baseType: !{{.*}}, align: 1, dwarfAddressSpace: {{.*}})
// CHECK: ![[VAR]] = !DILocalVariable(name: "compare", scope: !{{.*}}, file: !{{.*}}, line: {{.*}}, type: ![[TYPE]], align: 1)

View File

@ -0,0 +1,59 @@
//@ compile-flags: -O --target wasm32-unknown-emscripten
//@ needs-llvm-components: webassembly
// Emscripten has its own unique implementation of catch_unwind (in `codegen_emcc_try`);
// make sure it generates something reasonable.
#![feature(no_core, lang_items, intrinsics, rustc_attrs)]
#![crate_type = "lib"]
#![no_std]
#![no_core]
#[lang="sized"] trait Sized { }
#[lang="freeze"] trait Freeze { }
#[lang="copy"] trait Copy { }
#[rustc_intrinsic]
fn size_of<T>() -> usize { loop {} }
extern "rust-intrinsic" {
fn catch_unwind(
try_fn: fn(_: *mut u8),
data: *mut u8,
catch_fn: fn(_: *mut u8, _: *mut u8)
) -> i32;
}
// CHECK-LABEL: @ptr_size
#[no_mangle]
pub fn ptr_size() -> usize {
// CHECK: ret [[PTR_SIZE:.*]]
size_of::<*mut u8>()
}
// CHECK-LABEL: @test_catch_unwind
#[no_mangle]
pub unsafe fn test_catch_unwind(
try_fn: fn(_: *mut u8),
data: *mut u8,
catch_fn: fn(_: *mut u8, _: *mut u8)
) -> i32 {
// CHECK: start:
// CHECK: [[ALLOCA:%.*]] = alloca
// CHECK: catch.i:
// CHECK: [[LANDINGPAD:%.*]] = landingpad
// CHECK: [[EXCEPTION:%.*]] = extractvalue {{.*}} [[LANDINGPAD]], 0
// CHECK: [[SELECTOR:%.*]] = extractvalue {{.*}} [[LANDINGPAD]], 1
// CHECK: [[IS_RUST_EXN:%.*]] = icmp eq {{.*}}[[SELECTOR]]
// CHECK: [[IS_RUST_EXN_I8:%.*]] = zext i1 [[IS_RUST_EXN]] to i8
// CHECK: store ptr [[EXCEPTION]], ptr [[ALLOCA]]
// CHECK: [[IS_RUST_SLOT:%.*]] = getelementptr inbounds i8, ptr [[ALLOCA]], [[PTR_SIZE]]
// CHECK: store i8 [[IS_RUST_EXN_I8]], ptr [[IS_RUST_SLOT]]
// CHECK: call void %catch_fn(ptr %data, ptr nonnull [[ALLOCA]])
catch_unwind(try_fn, data, catch_fn)
}

View File

@ -15,7 +15,7 @@ pub enum Enum0 {
// CHECK-NEXT: start:
// CHECK-NEXT: %1 = icmp eq i8 %0, 2
// CHECK-NEXT: %2 = and i8 %0, 1
// CHECK-NEXT: %_0.0 = select i1 %1, i8 13, i8 %2
// CHECK-NEXT: %{{.+}} = select i1 %1, i8 13, i8 %2
#[no_mangle]
pub fn match0(e: Enum0) -> u8 {
use Enum0::*;

View File

@ -6,7 +6,6 @@
// correctly.
// CHECK: %ScalarPair = type { i32, [3 x i32], i128 }
// CHECK: %Struct = type { i32, i32, [2 x i32], i128 }
#![feature(core_intrinsics)]
@ -43,7 +42,7 @@ pub fn store(x: &mut ScalarPair) {
#[no_mangle]
pub fn alloca() {
// CHECK-LABEL: @alloca(
// CHECK: [[X:%.*]] = alloca %ScalarPair, align 16
// CHECK: [[X:%.*]] = alloca [32 x i8], align 16
// CHECK: store i32 1, ptr %x, align 16
// CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %x, i64 16
// CHECK-NEXT: store i128 2, ptr [[GEP]], align 16
@ -55,7 +54,7 @@ pub fn alloca() {
pub fn load_volatile(x: &ScalarPair) -> ScalarPair {
// CHECK-LABEL: @load_volatile(
// CHECK-SAME: align 16 dereferenceable(32) %x
// CHECK: [[TMP:%.*]] = alloca %ScalarPair, align 16
// CHECK: [[TMP:%.*]] = alloca [32 x i8], align 16
// CHECK: [[LOAD:%.*]] = load volatile %ScalarPair, ptr %x, align 16
// CHECK-NEXT: store %ScalarPair [[LOAD]], ptr [[TMP]], align 16
// CHECK-NEXT: [[A:%.*]] = load i32, ptr [[TMP]], align 16
@ -67,7 +66,7 @@ pub fn load_volatile(x: &ScalarPair) -> ScalarPair {
#[no_mangle]
pub fn transmute(x: ScalarPair) -> (std::mem::MaybeUninit<i128>, i128) {
// CHECK-LABEL: define { i128, i128 } @transmute(i32 noundef %x.0, i128 noundef %x.1)
// CHECK: [[TMP:%.*]] = alloca { i128, i128 }, align 16
// CHECK: [[TMP:%.*]] = alloca [32 x i8], align 16
// CHECK-NEXT: store i32 %x.0, ptr [[TMP]], align 16
// CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 16
// CHECK-NEXT: store i128 %x.1, ptr [[GEP]], align 16
@ -92,7 +91,7 @@ pub struct Struct {
pub fn store_struct(x: &mut Struct) {
// CHECK-LABEL: @store_struct(
// CHECK-SAME: align 16 dereferenceable(32) %x
// CHECK: [[TMP:%.*]] = alloca %Struct, align 16
// CHECK: [[TMP:%.*]] = alloca [32 x i8], align 16
// CHECK: store i32 1, ptr [[TMP]], align 16
// CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 4
// CHECK-NEXT: store i32 2, ptr [[GEP1]], align 4

View File

@ -153,7 +153,7 @@ pub unsafe fn check_from_newtype(x: Scalar64) -> u64 {
// CHECK-LABEL: @check_aggregate_to_bool(
#[no_mangle]
pub unsafe fn check_aggregate_to_bool(x: Aggregate8) -> bool {
// CHECK: %x = alloca %Aggregate8, align 1
// CHECK: %x = alloca [1 x i8], align 1
// CHECK: %[[BYTE:.+]] = load i8, ptr %x, align 1
// CHECK: %[[BOOL:.+]] = trunc i8 %[[BYTE]] to i1
// CHECK: ret i1 %[[BOOL]]
@ -163,7 +163,7 @@ pub unsafe fn check_aggregate_to_bool(x: Aggregate8) -> bool {
// CHECK-LABEL: @check_aggregate_from_bool(
#[no_mangle]
pub unsafe fn check_aggregate_from_bool(x: bool) -> Aggregate8 {
// CHECK: %_0 = alloca %Aggregate8, align 1
// CHECK: %_0 = alloca [1 x i8], align 1
// CHECK: %[[BYTE:.+]] = zext i1 %x to i8
// CHECK: store i8 %[[BYTE]], ptr %_0, align 1
transmute(x)
@ -190,7 +190,7 @@ pub unsafe fn check_byte_from_bool(x: bool) -> u8 {
// CHECK-LABEL: @check_to_pair(
#[no_mangle]
pub unsafe fn check_to_pair(x: u64) -> Option<i32> {
// CHECK: %_0 = alloca %"core::option::Option<i32>", align 4
// CHECK: %_0 = alloca [8 x i8], align 4
// CHECK: store i64 %x, ptr %_0, align 4
transmute(x)
}
@ -202,7 +202,7 @@ pub unsafe fn check_from_pair(x: Option<i32>) -> u64 {
// immediates so we can write using the destination alloca's alignment.
const { assert!(std::mem::align_of::<Option<i32>>() == 4) };
// CHECK: %_0 = alloca i64, align 8
// CHECK: %_0 = alloca [8 x i8], align 8
// CHECK: store i32 %x.0, ptr %_0, align 8
// CHECK: store i32 %x.1, ptr %0, align 4
// CHECK: %[[R:.+]] = load i64, ptr %_0, align 8
@ -248,7 +248,7 @@ pub unsafe fn check_from_float(x: f32) -> u32 {
// CHECK-LABEL: @check_to_aggregate(
#[no_mangle]
pub unsafe fn check_to_aggregate(x: u64) -> Aggregate64 {
// CHECK: %_0 = alloca %Aggregate64, align 4
// CHECK: %_0 = alloca [8 x i8], align 4
// CHECK: store i64 %x, ptr %_0, align 4
// CHECK: %0 = load i64, ptr %_0, align 4
// CHECK: ret i64 %0
@ -258,7 +258,7 @@ pub unsafe fn check_to_aggregate(x: u64) -> Aggregate64 {
// CHECK-LABEL: @check_from_aggregate(
#[no_mangle]
pub unsafe fn check_from_aggregate(x: Aggregate64) -> u64 {
// CHECK: %x = alloca %Aggregate64, align 4
// CHECK: %x = alloca [8 x i8], align 4
// CHECK: %[[VAL:.+]] = load i64, ptr %x, align 4
// CHECK: ret i64 %[[VAL]]
transmute(x)
@ -452,7 +452,7 @@ pub unsafe fn check_maybe_uninit_pair(
// CHECK-LABEL: @check_to_overalign(
#[no_mangle]
pub unsafe fn check_to_overalign(x: u64) -> HighAlignScalar {
// CHECK: %_0 = alloca %HighAlignScalar, align 8
// CHECK: %_0 = alloca [8 x i8], align 8
// CHECK: store i64 %x, ptr %_0, align 8
// CHECK: %0 = load i64, ptr %_0, align 8
// CHECK: ret i64 %0
@ -462,7 +462,7 @@ pub unsafe fn check_to_overalign(x: u64) -> HighAlignScalar {
// CHECK-LABEL: @check_from_overalign(
#[no_mangle]
pub unsafe fn check_from_overalign(x: HighAlignScalar) -> u64 {
// CHECK: %x = alloca %HighAlignScalar, align 8
// CHECK: %x = alloca [8 x i8], align 8
// CHECK: %[[VAL:.+]] = load i64, ptr %x, align 8
// CHECK: ret i64 %[[VAL]]
transmute(x)

View File

@ -15,7 +15,7 @@ pub fn outer_function(x: S, y: S) -> usize {
// Check that we do not attempt to load from the spilled arg before it is assigned to
// when generating debuginfo.
// CHECK-LABEL: @outer_function
// CHECK: [[spill:%.*]] = alloca %"{closure@{{.*.rs}}:9:23: 9:25}"
// CHECK: [[spill:%.*]] = alloca
// CHECK-NOT: [[ptr_tmp:%.*]] = getelementptr inbounds i8, ptr [[spill]]
// CHECK-NOT: [[load:%.*]] = load ptr, ptr
// CHECK: call void @llvm.lifetime.start{{.*}}({{.*}}, ptr [[spill]])

View File

@ -11,7 +11,7 @@ pub fn new_from_array(x: u64) -> Arc<[u64]> {
// Ensure that we only generate one alloca for the array.
// CHECK: alloca
// CHECK-SAME: [1000 x i64]
// CHECK-SAME: [8000 x i8]
// CHECK-NOT: alloca
let array = [x; 1000];
Arc::new(array)

View File

@ -2,7 +2,7 @@
// do not ICE during codegen, and that the LLVM constant has the higher alignment.
//
//@ compile-flags: -Zmir-opt-level=0 -Zmir-enable-passes=+GVN
//@ compile-flags: -Cno-prepopulate-passes
//@ compile-flags: -Cno-prepopulate-passes --crate-type=lib
//@ only-64bit
struct S(i32);
@ -12,9 +12,10 @@
// CHECK: @0 = private unnamed_addr constant
// CHECK-SAME: , align 8
fn main() {
// CHECK-LABEL: @_ZN20overaligned_constant4main
// CHECK: [[full:%_.*]] = alloca %SmallStruct, align 8
#[no_mangle]
pub fn overaligned_constant() {
// CHECK-LABEL: @overaligned_constant
// CHECK: [[full:%_.*]] = alloca [32 x i8], align 8
// CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[full]], ptr align 8 @0, i64 32, i1 false)
// CHECK: %b.0 = load i32, ptr @0, align 4
// CHECK: %b.1 = load i32, ptr getelementptr inbounds ({{.*}}), align 4

View File

@ -51,7 +51,7 @@ pub struct BigPacked2 {
// CHECK-LABEL: @call_pkd1
#[no_mangle]
pub fn call_pkd1(f: fn() -> Array) -> BigPacked1 {
// CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array
// CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca [32 x i8]
// CHECK: call void %{{.*}}(ptr noalias nocapture noundef sret{{.*}} dereferenceable(32) [[ALLOCA]])
// CHECK: call void @llvm.memcpy.{{.*}}(ptr align 1 %{{.*}}, ptr align 4 %{{.*}}, i{{[0-9]+}} 32, i1 false)
// check that calls whose destination is a field of a packed struct
@ -63,7 +63,7 @@ pub fn call_pkd1(f: fn() -> Array) -> BigPacked1 {
// CHECK-LABEL: @call_pkd2
#[no_mangle]
pub fn call_pkd2(f: fn() -> Array) -> BigPacked2 {
// CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array
// CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca [32 x i8]
// CHECK: call void %{{.*}}(ptr noalias nocapture noundef sret{{.*}} dereferenceable(32) [[ALLOCA]])
// CHECK: call void @llvm.memcpy.{{.*}}(ptr align 2 %{{.*}}, ptr align 4 %{{.*}}, i{{[0-9]+}} 32, i1 false)
// check that calls whose destination is a field of a packed struct

View File

@ -23,7 +23,7 @@ pub fn test() {
let _s = S;
// Check that the personality slot alloca gets a lifetime start in each cleanup block, not just
// in the first one.
// CHECK: [[SLOT:%[0-9]+]] = alloca { ptr, i32{{.*}} }
// CHECK: [[SLOT:%[0-9]+]] = alloca [{{[0-9]+}} x i8]
// CHECK-LABEL: cleanup:
// CHECK: call void @llvm.lifetime.start.{{.*}}({{.*}})
// CHECK-LABEL: cleanup1:

View File

@ -14,9 +14,9 @@ pub struct ExtraSlice<'input> {
#[no_mangle]
pub fn extra(s: &[u8]) {
// CHECK: void @extra(
// CHECK: %slice.dbg.spill1 = alloca i32,
// CHECK: %slice.dbg.spill = alloca { ptr, i64 },
// CHECK: %s.dbg.spill = alloca { ptr, i64 },
// CHECK: %slice.dbg.spill1 = alloca [4 x i8],
// CHECK: %slice.dbg.spill = alloca [16 x i8],
// CHECK: %s.dbg.spill = alloca [16 x i8],
// CHECK: call void @llvm.dbg.declare(metadata ptr %s.dbg.spill, metadata ![[S_EXTRA:.*]], metadata !DIExpression()),
// CHECK: call void @llvm.dbg.declare(metadata ptr %slice.dbg.spill, metadata ![[SLICE_EXTRA:.*]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 128)),
// CHECK: call void @llvm.dbg.declare(metadata ptr %slice.dbg.spill1, metadata ![[SLICE_EXTRA]], metadata !DIExpression(DW_OP_LLVM_fragment, 128, 32)),

View File

@ -15,8 +15,8 @@ pub struct Bytes {
// dependent alignment
#[no_mangle]
pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
// CHECK: [[TMP:%.+]] = alloca i32
// CHECK: %y = alloca [4 x i8]
// CHECK: [[TMP:%.+]] = alloca [4 x i8], align 4
// CHECK: %y = alloca [4 x i8], align 1
// CHECK: store i32 %0, ptr [[TMP]]
// CHECK: call void @llvm.memcpy.{{.*}}(ptr align 1 {{.+}}, ptr align 4 {{.+}}, i{{[0-9]+}} 4, i1 false)
*x = y;
@ -27,8 +27,8 @@ pub struct Bytes {
// dependent alignment
#[no_mangle]
pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) {
// CHECK: [[TMP:%.+]] = alloca i32
// CHECK: %y = alloca %Bytes
// CHECK: [[TMP:%.+]] = alloca [4 x i8], align 4
// CHECK: %y = alloca [4 x i8], align 1
// CHECK: store i32 %0, ptr [[TMP]]
// CHECK: call void @llvm.memcpy.{{.*}}(ptr align 1 {{.+}}, ptr align 4 {{.+}}, i{{[0-9]+}} 4, i1 false)
*x = y;

View File

@ -15,7 +15,7 @@
// CHECK-LABEL: @swap_basic
#[no_mangle]
pub fn swap_basic(x: &mut KeccakBuffer, y: &mut KeccakBuffer) {
// CHECK: alloca [5 x [5 x i64]]
// CHECK: alloca [200 x i8]
// SAFETY: exclusive references are always valid to read/write,
// are non-overlapping, and nothing here panics so it's drop-safe.

View File

@ -12,7 +12,7 @@
pub fn swap_rgb48_manually(x: &mut RGB48, y: &mut RGB48) {
// FIXME: See #115212 for why this has an alloca again
// CHECK: alloca [3 x i16], align 2
// CHECK: alloca [6 x i8], align 2
// CHECK: call void @llvm.memcpy.p0.p0.i64({{.+}}, i64 6, i1 false)
// CHECK: call void @llvm.memcpy.p0.p0.i64({{.+}}, i64 6, i1 false)
// CHECK: call void @llvm.memcpy.p0.p0.i64({{.+}}, i64 6, i1 false)