Replace BlockAndBuilder with Builder.

Mark Simulacrum 2016-12-31 16:00:24 -07:00
parent d40d01bd0e
commit 1be170b01a
21 changed files with 344 additions and 357 deletions
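
The change in one sentence: `BlockAndBuilder` only ever paired an LLVM builder with a back-pointer to its `FunctionContext`, and everything call sites reached through that pointer (`bcx.fcx().llfn`, `bcx.fcx().alloca(..)`, `bcx.tcx()`) can instead be recovered from the builder's own insertion block, so the wrapper is deleted and bare `Builder` is passed around. A toy model of the new shape, with illustrative names rather than the compiler's real types:

// Toy model of the refactor, not rustc's actual types. Before, a wrapper
// carried a function back-pointer; after, the builder recovers the function
// from the block it is positioned in (LLVMGetBasicBlockParent in the diff).

struct Function { name: &'static str }
struct Block<'f> { func: &'f Function }
struct Builder<'f> { block: Block<'f> }

impl<'f> Builder<'f> {
    // Replaces bcx.fcx().llfn: walk from the insertion block to its parent.
    fn llfn(&self) -> &'f Function { self.block.func }

    // Replaces fcx.build_new_block(..): new blocks hang off the same parent.
    fn build_new_block(&self, _name: &str) -> Builder<'f> {
        Builder { block: Block { func: self.block.func } }
    }
}

fn main() {
    let f = Function { name: "example" };
    let bcx = Builder { block: Block { func: &f } };
    assert_eq!(bcx.llfn().name, "example");
    let normal = bcx.build_new_block("normal-return");
    assert_eq!(normal.llfn().name, "example");
}

In the real hunks below, the parent lookup is `llvm::LLVMGetBasicBlockParent(self.llbb())` in the new `Builder::llfn`.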

@ -10,7 +10,8 @@
use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace};
use base;
use common::{type_is_fat_ptr, BlockAndBuilder, C_uint};
use builder::Builder;
use common::{type_is_fat_ptr, C_uint};
use context::CrateContext;
use cabi_x86;
use cabi_x86_64;
@ -236,7 +237,7 @@ pub fn memory_ty(&self, ccx: &CrateContext) -> Type {
/// lvalue for the original Rust type of this argument/return.
/// Can be used for both storing formal arguments into Rust variables
/// or results of call/invoke instructions into their destinations.
pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) {
pub fn store(&self, bcx: &Builder, mut val: ValueRef, dst: ValueRef) {
if self.is_ignore() {
return;
}
@ -269,7 +270,7 @@ pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) {
// bitcasting to the struct type yields invalid cast errors.
// We instead thus allocate some scratch space...
let llscratch = bcx.fcx().alloca(ty, "abi_cast");
let llscratch = bcx.alloca(ty, "abi_cast");
base::Lifetime::Start.call(bcx, llscratch);
// ...where we first store the value...
@ -293,14 +294,16 @@ pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) {
}
}
pub fn store_fn_arg(&self, bcx: &BlockAndBuilder, idx: &mut usize, dst: ValueRef) {
pub fn store_fn_arg(
&self, bcx: &Builder, idx: &mut usize, dst: ValueRef
) {
if self.pad.is_some() {
*idx += 1;
}
if self.is_ignore() {
return;
}
let val = llvm::get_param(bcx.fcx().llfn, *idx as c_uint);
let val = llvm::get_param(bcx.llfn(), *idx as c_uint);
*idx += 1;
self.store(bcx, val, dst);
}

@ -49,6 +49,7 @@
use rustc::ty::layout;
use rustc::ty::{self, Ty, AdtKind};
use common::*;
use builder::Builder;
use glue;
use base;
use machine;
@ -303,7 +304,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>
/// Obtain a representation of the discriminant sufficient to translate
/// destructuring; this may or may not involve the actual discriminant.
pub fn trans_switch<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
t: Ty<'tcx>,
scrutinee: ValueRef,
range_assert: bool
@ -331,7 +332,7 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
/// Obtain the actual discriminant of a value.
pub fn trans_get_discr<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
t: Ty<'tcx>,
scrutinee: ValueRef,
cast_to: Option<Type>,
@ -374,7 +375,7 @@ pub fn trans_get_discr<'a, 'tcx>(
}
fn struct_wrapped_nullable_bitdiscr(
bcx: &BlockAndBuilder,
bcx: &Builder,
nndiscr: u64,
discrfield: &layout::FieldPath,
scrutinee: ValueRef
@ -387,7 +388,7 @@ fn struct_wrapped_nullable_bitdiscr(
}
/// Helper for cases where the discriminant is simply loaded.
fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
range_assert: bool)
-> ValueRef {
let llty = Type::from_integer(bcx.ccx, ity);
@ -415,7 +416,7 @@ fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u
/// discriminant-like value returned by `trans_switch`.
///
/// This should ideally be less tightly tied to `_match`.
pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef {
pub fn trans_case<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef {
let l = bcx.ccx.layout_of(t);
match *l {
layout::CEnum { discr, .. }
@ -436,7 +437,7 @@ pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value:
/// Set the discriminant for a new value of the given case of the given
/// representation.
pub fn trans_set_discr<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr
bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr
) {
let l = bcx.ccx.layout_of(t);
match *l {
@ -484,8 +485,8 @@ pub fn trans_set_discr<'a, 'tcx>(
}
}
fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>) -> bool {
bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64"
fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool {
bcx.ccx.sess().target.target.arch == "arm" || bcx.ccx.sess().target.target.arch == "aarch64"
}
fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
@ -498,7 +499,7 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
t: Ty<'tcx>,
val: MaybeSizedValue,
discr: Disr,
@ -560,7 +561,7 @@ pub fn trans_field_ptr<'a, 'tcx>(
}
fn struct_field_ptr<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
st: &layout::Struct,
fields: &Vec<Ty<'tcx>>,
val: MaybeSizedValue,

@ -15,6 +15,7 @@
use common::*;
use type_of;
use type_::Type;
use builder::Builder;
use rustc::hir;
use rustc::ty::Ty;
@ -25,7 +26,7 @@
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
ia: &hir::InlineAsm,
outputs: Vec<(ValueRef, Ty<'tcx>)>,
mut inputs: Vec<ValueRef>
@ -61,7 +62,7 @@ pub fn trans_inline_asm<'a, 'tcx>(
// Default per-arch clobbers
// Basically what clang does
let arch_clobbers = match &bcx.sess().target.target.arch[..] {
let arch_clobbers = match &bcx.ccx.sess().target.target.arch[..] {
"x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
_ => Vec::new()
};

@ -38,7 +38,7 @@
use middle::lang_items::StartFnLangItem;
use rustc::ty::subst::Substs;
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::adjustment::CustomCoerceUnsized;
use rustc::dep_graph::{DepNode, WorkProduct};
use rustc::hir::map as hir_map;
@ -51,7 +51,7 @@
use attributes;
use builder::Builder;
use callee::{Callee};
use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint};
use common::{C_bool, C_bytes_in_context, C_i32, C_uint};
use collector::{self, TransItemCollectionMode};
use common::{C_struct_in_context, C_u64, C_undef};
use common::{CrateContext, FunctionContext};
@ -161,7 +161,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate {
}
pub fn compare_simd_types<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
lhs: ValueRef,
rhs: ValueRef,
t: Ty<'tcx>,
@ -218,7 +218,7 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
pub fn unsize_thin_ptr<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
src: ValueRef,
src_ty: Ty<'tcx>,
dst_ty: Ty<'tcx>
@ -242,7 +242,7 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
src: ValueRef,
src_ty: Ty<'tcx>,
dst: ValueRef,
@ -272,10 +272,10 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
assert_eq!(def_a, def_b);
let src_fields = def_a.variants[0].fields.iter().map(|f| {
monomorphize::field_ty(bcx.tcx(), substs_a, f)
monomorphize::field_ty(bcx.ccx.tcx(), substs_a, f)
});
let dst_fields = def_b.variants[0].fields.iter().map(|f| {
monomorphize::field_ty(bcx.tcx(), substs_b, f)
monomorphize::field_ty(bcx.ccx.tcx(), substs_b, f)
});
let src = adt::MaybeSizedValue::sized(src);
@ -322,7 +322,7 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx
}
pub fn cast_shift_expr_rhs(
cx: &BlockAndBuilder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef
cx: &Builder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef
) -> ValueRef {
cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b))
}
@ -421,7 +421,7 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> V
/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values.
pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));
if common::type_is_fat_ptr(cx.ccx, t) {
@ -433,7 +433,7 @@ pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: Valu
}
}
pub fn store_fat_ptr<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
pub fn store_fat_ptr<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
data: ValueRef,
extra: ValueRef,
dst: ValueRef,
@ -459,7 +459,7 @@ pub fn load_fat_ptr<'a, 'tcx>(
(ptr, meta)
}
pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef {
pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
if val_ty(val) == Type::i1(bcx.ccx) {
bcx.zext(val, Type::i8(bcx.ccx))
} else {
@ -467,7 +467,7 @@ pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef {
}
}
pub fn to_immediate(bcx: &BlockAndBuilder, val: ValueRef, ty: Ty) -> ValueRef {
pub fn to_immediate(bcx: &Builder, val: ValueRef, ty: Ty) -> ValueRef {
if ty.is_bool() {
bcx.trunc(val, Type::i1(bcx.ccx))
} else {
@ -523,11 +523,13 @@ pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>,
b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
}
pub fn memcpy_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
dst: ValueRef,
src: ValueRef,
t: Ty<'tcx>,
align: Option<u32>) {
pub fn memcpy_ty<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
dst: ValueRef,
src: ValueRef,
t: Ty<'tcx>,
align: Option<u32>,
) {
let ccx = bcx.ccx;
if type_is_zero_size(ccx, t) {
@ -553,11 +555,6 @@ pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>,
b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
}
pub fn alloc_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef {
assert!(!ty.has_param_types());
bcx.fcx().alloca(type_of::type_of(bcx.ccx, ty), name)
}
pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) {
let _s = if ccx.sess().trans_stats() {
let mut instance_name = String::new();
@ -623,7 +620,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// We create an alloca to hold a pointer of type `ret.original_ty`
// which will hold the pointer to the right alloca which has the
// final ret value
fcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot")
bcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot")
};
let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value
let mut llarg_idx = fn_ty.ret.is_indirect() as usize;
@ -756,12 +753,7 @@ fn create_entry_fn(ccx: &CrateContext,
// `main` should respect same config for frame pointer elimination as rest of code
attributes::set_frame_pointer_elimination(ccx, llfn);
let llbb = unsafe {
let name = CString::new("top").unwrap();
llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, name.as_ptr())
};
let bld = Builder::with_ccx(ccx);
bld.position_at_end(llbb);
let bld = Builder::new_block(ccx, llfn, "top");
debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx, &bld);

@ -19,12 +19,16 @@
use type_::Type;
use value::Value;
use libc::{c_uint, c_char};
use rustc::ty::{Ty, TypeFoldable};
use type_of;
use std::borrow::Cow;
use std::ffi::CString;
use std::ptr;
use syntax_pos::Span;
// All Builders must have an llfn associated with them
#[must_use]
pub struct Builder<'a, 'tcx: 'a> {
pub llbuilder: BuilderRef,
pub ccx: &'a CrateContext<'a, 'tcx>,
@ -46,6 +50,20 @@ fn noname() -> *const c_char {
}
impl<'a, 'tcx> Builder<'a, 'tcx> {
pub fn new_block<'b>(ccx: &'a CrateContext<'a, 'tcx>, llfn: ValueRef, name: &'b str) -> Self {
let builder = Builder::with_ccx(ccx);
let llbb = unsafe {
let name = CString::new(name).unwrap();
llvm::LLVMAppendBasicBlockInContext(
ccx.llcx(),
llfn,
name.as_ptr()
)
};
builder.position_at_end(llbb);
builder
}
pub fn with_ccx(ccx: &'a CrateContext<'a, 'tcx>) -> Self {
// Create a fresh builder from the crate context.
let llbuilder = unsafe {
@ -57,6 +75,32 @@ pub fn with_ccx(ccx: &'a CrateContext<'a, 'tcx>) -> Self {
}
}
pub fn build_new_block<'b>(&self, name: &'b str) -> Builder<'a, 'tcx> {
let builder = Builder::with_ccx(self.ccx);
let llbb = unsafe {
let name = CString::new(name).unwrap();
llvm::LLVMAppendBasicBlockInContext(
self.ccx.llcx(),
self.llfn(),
name.as_ptr()
)
};
builder.position_at_end(llbb);
builder
}
pub fn llfn(&self) -> ValueRef {
unsafe {
llvm::LLVMGetBasicBlockParent(self.llbb())
}
}
pub fn llbb(&self) -> BasicBlockRef {
unsafe {
llvm::LLVMGetInsertBlock(self.llbuilder)
}
}
fn count_insn(&self, category: &str) {
if self.ccx.sess().trans_stats() {
self.ccx.stats().n_llvm_insns.set(self.ccx.stats().n_llvm_insns.get() + 1);
@ -435,6 +479,19 @@ pub fn not(&self, v: ValueRef) -> ValueRef {
}
}
pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
let builder = Builder::with_ccx(self.ccx);
builder.position_at_start(unsafe {
llvm::LLVMGetFirstBasicBlock(self.llfn())
});
builder.dynamic_alloca(ty, name)
}
pub fn alloca_ty(&self, ty: Ty<'tcx>, name: &str) -> ValueRef {
assert!(!ty.has_param_types());
self.alloca(type_of::type_of(self.ccx, ty), name)
}
pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef {
self.count_insn("alloca");
unsafe {

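The new `Builder::alloca` above encodes the usual LLVM discipline that stack slots belong in the function's entry block: it positions a second builder at `LLVMGetFirstBasicBlock(self.llfn())` and emits there, regardless of where the calling builder currently sits, which is what lets `FunctionContext::alloca` and its dedicated `alloca_builder` be removed below. A minimal sketch of that discipline over a toy instruction list (not the real LLVM bindings):

// Sketch only: models "allocas always go to the entry block" with plain
// vectors instead of LLVM basic blocks.

struct ToyFunction {
    entry: Vec<String>,        // the function's first basic block
    blocks: Vec<Vec<String>>,  // later blocks, by index
}

impl ToyFunction {
    // Mirrors Builder::alloca: reposition at the first block before emitting.
    fn alloca(&mut self, name: &str) -> String {
        let slot = format!("%{}", name);
        self.entry.push(format!("{} = alloca", slot));
        slot
    }

    // Mirrors dynamic_alloca: emit wherever the current block is.
    fn dynamic_alloca(&mut self, block: usize, name: &str) -> String {
        let slot = format!("%{}", name);
        self.blocks[block].push(format!("{} = alloca", slot));
        slot
    }
}

fn main() {
    let mut f = ToyFunction { entry: vec![], blocks: vec![vec![]] };
    f.dynamic_alloca(0, "scratch");  // lands mid-function
    let s = f.alloca("abi_cast");    // always lands in the entry block
    assert_eq!(f.entry, vec!["%abi_cast = alloca"]);
    assert!(f.blocks[0][0].starts_with("%scratch"));
    println!("entry slot: {}", s);
}
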
@ -23,7 +23,6 @@
use abi::{Abi, FnType};
use attributes;
use base;
use base::*;
use common::{
self, CrateContext, FunctionContext, SharedCrateContext
};
@ -348,7 +347,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
let llenv = if env_arg.is_indirect() {
llargs[self_idx]
} else {
let scratch = alloc_ty(&bcx, closure_ty, "self");
let scratch = bcx.alloca_ty(closure_ty, "self");
let mut llarg_idx = self_idx;
env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch);
scratch
@ -365,12 +364,12 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
// Call the by-ref closure body with `self` in a cleanup scope,
// to drop `self` when the body returns, or in case it unwinds.
let self_scope = fcx.schedule_drop_mem(MaybeSizedValue::sized(llenv), closure_ty);
let self_scope = fcx.schedule_drop_mem(&bcx, MaybeSizedValue::sized(llenv), closure_ty);
let llfn = callee.reify(bcx.ccx);
let llret;
if let Some(landing_pad) = self_scope.landing_pad {
let normal_bcx = bcx.fcx().build_new_block("normal-return");
let normal_bcx = bcx.build_new_block("normal-return");
llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
bcx = normal_bcx;
} else {

@ -21,7 +21,8 @@
use llvm::BasicBlockRef;
use base;
use adt::MaybeSizedValue;
use common::{BlockAndBuilder, FunctionContext, Funclet};
use builder::Builder;
use common::{FunctionContext, Funclet};
use glue;
use type_::Type;
use rustc::ty::Ty;
@ -42,7 +43,7 @@ pub struct DropValue<'tcx> {
}
impl<'tcx> DropValue<'tcx> {
fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &BlockAndBuilder<'a, 'tcx>) {
fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &Builder<'a, 'tcx>) {
glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet)
}
@ -52,13 +53,13 @@ fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &BlockAndBuilder<'a, 'tcx
/// landing_pad -> ... cleanups ... -> [resume]
///
/// This should only be called once per function, as it creates an alloca for the landingpad.
fn get_landing_pad<'a>(&self, fcx: &FunctionContext<'a, 'tcx>) -> BasicBlockRef {
fn get_landing_pad<'a>(&self, bcx: &Builder<'a, 'tcx>) -> BasicBlockRef {
debug!("get_landing_pad");
let bcx = fcx.build_new_block("cleanup_unwind");
let bcx = bcx.build_new_block("cleanup_unwind");
let llpersonality = bcx.ccx.eh_personality();
bcx.set_personality_fn(llpersonality);
if base::wants_msvc_seh(fcx.ccx.sess()) {
if base::wants_msvc_seh(bcx.ccx.sess()) {
let pad = bcx.cleanup_pad(None, &[]);
let funclet = Some(Funclet::new(pad));
self.trans(funclet.as_ref(), &bcx);
@ -68,10 +69,10 @@ fn get_landing_pad<'a>(&self, fcx: &FunctionContext<'a, 'tcx>) -> BasicBlockRef
// The landing pad return type (the type being propagated). Not sure
// what this represents but it's determined by the personality
// function and this is what the EH proposal example uses.
let llretty = Type::struct_(fcx.ccx, &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)], false);
let llretty = Type::struct_(bcx.ccx, &[Type::i8p(bcx.ccx), Type::i32(bcx.ccx)], false);
// The only landing pad clause will be 'cleanup'
let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.fcx().llfn);
let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.llfn());
// The landing pad block is a cleanup
bcx.set_cleanup(llretval);
@ -79,7 +80,7 @@ fn get_landing_pad<'a>(&self, fcx: &FunctionContext<'a, 'tcx>) -> BasicBlockRef
// Insert cleanup instructions into the cleanup block
self.trans(None, &bcx);
if !bcx.sess().target.target.options.custom_unwind_resume {
if !bcx.ccx.sess().target.target.options.custom_unwind_resume {
bcx.resume(llretval);
} else {
let exc_ptr = bcx.extract_value(llretval, 0);
@ -94,7 +95,9 @@ fn get_landing_pad<'a>(&self, fcx: &FunctionContext<'a, 'tcx>) -> BasicBlockRef
impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
/// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
pub fn schedule_drop_mem(&self, val: MaybeSizedValue, ty: Ty<'tcx>) -> CleanupScope<'tcx> {
pub fn schedule_drop_mem(
&self, bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx>
) -> CleanupScope<'tcx> {
if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
let drop = DropValue {
val: val,
@ -102,7 +105,7 @@ pub fn schedule_drop_mem(&self, val: MaybeSizedValue, ty: Ty<'tcx>) -> CleanupSc
skip_dtor: false,
};
CleanupScope::new(self, drop)
CleanupScope::new(bcx, drop)
}
/// Issue #23611: Schedules a (deep) drop of the contents of
@ -110,8 +113,9 @@ pub fn schedule_drop_mem(&self, val: MaybeSizedValue, ty: Ty<'tcx>) -> CleanupSc
/// `ty`. The scheduled code handles extracting the discriminant
/// and dropping the contents associated with that variant
/// *without* executing any associated drop implementation.
pub fn schedule_drop_adt_contents(&self, val: MaybeSizedValue, ty: Ty<'tcx>)
-> CleanupScope<'tcx> {
pub fn schedule_drop_adt_contents(
&self, bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx>
) -> CleanupScope<'tcx> {
// `if` below could be "!contents_needs_drop"; skipping drop
// is just an optimization, so sound to be conservative.
if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
@ -122,16 +126,16 @@ pub fn schedule_drop_adt_contents(&self, val: MaybeSizedValue, ty: Ty<'tcx>)
skip_dtor: true,
};
CleanupScope::new(self, drop)
CleanupScope::new(bcx, drop)
}
}
impl<'tcx> CleanupScope<'tcx> {
fn new<'a>(fcx: &FunctionContext<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
fn new<'a>(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
CleanupScope {
cleanup: Some(drop_val),
landing_pad: if !fcx.ccx.sess().no_landing_pads() {
Some(drop_val.get_landing_pad(fcx))
landing_pad: if !bcx.ccx.sess().no_landing_pads() {
Some(drop_val.get_landing_pad(bcx))
} else {
None
},
@ -145,7 +149,7 @@ pub fn noop() -> CleanupScope<'tcx> {
}
}
pub fn trans<'a>(self, bcx: &'a BlockAndBuilder<'a, 'tcx>) {
pub fn trans<'a>(self, bcx: &'a Builder<'a, 'tcx>) {
if let Some(cleanup) = self.cleanup {
cleanup.trans(None, &bcx);
}

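With the back-pointer gone, the cleanup API is threaded explicitly: `schedule_drop_mem` and `schedule_drop_adt_contents` now take the active `&Builder`, and `get_landing_pad` grows its `cleanup_unwind` block out of that builder's function (the matching call sites appear in the glue.rs and callee.rs hunks). A toy model of the new threading, with made-up names standing in for the real types:

// Toy model of the new signature: the drop scheduler receives the builder
// and derives the landing pad from it, instead of reading an fcx stored
// inside bcx. Illustrative names only.

struct Builder { llfn: &'static str }
struct CleanupScope { landing_pad: Option<String> }

fn schedule_drop_mem(bcx: &Builder, no_landing_pads: bool) -> CleanupScope {
    CleanupScope {
        // Mirrors CleanupScope::new: the pad is built from bcx, per function.
        landing_pad: if !no_landing_pads {
            Some(format!("cleanup_unwind in {}", bcx.llfn))
        } else {
            None
        },
    }
}

fn main() {
    let bcx = Builder { llfn: "drop_glue" };
    let scope = schedule_drop_mem(&bcx, false);
    assert_eq!(scope.landing_pad.as_deref(),
               Some("cleanup_unwind in drop_glue"));
}
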
@ -12,7 +12,6 @@
//! Code that is useful in various trans modules.
use session::Session;
use llvm;
use llvm::{ValueRef, BasicBlockRef, ContextRef, TypeKind};
use llvm::{True, False, Bool, OperandBundleDef};
@ -37,7 +36,6 @@
use libc::{c_uint, c_char};
use std::borrow::Cow;
use std::iter;
use std::ops::Deref;
use std::ffi::CString;
use syntax::ast;
@ -235,8 +233,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
// This function's enclosing crate context.
pub ccx: &'a CrateContext<'a, 'tcx>,
alloca_builder: Builder<'a, 'tcx>,
}
impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
@ -247,30 +243,18 @@ pub fn new(ccx: &'a CrateContext<'a, 'tcx>, llfndecl: ValueRef) -> FunctionConte
llfn: llfndecl,
alloca_insert_pt: None,
ccx: ccx,
alloca_builder: Builder::with_ccx(ccx),
};
let val = {
let entry_bcx = fcx.build_new_block("entry-block");
let val = entry_bcx.load(C_null(Type::i8p(ccx)));
fcx.alloca_builder.position_at_start(entry_bcx.llbb());
val
};
let entry_bcx = Builder::new_block(fcx.ccx, fcx.llfn, "entry-block");
entry_bcx.position_at_start(entry_bcx.llbb());
// Use a dummy instruction as the insertion point for all allocas.
// This is later removed in the drop of FunctionContext.
fcx.alloca_insert_pt = Some(val);
fcx.alloca_insert_pt = Some(entry_bcx.load(C_null(Type::i8p(ccx))));
fcx
}
pub fn get_entry_block(&'a self) -> BlockAndBuilder<'a, 'tcx> {
BlockAndBuilder::new(unsafe {
llvm::LLVMGetFirstBasicBlock(self.llfn)
}, self)
}
pub fn new_block(&'a self, name: &str) -> BasicBlockRef {
pub fn new_block(&self, name: &str) -> BasicBlockRef {
unsafe {
let name = CString::new(name).unwrap();
llvm::LLVMAppendBasicBlockInContext(
@ -281,12 +265,14 @@ pub fn new_block(&'a self, name: &str) -> BasicBlockRef {
}
}
pub fn build_new_block(&'a self, name: &str) -> BlockAndBuilder<'a, 'tcx> {
BlockAndBuilder::new(self.new_block(name), self)
pub fn build_new_block(&self, name: &str) -> Builder<'a, 'tcx> {
Builder::new_block(self.ccx, self.llfn, name)
}
pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
self.alloca_builder.dynamic_alloca(ty, name)
pub fn get_entry_block(&'a self) -> Builder<'a, 'tcx> {
let builder = Builder::with_ccx(self.ccx);
builder.position_at_end(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn) });
builder
}
}
@ -298,65 +284,6 @@ fn drop(&mut self) {
}
}
#[must_use]
pub struct BlockAndBuilder<'a, 'tcx: 'a> {
// The BasicBlockRef returned from a call to
// llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
// block to the function pointed to by llfn. We insert
// instructions into that block by way of this block context.
// The block pointing to this one in the function's digraph.
llbb: BasicBlockRef,
// The function context for the function to which this block is
// attached.
fcx: &'a FunctionContext<'a, 'tcx>,
builder: Builder<'a, 'tcx>,
}
impl<'a, 'tcx> BlockAndBuilder<'a, 'tcx> {
pub fn new(llbb: BasicBlockRef, fcx: &'a FunctionContext<'a, 'tcx>) -> Self {
let builder = Builder::with_ccx(fcx.ccx);
// Set the builder's position to this block's end.
builder.position_at_end(llbb);
BlockAndBuilder {
llbb: llbb,
fcx: fcx,
builder: builder,
}
}
pub fn at_start<F, R>(&self, f: F) -> R
where F: FnOnce(&BlockAndBuilder<'a, 'tcx>) -> R
{
self.position_at_start(self.llbb);
let r = f(self);
self.position_at_end(self.llbb);
r
}
pub fn fcx(&self) -> &'a FunctionContext<'a, 'tcx> {
self.fcx
}
pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
self.ccx.tcx()
}
pub fn sess(&self) -> &'a Session {
self.ccx.sess()
}
pub fn llbb(&self) -> BasicBlockRef {
self.llbb
}
}
impl<'a, 'tcx> Deref for BlockAndBuilder<'a, 'tcx> {
type Target = Builder<'a, 'tcx>;
fn deref(&self) -> &Self::Target {
&self.builder
}
}
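
For context, the `Deref` forwarding deleted above is why most call sites change only in their types: `BlockAndBuilder` auto-derefed to its inner `Builder`, so method calls like `bcx.trunc(..)` already resolved to `Builder`. A stripped-down model of the deleted pattern (toy types, hypothetical bodies):

use std::ops::Deref;

struct Builder;
impl Builder {
    fn trunc(&self) -> &'static str { "trunc" }
}

// The deleted wrapper: a builder plus a function-context back-pointer,
// forwarding all builder methods through Deref.
struct FunctionContext;
struct BlockAndBuilder<'a> {
    fcx: &'a FunctionContext,
    builder: Builder,
}

impl<'a> BlockAndBuilder<'a> {
    // The accessor the refactor removes (the bcx.fcx() call sites).
    fn fcx(&self) -> &'a FunctionContext { self.fcx }
}

impl<'a> Deref for BlockAndBuilder<'a> {
    type Target = Builder;
    fn deref(&self) -> &Builder { &self.builder }
}

fn main() {
    let fcx = FunctionContext;
    let bcx = BlockAndBuilder { fcx: &fcx, builder: Builder };
    let _back_ptr: &FunctionContext = bcx.fcx();
    // Resolves via Deref to Builder::trunc, same as on a bare Builder.
    assert_eq!(bcx.trunc(), "trunc");
}
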
/// A structure representing an active landing pad for the duration of a basic
/// block.
///
@ -725,7 +652,7 @@ pub fn langcall(tcx: TyCtxt,
// of Java. (See related discussion on #1877 and #10183.)
pub fn build_unchecked_lshift<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
lhs: ValueRef,
rhs: ValueRef
) -> ValueRef {
@ -736,7 +663,7 @@ pub fn build_unchecked_lshift<'a, 'tcx>(
}
pub fn build_unchecked_rshift<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef
bcx: &Builder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef
) -> ValueRef {
let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
@ -749,13 +676,13 @@ pub fn build_unchecked_rshift<'a, 'tcx>(
}
}
fn shift_mask_rhs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, rhs: ValueRef) -> ValueRef {
fn shift_mask_rhs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, rhs: ValueRef) -> ValueRef {
let rhs_llty = val_ty(rhs);
bcx.and(rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false))
}
pub fn shift_mask_val<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
llty: Type,
mask_llty: Type,
invert: bool

@ -27,7 +27,8 @@
use rustc::ty::subst::Substs;
use abi::Abi;
use common::{CrateContext, BlockAndBuilder};
use common::CrateContext;
use builder::Builder;
use monomorphize::{self, Instance};
use rustc::ty::{self, Ty};
use rustc::mir;
@ -423,7 +424,7 @@ fn get_containing_scope<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>,
}
}
pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
dbg_context: &FunctionDebugContext,
variable_name: ast::Name,
variable_type: Ty<'tcx>,

@ -35,16 +35,17 @@
use value::Value;
use Disr;
use cleanup::CleanupScope;
use builder::Builder;
use syntax_pos::DUMMY_SP;
pub fn trans_exchange_free_ty<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
ptr: MaybeSizedValue,
content_ty: Ty<'tcx>
) {
let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem);
let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
let def_id = langcall(bcx.ccx.tcx(), None, "", BoxFreeFnLangItem);
let substs = bcx.ccx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
let callee = Callee::def(bcx.ccx, def_id, substs);
let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
@ -93,12 +94,12 @@ pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'t
}
}
fn drop_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, args: MaybeSizedValue, t: Ty<'tcx>) {
fn drop_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, args: MaybeSizedValue, t: Ty<'tcx>) {
call_drop_glue(bcx, args, t, false, None)
}
pub fn call_drop_glue<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
mut args: MaybeSizedValue,
t: Ty<'tcx>,
skip_dtor: bool,
@ -232,7 +233,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
}
ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => {
let shallow_drop = def.is_union();
let tcx = bcx.tcx();
let tcx = bcx.ccx.tcx();
let def = t.ty_adt_def().unwrap();
@ -245,7 +246,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
let contents_scope = if !shallow_drop {
bcx.fcx().schedule_drop_adt_contents(ptr, t)
fcx.schedule_drop_adt_contents(&bcx, ptr, t)
} else {
CleanupScope::noop()
};
@ -264,7 +265,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
let llret;
let args = &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize];
if let Some(landing_pad) = contents_scope.landing_pad {
let normal_bcx = bcx.fcx().build_new_block("normal-return");
let normal_bcx = bcx.build_new_block("normal-return");
llret = bcx.invoke(callee.reify(ccx), args, normal_bcx.llbb(), landing_pad, None);
bcx = normal_bcx;
} else {
@ -288,8 +289,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
bcx.ret_void();
}
pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
t: Ty<'tcx>, info: ValueRef)
pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {:?}",
t, Value(info));
@ -331,7 +331,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let last_field = def.struct_variant().fields.last().unwrap();
let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
let field_ty = monomorphize::field_ty(bcx.ccx.tcx(), substs, last_field);
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
// FIXME (#26403, #27023): We should be adding padding
@ -383,7 +383,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
(bcx.load(size_ptr), bcx.load(align_ptr))
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(bcx.tcx());
let unit_ty = t.sequence_element_type(bcx.ccx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let llunit_ty = sizing_type_of(bcx.ccx, unit_ty);
@ -397,16 +397,16 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
}
// Iterates through the elements of a structural type, dropping them.
fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>,
ptr: MaybeSizedValue,
t: Ty<'tcx>)
-> BlockAndBuilder<'a, 'tcx> {
fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
-> Builder<'a, 'tcx> {
fn iter_variant<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
t: Ty<'tcx>,
av: adt::MaybeSizedValue,
variant: &'tcx ty::VariantDef,
substs: &Substs<'tcx>) {
let tcx = cx.tcx();
let tcx = cx.ccx.tcx();
for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i);
@ -417,7 +417,7 @@ fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
let mut cx = cx;
match t.sty {
ty::TyClosure(def_id, substs) => {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.ccx.tcx()).enumerate() {
let llupvar = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i);
drop_ty(&cx, MaybeSizedValue::sized(llupvar), upvar_ty);
}
@ -425,12 +425,12 @@ fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
ty::TyArray(_, n) => {
let base = get_dataptr(&cx, ptr.value);
let len = C_uint(cx.ccx, n);
let unit_ty = t.sequence_element_type(cx.tcx());
let unit_ty = t.sequence_element_type(cx.ccx.tcx());
cx = tvec::slice_for_each(&cx, base, unit_ty, len,
|bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty));
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(cx.tcx());
let unit_ty = t.sequence_element_type(cx.ccx.tcx());
cx = tvec::slice_for_each(&cx, ptr.value, unit_ty, ptr.meta,
|bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty));
}
@ -442,7 +442,7 @@ fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
}
ty::TyAdt(adt, substs) => match adt.adt_kind() {
AdtKind::Struct => {
let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.ccx.tcx(), t, None);
for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr::from(discr), i);
let ptr = if cx.ccx.shared().type_is_sized(field_ty) {
@ -470,7 +470,7 @@ fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
}
}
(adt::BranchKind::Switch, Some(lldiscrim_a)) => {
let tcx = cx.tcx();
let tcx = cx.ccx.tcx();
drop_ty(&cx, MaybeSizedValue::sized(lldiscrim_a), tcx.types.isize);
// Create a fall-through basic block for the "else" case of
@ -486,15 +486,15 @@ fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
// from the outer function, and any other use case will only
// call this for an already-valid enum in which case the `ret
// void` will never be hit.
let ret_void_cx = cx.fcx().build_new_block("enum-iter-ret-void");
let ret_void_cx = cx.build_new_block("enum-iter-ret-void");
ret_void_cx.ret_void();
let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants);
let next_cx = cx.fcx().build_new_block("enum-iter-next");
let next_cx = cx.build_new_block("enum-iter-next");
for variant in &adt.variants {
let variant_cx_name = format!("enum-iter-variant-{}",
&variant.disr_val.to_string());
let variant_cx = cx.fcx().build_new_block(&variant_cx_name);
let variant_cx = cx.build_new_block(&variant_cx_name);
let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val));
variant_cx.add_case(llswitch, case_val, variant_cx.llbb());
iter_variant(&variant_cx, t, ptr, variant, substs);
@ -508,7 +508,7 @@ fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
},
_ => {
cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
cx.ccx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
}
}
return cx;

@ -28,6 +28,7 @@
use rustc::hir;
use syntax::ast;
use syntax::symbol::Symbol;
use builder::Builder;
use rustc::session::Session;
use syntax_pos::Span;
@ -87,14 +88,15 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
fcx: &FunctionContext,
callee_ty: Ty<'tcx>,
fn_ty: &FnType,
llargs: &[ValueRef],
llresult: ValueRef,
span: Span) {
let ccx = bcx.ccx;
let tcx = bcx.tcx();
let tcx = ccx.tcx();
let (def_id, substs, fty) = match callee_ty.sty {
ty::TyFnDef(def_id, substs, ref fty) => (def_id, substs, fty),
@ -125,7 +127,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None)
}
"try" => {
try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult);
try_intrinsic(bcx, fcx, llargs[0], llargs[1], llargs[2], llresult);
C_nil(ccx)
}
"breakpoint" => {
@ -533,7 +535,7 @@ fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
// qux` to be converted into `foo, bar, baz, qux`, integer
// arguments to be truncated as needed and pointers to be
// cast.
fn modify_as_needed<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
t: &intrinsics::Type,
arg_type: Ty<'tcx>,
llarg: ValueRef)
@ -634,7 +636,7 @@ fn modify_as_needed<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
}
}
fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
allow_overlap: bool,
volatile: bool,
tp_ty: Ty<'tcx>,
@ -670,7 +672,7 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
}
fn memset_intrinsic<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
volatile: bool,
ty: Ty<'tcx>,
dst: ValueRef,
@ -686,19 +688,20 @@ fn memset_intrinsic<'a, 'tcx>(
}
fn try_intrinsic<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
fcx: &FunctionContext,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
) {
if bcx.sess().no_landing_pads() {
if bcx.ccx.sess().no_landing_pads() {
bcx.call(func, &[data], None);
bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None);
} else if wants_msvc_seh(bcx.sess()) {
trans_msvc_try(bcx, func, data, local_ptr, dest);
trans_msvc_try(bcx, fcx, func, data, local_ptr, dest);
} else {
trans_gnu_try(bcx, func, data, local_ptr, dest);
trans_gnu_try(bcx, fcx, func, data, local_ptr, dest);
}
}
@ -709,24 +712,25 @@ fn try_intrinsic<'a, 'tcx>(
// instructions are meant to work for all targets, as of the time of this
// writing, however, LLVM does not recommend the usage of these new instructions
// as the old ones are still more optimized.
fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
fcx: &FunctionContext,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef) {
let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| {
let llfn = get_rust_try_fn(fcx, &mut |bcx| {
let ccx = bcx.ccx;
bcx.set_personality_fn(bcx.ccx.eh_personality());
let normal = bcx.fcx().build_new_block("normal");
let catchswitch = bcx.fcx().build_new_block("catchswitch");
let catchpad = bcx.fcx().build_new_block("catchpad");
let caught = bcx.fcx().build_new_block("caught");
let normal = bcx.build_new_block("normal");
let catchswitch = bcx.build_new_block("catchswitch");
let catchpad = bcx.build_new_block("catchpad");
let caught = bcx.build_new_block("caught");
let func = llvm::get_param(bcx.fcx().llfn, 0);
let data = llvm::get_param(bcx.fcx().llfn, 1);
let local_ptr = llvm::get_param(bcx.fcx().llfn, 2);
let func = llvm::get_param(bcx.llfn(), 0);
let data = llvm::get_param(bcx.llfn(), 1);
let local_ptr = llvm::get_param(bcx.llfn(), 2);
// We're generating an IR snippet that looks like:
//
@ -768,7 +772,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
//
// More information can be found in libstd's seh.rs implementation.
let i64p = Type::i64(ccx).ptr_to();
let slot = bcx.fcx().alloca(i64p, "slot");
let slot = bcx.alloca(i64p, "slot");
bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
None);
@ -812,12 +816,13 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
fcx: &FunctionContext,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef) {
let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| {
let llfn = get_rust_try_fn(fcx, &mut |bcx| {
let ccx = bcx.ccx;
// Translates the shims described above:
@ -837,12 +842,12 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
// expected to be `*mut *mut u8` for this to actually work, but that's
// managed by the standard library.
let then = bcx.fcx().build_new_block("then");
let catch = bcx.fcx().build_new_block("catch");
let then = bcx.build_new_block("then");
let catch = bcx.build_new_block("catch");
let func = llvm::get_param(bcx.fcx().llfn, 0);
let data = llvm::get_param(bcx.fcx().llfn, 1);
let local_ptr = llvm::get_param(bcx.fcx().llfn, 2);
let func = llvm::get_param(bcx.llfn(), 0);
let data = llvm::get_param(bcx.llfn(), 1);
let local_ptr = llvm::get_param(bcx.llfn(), 2);
bcx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
then.ret(C_i32(ccx, 0));
@ -854,7 +859,7 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
// rust_try ignores the selector.
let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
false);
let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.fcx().llfn);
let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.llfn());
catch.add_clause(vals, C_null(Type::i8p(ccx)));
let ptr = catch.extract_value(vals, 0);
catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), None);
@ -873,7 +878,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
name: &str,
inputs: Vec<Ty<'tcx>>,
output: Ty<'tcx>,
trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>))
trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
-> ValueRef {
let ccx = fcx.ccx;
let sig = ccx.tcx().mk_fn_sig(inputs.into_iter(), output, false);
@ -894,7 +899,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>))
trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
-> ValueRef {
let ccx = fcx.ccx;
if let Some(llfn) = ccx.rust_try_fn().get() {
@ -920,7 +925,7 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
}
fn generic_simd_intrinsic<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
name: &str,
callee_ty: Ty<'tcx>,
llargs: &[ValueRef],
@ -935,7 +940,7 @@ macro_rules! emit_error {
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
bcx.sess(), span,
bcx.ccx.sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ",
$msg),
name, $($fmt)*));
@ -957,7 +962,7 @@ macro_rules! require_simd {
let tcx = bcx.tcx();
let tcx = bcx.ccx.tcx();
let sig = tcx.erase_late_bound_regions_and_normalize(callee_ty.fn_sig());
let arg_tys = sig.inputs();

@ -13,6 +13,7 @@
use rustc::traits;
use callee::{Callee, CalleeData};
use common::*;
use builder::Builder;
use consts;
use declare;
use glue;
@ -27,7 +28,7 @@
const VTABLE_OFFSET: usize = 3;
/// Extracts a method from a trait object's vtable, at the specified index.
pub fn get_virtual_method<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
pub fn get_virtual_method<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
llvtable: ValueRef,
vtable_index: usize)
-> ValueRef {

@ -17,7 +17,8 @@
use adt::{self, MaybeSizedValue};
use base::{self, Lifetime};
use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
use common::{self, BlockAndBuilder, Funclet};
use builder::Builder;
use common::{self, Funclet};
use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef};
use consts;
use Disr;
@ -57,7 +58,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
let cleanup_pad = funclet.map(|lp| lp.cleanuppad());
let cleanup_bundle = funclet.map(|l| l.bundle());
let funclet_br = |this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| {
let funclet_br = |this: &Self, bcx: Builder, bb: mir::BasicBlock| {
let lltarget = this.blocks[bb];
if let Some(cp) = cleanup_pad {
match this.cleanup_kinds[bb] {
@ -74,7 +75,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
}
};
let llblock = |this: &mut Self, target: mir::BasicBlock| {
let llblock = |this: &mut Self, bcx: &Builder, target: mir::BasicBlock| {
let lltarget = this.blocks[target];
if let Some(cp) = cleanup_pad {
@ -84,7 +85,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
debug!("llblock: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
let trampoline = this.fcx.build_new_block(name);
let trampoline = bcx.build_new_block(name);
trampoline.cleanup_ret(cp, Some(lltarget));
trampoline.llbb()
}
@ -121,7 +122,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
let ps = self.get_personality_slot(&bcx);
let lp = bcx.load(ps);
Lifetime::End.call(&bcx, ps);
if !bcx.sess().target.target.options.custom_unwind_resume {
if !bcx.ccx.sess().target.target.options.custom_unwind_resume {
bcx.resume(lp);
} else {
let exc_ptr = bcx.extract_value(lp, 0);
@ -138,14 +139,14 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => {
let cond = self.trans_operand(&bcx, cond);
let lltrue = llblock(self, true_bb);
let llfalse = llblock(self, false_bb);
let lltrue = llblock(self, &bcx, true_bb);
let llfalse = llblock(self, &bcx, false_bb);
bcx.cond_br(cond.immediate(), lltrue, llfalse);
}
mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => {
let discr_lvalue = self.trans_lvalue(&bcx, discr);
let ty = discr_lvalue.ty.to_ty(bcx.tcx());
let ty = discr_lvalue.ty.to_ty(bcx.ccx.tcx());
let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true);
let mut bb_hist = FxHashMap();
@ -158,7 +159,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
// code. This is especially helpful in cases like an if-let on a huge enum.
// Note: This optimization is only valid for exhaustive matches.
Some((&&bb, &c)) if c > targets.len() / 2 => {
(Some(bb), llblock(self, bb))
(Some(bb), llblock(self, &bcx, bb))
}
// We're generating an exhaustive switch, so the else branch
// can't be hit. Branching to an unreachable instruction
@ -169,7 +170,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
assert_eq!(adt_def.variants.len(), targets.len());
for (adt_variant, &target) in adt_def.variants.iter().zip(targets) {
if default_bb != Some(target) {
let llbb = llblock(self, target);
let llbb = llblock(self, &bcx, target);
let llval = adt::trans_case(&bcx, ty, Disr::from(adt_variant.disr_val));
bcx.add_case(switch, llval, llbb)
}
@ -180,10 +181,10 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
let (otherwise, targets) = targets.split_last().unwrap();
let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
let discr = base::to_immediate(&bcx, discr, switch_ty);
let switch = bcx.switch(discr, llblock(self, *otherwise), values.len());
let switch = bcx.switch(discr, llblock(self, &bcx, *otherwise), values.len());
for (value, target) in values.iter().zip(targets) {
let val = Const::from_constval(bcx.ccx, value.clone(), switch_ty);
let llbb = llblock(self, *target);
let llbb = llblock(self, &bcx, *target);
bcx.add_case(switch, val.llval, llbb)
}
}
@ -202,7 +203,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
LocalRef::Lvalue(tr_lvalue) => {
OperandRef {
val: Ref(tr_lvalue.llval),
ty: tr_lvalue.ty.to_ty(bcx.tcx())
ty: tr_lvalue.ty.to_ty(bcx.ccx.tcx())
}
}
};
@ -232,7 +233,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
}
mir::TerminatorKind::Drop { ref location, target, unwind } => {
let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx());
let ty = location.ty(&self.mir, bcx.ccx.tcx()).to_ty(bcx.ccx.tcx());
let ty = self.monomorphize(&ty);
// Double check for necessity to drop
@ -260,7 +261,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
drop_fn,
args,
self.blocks[target],
llblock(self, unwind),
llblock(self, &bcx, unwind),
cleanup_bundle
);
} else {
@ -300,7 +301,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx, expected)], None);
// Create the failure block and the conditional branch to it.
let lltarget = llblock(self, target);
let lltarget = llblock(self, &bcx, target);
let panic_block = self.fcx.build_new_block("panic");
if expected {
bcx.cond_br(cond, lltarget, panic_block.llbb());
@ -313,7 +314,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
self.set_debug_loc(&bcx, terminator.source_info);
// Get the location information.
let loc = bcx.sess().codemap().lookup_char_pos(span.lo);
let loc = bcx.ccx.sess().codemap().lookup_char_pos(span.lo);
let filename = Symbol::intern(&loc.file.name).as_str();
let filename = C_str_slice(bcx.ccx, filename);
let line = C_u32(bcx.ccx, loc.line as u32);
@ -363,15 +364,15 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
if const_cond == Some(!expected) {
if let Some(err) = const_err {
let err = ConstEvalErr{ span: span, kind: err };
let mut diag = bcx.tcx().sess.struct_span_warn(
let mut diag = bcx.ccx.tcx().sess.struct_span_warn(
span, "this expression will panic at run-time");
note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag);
note_const_eval_err(bcx.ccx.tcx(), &err, span, "expression", &mut diag);
diag.emit();
}
}
// Obtain the panic entry point.
let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item);
let def_id = common::langcall(bcx.ccx.tcx(), Some(span), "", lang_item);
let callee = Callee::def(bcx.ccx, def_id,
bcx.ccx.empty_substs_for_def_id(def_id));
let llfn = callee.reify(bcx.ccx);
@ -381,7 +382,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
bcx.invoke(llfn,
&args,
self.unreachable_block(),
llblock(self, unwind),
llblock(self, &bcx, unwind),
cleanup_bundle);
} else {
bcx.call(llfn, &args, cleanup_bundle);
@ -410,12 +411,12 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
_ => bug!("{} is not callable", callee.ty)
};
let sig = bcx.tcx().erase_late_bound_regions_and_normalize(sig);
let sig = bcx.ccx.tcx().erase_late_bound_regions_and_normalize(sig);
// Handle intrinsics old trans wants Expr's for, ourselves.
let intrinsic = match (&callee.ty.sty, &callee.data) {
(&ty::TyFnDef(def_id, ..), &Intrinsic) => {
Some(bcx.tcx().item_name(def_id).as_str())
Some(bcx.ccx.tcx().item_name(def_id).as_str())
}
_ => None
};
@ -443,7 +444,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
let extra_args = &args[sig.inputs().len()..];
let extra_args = extra_args.iter().map(|op_arg| {
let op_ty = op_arg.ty(&self.mir, bcx.tcx());
let op_ty = op_arg.ty(&self.mir, bcx.ccx.tcx());
self.monomorphize(&op_ty)
}).collect::<Vec<_>>();
let fn_ty = callee.direct_fn_type(bcx.ccx, &extra_args);
@ -545,7 +546,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
bug!("Cannot use direct operand with an intrinsic call")
};
trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest,
trans_intrinsic_call(&bcx, self.fcx, callee.ty, &fn_ty, &llargs, dest,
terminator.source_info.span);
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
@ -579,20 +580,20 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
let invokeret = bcx.invoke(fn_ptr,
&llargs,
ret_bcx,
llblock(self, cleanup),
llblock(self, &bcx, cleanup),
cleanup_bundle);
fn_ty.apply_attrs_callsite(invokeret);
if let Some((_, target)) = *destination {
let ret_bcx = self.build_block(target);
ret_bcx.at_start(|ret_bcx| {
self.set_debug_loc(&ret_bcx, terminator.source_info);
let op = OperandRef {
val: Immediate(invokeret),
ty: sig.output(),
};
self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op);
});
ret_bcx.position_at_start(ret_bcx.llbb());
self.set_debug_loc(&ret_bcx, terminator.source_info);
let op = OperandRef {
val: Immediate(invokeret),
ty: sig.output(),
};
self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op);
ret_bcx.position_at_end(ret_bcx.llbb());
}
} else {
let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle);
@ -613,7 +614,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
}
fn trans_argument(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
op: OperandRef<'tcx>,
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
@ -634,7 +635,7 @@ fn trans_argument(&mut self,
let imm_op = |x| OperandRef {
val: Immediate(x),
// We won't be checking the type again.
ty: bcx.tcx().types.err
ty: bcx.ccx.tcx().types.err
};
self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, callee);
self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, callee);
@ -689,7 +690,7 @@ fn trans_argument(&mut self,
}
fn trans_arguments_untupled(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
operand: &mir::Operand<'tcx>,
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
@ -765,13 +766,13 @@ fn trans_arguments_untupled(&mut self,
}
fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> ValueRef {
fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> ValueRef {
let ccx = bcx.ccx;
if let Some(slot) = self.llpersonalityslot {
slot
} else {
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
let slot = bcx.fcx().alloca(llretty, "personalityslot");
let slot = bcx.alloca(llretty, "personalityslot");
self.llpersonalityslot = Some(slot);
Lifetime::Start.call(bcx, slot);
slot
@ -815,11 +816,13 @@ fn unreachable_block(&mut self) -> BasicBlockRef {
})
}
pub fn build_block(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'a, 'tcx> {
BlockAndBuilder::new(self.blocks[bb], self.fcx)
pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> {
let builder = Builder::with_ccx(self.fcx.ccx);
builder.position_at_end(self.blocks[bb]);
builder
}
fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>,
dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType,
llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest {
// If the return is ignored, we can just return a do-nothing ReturnDest
@ -836,14 +839,14 @@ fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
return if fn_ret_ty.is_indirect() {
// Odd, but possible, case: we have an operand temporary,
// but the calling convention has an indirect return.
let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret");
let tmp = bcx.alloca_ty(ret_ty, "tmp_ret");
llargs.push(tmp);
ReturnDest::IndirectOperand(tmp, index)
} else if is_intrinsic {
// Currently, intrinsics always need a location to store
// the result, so we create a temporary alloca for the
// result.
let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret");
let tmp = bcx.alloca_ty(ret_ty, "tmp_ret");
ReturnDest::IndirectOperand(tmp, index)
} else {
ReturnDest::DirectOperand(index)
@ -864,17 +867,17 @@ fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
}
}
fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>,
src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) {
let mut val = self.trans_operand(bcx, src);
if let ty::TyFnDef(def_id, substs, _) = val.ty.sty {
let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.tcx()));
let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.ccx.tcx()));
let out_type_size = llbitsize_of_real(bcx.ccx, llouttype);
if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle.
let f = Callee::def(bcx.ccx, def_id, substs);
let ty = match f.ty.sty {
ty::TyFnDef(.., f) => bcx.tcx().mk_fn_ptr(f),
ty::TyFnDef(.., f) => bcx.ccx.tcx().mk_fn_ptr(f),
_ => f.ty
};
val = OperandRef {
@ -895,7 +898,7 @@ fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
// Stores the return value of a function call into its final location.
fn store_return(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
dest: ReturnDest,
ret_ty: ArgType,
op: OperandRef<'tcx>) {
@ -911,7 +914,7 @@ fn store_return(&mut self,
DirectOperand(index) => {
// If there is a cast, we have to store and reload.
let op = if ret_ty.cast.is_some() {
let tmp = base::alloc_ty(bcx, op.ty, "tmp_ret");
let tmp = bcx.alloca_ty(op.ty, "tmp_ret");
ret_ty.store(bcx, op.immediate(), tmp);
self.trans_load(bcx, tmp, op.ty)
} else {

@ -24,10 +24,11 @@
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use {abi, adt, base, Disr, machine};
use callee::Callee;
use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty};
use builder::Builder;
use common::{self, CrateContext, const_get_elt, val_ty};
use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral, C_big_integral};
use common::{C_null, C_struct, C_str_slice, C_undef, C_uint};
use common::{const_to_opt_u128};
use common::const_to_opt_u128;
use consts;
use monomorphize::{self, Instance};
use type_of;
@ -900,7 +901,7 @@ pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_constant(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
constant: &mir::Constant<'tcx>)
-> Const<'tcx>
{

@ -14,8 +14,8 @@
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
use adt;
use base;
use common::{self, BlockAndBuilder, CrateContext, C_uint, C_undef};
use builder::Builder;
use common::{self, CrateContext, C_uint, C_undef};
use consts;
use machine;
use type_of::type_of;
@ -44,16 +44,6 @@ pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx>
LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
}
pub fn alloca<'a>(bcx: &BlockAndBuilder<'a, 'tcx>,
ty: Ty<'tcx>,
name: &str)
-> LvalueRef<'tcx>
{
assert!(!ty.has_erasable_regions());
let lltemp = base::alloc_ty(bcx, ty, name);
LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
}
pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
let ty = self.ty.to_ty(ccx.tcx());
match ty.sty {
@ -69,13 +59,13 @@ pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_lvalue(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>)
-> LvalueRef<'tcx> {
debug!("trans_lvalue(lvalue={:?})", lvalue);
let ccx = bcx.ccx;
let tcx = bcx.tcx();
let tcx = ccx.tcx();
if let mir::Lvalue::Local(index) = *lvalue {
match self.locals[index] {
@ -177,7 +167,7 @@ pub fn trans_lvalue(&mut self,
let llindex = C_uint(bcx.ccx, from);
let llbase = project_index(llindex);
let base_ty = tr_base.ty.to_ty(bcx.tcx());
let base_ty = tr_base.ty.to_ty(bcx.ccx.tcx());
match base_ty.sty {
ty::TyArray(..) => {
// must cast the lvalue pointer type to the new
@ -214,7 +204,7 @@ pub fn trans_lvalue(&mut self,
// Perform an action using the given Lvalue.
// If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot
// is created first, then used as an operand to update the Lvalue.
pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
pub fn with_lvalue_ref<F, U>(&mut self, bcx: &Builder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>, f: F) -> U
where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U
{
@ -223,9 +213,9 @@ pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
LocalRef::Lvalue(lvalue) => f(self, lvalue),
LocalRef::Operand(None) => {
let lvalue_ty = self.monomorphized_lvalue_ty(lvalue);
let lvalue = LvalueRef::alloca(bcx,
lvalue_ty,
"lvalue_temp");
assert!(!lvalue_ty.has_erasable_regions());
let lltemp = bcx.alloca_ty(lvalue_ty, "lvalue_temp");
let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(lvalue_ty));
let ret = f(self, lvalue);
let op = self.trans_load(bcx, lvalue.llval, lvalue_ty);
self.locals[index] = LocalRef::Operand(Some(op));
@@ -254,18 +244,13 @@ pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
/// than we are.
///
/// nmatsakis: is this still necessary? Not sure.
fn prepare_index(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
llindex: ValueRef)
-> ValueRef
{
let ccx = bcx.ccx;
fn prepare_index(&mut self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef {
let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex));
let int_size = machine::llbitsize_of_real(bcx.ccx, ccx.int_type());
let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.int_type());
if index_size < int_size {
bcx.zext(llindex, ccx.int_type())
bcx.zext(llindex, bcx.ccx.int_type())
} else if index_size > int_size {
bcx.trunc(llindex, ccx.int_type())
bcx.trunc(llindex, bcx.ccx.int_type())
} else {
llindex
}
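
With alloca_ty available on Builder, the LvalueRef::alloca convenience constructor deleted above loses its reason to exist: each call site now inlines the erasable-regions assertion, the alloca, and new_sized, exactly as in the with_lvalue_ref hunk. A toy version of the inlined call-site pattern (hypothetical types):

#[derive(Clone, Copy)]
struct ValueRef(usize);
#[derive(Clone, Copy)]
struct Ty;
struct LvalueTy(Ty);
impl LvalueTy {
    fn from_ty(ty: Ty) -> LvalueTy { LvalueTy(ty) }
}
struct LvalueRef { llval: ValueRef, ty: LvalueTy }
impl LvalueRef {
    fn new_sized(llval: ValueRef, ty: LvalueTy) -> LvalueRef {
        LvalueRef { llval: llval, ty: ty }
    }
}
struct Builder { next_slot: usize }
impl Builder {
    fn alloca_ty(&mut self, _ty: Ty, _name: &str) -> ValueRef {
        self.next_slot += 1;
        ValueRef(self.next_slot)
    }
}

fn main() {
    let mut bcx = Builder { next_slot: 0 };
    let lvalue_ty = Ty;
    // What LvalueRef::alloca used to bundle (the real code also
    // asserts !ty.has_erasable_regions() first):
    let lltemp = bcx.alloca_ty(lvalue_ty, "lvalue_temp");
    let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(lvalue_ty));
    println!("%{}", lvalue.llval.0);
    let _ = lvalue.ty;
}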

View file

@@ -19,7 +19,8 @@
use rustc::ty::TypeFoldable;
use session::config::FullDebugInfo;
use base;
use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funclet};
use builder::Builder;
use common::{self, CrateContext, FunctionContext, C_null, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
use monomorphize::{self, Instance};
use abi::FnType;
@@ -106,7 +107,7 @@ pub fn monomorphize<T>(&self, value: &T) -> T
monomorphize::apply_param_substs(self.ccx.shared(), self.param_substs, value)
}
pub fn set_debug_loc(&mut self, bcx: &BlockAndBuilder, source_info: mir::SourceInfo) {
pub fn set_debug_loc(&mut self, bcx: &Builder, source_info: mir::SourceInfo) {
let (scope, span) = self.debug_loc(source_info);
debuginfo::set_source_location(&self.debug_context, bcx, scope, span);
}
@@ -258,7 +259,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
// User variable
let source_info = decl.source_info.unwrap();
let debug_scope = mircx.scopes[source_info.scope];
let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo;
let dbg = debug_scope.is_valid() && bcx.ccx.sess().opts.debuginfo == FullDebugInfo;
if !lvalue_locals.contains(local.index()) && !dbg {
debug!("alloc: {:?} ({}) -> operand", local, name);
@@ -266,7 +267,9 @@ pub fn trans_mir<'a, 'tcx: 'a>(
}
debug!("alloc: {:?} ({}) -> lvalue", local, name);
let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str());
assert!(!ty.has_erasable_regions());
let lltemp = bcx.alloca_ty(ty, &name.as_str());
let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty));
if dbg {
let (scope, span) = mircx.debug_loc(source_info);
declare_local(&bcx, &mircx.debug_context, name, ty, scope,
@@ -282,7 +285,9 @@ pub fn trans_mir<'a, 'tcx: 'a>(
LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty)))
} else if lvalue_locals.contains(local.index()) {
debug!("alloc: {:?} -> lvalue", local);
LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local)))
assert!(!ty.has_erasable_regions());
let lltemp = bcx.alloca_ty(ty, &format!("{:?}", local));
LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)))
} else {
// If this is an immediate local, we do not create an
// alloca in advance. Instead we wait until we see the
@@ -347,20 +352,20 @@ pub fn trans_mir<'a, 'tcx: 'a>(
/// Produce, for each argument, a `ValueRef` pointing at the
/// argument's value. As arguments are lvalues, these are always
/// indirect.
fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
mircx: &MirContext<'a, 'tcx>,
scopes: &IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>,
lvalue_locals: &BitVector)
-> Vec<LocalRef<'tcx>> {
let mir = mircx.mir;
let fcx = bcx.fcx();
let tcx = bcx.tcx();
let fcx = mircx.fcx;
let tcx = bcx.ccx.tcx();
let mut idx = 0;
let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize;
// Get the argument scope, if it exists and if we need it.
let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE];
let arg_scope = if arg_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo {
let arg_scope = if arg_scope.is_valid() && bcx.ccx.sess().opts.debuginfo == FullDebugInfo {
Some(arg_scope.scope_metadata)
} else {
None
@@ -381,7 +386,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
_ => bug!("spread argument isn't a tuple?!")
};
let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index));
let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index));
for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
let dst = bcx.struct_gep(lltemp, i);
let arg = &mircx.fn_ty.args[idx];
@@ -420,7 +425,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
let arg = &mircx.fn_ty.args[idx];
idx += 1;
let llval = if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo {
let llval = if arg.is_indirect() && bcx.ccx.sess().opts.debuginfo != FullDebugInfo {
// Don't copy an indirect argument to an alloca, the caller
// already put it in a temporary alloca and gave it up, unless
// we emit extra-debug-info, which requires local allocas :(.
@@ -462,7 +467,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
};
return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
} else {
let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index));
let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index));
if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
// we pass fat pointers as two words, but we want to
// represent them internally as a pointer to two words,
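
Throughout this file the pattern is that Builder no longer forwards tcx(), sess(), or fcx(): the type context and session are reached through bcx.ccx, and the function context now comes from the MirContext that owns it. A sketch of the resulting access paths, with hypothetical types:

struct Session;
struct TyCtxt;
struct CrateContext { sess: Session, tcx: TyCtxt }
impl CrateContext {
    fn sess(&self) -> &Session { &self.sess }
    fn tcx(&self) -> &TyCtxt { &self.tcx }
}
// The builder keeps only the crate context; no tcx()/sess() forwarders.
struct Builder<'a> { ccx: &'a CrateContext }
struct FunctionContext;
struct MirContext<'a> { fcx: &'a FunctionContext }

fn arg_local_refs(bcx: &Builder, mircx: &MirContext) {
    let _fcx = mircx.fcx;       // was bcx.fcx()
    let _tcx = bcx.ccx.tcx();   // was bcx.tcx()
    let _sess = bcx.ccx.sess(); // was bcx.sess()
}

fn main() {
    let ccx = CrateContext { sess: Session, tcx: TyCtxt };
    let fcx = FunctionContext;
    arg_local_refs(&Builder { ccx: &ccx }, &MirContext { fcx: &fcx });
}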

View file

@@ -14,7 +14,8 @@
use rustc_data_structures::indexed_vec::Idx;
use base;
use common::{self, BlockAndBuilder};
use common;
use builder::Builder;
use value::Value;
use type_of;
use type_::Type;
@@ -85,8 +86,7 @@ pub fn immediate(self) -> ValueRef {
/// If this operand is a Pair, we return an
/// Immediate aggregate with the two values.
pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>)
-> OperandRef<'tcx> {
pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
if let OperandValue::Pair(a, b) = self.val {
// Reconstruct the immediate aggregate.
let llty = type_of::type_of(bcx.ccx, self.ty);
@@ -107,8 +107,7 @@ pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>)
/// If this operand is a pair in an Immediate,
/// we return a Pair with the two halves.
pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>)
-> OperandRef<'tcx> {
pub fn unpack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
if let OperandValue::Immediate(llval) = self.val {
// Deconstruct the immediate aggregate.
if common::type_is_imm_pair(bcx.ccx, self.ty) {
@@ -136,7 +135,7 @@ pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>)
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_load(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
llval: ValueRef,
ty: Ty<'tcx>)
-> OperandRef<'tcx>
@@ -165,7 +164,7 @@ pub fn trans_load(&mut self,
}
pub fn trans_consume(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>)
-> OperandRef<'tcx>
{
@@ -212,12 +211,12 @@ pub fn trans_consume(&mut self,
// for most lvalues, to consume them we just load them
// out from their home
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
let ty = tr_lvalue.ty.to_ty(bcx.ccx.tcx());
self.trans_load(bcx, tr_lvalue.llval, ty)
}
pub fn trans_operand(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
operand: &mir::Operand<'tcx>)
-> OperandRef<'tcx>
{
@@ -242,7 +241,7 @@ pub fn trans_operand(&mut self,
}
pub fn store_operand(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
lldest: ValueRef,
operand: OperandRef<'tcx>,
align: Option<u32>) {
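
pack_if_pair and unpack_if_pair keep their behavior; only the signatures are reflowed onto one line around the new Builder parameter. As a reminder of what that behavior is, a plain-Rust toy of the pair/immediate round trip (illustrative only; the real code lowers through insertvalue/extractvalue on an LLVM aggregate):

#[derive(Debug, PartialEq)]
enum OperandValue {
    Immediate((u64, u64)), // toy "immediate aggregate" with two fields
    Pair(u64, u64),
}

// Rebuild the immediate aggregate from its two halves.
fn pack_if_pair(val: OperandValue) -> OperandValue {
    match val {
        OperandValue::Pair(a, b) => OperandValue::Immediate((a, b)),
        other => other,
    }
}

// Split an immediate aggregate back into its halves.
fn unpack_if_pair(val: OperandValue) -> OperandValue {
    match val {
        OperandValue::Immediate((a, b)) => OperandValue::Pair(a, b),
        other => other,
    }
}

fn main() {
    let packed = pack_if_pair(OperandValue::Pair(1, 2));
    assert_eq!(unpack_if_pair(packed), OperandValue::Pair(1, 2));
}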

View file

@@ -17,8 +17,9 @@
use asm;
use base;
use builder::Builder;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder};
use common::{self, val_ty, C_bool, C_null, C_uint};
use common::{C_integral};
use adt;
use machine;
@@ -35,10 +36,10 @@
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_rvalue(&mut self,
bcx: BlockAndBuilder<'a, 'tcx>,
bcx: Builder<'a, 'tcx>,
dest: LvalueRef<'tcx>,
rvalue: &mir::Rvalue<'tcx>)
-> BlockAndBuilder<'a, 'tcx>
-> Builder<'a, 'tcx>
{
debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
Value(dest.llval), rvalue);
@@ -79,7 +80,7 @@ pub fn trans_rvalue(&mut self,
// index into the struct, and this case isn't
// important enough for it.
debug!("trans_rvalue: creating ugly alloca");
let lltemp = base::alloc_ty(&bcx, operand.ty, "__unsize_temp");
let lltemp = bcx.alloca_ty(operand.ty, "__unsize_temp");
base::store_ty(&bcx, llval, lltemp, operand.ty);
lltemp
}
@@ -91,7 +92,7 @@ pub fn trans_rvalue(&mut self,
mir::Rvalue::Repeat(ref elem, ref count) => {
let tr_elem = self.trans_operand(&bcx, elem);
let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
let size = count.value.as_u64(bcx.ccx.tcx().sess.target.uint_type);
let size = C_uint(bcx.ccx, size);
let base = base::get_dataptr(&bcx, dest.llval);
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
@@ -103,7 +104,7 @@ pub fn trans_rvalue(&mut self,
match *kind {
mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
let disr = Disr::from(adt_def.variants[variant_index].disr_val);
let dest_ty = dest.ty.to_ty(bcx.tcx());
let dest_ty = dest.ty.to_ty(bcx.ccx.tcx());
adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr));
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand);
@@ -119,7 +120,7 @@ pub fn trans_rvalue(&mut self,
},
_ => {
// If this is a tuple or closure, we need to translate GEP indices.
let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.ccx.tcx()));
let translation = if let Layout::Univariant { ref variant, .. } = *layout {
Some(&variant.memory_index)
} else {
@@ -149,7 +150,7 @@ pub fn trans_rvalue(&mut self,
mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
let outputs = outputs.iter().map(|output| {
let lvalue = self.trans_lvalue(&bcx, output);
(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()))
(lvalue.llval, lvalue.ty.to_ty(bcx.ccx.tcx()))
}).collect();
let input_vals = inputs.iter().map(|input| {
@@ -170,9 +171,9 @@ pub fn trans_rvalue(&mut self,
}
pub fn trans_rvalue_operand(&mut self,
bcx: BlockAndBuilder<'a, 'tcx>,
bcx: Builder<'a, 'tcx>,
rvalue: &mir::Rvalue<'tcx>)
-> (BlockAndBuilder<'a, 'tcx>, OperandRef<'tcx>)
-> (Builder<'a, 'tcx>, OperandRef<'tcx>)
{
assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
@@ -344,9 +345,9 @@ pub fn trans_rvalue_operand(&mut self,
mir::Rvalue::Ref(_, bk, ref lvalue) => {
let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
let ref_ty = bcx.tcx().mk_ref(
bcx.tcx().mk_region(ty::ReErased),
let ty = tr_lvalue.ty.to_ty(bcx.ccx.tcx());
let ref_ty = bcx.ccx.tcx().mk_ref(
bcx.ccx.tcx().mk_region(ty::ReErased),
ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
);
@@ -371,7 +372,7 @@ pub fn trans_rvalue_operand(&mut self,
let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
let operand = OperandRef {
val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)),
ty: bcx.tcx().types.usize,
ty: bcx.ccx.tcx().types.usize,
};
(bcx, operand)
}
@@ -398,7 +399,7 @@ pub fn trans_rvalue_operand(&mut self,
};
let operand = OperandRef {
val: OperandValue::Immediate(llresult),
ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
ty: op.ty(bcx.ccx.tcx(), lhs.ty, rhs.ty),
};
(bcx, operand)
}
@@ -408,8 +409,8 @@ pub fn trans_rvalue_operand(&mut self,
let result = self.trans_scalar_checked_binop(&bcx, op,
lhs.immediate(), rhs.immediate(),
lhs.ty);
let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool]);
let val_ty = op.ty(bcx.ccx.tcx(), lhs.ty, rhs.ty);
let operand_ty = bcx.ccx.tcx().intern_tup(&[val_ty, bcx.ccx.tcx().types.bool]);
let operand = OperandRef {
val: result,
ty: operand_ty
@@ -443,16 +444,16 @@ pub fn trans_rvalue_operand(&mut self,
let align = type_of::align_of(bcx.ccx, content_ty);
let llalign = C_uint(bcx.ccx, align);
let llty_ptr = llty.ptr_to();
let box_ty = bcx.tcx().mk_box(content_ty);
let box_ty = bcx.ccx.tcx().mk_box(content_ty);
// Allocate space:
let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) {
let def_id = match bcx.ccx.tcx().lang_items.require(ExchangeMallocFnLangItem) {
Ok(id) => id,
Err(s) => {
bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
bcx.ccx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
}
};
let r = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[]))
let r = Callee::def(bcx.ccx, def_id, bcx.ccx.tcx().intern_substs(&[]))
.reify(bcx.ccx);
let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);
@@ -477,7 +478,7 @@ pub fn trans_rvalue_operand(&mut self,
}
pub fn trans_scalar_binop(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
op: mir::BinOp,
lhs: ValueRef,
rhs: ValueRef,
@@ -552,7 +553,7 @@ pub fn trans_scalar_binop(&mut self,
}
pub fn trans_fat_ptr_binop(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
op: mir::BinOp,
lhs_addr: ValueRef,
lhs_extra: ValueRef,
@@ -599,7 +600,7 @@ pub fn trans_fat_ptr_binop(&mut self,
}
pub fn trans_scalar_checked_binop(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
op: mir::BinOp,
lhs: ValueRef,
rhs: ValueRef,
@@ -617,7 +618,7 @@ pub fn trans_scalar_checked_binop(&mut self,
// will only succeed if both operands are constant.
// This is necessary to determine when an overflow Assert
// will always panic at runtime, and produce a warning.
if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
if let Some((val, of)) = const_scalar_checked_binop(bcx.ccx.tcx(), op, lhs, rhs, input_ty) {
return OperandValue::Pair(val, C_bool(bcx.ccx, of));
}
@@ -681,12 +682,12 @@ enum OverflowOp {
Add, Sub, Mul
}
fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> ValueRef {
fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {
use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*;
use rustc::ty::{TyInt, TyUint};
let tcx = bcx.tcx();
let tcx = bcx.ccx.tcx();
let new_sty = match ty.sty {
TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
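
Unlike the &Builder methods above, trans_rvalue and trans_rvalue_operand take the builder by value and return one, because translating an rvalue (the Repeat loop or the Box allocation, for instance) can finish in a different basic block than it started in. A toy of that ownership threading (hypothetical types):

struct Builder { block: &'static str }

enum Rvalue { Use, Repeat }

// Consumes the incoming builder and returns whichever builder the
// following statements should be emitted into.
fn trans_rvalue(bcx: Builder, rvalue: Rvalue) -> Builder {
    match rvalue {
        Rvalue::Use => bcx, // stays in the current block
        Rvalue::Repeat => Builder { block: "slice_loop_next" }, // moved on
    }
}

fn main() {
    let bcx = Builder { block: "start" };
    let bcx = trans_rvalue(bcx, Rvalue::Use);
    let bcx = trans_rvalue(bcx, Rvalue::Repeat);
    println!("now emitting into {}", bcx.block);
}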

View file

@@ -11,7 +11,8 @@
use rustc::mir;
use base;
use common::{self, BlockAndBuilder};
use common;
use builder::Builder;
use super::MirContext;
use super::LocalRef;
@@ -20,9 +21,9 @@
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_statement(&mut self,
bcx: BlockAndBuilder<'a, 'tcx>,
bcx: Builder<'a, 'tcx>,
statement: &mir::Statement<'tcx>)
-> BlockAndBuilder<'a, 'tcx> {
-> Builder<'a, 'tcx> {
debug!("trans_statement(statement={:?})", statement);
self.set_debug_loc(&bcx, statement.source_info);
@@ -77,10 +78,10 @@ pub fn trans_statement(&mut self,
}
fn trans_storage_liveness(&self,
bcx: BlockAndBuilder<'a, 'tcx>,
bcx: Builder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>,
intrinsic: base::Lifetime)
-> BlockAndBuilder<'a, 'tcx> {
-> Builder<'a, 'tcx> {
if let mir::Lvalue::Local(index) = *lvalue {
if let LocalRef::Lvalue(tr_lval) = self.locals[index] {
intrinsic.call(&bcx, tr_lval.llval);
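
trans_storage_liveness keeps the same by-value convention and the same dispatch: only locals that live in an lvalue have a stack slot to mark, so operand locals are silently skipped. A toy of that dispatch (hypothetical types):

struct Builder;
#[derive(Clone, Copy)]
struct ValueRef(usize);
#[derive(Clone, Copy)]
enum LocalRef { Lvalue(ValueRef), Operand }
struct Lifetime; // stands in for base::Lifetime::{Start, End}
impl Lifetime {
    fn call(&self, _bcx: &Builder, v: ValueRef) {
        println!("lifetime marker on %{}", v.0);
    }
}

fn trans_storage_liveness(bcx: Builder, local: LocalRef, intrinsic: Lifetime) -> Builder {
    // Operand locals have no alloca, hence nothing to mark.
    if let LocalRef::Lvalue(tr_lval) = local {
        intrinsic.call(&bcx, tr_lval);
    }
    bcx
}

fn main() {
    let bcx = trans_storage_liveness(Builder, LocalRef::Lvalue(ValueRef(1)), Lifetime);
    let _ = trans_storage_liveness(bcx, LocalRef::Operand, Lifetime);
}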

View file

@@ -9,28 +9,29 @@
// except according to those terms.
use llvm;
use builder::Builder;
use llvm::ValueRef;
use common::*;
use rustc::ty::Ty;
pub fn slice_for_each<'a, 'tcx, F>(
bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &Builder<'a, 'tcx>,
data_ptr: ValueRef,
unit_ty: Ty<'tcx>,
len: ValueRef,
f: F
) -> BlockAndBuilder<'a, 'tcx> where F: FnOnce(&BlockAndBuilder<'a, 'tcx>, ValueRef) {
) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef) {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
let zst = type_is_zero_size(bcx.ccx, unit_ty);
let add = |bcx: &BlockAndBuilder, a, b| if zst {
let add = |bcx: &Builder, a, b| if zst {
bcx.add(a, b)
} else {
bcx.inbounds_gep(a, &[b])
};
let body_bcx = bcx.fcx().build_new_block("slice_loop_body");
let next_bcx = bcx.fcx().build_new_block("slice_loop_next");
let header_bcx = bcx.fcx().build_new_block("slice_loop_header");
let body_bcx = bcx.build_new_block("slice_loop_body");
let next_bcx = bcx.build_new_block("slice_loop_next");
let header_bcx = bcx.build_new_block("slice_loop_header");
let start = if zst {
C_uint(bcx.ccx, 0usize)
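
slice_for_each keeps its loop shape, three blocks now built straight off the builder (a header to test the cursor, a body to run f, and a next block to continue after the loop), plus the long-standing special case from #9890: for zero-sized element types the cursor is an integer counter bumped with a plain add, since a GEP over a zero-sized type would never advance. A plain-Rust analogue of the two strategies (illustrative only):

fn slice_for_each<T>(data: &[T], mut f: impl FnMut(*const T)) {
    if std::mem::size_of::<T>() == 0 {
        // ZST path: count iterations; every "element" shares an address.
        let mut i = 0;
        while i < data.len() {
            f(data.as_ptr());
            i += 1; // the `add` arm of the closure above
        }
    } else {
        // Sized path: bump a pointer, the inbounds_gep arm.
        let mut p = data.as_ptr();
        let end = unsafe { p.add(data.len()) };
        while p < end {
            f(p);
            p = unsafe { p.add(1) };
        }
    }
}

fn main() {
    let xs = [10, 20, 30];
    slice_for_each(&xs, |p| unsafe { println!("{}", *p) });
    let mut n = 0;
    slice_for_each(&[(), (), ()], |_| n += 1);
    assert_eq!(n, 3);
}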

View file

@@ -24,8 +24,8 @@ pub struct Bytes {
// dependent alignment
#[no_mangle]
pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
// CHECK: %arg1 = alloca [4 x i8]
// CHECK: [[TMP:%.+]] = alloca i32
// CHECK: %arg1 = alloca [4 x i8]
// CHECK: store i32 %1, i32* [[TMP]]
// CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %arg1 to i8*
// CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*
@@ -38,8 +38,8 @@ pub struct Bytes {
// dependent alignment
#[no_mangle]
pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) {
// CHECK: %arg1 = alloca %Bytes
// CHECK: [[TMP:%.+]] = alloca i32
// CHECK: %arg1 = alloca %Bytes
// CHECK: store i32 %1, i32* [[TMP]]
// CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %arg1 to i8*
// CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*
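
These test updates are pure reordering: FileCheck matches CHECK lines in sequence, and with allocas now emitted through Builder the scratch i32 slot for the by-value argument is created before %arg1, so each pair of directives swaps. The emitted IR would then begin along these lines (a hypothetical excerpt; names illustrative):

//   %0 = alloca i32            ; scratch slot for the i32-passed value
//   %arg1 = alloca [4 x i8]    ; stack copy of the by-value argument
// so the CHECK for the i32 alloca has to precede the CHECK for %arg1,
// or FileCheck's in-order matching fails.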