deduplicate some copy_op code

Ralf Jung 2022-07-02 20:40:41 -04:00
parent 924a4cd008
commit e685530b07
6 changed files with 74 additions and 87 deletions
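
In short: `copy_op` gains an `allow_transmute: bool` parameter and absorbs the
separate `copy_op_transmute` entry point, so every copy goes through one function.
A minimal sketch of how call sites migrate (illustrative fragment, not itself part
of the diff):

    // Before: two entry points.
    self.copy_op(&src, &dest)?;            // layouts must agree
    self.copy_op_transmute(&src, &dest)?;  // layouts may disagree (same size)

    // After: one entry point with an explicit flag.
    self.copy_op(&src, &dest, /*allow_transmute*/ false)?;
    self.copy_op(&src, &dest, /*allow_transmute*/ true)?;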


@@ -359,7 +359,7 @@ fn unsize_into(
             let src_field = self.operand_field(src, i)?;
             let dst_field = self.place_field(dest, i)?;
             if src_field.layout.ty == cast_ty_field.ty {
-                self.copy_op(&src_field, &dst_field)?;
+                self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
             } else {
                 self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
             }


@@ -800,7 +800,7 @@ pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx>
             .local_to_op(self.frame(), mir::RETURN_PLACE, None)
             .expect("return place should always be live");
         let dest = self.frame().return_place;
-        let err = self.copy_op_transmute(&op, &dest);
+        let err = self.copy_op(&op, &dest, /*allow_transmute*/ true);
         trace!("return value: {:?}", self.dump_place(*dest));
         // We delay actually short-circuiting on this error until *after* the stack frame is
         // popped, since we want this error to be attributed to the caller, whose type defines


@@ -174,7 +174,7 @@ pub fn emulate_intrinsic(
                 let val =
                     self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
                 let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
-                self.copy_op(&val, dest)?;
+                self.copy_op(&val, dest, /*allow_transmute*/ false)?;
             }

             sym::ctpop
@@ -394,7 +394,7 @@ pub fn emulate_intrinsic(
             }
             sym::transmute => {
-                self.copy_op_transmute(&args[0], dest)?;
+                self.copy_op(&args[0], dest, /*allow_transmute*/ true)?;
             }
             sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
                 let ty = instance.substs.type_at(0);
@@ -461,7 +461,7 @@ pub fn emulate_intrinsic(
                     let place = self.mplace_index(&dest, i)?;
                     let value =
                         if i == index { *elem } else { self.mplace_index(&input, i)?.into() };
-                    self.copy_op(&value, &place.into())?;
+                    self.copy_op(&value, &place.into(), /*allow_transmute*/ false)?;
                 }
             }
             sym::simd_extract => {
@@ -473,11 +473,15 @@ pub fn emulate_intrinsic(
                     index,
                     input_len
                 );
-                self.copy_op(&self.mplace_index(&input, index)?.into(), dest)?;
+                self.copy_op(
+                    &self.mplace_index(&input, index)?.into(),
+                    dest,
+                    /*allow_transmute*/ false,
+                )?;
             }
             sym::likely | sym::unlikely | sym::black_box => {
                 // These just return their argument
-                self.copy_op(&args[0], dest)?;
+                self.copy_op(&args[0], dest, /*allow_transmute*/ false)?;
             }
             sym::assume => {
                 let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
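
For context, these arms run when const eval or Miri executes the corresponding
intrinsics; note that `transmute` is the only one above that copies with
`allow_transmute = true`. A runnable surface-Rust illustration (assuming today's
stable `std::hint::black_box`, which lowers to the `black_box` intrinsic):

    fn main() {
        // `black_box` just returns its argument: in the interpreter this is a
        // plain copy_op with allow_transmute = false.
        let x = std::hint::black_box(5u32);
        // `transmute` copies between distinct types whose layouts need not agree,
        // hence allow_transmute = true.
        let y: i32 = unsafe { std::mem::transmute(x) };
        assert_eq!(y, 5);
    }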


@@ -10,8 +10,9 @@
 use rustc_middle::mir;
 use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
 use rustc_middle::ty::{self, Ty};
-use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding};
-use rustc_target::abi::{HasDataLayout, Size, VariantIdx, Variants};
+use rustc_target::abi::{
+    Abi, Align, FieldsShape, HasDataLayout, Size, TagEncoding, VariantIdx, Variants,
+};

 use super::{
     alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
@@ -772,19 +773,20 @@ fn write_immediate_no_validate(
             }
             Place::Ptr(mplace) => mplace, // already referring to memory
         };
-        let dest = MPlaceTy { mplace, layout: dest.layout, align: dest.align };

         // This is already in memory, write there.
-        self.write_immediate_to_mplace_no_validate(src, &dest)
+        self.write_immediate_to_mplace_no_validate(src, dest.layout, dest.align, mplace)
     }

     /// Write an immediate to memory.
     /// If you use this you are responsible for validating that things got copied at the
-    /// right type.
+    /// right layout.
     fn write_immediate_to_mplace_no_validate(
         &mut self,
         value: Immediate<M::PointerTag>,
-        dest: &MPlaceTy<'tcx, M::PointerTag>,
+        layout: TyAndLayout<'tcx>,
+        align: Align,
+        dest: MemPlace<M::PointerTag>,
     ) -> InterpResult<'tcx> {
         // Note that it is really important that the type here is the right one, and matches the
         // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
@@ -792,31 +794,30 @@ fn write_immediate_to_mplace_no_validate(
         // wrong type.

         let tcx = *self.tcx;
-        let Some(mut alloc) = self.get_place_alloc_mut(dest)? else {
+        let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout, align })? else {
             // zero-sized access
             return Ok(());
         };

         match value {
             Immediate::Scalar(scalar) => {
-                let Abi::Scalar(s) = dest.layout.abi else { span_bug!(
+                let Abi::Scalar(s) = layout.abi else { span_bug!(
                     self.cur_span(),
-                    "write_immediate_to_mplace: invalid Scalar layout: {:#?}",
-                    dest.layout
+                    "write_immediate_to_mplace: invalid Scalar layout: {layout:#?}",
                 )
                 };
                 let size = s.size(&tcx);
-                assert_eq!(size, dest.layout.size, "abi::Scalar size does not match layout size");
+                assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
                 alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
             }
             Immediate::ScalarPair(a_val, b_val) => {
                 // We checked `ptr_align` above, so all fields will have the alignment they need.
                 // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
                 // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
-                let Abi::ScalarPair(a, b) = dest.layout.abi else { span_bug!(
+                let Abi::ScalarPair(a, b) = layout.abi else { span_bug!(
                     self.cur_span(),
                     "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
-                    dest.layout
+                    layout
                 )
                 };
                 let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
@@ -858,16 +859,17 @@ pub fn write_uninit(&mut self, dest: &PlaceTy<'tcx, M::PointerTag>) -> InterpRes
         Ok(())
     }

-    /// Copies the data from an operand to a place. This does not support transmuting!
-    /// Use `copy_op_transmute` if the layouts could disagree.
+    /// Copies the data from an operand to a place.
+    /// `allow_transmute` indicates whether the layouts may disagree.
     #[inline(always)]
     #[instrument(skip(self), level = "debug")]
     pub fn copy_op(
         &mut self,
         src: &OpTy<'tcx, M::PointerTag>,
         dest: &PlaceTy<'tcx, M::PointerTag>,
+        allow_transmute: bool,
     ) -> InterpResult<'tcx> {
-        self.copy_op_no_validate(src, dest)?;
+        self.copy_op_no_validate(src, dest, allow_transmute)?;

         if M::enforce_validity(self) {
             // Data got changed, better make sure it matches the type!
@@ -877,8 +879,8 @@ pub fn copy_op(
         Ok(())
     }

-    /// Copies the data from an operand to a place. This does not support transmuting!
-    /// Use `copy_op_transmute` if the layouts could disagree.
+    /// Copies the data from an operand to a place.
+    /// `allow_transmute` indicates whether the layouts may disagree.
     /// Also, if you use this you are responsible for validating that things get copied at the
     /// right type.
     #[instrument(skip(self), level = "debug")]
@@ -886,10 +888,13 @@ fn copy_op_no_validate(
         &mut self,
         src: &OpTy<'tcx, M::PointerTag>,
         dest: &PlaceTy<'tcx, M::PointerTag>,
+        allow_transmute: bool,
     ) -> InterpResult<'tcx> {
         // We do NOT compare the types for equality, because well-typed code can
         // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
-        if !mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
+        let layout_compat =
+            mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout);
+        if !allow_transmute && !layout_compat {
             span_bug!(
                 self.cur_span(),
                 "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
@@ -898,25 +903,48 @@ fn copy_op_no_validate(
             );
         }

-        // Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
+        // Let us see if the layout is simple so we take a shortcut,
+        // avoid force_allocation.
        let src = match self.read_immediate_raw(src, /*force*/ false)? {
             Ok(src_val) => {
                 assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
+                assert!(
+                    !dest.layout.is_unsized(),
+                    "the src is sized, so the dest must also be sized"
+                );
+                assert_eq!(src.layout.size, dest.layout.size);
                 // Yay, we got a value that we can write directly.
-                return self.write_immediate_no_validate(*src_val, dest);
+                return if layout_compat {
+                    self.write_immediate_no_validate(*src_val, dest)
+                } else {
+                    // This is tricky. The problematic case is `ScalarPair`: the `src_val` was
+                    // loaded using the offsets defined by `src.layout`. When we put this back into
+                    // the destination, we have to use the same offsets! So (a) we make sure we
+                    // write back to memory, and (b) we use `dest` *with the source layout*.
+                    let dest_mem = self.force_allocation(dest)?;
+                    self.write_immediate_to_mplace_no_validate(
+                        *src_val,
+                        src.layout,
+                        dest_mem.align,
+                        *dest_mem,
+                    )
+                };
             }
             Err(mplace) => mplace,
         };

         // Slow path, this does not fit into an immediate. Just memcpy.
         trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);

-        let dest = self.force_allocation(dest)?;
+        let dest = self.force_allocation(&dest)?;
+        let Some((dest_size, _)) = self.size_and_align_of_mplace(&dest)? else {
+            span_bug!(self.cur_span(), "copy_op needs (dynamically) sized values")
+        };
+        if cfg!(debug_assertions) {
+            let src_size = self.size_and_align_of_mplace(&src)?.unwrap().0;
+            assert_eq!(src_size, dest_size, "Cannot copy differently-sized data");
+        } else {
+            // As a cheap approximation, we compare the fixed parts of the size.
+            assert_eq!(src.layout.size, dest.layout.size);
+        }

         self.mem_copy(
@@ -924,56 +952,6 @@ fn copy_op_no_validate(
         )
     }

-    /// Copies the data from an operand to a place. The layouts may disagree, but they must
-    /// have the same size.
-    pub fn copy_op_transmute(
-        &mut self,
-        src: &OpTy<'tcx, M::PointerTag>,
-        dest: &PlaceTy<'tcx, M::PointerTag>,
-    ) -> InterpResult<'tcx> {
-        if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
-            // Fast path: Just use normal `copy_op`. This is faster because it tries
-            // `read_immediate_raw` first before doing `force_allocation`.
-            return self.copy_op(src, dest);
-        }
-
-        // Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want
-        // to avoid that here.
-        assert!(
-            !src.layout.is_unsized() && !dest.layout.is_unsized(),
-            "Cannot transmute unsized data"
-        );
-
-        // We still require the sizes to match.
-        if src.layout.size != dest.layout.size {
-            span_bug!(
-                self.cur_span(),
-                "size-changing transmute, should have been caught by transmute checking: {:#?}\ndest: {:#?}",
-                src,
-                dest
-            );
-        }
-
-        // The hard case is `ScalarPair`. `src` is already read from memory in this case,
-        // using `src.layout` to figure out which bytes to use for the 1st and 2nd field.
-        // We have to write them to `dest` at the offsets they were *read at*, which is
-        // not necessarily the same as the offsets in `dest.layout`!
-        // Hence we do the copy with the source layout on both sides. We also make sure to write
-        // into memory, because if `dest` is a local we would not even have a way to write
-        // at the `src` offsets; the fact that we came from a different layout would
-        // just be lost.
-        let dest = self.force_allocation(dest)?;
-        self.copy_op_no_validate(
-            src,
-            &PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout, align: dest.align }),
-        )?;
-
-        if M::enforce_validity(self) {
-            // Data got changed, better make sure it matches the type!
-            self.validate_operand(&dest.into())?;
-        }
-
-        Ok(())
-    }
-
     /// Ensures that a place is in memory, and returns where it is.
     /// If the place currently refers to a local that doesn't yet have a matching allocation,
     /// create such an allocation.
@@ -997,18 +975,23 @@ pub fn force_allocation(
                 if local_layout.is_unsized() {
                     throw_unsup_format!("unsized locals are not supported");
                 }
-                let mplace = self.allocate(local_layout, MemoryKind::Stack)?;
+                let mplace = *self.allocate(local_layout, MemoryKind::Stack)?;
                 if !matches!(local_val, Immediate::Uninit) {
                     // Preserve old value. (As an optimization, we can skip this if it was uninit.)
                     // We don't have to validate as we can assume the local
                     // was already valid for its type.
-                    self.write_immediate_to_mplace_no_validate(local_val, &mplace)?;
+                    self.write_immediate_to_mplace_no_validate(
+                        local_val,
+                        local_layout,
+                        local_layout.align.abi,
+                        mplace,
+                    )?;
                 }
                 // Now we can call `access_mut` again, asserting it goes well,
                 // and actually overwrite things.
                 *M::access_local_mut(self, frame, local).unwrap() =
-                    Operand::Indirect(*mplace);
-                *mplace
+                    Operand::Indirect(mplace);
+                mplace
             }
             &mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
         }
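
Why `ScalarPair` is the tricky case the new comment describes: an immediate pair
is read using the field offsets of the *source* layout, so a transmuting copy
must spill it back to memory with that same layout. A runnable program that
exercises this path (assuming, as holds in rustc, that a two-field tuple of
scalars gets `Abi::ScalarPair` while `u64` gets `Abi::Scalar`):

    fn main() {
        // (u32, u32) is a ScalarPair (parts at offsets 0 and 4); u64 is a single
        // Scalar. Same size, incompatible layouts: under the interpreter this
        // copy takes the allow_transmute path and writes the destination using
        // the source layout.
        let pair: (u32, u32) = (0xDEAD, 0xBEEF);
        let word: u64 = unsafe { std::mem::transmute(pair) };
        println!("{word:#x}");
    }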


@@ -169,7 +169,7 @@ pub fn eval_rvalue_into_place(
             Use(ref operand) => {
                 // Avoid recomputing the layout
                 let op = self.eval_operand(operand, Some(dest.layout))?;
-                self.copy_op(&op, &dest)?;
+                self.copy_op(&op, &dest, /*allow_transmute*/ false)?;
             }

             BinaryOp(bin_op, box (ref left, ref right)) => {
@@ -204,7 +204,7 @@ pub fn eval_rvalue_into_place(
                 for (field_index, operand) in operands.iter().enumerate() {
                     let op = self.eval_operand(operand, None)?;
                     let field_dest = self.place_field(&dest, field_index)?;
-                    self.copy_op(&op, &field_dest)?;
+                    self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
                 }
             }
@@ -220,7 +220,7 @@ pub fn eval_rvalue_into_place(
                 } else {
                     // Write the src to the first element.
                     let first = self.mplace_field(&dest, 0)?;
-                    self.copy_op(&src, &first.into())?;
+                    self.copy_op(&src, &first.into(), /*allow_transmute*/ false)?;

                     // This is performance-sensitive code for big static/const arrays! So we
                     // avoid writing each operand individually and instead just make many copies

@@ -321,7 +321,7 @@ fn pass_argument<'x, 'y>(
         // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
         // is true for all `copy_op`, but there are a lot of special cases for argument passing
         // specifically.)
-        self.copy_op_transmute(&caller_arg, callee_arg)
+        self.copy_op(&caller_arg, callee_arg, /*allow_transmute*/ true)
     }

     /// Call this function -- pushing the stack frame and initializing the arguments.
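
`pass_argument` (like the return-place copy in `pop_stack_frame`) uses
`allow_transmute = true` because caller and callee may see an argument at
distinct, merely ABI-compatible types. A hedged sketch of one such situation,
built on a function-pointer transmute (assumption: same-size integer types such
as `u32` and `i32` are ABI-compatible, so this call is defined):

    fn callee(x: u32) -> u32 {
        x
    }

    fn main() {
        // The caller passes an `i32` where the callee declares `u32`; copying the
        // argument into the callee's local is a copy between different types,
        // which is why the copy above allows transmutes.
        let f: fn(i32) -> i32 = unsafe { std::mem::transmute(callee as fn(u32) -> u32) };
        assert_eq!(f(5), 5);
    }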