Auto merge of #99472 - RalfJung:provenance, r=oli-obk

interpret: rename Tag/PointerTag to Prov/Provenance

We were pretty inconsistent with calling this the "tag" vs the "provenance" of the pointer; I think we should consistently call it "provenance".
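For anyone skimming the diff: here is a minimal sketch (simplified stand-in types, not rustc's actual definitions) of what the renamed parameter abstracts over. An interpreter pointer is an offset paired with a generic provenance value identifying the allocation it may access; CTFE instantiates it with a plain allocation ID, while Miri plugs in a richer type.

```rust
// Illustrative only -- simplified stand-ins, not rustc's real types.
#[derive(Copy, Clone, Debug)]
struct Pointer<Prov> {
    provenance: Prov, // previously called the pointer's "tag"
    offset: u64,
}

type AllocId = u64; // hypothetical stand-in for rustc's interned AllocId

fn main() {
    let p: Pointer<AllocId> = Pointer { provenance: 0, offset: 8 };
    println!("{p:?}");
}
```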

r? `@oli-obk`
bors 2022-07-20 16:56:31 +00:00
commit a7468c60f8
24 changed files with 606 additions and 601 deletions


@ -309,7 +309,7 @@ fn call_intrinsic(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: &PlaceTy<'tcx, Self::PointerTag>,
dest: &PlaceTy<'tcx, Self::Provenance>,
target: Option<mir::BasicBlock>,
_unwind: StackPopUnwind,
) -> InterpResult<'tcx> {
@ -470,14 +470,14 @@ fn init_frame_extra(
#[inline(always)]
fn stack<'a>(
ecx: &'a InterpCx<'mir, 'tcx, Self>,
) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
&ecx.machine.stack
}
#[inline(always)]
fn stack_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
&mut ecx.machine.stack
}


@ -18,10 +18,10 @@
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn cast(
&mut self,
src: &OpTy<'tcx, M::PointerTag>,
src: &OpTy<'tcx, M::Provenance>,
cast_kind: CastKind,
cast_ty: Ty<'tcx>,
dest: &PlaceTy<'tcx, M::PointerTag>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
use rustc_middle::mir::CastKind::*;
// FIXME: In which cases should we trigger UB when the source is uninit?
@ -114,9 +114,9 @@ pub fn cast(
pub fn misc_cast(
&mut self,
src: &ImmTy<'tcx, M::PointerTag>,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::PointerTag>> {
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
use rustc_type_ir::sty::TyKind::*;
trace!("Casting {:?}: {:?} to {:?}", *src, src.layout.ty, cast_ty);
@ -173,9 +173,9 @@ pub fn misc_cast(
pub fn pointer_expose_address_cast(
&mut self,
src: &ImmTy<'tcx, M::PointerTag>,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::PointerTag>> {
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_));
assert!(cast_ty.is_integral());
@ -190,9 +190,9 @@ pub fn pointer_expose_address_cast(
pub fn pointer_from_exposed_address_cast(
&mut self,
src: &ImmTy<'tcx, M::PointerTag>,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::PointerTag>> {
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
assert!(src.layout.ty.is_integral());
assert_matches!(cast_ty.kind(), ty::RawPtr(_));
@ -208,10 +208,10 @@ pub fn pointer_from_exposed_address_cast(
pub fn cast_from_int_like(
&self,
scalar: Scalar<M::PointerTag>, // input value (there is no ScalarTy so we separate data+layout)
scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout)
src_layout: TyAndLayout<'tcx>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
// Let's make sure v is sign-extended *if* it has a signed type.
let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
@ -245,9 +245,9 @@ pub fn cast_from_int_like(
})
}
fn cast_from_float<F>(&self, f: F, dest_ty: Ty<'tcx>) -> Scalar<M::PointerTag>
fn cast_from_float<F>(&self, f: F, dest_ty: Ty<'tcx>) -> Scalar<M::Provenance>
where
F: Float + Into<Scalar<M::PointerTag>> + FloatConvert<Single> + FloatConvert<Double>,
F: Float + Into<Scalar<M::Provenance>> + FloatConvert<Single> + FloatConvert<Double>,
{
use rustc_type_ir::sty::TyKind::*;
match *dest_ty.kind() {
@ -279,8 +279,8 @@ fn cast_from_float<F>(&self, f: F, dest_ty: Ty<'tcx>) -> Scalar<M::PointerTag>
fn unsize_into_ptr(
&mut self,
src: &OpTy<'tcx, M::PointerTag>,
dest: &PlaceTy<'tcx, M::PointerTag>,
src: &OpTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
// The pointee types
source_ty: Ty<'tcx>,
cast_ty: Ty<'tcx>,
@ -335,9 +335,9 @@ fn unsize_into_ptr(
fn unsize_into(
&mut self,
src: &OpTy<'tcx, M::PointerTag>,
src: &OpTy<'tcx, M::Provenance>,
cast_ty: TyAndLayout<'tcx>,
dest: &PlaceTy<'tcx, M::PointerTag>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty);
match (&src.layout.ty.kind(), &cast_ty.ty.kind()) {
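For context, the two expose/from-exposed methods above interpret the surface-level casts shown below; this is a runnable stable-Rust illustration, not interpreter code.

```rust
fn main() {
    let x = 42i32;
    let p = &x as *const i32;
    // ptr-to-int cast: handled by `pointer_expose_address_cast`,
    // which marks the pointer's provenance as exposed
    let addr = p as usize;
    // int-to-ptr cast: handled by `pointer_from_exposed_address_cast`,
    // which may pick up previously exposed provenance
    let q = addr as *const i32;
    // SAFETY: `q` points to `x`, whose provenance was exposed above.
    assert_eq!(unsafe { *q }, 42);
}
```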


@ -81,7 +81,7 @@ fn drop(&mut self) {
}
/// A stack frame.
pub struct Frame<'mir, 'tcx, Tag: Provenance = AllocId, Extra = ()> {
pub struct Frame<'mir, 'tcx, Prov: Provenance = AllocId, Extra = ()> {
////////////////////////////////////////////////////////////////////////////////
// Function and callsite information
////////////////////////////////////////////////////////////////////////////////
@ -102,7 +102,7 @@ pub struct Frame<'mir, 'tcx, Tag: Provenance = AllocId, Extra = ()> {
/// The location where the result of the current stack frame should be written to,
/// and its layout in the caller.
pub return_place: PlaceTy<'tcx, Tag>,
pub return_place: PlaceTy<'tcx, Prov>,
/// The list of locals for this stack frame, stored in order as
/// `[return_ptr, arguments..., variables..., temporaries...]`.
@ -111,7 +111,7 @@ pub struct Frame<'mir, 'tcx, Tag: Provenance = AllocId, Extra = ()> {
/// can either directly contain `Scalar` or refer to some part of an `Allocation`.
///
/// Do *not* access this directly; always go through the machine hook!
pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>,
pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>,
/// The span of the `tracing` crate is stored here.
/// When the guard is dropped, the span is exited. This gives us
@ -166,15 +166,15 @@ pub enum StackPopCleanup {
/// State of a local variable including a memoized layout
#[derive(Clone, Debug)]
pub struct LocalState<'tcx, Tag: Provenance = AllocId> {
pub value: LocalValue<Tag>,
pub struct LocalState<'tcx, Prov: Provenance = AllocId> {
pub value: LocalValue<Prov>,
/// Don't modify if `Some`, this is only used to prevent computing the layout twice
pub layout: Cell<Option<TyAndLayout<'tcx>>>,
}
/// Current value of a local variable
#[derive(Copy, Clone, Debug)] // Miri debug-prints these
pub enum LocalValue<Tag: Provenance = AllocId> {
pub enum LocalValue<Prov: Provenance = AllocId> {
/// This local is not currently alive, and cannot be used at all.
Dead,
/// A normal, live local.
@ -182,16 +182,16 @@ pub enum LocalValue<Tag: Provenance = AllocId> {
/// This is an optimization over just always having a pointer here;
/// we can thus avoid doing an allocation when the local just stores
/// immediate values *and* never has its address taken.
Live(Operand<Tag>),
Live(Operand<Prov>),
}
impl<'tcx, Tag: Provenance + 'static> LocalState<'tcx, Tag> {
impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
/// Read the local's value or error if the local is not yet live or not live anymore.
///
/// Note: This may only be invoked from the `Machine::access_local` hook and not from
/// anywhere else. You may be invalidating machine invariants if you do!
#[inline]
pub fn access(&self) -> InterpResult<'tcx, &Operand<Tag>> {
pub fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
match &self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
LocalValue::Live(val) => Ok(val),
@ -204,7 +204,7 @@ pub fn access(&self) -> InterpResult<'tcx, &Operand<Tag>> {
/// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
/// anywhere else. You may be invalidating machine invariants if you do!
#[inline]
pub fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Tag>> {
pub fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
match &mut self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
LocalValue::Live(val) => Ok(val),
@ -212,8 +212,8 @@ pub fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Tag>> {
}
}
impl<'mir, 'tcx, Tag: Provenance> Frame<'mir, 'tcx, Tag> {
pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Tag, Extra> {
impl<'mir, 'tcx, Prov: Provenance> Frame<'mir, 'tcx, Prov> {
pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Prov, Extra> {
Frame {
body: self.body,
instance: self.instance,
@ -227,7 +227,7 @@ pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Tag, Extra> {
}
}
impl<'mir, 'tcx, Tag: Provenance, Extra> Frame<'mir, 'tcx, Tag, Extra> {
impl<'mir, 'tcx, Prov: Provenance, Extra> Frame<'mir, 'tcx, Prov, Extra> {
/// Get the current location within the Frame.
///
/// If this is `Err`, we are not currently executing any particular statement in
@ -422,14 +422,14 @@ pub fn cur_span(&self) -> Span {
}
#[inline(always)]
pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] {
pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] {
M::stack(self)
}
#[inline(always)]
pub(crate) fn stack_mut(
&mut self,
) -> &mut Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>> {
) -> &mut Vec<Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>> {
M::stack_mut(self)
}
@ -441,12 +441,12 @@ pub fn frame_idx(&self) -> usize {
}
#[inline(always)]
pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
pub fn frame(&self) -> &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra> {
self.stack().last().expect("no call frames exist")
}
#[inline(always)]
pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::Provenance, M::FrameExtra> {
self.stack_mut().last_mut().expect("no call frames exist")
}
@ -503,7 +503,7 @@ pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFold
/// stack frame), to bring it into the proper environment for this interpreter.
pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
&self,
frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
value: T,
) -> Result<T, InterpError<'tcx>> {
frame
@ -540,7 +540,7 @@ pub(super) fn resolve(
#[inline(always)]
pub fn layout_of_local(
&self,
frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
local: mir::Local,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
@ -569,7 +569,7 @@ pub fn layout_of_local(
/// This can fail to provide an answer for extern types.
pub(super) fn size_and_align_of(
&self,
metadata: &MemPlaceMeta<M::PointerTag>,
metadata: &MemPlaceMeta<M::Provenance>,
layout: &TyAndLayout<'tcx>,
) -> InterpResult<'tcx, Option<(Size, Align)>> {
if !layout.is_unsized() {
@ -655,7 +655,7 @@ pub(super) fn size_and_align_of(
#[inline]
pub fn size_and_align_of_mplace(
&self,
mplace: &MPlaceTy<'tcx, M::PointerTag>,
mplace: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Option<(Size, Align)>> {
self.size_and_align_of(&mplace.meta, &mplace.layout)
}
@ -665,7 +665,7 @@ pub fn push_stack_frame(
&mut self,
instance: ty::Instance<'tcx>,
body: &'mir mir::Body<'tcx>,
return_place: &PlaceTy<'tcx, M::PointerTag>,
return_place: &PlaceTy<'tcx, M::Provenance>,
return_to_block: StackPopCleanup,
) -> InterpResult<'tcx> {
trace!("body: {:#?}", body);
@ -891,7 +891,7 @@ pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
}
#[instrument(skip(self), level = "debug")]
fn deallocate_local(&mut self, local: LocalValue<M::PointerTag>) -> InterpResult<'tcx> {
fn deallocate_local(&mut self, local: LocalValue<M::Provenance>) -> InterpResult<'tcx> {
if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
// All locals have a backing allocation, even if the allocation is empty
// due to the local having ZST type. Hence we can `unwrap`.
@ -909,7 +909,7 @@ fn deallocate_local(&mut self, local: LocalValue<M::PointerTag>) -> InterpResult
pub fn eval_to_allocation(
&self,
gid: GlobalId<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
// For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
// and thus don't care about the parameter environment. While we could just use
// `self.param_env`, that would mean we invoke the query to evaluate the static
@ -927,7 +927,7 @@ pub fn eval_to_allocation(
}
#[must_use]
pub fn dump_place(&self, place: Place<M::PointerTag>) -> PlacePrinter<'_, 'mir, 'tcx, M> {
pub fn dump_place(&self, place: Place<M::Provenance>) -> PlacePrinter<'_, 'mir, 'tcx, M> {
PlacePrinter { ecx: self, place }
}
@ -956,7 +956,7 @@ pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> {
/// Helper struct for the `dump_place` function.
pub struct PlacePrinter<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
ecx: &'a InterpCx<'mir, 'tcx, M>,
place: Place<M::PointerTag>,
place: Place<M::Provenance>,
}
impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
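A hedged, self-contained sketch of the live/dead local pattern from `LocalValue` and `LocalState::access` earlier in this file, with simplified stand-in types:

```rust
#[derive(Debug)]
enum LocalValue<V> {
    Dead,
    Live(V), // the real enum stores an `Operand<Prov>` here
}

impl<V> LocalValue<V> {
    // mirrors `LocalState::access`: reading a dead local is an error
    fn access(&self) -> Result<&V, &'static str> {
        match self {
            LocalValue::Dead => Err("DeadLocal"),
            LocalValue::Live(v) => Ok(v),
        }
    }
}

fn main() {
    assert_eq!(LocalValue::Live(7).access(), Ok(&7));
    assert!(LocalValue::<u64>::Dead.access().is_err());
}
```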


@ -33,7 +33,7 @@ pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
'mir,
'tcx,
MemoryKind = T,
PointerTag = AllocId,
Provenance = AllocId,
ExtraFnVal = !,
FrameExtra = (),
AllocExtra = (),
@ -474,7 +474,7 @@ pub fn intern_with_temp_alloc(
layout: TyAndLayout<'tcx>,
f: impl FnOnce(
&mut InterpCx<'mir, 'tcx, M>,
&PlaceTy<'tcx, M::PointerTag>,
&PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ()>,
) -> InterpResult<'tcx, ConstAllocation<'tcx>> {
let dest = self.allocate(layout, MemoryKind::Stack)?;


@ -25,7 +25,7 @@
mod caller_location;
mod type_name;
fn numeric_intrinsic<Tag>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Tag> {
fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Prov> {
let size = match kind {
Primitive::Int(integer, _) => integer.size(),
_ => bug!("invalid `{}` argument: {:?}", name, bits),
@ -114,8 +114,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn emulate_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, M::PointerTag>],
dest: &PlaceTy<'tcx, M::PointerTag>,
args: &[OpTy<'tcx, M::Provenance>],
dest: &PlaceTy<'tcx, M::Provenance>,
ret: Option<mir::BasicBlock>,
) -> InterpResult<'tcx, bool> {
let substs = instance.substs;
@ -502,9 +502,9 @@ pub fn emulate_intrinsic(
pub fn exact_div(
&mut self,
a: &ImmTy<'tcx, M::PointerTag>,
b: &ImmTy<'tcx, M::PointerTag>,
dest: &PlaceTy<'tcx, M::PointerTag>,
a: &ImmTy<'tcx, M::Provenance>,
b: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
// Performs an exact division, resulting in undefined behavior where
// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
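That contract, modeled in surface Rust (an illustration in which the intrinsic's UB cases become `None`; this is not the interpreter's implementation):

```rust
fn checked_exact_div(x: i32, y: i32) -> Option<i32> {
    // `exact_div` is UB in exactly these cases; model them as `None`
    if y == 0 || (x == i32::MIN && y == -1) || x % y != 0 {
        None
    } else {
        Some(x / y)
    }
}

fn main() {
    assert_eq!(checked_exact_div(12, 3), Some(4));
    assert_eq!(checked_exact_div(7, 2), None); // remainder is nonzero
    assert_eq!(checked_exact_div(i32::MIN, -1), None); // would overflow
}
```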
@ -521,9 +521,9 @@ pub fn exact_div(
pub fn saturating_arith(
&self,
mir_op: BinOp,
l: &ImmTy<'tcx, M::PointerTag>,
r: &ImmTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
l: &ImmTy<'tcx, M::Provenance>,
r: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
let (val, overflowed, _ty) = self.overflowing_binary_op(mir_op, l, r)?;
Ok(if overflowed {
@ -566,10 +566,10 @@ pub fn saturating_arith(
/// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
pub fn ptr_offset_inbounds(
&self,
ptr: Pointer<Option<M::PointerTag>>,
ptr: Pointer<Option<M::Provenance>>,
pointee_ty: Ty<'tcx>,
offset_count: i64,
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
// We cannot overflow i64 as a type's size must be <= isize::MAX.
let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
// The computed offset, in bytes, must not overflow an isize.
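At the surface level this is the rule behind `<*const T>::add`/`offset`: the result must stay within the same allocation. A small stable-Rust illustration:

```rust
fn main() {
    let a = [10u8, 20, 30, 40];
    let p = a.as_ptr();
    // SAFETY: moving 3 elements forward stays inside `a`, which is
    // exactly the in-bounds condition `ptr_offset_inbounds` enforces.
    let q = unsafe { p.add(3) };
    assert_eq!(unsafe { *q }, 40);
}
```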
@ -597,9 +597,9 @@ pub fn ptr_offset_inbounds(
/// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
pub(crate) fn copy_intrinsic(
&mut self,
src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
let count = self.read_scalar(&count)?.to_machine_usize(self)?;
@ -622,9 +622,9 @@ pub(crate) fn copy_intrinsic(
pub(crate) fn write_bytes_intrinsic(
&mut self,
dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
byte: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
byte: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
) -> InterpResult<'tcx> {
let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap().ty)?;
@ -645,9 +645,9 @@ pub(crate) fn write_bytes_intrinsic(
pub(crate) fn raw_eq_intrinsic(
&mut self,
lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
assert!(!layout.is_unsized());
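For reference, the copy intrinsic handled above corresponds to `ptr::copy`/`ptr::copy_nonoverlapping` at the surface, with the element-count semantics from its doc comment ("`count*size_of::<T>()` many bytes"):

```rust
fn main() {
    let src = [1u32, 2, 3, 4];
    let mut dst = [0u32; 4];
    // `copy_intrinsic` with `nonoverlapping: true` models this call;
    // the count is in elements, so 4 * size_of::<u32>() bytes move
    unsafe { std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), 4) };
    assert_eq!(src, dst);
}
```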


@ -79,7 +79,7 @@ pub(crate) fn alloc_caller_location(
filename: Symbol,
line: u32,
col: u32,
) -> MPlaceTy<'tcx, M::PointerTag> {
) -> MPlaceTy<'tcx, M::Provenance> {
let loc_details = &self.tcx.sess.opts.unstable_opts.location_detail;
let file = if loc_details.file {
self.allocate_str(filename.as_str(), MemoryKind::CallerLocation, Mutability::Not)
@ -123,7 +123,7 @@ pub(crate) fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32)
)
}
pub fn alloc_caller_location_for_span(&mut self, span: Span) -> MPlaceTy<'tcx, M::PointerTag> {
pub fn alloc_caller_location_for_span(&mut self, span: Span) -> MPlaceTy<'tcx, M::Provenance> {
let (file, line, column) = self.location_triple_for_span(span);
self.alloc_caller_location(file, line, column)
}
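What `alloc_caller_location` materializes is the (file, line, column) triple behind `core::panic::Location`; at the surface it backs code like this:

```rust
#[track_caller]
fn here() -> &'static std::panic::Location<'static> {
    // the interpreter allocates exactly this file/line/column triple
    std::panic::Location::caller()
}

fn main() {
    let loc = here();
    println!("{}:{}:{}", loc.file(), loc.line(), loc.column());
}
```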


@ -85,11 +85,11 @@ pub trait Machine<'mir, 'tcx>: Sized {
type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;
/// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
type PointerTag: Provenance + Eq + Hash + 'static;
type Provenance: Provenance + Eq + Hash + 'static;
/// When getting the AllocId of a pointer, some extra data is also obtained from the tag
/// When getting the AllocId of a pointer, some extra data is also obtained from the provenance
/// that is passed to memory access hooks so they can do things with it.
type TagExtra: Copy + 'static;
type ProvenanceExtra: Copy + 'static;
/// Machines can define extra (non-instance) things that represent values of function pointers.
/// For example, Miri uses this to return a function pointer from `dlsym`
@ -105,7 +105,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// Memory's allocation map
type MemoryMap: AllocMap<
AllocId,
(MemoryKind<Self::MemoryKind>, Allocation<Self::PointerTag, Self::AllocExtra>),
(MemoryKind<Self::MemoryKind>, Allocation<Self::Provenance, Self::AllocExtra>),
> + Default
+ Clone;
@ -113,7 +113,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// or None if such memory should not be mutated and thus any such attempt will cause
/// a `ModifiedStatic` error to be raised.
/// Statics are copied under two circumstances: When they are mutated, and when
/// `tag_allocation` (see below) returns an owned allocation
/// `adjust_allocation` (see below) returns an owned allocation
/// that is added to the memory so that the work is not done twice.
const GLOBAL_KIND: Option<Self::MemoryKind>;
@ -126,7 +126,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// Whether, when checking alignment, we should `force_int` and thus support
/// custom alignment logic based on whatever the integer address happens to be.
///
/// Requires PointerTag::OFFSET_IS_ADDR to be true.
/// Requires Provenance::OFFSET_IS_ADDR to be true.
fn force_int_for_alignment_check(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
/// Whether to enforce the validity invariant
@ -170,8 +170,8 @@ fn find_mir_or_eval_fn(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
abi: CallAbi,
args: &[OpTy<'tcx, Self::PointerTag>],
destination: &PlaceTy<'tcx, Self::PointerTag>,
args: &[OpTy<'tcx, Self::Provenance>],
destination: &PlaceTy<'tcx, Self::Provenance>,
target: Option<mir::BasicBlock>,
unwind: StackPopUnwind,
) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>>;
@ -182,8 +182,8 @@ fn call_extra_fn(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
fn_val: Self::ExtraFnVal,
abi: CallAbi,
args: &[OpTy<'tcx, Self::PointerTag>],
destination: &PlaceTy<'tcx, Self::PointerTag>,
args: &[OpTy<'tcx, Self::Provenance>],
destination: &PlaceTy<'tcx, Self::Provenance>,
target: Option<mir::BasicBlock>,
unwind: StackPopUnwind,
) -> InterpResult<'tcx>;
@ -193,8 +193,8 @@ fn call_extra_fn(
fn call_intrinsic(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Self::PointerTag>],
destination: &PlaceTy<'tcx, Self::PointerTag>,
args: &[OpTy<'tcx, Self::Provenance>],
destination: &PlaceTy<'tcx, Self::Provenance>,
target: Option<mir::BasicBlock>,
unwind: StackPopUnwind,
) -> InterpResult<'tcx>;
@ -217,18 +217,18 @@ fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _msg: String) -> InterpResult<'t
fn binary_ptr_op(
ecx: &InterpCx<'mir, 'tcx, Self>,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, Self::PointerTag>,
right: &ImmTy<'tcx, Self::PointerTag>,
) -> InterpResult<'tcx, (Scalar<Self::PointerTag>, bool, Ty<'tcx>)>;
left: &ImmTy<'tcx, Self::Provenance>,
right: &ImmTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)>;
/// Called to read the specified `local` from the `frame`.
/// Since reading a ZST is not actually accessing memory or locals, this is never invoked
/// for ZST reads.
#[inline]
fn access_local<'a>(
frame: &'a Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
frame: &'a Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
local: mir::Local,
) -> InterpResult<'tcx, &'a Operand<Self::PointerTag>>
) -> InterpResult<'tcx, &'a Operand<Self::Provenance>>
where
'tcx: 'mir,
{
@ -243,7 +243,7 @@ fn access_local_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
frame: usize,
local: mir::Local,
) -> InterpResult<'tcx, &'a mut Operand<Self::PointerTag>>
) -> InterpResult<'tcx, &'a mut Operand<Self::Provenance>>
where
'tcx: 'mir,
{
@ -275,7 +275,7 @@ fn before_access_global(
fn thread_local_static_base_pointer(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
def_id: DefId,
) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
) -> InterpResult<'tcx, Pointer<Self::Provenance>> {
throw_unsup!(ThreadLocalStatic(def_id))
}
@ -283,35 +283,35 @@ fn thread_local_static_base_pointer(
fn extern_static_base_pointer(
ecx: &InterpCx<'mir, 'tcx, Self>,
def_id: DefId,
) -> InterpResult<'tcx, Pointer<Self::PointerTag>>;
) -> InterpResult<'tcx, Pointer<Self::Provenance>>;
/// Return a "base" pointer for the given allocation: the one that is used for direct
/// accesses to this static/const/fn allocation, or the one returned from the heap allocator.
///
/// Not called on `extern` or thread-local statics (those use the methods above).
fn tag_alloc_base_pointer(
fn adjust_alloc_base_pointer(
ecx: &InterpCx<'mir, 'tcx, Self>,
ptr: Pointer,
) -> Pointer<Self::PointerTag>;
) -> Pointer<Self::Provenance>;
/// "Int-to-pointer cast"
fn ptr_from_addr_cast(
ecx: &InterpCx<'mir, 'tcx, Self>,
addr: u64,
) -> InterpResult<'tcx, Pointer<Option<Self::PointerTag>>>;
) -> InterpResult<'tcx, Pointer<Option<Self::Provenance>>>;
/// Hook for returning a pointer from a transmute-like operation on an addr.
/// This is only needed to support Miri's (unsound) "allow-ptr-int-transmute" flag.
fn ptr_from_addr_transmute(
ecx: &InterpCx<'mir, 'tcx, Self>,
addr: u64,
) -> Pointer<Option<Self::PointerTag>>;
) -> Pointer<Option<Self::Provenance>>;
/// Marks a pointer as exposed, allowing its provenance
/// to be recovered. "Pointer-to-int cast"
fn expose_ptr(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
ptr: Pointer<Self::PointerTag>,
ptr: Pointer<Self::Provenance>,
) -> InterpResult<'tcx>;
/// Convert a pointer with provenance into an allocation-offset pair
@ -322,30 +322,30 @@ fn expose_ptr(
/// When this fails, that means the pointer does not point to a live allocation.
fn ptr_get_alloc(
ecx: &InterpCx<'mir, 'tcx, Self>,
ptr: Pointer<Self::PointerTag>,
) -> Option<(AllocId, Size, Self::TagExtra)>;
ptr: Pointer<Self::Provenance>,
) -> Option<(AllocId, Size, Self::ProvenanceExtra)>;
/// Called to initialize the "extra" state of an allocation and make the pointers
/// it contains (in relocations) tagged. The way we construct allocations is
/// to always first construct it without extra and then add the extra.
/// This keeps uniform code paths for handling both allocations created by CTFE
/// for globals, and allocations created by Miri during evaluation.
/// Called to adjust allocations to the Provenance and AllocExtra of this machine.
///
/// `kind` is the kind of the allocation being tagged; it can be `None` when
/// The way we construct allocations is to always first construct it without extra and then add
/// the extra. This keeps uniform code paths for handling both allocations created by CTFE for
/// globals, and allocations created by Miri during evaluation.
///
/// `kind` is the kind of the allocation being adjusted; it can be `None` when
/// it's a global and `GLOBAL_KIND` is `None`.
///
/// This should avoid copying if no work has to be done! If this returns an owned
/// allocation (because a copy had to be done to add tags or metadata), machine memory will
/// allocation (because a copy had to be done to adjust things), machine memory will
/// cache the result. (This relies on `AllocMap::get_or` being able to add the
/// owned allocation to the map even when the map is shared.)
///
/// This must only fail if `alloc` contains relocations.
fn init_allocation_extra<'b>(
fn adjust_allocation<'b>(
ecx: &InterpCx<'mir, 'tcx, Self>,
id: AllocId,
alloc: Cow<'b, Allocation>,
kind: Option<MemoryKind<Self::MemoryKind>>,
) -> InterpResult<'tcx, Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>>;
) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra>>>;
/// Hook for performing extra checks on a memory read access.
///
@ -357,7 +357,7 @@ fn memory_read(
_tcx: TyCtxt<'tcx>,
_machine: &Self,
_alloc_extra: &Self::AllocExtra,
_tag: (AllocId, Self::TagExtra),
_prov: (AllocId, Self::ProvenanceExtra),
_range: AllocRange,
) -> InterpResult<'tcx> {
Ok(())
@ -369,7 +369,7 @@ fn memory_written(
_tcx: TyCtxt<'tcx>,
_machine: &mut Self,
_alloc_extra: &mut Self::AllocExtra,
_tag: (AllocId, Self::TagExtra),
_prov: (AllocId, Self::ProvenanceExtra),
_range: AllocRange,
) -> InterpResult<'tcx> {
Ok(())
@ -381,7 +381,7 @@ fn memory_deallocated(
_tcx: TyCtxt<'tcx>,
_machine: &mut Self,
_alloc_extra: &mut Self::AllocExtra,
_tag: (AllocId, Self::TagExtra),
_prov: (AllocId, Self::ProvenanceExtra),
_range: AllocRange,
) -> InterpResult<'tcx> {
Ok(())
@ -392,7 +392,7 @@ fn memory_deallocated(
fn retag(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_kind: mir::RetagKind,
_place: &PlaceTy<'tcx, Self::PointerTag>,
_place: &PlaceTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx> {
Ok(())
}
@ -400,18 +400,18 @@ fn retag(
/// Called immediately before a new stack frame gets pushed.
fn init_frame_extra(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
frame: Frame<'mir, 'tcx, Self::PointerTag>,
) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;
frame: Frame<'mir, 'tcx, Self::Provenance>,
) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>;
/// Borrow the current thread's stack.
fn stack<'a>(
ecx: &'a InterpCx<'mir, 'tcx, Self>,
) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>];
) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>];
/// Mutably borrow the current thread's stack.
fn stack_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>;
/// Called immediately after a stack frame got pushed and its locals got initialized.
fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
@ -422,7 +422,7 @@ fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx>
/// The `locals` have already been destroyed!
fn after_stack_pop(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_frame: Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
_frame: Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
unwinding: bool,
) -> InterpResult<'tcx, StackPopJump> {
// By default, we do not support unwinding from panics
@ -434,8 +434,8 @@ fn after_stack_pop(
// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
// (CTFE and ConstProp) use the same instance. Here, we share that code.
pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
type PointerTag = AllocId;
type TagExtra = ();
type Provenance = AllocId;
type ProvenanceExtra = ();
type ExtraFnVal = !;
@ -485,7 +485,7 @@ fn call_extra_fn(
fn_val: !,
_abi: CallAbi,
_args: &[OpTy<$tcx>],
_destination: &PlaceTy<$tcx, Self::PointerTag>,
_destination: &PlaceTy<$tcx, Self::Provenance>,
_target: Option<mir::BasicBlock>,
_unwind: StackPopUnwind,
) -> InterpResult<$tcx> {
@ -493,13 +493,12 @@ fn call_extra_fn(
}
#[inline(always)]
fn init_allocation_extra<'b>(
fn adjust_allocation<'b>(
_ecx: &InterpCx<$mir, $tcx, Self>,
_id: AllocId,
alloc: Cow<'b, Allocation>,
_kind: Option<MemoryKind<Self::MemoryKind>>,
) -> InterpResult<$tcx, Cow<'b, Allocation<Self::PointerTag>>> {
// We do not use a tag so we can just cheaply forward the allocation
) -> InterpResult<$tcx, Cow<'b, Allocation<Self::Provenance>>> {
Ok(alloc)
}
@ -512,7 +511,7 @@ fn extern_static_base_pointer(
}
#[inline(always)]
fn tag_alloc_base_pointer(
fn adjust_alloc_base_pointer(
_ecx: &InterpCx<$mir, $tcx, Self>,
ptr: Pointer<AllocId>,
) -> Pointer<AllocId> {
@ -541,7 +540,7 @@ fn ptr_from_addr_cast(
fn ptr_get_alloc(
_ecx: &InterpCx<$mir, $tcx, Self>,
ptr: Pointer<AllocId>,
) -> Option<(AllocId, Size, Self::TagExtra)> {
) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
// We know `offset` is relative to the allocation, so we can use `into_parts`.
let (alloc_id, offset) = ptr.into_parts();
Some((alloc_id, offset, ()))
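A before/after sketch of what the rename means for a machine implementation (an illustrative trait, not rustc's actual `Machine`):

```rust
trait Machine {
    type Provenance: Copy;      // was: type PointerTag
    type ProvenanceExtra: Copy; // was: type TagExtra
}

struct ConstEvalMachine;

impl Machine for ConstEvalMachine {
    // compile-time machines keep using the allocation ID itself,
    // matching `type Provenance = AllocId;` in the macro above
    type Provenance = u64; // stand-in for `AllocId`
    type ProvenanceExtra = ();
}

fn main() {}
```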


@ -112,16 +112,16 @@ pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
pub struct AllocRef<'a, 'tcx, Tag, Extra> {
alloc: &'a Allocation<Tag, Extra>,
pub struct AllocRef<'a, 'tcx, Prov, Extra> {
alloc: &'a Allocation<Prov, Extra>,
range: AllocRange,
tcx: TyCtxt<'tcx>,
alloc_id: AllocId,
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
pub struct AllocRefMut<'a, 'tcx, Tag, Extra> {
alloc: &'a mut Allocation<Tag, Extra>,
pub struct AllocRefMut<'a, 'tcx, Prov, Extra> {
alloc: &'a mut Allocation<Prov, Extra>,
range: AllocRange,
tcx: TyCtxt<'tcx>,
alloc_id: AllocId,
@ -156,7 +156,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn global_base_pointer(
&self,
ptr: Pointer<AllocId>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
) -> InterpResult<'tcx, Pointer<M::Provenance>> {
let alloc_id = ptr.provenance;
// We need to handle `extern static`.
match self.tcx.get_global_alloc(alloc_id) {
@ -168,14 +168,14 @@ pub fn global_base_pointer(
}
_ => {}
}
// And we need to get the tag.
Ok(M::tag_alloc_base_pointer(self, ptr))
// And we need to get the provenance.
Ok(M::adjust_alloc_base_pointer(self, ptr))
}
pub fn create_fn_alloc_ptr(
&mut self,
fn_val: FnVal<'tcx, M::ExtraFnVal>,
) -> Pointer<M::PointerTag> {
) -> Pointer<M::Provenance> {
let id = match fn_val {
FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
FnVal::Other(extra) => {
@ -196,7 +196,7 @@ pub fn allocate_ptr(
size: Size,
align: Align,
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
) -> InterpResult<'tcx, Pointer<M::Provenance>> {
let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?;
// We can `unwrap` since `alloc` contains no pointers.
Ok(self.allocate_raw_ptr(alloc, kind).unwrap())
@ -208,7 +208,7 @@ pub fn allocate_bytes_ptr(
align: Align,
kind: MemoryKind<M::MemoryKind>,
mutability: Mutability,
) -> Pointer<M::PointerTag> {
) -> Pointer<M::Provenance> {
let alloc = Allocation::from_bytes(bytes, align, mutability);
// We can `unwrap` since `alloc` contains no pointers.
self.allocate_raw_ptr(alloc, kind).unwrap()
@ -219,27 +219,27 @@ pub fn allocate_raw_ptr(
&mut self,
alloc: Allocation,
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
) -> InterpResult<'tcx, Pointer<M::Provenance>> {
let id = self.tcx.reserve_alloc_id();
debug_assert_ne!(
Some(kind),
M::GLOBAL_KIND.map(MemoryKind::Machine),
"dynamically allocating global memory"
);
let alloc = M::init_allocation_extra(self, id, Cow::Owned(alloc), Some(kind))?;
let alloc = M::adjust_allocation(self, id, Cow::Owned(alloc), Some(kind))?;
self.memory.alloc_map.insert(id, (kind, alloc.into_owned()));
Ok(M::tag_alloc_base_pointer(self, Pointer::from(id)))
Ok(M::adjust_alloc_base_pointer(self, Pointer::from(id)))
}
pub fn reallocate_ptr(
&mut self,
ptr: Pointer<Option<M::PointerTag>>,
ptr: Pointer<Option<M::Provenance>>,
old_size_and_align: Option<(Size, Align)>,
new_size: Size,
new_align: Align,
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr)?;
) -> InterpResult<'tcx, Pointer<M::Provenance>> {
let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
if offset.bytes() != 0 {
throw_ub_format!(
"reallocating {:?} which does not point to the beginning of an object",
@ -271,11 +271,11 @@ pub fn reallocate_ptr(
#[instrument(skip(self), level = "debug")]
pub fn deallocate_ptr(
&mut self,
ptr: Pointer<Option<M::PointerTag>>,
ptr: Pointer<Option<M::Provenance>>,
old_size_and_align: Option<(Size, Align)>,
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx> {
let (alloc_id, offset, tag) = self.ptr_get_alloc_id(ptr)?;
let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr)?;
trace!("deallocating: {alloc_id:?}");
if offset.bytes() != 0 {
@ -327,7 +327,7 @@ pub fn deallocate_ptr(
*self.tcx,
&mut self.machine,
&mut alloc.extra,
(alloc_id, tag),
(alloc_id, prov),
alloc_range(Size::ZERO, size),
)?;
@ -344,19 +344,19 @@ pub fn deallocate_ptr(
#[inline(always)]
fn get_ptr_access(
&self,
ptr: Pointer<Option<M::PointerTag>>,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
align: Align,
) -> InterpResult<'tcx, Option<(AllocId, Size, M::TagExtra)>> {
) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
let align = M::enforce_alignment(&self).then_some(align);
self.check_and_deref_ptr(
ptr,
size,
align,
CheckInAllocMsg::MemoryAccessTest,
|alloc_id, offset, tag| {
|alloc_id, offset, prov| {
let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
Ok((size, align, (alloc_id, offset, tag)))
Ok((size, align, (alloc_id, offset, prov)))
},
)
}
@ -367,7 +367,7 @@ fn get_ptr_access(
#[inline(always)]
pub fn check_ptr_access_align(
&self,
ptr: Pointer<Option<M::PointerTag>>,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
align: Align,
msg: CheckInAllocMsg,
@ -385,11 +385,15 @@ pub fn check_ptr_access_align(
/// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
fn check_and_deref_ptr<T>(
&self,
ptr: Pointer<Option<M::PointerTag>>,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
align: Option<Align>,
msg: CheckInAllocMsg,
alloc_size: impl FnOnce(AllocId, Size, M::TagExtra) -> InterpResult<'tcx, (Size, Align, T)>,
alloc_size: impl FnOnce(
AllocId,
Size,
M::ProvenanceExtra,
) -> InterpResult<'tcx, (Size, Align, T)>,
) -> InterpResult<'tcx, Option<T>> {
fn check_offset_align<'tcx>(offset: u64, align: Align) -> InterpResult<'tcx> {
if offset % align.bytes() == 0 {
@ -417,8 +421,8 @@ fn check_offset_align<'tcx>(offset: u64, align: Align) -> InterpResult<'tcx> {
}
None
}
Ok((alloc_id, offset, tag)) => {
let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, tag)?;
Ok((alloc_id, offset, prov)) => {
let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
// Test bounds. This also ensures non-null.
// It is sufficient to check this for the end pointer. Also check for overflow!
if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
@ -431,7 +435,7 @@ fn check_offset_align<'tcx>(offset: u64, align: Align) -> InterpResult<'tcx> {
})
}
// Ensure we never consider the null pointer dereferenceable.
if M::PointerTag::OFFSET_IS_ADDR {
if M::Provenance::OFFSET_IS_ADDR {
assert_ne!(ptr.addr(), Size::ZERO);
}
// Test align. Check this last; if both bounds and alignment are violated
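The alignment test used by `check_offset_align` above, in isolation (a sketch with plain integers in place of `Size`/`Align`):

```rust
fn is_aligned(offset: u64, align: u64) -> bool {
    debug_assert!(align.is_power_of_two());
    offset % align == 0
}

fn main() {
    assert!(is_aligned(16, 8));
    assert!(!is_aligned(12, 8));
}
```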
@ -462,13 +466,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Helper function to obtain a global (tcx) allocation.
/// This attempts to return a reference to an existing allocation if
/// one can be found in `tcx`. That, however, is only possible if `tcx` and
/// this machine use the same pointer tag, so it is indirected through
/// `M::tag_allocation`.
/// this machine use the same pointer provenance, so it is indirected through
/// `M::adjust_allocation`.
fn get_global_alloc(
&self,
id: AllocId,
is_write: bool,
) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra>>> {
let (alloc, def_id) = match self.tcx.get_global_alloc(id) {
Some(GlobalAlloc::Memory(mem)) => {
// Memory of a constant or promoted or anonymous memory referenced by a static.
@ -499,7 +503,7 @@ fn get_global_alloc(
};
M::before_access_global(*self.tcx, &self.machine, id, alloc, def_id, is_write)?;
// We got tcx memory. Let the machine initialize its "extra" stuff.
M::init_allocation_extra(
M::adjust_allocation(
self,
id, // always use the ID we got as input, not the "hidden" one.
Cow::Borrowed(alloc.inner()),
@ -512,11 +516,11 @@ fn get_global_alloc(
fn get_alloc_raw(
&self,
id: AllocId,
) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra>> {
// The error type of the inner closure here is somewhat funny. We have two
// ways of "erroring": An actual error, or because we got a reference from
// `get_global_alloc` that we can actually use directly without inserting anything anywhere.
// So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
// So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
let a = self.memory.alloc_map.get_or(id, || {
let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
match alloc {
@ -545,24 +549,24 @@ fn get_alloc_raw(
/// "Safe" (bounds and align-checked) allocation access.
pub fn get_ptr_alloc<'a>(
&'a self,
ptr: Pointer<Option<M::PointerTag>>,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
align: Align,
) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
let align = M::enforce_alignment(self).then_some(align);
let ptr_and_alloc = self.check_and_deref_ptr(
ptr,
size,
align,
CheckInAllocMsg::MemoryAccessTest,
|alloc_id, offset, tag| {
|alloc_id, offset, prov| {
let alloc = self.get_alloc_raw(alloc_id)?;
Ok((alloc.size(), alloc.align, (alloc_id, offset, tag, alloc)))
Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
},
)?;
if let Some((alloc_id, offset, tag, alloc)) = ptr_and_alloc {
if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
let range = alloc_range(offset, size);
M::memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, tag), range)?;
M::memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?;
Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
} else {
// Even in this branch we have to be sure that we actually access the allocation, in
@ -586,7 +590,7 @@ pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::A
fn get_alloc_raw_mut(
&mut self,
id: AllocId,
) -> InterpResult<'tcx, (&mut Allocation<M::PointerTag, M::AllocExtra>, &mut M)> {
) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra>, &mut M)> {
// We have "NLL problem case #3" here, which cannot be worked around without loss of
// efficiency even for the common case where the key is in the map.
// <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
@ -612,18 +616,18 @@ fn get_alloc_raw_mut(
/// "Safe" (bounds and align-checked) allocation access.
pub fn get_ptr_alloc_mut<'a>(
&'a mut self,
ptr: Pointer<Option<M::PointerTag>>,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
align: Align,
) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
let parts = self.get_ptr_access(ptr, size, align)?;
if let Some((alloc_id, offset, tag)) = parts {
if let Some((alloc_id, offset, prov)) = parts {
let tcx = *self.tcx;
// FIXME: can we somehow avoid looking up the allocation twice here?
// We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
let (alloc, machine) = self.get_alloc_raw_mut(alloc_id)?;
let range = alloc_range(offset, size);
M::memory_written(tcx, machine, &mut alloc.extra, (alloc_id, tag), range)?;
M::memory_written(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
} else {
Ok(None)
@ -710,10 +714,10 @@ fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
pub fn get_ptr_fn(
&self,
ptr: Pointer<Option<M::PointerTag>>,
ptr: Pointer<Option<M::Provenance>>,
) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
trace!("get_fn({:?})", ptr);
let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr)?;
let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
if offset.bytes() != 0 {
throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
}
@ -759,7 +763,7 @@ pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
// This is a new allocation, add its relocations to `todo`.
if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
todo.extend(
alloc.relocations().values().filter_map(|tag| tag.get_alloc_id()),
alloc.relocations().values().filter_map(|prov| prov.get_alloc_id()),
);
}
}
@ -788,14 +792,14 @@ pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Cannot be a closure because it is generic in `Tag`, `Extra`.
fn write_allocation_track_relocs<'tcx, Tag: Provenance, Extra>(
// Cannot be a closure because it is generic in `Prov`, `Extra`.
fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra>(
fmt: &mut std::fmt::Formatter<'_>,
tcx: TyCtxt<'tcx>,
allocs_to_print: &mut VecDeque<AllocId>,
alloc: &Allocation<Tag, Extra>,
alloc: &Allocation<Prov, Extra>,
) -> std::fmt::Result {
for alloc_id in alloc.relocations().values().filter_map(|tag| tag.get_alloc_id()) {
for alloc_id in alloc.relocations().values().filter_map(|prov| prov.get_alloc_id()) {
allocs_to_print.push_back(alloc_id);
}
write!(fmt, "{}", display_allocation(tcx, alloc))
@ -854,12 +858,12 @@ fn write_allocation_track_relocs<'tcx, Tag: Provenance, Extra>(
}
/// Reading and writing.
impl<'tcx, 'a, Tag: Provenance, Extra> AllocRefMut<'a, 'tcx, Tag, Extra> {
impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
/// `range` is relative to this allocation reference, not the base of the allocation.
pub fn write_scalar(
&mut self,
range: AllocRange,
val: ScalarMaybeUninit<Tag>,
val: ScalarMaybeUninit<Prov>,
) -> InterpResult<'tcx> {
let range = self.range.subrange(range);
debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
@ -873,7 +877,7 @@ pub fn write_scalar(
pub fn write_ptr_sized(
&mut self,
offset: Size,
val: ScalarMaybeUninit<Tag>,
val: ScalarMaybeUninit<Prov>,
) -> InterpResult<'tcx> {
self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
}
@ -887,13 +891,13 @@ pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
}
}
impl<'tcx, 'a, Tag: Provenance, Extra> AllocRef<'a, 'tcx, Tag, Extra> {
impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
/// `range` is relative to this allocation reference, not the base of the allocation.
pub fn read_scalar(
&self,
range: AllocRange,
read_provenance: bool,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> {
let range = self.range.subrange(range);
let res = self
.alloc
@ -904,12 +908,12 @@ pub fn read_scalar(
}
/// `range` is relative to this allocation reference, not the base of the allocation.
pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> {
self.read_scalar(range, /*read_provenance*/ false)
}
/// `offset` is relative to this allocation reference, not the base of the allocation.
pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> {
self.read_scalar(
alloc_range(offset, self.tcx.data_layout().pointer_size),
/*read_provenance*/ true,
@ -941,7 +945,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Performs appropriate bounds checks.
pub fn read_bytes_ptr(
&self,
ptr: Pointer<Option<M::PointerTag>>,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
) -> InterpResult<'tcx, &[u8]> {
let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
@ -961,7 +965,7 @@ pub fn read_bytes_ptr(
/// Performs appropriate bounds checks.
pub fn write_bytes_ptr(
&mut self,
ptr: Pointer<Option<M::PointerTag>>,
ptr: Pointer<Option<M::Provenance>>,
src: impl IntoIterator<Item = u8>,
) -> InterpResult<'tcx> {
let mut src = src.into_iter();
@ -998,9 +1002,9 @@ pub fn write_bytes_ptr(
pub fn mem_copy(
&mut self,
src: Pointer<Option<M::PointerTag>>,
src: Pointer<Option<M::Provenance>>,
src_align: Align,
dest: Pointer<Option<M::PointerTag>>,
dest: Pointer<Option<M::Provenance>>,
dest_align: Align,
size: Size,
nonoverlapping: bool,
@ -1010,9 +1014,9 @@ pub fn mem_copy(
pub fn mem_copy_repeatedly(
&mut self,
src: Pointer<Option<M::PointerTag>>,
src: Pointer<Option<M::Provenance>>,
src_align: Align,
dest: Pointer<Option<M::PointerTag>>,
dest: Pointer<Option<M::Provenance>>,
dest_align: Align,
size: Size,
num_copies: u64,
@ -1027,16 +1031,16 @@ pub fn mem_copy_repeatedly(
// and once below to get the underlying `&[mut] Allocation`.
// Source alloc preparations and access hooks.
let Some((src_alloc_id, src_offset, src_tag)) = src_parts else {
let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
// Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
return Ok(());
};
let src_alloc = self.get_alloc_raw(src_alloc_id)?;
let src_range = alloc_range(src_offset, size);
M::memory_read(*tcx, &self.machine, &src_alloc.extra, (src_alloc_id, src_tag), src_range)?;
M::memory_read(*tcx, &self.machine, &src_alloc.extra, (src_alloc_id, src_prov), src_range)?;
// We need the `dest` ptr for the next operation, so we get it now.
// We already did the source checks and called the hooks so we are good to return early.
let Some((dest_alloc_id, dest_offset, dest_tag)) = dest_parts else {
let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
// Zero-sized *destination*.
return Ok(());
};
@ -1062,7 +1066,7 @@ pub fn mem_copy_repeatedly(
*tcx,
extra,
&mut dest_alloc.extra,
(dest_alloc_id, dest_tag),
(dest_alloc_id, dest_prov),
dest_range,
)?;
let dest_bytes = dest_alloc
@ -1135,8 +1139,8 @@ pub fn mem_copy_repeatedly(
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn scalar_to_ptr(
&self,
scalar: Scalar<M::PointerTag>,
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
scalar: Scalar<M::Provenance>,
) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
// We use `to_bits_or_ptr_internal` since we are just implementing the method people need to
// call to force getting out a pointer.
Ok(
@ -1155,7 +1159,7 @@ pub fn scalar_to_ptr(
/// Test if this value might be null.
/// If the machine does not support ptr-to-int casts, this is conservative.
pub fn scalar_may_be_null(&self, scalar: Scalar<M::PointerTag>) -> InterpResult<'tcx, bool> {
pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
Ok(match scalar.try_to_int() {
Ok(int) => int.is_null(),
Err(_) => {
@ -1178,13 +1182,13 @@ pub fn scalar_may_be_null(&self, scalar: Scalar<M::PointerTag>) -> InterpResult<
/// about where it points), or an absolute address.
pub fn ptr_try_get_alloc_id(
&self,
ptr: Pointer<Option<M::PointerTag>>,
) -> Result<(AllocId, Size, M::TagExtra), u64> {
ptr: Pointer<Option<M::Provenance>>,
) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
match ptr.into_pointer_or_addr() {
Ok(ptr) => match M::ptr_get_alloc(self, ptr) {
Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
None => {
assert!(M::PointerTag::OFFSET_IS_ADDR);
assert!(M::Provenance::OFFSET_IS_ADDR);
let (_, addr) = ptr.into_parts();
Err(addr.bytes())
}
@ -1197,8 +1201,8 @@ pub fn ptr_try_get_alloc_id(
#[inline(always)]
pub fn ptr_get_alloc_id(
&self,
ptr: Pointer<Option<M::PointerTag>>,
) -> InterpResult<'tcx, (AllocId, Size, M::TagExtra)> {
ptr: Pointer<Option<M::Provenance>>,
) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
self.ptr_try_get_alloc_id(ptr).map_err(|offset| {
err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
})
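A sketch of the two-way result of `ptr_try_get_alloc_id`: through its provenance a pointer resolves to an allocation plus offset, otherwise only a bare address remains (simplified stand-in types):

```rust
type AllocId = u64; // stand-in for rustc's interned AllocId

fn try_get_alloc(prov: Option<AllocId>, offset: u64) -> Result<(AllocId, u64), u64> {
    match prov {
        Some(alloc_id) => Ok((alloc_id, offset)), // pointer with provenance
        None => Err(offset), // absolute address, e.g. from an int-to-ptr cast
    }
}

fn main() {
    assert_eq!(try_get_alloc(Some(3), 16), Ok((3, 16)));
    assert_eq!(try_get_alloc(None, 0xdead), Err(0xdead));
}
```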


@ -25,14 +25,14 @@
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Immediate`, and do not have to work with a `Place`.
#[derive(Copy, Clone, Debug)]
pub enum Immediate<Tag: Provenance = AllocId> {
pub enum Immediate<Prov: Provenance = AllocId> {
/// A single scalar value (must have *initialized* `Scalar` ABI).
/// FIXME: we also currently often use this for ZST.
/// `ScalarMaybeUninit` should reject ZST, and we should use `Uninit` for them instead.
Scalar(ScalarMaybeUninit<Tag>),
Scalar(ScalarMaybeUninit<Prov>),
/// A pair of two scalar values (must have `ScalarPair` ABI where both fields are
/// `Scalar::Initialized`).
ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
ScalarPair(ScalarMaybeUninit<Prov>, ScalarMaybeUninit<Prov>),
/// A value of fully uninitialized memory. Can have any size and layout.
Uninit,
}
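The shape this enum describes, as a minimal sketch (plain `u128` in place of `ScalarMaybeUninit<Prov>`):

```rust
#[derive(Copy, Clone, Debug)]
enum Immediate {
    Scalar(u128),
    ScalarPair(u128, u128),
    Uninit,
}

fn new_slice(data_ptr: u128, len: u128) -> Immediate {
    // mirrors `Immediate::new_slice` below: (value, machine-usize length)
    Immediate::ScalarPair(data_ptr, len)
}

fn main() {
    println!("{:?}", new_slice(0x1000, 4));
}
```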
@ -40,36 +40,36 @@ pub enum Immediate<Tag: Provenance = AllocId> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Immediate, 56);
impl<Tag: Provenance> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
impl<Prov: Provenance> From<ScalarMaybeUninit<Prov>> for Immediate<Prov> {
#[inline(always)]
fn from(val: ScalarMaybeUninit<Tag>) -> Self {
fn from(val: ScalarMaybeUninit<Prov>) -> Self {
Immediate::Scalar(val)
}
}
impl<Tag: Provenance> From<Scalar<Tag>> for Immediate<Tag> {
impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
#[inline(always)]
fn from(val: Scalar<Tag>) -> Self {
fn from(val: Scalar<Prov>) -> Self {
Immediate::Scalar(val.into())
}
}
impl<'tcx, Tag: Provenance> Immediate<Tag> {
pub fn from_pointer(p: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
impl<'tcx, Prov: Provenance> Immediate<Prov> {
pub fn from_pointer(p: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
}
pub fn from_maybe_pointer(p: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
pub fn from_maybe_pointer(p: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
}
pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
}
pub fn new_dyn_trait(
val: Scalar<Tag>,
vtable: Pointer<Option<Tag>>,
val: Scalar<Prov>,
vtable: Pointer<Option<Prov>>,
cx: &impl HasDataLayout,
) -> Self {
Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx))
@ -77,7 +77,7 @@ pub fn new_dyn_trait(
#[inline]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Tag> {
pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Prov> {
match self {
Immediate::Scalar(val) => val,
Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
@ -87,13 +87,13 @@ pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Tag> {
#[inline]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Prov>> {
self.to_scalar_or_uninit().check_init()
}
#[inline]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn to_scalar_or_uninit_pair(self) -> (ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>) {
pub fn to_scalar_or_uninit_pair(self) -> (ScalarMaybeUninit<Prov>, ScalarMaybeUninit<Prov>) {
match self {
Immediate::ScalarPair(val1, val2) => (val1, val2),
Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"),
@ -103,7 +103,7 @@ pub fn to_scalar_or_uninit_pair(self) -> (ScalarMaybeUninit<Tag>, ScalarMaybeUni
#[inline]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Prov>, Scalar<Prov>)> {
let (val1, val2) = self.to_scalar_or_uninit_pair();
Ok((val1.check_init()?, val2.check_init()?))
}
@ -112,20 +112,20 @@ pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
// ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations.
#[derive(Clone, Debug)]
pub struct ImmTy<'tcx, Tag: Provenance = AllocId> {
imm: Immediate<Tag>,
pub struct ImmTy<'tcx, Prov: Provenance = AllocId> {
imm: Immediate<Prov>,
pub layout: TyAndLayout<'tcx>,
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
impl<Tag: Provenance> std::fmt::Display for ImmTy<'_, Tag> {
impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
/// Helper function for printing a scalar to a FmtPrinter
fn p<'a, 'tcx, Tag: Provenance>(
fn p<'a, 'tcx, Prov: Provenance>(
cx: FmtPrinter<'a, 'tcx>,
s: ScalarMaybeUninit<Tag>,
s: ScalarMaybeUninit<Prov>,
ty: Ty<'tcx>,
) -> Result<FmtPrinter<'a, 'tcx>, std::fmt::Error> {
match s {
@ -170,10 +170,10 @@ fn p<'a, 'tcx, Tag: Provenance>(
}
}
impl<'tcx, Tag: Provenance> std::ops::Deref for ImmTy<'tcx, Tag> {
type Target = Immediate<Tag>;
impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
type Target = Immediate<Prov>;
#[inline(always)]
fn deref(&self) -> &Immediate<Tag> {
fn deref(&self) -> &Immediate<Prov> {
&self.imm
}
}
@ -182,17 +182,17 @@ fn deref(&self) -> &Immediate<Tag> {
/// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, Debug)]
pub enum Operand<Tag: Provenance = AllocId> {
Immediate(Immediate<Tag>),
Indirect(MemPlace<Tag>),
pub enum Operand<Prov: Provenance = AllocId> {
Immediate(Immediate<Prov>),
Indirect(MemPlace<Prov>),
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Operand, 64);
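Editor's note: the two `Operand` arms encode the optimization described in the doc comment: a small value is kept inline as an immediate, while in-memory data is referenced by place and only read when actually needed. A loose, self-contained analogy (toy types, not the interpreter's):

```rust
// Either the value itself, or a reference into backing storage.
enum Operand {
    Immediate(u64),  // small value, already materialized
    Indirect(usize), // index into memory; read lazily
}

fn read(op: &Operand, memory: &[u64]) -> u64 {
    match op {
        Operand::Immediate(v) => *v,             // free: no memory access
        Operand::Indirect(idx) => memory[*idx],  // costs a memory access
    }
}

fn main() {
    let memory = vec![7, 8, 9];
    assert_eq!(read(&Operand::Immediate(42), &memory), 42);
    assert_eq!(read(&Operand::Indirect(2), &memory), 9);
}
```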
#[derive(Clone, Debug)]
pub struct OpTy<'tcx, Tag: Provenance = AllocId> {
op: Operand<Tag>, // Keep this private; it helps enforce invariants.
pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
op: Operand<Prov>, // Keep this private; it helps enforce invariants.
pub layout: TyAndLayout<'tcx>,
/// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
/// it needs to have a different alignment than the field type would usually have.
@ -207,50 +207,50 @@ pub struct OpTy<'tcx, Tag: Provenance = AllocId> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(OpTy<'_>, 88);
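Editor's note: the `align` field that `OpTy` (and `MPlaceTy`/`PlaceTy` below) carry exists because a field of a `repr(packed)` struct can sit at a lower alignment than its type normally requires; plain Rust shows the mismatch directly:

```rust
#[repr(C, packed)]
struct Packed {
    a: u8,
    b: u32, // stored at offset 1, below u32's natural 4-byte alignment
}

fn main() {
    assert_eq!(std::mem::align_of::<u32>(), 4);
    assert_eq!(std::mem::align_of::<Packed>(), 1);
    let p = Packed { a: 0, b: 0xdead_beef };
    // Rust refuses to create a plain reference to the misaligned field;
    // a raw pointer plus `read_unaligned` is the sound way in.
    let b = unsafe { std::ptr::addr_of!(p.b).read_unaligned() };
    assert_eq!(b, 0xdead_beef);
}
```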
impl<'tcx, Tag: Provenance> std::ops::Deref for OpTy<'tcx, Tag> {
type Target = Operand<Tag>;
impl<'tcx, Prov: Provenance> std::ops::Deref for OpTy<'tcx, Prov> {
type Target = Operand<Prov>;
#[inline(always)]
fn deref(&self) -> &Operand<Tag> {
fn deref(&self) -> &Operand<Prov> {
&self.op
}
}
impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
#[inline(always)]
fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout, align: Some(mplace.align) }
}
}
impl<'tcx, Tag: Provenance> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
#[inline(always)]
fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
}
}
impl<'tcx, Tag: Provenance> From<&'_ mut MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
#[inline(always)]
fn from(mplace: &mut MPlaceTy<'tcx, Tag>) -> Self {
fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
}
}
impl<'tcx, Tag: Provenance> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
#[inline(always)]
fn from(val: ImmTy<'tcx, Tag>) -> Self {
fn from(val: ImmTy<'tcx, Prov>) -> Self {
OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
}
}
impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self {
pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
ImmTy { imm: val.into(), layout }
}
#[inline]
pub fn from_immediate(imm: Immediate<Tag>, layout: TyAndLayout<'tcx>) -> Self {
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
ImmTy { imm, layout }
}
@ -286,7 +286,7 @@ pub fn to_const_int(self) -> ConstInt {
}
}
impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> {
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
if self.layout.is_unsized() {
// There are no unsized immediates.
@ -302,7 +302,7 @@ pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
pub fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Tag>,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
@ -338,9 +338,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// This is an internal function; call `read_immediate` instead.
fn read_immediate_from_mplace_raw(
&self,
mplace: &MPlaceTy<'tcx, M::PointerTag>,
mplace: &MPlaceTy<'tcx, M::Provenance>,
force: bool,
) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::Provenance>>> {
if mplace.layout.is_unsized() {
// Don't touch unsized
return Ok(None);
@ -418,9 +418,9 @@ fn read_immediate_from_mplace_raw(
/// ConstProp needs it, though.
pub fn read_immediate_raw(
&self,
src: &OpTy<'tcx, M::PointerTag>,
src: &OpTy<'tcx, M::Provenance>,
force: bool,
) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::Provenance>, MPlaceTy<'tcx, M::Provenance>>> {
Ok(match src.try_as_mplace() {
Ok(ref mplace) => {
if let Some(val) = self.read_immediate_from_mplace_raw(mplace, force)? {
@ -437,8 +437,8 @@ pub fn read_immediate_raw(
#[inline(always)]
pub fn read_immediate(
&self,
op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
if let Ok(imm) = self.read_immediate_raw(op, /*force*/ false)? {
Ok(imm)
} else {
@ -449,21 +449,21 @@ pub fn read_immediate(
/// Read a scalar from a place
pub fn read_scalar(
&self,
op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ScalarMaybeUninit<M::Provenance>> {
Ok(self.read_immediate(op)?.to_scalar_or_uninit())
}
/// Read a pointer from a place.
pub fn read_pointer(
&self,
op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
self.scalar_to_ptr(self.read_scalar(op)?.check_init()?)
}
/// Turn the wide MPlace into a string (must already be dereferenced!)
pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
let len = mplace.len(self)?;
let bytes = self.read_bytes_ptr(mplace.ptr, Size::from_bytes(len))?;
let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
@ -476,8 +476,8 @@ pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'
/// Can (but does not always) trigger UB if `op` is uninitialized.
pub fn operand_to_simd(
&self,
op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
// Basically we just transmute this place into an array following simd_size_and_type.
// This only works in memory, but repr(simd) types should never be immediates anyway.
assert!(op.layout.ty.is_simd());
@ -501,10 +501,10 @@ pub fn operand_to_simd(
/// OpTy from a local.
pub fn local_to_op(
&self,
frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
local: mir::Local,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
let layout = self.layout_of_local(frame, local, layout)?;
let op = if layout.is_zst() {
// Bypass `access_local` (helps in ConstProp)
@ -521,8 +521,8 @@ pub fn local_to_op(
#[inline(always)]
pub fn place_to_op(
&self,
place: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
place: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
let op = match **place {
Place::Ptr(mplace) => Operand::Indirect(mplace),
Place::Local { frame, local } => {
@ -538,7 +538,7 @@ pub fn eval_place_to_op(
&self,
mir_place: mir::Place<'tcx>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
// Do not use the layout passed in as argument if the base we are looking at
// here is not the entire place.
let layout = if mir_place.projection.is_empty() { layout } else { None };
@ -575,7 +575,7 @@ pub fn eval_operand(
&self,
mir_op: &mir::Operand<'tcx>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
use rustc_middle::mir::Operand::*;
let op = match *mir_op {
// FIXME: do some more logic on `move` to invalidate the old location
@ -600,7 +600,7 @@ pub fn eval_operand(
pub(super) fn eval_operands(
&self,
ops: &[mir::Operand<'tcx>],
) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::Provenance>>> {
ops.iter().map(|op| self.eval_operand(op, None)).collect()
}
@ -612,7 +612,7 @@ pub fn const_to_op(
&self,
c: ty::Const<'tcx>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
match c.kind() {
ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => {
@ -637,7 +637,7 @@ pub fn mir_const_to_op(
&self,
val: &mir::ConstantKind<'tcx>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
match val {
mir::ConstantKind::Ty(ct) => self.const_to_op(*ct, layout),
mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, *ty, layout),
@ -649,9 +649,9 @@ pub(crate) fn const_val_to_op(
val_val: ConstValue<'tcx>,
ty: Ty<'tcx>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
// Other cases need layout.
let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
let adjust_scalar = |scalar| -> InterpResult<'tcx, _> {
Ok(match scalar {
Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
Scalar::Int(int) => Scalar::Int(int),
@ -666,7 +666,7 @@ pub(crate) fn const_val_to_op(
let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
Operand::Indirect(MemPlace::from_ptr(ptr.into()))
}
ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()),
ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
ConstValue::Slice { data, start, end } => {
// We rely on mutability being set correctly in `data` to prevent writes
@ -689,8 +689,8 @@ pub(crate) fn const_val_to_op(
/// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
pub fn read_discriminant(
&self,
op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
trace!("read_discriminant_value {:#?}", op.layout);
// Get type and layout of the discriminant.
let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
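Editor's note: for intuition about what `read_discriminant` computes (a deliberately tiny example; the interpreter additionally handles niche-encoded tags and non-enums): for a fieldless `#[repr(u8)]` enum, the discriminant is literally the value's first byte.

```rust
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
enum Order {
    Pending = 10,
    Shipped = 20,
}

fn main() {
    let x = Order::Shipped;
    // The layout guarantee for fieldless repr(u8) enums makes this sound.
    let tag = unsafe { *(&x as *const Order as *const u8) };
    assert_eq!(tag, 20);
    assert_eq!(x as u8, 20); // the safe spelling of the same read
}
```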

View file

@ -19,9 +19,9 @@ pub fn binop_with_overflow(
&mut self,
op: mir::BinOp,
force_overflow_checks: bool,
left: &ImmTy<'tcx, M::PointerTag>,
right: &ImmTy<'tcx, M::PointerTag>,
dest: &PlaceTy<'tcx, M::PointerTag>,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
debug_assert_eq!(
@ -58,9 +58,9 @@ pub fn binop_with_overflow(
pub fn binop_ignore_overflow(
&mut self,
op: mir::BinOp,
left: &ImmTy<'tcx, M::PointerTag>,
right: &ImmTy<'tcx, M::PointerTag>,
dest: &PlaceTy<'tcx, M::PointerTag>,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
@ -74,7 +74,7 @@ fn binary_char_op(
bin_op: mir::BinOp,
l: char,
r: char,
) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
use rustc_middle::mir::BinOp::*;
let res = match bin_op {
@ -94,7 +94,7 @@ fn binary_bool_op(
bin_op: mir::BinOp,
l: bool,
r: bool,
) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
use rustc_middle::mir::BinOp::*;
let res = match bin_op {
@ -112,13 +112,13 @@ fn binary_bool_op(
(Scalar::from_bool(res), false, self.tcx.types.bool)
}
fn binary_float_op<F: Float + Into<Scalar<M::PointerTag>>>(
fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
&self,
bin_op: mir::BinOp,
ty: Ty<'tcx>,
l: F,
r: F,
) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
use rustc_middle::mir::BinOp::*;
let (val, ty) = match bin_op {
@ -146,7 +146,7 @@ fn binary_int_op(
left_layout: TyAndLayout<'tcx>,
r: u128,
right_layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
use rustc_middle::mir::BinOp::*;
// Shift ops can have an RHS with a different numeric type.
@ -314,9 +314,9 @@ fn binary_int_op(
pub fn overflowing_binary_op(
&self,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::PointerTag>,
right: &ImmTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
trace!(
"Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op,
@ -393,9 +393,9 @@ pub fn overflowing_binary_op(
pub fn binary_op(
&self,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::PointerTag>,
right: &ImmTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
}
@ -405,8 +405,8 @@ pub fn binary_op(
pub fn overflowing_unary_op(
&self,
un_op: mir::UnOp,
val: &ImmTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
val: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
use rustc_middle::mir::UnOp::*;
let layout = val.layout;
@ -455,8 +455,8 @@ pub fn overflowing_unary_op(
pub fn unary_op(
&self,
un_op: mir::UnOp,
val: &ImmTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
val: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
}
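Editor's note: the `(value, overflowed, type)` triple that `overflowing_binary_op` returns follows the same contract as the standard library's `overflowing_*` methods; a toy evaluator in that style (hypothetical, not the interpreter's code):

```rust
#[derive(Clone, Copy, Debug)]
enum BinOp { Add, Mul, Shl }

/// Returns the wrapped result plus whether it overflowed, like
/// `binop_with_overflow` does before writing the (value, flag) pair.
fn overflowing_binop(op: BinOp, l: i32, r: i32) -> (i32, bool) {
    match op {
        BinOp::Add => l.overflowing_add(r),
        BinOp::Mul => l.overflowing_mul(r),
        BinOp::Shl => l.overflowing_shl(r as u32),
    }
}

fn main() {
    assert_eq!(overflowing_binop(BinOp::Add, i32::MAX, 1), (i32::MIN, true));
    assert_eq!(overflowing_binop(BinOp::Mul, 3, 4), (12, false));
}
```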

View file

@ -18,9 +18,9 @@
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
/// Information required for the sound usage of a `MemPlace`.
pub enum MemPlaceMeta<Tag: Provenance = AllocId> {
pub enum MemPlaceMeta<Prov: Provenance = AllocId> {
/// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
Meta(Scalar<Tag>),
Meta(Scalar<Prov>),
/// `Sized` types or unsized `extern type`
None,
}
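Editor's note: the `Meta`/`None` split corresponds to wide vs. thin pointers, which is observable from ordinary Rust through pointer sizes:

```rust
use std::mem::size_of;

fn main() {
    // Thin pointer: no metadata.
    assert_eq!(size_of::<&u8>(), size_of::<usize>());
    // Slice reference: pointer + length metadata.
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
    // Trait object: pointer + vtable pointer metadata.
    assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * size_of::<usize>());
}
```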
@ -28,8 +28,8 @@ pub enum MemPlaceMeta<Tag: Provenance = AllocId> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);
impl<Tag: Provenance> MemPlaceMeta<Tag> {
pub fn unwrap_meta(self) -> Scalar<Tag> {
impl<Prov: Provenance> MemPlaceMeta<Prov> {
pub fn unwrap_meta(self) -> Scalar<Prov> {
match self {
Self::Meta(s) => s,
Self::None => {
@ -47,13 +47,13 @@ pub fn has_meta(self) -> bool {
}
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
pub struct MemPlace<Tag: Provenance = AllocId> {
/// The pointer can be a pure integer, with the `None` tag.
pub ptr: Pointer<Option<Tag>>,
pub struct MemPlace<Prov: Provenance = AllocId> {
/// The pointer can be a pure integer, with the `None` provenance.
pub ptr: Pointer<Option<Prov>>,
/// Metadata for unsized places. Interpretation is up to the type.
/// Must not be present for sized types, but can be missing for unsized types
/// (e.g., `extern type`).
pub meta: MemPlaceMeta<Tag>,
pub meta: MemPlaceMeta<Prov>,
}
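Editor's note: a minimal sketch of the `Pointer<Option<Prov>>` idea used by `MemPlace` (toy types; the real `Pointer` lives in rustc_middle): an address whose provenance may be absent, in which case it is just an integer, plus a `map_provenance` in the style shown above.

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct AllocId(u64); // toy provenance

#[derive(Clone, Copy, Debug)]
struct Pointer {
    prov: Option<AllocId>, // `None`: a pure integer address
    offset: u64,
}

impl Pointer {
    /// Adjust the provenance while keeping the address, like
    /// `MemPlace::map_provenance`.
    fn map_provenance(self, f: impl FnOnce(Option<AllocId>) -> Option<AllocId>) -> Self {
        Pointer { prov: f(self.prov), ..self }
    }
}

fn main() {
    let int_ptr = Pointer { prov: None, offset: 0x1000 };
    let tagged = int_ptr.map_provenance(|_| Some(AllocId(7)));
    println!("{int_ptr:?} -> {tagged:?}");
}
```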
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
@ -61,8 +61,8 @@ pub struct MemPlace<Tag: Provenance = AllocId> {
/// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
pub struct MPlaceTy<'tcx, Tag: Provenance = AllocId> {
mplace: MemPlace<Tag>,
pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
mplace: MemPlace<Prov>,
pub layout: TyAndLayout<'tcx>,
/// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
/// it needs to have a different alignment than the field type would usually have.
@ -75,9 +75,9 @@ pub struct MPlaceTy<'tcx, Tag: Provenance = AllocId> {
rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 64);
#[derive(Copy, Clone, Debug)]
pub enum Place<Tag: Provenance = AllocId> {
pub enum Place<Prov: Provenance = AllocId> {
/// A place referring to a value allocated in the `Memory` system.
Ptr(MemPlace<Tag>),
Ptr(MemPlace<Prov>),
/// To support alloc-free locals, we are able to write directly to a local.
/// (Without that optimization, we'd just always be a `MemPlace`.)
@ -88,8 +88,8 @@ pub enum Place<Tag: Provenance = AllocId> {
rustc_data_structures::static_assert_size!(Place, 48);
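Editor's note: the `Local` arm is what makes locals allocation-free: writes can go straight into the frame's slot, and only when code needs an actual address does `force_allocation` (further down) spill the local into memory. A rough, hypothetical model of that split:

```rust
#[derive(Clone, Copy, Debug)]
enum Place {
    Ptr(usize),                            // address into `memory`
    Local { frame: usize, local: usize },  // no backing memory yet
}

struct Machine {
    memory: Vec<u64>,
    frames: Vec<Vec<u64>>, // one slot vector per call frame
}

impl Machine {
    fn write(&mut self, place: Place, val: u64) {
        match place {
            Place::Ptr(addr) => self.memory[addr] = val,
            Place::Local { frame, local } => self.frames[frame][local] = val,
        }
    }

    /// Spill a local into memory so it has an address; the real
    /// `force_allocation` also repoints the local at the new memory.
    fn force_allocation(&mut self, place: Place) -> usize {
        match place {
            Place::Ptr(addr) => addr,
            Place::Local { frame, local } => {
                self.memory.push(self.frames[frame][local]);
                self.memory.len() - 1
            }
        }
    }
}

fn main() {
    let mut m = Machine { memory: vec![], frames: vec![vec![0; 4]] };
    let p = Place::Local { frame: 0, local: 2 };
    m.write(p, 99);
    let addr = m.force_allocation(p);
    assert_eq!(m.memory[addr], 99);
}
```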
#[derive(Clone, Debug)]
pub struct PlaceTy<'tcx, Tag: Provenance = AllocId> {
place: Place<Tag>, // Keep this private; it helps enforce invariants.
pub struct PlaceTy<'tcx, Prov: Provenance = AllocId> {
place: Place<Prov>, // Keep this private; it helps enforce invariants.
pub layout: TyAndLayout<'tcx>,
/// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
/// it needs to have a different alignment than the field type would usually have.
@ -101,58 +101,58 @@ pub struct PlaceTy<'tcx, Tag: Provenance = AllocId> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(PlaceTy<'_>, 72);
impl<'tcx, Tag: Provenance> std::ops::Deref for PlaceTy<'tcx, Tag> {
type Target = Place<Tag>;
impl<'tcx, Prov: Provenance> std::ops::Deref for PlaceTy<'tcx, Prov> {
type Target = Place<Prov>;
#[inline(always)]
fn deref(&self) -> &Place<Tag> {
fn deref(&self) -> &Place<Prov> {
&self.place
}
}
impl<'tcx, Tag: Provenance> std::ops::Deref for MPlaceTy<'tcx, Tag> {
type Target = MemPlace<Tag>;
impl<'tcx, Prov: Provenance> std::ops::Deref for MPlaceTy<'tcx, Prov> {
type Target = MemPlace<Prov>;
#[inline(always)]
fn deref(&self) -> &MemPlace<Tag> {
fn deref(&self) -> &MemPlace<Prov> {
&self.mplace
}
}
impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
PlaceTy { place: Place::Ptr(*mplace), layout: mplace.layout, align: mplace.align }
}
}
impl<'tcx, Tag: Provenance> From<&'_ MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
}
}
impl<'tcx, Tag: Provenance> From<&'_ mut MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn from(mplace: &mut MPlaceTy<'tcx, Tag>) -> Self {
fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
}
}
impl<Tag: Provenance> MemPlace<Tag> {
impl<Prov: Provenance> MemPlace<Prov> {
#[inline(always)]
pub fn from_ptr(ptr: Pointer<Option<Tag>>) -> Self {
pub fn from_ptr(ptr: Pointer<Option<Prov>>) -> Self {
MemPlace { ptr, meta: MemPlaceMeta::None }
}
/// Adjust the provenance of the main pointer (metadata is unaffected).
pub fn map_provenance(self, f: impl FnOnce(Option<Tag>) -> Option<Tag>) -> Self {
pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
MemPlace { ptr: self.ptr.map_provenance(f), ..self }
}
/// Turn an mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
/// This is the inverse of `ref_to_mplace`.
#[inline(always)]
pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Tag> {
pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
match self.meta {
MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
MemPlaceMeta::Meta(meta) => {
@ -165,14 +165,14 @@ pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Tag> {
pub fn offset_with_meta<'tcx>(
self,
offset: Size,
meta: MemPlaceMeta<Tag>,
meta: MemPlaceMeta<Prov>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
Ok(MemPlace { ptr: self.ptr.offset(offset, cx)?, meta })
}
}
impl<Tag: Provenance> Place<Tag> {
impl<Prov: Provenance> Place<Prov> {
/// Asserts that this points to some local variable.
/// Returns the frame idx and the variable idx.
#[inline]
@ -185,7 +185,7 @@ pub fn assert_local(&self) -> (usize, mir::Local) {
}
}
impl<'tcx, Tag: Provenance> MPlaceTy<'tcx, Tag> {
impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
/// Produces a MemPlace that works for ZST but nothing else.
/// Conceptually this is a new allocation, but it doesn't actually create an allocation so you
/// don't need to worry about memory leaks.
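Editor's note: user code can pull the same trick: zero-sized accesses need a non-null, well-aligned pointer but no backing allocation, which is exactly what `NonNull::dangling` provides:

```rust
use std::ptr::NonNull;

fn main() {
    // No allocation happens; the pointer is merely well-aligned and non-null.
    let p: NonNull<()> = NonNull::dangling();
    unsafe {
        p.as_ptr().write(());       // ZST write: touches no memory
        let () = p.as_ptr().read(); // ZST read: likewise a no-op
    }
}
```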
@ -201,7 +201,7 @@ pub fn fake_alloc_zst(layout: TyAndLayout<'tcx>) -> Self {
pub fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Tag>,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
@ -223,15 +223,15 @@ pub fn offset(
}
#[inline]
pub fn from_aligned_ptr(ptr: Pointer<Option<Tag>>, layout: TyAndLayout<'tcx>) -> Self {
pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
}
#[inline]
pub fn from_aligned_ptr_with_meta(
ptr: Pointer<Option<Tag>>,
ptr: Pointer<Option<Prov>>,
layout: TyAndLayout<'tcx>,
meta: MemPlaceMeta<Tag>,
meta: MemPlaceMeta<Prov>,
) -> Self {
let mut mplace = MemPlace::from_ptr(ptr);
mplace.meta = meta;
@ -258,7 +258,7 @@ pub(crate) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
}
#[inline]
pub(super) fn vtable(&self) -> Scalar<Tag> {
pub(super) fn vtable(&self) -> Scalar<Prov> {
match self.layout.ty.kind() {
ty::Dynamic(..) => self.mplace.meta.unwrap_meta(),
_ => bug!("vtable not supported on type {:?}", self.layout.ty),
@ -267,11 +267,11 @@ pub(super) fn vtable(&self) -> Scalar<Tag> {
}
// These are defined here because they produce a place.
impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> {
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
#[inline(always)]
/// Note: do not call `as_ref` on the resulting place. This function should only be used to
/// read from the resulting mplace, not to get its address back.
pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
match **self {
Operand::Indirect(mplace) => {
Ok(MPlaceTy { mplace, layout: self.layout, align: self.align.unwrap() })
@ -284,15 +284,15 @@ pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
/// Note: do not call `as_ref` on the resulting place. This function should only be used to
/// read from the resulting mplace, not to get its address back.
pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Tag> {
pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
self.try_as_mplace().unwrap()
}
}
impl<'tcx, Tag: Provenance> PlaceTy<'tcx, Tag> {
impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
/// A place is either an mplace or some local.
#[inline]
pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, (usize, mir::Local)> {
pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Prov>, (usize, mir::Local)> {
match **self {
Place::Ptr(mplace) => Ok(MPlaceTy { mplace, layout: self.layout, align: self.align }),
Place::Local { frame, local } => Err((frame, local)),
@ -301,16 +301,16 @@ pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, (usize, mir::Local)>
#[inline(always)]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Prov> {
self.try_as_mplace().unwrap()
}
}
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
where
Tag: Provenance + Eq + Hash + 'static,
M: Machine<'mir, 'tcx, PointerTag = Tag>,
Prov: Provenance + Eq + Hash + 'static,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
/// Take a value, which represents a (thin or wide) reference, and make it a place.
/// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
@ -320,8 +320,8 @@ impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
/// Generally prefer `deref_operand`.
pub fn ref_to_mplace(
&self,
val: &ImmTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
val: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let pointee_type =
val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
let layout = self.layout_of(pointee_type)?;
@ -342,8 +342,8 @@ pub fn ref_to_mplace(
#[instrument(skip(self), level = "debug")]
pub fn deref_operand(
&self,
src: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
src: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let val = self.read_immediate(src)?;
trace!("deref to {} on {:?}", val.layout.ty, *val);
@ -359,8 +359,8 @@ pub fn deref_operand(
#[inline]
pub(super) fn get_place_alloc(
&self,
place: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::PointerTag, M::AllocExtra>>> {
place: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
assert!(!place.layout.is_unsized());
assert!(!place.meta.has_meta());
let size = place.layout.size;
@ -370,8 +370,8 @@ pub(super) fn get_place_alloc(
#[inline]
pub(super) fn get_place_alloc_mut(
&mut self,
place: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::PointerTag, M::AllocExtra>>> {
place: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
assert!(!place.layout.is_unsized());
assert!(!place.meta.has_meta());
let size = place.layout.size;
@ -381,7 +381,7 @@ pub(super) fn get_place_alloc_mut(
/// Check if this mplace is dereferenceable and sufficiently aligned.
fn check_mplace_access(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>,
mplace: MPlaceTy<'tcx, M::Provenance>,
msg: CheckInAllocMsg,
) -> InterpResult<'tcx> {
let (size, align) = self
@ -397,8 +397,8 @@ fn check_mplace_access(
/// Also returns the number of elements.
pub fn mplace_to_simd(
&self,
mplace: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
mplace: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
// Basically we just transmute this place into an array following simd_size_and_type.
// (Transmuting is okay since this is an in-memory place. We also double-check the size
// stays the same.)
@ -413,8 +413,8 @@ pub fn mplace_to_simd(
/// Also returns the number of elements.
pub fn place_to_simd(
&mut self,
place: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
place: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
let mplace = self.force_allocation(place)?;
self.mplace_to_simd(&mplace)
}
@ -423,7 +423,7 @@ pub fn local_to_place(
&self,
frame: usize,
local: mir::Local,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let layout = self.layout_of_local(&self.stack()[frame], local, None)?;
let place = Place::Local { frame, local };
Ok(PlaceTy { place, layout, align: layout.align.abi })
@ -435,7 +435,7 @@ pub fn local_to_place(
pub fn eval_place(
&mut self,
mir_place: mir::Place<'tcx>,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let mut place = self.local_to_place(self.frame_idx(), mir_place.local)?;
// Using `try_fold` turned out to be bad for performance, hence the loop.
for elem in mir_place.projection.iter() {
@ -465,8 +465,8 @@ pub fn eval_place(
#[instrument(skip(self), level = "debug")]
pub fn write_immediate(
&mut self,
src: Immediate<M::PointerTag>,
dest: &PlaceTy<'tcx, M::PointerTag>,
src: Immediate<M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
self.write_immediate_no_validate(src, dest)?;
@ -482,8 +482,8 @@ pub fn write_immediate(
#[inline(always)]
pub fn write_scalar(
&mut self,
val: impl Into<ScalarMaybeUninit<M::PointerTag>>,
dest: &PlaceTy<'tcx, M::PointerTag>,
val: impl Into<ScalarMaybeUninit<M::Provenance>>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
self.write_immediate(Immediate::Scalar(val.into()), dest)
}
@ -492,8 +492,8 @@ pub fn write_scalar(
#[inline(always)]
pub fn write_pointer(
&mut self,
ptr: impl Into<Pointer<Option<M::PointerTag>>>,
dest: &PlaceTy<'tcx, M::PointerTag>,
ptr: impl Into<Pointer<Option<M::Provenance>>>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
}
@ -503,8 +503,8 @@ pub fn write_pointer(
/// right type.
fn write_immediate_no_validate(
&mut self,
src: Immediate<M::PointerTag>,
dest: &PlaceTy<'tcx, M::PointerTag>,
src: Immediate<M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
@ -537,10 +537,10 @@ fn write_immediate_no_validate(
/// right layout.
fn write_immediate_to_mplace_no_validate(
&mut self,
value: Immediate<M::PointerTag>,
value: Immediate<M::Provenance>,
layout: TyAndLayout<'tcx>,
align: Align,
dest: MemPlace<M::PointerTag>,
dest: MemPlace<M::Provenance>,
) -> InterpResult<'tcx> {
// Note that it is really important that the type here is the right one, and matches the
// type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
@ -589,7 +589,7 @@ fn write_immediate_to_mplace_no_validate(
}
}
pub fn write_uninit(&mut self, dest: &PlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
pub fn write_uninit(&mut self, dest: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
let mplace = match dest.try_as_mplace() {
Ok(mplace) => mplace,
Err((frame, local)) => {
@ -619,8 +619,8 @@ pub fn write_uninit(&mut self, dest: &PlaceTy<'tcx, M::PointerTag>) -> InterpRes
#[instrument(skip(self), level = "debug")]
pub fn copy_op(
&mut self,
src: &OpTy<'tcx, M::PointerTag>,
dest: &PlaceTy<'tcx, M::PointerTag>,
src: &OpTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
allow_transmute: bool,
) -> InterpResult<'tcx> {
self.copy_op_no_validate(src, dest, allow_transmute)?;
@ -640,8 +640,8 @@ pub fn copy_op(
#[instrument(skip(self), level = "debug")]
fn copy_op_no_validate(
&mut self,
src: &OpTy<'tcx, M::PointerTag>,
dest: &PlaceTy<'tcx, M::PointerTag>,
src: &OpTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
allow_transmute: bool,
) -> InterpResult<'tcx> {
// We do NOT compare the types for equality, because well-typed code can
@ -713,8 +713,8 @@ fn copy_op_no_validate(
#[instrument(skip(self), level = "debug")]
pub fn force_allocation(
&mut self,
place: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
place: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let mplace = match place.place {
Place::Local { frame, local } => {
match M::access_local_mut(self, frame, local)? {
@ -760,7 +760,7 @@ pub fn allocate(
&mut self,
layout: TyAndLayout<'tcx>,
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
assert!(!layout.is_unsized());
let ptr = self.allocate_ptr(layout.size, layout.align.abi, kind)?;
Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
@ -772,7 +772,7 @@ pub fn allocate_str(
str: &str,
kind: MemoryKind<M::MemoryKind>,
mutbl: Mutability,
) -> MPlaceTy<'tcx, M::PointerTag> {
) -> MPlaceTy<'tcx, M::Provenance> {
let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl);
let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self);
let mplace = MemPlace { ptr: ptr.into(), meta: MemPlaceMeta::Meta(meta) };
@ -790,7 +790,7 @@ pub fn allocate_str(
pub fn write_discriminant(
&mut self,
variant_index: VariantIdx,
dest: &PlaceTy<'tcx, M::PointerTag>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
// This must be an enum or generator.
match dest.layout.ty.kind() {
@ -876,7 +876,7 @@ pub fn write_discriminant(
pub fn raw_const_to_mplace(
&self,
raw: ConstAlloc<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
// This must be an allocation in `tcx`
let _ = self.tcx.global_alloc(raw.alloc_id);
let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
@ -888,8 +888,8 @@ pub fn raw_const_to_mplace(
/// Also return some more information so drop doesn't have to run the same code twice.
pub(super) fn unpack_dyn_trait(
&self,
mplace: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
mplace: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::Provenance>)> {
let vtable = self.scalar_to_ptr(mplace.vtable())?; // also sanity checks the type
let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
let layout = self.layout_of(ty)?;
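Editor's note: `ref_to_mplace` and `MemPlace::to_ref` are inverses: one splits a (possibly wide) reference into pointer plus metadata, the other reassembles it. The slice version of that round trip in plain Rust:

```rust
fn main() {
    let data = [1u8, 2, 3];
    let r: &[u8] = &data;

    // "to_ref": place -> (pointer, metadata)
    let (ptr, len) = (r.as_ptr(), r.len());

    // "ref_to_mplace": (pointer, metadata) -> place
    let back: &[u8] = unsafe { std::slice::from_raw_parts(ptr, len) };
    assert_eq!(back, r);
}
```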

View file

@ -20,10 +20,10 @@
};
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
where
Tag: Provenance + Eq + Hash + 'static,
M: Machine<'mir, 'tcx, PointerTag = Tag>,
Prov: Provenance + Eq + Hash + 'static,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
//# Field access
@ -35,9 +35,9 @@ impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
/// For indexing into arrays, use `mplace_index`.
pub fn mplace_field(
&self,
base: &MPlaceTy<'tcx, M::PointerTag>,
base: &MPlaceTy<'tcx, M::Provenance>,
field: usize,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let offset = base.layout.fields.offset(field);
let field_layout = base.layout.field(self, field);
@ -72,9 +72,9 @@ pub fn mplace_field(
/// into the field of a local `ScalarPair`, we have to first allocate it.
pub fn place_field(
&mut self,
base: &PlaceTy<'tcx, M::PointerTag>,
base: &PlaceTy<'tcx, M::Provenance>,
field: usize,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
// FIXME: We could try to be smarter and avoid allocation for fields that span the
// entire place.
let base = self.force_allocation(base)?;
@ -83,9 +83,9 @@ pub fn place_field(
pub fn operand_field(
&self,
base: &OpTy<'tcx, M::PointerTag>,
base: &OpTy<'tcx, M::Provenance>,
field: usize,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
let base = match base.try_as_mplace() {
Ok(ref mplace) => {
// We can reuse the mplace field computation logic for indirect operands.
@ -139,9 +139,9 @@ pub fn operand_field(
pub fn mplace_downcast(
&self,
base: &MPlaceTy<'tcx, M::PointerTag>,
base: &MPlaceTy<'tcx, M::Provenance>,
variant: VariantIdx,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
// Downcasts only change the layout.
// (In particular, no check about whether this is even the active variant -- that's by design,
// see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
@ -153,9 +153,9 @@ pub fn mplace_downcast(
pub fn place_downcast(
&self,
base: &PlaceTy<'tcx, M::PointerTag>,
base: &PlaceTy<'tcx, M::Provenance>,
variant: VariantIdx,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
// Downcast just changes the layout
let mut base = base.clone();
base.layout = base.layout.for_variant(self, variant);
@ -164,9 +164,9 @@ pub fn place_downcast(
pub fn operand_downcast(
&self,
base: &OpTy<'tcx, M::PointerTag>,
base: &OpTy<'tcx, M::Provenance>,
variant: VariantIdx,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
// Downcast just changes the layout
let mut base = base.clone();
base.layout = base.layout.for_variant(self, variant);
@ -178,9 +178,9 @@ pub fn operand_downcast(
#[inline(always)]
pub fn operand_index(
&self,
base: &OpTy<'tcx, M::PointerTag>,
base: &OpTy<'tcx, M::Provenance>,
index: u64,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
// Not using the layout method because we want to compute on u64
match base.layout.fields {
abi::FieldsShape::Array { stride, count: _ } => {
@ -207,8 +207,8 @@ pub fn operand_index(
// same by repeatedly calling `operand_index`.
pub fn operand_array_fields<'a>(
&self,
base: &'a OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, OpTy<'tcx, Tag>>> + 'a> {
base: &'a OpTy<'tcx, Prov>,
) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, OpTy<'tcx, Prov>>> + 'a> {
let len = base.len(self)?; // also asserts that we have a type where this makes sense
let abi::FieldsShape::Array { stride, .. } = base.layout.fields else {
span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
@ -222,17 +222,17 @@ pub fn operand_array_fields<'a>(
/// Index into an array.
pub fn mplace_index(
&self,
base: &MPlaceTy<'tcx, M::PointerTag>,
base: &MPlaceTy<'tcx, M::Provenance>,
index: u64,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
Ok(self.operand_index(&base.into(), index)?.assert_mem_place())
}
pub fn place_index(
&mut self,
base: &PlaceTy<'tcx, M::PointerTag>,
base: &PlaceTy<'tcx, M::Provenance>,
index: u64,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
// There's not a lot we can do here, since we cannot have a place to a part of a local. If
// we are accessing the only element of a 1-element array, it's still the entire local...
// that doesn't seem worth it.
@ -244,11 +244,11 @@ pub fn place_index(
fn operand_constant_index(
&self,
base: &OpTy<'tcx, M::PointerTag>,
base: &OpTy<'tcx, M::Provenance>,
offset: u64,
min_length: u64,
from_end: bool,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
let n = base.len(self)?;
if n < min_length {
// This can only be reached in ConstProp and non-rustc-MIR.
@ -268,11 +268,11 @@ fn operand_constant_index(
fn place_constant_index(
&mut self,
base: &PlaceTy<'tcx, M::PointerTag>,
base: &PlaceTy<'tcx, M::Provenance>,
offset: u64,
min_length: u64,
from_end: bool,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let base = self.force_allocation(base)?;
Ok(self
.operand_constant_index(&base.into(), offset, min_length, from_end)?
@ -284,11 +284,11 @@ fn place_constant_index(
fn operand_subslice(
&self,
base: &OpTy<'tcx, M::PointerTag>,
base: &OpTy<'tcx, M::Provenance>,
from: u64,
to: u64,
from_end: bool,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
let len = base.len(self)?; // also asserts that we have a type where this makes sense
let actual_to = if from_end {
if from.checked_add(to).map_or(true, |to| to > len) {
@ -329,11 +329,11 @@ fn operand_subslice(
pub fn place_subslice(
&mut self,
base: &PlaceTy<'tcx, M::PointerTag>,
base: &PlaceTy<'tcx, M::Provenance>,
from: u64,
to: u64,
from_end: bool,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let base = self.force_allocation(base)?;
Ok(self.operand_subslice(&base.into(), from, to, from_end)?.assert_mem_place().into())
}
@ -344,9 +344,9 @@ pub fn place_subslice(
#[instrument(skip(self), level = "trace")]
pub fn place_projection(
&mut self,
base: &PlaceTy<'tcx, M::PointerTag>,
base: &PlaceTy<'tcx, M::Provenance>,
proj_elem: mir::PlaceElem<'tcx>,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem {
Field(field, _) => self.place_field(base, field.index())?,
@ -368,9 +368,9 @@ pub fn place_projection(
#[instrument(skip(self), level = "trace")]
pub fn operand_projection(
&self,
base: &OpTy<'tcx, M::PointerTag>,
base: &OpTy<'tcx, M::Provenance>,
proj_elem: mir::PlaceElem<'tcx>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem {
Field(field, _) => self.operand_field(base, field.index())?,
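Editor's note: all of these projections bottom out in offset arithmetic: a field lives at `base + field_offset`, an array element at `base + stride * index`, and `from_end` indices count back from the length. A compact sketch of the index math (hypothetical helpers, loosely mirroring `operand_constant_index`):

```rust
/// Resolve a constant index like `mir::ProjectionElem::ConstantIndex`,
/// where `from_end` means "offset from the back of the array".
fn resolve_index(len: u64, offset: u64, from_end: bool) -> u64 {
    if from_end { len - offset } else { offset }
}

fn element_addr(base: u64, stride: u64, index: u64) -> u64 {
    base + stride * index
}

fn main() {
    let (base, stride, len) = (0x1000, 4, 10);
    assert_eq!(element_addr(base, stride, resolve_index(len, 2, false)), 0x1008);
    // Second element from the end: index 8 of 10.
    assert_eq!(element_addr(base, stride, resolve_index(len, 2, true)), 0x1020);
}
```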

View file

@ -267,10 +267,10 @@ fn check_argument_compat(
fn pass_argument<'x, 'y>(
&mut self,
caller_args: &mut impl Iterator<
Item = (&'x OpTy<'tcx, M::PointerTag>, &'y ArgAbi<'tcx, Ty<'tcx>>),
Item = (&'x OpTy<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
>,
callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
callee_arg: &PlaceTy<'tcx, M::PointerTag>,
callee_arg: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx>
where
'tcx: 'x,
@ -336,9 +336,9 @@ pub(crate) fn eval_fn_call(
&mut self,
fn_val: FnVal<'tcx, M::ExtraFnVal>,
(caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
args: &[OpTy<'tcx, M::PointerTag>],
args: &[OpTy<'tcx, M::Provenance>],
with_caller_location: bool,
destination: &PlaceTy<'tcx, M::PointerTag>,
destination: &PlaceTy<'tcx, M::Provenance>,
target: Option<mir::BasicBlock>,
mut unwind: StackPopUnwind,
) -> InterpResult<'tcx> {
@ -437,7 +437,7 @@ pub(crate) fn eval_fn_call(
// last incoming argument. These two iterators do not have the same type,
// so to keep the code paths uniform we accept an allocation
// (for RustCall ABI only).
let caller_args: Cow<'_, [OpTy<'tcx, M::PointerTag>]> =
let caller_args: Cow<'_, [OpTy<'tcx, M::Provenance>]> =
if caller_abi == Abi::RustCall && !args.is_empty() {
// Untuple
let (untuple_arg, args) = args.split_last().unwrap();
@ -449,7 +449,7 @@ pub(crate) fn eval_fn_call(
(0..untuple_arg.layout.fields.count())
.map(|i| self.operand_field(untuple_arg, i)),
)
.collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>(
.collect::<InterpResult<'_, Vec<OpTy<'tcx, M::Provenance>>>>(
)?,
)
} else {
@ -593,7 +593,7 @@ pub(crate) fn eval_fn_call(
fn drop_in_place(
&mut self,
place: &PlaceTy<'tcx, M::PointerTag>,
place: &PlaceTy<'tcx, M::Provenance>,
instance: ty::Instance<'tcx>,
target: mir::BasicBlock,
unwind: Option<mir::BasicBlock>,
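Editor's note: the untupling above mirrors how the `rust-call` ABI behaves at the language level: closure arguments travel as a single tuple and get spread back out at the call. The same shape in ordinary Rust:

```rust
// A caller that receives its arguments pre-tupled and spreads them,
// loosely like `eval_fn_call` untuples the last argument for RustCall.
fn call_spread<F: Fn(u32, u32) -> u32>(f: F, args: (u32, u32)) -> u32 {
    f(args.0, args.1)
}

fn main() {
    let add = |a, b| a + b;
    assert_eq!(call_spread(add, (3, 4)), 7);
}
```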

View file

@ -21,7 +21,7 @@ pub fn get_vtable(
&mut self,
ty: Ty<'tcx>,
poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
trace!("get_vtable(trait_ref={:?})", poly_trait_ref);
let (ty, poly_trait_ref) = self.tcx.erase_regions((ty, poly_trait_ref));
@ -42,7 +42,7 @@ pub fn get_vtable(
/// corresponds to the first method declared in the trait of the provided vtable.
pub fn get_vtable_slot(
&self,
vtable: Pointer<Option<M::PointerTag>>,
vtable: Pointer<Option<M::Provenance>>,
idx: u64,
) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
let ptr_size = self.pointer_size();
@ -57,7 +57,7 @@ pub fn get_vtable_slot(
/// Returns the drop fn instance as well as the actual dynamic type.
pub fn read_drop_type_from_vtable(
&self,
vtable: Pointer<Option<M::PointerTag>>,
vtable: Pointer<Option<M::Provenance>>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
let pointer_size = self.pointer_size();
// We don't care about the pointee type; we just want a pointer.
@ -89,7 +89,7 @@ pub fn read_drop_type_from_vtable(
pub fn read_size_and_align_from_vtable(
&self,
vtable: Pointer<Option<M::PointerTag>>,
vtable: Pointer<Option<M::Provenance>>,
) -> InterpResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size();
// We check for `size = 3 * ptr_size`, which covers the drop fn (unused here),
@ -126,9 +126,9 @@ pub fn read_size_and_align_from_vtable(
pub fn read_new_vtable_after_trait_upcasting_from_vtable(
&self,
vtable: Pointer<Option<M::PointerTag>>,
vtable: Pointer<Option<M::Provenance>>,
idx: u64,
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
let pointer_size = self.pointer_size();
let vtable_slot = vtable.offset(pointer_size * idx, self)?;
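Editor's note: these accessors all assume one vtable shape: a pointer-sized array whose first three slots are the drop function, size, and align, with trait methods following, so slot `idx` sits at `vtable + ptr_size * idx`. A toy model (the real layout is a rustc implementation detail):

```rust
// Toy vtable: [drop_fn, size, align, method_0, method_1, ...]
struct Vtable(Vec<usize>);

impl Vtable {
    fn size(&self) -> usize { self.0[1] }
    fn align(&self) -> usize { self.0[2] }
    /// Like `get_vtable_slot`: methods start after the three header slots.
    fn method(&self, idx: usize) -> usize { self.0[3 + idx] }
}

fn main() {
    let vt = Vtable(vec![0xd209, 16, 8, 0xf001, 0xf002]);
    assert_eq!((vt.size(), vt.align()), (16, 8));
    assert_eq!(vt.method(1), 0xf002);
}
```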

View file

@ -206,7 +206,7 @@ struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
/// starts must not be changed! `visit_fields` and `visit_array` rely on
/// this stack discipline.
path: Vec<PathElem>,
ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>>,
/// `None` indicates this is not validating for CTFE (but for runtime).
ctfe_mode: Option<CtfeValidationMode>,
ecx: &'rt InterpCx<'mir, 'tcx, M>,
@ -306,7 +306,7 @@ fn with_elem<R>(
fn check_wide_ptr_meta(
&mut self,
meta: MemPlaceMeta<M::PointerTag>,
meta: MemPlaceMeta<M::Provenance>,
pointee: TyAndLayout<'tcx>,
) -> InterpResult<'tcx> {
let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
@ -380,7 +380,7 @@ fn check_wide_ptr_meta(
/// Check a reference or `Box`.
fn check_safe_pointer(
&mut self,
value: &OpTy<'tcx, M::PointerTag>,
value: &OpTy<'tcx, M::Provenance>,
kind: &str,
) -> InterpResult<'tcx> {
let value = try_validation!(
@ -445,7 +445,7 @@ fn check_safe_pointer(
if let Some(ref mut ref_tracking) = self.ref_tracking {
// Proceed recursively even for ZST, no reason to skip them!
// `!` is a ZST and we want to validate it.
if let Ok((alloc_id, _offset, _tag)) = self.ecx.ptr_try_get_alloc_id(place.ptr) {
if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr) {
// Special handling for pointers to statics (irrespective of their type).
let alloc_kind = self.ecx.tcx.get_global_alloc(alloc_id);
if let Some(GlobalAlloc::Static(did)) = alloc_kind {
@ -491,8 +491,8 @@ fn check_safe_pointer(
fn read_scalar(
&self,
op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ScalarMaybeUninit<M::Provenance>> {
Ok(try_validation!(
self.ecx.read_scalar(op),
self.path,
@ -502,8 +502,8 @@ fn read_scalar(
fn read_immediate_forced(
&self,
op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Immediate<M::PointerTag>> {
op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
Ok(*try_validation!(
self.ecx.read_immediate_raw(op, /*force*/ true),
self.path,
@ -515,7 +515,7 @@ fn read_immediate_forced(
/// at that type. Return `true` if the type is indeed primitive.
fn try_visit_primitive(
&mut self,
value: &OpTy<'tcx, M::PointerTag>,
value: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, bool> {
// Go over all the primitive types
let ty = value.layout.ty;
@ -652,7 +652,7 @@ fn try_visit_primitive(
fn visit_scalar(
&mut self,
scalar: ScalarMaybeUninit<M::PointerTag>,
scalar: ScalarMaybeUninit<M::Provenance>,
scalar_layout: ScalarAbi,
) -> InterpResult<'tcx> {
// We check `is_full_range` in a slightly complicated way because *if* we are checking
@ -735,7 +735,7 @@ fn visit_scalar(
impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
for ValidityVisitor<'rt, 'mir, 'tcx, M>
{
type V = OpTy<'tcx, M::PointerTag>;
type V = OpTy<'tcx, M::Provenance>;
#[inline(always)]
fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
@ -744,7 +744,7 @@ fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
fn read_discriminant(
&mut self,
op: &OpTy<'tcx, M::PointerTag>,
op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, VariantIdx> {
self.with_elem(PathElem::EnumTag, move |this| {
Ok(try_validation!(
@ -764,9 +764,9 @@ fn read_discriminant(
#[inline]
fn visit_field(
&mut self,
old_op: &OpTy<'tcx, M::PointerTag>,
old_op: &OpTy<'tcx, M::Provenance>,
field: usize,
new_op: &OpTy<'tcx, M::PointerTag>,
new_op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let elem = self.aggregate_field_path_elem(old_op.layout, field);
self.with_elem(elem, move |this| this.visit_value(new_op))
@ -775,9 +775,9 @@ fn visit_field(
#[inline]
fn visit_variant(
&mut self,
old_op: &OpTy<'tcx, M::PointerTag>,
old_op: &OpTy<'tcx, M::Provenance>,
variant_id: VariantIdx,
new_op: &OpTy<'tcx, M::PointerTag>,
new_op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let name = match old_op.layout.ty.kind() {
ty::Adt(adt, _) => PathElem::Variant(adt.variant(variant_id).name),
@ -791,7 +791,7 @@ fn visit_variant(
#[inline(always)]
fn visit_union(
&mut self,
op: &OpTy<'tcx, M::PointerTag>,
op: &OpTy<'tcx, M::Provenance>,
_fields: NonZeroUsize,
) -> InterpResult<'tcx> {
// Special check preventing `UnsafeCell` inside unions in the inner part of constants.
@ -804,13 +804,13 @@ fn visit_union(
}
#[inline]
fn visit_box(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
fn visit_box(&mut self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
self.check_safe_pointer(op, "box")?;
Ok(())
}
#[inline]
fn visit_value(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
fn visit_value(&mut self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
trace!("visit_value: {:?}, {:?}", *op, op.layout);
// Check primitive types -- the leaves of our recursive descent.
@ -881,7 +881,7 @@ fn visit_value(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx>
fn visit_aggregate(
&mut self,
op: &OpTy<'tcx, M::PointerTag>,
op: &OpTy<'tcx, M::Provenance>,
fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
) -> InterpResult<'tcx> {
match op.layout.ty.kind() {
@ -992,9 +992,9 @@ fn visit_aggregate(
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
fn validate_operand_internal(
&self,
op: &OpTy<'tcx, M::PointerTag>,
op: &OpTy<'tcx, M::Provenance>,
path: Vec<PathElem>,
ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>>,
ctfe_mode: Option<CtfeValidationMode>,
) -> InterpResult<'tcx> {
trace!("validate_operand_internal: {:?}, {:?}", *op, op.layout.ty);
@ -1031,9 +1031,9 @@ fn validate_operand_internal(
#[inline(always)]
pub fn const_validate_operand(
&self,
op: &OpTy<'tcx, M::PointerTag>,
op: &OpTy<'tcx, M::Provenance>,
path: Vec<PathElem>,
ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>,
ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>,
ctfe_mode: CtfeValidationMode,
) -> InterpResult<'tcx> {
self.validate_operand_internal(op, path, Some(ref_tracking), Some(ctfe_mode))
@ -1043,7 +1043,7 @@ pub fn const_validate_operand(
/// `op` is assumed to cover valid memory if it is an indirect operand.
/// It will error if the bits at the destination do not match the ones described by the layout.
#[inline(always)]
pub fn validate_operand(&self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
pub fn validate_operand(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
self.validate_operand_internal(op, vec![], None, None)
}
}
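Editor's note: the range check behind `visit_scalar` has to cope with wrapping valid ranges: in `rustc_abi`'s `WrappingRange`, `start > end` means the range runs up through the maximum value and wraps around to zero. A simplified containment test:

```rust
/// Membership test for a `WrappingRange`-style valid range: when
/// `start > end`, the range wraps through the type's maximum value.
fn in_valid_range(v: u128, start: u128, end: u128) -> bool {
    if start <= end {
        start <= v && v <= end
    } else {
        v >= start || v <= end
    }
}

fn main() {
    // `bool`-like range 0..=1:
    assert!(in_valid_range(1, 0, 1));
    assert!(!in_valid_range(2, 0, 1));
    // A wrapping range (start > end), e.g. "MAX-1 ..= 5":
    assert!(in_valid_range(3, u128::MAX - 1, 5));
    assert!(!in_valid_range(100, u128::MAX - 1, 5));
}
```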

View file

@ -21,20 +21,20 @@ pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Sized {
fn to_op_for_read(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
/// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
fn to_op_for_proj(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
self.to_op_for_read(ecx)
}
/// Creates this from an `OpTy`.
///
/// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
fn from_op(op: &OpTy<'tcx, M::PointerTag>) -> Self;
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
/// Projects to the given enum variant.
fn project_downcast(
@ -62,18 +62,18 @@ pub trait ValueMut<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Sized {
fn to_op_for_read(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
/// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
fn to_op_for_proj(
&self,
ecx: &mut InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
/// Creates this from an `OpTy`.
///
/// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
fn from_op(op: &OpTy<'tcx, M::PointerTag>) -> Self;
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
/// Projects to the given enum variant.
fn project_downcast(
@ -95,7 +95,7 @@ fn project_field(
// So we have some copy-paste here. (We could have a macro but since we only have 2 types with this
// double-impl, that would barely make the code shorter, if at all.)
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::PointerTag> {
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::Provenance> {
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
@ -105,12 +105,12 @@ fn layout(&self) -> TyAndLayout<'tcx> {
fn to_op_for_read(
&self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.clone())
}
#[inline(always)]
fn from_op(op: &OpTy<'tcx, M::PointerTag>) -> Self {
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
op.clone()
}
@ -134,7 +134,7 @@ fn project_field(
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
for OpTy<'tcx, M::PointerTag>
for OpTy<'tcx, M::Provenance>
{
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
@ -145,7 +145,7 @@ fn layout(&self) -> TyAndLayout<'tcx> {
fn to_op_for_read(
&self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.clone())
}
@ -153,12 +153,12 @@ fn to_op_for_read(
fn to_op_for_proj(
&self,
_ecx: &mut InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.clone())
}
#[inline(always)]
fn from_op(op: &OpTy<'tcx, M::PointerTag>) -> Self {
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
op.clone()
}
@ -182,7 +182,7 @@ fn project_field(
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
for MPlaceTy<'tcx, M::PointerTag>
for MPlaceTy<'tcx, M::Provenance>
{
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
@ -193,12 +193,12 @@ fn layout(&self) -> TyAndLayout<'tcx> {
fn to_op_for_read(
&self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.into())
}
#[inline(always)]
fn from_op(op: &OpTy<'tcx, M::PointerTag>) -> Self {
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
// assert is justified because our `to_op_for_read` only ever produces `Indirect` operands.
op.assert_mem_place()
}
@ -223,7 +223,7 @@ fn project_field(
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
for MPlaceTy<'tcx, M::PointerTag>
for MPlaceTy<'tcx, M::Provenance>
{
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
@ -234,7 +234,7 @@ fn layout(&self) -> TyAndLayout<'tcx> {
fn to_op_for_read(
&self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.into())
}
@ -242,12 +242,12 @@ fn to_op_for_read(
fn to_op_for_proj(
&self,
_ecx: &mut InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.into())
}
#[inline(always)]
fn from_op(op: &OpTy<'tcx, M::PointerTag>) -> Self {
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
// assert is justified because our `to_op_for_proj` only ever produces `Indirect` operands.
op.assert_mem_place()
}
@ -272,7 +272,7 @@ fn project_field(
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
for PlaceTy<'tcx, M::PointerTag>
for PlaceTy<'tcx, M::Provenance>
{
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
@ -283,7 +283,7 @@ fn layout(&self) -> TyAndLayout<'tcx> {
fn to_op_for_read(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
// Reads do not need to force an allocation; `place_to_op` suffices here.
ecx.place_to_op(self)
}
@ -292,13 +292,13 @@ fn to_op_for_read(
fn to_op_for_proj(
&self,
ecx: &mut InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
// We `force_allocation` here so that `from_op` below can work.
Ok(ecx.force_allocation(self)?.into())
}
#[inline(always)]
fn from_op(op: &OpTy<'tcx, M::PointerTag>) -> Self {
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
// assert is justified because our `to_op_for_proj` only ever produces `Indirect` operands.
op.assert_mem_place().into()
}
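The double-impl above is easier to see outside the interpreter. Below is a minimal standalone sketch of the same pattern; every type is an illustrative stand-in (not the rustc-const-eval definitions), and the read-only/mutable traits are collapsed into one: a cheap conversion for reads, a potentially more expensive one for projections, and a way back from the produced operand.

```rust
struct Ctx; // stand-in for InterpCx

#[derive(Clone, Debug, PartialEq)]
struct Op(u64); // stand-in for OpTy
#[derive(Clone, Debug, PartialEq)]
struct MPlace(u64); // stand-in for MPlaceTy

trait Value: Sized {
    fn to_op_for_read(&self, ctx: &Ctx) -> Op;
    fn to_op_for_proj(&self, ctx: &mut Ctx) -> Op;
    /// Only sound if `to_op_for_proj` produces ops this can round-trip.
    fn from_op(op: &Op) -> Self;
}

impl Value for Op {
    fn to_op_for_read(&self, _ctx: &Ctx) -> Op { self.clone() }
    fn to_op_for_proj(&self, _ctx: &mut Ctx) -> Op { self.clone() }
    fn from_op(op: &Op) -> Self { op.clone() }
}

impl Value for MPlace {
    fn to_op_for_read(&self, _ctx: &Ctx) -> Op { Op(self.0) }
    fn to_op_for_proj(&self, _ctx: &mut Ctx) -> Op { Op(self.0) }
    fn from_op(op: &Op) -> Self { MPlace(op.0) }
}

fn main() {
    let mut ctx = Ctx;
    let place = MPlace(7);
    let op = place.to_op_for_proj(&mut ctx);
    assert_eq!(MPlace::from_op(&op), MPlace(7));
}
```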
@ -336,7 +336,7 @@ fn ecx(&$($mutability)? self)
#[inline(always)]
fn read_discriminant(
&mut self,
op: &OpTy<'tcx, M::PointerTag>,
op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, VariantIdx> {
Ok(self.ecx().read_discriminant(op)?.1)
}


@ -30,7 +30,7 @@
// hashed. (see the `Hash` impl below for more details), so the impl is not derived.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct Allocation<Tag = AllocId, Extra = ()> {
pub struct Allocation<Prov = AllocId, Extra = ()> {
/// The actual bytes of the allocation.
/// Note that the bytes of a pointer represent the offset of the pointer.
bytes: Box<[u8]>,
@ -38,7 +38,7 @@ pub struct Allocation<Tag = AllocId, Extra = ()> {
/// Only the first byte of a pointer is inserted into the map; i.e.,
/// every entry in this map applies to `pointer_size` consecutive bytes starting
/// at the given offset.
relocations: Relocations<Tag>,
relocations: Relocations<Prov>,
/// Denotes which part of this allocation is initialized.
init_mask: InitMask,
/// The alignment of the allocation to detect unaligned reads.
@ -102,8 +102,8 @@ fn hash<H: hash::Hasher>(&self, state: &mut H) {
/// (`ConstAllocation`) are used quite a bit.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
#[rustc_pass_by_value]
pub struct ConstAllocation<'tcx, Tag = AllocId, Extra = ()>(
pub Interned<'tcx, Allocation<Tag, Extra>>,
pub struct ConstAllocation<'tcx, Prov = AllocId, Extra = ()>(
pub Interned<'tcx, Allocation<Prov, Extra>>,
);
impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
@ -114,8 +114,8 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
}
}
impl<'tcx, Tag, Extra> ConstAllocation<'tcx, Tag, Extra> {
pub fn inner(self) -> &'tcx Allocation<Tag, Extra> {
impl<'tcx, Prov, Extra> ConstAllocation<'tcx, Prov, Extra> {
pub fn inner(self) -> &'tcx Allocation<Prov, Extra> {
self.0.0
}
}
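For readers unfamiliar with the pattern: the `Prov = AllocId` default lets tcx-level code keep writing plain `Allocation`, while each machine instantiates its own provenance and extra data without touching the shared definition. A minimal standalone sketch (toy types, not the real definitions):

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct AllocId(u64);

#[derive(Debug)]
struct Allocation<Prov = AllocId, Extra = ()> {
    bytes: Box<[u8]>,
    // The real type keeps a full relocations map; one optional entry is
    // enough to show the shape.
    prov: Option<Prov>,
    extra: Extra,
}

// tcx-side code can omit both parameters entirely...
fn global_alloc() -> Allocation {
    Allocation { bytes: vec![0u8; 8].into(), prov: Some(AllocId(0)), extra: () }
}

// ...while a hypothetical machine plugs in richer provenance and extra data.
#[derive(Debug, Clone, Copy)]
struct MachineProv {
    base: AllocId,
    tag: u32,
}
type MachineAlloc = Allocation<MachineProv, String>;

fn main() {
    let g = global_alloc();
    let m: MachineAlloc = Allocation {
        bytes: g.bytes,
        prov: g.prov.map(|id| MachineProv { base: id, tag: 1 }),
        extra: String::from("machine-specific bookkeeping"),
    };
    println!("{m:?}");
}
```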
@ -200,7 +200,7 @@ pub fn subrange(self, subrange: AllocRange) -> AllocRange {
}
// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
impl<Prov> Allocation<Prov> {
/// Creates an allocation initialized by the given bytes
pub fn from_bytes<'a>(
slice: impl Into<Cow<'a, [u8]>>,
@ -256,14 +256,15 @@ pub fn uninit<'tcx>(size: Size, align: Align, panic_on_fail: bool) -> InterpResu
}
impl Allocation {
/// Convert Tag and add Extra fields
pub fn convert_tag_add_extra<Tag, Extra, Err>(
/// Adjusts an allocation from the form interned in the tcx to a custom Machine
/// instance with a different `Provenance` and `Extra` type.
pub fn adjust_from_tcx<Prov, Extra, Err>(
self,
cx: &impl HasDataLayout,
extra: Extra,
mut tagger: impl FnMut(Pointer<AllocId>) -> Result<Pointer<Tag>, Err>,
) -> Result<Allocation<Tag, Extra>, Err> {
// Compute new pointer tags, which also adjusts the bytes.
mut adjust_ptr: impl FnMut(Pointer<AllocId>) -> Result<Pointer<Prov>, Err>,
) -> Result<Allocation<Prov, Extra>, Err> {
// Compute new pointer provenance, which also adjusts the bytes.
let mut bytes = self.bytes;
let mut new_relocations = Vec::with_capacity(self.relocations.0.len());
let ptr_size = cx.data_layout().pointer_size.bytes_usize();
@ -272,10 +273,10 @@ pub fn convert_tag_add_extra<Tag, Extra, Err>(
let idx = offset.bytes_usize();
let ptr_bytes = &mut bytes[idx..idx + ptr_size];
let bits = read_target_uint(endian, ptr_bytes).unwrap();
let (ptr_tag, ptr_offset) =
tagger(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts();
let (ptr_prov, ptr_offset) =
adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts();
write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
new_relocations.push((offset, ptr_tag));
new_relocations.push((offset, ptr_prov));
}
// Create allocation.
Ok(Allocation {
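The renamed `adjust_from_tcx` boils down to: read the pointer-sized integer under each relocation, ask the machine for new provenance (and possibly a rebased offset), write the offset back into the bytes, and record the provenance. A std-only sketch under simplifying assumptions (little-endian, 8-byte pointers, all names illustrative):

```rust
use std::collections::BTreeMap;

type AllocId = u64;

#[derive(Debug, Clone, Copy)]
struct MachineProv {
    alloc_id: AllocId,
    tag: u32,
}

const PTR_SIZE: usize = 8;

fn adjust_from_tcx(
    mut bytes: Vec<u8>,
    relocations: BTreeMap<usize, AllocId>,
    mut adjust_ptr: impl FnMut(AllocId, u64) -> (MachineProv, u64),
) -> (Vec<u8>, BTreeMap<usize, MachineProv>) {
    let mut new_relocations = BTreeMap::new();
    for (offset, alloc_id) in relocations {
        // Read the pointer's stored offset out of the raw bytes...
        let old = u64::from_le_bytes(bytes[offset..offset + PTR_SIZE].try_into().unwrap());
        // ...let the machine choose new provenance (possibly rebasing the offset)...
        let (prov, new) = adjust_ptr(alloc_id, old);
        // ...and write the adjusted offset back in place.
        bytes[offset..offset + PTR_SIZE].copy_from_slice(&new.to_le_bytes());
        new_relocations.insert(offset, prov);
    }
    (bytes, new_relocations)
}

fn main() {
    let mut bytes = vec![0u8; 16];
    bytes[8..16].copy_from_slice(&4u64.to_le_bytes()); // a pointer stored at offset 8
    let relocs = BTreeMap::from([(8usize, 42u64)]);
    let (bytes, new_relocs) = adjust_from_tcx(bytes, relocs, |id, off| {
        (MachineProv { alloc_id: id, tag: 7 }, off + 0x1000)
    });
    println!("{new_relocs:?} {:?}", &bytes[8..16]);
}
```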
@ -290,7 +291,7 @@ pub fn convert_tag_add_extra<Tag, Extra, Err>(
}
/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
impl<Prov, Extra> Allocation<Prov, Extra> {
pub fn len(&self) -> usize {
self.bytes.len()
}
@ -313,13 +314,13 @@ pub fn init_mask(&self) -> &InitMask {
}
/// Returns the relocation list.
pub fn relocations(&self) -> &Relocations<Tag> {
pub fn relocations(&self) -> &Relocations<Prov> {
&self.relocations
}
}
/// Byte accessors.
impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
/// This is the entirely abstraction-violating way to just grab the raw bytes without
/// caring about relocations. It just deduplicates some code between `read_scalar`
/// and `get_bytes_internal`.
@ -413,7 +414,7 @@ pub fn get_bytes_mut_ptr(
}
/// Reading and writing.
impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
/// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
/// relocation. If `allow_uninit`/`allow_ptr` is `false`, also enforces that the memory in the
/// given range contains no uninitialized bytes/relocations.
@ -451,7 +452,7 @@ pub fn read_scalar(
cx: &impl HasDataLayout,
range: AllocRange,
read_provenance: bool,
) -> AllocResult<ScalarMaybeUninit<Tag>> {
) -> AllocResult<ScalarMaybeUninit<Prov>> {
if read_provenance {
assert_eq!(range.size, cx.data_layout().pointer_size);
}
@ -475,7 +476,7 @@ pub fn read_scalar(
// If we are *not* reading a pointer, and we can just ignore relocations,
// then do exactly that.
if !read_provenance && Tag::OFFSET_IS_ADDR {
if !read_provenance && Prov::OFFSET_IS_ADDR {
// We just strip provenance.
let bytes = self.get_bytes_even_more_internal(range);
let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
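The fast path above is easier to follow in isolation: when `OFFSET_IS_ADDR` holds, an integer read can ignore any relocation and return the raw bits, i.e. strip the provenance. A hedged standalone sketch (toy types and a hardcoded flag, not the real `Allocation` API):

```rust
use std::collections::BTreeMap;

const OFFSET_IS_ADDR: bool = true; // e.g. provenance whose offsets are absolute
const PTR_SIZE: usize = 8;

#[derive(Debug, Clone, Copy)]
struct Prov(u64);

#[derive(Debug)]
enum Scalar {
    Int(u64),
    Ptr(Prov, u64),
}

fn read_scalar(
    bytes: &[u8],
    relocations: &BTreeMap<usize, Prov>,
    offset: usize,
    read_provenance: bool,
) -> Result<Scalar, &'static str> {
    let bits = u64::from_le_bytes(bytes[offset..offset + PTR_SIZE].try_into().unwrap());
    if read_provenance {
        // A pointer read picks up the relocation, if one starts here.
        if let Some(&prov) = relocations.get(&offset) {
            return Ok(Scalar::Ptr(prov, bits));
        }
        return Ok(Scalar::Int(bits));
    }
    // An integer read: with absolute offsets, just return the raw bits and
    // thereby strip the provenance.
    if OFFSET_IS_ADDR {
        return Ok(Scalar::Int(bits));
    }
    // With relative offsets, the bits alone are meaningless.
    if relocations.contains_key(&offset) {
        return Err("cannot read a relative pointer as an integer");
    }
    Ok(Scalar::Int(bits))
}

fn main() {
    let bytes = 0xdead_beef_u64.to_le_bytes();
    let relocs = BTreeMap::from([(0usize, Prov(3))]);
    println!("{:?}", read_scalar(&bytes, &relocs, 0, false)); // Int(..): stripped
    println!("{:?}", read_scalar(&bytes, &relocs, 0, true)); // Ptr(..)
}
```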
@ -506,7 +507,7 @@ pub fn write_scalar(
&mut self,
cx: &impl HasDataLayout,
range: AllocRange,
val: ScalarMaybeUninit<Tag>,
val: ScalarMaybeUninit<Prov>,
) -> AllocResult {
assert!(self.mutability == Mutability::Mut);
@ -548,9 +549,9 @@ pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> Al
}
/// Relocations.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
/// Returns all relocations overlapping with the given memory range.
fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Tag)] {
fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
@ -580,7 +581,7 @@ fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> Alloc
/// immediately in that case.
fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
where
Tag: Provenance,
Prov: Provenance,
{
// Find the start and end of the given range and its outermost relocations.
let (first, last) = {
@ -602,7 +603,7 @@ fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> A
// FIXME: Miri should preserve partial relocations; see
// https://github.com/rust-lang/miri/issues/2181.
if first < start {
if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
return Err(AllocError::PartialPointerOverwrite(first));
}
warn!(
@ -611,7 +612,7 @@ fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> A
self.init_mask.set_range(first, start, false);
}
if last > end {
if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
return Err(AllocError::PartialPointerOverwrite(
last - cx.data_layout().pointer_size,
));
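One detail above deserves a standalone illustration: relocations are keyed by the *first* byte of each stored pointer, so a range query must look back `pointer_size - 1` bytes to catch pointers that straddle the start of the range. A std-only sketch with a `BTreeMap` standing in for the sorted map (names illustrative):

```rust
use std::collections::BTreeMap;

const PTR_SIZE: u64 = 8;

fn overlapping_relocations<'a, P>(
    relocations: &'a BTreeMap<u64, P>,
    start: u64,
    end: u64,
) -> impl Iterator<Item = (&'a u64, &'a P)> {
    // A pointer starting up to PTR_SIZE - 1 bytes before `start` still
    // overlaps the queried range.
    let lookback = start.saturating_sub(PTR_SIZE - 1);
    relocations.range(lookback..end)
}

fn main() {
    // A pointer stored at bytes 6..14.
    let relocs = BTreeMap::from([(6u64, "alloc#42")]);
    // A query for bytes 8..16 must still find it, even though 6 < 8.
    let hit: Vec<_> = overlapping_relocations(&relocs, 8, 16).collect();
    assert_eq!(hit.len(), 1);
    println!("{hit:?}");
}
```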
@ -642,22 +643,22 @@ fn check_relocation_edges(&self, cx: &impl HasDataLayout, range: AllocRange) ->
/// "Relocations" stores the provenance information of pointers stored in memory.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
pub struct Relocations<Tag = AllocId>(SortedMap<Size, Tag>);
pub struct Relocations<Prov = AllocId>(SortedMap<Size, Prov>);
impl<Tag> Relocations<Tag> {
impl<Prov> Relocations<Prov> {
pub fn new() -> Self {
Relocations(SortedMap::new())
}
// The caller must guarantee that the given relocations are already sorted
// by address and contain no duplicates.
pub fn from_presorted(r: Vec<(Size, Tag)>) -> Self {
pub fn from_presorted(r: Vec<(Size, Prov)>) -> Self {
Relocations(SortedMap::from_presorted_elements(r))
}
}
impl<Tag> Deref for Relocations<Tag> {
type Target = SortedMap<Size, Tag>;
impl<Prov> Deref for Relocations<Prov> {
type Target = SortedMap<Size, Prov>;
fn deref(&self) -> &Self::Target {
&self.0
@ -667,18 +668,18 @@ fn deref(&self) -> &Self::Target {
/// A partial, owned list of relocations to transfer into another allocation.
///
/// Offsets are already adjusted to the destination allocation.
pub struct AllocationRelocations<Tag> {
dest_relocations: Vec<(Size, Tag)>,
pub struct AllocationRelocations<Prov> {
dest_relocations: Vec<(Size, Prov)>,
}
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
pub fn prepare_relocation_copy(
&self,
cx: &impl HasDataLayout,
src: AllocRange,
dest: Size,
count: u64,
) -> AllocationRelocations<Tag> {
) -> AllocationRelocations<Prov> {
let relocations = self.get_relocations(cx, src);
if relocations.is_empty() {
return AllocationRelocations { dest_relocations: Vec::new() };
@ -688,7 +689,7 @@ pub fn prepare_relocation_copy(
let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));
// If `count` is large, this is rather wasteful -- we are allocating a big array here, which
// is mostly filled with redundant information since it's just N copies of the same `Tag`s
// is mostly filled with redundant information since it's just N copies of the same `Prov`s
// at slightly adjusted offsets. The reason we do this is so that in `mark_relocation_range`
// we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
// the right sequence of relocations for all N copies.
@ -713,7 +714,7 @@ pub fn prepare_relocation_copy(
///
/// This is dangerous to use as it can violate internal `Allocation` invariants!
/// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Prov>) {
self.relocations.0.insert_presorted(relocations.dest_relocations);
}
}
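The comment above explains why the copy is materialized eagerly: N offset-adjusted copies of the same provenance list come out already sorted, so the destination can ingest them in a single sorted bulk insert. A std-only sketch of that expansion (illustrative names, `BTreeMap` standing in for the sorted relocations map):

```rust
use std::collections::BTreeMap;

fn prepare_relocation_copy<P: Copy>(
    src: &[(u64, P)], // relocations inside the copied source range
    src_start: u64,   // start of the source range
    dest: u64,        // start of the first destination copy
    size: u64,        // length of one copy
    count: u64,       // number of repetitions
) -> Vec<(u64, P)> {
    let mut out = Vec::with_capacity(src.len() * count as usize);
    for i in 0..count {
        let shift = dest + i * size;
        for &(offset, prov) in src {
            out.push((offset - src_start + shift, prov));
        }
    }
    out // already sorted: copies are emitted in ascending destination order
}

fn main() {
    let src = [(2u64, "alloc#7")];
    let expanded = prepare_relocation_copy(&src, 0, 100, 10, 3);
    // One relocation per copy, at offsets 102, 112, 122.
    let mut dest: BTreeMap<u64, &str> = BTreeMap::new();
    dest.extend(expanded);
    println!("{dest:?}");
}
```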
@ -1178,7 +1179,7 @@ fn next(&mut self) -> Option<Self::Item> {
}
/// Uninitialized bytes.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
/// Checks whether the given range is entirely initialized.
///
/// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
@ -1226,7 +1227,7 @@ pub fn no_bytes_init(&self) -> bool {
}
/// Transferring the initialization mask to other allocations.
impl<Tag, Extra> Allocation<Tag, Extra> {
impl<Prov, Extra> Allocation<Prov, Extra> {
/// Creates a run-length encoding of the initialization mask; panics if range is empty.
///
/// This is essentially a more space-efficient version of


@ -159,34 +159,34 @@ fn get_alloc_id(self) -> Option<AllocId> {
/// Pointers are "tagged" with provenance information, typically the `AllocId` they belong to.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable)]
pub struct Pointer<Tag = AllocId> {
pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Tag` type)
pub provenance: Tag,
pub struct Pointer<Prov = AllocId> {
pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Prov` type)
pub provenance: Prov,
}
static_assert_size!(Pointer, 16);
// `Option<Tag>` pointers are also passed around quite a bit
// `Option<Prov>` pointers are also passed around quite a bit
// (but not stored in permanent machine state).
static_assert_size!(Pointer<Option<AllocId>>, 16);
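For readers outside the compiler: `static_assert_size!` fails the build if these layouts drift. Plain Rust can express the same check with a `const` assertion; a standalone sketch (toy types, with a `NonZeroU64`-based `AllocId` so the `Option` niche keeps the size at 16 bytes):

```rust
use std::num::NonZeroU64;

#[derive(Debug, Clone, Copy)]
#[repr(transparent)]
#[allow(dead_code)]
struct AllocId(NonZeroU64); // the niche keeps Option<AllocId> at 8 bytes

#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
struct Pointer<Prov = AllocId> {
    offset: u64,
    provenance: Prov,
}

// Compile-time layout checks in the spirit of `static_assert_size!`:
const _: () = assert!(std::mem::size_of::<Pointer>() == 16);
const _: () = assert!(std::mem::size_of::<Pointer<Option<AllocId>>>() == 16);

fn main() {
    println!("both pointer layouts are 16 bytes");
}
```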
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types.
impl<Tag: Provenance> fmt::Debug for Pointer<Tag> {
impl<Prov: Provenance> fmt::Debug for Pointer<Prov> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
Provenance::fmt(self, f)
}
}
impl<Tag: Provenance> fmt::Debug for Pointer<Option<Tag>> {
impl<Prov: Provenance> fmt::Debug for Pointer<Option<Prov>> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.provenance {
Some(tag) => Provenance::fmt(&Pointer::new(tag, self.offset), f),
Some(prov) => Provenance::fmt(&Pointer::new(prov, self.offset), f),
None => write!(f, "{:#x}[noalloc]", self.offset.bytes()),
}
}
}
impl<Tag: Provenance> fmt::Display for Pointer<Option<Tag>> {
impl<Prov: Provenance> fmt::Display for Pointer<Option<Prov>> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.provenance.is_none() && self.offset.bytes() == 0 {
write!(f, "null pointer")
@ -204,38 +204,38 @@ fn from(alloc_id: AllocId) -> Self {
}
}
impl<Tag> From<Pointer<Tag>> for Pointer<Option<Tag>> {
impl<Prov> From<Pointer<Prov>> for Pointer<Option<Prov>> {
#[inline(always)]
fn from(ptr: Pointer<Tag>) -> Self {
let (tag, offset) = ptr.into_parts();
Pointer::new(Some(tag), offset)
fn from(ptr: Pointer<Prov>) -> Self {
let (prov, offset) = ptr.into_parts();
Pointer::new(Some(prov), offset)
}
}
impl<Tag> Pointer<Option<Tag>> {
/// Convert this pointer that *might* have a tag into a pointer that *definitely* has a tag, or
/// an absolute address.
impl<Prov> Pointer<Option<Prov>> {
/// Convert this pointer that *might* have a provenance into a pointer that *definitely* has a
/// provenance, or an absolute address.
///
/// This is rarely what you want; call `ptr_try_get_alloc_id` instead.
pub fn into_pointer_or_addr(self) -> Result<Pointer<Tag>, Size> {
pub fn into_pointer_or_addr(self) -> Result<Pointer<Prov>, Size> {
match self.provenance {
Some(tag) => Ok(Pointer::new(tag, self.offset)),
Some(prov) => Ok(Pointer::new(prov, self.offset)),
None => Err(self.offset),
}
}
/// Returns the absolute address the pointer points to.
/// Only works if Tag::OFFSET_IS_ADDR is true!
/// Only works if Prov::OFFSET_IS_ADDR is true!
pub fn addr(self) -> Size
where
Tag: Provenance,
Prov: Provenance,
{
assert!(Tag::OFFSET_IS_ADDR);
assert!(Prov::OFFSET_IS_ADDR);
self.offset
}
}
impl<Tag> Pointer<Option<Tag>> {
impl<Prov> Pointer<Option<Prov>> {
#[inline(always)]
pub fn from_addr(addr: u64) -> Self {
Pointer { provenance: None, offset: Size::from_bytes(addr) }
@ -247,21 +247,21 @@ pub fn null() -> Self {
}
}
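A standalone sketch of the `Pointer<Option<Prov>>` helpers above, with illustrative stand-in types (the `Provenance` trait here is reduced to the one flag these methods use):

```rust
#[derive(Debug, Clone, Copy)]
struct Pointer<Prov> {
    offset: u64,
    provenance: Prov,
}

trait Provenance: Copy {
    /// Does `offset` hold the absolute address, independent of provenance?
    const OFFSET_IS_ADDR: bool;
}

impl<Prov> Pointer<Option<Prov>> {
    fn from_addr(addr: u64) -> Self {
        Pointer { provenance: None, offset: addr }
    }

    /// Either a pointer that definitely has provenance, or a raw address.
    fn into_pointer_or_addr(self) -> Result<Pointer<Prov>, u64> {
        match self.provenance {
            Some(prov) => Ok(Pointer { provenance: prov, offset: self.offset }),
            None => Err(self.offset),
        }
    }

    /// The absolute address; only meaningful when offsets are addresses.
    fn addr(self) -> u64
    where
        Prov: Provenance,
    {
        assert!(Prov::OFFSET_IS_ADDR);
        self.offset
    }
}

// A hypothetical machine provenance whose offsets are absolute addresses.
#[derive(Debug, Clone, Copy)]
struct Tagged {
    #[allow(dead_code)]
    tag: u32,
}
impl Provenance for Tagged {
    const OFFSET_IS_ADDR: bool = true;
}

fn main() {
    let p = Pointer { provenance: Some(Tagged { tag: 1 }), offset: 0x40 };
    assert_eq!(p.addr(), 0x40);
    assert!(Pointer::<Option<Tagged>>::from_addr(8).into_pointer_or_addr().is_err());
}
```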
impl<'tcx, Tag> Pointer<Tag> {
impl<'tcx, Prov> Pointer<Prov> {
#[inline(always)]
pub fn new(provenance: Tag, offset: Size) -> Self {
pub fn new(provenance: Prov, offset: Size) -> Self {
Pointer { provenance, offset }
}
/// Obtain the constituents of this pointer. Not that the meaning of the offset depends on the type `Tag`!
/// Obtain the constituents of this pointer. Note that the meaning of the offset depends on the type `Prov`!
/// This function must only be used in the implementation of `Machine::ptr_get_alloc`,
/// and when a `Pointer` is taken apart to be stored efficiently in an `Allocation`.
#[inline(always)]
pub fn into_parts(self) -> (Tag, Size) {
pub fn into_parts(self) -> (Prov, Size) {
(self.provenance, self.offset)
}
pub fn map_provenance(self, f: impl FnOnce(Tag) -> Tag) -> Self {
pub fn map_provenance(self, f: impl FnOnce(Prov) -> Prov) -> Self {
Pointer { provenance: f(self.provenance), ..self }
}


@ -126,7 +126,7 @@ pub fn from_machine_usize(i: u64, cx: &impl HasDataLayout) -> Self {
/// Do *not* match on a `Scalar`! Use the various `to_*` methods instead.
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable)]
pub enum Scalar<Tag = AllocId> {
pub enum Scalar<Prov = AllocId> {
/// The raw bytes of a simple value.
Int(ScalarInt),
@ -137,7 +137,7 @@ pub enum Scalar<Tag = AllocId> {
/// We also store the size of the pointer, such that a `Scalar` always knows how big it is.
/// The size is always the pointer size of the current target, but this is not information
/// that we always have readily available.
Ptr(Pointer<Tag>, u8),
Ptr(Pointer<Prov>, u8),
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
@ -145,7 +145,7 @@ pub enum Scalar<Tag = AllocId> {
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types.
impl<Tag: Provenance> fmt::Debug for Scalar<Tag> {
impl<Prov: Provenance> fmt::Debug for Scalar<Prov> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Scalar::Ptr(ptr, _size) => write!(f, "{:?}", ptr),
@ -154,7 +154,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
}
}
impl<Tag: Provenance> fmt::Display for Scalar<Tag> {
impl<Prov: Provenance> fmt::Display for Scalar<Prov> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Scalar::Ptr(ptr, _size) => write!(f, "pointer to {:?}", ptr),
@ -163,7 +163,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
}
}
impl<Tag: Provenance> fmt::LowerHex for Scalar<Tag> {
impl<Prov: Provenance> fmt::LowerHex for Scalar<Prov> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Scalar::Ptr(ptr, _size) => write!(f, "pointer to {:?}", ptr),
@ -172,37 +172,38 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
}
}
impl<Tag> From<Single> for Scalar<Tag> {
impl<Prov> From<Single> for Scalar<Prov> {
#[inline(always)]
fn from(f: Single) -> Self {
Scalar::from_f32(f)
}
}
impl<Tag> From<Double> for Scalar<Tag> {
impl<Prov> From<Double> for Scalar<Prov> {
#[inline(always)]
fn from(f: Double) -> Self {
Scalar::from_f64(f)
}
}
impl<Tag> From<ScalarInt> for Scalar<Tag> {
impl<Prov> From<ScalarInt> for Scalar<Prov> {
#[inline(always)]
fn from(ptr: ScalarInt) -> Self {
Scalar::Int(ptr)
}
}
impl<Tag> Scalar<Tag> {
impl<Prov> Scalar<Prov> {
#[inline(always)]
pub fn from_pointer(ptr: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
Scalar::Ptr(ptr, u8::try_from(cx.pointer_size().bytes()).unwrap())
}
/// Create a Scalar from a pointer with an `Option<_>` tag (where `None` represents a plain integer).
pub fn from_maybe_pointer(ptr: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
/// Create a Scalar from a pointer with an `Option<_>` provenance (where `None` represents a
/// plain integer / "invalid" pointer).
pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
match ptr.into_parts() {
(Some(tag), offset) => Scalar::from_pointer(Pointer::new(tag, offset), cx),
(Some(prov), offset) => Scalar::from_pointer(Pointer::new(prov, offset), cx),
(None, offset) => {
Scalar::Int(ScalarInt::try_from_uint(offset.bytes(), cx.pointer_size()).unwrap())
}
@ -310,7 +311,7 @@ pub fn from_f64(f: Double) -> Self {
pub fn to_bits_or_ptr_internal(
self,
target_size: Size,
) -> Result<Result<u128, Pointer<Tag>>, ScalarSizeMismatch> {
) -> Result<Result<u128, Pointer<Prov>>, ScalarSizeMismatch> {
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
Ok(match self {
Scalar::Int(int) => Ok(int.to_bits(target_size).map_err(|size| {
@ -329,7 +330,7 @@ pub fn to_bits_or_ptr_internal(
}
}
impl<'tcx, Tag: Provenance> Scalar<Tag> {
impl<'tcx, Prov: Provenance> Scalar<Prov> {
/// Fundamental scalar-to-int (cast) operation. Many convenience wrappers exist below that you
/// likely want to use instead.
///
@ -341,13 +342,13 @@ pub fn try_to_int(self) -> Result<ScalarInt, Scalar<AllocId>> {
match self {
Scalar::Int(int) => Ok(int),
Scalar::Ptr(ptr, sz) => {
if Tag::OFFSET_IS_ADDR {
if Prov::OFFSET_IS_ADDR {
Ok(ScalarInt::try_from_uint(ptr.offset.bytes(), Size::from_bytes(sz)).unwrap())
} else {
// We know `offset` is relative, since `OFFSET_IS_ADDR == false`.
let (tag, offset) = ptr.into_parts();
let (prov, offset) = ptr.into_parts();
// Because `OFFSET_IS_ADDR == false`, this unwrap can never fail.
Err(Scalar::Ptr(Pointer::new(tag.get_alloc_id().unwrap(), offset), sz))
Err(Scalar::Ptr(Pointer::new(prov.get_alloc_id().unwrap(), offset), sz))
}
}
}
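The `Int`/`Ptr` split and `try_to_int` are easy to mirror in isolation: pointer bits are usable as an integer exactly when `OFFSET_IS_ADDR` says the offset is the absolute address. A hedged standalone sketch (toy types, `u128` bits, not the real `Scalar`):

```rust
#[derive(Debug, Clone, Copy)]
enum Scalar<Prov> {
    Int(u128),
    Ptr(Prov, u64), // provenance + offset, standing in for `Pointer<Prov>`
}

trait Provenance: Copy {
    const OFFSET_IS_ADDR: bool;
}

impl<Prov: Provenance> Scalar<Prov> {
    /// Succeeds with raw bits exactly when they are meaningful without provenance.
    fn try_to_int(self) -> Result<u128, Self> {
        match self {
            Scalar::Int(int) => Ok(int),
            Scalar::Ptr(_, offset) if Prov::OFFSET_IS_ADDR => {
                // The offset *is* the absolute address, so it is a usable
                // integer; the provenance is simply lost.
                Ok(u128::from(offset))
            }
            ptr => Err(ptr), // relative offsets are not usable as integers
        }
    }
}

#[derive(Debug, Clone, Copy)]
struct AbsProv;
impl Provenance for AbsProv {
    const OFFSET_IS_ADDR: bool = true;
}

fn main() {
    let s: Scalar<AbsProv> = Scalar::Ptr(AbsProv, 0x1000);
    assert_eq!(s.try_to_int().unwrap(), 0x1000);
}
```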
@ -489,24 +490,24 @@ pub fn to_f64(self) -> InterpResult<'tcx, Double> {
}
#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)]
pub enum ScalarMaybeUninit<Tag = AllocId> {
Scalar(Scalar<Tag>),
pub enum ScalarMaybeUninit<Prov = AllocId> {
Scalar(Scalar<Prov>),
Uninit,
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ScalarMaybeUninit, 24);
impl<Tag> From<Scalar<Tag>> for ScalarMaybeUninit<Tag> {
impl<Prov> From<Scalar<Prov>> for ScalarMaybeUninit<Prov> {
#[inline(always)]
fn from(s: Scalar<Tag>) -> Self {
fn from(s: Scalar<Prov>) -> Self {
ScalarMaybeUninit::Scalar(s)
}
}
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types.
impl<Tag: Provenance> fmt::Debug for ScalarMaybeUninit<Tag> {
impl<Prov: Provenance> fmt::Debug for ScalarMaybeUninit<Prov> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ScalarMaybeUninit::Uninit => write!(f, "<uninitialized>"),
@ -515,7 +516,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
}
}
impl<Tag: Provenance> fmt::LowerHex for ScalarMaybeUninit<Tag> {
impl<Prov: Provenance> fmt::LowerHex for ScalarMaybeUninit<Prov> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ScalarMaybeUninit::Uninit => write!(f, "uninitialized bytes"),
@ -524,19 +525,19 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
}
}
impl<Tag> ScalarMaybeUninit<Tag> {
impl<Prov> ScalarMaybeUninit<Prov> {
#[inline]
pub fn from_pointer(ptr: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
ScalarMaybeUninit::Scalar(Scalar::from_pointer(ptr, cx))
}
#[inline]
pub fn from_maybe_pointer(ptr: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
ScalarMaybeUninit::Scalar(Scalar::from_maybe_pointer(ptr, cx))
}
#[inline]
pub fn check_init<'tcx>(self) -> InterpResult<'tcx, Scalar<Tag>> {
pub fn check_init<'tcx>(self) -> InterpResult<'tcx, Scalar<Prov>> {
match self {
ScalarMaybeUninit::Scalar(scalar) => Ok(scalar),
ScalarMaybeUninit::Uninit => throw_ub!(InvalidUninitBytes(None)),
@ -544,7 +545,7 @@ pub fn check_init<'tcx>(self) -> InterpResult<'tcx, Scalar<Tag>> {
}
}
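`check_init` is the single choke point where uninitialized scalars become errors. A standalone sketch with a plain `Result` standing in for `InterpResult`/`throw_ub!` (illustrative types):

```rust
#[derive(Debug, Clone, Copy)]
enum ScalarMaybeUninit<S> {
    Scalar(S),
    Uninit,
}

#[derive(Debug)]
struct UninitError; // stand-in for `throw_ub!(InvalidUninitBytes(..))`

impl<S> ScalarMaybeUninit<S> {
    fn check_init(self) -> Result<S, UninitError> {
        match self {
            ScalarMaybeUninit::Scalar(s) => Ok(s),
            ScalarMaybeUninit::Uninit => Err(UninitError),
        }
    }
}

fn main() {
    let v: ScalarMaybeUninit<u64> = ScalarMaybeUninit::Scalar(5);
    assert_eq!(v.check_init().unwrap(), 5);
    assert!(ScalarMaybeUninit::<u64>::Uninit.check_init().is_err());
}
```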
impl<'tcx, Tag: Provenance> ScalarMaybeUninit<Tag> {
impl<'tcx, Prov: Provenance> ScalarMaybeUninit<Prov> {
#[inline(always)]
pub fn to_bool(self) -> InterpResult<'tcx, bool> {
self.check_init()?.to_bool()


@ -767,21 +767,21 @@ fn visit_constant(&mut self, c: &Constant<'tcx>, loc: Location) {
/// After the hex dump, an ASCII dump follows, replacing all unprintable characters (control
/// characters or characters whose value is larger than 127) with a `.`.
/// This also prints relocations adequately.
pub fn display_allocation<'a, 'tcx, Tag, Extra>(
pub fn display_allocation<'a, 'tcx, Prov, Extra>(
tcx: TyCtxt<'tcx>,
alloc: &'a Allocation<Tag, Extra>,
) -> RenderAllocation<'a, 'tcx, Tag, Extra> {
alloc: &'a Allocation<Prov, Extra>,
) -> RenderAllocation<'a, 'tcx, Prov, Extra> {
RenderAllocation { tcx, alloc }
}
#[doc(hidden)]
pub struct RenderAllocation<'a, 'tcx, Tag, Extra> {
pub struct RenderAllocation<'a, 'tcx, Prov, Extra> {
tcx: TyCtxt<'tcx>,
alloc: &'a Allocation<Tag, Extra>,
alloc: &'a Allocation<Prov, Extra>,
}
impl<'a, 'tcx, Tag: Provenance, Extra> std::fmt::Display
for RenderAllocation<'a, 'tcx, Tag, Extra>
impl<'a, 'tcx, Prov: Provenance, Extra> std::fmt::Display
for RenderAllocation<'a, 'tcx, Prov, Extra>
{
fn fmt(&self, w: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let RenderAllocation { tcx, alloc } = *self;
@ -825,9 +825,9 @@ fn write_allocation_newline(
/// The `prefix` argument allows callers to add an arbitrary prefix before each line (even if there
/// is only one line). Note that your prefix should contain a trailing space as the lines are
/// printed directly after it.
fn write_allocation_bytes<'tcx, Tag: Provenance, Extra>(
fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
tcx: TyCtxt<'tcx>,
alloc: &Allocation<Tag, Extra>,
alloc: &Allocation<Prov, Extra>,
w: &mut dyn std::fmt::Write,
prefix: &str,
) -> std::fmt::Result {
@ -861,7 +861,7 @@ fn write_allocation_bytes<'tcx, Tag: Provenance, Extra>(
if i != line_start {
write!(w, " ")?;
}
if let Some(&tag) = alloc.relocations().get(&i) {
if let Some(&prov) = alloc.relocations().get(&i) {
// Memory with a relocation must be defined
assert!(alloc.init_mask().is_range_initialized(i, i + ptr_size).is_ok());
let j = i.bytes_usize();
@ -870,7 +870,7 @@ fn write_allocation_bytes<'tcx, Tag: Provenance, Extra>(
let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap();
let offset = Size::from_bytes(offset);
let relocation_width = |bytes| bytes * 3;
let ptr = Pointer::new(tag, offset);
let ptr = Pointer::new(prov, offset);
let mut target = format!("{:?}", ptr);
if target.len() > relocation_width(ptr_size.bytes_usize() - 1) {
// This is too long, try to save some space.
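The rendering math above assumes roughly three output columns per byte ("xx "), which is the budget a relocation label has to fit into. A tiny standalone sketch of such a hex-plus-ASCII dump (not the real renderer):

```rust
// Each byte renders as three characters ("xx "), so a pointer-sized
// relocation label gets about `3 * PTR_SIZE` columns to fit into.
fn hex_dump(bytes: &[u8]) -> String {
    let hex: String = bytes.iter().map(|b| format!("{b:02x} ")).collect();
    let ascii: String = bytes
        .iter()
        .map(|&b| if (0x20..0x7f).contains(&b) { b as char } else { '.' })
        .collect();
    format!("{hex}| {ascii}")
}

fn main() {
    println!("{}", hex_dump(b"hi\x00\xff"));
    // prints: "68 69 00 ff | hi.."
}
```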


@ -153,9 +153,9 @@ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHas
}
// `Relocations` with default type parameters is a sorted map.
impl<'a, Tag> HashStable<StableHashingContext<'a>> for mir::interpret::Relocations<Tag>
impl<'a, Prov> HashStable<StableHashingContext<'a>> for mir::interpret::Relocations<Prov>
where
Tag: HashStable<StableHashingContext<'a>>,
Prov: HashStable<StableHashingContext<'a>>,
{
fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
self.len().hash_stable(hcx, hasher);


@ -1377,9 +1377,9 @@ fn pretty_print_const_scalar_int(
/// This is overridden for MIR printing because we only want to hide alloc ids from users, not
/// from MIR where it is actually useful.
fn pretty_print_const_pointer<Tag: Provenance>(
fn pretty_print_const_pointer<Prov: Provenance>(
mut self,
_: Pointer<Tag>,
_: Pointer<Prov>,
ty: Ty<'tcx>,
print_ty: bool,
) -> Result<Self::Const, Self::Error> {
@ -1952,9 +1952,9 @@ fn should_print_region(&self, region: ty::Region<'tcx>) -> bool {
}
}
fn pretty_print_const_pointer<Tag: Provenance>(
fn pretty_print_const_pointer<Prov: Provenance>(
self,
p: Pointer<Tag>,
p: Pointer<Prov>,
ty: Ty<'tcx>,
print_ty: bool,
) -> Result<Self::Const, Self::Error> {


@ -234,9 +234,9 @@ fn binary_ptr_op(
}
fn access_local<'a>(
frame: &'a Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
frame: &'a Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
local: Local,
) -> InterpResult<'tcx, &'a interpret::Operand<Self::PointerTag>> {
) -> InterpResult<'tcx, &'a interpret::Operand<Self::Provenance>> {
let l = &frame.locals[local];
if matches!(
@ -255,7 +255,7 @@ fn access_local_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
frame: usize,
local: Local,
) -> InterpResult<'tcx, &'a mut interpret::Operand<Self::PointerTag>> {
) -> InterpResult<'tcx, &'a mut interpret::Operand<Self::Provenance>> {
if ecx.machine.can_const_prop[local] == ConstPropMode::NoPropagation {
throw_machine_stop_str!("tried to write to a local that is marked as not propagatable")
}
@ -274,7 +274,7 @@ fn before_access_global(
_tcx: TyCtxt<'tcx>,
_machine: &Self,
_alloc_id: AllocId,
alloc: ConstAllocation<'tcx, Self::PointerTag, Self::AllocExtra>,
alloc: ConstAllocation<'tcx, Self::Provenance, Self::AllocExtra>,
_static_def_id: Option<DefId>,
is_write: bool,
) -> InterpResult<'tcx> {
@ -309,14 +309,14 @@ fn init_frame_extra(
#[inline(always)]
fn stack<'a>(
ecx: &'a InterpCx<'mir, 'tcx, Self>,
) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
&ecx.machine.stack
}
#[inline(always)]
fn stack_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
&mut ecx.machine.stack
}
}


@ -230,9 +230,9 @@ fn binary_ptr_op(
}
fn access_local<'a>(
frame: &'a Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
frame: &'a Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
local: Local,
) -> InterpResult<'tcx, &'a interpret::Operand<Self::PointerTag>> {
) -> InterpResult<'tcx, &'a interpret::Operand<Self::Provenance>> {
let l = &frame.locals[local];
if matches!(
@ -251,7 +251,7 @@ fn access_local_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
frame: usize,
local: Local,
) -> InterpResult<'tcx, &'a mut interpret::Operand<Self::PointerTag>> {
) -> InterpResult<'tcx, &'a mut interpret::Operand<Self::Provenance>> {
if ecx.machine.can_const_prop[local] == ConstPropMode::NoPropagation {
throw_machine_stop_str!("tried to write to a local that is marked as not propagatable")
}
@ -270,7 +270,7 @@ fn before_access_global(
_tcx: TyCtxt<'tcx>,
_machine: &Self,
_alloc_id: AllocId,
alloc: ConstAllocation<'tcx, Self::PointerTag, Self::AllocExtra>,
alloc: ConstAllocation<'tcx, Self::Provenance, Self::AllocExtra>,
_static_def_id: Option<DefId>,
is_write: bool,
) -> InterpResult<'tcx> {
@ -305,14 +305,14 @@ fn init_frame_extra(
#[inline(always)]
fn stack<'a>(
ecx: &'a InterpCx<'mir, 'tcx, Self>,
) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
&ecx.machine.stack
}
#[inline(always)]
fn stack_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
&mut ecx.machine.stack
}
}