enable Miri to fix the bytes in an allocation (since ptr offsets have different meanings there)

Ralf Jung 2021-07-15 18:03:22 +02:00
parent f4b61ba509
commit adbe7554d7
3 changed files with 65 additions and 69 deletions

View file

@@ -3,7 +3,7 @@
 use std::borrow::Cow;
 use std::convert::TryFrom;
 use std::iter;
-use std::ops::{Deref, DerefMut, Range};
+use std::ops::{Deref, Range};
 use std::ptr;
 
 use rustc_ast::Mutability;
@@ -156,16 +156,30 @@ pub fn uninit(size: Size, align: Align, panic_on_fail: bool) -> InterpResult<'st
 impl Allocation {
     /// Convert Tag and add Extra fields
-    pub fn with_prov_and_extra<Tag, Extra>(
+    pub fn convert_tag_add_extra<Tag, Extra>(
         self,
-        mut tagger: impl FnMut(AllocId) -> Tag,
+        cx: &impl HasDataLayout,
         extra: Extra,
+        mut tagger: impl FnMut(Pointer<AllocId>) -> Pointer<Tag>,
     ) -> Allocation<Tag, Extra> {
+        // Compute new pointer tags, which also adjusts the bytes.
+        let mut bytes = self.bytes;
+        let mut new_relocations = Vec::with_capacity(self.relocations.0.len());
+        let ptr_size = cx.data_layout().pointer_size.bytes_usize();
+        let endian = cx.data_layout().endian;
+        for &(offset, alloc_id) in self.relocations.iter() {
+            let idx = offset.bytes_usize();
+            let ptr_bytes = &mut bytes[idx..idx + ptr_size];
+            let bits = read_target_uint(endian, ptr_bytes).unwrap();
+            let (ptr_tag, ptr_offset) =
+                tagger(Pointer::new(alloc_id, Size::from_bytes(bits))).into_parts();
+            write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
+            new_relocations.push((offset, ptr_tag));
+        }
+        // Create allocation.
         Allocation {
-            bytes: self.bytes,
-            relocations: Relocations::from_presorted(
-                self.relocations.iter().map(|&(offset, tag)| (offset, tagger(tag))).collect(),
-            ),
+            bytes,
+            relocations: Relocations::from_presorted(new_relocations),
             init_mask: self.init_mask,
             align: self.align,
             mutability: self.mutability,
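
The new loop above is the heart of the change: instead of mapping only the relocation tags, the tagger now sees a whole `Pointer<AllocId>` and returns a whole `Pointer<Tag>`, so a machine like Miri, where the bytes of a stored pointer encode an absolute address rather than an allocation-relative offset, can also rewrite the offset bytes in place. A minimal, self-contained sketch of the same technique, with simplified stand-ins for `AllocId`, `Tag`, `Size`, and the data-layout queries (little-endian, 8-byte pointers assumed):

type AllocId = u64;
type Tag = u64;

fn convert_relocations(
    mut bytes: Vec<u8>,
    relocations: Vec<(usize, AllocId)>,
    mut tagger: impl FnMut(AllocId, u64) -> (Tag, u64),
) -> (Vec<u8>, Vec<(usize, Tag)>) {
    const PTR_SIZE: usize = 8; // assumed; rustc asks `cx.data_layout()` instead
    let mut new_relocations = Vec::with_capacity(relocations.len());
    for (offset, alloc_id) in relocations {
        // Read the offset currently stored in the pointer's bytes.
        let old = u64::from_le_bytes(bytes[offset..offset + PTR_SIZE].try_into().unwrap());
        // The tagger may return a *different* offset, e.g. an absolute address.
        let (tag, new) = tagger(alloc_id, old);
        // Write the possibly-adjusted offset back into the allocation's bytes.
        bytes[offset..offset + PTR_SIZE].copy_from_slice(&new.to_le_bytes());
        new_relocations.push((offset, tag));
    }
    (bytes, new_relocations)
}

fn main() {
    // One pointer at offset 0, pointing at offset 16 inside allocation 7.
    let bytes = 16u64.to_le_bytes().to_vec();
    let relocations = vec![(0usize, 7u64)];
    // A Miri-like tagger: rebase the relative offset onto a hypothetical base address.
    let (bytes, relocations) =
        convert_relocations(bytes, relocations, |id, off| (id, 0x1000 + off));
    assert_eq!(u64::from_le_bytes(bytes[..8].try_into().unwrap()), 0x1010);
    assert_eq!(relocations, vec![(0, 7)]);
}

The real code goes through `read_target_uint`/`write_target_uint` so that endianness and pointer size come from `cx.data_layout()` rather than being hard-coded as they are in this sketch.
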
@@ -377,7 +391,7 @@ pub fn write_scalar(
         // See if we have to also write a relocation.
         if let Some(provenance) = provenance {
-            self.relocations.insert(range.start, provenance);
+            self.relocations.0.insert(range.start, provenance);
         }
 
         Ok(())
@@ -437,7 +451,7 @@ fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
         }
 
         // Forget all the relocations.
-        self.relocations.remove_range(first..last);
+        self.relocations.0.remove_range(first..last);
     }
 
     /// Errors if there are relocations overlapping with the edges of the
@@ -597,12 +611,6 @@ fn deref(&self) -> &Self::Target {
     }
 }
 
-impl<Tag> DerefMut for Relocations<Tag> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.0
-    }
-}
-
 /// A partial, owned list of relocations to transfer into another allocation.
 pub struct AllocationRelocations<Tag> {
     relative_relocations: Vec<(Size, Tag)>,
@@ -643,7 +651,7 @@ pub fn prepare_relocation_copy(
     /// The affected range, as defined in the parameters to `prepare_relocation_copy` is expected
     /// to be clear of relocations.
     pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
-        self.relocations.insert_presorted(relocations.relative_relocations);
+        self.relocations.0.insert_presorted(relocations.relative_relocations);
     }
 }
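
Note the deleted `DerefMut` impl a few hunks up: after this commit `Relocations` only derefs immutably, and every mutation has to spell out `.0`, as the `self.relocations.0.insert(...)` and `.0.remove_range(...)` calls above now do. That makes it harder to change the relocation list while forgetting that the allocation's bytes encode the matching offsets. A tiny sketch of this Deref-only newtype pattern, with simplified types in place of the rustc ones:

use std::ops::Deref;

// Deref-only newtype: reads stay ergonomic, writes must name `.0` explicitly.
struct Relocations(Vec<(usize, u64)>); // simplified (offset, tag) pairs

impl Deref for Relocations {
    type Target = Vec<(usize, u64)>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

fn main() {
    let mut relocations = Relocations(vec![(0, 7)]);
    assert_eq!(relocations.len(), 1); // read via Deref, as before
    // `relocations.push((8, 9))` no longer compiles without DerefMut;
    // mutation has to be explicit about reaching into the newtype:
    relocations.0.push((8, 9));
    assert_eq!(relocations.len(), 2);
}
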

View file

@@ -7,7 +7,7 @@
 use std::hash::Hash;
 
 use rustc_middle::mir;
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, Ty};
 use rustc_span::def_id::DefId;
 use rustc_target::abi::Size;
 use rustc_target::spec::abi::Abi;
@@ -310,8 +310,7 @@ fn ptr_get_alloc(
     /// cache the result. (This relies on `AllocMap::get_or` being able to add the
     /// owned allocation to the map even when the map is shared.)
     fn init_allocation_extra<'b>(
-        memory_extra: &Self::MemoryExtra,
-        tcx: TyCtxt<'tcx>,
+        mem: &Memory<'mir, 'tcx, Self>,
         id: AllocId,
         alloc: Cow<'b, Allocation>,
         kind: Option<MemoryKind<Self::MemoryKind>>,
@@ -441,8 +440,7 @@ fn call_extra_fn(
     #[inline(always)]
     fn init_allocation_extra<'b>(
-        _memory_extra: &Self::MemoryExtra,
-        _tcx: TyCtxt<$tcx>,
+        _mem: &Memory<$mir, $tcx, Self>,
         _id: AllocId,
         alloc: Cow<'b, Allocation>,
         _kind: Option<MemoryKind<Self::MemoryKind>>,
@@ -473,10 +471,7 @@ fn ptr_from_addr(_mem: &Memory<$mir, $tcx, Self>, addr: u64) -> Pointer<Option<A
     }
 
     #[inline(always)]
-    fn ptr_get_alloc(
-        _mem: &Memory<$mir, $tcx, Self>,
-        ptr: Pointer<AllocId>,
-    ) -> (AllocId, Size) {
+    fn ptr_get_alloc(_mem: &Memory<$mir, $tcx, Self>, ptr: Pointer<AllocId>) -> (AllocId, Size) {
         // We know `offset` is relative to the allocation, so we can use `into_parts`.
         let (alloc_id, offset) = ptr.into_parts();
         (alloc_id, offset)

View file

@@ -232,10 +232,10 @@ pub fn allocate_with(
             M::GLOBAL_KIND.map(MemoryKind::Machine),
             "dynamically allocating global memory"
         );
-        // This is a new allocation, not a new global one, so no `global_base_ptr`.
-        let alloc = M::init_allocation_extra(&self.extra, self.tcx, id, Cow::Owned(alloc), Some(kind));
+        let alloc =
+            M::init_allocation_extra(self, id, Cow::Owned(alloc), Some(kind));
         self.alloc_map.insert(id, (kind, alloc.into_owned()));
-        M::tag_alloc_base_pointer(self, Pointer::new(id, Size::ZERO))
+        M::tag_alloc_base_pointer(self, Pointer::from(id))
     }
 
     pub fn reallocate(
@@ -334,7 +334,12 @@ pub fn deallocate(
         // Let the machine take some extra action
         let size = alloc.size();
-        M::memory_deallocated(&mut self.extra, &mut alloc.extra, ptr.provenance, alloc_range(Size::ZERO, size))?;
+        M::memory_deallocated(
+            &mut self.extra,
+            &mut alloc.extra,
+            ptr.provenance,
+            alloc_range(Size::ZERO, size),
+        )?;
 
         // Don't forget to remember size and align of this now-dead allocation
         let old = self.dead_alloc_map.insert(alloc_id, (size, alloc.align));
@@ -492,12 +497,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     /// this machine use the same pointer tag, so it is indirected through
     /// `M::tag_allocation`.
     fn get_global_alloc(
-        memory_extra: &M::MemoryExtra,
-        tcx: TyCtxt<'tcx>,
+        &self,
         id: AllocId,
         is_write: bool,
     ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
-        let (alloc, def_id) = match tcx.get_global_alloc(id) {
+        let (alloc, def_id) = match self.tcx.get_global_alloc(id) {
             Some(GlobalAlloc::Memory(mem)) => {
                 // Memory of a constant or promoted or anonymous memory referenced by a static.
                 (mem, None)
@@ -505,8 +509,8 @@ fn get_global_alloc(
             Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
             None => throw_ub!(PointerUseAfterFree(id)),
             Some(GlobalAlloc::Static(def_id)) => {
-                assert!(tcx.is_static(def_id));
-                assert!(!tcx.is_thread_local_static(def_id));
+                assert!(self.tcx.is_static(def_id));
+                assert!(!self.tcx.is_thread_local_static(def_id));
                 // Notice that every static has two `AllocId` that will resolve to the same
                 // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                 // and the other one is maps to `GlobalAlloc::Memory`, this is returned by
@@ -517,19 +521,18 @@ fn get_global_alloc(
                 // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                 // contains a reference to memory that was created during its evaluation (i.e., not
                 // to another static), those inner references only exist in "resolved" form.
-                if tcx.is_foreign_item(def_id) {
+                if self.tcx.is_foreign_item(def_id) {
                     throw_unsup!(ReadExternStatic(def_id));
                 }
-                (tcx.eval_static_initializer(def_id)?, Some(def_id))
+                (self.tcx.eval_static_initializer(def_id)?, Some(def_id))
             }
         };
-        M::before_access_global(memory_extra, id, alloc, def_id, is_write)?;
+        M::before_access_global(&self.extra, id, alloc, def_id, is_write)?;
         let alloc = Cow::Borrowed(alloc);
         // We got tcx memory. Let the machine initialize its "extra" stuff.
         let alloc = M::init_allocation_extra(
-            memory_extra,
-            tcx,
+            self,
             id, // always use the ID we got as input, not the "hidden" one.
             alloc,
             M::GLOBAL_KIND.map(MemoryKind::Machine),
@@ -548,7 +551,7 @@ fn get_raw(
         // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
         // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
         let a = self.alloc_map.get_or(id, || {
-            let alloc = Self::get_global_alloc(&self.extra, self.tcx, id, /*is_write*/ false)
+            let alloc = self.get_global_alloc(id, /*is_write*/ false)
                 .map_err(Err)?;
             match alloc {
                 Cow::Borrowed(alloc) => {
@@ -619,30 +622,26 @@ fn get_raw_mut(
         id: AllocId,
     ) -> InterpResult<'tcx, (&mut Allocation<M::PointerTag, M::AllocExtra>, &mut M::MemoryExtra)>
     {
-        let tcx = self.tcx;
-        let memory_extra = &mut self.extra;
-        let a = self.alloc_map.get_mut_or(id, || {
-            // Need to make a copy, even if `get_global_alloc` is able
-            // to give us a cheap reference.
-            let alloc = Self::get_global_alloc(memory_extra, tcx, id, /*is_write*/ true)?;
+        // We have "NLL problem case #3" here, which cannot be worked around without loss of
+        // efficiency even for the common case where the key is in the map.
+        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
+        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`.)
+        if self.alloc_map.get_mut(id).is_none() {
+            // Slow path.
+            // Allocation not found locally, go look global.
+            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
             let kind = M::GLOBAL_KIND.expect(
                 "I got a global allocation that I have to copy but the machine does \
                     not expect that to happen",
             );
-            Ok((MemoryKind::Machine(kind), alloc.into_owned()))
-        });
-        // Unpack the error type manually because type inference doesn't
-        // work otherwise (and we cannot help it because `impl Trait`)
-        match a {
-            Err(e) => Err(e),
-            Ok(a) => {
-                let a = &mut a.1;
-                if a.mutability == Mutability::Not {
-                    throw_ub!(WriteToReadOnly(id))
-                }
-                Ok((a, memory_extra))
-            }
-        }
+            self.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
+        }
+
+        let (_kind, alloc) = self.alloc_map.get_mut(id).unwrap();
+        if alloc.mutability == Mutability::Not {
+            throw_ub!(WriteToReadOnly(id))
+        }
+        Ok((alloc, &mut self.extra))
     }
 
     /// "Safe" (bounds and align-checked) allocation access.
@@ -737,7 +736,6 @@ pub fn get_size_and_align(
     }
 
     fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
-        trace!("reading fn ptr: {}", id);
         if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
             Some(FnVal::Other(*extra))
         } else {
@@ -752,6 +750,7 @@ pub fn get_fn(
         &self,
         ptr: Pointer<Option<M::PointerTag>>,
     ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
+        trace!("get_fn({:?})", ptr);
         let (alloc_id, offset, ptr) = self.ptr_get_alloc(ptr)?;
         if offset.bytes() != 0 {
             throw_ub!(InvalidFunctionPointer(ptr.erase_for_fmt()))
@@ -1046,12 +1045,8 @@ pub fn copy_repeatedly(
         // since we don't want to keep any relocations at the target.
         // (`get_bytes_with_uninit_and_ptr` below checks that there are no
         // relocations overlapping the edges; those would not be handled correctly).
-        let relocations = src_alloc.prepare_relocation_copy(
-            self,
-            src_range,
-            dest_offset,
-            num_copies,
-        );
+        let relocations =
+            src_alloc.prepare_relocation_copy(self, src_range, dest_offset, num_copies);
         // Prepare a copy of the initialization mask.
         let compressed = src_alloc.compress_uninit_range(src_range);
         // This checks relocation edges on the src.
@@ -1064,9 +1059,7 @@ pub fn copy_repeatedly(
         let (dest_alloc, extra) = self.get_raw_mut(dest_alloc_id)?;
         let dest_range = alloc_range(dest_offset, size * num_copies);
         M::memory_written(extra, &mut dest_alloc.extra, dest.provenance, dest_range)?;
-        let dest_bytes = dest_alloc
-            .get_bytes_mut_ptr(&tcx, dest_range)
-            .as_mut_ptr();
+        let dest_bytes = dest_alloc.get_bytes_mut_ptr(&tcx, dest_range).as_mut_ptr();
 
         if compressed.no_bytes_init() {
             // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range