Auto merge of #124193 - RalfJung:miri, r=RalfJung

Miri subtree update

r? `@ghost`
bors 2024-04-21 11:01:46 +00:00
commit fecb7b4309
114 changed files with 1400 additions and 685 deletions

View file

@ -295,6 +295,16 @@ up the sysroot. If you are using `miri` (the Miri driver) directly, see the
Miri adds its own set of `-Z` flags, which are usually set via the `MIRIFLAGS`
environment variable. We first document the most relevant and most commonly used flags:
* `-Zmiri-address-reuse-rate=<rate>` changes the probability that a freed *non-stack* allocation
will be added to the pool for address reuse, and the probability that a new *non-stack* allocation
will be taken from the pool. Stack allocations never get added to or taken from the pool. The
default is `0.5`.
* `-Zmiri-address-reuse-cross-thread-rate=<rate>` changes the probability that an allocation which
attempts to reuse a previously freed block of memory will also consider blocks freed by *other
threads*. The default is `0.1`, meaning that in 90% of the cases where an address reuse
attempt is made, only addresses from the same thread will be considered. Reusing an address from
another thread induces synchronization between those threads, which can mask data races and weak
memory bugs. (A small usage sketch follows this list.)
* `-Zmiri-compare-exchange-weak-failure-rate=<rate>` changes the failure rate of
`compare_exchange_weak` operations. The default is `0.8` (so 4 out of 5 weak ops will fail).
You can change it to any value between `0.0` and `1.0`, where `1.0` means it
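To make the first flag concrete, here is a minimal standalone program (not part of this change). Whether the two heap allocations below collide is probabilistic under Miri: raising `-Zmiri-address-reuse-rate` via `MIRIFLAGS` makes a collision more likely, while stack allocations are never reused.

```rust
fn main() {
    // First heap allocation; remember its address, then free it.
    let first = Box::into_raw(Box::new(42u32));
    let first_addr = first as usize;
    unsafe { drop(Box::from_raw(first)) };

    // A second allocation of the same size and alignment may be handed the
    // freed address, depending on the configured reuse rate.
    let second = Box::into_raw(Box::new(7u32));
    println!("address reused: {}", first_addr == second as usize);
    unsafe { drop(Box::from_raw(second)) };
}
```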

View file

@ -1 +1 @@
23d47dba319331d4418827cfbb8c1af283497d3c
c8d19a92aa9022eb690899cf6d54fd23cb6877e5

View file

@ -13,8 +13,9 @@
use rustc_span::Span;
use rustc_target::abi::{Align, HasDataLayout, Size};
use crate::*;
use reuse_pool::ReusePool;
use crate::{concurrency::VClock, *};
use self::reuse_pool::ReusePool;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ProvenanceMode {
@ -77,7 +78,7 @@ pub fn new(config: &MiriConfig, stack_addr: u64) -> Self {
GlobalStateInner {
int_to_ptr_map: Vec::default(),
base_addr: FxHashMap::default(),
reuse: ReusePool::new(),
reuse: ReusePool::new(config),
exposed: FxHashSet::default(),
next_base_addr: stack_addr,
provenance_mode: config.provenance_mode,
@ -144,7 +145,7 @@ fn alloc_id_from_addr(&self, addr: u64) -> Option<AllocId> {
fn addr_from_alloc_id(
&self,
alloc_id: AllocId,
_kind: MemoryKind,
memory_kind: MemoryKind,
) -> InterpResult<'tcx, u64> {
let ecx = self.eval_context_ref();
let mut global_state = ecx.machine.alloc_addresses.borrow_mut();
@ -163,9 +164,18 @@ fn addr_from_alloc_id(
assert!(!matches!(kind, AllocKind::Dead));
// This allocation does not have a base address yet, pick or reuse one.
let base_addr = if let Some(reuse_addr) =
global_state.reuse.take_addr(&mut *rng, size, align)
{
let base_addr = if let Some((reuse_addr, clock)) = global_state.reuse.take_addr(
&mut *rng,
size,
align,
memory_kind,
ecx.get_active_thread(),
) {
if let Some(clock) = clock
&& let Some(data_race) = &ecx.machine.data_race
{
data_race.acquire_clock(&clock, ecx.get_active_thread());
}
reuse_addr
} else {
// We have to pick a fresh address.
@ -333,14 +343,11 @@ fn ptr_get_alloc(&self, ptr: Pointer<Provenance>) -> Option<(AllocId, Size)> {
}
}
impl GlobalStateInner {
pub fn free_alloc_id(
&mut self,
rng: &mut impl Rng,
dead_id: AllocId,
size: Size,
align: Align,
) {
impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align, kind: MemoryKind) {
let global_state = self.alloc_addresses.get_mut();
let rng = self.rng.get_mut();
// We can *not* remove this from `base_addr`, since the interpreter design requires that we
// be able to retrieve an AllocId + offset for any memory access *before* we check if the
// access is valid. Specifically, `ptr_get_alloc` is called on each attempt at a memory
@ -353,15 +360,25 @@ pub fn free_alloc_id(
// returns a dead allocation.
// To avoid a linear scan we first look up the address in `base_addr`, and then find it in
// `int_to_ptr_map`.
let addr = *self.base_addr.get(&dead_id).unwrap();
let pos = self.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap();
let removed = self.int_to_ptr_map.remove(pos);
let addr = *global_state.base_addr.get(&dead_id).unwrap();
let pos =
global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap();
let removed = global_state.int_to_ptr_map.remove(pos);
assert_eq!(removed, (addr, dead_id)); // double-check that we removed the right thing
// We can also remove it from `exposed`, since this allocation can anyway not be returned by
// `alloc_id_from_addr` any more.
self.exposed.remove(&dead_id);
global_state.exposed.remove(&dead_id);
// Also remember this address for future reuse.
self.reuse.add_addr(rng, addr, size, align)
let thread = self.threads.get_active_thread_id();
global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || {
if let Some(data_race) = &self.data_race {
data_race
.release_clock(thread, self.threads.active_thread_ref().current_span())
.clone()
} else {
VClock::default()
}
})
}
}

View file

@ -4,11 +4,9 @@
use rustc_target::abi::{Align, Size};
const MAX_POOL_SIZE: usize = 64;
use crate::{concurrency::VClock, MemoryKind, MiriConfig, ThreadId};
// Just use fair coins, until we have evidence that other numbers are better.
const ADDR_REMEMBER_CHANCE: f64 = 0.5;
const ADDR_TAKE_CHANCE: f64 = 0.5;
const MAX_POOL_SIZE: usize = 64;
/// The pool strikes a balance between exploring more possible executions and making it more likely
/// to find bugs. The hypothesis is that bugs are more likely to occur when reuse happens for
@ -16,20 +14,29 @@
/// structure. Therefore we only reuse allocations when size and alignment match exactly.
#[derive(Debug)]
pub struct ReusePool {
address_reuse_rate: f64,
address_reuse_cross_thread_rate: f64,
/// The i-th element in `pool` stores allocations of alignment `2^i`. We store these reusable
/// allocations as address-size pairs, the list must be sorted by the size.
/// allocations as address-size pairs, the list must be sorted by the size and then the thread ID.
///
/// Each of these maps has at most MAX_POOL_SIZE elements, and since alignment is limited to
/// less than 64 different possible values, which bounds the overall size of the pool.
pool: Vec<Vec<(u64, Size)>>,
///
/// We also store the ID and the data-race clock of the thread that donated this pool element,
/// to ensure synchronization with the thread that picks up this address.
pool: Vec<Vec<(u64, Size, ThreadId, VClock)>>,
}
impl ReusePool {
pub fn new() -> Self {
ReusePool { pool: vec![] }
pub fn new(config: &MiriConfig) -> Self {
ReusePool {
address_reuse_rate: config.address_reuse_rate,
address_reuse_cross_thread_rate: config.address_reuse_cross_thread_rate,
pool: vec![],
}
}
fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size)> {
fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, ThreadId, VClock)> {
let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap();
if self.pool.len() <= pool_idx {
self.pool.resize(pool_idx + 1, Vec::new());
@ -37,40 +44,73 @@ fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size)> {
&mut self.pool[pool_idx]
}
pub fn add_addr(&mut self, rng: &mut impl Rng, addr: u64, size: Size, align: Align) {
pub fn add_addr(
&mut self,
rng: &mut impl Rng,
addr: u64,
size: Size,
align: Align,
kind: MemoryKind,
thread: ThreadId,
clock: impl FnOnce() -> VClock,
) {
// Let's see if we even want to remember this address.
if !rng.gen_bool(ADDR_REMEMBER_CHANCE) {
// We don't remember stack addresses: there's a lot of them (so the perf impact is big),
// and we only want to reuse stack slots within the same thread or else we'll add a lot of
// undesired synchronization.
if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
return;
}
let clock = clock();
// Determine the pool to add this to, and where in the pool to put it.
let subpool = self.subpool(align);
let pos = subpool.partition_point(|(_addr, other_size)| *other_size < size);
let pos = subpool.partition_point(|(_addr, other_size, other_thread, _)| {
(*other_size, *other_thread) < (size, thread)
});
// Make sure the pool does not grow too big.
if subpool.len() >= MAX_POOL_SIZE {
// Pool full. Replace existing element, or last one if this would be even bigger.
let clamped_pos = pos.min(subpool.len() - 1);
subpool[clamped_pos] = (addr, size);
subpool[clamped_pos] = (addr, size, thread, clock);
return;
}
// Add address to pool, at the right position.
subpool.insert(pos, (addr, size));
subpool.insert(pos, (addr, size, thread, clock));
}
pub fn take_addr(&mut self, rng: &mut impl Rng, size: Size, align: Align) -> Option<u64> {
// Determine whether we'll even attempt a reuse.
if !rng.gen_bool(ADDR_TAKE_CHANCE) {
/// Returns the address to use and optionally a clock we have to synchronize with.
pub fn take_addr(
&mut self,
rng: &mut impl Rng,
size: Size,
align: Align,
kind: MemoryKind,
thread: ThreadId,
) -> Option<(u64, Option<VClock>)> {
// Determine whether we'll even attempt a reuse. As above, we don't do reuse for stack addresses.
if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
return None;
}
let cross_thread_reuse = rng.gen_bool(self.address_reuse_cross_thread_rate);
// Determine the pool to take this from.
let subpool = self.subpool(align);
// Let's see if we can find something of the right size. We want to find the full range of
// such items, beginning with the first, so we can't use `binary_search_by_key`.
let begin = subpool.partition_point(|(_addr, other_size)| *other_size < size);
// such items, beginning with the first, so we can't use `binary_search_by_key`. If we do
// *not* want to consider other threads' allocations, we effectively use the lexicographic
// order on `(size, thread)`.
let begin = subpool.partition_point(|(_addr, other_size, other_thread, _)| {
*other_size < size
|| (*other_size == size && !cross_thread_reuse && *other_thread < thread)
});
let mut end = begin;
while let Some((_addr, other_size)) = subpool.get(end) {
while let Some((_addr, other_size, other_thread, _)) = subpool.get(end) {
if *other_size != size {
break;
}
if !cross_thread_reuse && *other_thread != thread {
// We entered the allocations of another thread.
break;
}
end += 1;
}
if end == begin {
@ -80,8 +120,10 @@ pub fn take_addr(&mut self, rng: &mut impl Rng, size: Size, align: Align) -> Opt
// Pick a random element with the desired size.
let idx = rng.gen_range(begin..end);
// Remove it from the pool and return.
let (chosen_addr, chosen_size) = subpool.remove(idx);
let (chosen_addr, chosen_size, chosen_thread, clock) = subpool.remove(idx);
debug_assert!(chosen_size >= size && chosen_addr % align.bytes() == 0);
Some(chosen_addr)
debug_assert!(cross_thread_reuse || chosen_thread == thread);
// No synchronization needed if we reused from the current thread.
Some((chosen_addr, if chosen_thread == thread { None } else { Some(clock) }))
}
}
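The `(size, thread)` lexicographic order is what lets `take_addr` narrow reuse to same-thread entries with a single `partition_point`. Below is a standalone sketch of that range lookup, using simplified element types rather than Miri's actual pool entries.

```rust
// Find the run of pool entries with a given size, optionally restricted to one
// thread, in a list sorted lexicographically by (size, thread).
fn matching_range(
    pool: &[(u64 /* size */, u32 /* thread */)],
    size: u64,
    thread: u32,
    cross_thread: bool,
) -> std::ops::Range<usize> {
    let begin = pool.partition_point(|&(s, t)| {
        s < size || (s == size && !cross_thread && t < thread)
    });
    let mut end = begin;
    while let Some(&(s, t)) = pool.get(end) {
        if s != size || (!cross_thread && t != thread) {
            break;
        }
        end += 1;
    }
    begin..end
}

fn main() {
    let pool = [(8, 0), (8, 1), (8, 1), (16, 0), (16, 2)];
    assert_eq!(matching_range(&pool, 8, 1, false), 1..3); // same-thread only
    assert_eq!(matching_range(&pool, 8, 1, true), 0..3);  // cross-thread allowed
    assert_eq!(matching_range(&pool, 16, 1, true), 3..5);
}
```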

View file

@ -307,6 +307,15 @@ fn parse_comma_list<T: FromStr>(input: &str) -> Result<Vec<T>, T::Err> {
input.split(',').map(str::parse::<T>).collect()
}
/// Parses the input as a float in the range from 0.0 to 1.0 (inclusive).
fn parse_rate(input: &str) -> Result<f64, &'static str> {
match input.parse::<f64>() {
Ok(rate) if rate >= 0.0 && rate <= 1.0 => Ok(rate),
Ok(_) => Err("must be between `0.0` and `1.0`"),
Err(_) => Err("requires a `f64` between `0.0` and `1.0`"),
}
}
#[cfg(any(target_os = "linux", target_os = "macos"))]
fn jemalloc_magic() {
// These magic runes are copied from
@ -499,14 +508,9 @@ fn main() {
} else if let Some(param) = arg.strip_prefix("-Zmiri-env-forward=") {
miri_config.forwarded_env_vars.push(param.to_owned());
} else if let Some(param) = arg.strip_prefix("-Zmiri-track-pointer-tag=") {
let ids: Vec<u64> = match parse_comma_list(param) {
Ok(ids) => ids,
Err(err) =>
show_error!(
"-Zmiri-track-pointer-tag requires a comma separated list of valid `u64` arguments: {}",
err
),
};
let ids: Vec<u64> = parse_comma_list(param).unwrap_or_else(|err| {
show_error!("-Zmiri-track-pointer-tag requires a comma separated list of valid `u64` arguments: {err}")
});
for id in ids.into_iter().map(miri::BorTag::new) {
if let Some(id) = id {
miri_config.tracked_pointer_tags.insert(id);
@ -515,14 +519,9 @@ fn main() {
}
}
} else if let Some(param) = arg.strip_prefix("-Zmiri-track-call-id=") {
let ids: Vec<u64> = match parse_comma_list(param) {
Ok(ids) => ids,
Err(err) =>
show_error!(
"-Zmiri-track-call-id requires a comma separated list of valid `u64` arguments: {}",
err
),
};
let ids: Vec<u64> = parse_comma_list(param).unwrap_or_else(|err| {
show_error!("-Zmiri-track-call-id requires a comma separated list of valid `u64` arguments: {err}")
});
for id in ids.into_iter().map(miri::CallId::new) {
if let Some(id) = id {
miri_config.tracked_call_ids.insert(id);
@ -531,56 +530,37 @@ fn main() {
}
}
} else if let Some(param) = arg.strip_prefix("-Zmiri-track-alloc-id=") {
let ids: Vec<miri::AllocId> = match parse_comma_list::<NonZero<u64>>(param) {
Ok(ids) => ids.into_iter().map(miri::AllocId).collect(),
Err(err) =>
show_error!(
"-Zmiri-track-alloc-id requires a comma separated list of valid non-zero `u64` arguments: {}",
err
),
};
miri_config.tracked_alloc_ids.extend(ids);
let ids = parse_comma_list::<NonZero<u64>>(param).unwrap_or_else(|err| {
show_error!("-Zmiri-track-alloc-id requires a comma separated list of valid non-zero `u64` arguments: {err}")
});
miri_config.tracked_alloc_ids.extend(ids.into_iter().map(miri::AllocId));
} else if arg == "-Zmiri-track-alloc-accesses" {
miri_config.track_alloc_accesses = true;
} else if let Some(param) = arg.strip_prefix("-Zmiri-address-reuse-rate=") {
miri_config.address_reuse_rate = parse_rate(param)
.unwrap_or_else(|err| show_error!("-Zmiri-address-reuse-rate {err}"));
} else if let Some(param) = arg.strip_prefix("-Zmiri-address-reuse-cross-thread-rate=") {
miri_config.address_reuse_cross_thread_rate = parse_rate(param)
.unwrap_or_else(|err| show_error!("-Zmiri-address-reuse-cross-thread-rate {err}"));
} else if let Some(param) = arg.strip_prefix("-Zmiri-compare-exchange-weak-failure-rate=") {
let rate = match param.parse::<f64>() {
Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate,
Ok(_) =>
show_error!(
"-Zmiri-compare-exchange-weak-failure-rate must be between `0.0` and `1.0`"
),
Err(err) =>
show_error!(
"-Zmiri-compare-exchange-weak-failure-rate requires a `f64` between `0.0` and `1.0`: {}",
err
),
};
miri_config.cmpxchg_weak_failure_rate = rate;
miri_config.cmpxchg_weak_failure_rate = parse_rate(param).unwrap_or_else(|err| {
show_error!("-Zmiri-compare-exchange-weak-failure-rate {err}")
});
} else if let Some(param) = arg.strip_prefix("-Zmiri-preemption-rate=") {
let rate = match param.parse::<f64>() {
Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate,
Ok(_) => show_error!("-Zmiri-preemption-rate must be between `0.0` and `1.0`"),
Err(err) =>
show_error!(
"-Zmiri-preemption-rate requires a `f64` between `0.0` and `1.0`: {}",
err
),
};
miri_config.preemption_rate = rate;
miri_config.preemption_rate =
parse_rate(param).unwrap_or_else(|err| show_error!("-Zmiri-preemption-rate {err}"));
} else if arg == "-Zmiri-report-progress" {
// This makes it take a few seconds between progress reports on my laptop.
miri_config.report_progress = Some(1_000_000);
} else if let Some(param) = arg.strip_prefix("-Zmiri-report-progress=") {
let interval = match param.parse::<u32>() {
Ok(i) => i,
Err(err) => show_error!("-Zmiri-report-progress requires a `u32`: {}", err),
};
let interval = param.parse::<u32>().unwrap_or_else(|err| {
show_error!("-Zmiri-report-progress requires a `u32`: {}", err)
});
miri_config.report_progress = Some(interval);
} else if let Some(param) = arg.strip_prefix("-Zmiri-provenance-gc=") {
let interval = match param.parse::<u32>() {
Ok(i) => i,
Err(err) => show_error!("-Zmiri-provenance-gc requires a `u32`: {}", err),
};
let interval = param.parse::<u32>().unwrap_or_else(|err| {
show_error!("-Zmiri-provenance-gc requires a `u32`: {}", err)
});
miri_config.gc_interval = interval;
} else if let Some(param) = arg.strip_prefix("-Zmiri-measureme=") {
miri_config.measureme_out = Some(param.to_string());
@ -605,23 +585,20 @@ fn main() {
show_error!("-Zmiri-extern-so-file `{}` does not exist", filename);
}
} else if let Some(param) = arg.strip_prefix("-Zmiri-num-cpus=") {
let num_cpus = match param.parse::<u32>() {
Ok(i) => i,
Err(err) => show_error!("-Zmiri-num-cpus requires a `u32`: {}", err),
};
let num_cpus = param
.parse::<u32>()
.unwrap_or_else(|err| show_error!("-Zmiri-num-cpus requires a `u32`: {}", err));
miri_config.num_cpus = num_cpus;
} else if let Some(param) = arg.strip_prefix("-Zmiri-force-page-size=") {
let page_size = match param.parse::<u64>() {
Ok(i) =>
if i.is_power_of_two() {
i * 1024
} else {
show_error!("-Zmiri-force-page-size requires a power of 2: {}", i)
},
Err(err) => show_error!("-Zmiri-force-page-size requires a `u64`: {}", err),
let page_size = param.parse::<u64>().unwrap_or_else(|err| {
show_error!("-Zmiri-force-page-size requires a `u64`: {}", err)
});
// Convert from kilobytes to bytes.
let page_size = if page_size.is_power_of_two() {
page_size * 1024
} else {
show_error!("-Zmiri-force-page-size requires a power of 2: {page_size}");
};
miri_config.page_size = Some(page_size);
} else {
// Forward to rustc.
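For reference, the new `parse_rate` helper accepts exactly the closed interval `[0.0, 1.0]`. Here is a standalone copy with a few checks, purely for illustration:

```rust
/// Standalone copy of the `parse_rate` helper shown above, for illustration.
fn parse_rate(input: &str) -> Result<f64, &'static str> {
    match input.parse::<f64>() {
        Ok(rate) if rate >= 0.0 && rate <= 1.0 => Ok(rate),
        Ok(_) => Err("must be between `0.0` and `1.0`"),
        Err(_) => Err("requires a `f64` between `0.0` and `1.0`"),
    }
}

fn main() {
    assert_eq!(parse_rate("0.25"), Ok(0.25));
    assert_eq!(parse_rate("1.0"), Ok(1.0));
    assert_eq!(parse_rate("1.5"), Err("must be between `0.0` and `1.0`"));
    assert_eq!(parse_rate("abc"), Err("requires a `f64` between `0.0` and `1.0`"));
}
```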

View file

@ -438,7 +438,7 @@ pub(super) fn protector_error(&self, item: &Item, kind: ProtectorKind) -> Interp
.machine
.threads
.all_stacks()
.flatten()
.flat_map(|(_id, stack)| stack)
.map(|frame| {
frame.extra.borrow_tracker.as_ref().expect("we should have borrow tracking data")
})

View file

@ -2,7 +2,6 @@
//! (These are used in Tree Borrows `#[test]`s for thorough verification
//! of the behavior of the state machine of permissions,
//! but the contents of this file are extremely generic)
#![cfg(test)]
pub trait Exhaustive: Sized {
fn exhaustive() -> Box<dyn Iterator<Item = Self>>;

View file

@ -547,9 +547,9 @@ fn read_race_detect(
) -> Result<(), DataRace> {
trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, thread_clocks);
if !current_span.is_dummy() {
thread_clocks.clock[index].span = current_span;
thread_clocks.clock.index_mut(index).span = current_span;
}
thread_clocks.clock[index].set_read_type(read_type);
thread_clocks.clock.index_mut(index).set_read_type(read_type);
if self.write_was_before(&thread_clocks.clock) {
let race_free = if let Some(atomic) = self.atomic() {
// We must be ordered-after all atomic accesses, reads and writes.
@ -577,7 +577,7 @@ fn write_race_detect(
) -> Result<(), DataRace> {
trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, thread_clocks);
if !current_span.is_dummy() {
thread_clocks.clock[index].span = current_span;
thread_clocks.clock.index_mut(index).span = current_span;
}
if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
let race_free = if let Some(atomic) = self.atomic() {
@ -1701,49 +1701,34 @@ fn print_thread_metadata(
format!("thread `{thread_name}`")
}
/// Acquire a lock, express that the previous call of
/// `validate_lock_release` must happen before this.
/// Acquire the given clock into the given thread, establishing synchronization with
/// the moment when that clock snapshot was taken via `release_clock`.
/// As this is an acquire operation, the thread timestamp is not
/// incremented.
pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
let (_, mut clocks) = self.load_thread_state_mut(thread);
pub fn acquire_clock(&self, lock: &VClock, thread: ThreadId) {
let (_, mut clocks) = self.thread_state_mut(thread);
clocks.clock.join(lock);
}
/// Release a lock handle, express that this happens-before
/// any subsequent calls to `validate_lock_acquire`.
/// For normal locks this should be equivalent to `validate_lock_release_shared`
/// since an acquire operation should have occurred before, however
/// for futex & condvar operations this is not the case and this
/// operation must be used.
pub fn validate_lock_release(&self, lock: &mut VClock, thread: ThreadId, current_span: Span) {
let (index, mut clocks) = self.load_thread_state_mut(thread);
lock.clone_from(&clocks.clock);
clocks.increment_clock(index, current_span);
}
/// Release a lock handle, express that this happens-before
/// any subsequent calls to `validate_lock_acquire` as well
/// as any previous calls to this function after any
/// `validate_lock_release` calls.
/// For normal locks this should be equivalent to `validate_lock_release`.
/// This function only exists for joining over the set of concurrent readers
/// in a read-write lock and should not be used for anything else.
pub fn validate_lock_release_shared(
&self,
lock: &mut VClock,
thread: ThreadId,
current_span: Span,
) {
let (index, mut clocks) = self.load_thread_state_mut(thread);
lock.join(&clocks.clock);
/// Returns the `release` clock of the given thread.
/// Other threads can acquire this clock in the future to establish synchronization
/// with this program point.
pub fn release_clock(&self, thread: ThreadId, current_span: Span) -> Ref<'_, VClock> {
// We increment the clock each time this happens, to ensure no two releases
// can be confused with each other.
let (index, mut clocks) = self.thread_state_mut(thread);
clocks.increment_clock(index, current_span);
drop(clocks);
// To return a read-only view, we need to release the RefCell
// and borrow it again.
let (_index, clocks) = self.thread_state(thread);
Ref::map(clocks, |c| &c.clock)
}
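The `release_clock`/`acquire_clock` pair above follows the usual vector-clock protocol: releasing hands out a snapshot of the releasing thread's clock, and acquiring joins that snapshot (element-wise maximum) into the acquirer. A toy model of that join, independent of Miri's `VClock`:

```rust
// Toy vector clock, not Miri's VClock: acquiring joins the released snapshot
// into the acquirer, so everything the releasing thread had observed becomes
// "happens-before" for the acquiring thread.
#[derive(Clone, Debug, PartialEq)]
struct ToyClock(Vec<u64>);

impl ToyClock {
    /// Join another clock into this one: element-wise maximum.
    fn join(&mut self, other: &ToyClock) {
        if other.0.len() > self.0.len() {
            self.0.resize(other.0.len(), 0);
        }
        for (i, &t) in other.0.iter().enumerate() {
            self.0[i] = self.0[i].max(t);
        }
    }
}

fn main() {
    // "Release": a snapshot of thread 1's clock is stored (e.g. in a mutex).
    let released = ToyClock(vec![0, 3]);
    // "Acquire": thread 0 joins that snapshot into its own clock.
    let mut acquirer = ToyClock(vec![5, 1]);
    acquirer.join(&released);
    assert_eq!(acquirer, ToyClock(vec![5, 3]));
}
```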
/// Load the vector index used by the given thread as well as the set of vector clocks
/// used by the thread.
#[inline]
fn load_thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
fn thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
let index = self.thread_info.borrow()[thread]
.vector_index
.expect("Loading thread state for thread with no assigned vector");
@ -1752,6 +1737,18 @@ fn load_thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, Thre
(index, clocks)
}
/// Load the vector index used by the given thread as well as the set of vector clocks
/// used by the thread.
#[inline]
fn thread_state(&self, thread: ThreadId) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
let index = self.thread_info.borrow()[thread]
.vector_index
.expect("Loading thread state for thread with no assigned vector");
let ref_vector = self.vector_clocks.borrow();
let clocks = Ref::map(ref_vector, |vec| &vec[index]);
(index, clocks)
}
/// Load the current vector clock in use and the current set of thread clocks
/// in use for the vector.
#[inline]
@ -1759,10 +1756,7 @@ pub(super) fn current_thread_state(
&self,
thread_mgr: &ThreadManager<'_, '_>,
) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
let index = self.current_index(thread_mgr);
let ref_vector = self.vector_clocks.borrow();
let clocks = Ref::map(ref_vector, |vec| &vec[index]);
(index, clocks)
self.thread_state(thread_mgr.get_active_thread_id())
}
/// Load the current vector clock in use and the current set of thread clocks
@ -1772,10 +1766,7 @@ pub(super) fn current_thread_state_mut(
&self,
thread_mgr: &ThreadManager<'_, '_>,
) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
let index = self.current_index(thread_mgr);
let ref_vector = self.vector_clocks.borrow_mut();
let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
(index, clocks)
self.thread_state_mut(thread_mgr.get_active_thread_id())
}
/// Return the current thread, should be the same

View file

@ -41,7 +41,7 @@ pub enum InitOnceStatus {
pub(super) struct InitOnce<'mir, 'tcx> {
status: InitOnceStatus,
waiters: VecDeque<InitOnceWaiter<'mir, 'tcx>>,
data_race: VClock,
clock: VClock,
}
impl<'mir, 'tcx> VisitProvenance for InitOnce<'mir, 'tcx> {
@ -61,10 +61,8 @@ fn init_once_observe_attempt(&mut self, id: InitOnceId) {
let current_thread = this.get_active_thread();
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(
&this.machine.threads.sync.init_onces[id].data_race,
current_thread,
);
data_race
.acquire_clock(&this.machine.threads.sync.init_onces[id].clock, current_thread);
}
}
@ -77,7 +75,7 @@ fn init_once_wake_waiter(
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
this.unblock_thread(waiter.thread);
this.unblock_thread(waiter.thread, BlockReason::InitOnce(id));
// Call callback, with the woken-up thread as `current`.
this.set_active_thread(waiter.thread);
@ -142,7 +140,7 @@ fn init_once_enqueue_and_block(
let init_once = &mut this.machine.threads.sync.init_onces[id];
assert_ne!(init_once.status, InitOnceStatus::Complete, "queueing on complete init once");
init_once.waiters.push_back(InitOnceWaiter { thread, callback });
this.block_thread(thread);
this.block_thread(thread, BlockReason::InitOnce(id));
}
/// Begin initializing this InitOnce. Must only be called after checking that it is currently
@ -176,7 +174,7 @@ fn init_once_complete(&mut self, id: InitOnceId) -> InterpResult<'tcx> {
// Each complete happens-before the end of the wait
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut init_once.data_race, current_thread, current_span);
init_once.clock.clone_from(&data_race.release_clock(current_thread, current_span));
}
// Wake up everyone.
@ -202,7 +200,7 @@ fn init_once_fail(&mut self, id: InitOnceId) -> InterpResult<'tcx> {
// Each complete happens-before the end of the wait
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut init_once.data_race, current_thread, current_span);
init_once.clock.clone_from(&data_race.release_clock(current_thread, current_span));
}
// Wake up one waiting thread, so they can go ahead and try to init this.

View file

@ -6,3 +6,5 @@
pub mod thread;
mod vector_clock;
pub mod weak_memory;
pub use vector_clock::VClock;

View file

@ -69,12 +69,8 @@ struct Mutex {
lock_count: usize,
/// The queue of threads waiting for this mutex.
queue: VecDeque<ThreadId>,
/// Data race handle. This tracks the happens-before
/// relationship between each mutex access. It is
/// released to during unlock and acquired from during
/// locking, and therefore stores the clock of the last
/// thread to release this mutex.
data_race: VClock,
/// Mutex clock. This tracks the moment of the last unlock.
clock: VClock,
}
declare_id!(RwLockId);
@ -91,7 +87,7 @@ struct RwLock {
writer_queue: VecDeque<ThreadId>,
/// The queue of reader threads waiting for this lock.
reader_queue: VecDeque<ThreadId>,
/// Data race handle for writers. Tracks the happens-before
/// Data race clock for writers. Tracks the happens-before
/// ordering between each write access to a rwlock and is updated
/// after a sequence of concurrent readers to track the happens-
/// before ordering between the set of previous readers and
@ -99,8 +95,8 @@ struct RwLock {
/// Contains the clock of the last thread to release a writer
/// lock or the joined clock of the set of last threads to release
/// shared reader locks.
data_race: VClock,
/// Data race handle for readers. This is temporary storage
clock_unlocked: VClock,
/// Data race clock for readers. This is temporary storage
/// for the combined happens-before ordering for between all
/// concurrent readers and the next writer, and the value
/// is stored to the main data_race variable once all
@ -110,30 +106,18 @@ struct RwLock {
/// add happens-before orderings between shared reader
/// locks.
/// This is only relevant when there is an active reader.
data_race_reader: VClock,
clock_current_readers: VClock,
}
declare_id!(CondvarId);
#[derive(Debug, Copy, Clone)]
pub enum RwLockMode {
Read,
Write,
}
#[derive(Debug)]
pub enum CondvarLock {
Mutex(MutexId),
RwLock { id: RwLockId, mode: RwLockMode },
}
/// A thread waiting on a conditional variable.
#[derive(Debug)]
struct CondvarWaiter {
/// The thread that is waiting on this variable.
thread: ThreadId,
/// The mutex or rwlock on which the thread is waiting.
lock: CondvarLock,
/// The mutex on which the thread is waiting.
lock: MutexId,
}
/// The conditional variable state.
@ -144,8 +128,8 @@ struct Condvar {
/// between a cond-var signal and a cond-var
/// wait during a non-spurious signal event.
/// Contains the clock of the last thread to
/// perform a futex-signal.
data_race: VClock,
/// perform a condvar-signal.
clock: VClock,
}
/// The futex state.
@ -157,7 +141,7 @@ struct Futex {
/// during a non-spurious wake event.
/// Contains the clock of the last thread to
/// perform a futex-wake.
data_race: VClock,
clock: VClock,
}
/// A thread waiting on a futex.
@ -232,7 +216,7 @@ fn get_or_create_id<Id: SyncId>(
fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool {
let this = self.eval_context_mut();
if let Some(reader) = this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() {
this.unblock_thread(reader);
this.unblock_thread(reader, BlockReason::RwLock(id));
this.rwlock_reader_lock(id, reader);
true
} else {
@ -246,7 +230,7 @@ fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool {
fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool {
let this = self.eval_context_mut();
if let Some(writer) = this.machine.threads.sync.rwlocks[id].writer_queue.pop_front() {
this.unblock_thread(writer);
this.unblock_thread(writer, BlockReason::RwLock(id));
this.rwlock_writer_lock(id, writer);
true
} else {
@ -260,7 +244,7 @@ fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool {
fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool {
let this = self.eval_context_mut();
if let Some(thread) = this.machine.threads.sync.mutexes[id].queue.pop_front() {
this.unblock_thread(thread);
this.unblock_thread(thread, BlockReason::Mutex(id));
this.mutex_lock(id, thread);
true
} else {
@ -358,7 +342,7 @@ fn mutex_lock(&mut self, id: MutexId, thread: ThreadId) {
}
mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(&mutex.data_race, thread);
data_race.acquire_clock(&mutex.clock, thread);
}
}
@ -385,11 +369,7 @@ fn mutex_unlock(&mut self, id: MutexId, expected_owner: ThreadId) -> Option<usiz
// The mutex is completely unlocked. Try transferring ownership
// to another thread.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(
&mut mutex.data_race,
current_owner,
current_span,
);
mutex.clock.clone_from(&data_race.release_clock(current_owner, current_span));
}
this.mutex_dequeue_and_lock(id);
}
@ -406,7 +386,7 @@ fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId) {
let this = self.eval_context_mut();
assert!(this.mutex_is_locked(id), "queing on unlocked mutex");
this.machine.threads.sync.mutexes[id].queue.push_back(thread);
this.block_thread(thread);
this.block_thread(thread, BlockReason::Mutex(id));
}
/// Provides the closure with the next RwLockId. Creates that RwLock if the closure returns None,
@ -460,7 +440,7 @@ fn rwlock_reader_lock(&mut self, id: RwLockId, reader: ThreadId) {
let count = rwlock.readers.entry(reader).or_insert(0);
*count = count.checked_add(1).expect("the reader counter overflowed");
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(&rwlock.data_race, reader);
data_race.acquire_clock(&rwlock.clock_unlocked, reader);
}
}
@ -486,20 +466,16 @@ fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
}
if let Some(data_race) = &this.machine.data_race {
// Add this to the shared-release clock of all concurrent readers.
data_race.validate_lock_release_shared(
&mut rwlock.data_race_reader,
reader,
current_span,
);
rwlock.clock_current_readers.join(&data_race.release_clock(reader, current_span));
}
// The thread was a reader. If the lock is not held any more, give it to a writer.
if this.rwlock_is_locked(id).not() {
// All the readers are finished, so set the writer data-race handle to the value
// of the union of all reader data race handles, since the set of readers
// happen-before the writers
// of the union of all reader data race handles, since the set of readers
// happen-before the writers
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
rwlock.data_race.clone_from(&rwlock.data_race_reader);
rwlock.clock_unlocked.clone_from(&rwlock.clock_current_readers);
this.rwlock_dequeue_and_lock_writer(id);
}
true
@ -511,7 +487,7 @@ fn rwlock_enqueue_and_block_reader(&mut self, id: RwLockId, reader: ThreadId) {
let this = self.eval_context_mut();
assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock");
this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader);
this.block_thread(reader);
this.block_thread(reader, BlockReason::RwLock(id));
}
/// Lock by setting the writer that owns the lock.
@ -523,7 +499,7 @@ fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
rwlock.writer = Some(writer);
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(&rwlock.data_race, writer);
data_race.acquire_clock(&rwlock.clock_unlocked, writer);
}
}
@ -542,11 +518,9 @@ fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> b
trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
// Release memory to next lock holder.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(
&mut rwlock.data_race,
current_writer,
current_span,
);
rwlock
.clock_unlocked
.clone_from(&*data_race.release_clock(current_writer, current_span));
}
// The thread was a writer.
//
@ -573,7 +547,7 @@ fn rwlock_enqueue_and_block_writer(&mut self, id: RwLockId, writer: ThreadId) {
let this = self.eval_context_mut();
assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer);
this.block_thread(writer);
this.block_thread(writer, BlockReason::RwLock(id));
}
/// Provides the closure with the next CondvarId. Creates that Condvar if the closure returns None,
@ -605,7 +579,7 @@ fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
}
/// Mark that the thread is waiting on the conditional variable.
fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, lock: CondvarLock) {
fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, lock: MutexId) {
let this = self.eval_context_mut();
let waiters = &mut this.machine.threads.sync.condvars[id].waiters;
assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
@ -614,7 +588,7 @@ fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, lock: CondvarLock) {
/// Wake up some thread (if there is any) sleeping on the conditional
/// variable.
fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, CondvarLock)> {
fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, MutexId)> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let current_span = this.machine.current_span();
@ -623,11 +597,11 @@ fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, CondvarLock)> {
// Each condvar signal happens-before the end of the condvar wake
if let Some(data_race) = data_race {
data_race.validate_lock_release(&mut condvar.data_race, current_thread, current_span);
condvar.clock.clone_from(&*data_race.release_clock(current_thread, current_span));
}
condvar.waiters.pop_front().map(|waiter| {
if let Some(data_race) = data_race {
data_race.validate_lock_acquire(&condvar.data_race, waiter.thread);
data_race.acquire_clock(&condvar.clock, waiter.thread);
}
(waiter.thread, waiter.lock)
})
@ -657,14 +631,14 @@ fn futex_wake(&mut self, addr: u64, bitset: u32) -> Option<ThreadId> {
// Each futex-wake happens-before the end of the futex wait
if let Some(data_race) = data_race {
data_race.validate_lock_release(&mut futex.data_race, current_thread, current_span);
futex.clock.clone_from(&*data_race.release_clock(current_thread, current_span));
}
// Wake up the first thread in the queue that matches any of the bits in the bitset.
futex.waiters.iter().position(|w| w.bitset & bitset != 0).map(|i| {
let waiter = futex.waiters.remove(i).unwrap();
if let Some(data_race) = data_race {
data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
data_race.acquire_clock(&futex.clock, waiter.thread);
}
waiter.thread
})

View file

@ -88,18 +88,33 @@ fn from(t: ThreadId) -> Self {
}
}
/// Keeps track of what the thread is blocked on.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BlockReason {
/// The thread tried to join the specified thread and is blocked until that
/// thread terminates.
Join(ThreadId),
/// Waiting for time to pass.
Sleep,
/// Blocked on a mutex.
Mutex(MutexId),
/// Blocked on a condition variable.
Condvar(CondvarId),
/// Blocked on a reader-writer lock.
RwLock(RwLockId),
/// Blocked on a Futex variable.
Futex { addr: u64 },
/// Blocked on an InitOnce.
InitOnce(InitOnceId),
}
/// The state of a thread.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ThreadState {
/// The thread is enabled and can be executed.
Enabled,
/// The thread tried to join the specified thread and is blocked until that
/// thread terminates.
BlockedOnJoin(ThreadId),
/// The thread is blocked on some synchronization primitive. It is the
/// responsibility of the synchronization primitives to track threads that
/// are blocked by them.
BlockedOnSync,
/// The thread is blocked on something.
Blocked(BlockReason),
/// The thread has terminated its execution. We do not delete terminated
/// threads (FIXME: why?).
Terminated,
@ -208,6 +223,12 @@ pub fn top_user_relevant_frame(&self) -> Option<usize> {
// empty stacks.
self.top_user_relevant_frame.or_else(|| self.stack.len().checked_sub(1))
}
pub fn current_span(&self) -> Span {
self.top_user_relevant_frame()
.map(|frame_idx| self.stack[frame_idx].current_span())
.unwrap_or(rustc_span::DUMMY_SP)
}
}
impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> {
@ -296,17 +317,17 @@ fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
/// A specific moment in time.
#[derive(Debug)]
pub enum Time {
pub enum CallbackTime {
Monotonic(Instant),
RealTime(SystemTime),
}
impl Time {
impl CallbackTime {
/// How long do we have to wait from now until the specified time?
fn get_wait_time(&self, clock: &Clock) -> Duration {
match self {
Time::Monotonic(instant) => instant.duration_since(clock.now()),
Time::RealTime(time) =>
CallbackTime::Monotonic(instant) => instant.duration_since(clock.now()),
CallbackTime::RealTime(time) =>
time.duration_since(SystemTime::now()).unwrap_or(Duration::new(0, 0)),
}
}
@ -318,7 +339,7 @@ fn get_wait_time(&self, clock: &Clock) -> Duration {
/// conditional variable, the signal handler deletes the callback.
struct TimeoutCallbackInfo<'mir, 'tcx> {
/// The callback should be called no earlier than this time.
call_time: Time,
call_time: CallbackTime,
/// The called function.
callback: TimeoutCallback<'mir, 'tcx>,
}
@ -430,11 +451,10 @@ fn active_thread_stack_mut(
) -> &mut Vec<Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>> {
&mut self.threads[self.active_thread].stack
}
pub fn all_stacks(
&self,
) -> impl Iterator<Item = &[Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>]> {
self.threads.iter().map(|t| &t.stack[..])
) -> impl Iterator<Item = (ThreadId, &[Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>])> {
self.threads.iter_enumerated().map(|(id, t)| (id, &t.stack[..]))
}
/// Create a new thread and returns its id.
@ -539,7 +559,8 @@ fn join_thread(
self.threads[joined_thread_id].join_status = ThreadJoinStatus::Joined;
if self.threads[joined_thread_id].state != ThreadState::Terminated {
// The joined thread is still running, we need to wait for it.
self.active_thread_mut().state = ThreadState::BlockedOnJoin(joined_thread_id);
self.active_thread_mut().state =
ThreadState::Blocked(BlockReason::Join(joined_thread_id));
trace!(
"{:?} blocked on {:?} when trying to join",
self.active_thread,
@ -569,10 +590,11 @@ fn join_thread_exclusive(
throw_ub_format!("trying to join itself");
}
// Sanity check `join_status`.
assert!(
self.threads
.iter()
.all(|thread| thread.state != ThreadState::BlockedOnJoin(joined_thread_id)),
self.threads.iter().all(|thread| {
thread.state != ThreadState::Blocked(BlockReason::Join(joined_thread_id))
}),
"this thread already has threads waiting for its termination"
);
@ -594,16 +616,17 @@ pub fn get_thread_display_name(&self, thread: ThreadId) -> String {
}
/// Put the thread into the blocked state.
fn block_thread(&mut self, thread: ThreadId) {
fn block_thread(&mut self, thread: ThreadId, reason: BlockReason) {
let state = &mut self.threads[thread].state;
assert_eq!(*state, ThreadState::Enabled);
*state = ThreadState::BlockedOnSync;
*state = ThreadState::Blocked(reason);
}
/// Put the blocked thread into the enabled state.
fn unblock_thread(&mut self, thread: ThreadId) {
/// Sanity-checks that the thread previously was blocked for the right reason.
fn unblock_thread(&mut self, thread: ThreadId, reason: BlockReason) {
let state = &mut self.threads[thread].state;
assert_eq!(*state, ThreadState::BlockedOnSync);
assert_eq!(*state, ThreadState::Blocked(reason));
*state = ThreadState::Enabled;
}
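The payoff of tracking a `BlockReason` is that `unblock_thread` can now assert a thread is woken for the same reason it was blocked. A minimal sketch of that pattern with toy types (not Miri's `ThreadManager`):

```rust
// Toy types illustrating the block/unblock sanity check above.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Reason { Mutex(u32), Futex { addr: u64 } }

#[derive(Debug, PartialEq)]
enum State { Enabled, Blocked(Reason) }

fn block(state: &mut State, reason: Reason) {
    assert_eq!(*state, State::Enabled);
    *state = State::Blocked(reason);
}

fn unblock(state: &mut State, reason: Reason) {
    // Waking a thread for a different reason than it blocked on is a bug.
    assert_eq!(*state, State::Blocked(reason));
    *state = State::Enabled;
}

fn main() {
    let mut s = State::Enabled;
    block(&mut s, Reason::Mutex(0));
    unblock(&mut s, Reason::Mutex(0));
    block(&mut s, Reason::Futex { addr: 0x10 });
    unblock(&mut s, Reason::Futex { addr: 0x10 });
}
```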
@ -622,7 +645,7 @@ fn yield_active_thread(&mut self) {
fn register_timeout_callback(
&mut self,
thread: ThreadId,
call_time: Time,
call_time: CallbackTime,
callback: TimeoutCallback<'mir, 'tcx>,
) {
self.timeout_callbacks
@ -683,7 +706,7 @@ fn thread_terminated(
// Check if we need to unblock any threads.
let mut joined_threads = vec![]; // store which threads joined, we'll need it
for (i, thread) in self.threads.iter_enumerated_mut() {
if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
if thread.state == ThreadState::Blocked(BlockReason::Join(self.active_thread)) {
// The thread has terminated, mark happens-before edge to joining thread
if data_race.is_some() {
joined_threads.push(i);
@ -999,13 +1022,13 @@ fn get_thread_name<'c>(&'c self, thread: ThreadId) -> Option<&[u8]>
}
#[inline]
fn block_thread(&mut self, thread: ThreadId) {
self.eval_context_mut().machine.threads.block_thread(thread);
fn block_thread(&mut self, thread: ThreadId, reason: BlockReason) {
self.eval_context_mut().machine.threads.block_thread(thread, reason);
}
#[inline]
fn unblock_thread(&mut self, thread: ThreadId) {
self.eval_context_mut().machine.threads.unblock_thread(thread);
fn unblock_thread(&mut self, thread: ThreadId, reason: BlockReason) {
self.eval_context_mut().machine.threads.unblock_thread(thread, reason);
}
#[inline]
@ -1027,11 +1050,11 @@ fn maybe_preempt_active_thread(&mut self) {
fn register_timeout_callback(
&mut self,
thread: ThreadId,
call_time: Time,
call_time: CallbackTime,
callback: TimeoutCallback<'mir, 'tcx>,
) {
let this = self.eval_context_mut();
if !this.machine.communicate() && matches!(call_time, Time::RealTime(..)) {
if !this.machine.communicate() && matches!(call_time, CallbackTime::RealTime(..)) {
panic!("cannot have `RealTime` callback with isolation enabled!")
}
this.machine.threads.register_timeout_callback(thread, call_time, callback);

View file

@ -4,7 +4,7 @@
use std::{
cmp::Ordering,
fmt::Debug,
ops::{Index, IndexMut, Shr},
ops::{Index, Shr},
};
use super::data_race::NaReadType;
@ -92,7 +92,7 @@ pub fn read_type(&self) -> NaReadType {
}
#[inline]
pub fn set_read_type(&mut self, read_type: NaReadType) {
pub(super) fn set_read_type(&mut self, read_type: NaReadType) {
self.time_and_read_type = Self::encode_time_and_read_type(self.time(), read_type);
}
@ -138,7 +138,7 @@ fn cmp(&self, other: &Self) -> Ordering {
impl VClock {
/// Create a new vector-clock containing all zeros except
/// for a value at the given index
pub fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock {
pub(super) fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock {
let len = index.index() + 1;
let mut vec = smallvec::smallvec![VTimestamp::ZERO; len];
vec[index.index()] = timestamp;
@ -147,12 +147,18 @@ pub fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock {
/// Load the internal timestamp slice in the vector clock
#[inline]
pub fn as_slice(&self) -> &[VTimestamp] {
pub(super) fn as_slice(&self) -> &[VTimestamp] {
debug_assert!(!self.0.last().is_some_and(|t| t.time() == 0));
self.0.as_slice()
}
#[inline]
pub(super) fn index_mut(&mut self, index: VectorIdx) -> &mut VTimestamp {
self.0.as_mut_slice().get_mut(index.to_u32() as usize).unwrap()
}
/// Get a mutable slice to the internal vector with minimum `min_len`
/// elements, to preserve invariants this vector must modify
/// elements. To preserve invariants, the caller must modify
/// the `min_len`-1 nth element to a non-zero value
#[inline]
fn get_mut_with_min_len(&mut self, min_len: usize) -> &mut [VTimestamp] {
@ -166,7 +172,7 @@ fn get_mut_with_min_len(&mut self, min_len: usize) -> &mut [VTimestamp] {
/// Increment the vector clock at a known index
/// this will panic if the vector index overflows
#[inline]
pub fn increment_index(&mut self, idx: VectorIdx, current_span: Span) {
pub(super) fn increment_index(&mut self, idx: VectorIdx, current_span: Span) {
let idx = idx.index();
let mut_slice = self.get_mut_with_min_len(idx + 1);
let idx_ref = &mut mut_slice[idx];
@ -190,28 +196,36 @@ pub fn join(&mut self, other: &Self) {
}
}
/// Set the element at the current index of the vector
pub fn set_at_index(&mut self, other: &Self, idx: VectorIdx) {
/// Set the element at the current index of the vector. May only increase elements.
pub(super) fn set_at_index(&mut self, other: &Self, idx: VectorIdx) {
let new_timestamp = other[idx];
// Setting to 0 is different, since the last element cannot be 0.
if new_timestamp.time() == 0 {
if idx.index() >= self.0.len() {
// This index does not even exist yet in our clock. Just do nothing.
return;
}
// This changes an existing element. Since it can only increase, that
// can never make the last element 0.
}
let mut_slice = self.get_mut_with_min_len(idx.index() + 1);
let mut_timestamp = &mut mut_slice[idx.index()];
let prev_span = mut_slice[idx.index()].span;
let prev_span = mut_timestamp.span;
mut_slice[idx.index()] = other[idx];
assert!(*mut_timestamp <= new_timestamp, "set_at_index: may only increase the timestamp");
*mut_timestamp = new_timestamp;
let span = &mut mut_slice[idx.index()].span;
let span = &mut mut_timestamp.span;
*span = span.substitute_dummy(prev_span);
}
/// Set the vector to the all-zero vector
#[inline]
pub fn set_zero_vector(&mut self) {
pub(super) fn set_zero_vector(&mut self) {
self.0.clear();
}
/// Return if this vector is the all-zero vector
pub fn is_zero_vector(&self) -> bool {
self.0.is_empty()
}
}
impl Clone for VClock {
@ -407,13 +421,6 @@ fn index(&self, index: VectorIdx) -> &VTimestamp {
}
}
impl IndexMut<VectorIdx> for VClock {
#[inline]
fn index_mut(&mut self, index: VectorIdx) -> &mut VTimestamp {
self.0.as_mut_slice().get_mut(index.to_u32() as usize).unwrap()
}
}
/// Test vector clock ordering operations
/// data-race detection is tested in the external
/// test suite
@ -553,4 +560,15 @@ fn assert_order(l: &[u32], r: &[u32], o: Option<Ordering>) {
"Invalid alt (>=):\n l: {l:?}\n r: {r:?}"
);
}
#[test]
fn set_index_to_0() {
let mut clock1 = from_slice(&[0, 1, 2, 3]);
let clock2 = from_slice(&[0, 2, 3, 4, 0, 5]);
// Naively, this would extend clock1 with a new index and set it to 0, making
// the last index 0. Make sure that does not happen.
clock1.set_at_index(&clock2, VectorIdx(4));
// This must not have made the last element 0.
assert!(clock1.0.last().unwrap().time() != 0);
}
}

View file

@ -291,7 +291,7 @@ pub fn report_error<'tcx, 'mir>(
ValidationErrorKind::PointerAsInt { .. } | ValidationErrorKind::PartialPointer
) =>
{
ecx.handle_ice(); // print interpreter backtrace
ecx.handle_ice(); // print interpreter backtrace (this is outside the eval `catch_unwind`)
bug!(
"This validation error should be impossible in Miri: {}",
format_interp_error(ecx.tcx.dcx(), e)
@ -308,7 +308,7 @@ pub fn report_error<'tcx, 'mir>(
InvalidProgramInfo::AlreadyReported(_) | InvalidProgramInfo::Layout(..),
) => "post-monomorphization error",
_ => {
ecx.handle_ice(); // print interpreter backtrace
ecx.handle_ice(); // print interpreter backtrace (this is outside the eval `catch_unwind`)
bug!(
"This error should be impossible in Miri: {}",
format_interp_error(ecx.tcx.dcx(), e)
@ -361,9 +361,12 @@ pub fn report_error<'tcx, 'mir>(
};
let stacktrace = ecx.generate_stacktrace();
let (stacktrace, was_pruned) = prune_stacktrace(stacktrace, &ecx.machine);
let (stacktrace, mut any_pruned) = prune_stacktrace(stacktrace, &ecx.machine);
// We want to dump the allocation if this is `InvalidUninitBytes`. Since `format_error` consumes `e`, we compute the output early.
let mut show_all_threads = false;
// We want to dump the allocation if this is `InvalidUninitBytes`.
// Since `format_interp_error` consumes `e`, we compute the output early.
let mut extra = String::new();
match e.kind() {
UndefinedBehavior(InvalidUninitBytes(Some((alloc_id, access)))) => {
@ -375,6 +378,15 @@ pub fn report_error<'tcx, 'mir>(
.unwrap();
writeln!(extra, "{:?}", ecx.dump_alloc(*alloc_id)).unwrap();
}
MachineStop(info) => {
let info = info.downcast_ref::<TerminationInfo>().expect("invalid MachineStop payload");
match info {
TerminationInfo::Deadlock => {
show_all_threads = true;
}
_ => {}
}
}
_ => {}
}
@ -387,18 +399,39 @@ pub fn report_error<'tcx, 'mir>(
vec![],
helps,
&stacktrace,
Some(ecx.get_active_thread()),
&ecx.machine,
);
eprint!("{extra}"); // newlines are already in the string
if show_all_threads {
for (thread, stack) in ecx.machine.threads.all_stacks() {
if thread != ecx.get_active_thread() {
let stacktrace = Frame::generate_stacktrace_from_stack(stack);
let (stacktrace, was_pruned) = prune_stacktrace(stacktrace, &ecx.machine);
any_pruned |= was_pruned;
report_msg(
DiagLevel::Error,
format!("deadlock: the evaluated program deadlocked"),
vec![format!("the evaluated program deadlocked")],
vec![],
vec![],
&stacktrace,
Some(thread),
&ecx.machine,
)
}
}
}
// Include a note like `std` does when we omit frames from a backtrace
if was_pruned {
if any_pruned {
ecx.tcx.dcx().note(
"some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace",
);
}
eprint!("{extra}"); // newlines are already in the string
// Debug-dump all locals.
for (i, frame) in ecx.active_thread_stack().iter().enumerate() {
trace!("-------------------");
@ -435,6 +468,7 @@ pub fn report_leaks<'mir, 'tcx>(
vec![],
vec![],
&backtrace,
None, // we don't know the thread this is from
&ecx.machine,
);
}
@ -457,6 +491,7 @@ pub fn report_msg<'tcx>(
notes: Vec<(Option<SpanData>, String)>,
helps: Vec<(Option<SpanData>, String)>,
stacktrace: &[FrameInfo<'tcx>],
thread: Option<ThreadId>,
machine: &MiriMachine<'_, 'tcx>,
) {
let span = stacktrace.first().map_or(DUMMY_SP, |fi| fi.span);
@ -506,12 +541,13 @@ pub fn report_msg<'tcx>(
if extra_span {
write!(backtrace_title, " (of the first span)").unwrap();
}
let thread_name =
machine.threads.get_thread_display_name(machine.threads.get_active_thread_id());
if thread_name != "main" {
// Only print thread name if it is not `main`.
write!(backtrace_title, " on thread `{thread_name}`").unwrap();
};
if let Some(thread) = thread {
let thread_name = machine.threads.get_thread_display_name(thread);
if thread_name != "main" {
// Only print thread name if it is not `main`.
write!(backtrace_title, " on thread `{thread_name}`").unwrap();
};
}
write!(backtrace_title, ":").unwrap();
err.note(backtrace_title);
for (idx, frame_info) in stacktrace.iter().enumerate() {
@ -628,7 +664,16 @@ pub fn emit_diagnostic(&self, e: NonHaltingDiagnostic) {
_ => vec![],
};
report_msg(diag_level, title, vec![msg], notes, helps, &stacktrace, self);
report_msg(
diag_level,
title,
vec![msg],
notes,
helps,
&stacktrace,
Some(self.threads.get_active_thread_id()),
self,
);
}
}
@ -654,6 +699,7 @@ fn handle_ice(&self) {
vec![],
vec![],
&stacktrace,
Some(this.get_active_thread()),
&this.machine,
);
}

View file

@ -150,6 +150,10 @@ pub struct MiriConfig {
pub page_size: Option<u64>,
/// Whether to collect a backtrace when each allocation is created, just in case it leaks.
pub collect_leak_backtraces: bool,
/// Probability for address reuse.
pub address_reuse_rate: f64,
/// Probability for address reuse across threads.
pub address_reuse_cross_thread_rate: f64,
}
impl Default for MiriConfig {
@ -186,6 +190,8 @@ fn default() -> MiriConfig {
num_cpus: 1,
page_size: None,
collect_leak_backtraces: true,
address_reuse_rate: 0.5,
address_reuse_cross_thread_rate: 0.1,
}
}
}

View file

@ -912,10 +912,25 @@ fn read_timespec(
})
}
/// Read bytes from a byte slice.
fn read_byte_slice<'a>(
&'a self,
slice: &ImmTy<'tcx, Provenance>,
) -> InterpResult<'tcx, &'a [u8]>
where
'mir: 'a,
{
let this = self.eval_context_ref();
let (ptr, len) = slice.to_scalar_pair();
let ptr = ptr.to_pointer(this)?;
let len = len.to_target_usize(this)?;
let bytes = this.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
Ok(bytes)
}
/// Read a sequence of bytes until the first null terminator.
fn read_c_str<'a>(&'a self, ptr: Pointer<Option<Provenance>>) -> InterpResult<'tcx, &'a [u8]>
where
'tcx: 'a,
'mir: 'a,
{
let this = self.eval_context_ref();
@ -1265,9 +1280,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
/// This function is backed by a cache, and can be assumed to be very fast.
/// It will work even when the stack is empty.
pub fn current_span(&self) -> Span {
self.top_user_relevant_frame()
.map(|frame_idx| self.stack()[frame_idx].current_span())
.unwrap_or(rustc_span::DUMMY_SP)
self.threads.active_thread_ref().current_span()
}
/// Returns the span of the *caller* of the current operation, again
@ -1279,7 +1292,7 @@ pub fn caller_span(&self) -> Span {
// We need to go down at least to the caller (len - 2), or however
// far we have to go to find a frame in a local crate which is also not #[track_caller].
let frame_idx = self.top_user_relevant_frame().unwrap();
let frame_idx = cmp::min(frame_idx, self.stack().len().checked_sub(2).unwrap());
let frame_idx = cmp::min(frame_idx, self.stack().len().saturating_sub(2));
self.stack()[frame_idx].current_span()
}

View file

@ -116,7 +116,9 @@
data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
init_once::{EvalContextExt as _, InitOnceId},
sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SyncId},
thread::{EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager, Time},
thread::{
BlockReason, CallbackTime, EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager,
},
};
pub use crate::diagnostics::{
report_error, EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo,

View file

@ -1282,7 +1282,7 @@ fn before_memory_deallocation(
(alloc_id, prove_extra): (AllocId, Self::ProvenanceExtra),
size: Size,
align: Align,
_kind: MemoryKind,
kind: MemoryKind,
) -> InterpResult<'tcx> {
if machine.tracked_alloc_ids.contains(&alloc_id) {
machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
@ -1303,12 +1303,7 @@ fn before_memory_deallocation(
{
*deallocated_at = Some(machine.current_span());
}
machine.alloc_addresses.get_mut().free_alloc_id(
machine.rng.get_mut(),
alloc_id,
size,
align,
);
machine.free_alloc_id(alloc_id, size, align, kind);
Ok(())
}

View file

@ -0,0 +1,152 @@
use std::iter;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_target::abi::{Align, Size};
use crate::*;
use shims::foreign_items::EmulateForeignItemResult;
/// Check some basic requirements for this allocation request:
/// non-zero size, power-of-two alignment.
pub(super) fn check_alloc_request<'tcx>(size: u64, align: u64) -> InterpResult<'tcx> {
if size == 0 {
throw_ub_format!("creating allocation with size 0");
}
if !align.is_power_of_two() {
throw_ub_format!("creating allocation with non-power-of-two alignment {}", align);
}
Ok(())
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// Returns the minimum alignment for the target architecture for allocations of the given size.
fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align {
let this = self.eval_context_ref();
// List taken from `library/std/src/sys/pal/common/alloc.rs`.
// This list should be kept in sync with the one from libstd.
let min_align = match this.tcx.sess.target.arch.as_ref() {
"x86" | "arm" | "mips" | "mips32r6" | "powerpc" | "powerpc64" | "wasm32" => 8,
"x86_64" | "aarch64" | "mips64" | "mips64r6" | "s390x" | "sparc64" | "loongarch64" =>
16,
arch => bug!("unsupported target architecture for malloc: `{}`", arch),
};
// Windows always aligns, even small allocations.
// Source: <https://support.microsoft.com/en-us/help/286470/how-to-use-pageheap-exe-in-windows-xp-windows-2000-and-windows-server>
// But jemalloc does not, so for the C heap we only align if the allocation is sufficiently big.
if kind == MiriMemoryKind::WinHeap || size >= min_align {
return Align::from_bytes(min_align).unwrap();
}
// We have `size < min_align`. Round `size` *down* to the next power of two and use that.
fn prev_power_of_two(x: u64) -> u64 {
let next_pow2 = x.next_power_of_two();
if next_pow2 == x {
// x *is* a power of two, just use that.
x
} else {
// x is between two powers, so next = 2*prev.
next_pow2 / 2
}
}
Align::from_bytes(prev_power_of_two(size)).unwrap()
}
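// Illustrative note (editorial, derived from the logic above): on x86_64 a 24-byte C-heap
// request gets the 16-byte platform minimum, while a 3-byte request is rounded down to the
// previous power of two and gets an alignment of 2; `MiriMemoryKind::WinHeap` requests always
// get the platform minimum regardless of size.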
/// Emulates calling the internal __rust_* allocator functions
fn emulate_allocator(
&mut self,
default: impl FnOnce(&mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx>,
) -> InterpResult<'tcx, EmulateForeignItemResult> {
let this = self.eval_context_mut();
let Some(allocator_kind) = this.tcx.allocator_kind(()) else {
// in real code, this symbol does not exist without an allocator
return Ok(EmulateForeignItemResult::NotSupported);
};
match allocator_kind {
AllocatorKind::Global => {
// When `#[global_allocator]` is used, `__rust_*` is defined by the macro expansion
// of this attribute. As such we have to call an exported Rust function,
// and not execute any Miri shim. Somewhat unintuitively, doing so is done
// by returning `NotSupported`, which triggers the `lookup_exported_symbol`
// fallback case in `emulate_foreign_item`.
return Ok(EmulateForeignItemResult::NotSupported);
}
AllocatorKind::Default => {
default(this)?;
Ok(EmulateForeignItemResult::NeedsJumping)
}
}
}
fn malloc(
&mut self,
size: u64,
zero_init: bool,
kind: MiriMemoryKind,
) -> InterpResult<'tcx, Pointer<Option<Provenance>>> {
let this = self.eval_context_mut();
if size == 0 {
Ok(Pointer::null())
} else {
let align = this.min_align(size, kind);
let ptr = this.allocate_ptr(Size::from_bytes(size), align, kind.into())?;
if zero_init {
// We just allocated this, the access is definitely in-bounds and fits into our address space.
this.write_bytes_ptr(
ptr.into(),
iter::repeat(0u8).take(usize::try_from(size).unwrap()),
)
.unwrap();
}
Ok(ptr.into())
}
}
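// Editorial note: a zero-size request deliberately returns a null pointer, which C permits
// for `malloc(0)`; the matching `free` below treats a null pointer as a no-op.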
fn free(
&mut self,
ptr: Pointer<Option<Provenance>>,
kind: MiriMemoryKind,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if !this.ptr_is_null(ptr)? {
this.deallocate_ptr(ptr, None, kind.into())?;
}
Ok(())
}
fn realloc(
&mut self,
old_ptr: Pointer<Option<Provenance>>,
new_size: u64,
kind: MiriMemoryKind,
) -> InterpResult<'tcx, Pointer<Option<Provenance>>> {
let this = self.eval_context_mut();
let new_align = this.min_align(new_size, kind);
if this.ptr_is_null(old_ptr)? {
// Here we must behave like `malloc`.
if new_size == 0 {
Ok(Pointer::null())
} else {
let new_ptr =
this.allocate_ptr(Size::from_bytes(new_size), new_align, kind.into())?;
Ok(new_ptr.into())
}
} else {
if new_size == 0 {
// C, in their infinite wisdom, made this UB.
// <https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2464.pdf>
throw_ub_format!("`realloc` with a size of zero");
} else {
let new_ptr = this.reallocate_ptr(
old_ptr,
None,
Size::from_bytes(new_size),
new_align,
kind.into(),
)?;
Ok(new_ptr.into())
}
}
}
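// Editorial summary of the behavior above: `realloc(NULL, n)` acts like `malloc(n)` without
// zero-initialization, `realloc(NULL, 0)` returns NULL, and `realloc(p, 0)` on a non-null `p`
// is reported as UB (exercised by the `realloc-zero` test added later in this diff).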
}

View file

@ -32,9 +32,14 @@ fn null_ptr_extern_statics(
/// Sets up the "extern statics" for this machine.
pub fn init_extern_statics(this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
// "__rust_no_alloc_shim_is_unstable"
let val = ImmTy::from_int(0, this.machine.layouts.u8);
let val = ImmTy::from_int(0, this.machine.layouts.u8); // always 0, value does not matter
Self::alloc_extern_static(this, "__rust_no_alloc_shim_is_unstable", val)?;
// "__rust_alloc_error_handler_should_panic"
let val = this.tcx.sess.opts.unstable_opts.oom.should_panic();
let val = ImmTy::from_int(val, this.machine.layouts.u8);
Self::alloc_extern_static(this, "__rust_alloc_error_handler_should_panic", val)?;
match this.tcx.sess.target.os.as_ref() {
"linux" => {
Self::null_ptr_extern_statics(

View file

@ -1,7 +1,7 @@
use std::{collections::hash_map::Entry, io::Write, iter, path::Path};
use rustc_apfloat::Float;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_ast::expand::allocator::alloc_error_handler_name;
use rustc_hir::{def::DefKind, def_id::CrateNum};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir;
@ -12,6 +12,7 @@
spec::abi::Abi,
};
use super::alloc::{check_alloc_request, EvalContextExt as _};
use super::backtrace::EvalContextExt as _;
use crate::*;
use helpers::{ToHost, ToSoft};
@ -80,6 +81,20 @@ fn emulate_foreign_item(
panic_impl_instance,
)));
}
"__rust_alloc_error_handler" => {
// Forward to the right symbol that implements this function.
let Some(handler_kind) = this.tcx.alloc_error_handler_kind(()) else {
// in real code, this symbol does not exist without an allocator
throw_unsup_format!(
"`__rust_alloc_error_handler` cannot be called when no alloc error handler is set"
);
};
let name = alloc_error_handler_name(handler_kind);
let handler = this
.lookup_exported_symbol(Symbol::intern(name))?
.expect("missing alloc error handler symbol");
return Ok(Some(handler));
}
#[rustfmt::skip]
| "exit"
| "ExitProcess"
@ -218,151 +233,10 @@ fn lookup_exported_symbol(
Some(instance) => Ok(Some((this.load_mir(instance.def, None)?, instance))),
}
}
fn malloc(
&mut self,
size: u64,
zero_init: bool,
kind: MiriMemoryKind,
) -> InterpResult<'tcx, Pointer<Option<Provenance>>> {
let this = self.eval_context_mut();
if size == 0 {
Ok(Pointer::null())
} else {
let align = this.min_align(size, kind);
let ptr = this.allocate_ptr(Size::from_bytes(size), align, kind.into())?;
if zero_init {
// We just allocated this, the access is definitely in-bounds and fits into our address space.
this.write_bytes_ptr(
ptr.into(),
iter::repeat(0u8).take(usize::try_from(size).unwrap()),
)
.unwrap();
}
Ok(ptr.into())
}
}
fn free(
&mut self,
ptr: Pointer<Option<Provenance>>,
kind: MiriMemoryKind,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if !this.ptr_is_null(ptr)? {
this.deallocate_ptr(ptr, None, kind.into())?;
}
Ok(())
}
fn realloc(
&mut self,
old_ptr: Pointer<Option<Provenance>>,
new_size: u64,
kind: MiriMemoryKind,
) -> InterpResult<'tcx, Pointer<Option<Provenance>>> {
let this = self.eval_context_mut();
let new_align = this.min_align(new_size, kind);
if this.ptr_is_null(old_ptr)? {
if new_size == 0 {
Ok(Pointer::null())
} else {
let new_ptr =
this.allocate_ptr(Size::from_bytes(new_size), new_align, kind.into())?;
Ok(new_ptr.into())
}
} else {
if new_size == 0 {
this.deallocate_ptr(old_ptr, None, kind.into())?;
Ok(Pointer::null())
} else {
let new_ptr = this.reallocate_ptr(
old_ptr,
None,
Size::from_bytes(new_size),
new_align,
kind.into(),
)?;
Ok(new_ptr.into())
}
}
}
}
impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// Read bytes from a `(ptr, len)` argument
fn read_byte_slice<'i>(&'i self, bytes: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, &'i [u8]>
where
'mir: 'i,
{
let this = self.eval_context_ref();
let (ptr, len) = this.read_immediate(bytes)?.to_scalar_pair();
let ptr = ptr.to_pointer(this)?;
let len = len.to_target_usize(this)?;
let bytes = this.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
Ok(bytes)
}
/// Returns the minimum alignment for the target architecture for allocations of the given size.
fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align {
let this = self.eval_context_ref();
// List taken from `library/std/src/sys/pal/common/alloc.rs`.
// This list should be kept in sync with the one from libstd.
let min_align = match this.tcx.sess.target.arch.as_ref() {
"x86" | "arm" | "mips" | "mips32r6" | "powerpc" | "powerpc64" | "wasm32" => 8,
"x86_64" | "aarch64" | "mips64" | "mips64r6" | "s390x" | "sparc64" | "loongarch64" =>
16,
arch => bug!("unsupported target architecture for malloc: `{}`", arch),
};
// Windows always aligns, even small allocations.
// Source: <https://support.microsoft.com/en-us/help/286470/how-to-use-pageheap-exe-in-windows-xp-windows-2000-and-windows-server>
// But jemalloc does not, so for the C heap we only align if the allocation is sufficiently big.
if kind == MiriMemoryKind::WinHeap || size >= min_align {
return Align::from_bytes(min_align).unwrap();
}
// We have `size < min_align`. Round `size` *down* to the next power of two and use that.
fn prev_power_of_two(x: u64) -> u64 {
let next_pow2 = x.next_power_of_two();
if next_pow2 == x {
// x *is* a power of two, just use that.
x
} else {
// x is between two powers, so next = 2*prev.
next_pow2 / 2
}
}
Align::from_bytes(prev_power_of_two(size)).unwrap()
}
/// Emulates calling the internal __rust_* allocator functions
fn emulate_allocator(
&mut self,
default: impl FnOnce(&mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx>,
) -> InterpResult<'tcx, EmulateForeignItemResult> {
let this = self.eval_context_mut();
let Some(allocator_kind) = this.tcx.allocator_kind(()) else {
// in real code, this symbol does not exist without an allocator
return Ok(EmulateForeignItemResult::NotSupported);
};
match allocator_kind {
AllocatorKind::Global => {
// When `#[global_allocator]` is used, `__rust_*` is defined by the macro expansion
// of this attribute. As such we have to call an exported Rust function,
// and not execute any Miri shim. Somewhat unintuitively doing so is done
// by returning `NotSupported`, which triggers the `lookup_exported_symbol`
// fallback case in `emulate_foreign_item`.
return Ok(EmulateForeignItemResult::NotSupported);
}
AllocatorKind::Default => {
default(this)?;
Ok(EmulateForeignItemResult::NeedsJumping)
}
}
}
fn emulate_foreign_item_inner(
&mut self,
link_name: Symbol,
@ -452,7 +326,9 @@ fn emulate_foreign_item_inner(
let [ptr, nth_parent, name] = this.check_shim(abi, Abi::Rust, link_name, args)?;
let ptr = this.read_pointer(ptr)?;
let nth_parent = this.read_scalar(nth_parent)?.to_u8()?;
let name = this.read_byte_slice(name)?;
let name = this.read_immediate(name)?;
let name = this.read_byte_slice(&name)?;
// We must make `name` owned because we need to
// end the shared borrow from `read_byte_slice` before we can
// start the mutable borrow for `give_pointer_debug_name`.
@ -513,7 +389,8 @@ fn emulate_foreign_item_inner(
// README for details.
"miri_write_to_stdout" | "miri_write_to_stderr" => {
let [msg] = this.check_shim(abi, Abi::Rust, link_name, args)?;
let msg = this.read_byte_slice(msg)?;
let msg = this.read_immediate(msg)?;
let msg = this.read_byte_slice(&msg)?;
// Note: we're ignoring errors writing to host stdout/stderr.
let _ignore = match link_name.as_str() {
"miri_write_to_stdout" => std::io::stdout().write_all(msg),
@ -606,7 +483,7 @@ fn emulate_foreign_item_inner(
let size = this.read_target_usize(size)?;
let align = this.read_target_usize(align)?;
Self::check_alloc_request(size, align)?;
check_alloc_request(size, align)?;
let memory_kind = match link_name.as_str() {
"__rust_alloc" => MiriMemoryKind::Rust,
@ -640,7 +517,7 @@ fn emulate_foreign_item_inner(
let size = this.read_target_usize(size)?;
let align = this.read_target_usize(align)?;
Self::check_alloc_request(size, align)?;
check_alloc_request(size, align)?;
let ptr = this.allocate_ptr(
Size::from_bytes(size),
@ -704,7 +581,7 @@ fn emulate_foreign_item_inner(
let new_size = this.read_target_usize(new_size)?;
// No need to check old_size; we anyway check that they match the allocation.
Self::check_alloc_request(new_size, align)?;
check_alloc_request(new_size, align)?;
let align = Align::from_bytes(align).unwrap();
let new_ptr = this.reallocate_ptr(
@ -1096,16 +973,4 @@ fn emulate_foreign_item_inner(
// i.e., if we actually emulated the function with one of the shims.
Ok(EmulateForeignItemResult::NeedsJumping)
}
/// Check some basic requirements for this allocation request:
/// non-zero size, power-of-two alignment.
fn check_alloc_request(size: u64, align: u64) -> InterpResult<'tcx> {
if size == 0 {
throw_ub_format!("creating allocation with size 0");
}
if !align.is_power_of_two() {
throw_ub_format!("creating allocation with non-power-of-two alignment {}", align);
}
Ok(())
}
}

View file

@ -1,5 +1,6 @@
#![warn(clippy::arithmetic_side_effects)]
mod alloc;
mod backtrace;
#[cfg(target_os = "linux")]
pub mod ffi_support;

View file

@ -251,7 +251,6 @@ fn alloc_path_as_wide_str(
this.alloc_os_str_as_wide_str(&os_str, memkind)
}
#[allow(clippy::get_first)]
fn convert_path<'a>(
&self,
os_str: Cow<'a, OsStr>,
@ -260,100 +259,97 @@ fn convert_path<'a>(
let this = self.eval_context_ref();
let target_os = &this.tcx.sess.target.os;
/// Adjust a Windows path to Unix conventions such that it un-does everything that
/// `unix_to_windows` did, and such that if the Windows input path was absolute, then the
/// Unix output path is absolute.
fn windows_to_unix<T>(path: &mut Vec<T>)
where
T: From<u8> + Copy + Eq,
{
let sep = T::from(b'/');
// Make sure all path separators are `/`.
for c in path.iter_mut() {
if *c == b'\\'.into() {
*c = sep;
}
}
// If this starts with `//?/`, it was probably produced by `unix_to_windows` and we
// remove the `//?` that got added to get the Unix path back out.
if path.get(0..4) == Some(&[sep, sep, b'?'.into(), sep]) {
// Remove first 3 characters. It still starts with `/` so it is absolute on Unix.
path.splice(0..3, std::iter::empty());
}
// If it starts with a drive letter (`X:/`), convert it to an absolute Unix path.
else if path.get(1..3) == Some(&[b':'.into(), sep]) {
// We add a `/` at the beginning, to store the absolute Windows
// path in something that looks like an absolute Unix path.
path.insert(0, sep);
}
}
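// For illustration: after separator conversion, `\\?\C:\dir` becomes `//?/C:/dir` and the
// leading `//?` is stripped, yielding `/C:/dir`; a plain drive-letter path like `C:\dir`
// becomes `/C:/dir` as well, so absolute Windows paths stay absolute on Unix.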
/// Adjust a Unix path to Windows conventions such that it un-does everything that
/// `windows_to_unix` did, and such that if the Unix input path was absolute, then the
/// Windows output path is absolute.
fn unix_to_windows<T>(path: &mut Vec<T>)
where
T: From<u8> + Copy + Eq,
{
let sep = T::from(b'\\');
// Make sure all path separators are `\`.
for c in path.iter_mut() {
if *c == b'/'.into() {
*c = sep;
}
}
// If the path is `\X:\`, the leading separator was probably added by `windows_to_unix`
// and we should get rid of it again.
if path.get(2..4) == Some(&[b':'.into(), sep]) && path[0] == sep {
// The new path is still absolute on Windows.
path.remove(0);
}
// If this starts with a `\` but not a `\\`, then this was absolute on Unix but is
// relative on Windows (relative to "the root of the current directory", e.g. the
// drive letter).
else if path.first() == Some(&sep) && path.get(1) != Some(&sep) {
// We add `\\?` so it starts with `\\?\` which is some magic path on Windows
// that *is* considered absolute. This way we store the absolute Unix path
// in something that looks like an absolute Windows path.
path.splice(0..0, [sep, sep, b'?'.into()]);
}
}
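// For illustration: `/C:/dir` turns back into `C:\dir`, and an absolute Unix path such as
// `/tmp/x` becomes `\\?\tmp\x`, which Windows still treats as an absolute path.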
// Below we assume that everything non-Windows works like Unix, at least
// when it comes to file system path conventions.
#[cfg(windows)]
return if target_os == "windows" {
// Windows-on-Windows, all fine.
os_str
} else {
// Unix target, Windows host.
let (from, to) = match direction {
PathConversion::HostToTarget => ('\\', '/'),
PathConversion::TargetToHost => ('/', '\\'),
};
let mut converted = os_str
.encode_wide()
.map(|wchar| if wchar == from as u16 { to as u16 } else { wchar })
.collect::<Vec<_>>();
// We also have to ensure that absolute paths remain absolute.
let mut path: Vec<u16> = os_str.encode_wide().collect();
match direction {
PathConversion::HostToTarget => {
// If this is an absolute Windows path that starts with a drive letter (`C:/...`
// after separator conversion), it would not be considered absolute by Unix
// target code.
if converted.get(1).copied() == Some(b':' as u16)
&& converted.get(2).copied() == Some(b'/' as u16)
{
// We add a `/` at the beginning, to store the absolute Windows
// path in something that looks like an absolute Unix path.
converted.insert(0, b'/' as u16);
}
windows_to_unix(&mut path);
}
PathConversion::TargetToHost => {
// If the path is `\C:\`, the leading backslash was probably added by the above code
// and we should get rid of it again.
if converted.get(0).copied() == Some(b'\\' as u16)
&& converted.get(2).copied() == Some(b':' as u16)
&& converted.get(3).copied() == Some(b'\\' as u16)
{
converted.remove(0);
}
unix_to_windows(&mut path);
}
}
Cow::Owned(OsString::from_wide(&converted))
Cow::Owned(OsString::from_wide(&path))
};
#[cfg(unix)]
return if target_os == "windows" {
// Windows target, Unix host.
let (from, to) = match direction {
PathConversion::HostToTarget => (b'/', b'\\'),
PathConversion::TargetToHost => (b'\\', b'/'),
};
let mut converted = os_str
.as_bytes()
.iter()
.map(|&wchar| if wchar == from { to } else { wchar })
.collect::<Vec<_>>();
// We also have to ensure that absolute paths remain absolute.
let mut path: Vec<u8> = os_str.into_owned().into_encoded_bytes();
match direction {
PathConversion::HostToTarget => {
// If the path is `/C:/`, the leading backslash was probably added by the below
// drive letter handling and we should get rid of it again.
if converted.get(0).copied() == Some(b'\\')
&& converted.get(2).copied() == Some(b':')
&& converted.get(3).copied() == Some(b'\\')
{
converted.remove(0);
}
// If this starts with a `\` but not a `\\`, then for Windows this is a relative
// path. But the host path is absolute as it started with `/`. We add `\\?` so
// it starts with `\\?\` which is some magic path on Windows that *is*
// considered absolute.
else if converted.get(0).copied() == Some(b'\\')
&& converted.get(1).copied() != Some(b'\\')
{
converted.splice(0..0, b"\\\\?".iter().copied());
}
unix_to_windows(&mut path);
}
PathConversion::TargetToHost => {
// If this starts with `//?/`, it was probably produced by the above code and we
// remove the `//?` that got added to get the Unix path back out.
if converted.get(0).copied() == Some(b'/')
&& converted.get(1).copied() == Some(b'/')
&& converted.get(2).copied() == Some(b'?')
&& converted.get(3).copied() == Some(b'/')
{
// Remove first 3 characters
converted.splice(0..3, std::iter::empty());
}
// If it starts with a drive letter, convert it to an absolute Unix path.
else if converted.get(1).copied() == Some(b':')
&& converted.get(2).copied() == Some(b'/')
{
converted.insert(0, b'/');
}
windows_to_unix(&mut path);
}
}
Cow::Owned(OsString::from_vec(converted))
Cow::Owned(OsString::from_vec(path))
} else {
// Unix-on-Unix, all is fine.
os_str

View file

@ -236,11 +236,11 @@ fn nanosleep(
.unwrap_or_else(|| now.checked_add(Duration::from_secs(3600)).unwrap());
let active_thread = this.get_active_thread();
this.block_thread(active_thread);
this.block_thread(active_thread, BlockReason::Sleep);
this.register_timeout_callback(
active_thread,
Time::Monotonic(timeout_time),
CallbackTime::Monotonic(timeout_time),
Box::new(UnblockCallback { thread_to_unblock: active_thread }),
);
@ -259,11 +259,11 @@ fn Sleep(&mut self, timeout: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let timeout_time = this.machine.clock.now().checked_add(duration).unwrap();
let active_thread = this.get_active_thread();
this.block_thread(active_thread);
this.block_thread(active_thread, BlockReason::Sleep);
this.register_timeout_callback(
active_thread,
Time::Monotonic(timeout_time),
CallbackTime::Monotonic(timeout_time),
Box::new(UnblockCallback { thread_to_unblock: active_thread }),
);
@ -281,7 +281,7 @@ fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for UnblockCallback {
fn call(&self, ecx: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
ecx.unblock_thread(self.thread_to_unblock);
ecx.unblock_thread(self.thread_to_unblock, BlockReason::Sleep);
Ok(())
}
}

View file

@ -6,6 +6,7 @@
use rustc_target::abi::{Align, Size};
use rustc_target::spec::abi::Abi;
use crate::shims::alloc::EvalContextExt as _;
use crate::shims::unix::*;
use crate::*;
use shims::foreign_items::EmulateForeignItemResult;

View file

@ -196,13 +196,12 @@ struct OpenDir {
read_dir: ReadDir,
/// The most recent entry returned by readdir().
/// Will be freed by the next call.
entry: Pointer<Option<Provenance>>,
entry: Option<Pointer<Option<Provenance>>>,
}
impl OpenDir {
fn new(read_dir: ReadDir) -> Self {
// We rely on `free` being a NOP on null pointers.
Self { read_dir, entry: Pointer::null() }
Self { read_dir, entry: None }
}
}
@ -924,8 +923,12 @@ fn linux_readdir64(
let d_name_offset = dirent64_layout.fields.offset(4 /* d_name */).bytes();
let size = d_name_offset.checked_add(name_len).unwrap();
let entry =
this.malloc(size, /*zero_init:*/ false, MiriMemoryKind::Runtime)?;
let entry = this.allocate_ptr(
Size::from_bytes(size),
dirent64_layout.align.abi,
MiriMemoryKind::Runtime.into(),
)?;
let entry: Pointer<Option<Provenance>> = entry.into();
// If the host is a Unix system, fill in the inode number with its real value.
// If not, use 0 as a fallback value.
@ -949,23 +952,25 @@ fn linux_readdir64(
let name_ptr = entry.offset(Size::from_bytes(d_name_offset), this)?;
this.write_bytes_ptr(name_ptr, name_bytes.iter().copied())?;
entry
Some(entry)
}
None => {
// end of stream: return NULL
Pointer::null()
None
}
Some(Err(e)) => {
this.set_last_error_from_io_error(e.kind())?;
Pointer::null()
None
}
};
let open_dir = this.machine.dirs.streams.get_mut(&dirp).unwrap();
let old_entry = std::mem::replace(&mut open_dir.entry, entry);
this.free(old_entry, MiriMemoryKind::Runtime)?;
if let Some(old_entry) = old_entry {
this.deallocate_ptr(old_entry, None, MiriMemoryKind::Runtime.into())?;
}
Ok(Scalar::from_maybe_pointer(entry, this))
Ok(Scalar::from_maybe_pointer(entry.unwrap_or_else(Pointer::null), this))
}
fn macos_fbsd_readdir_r(
@ -1106,7 +1111,9 @@ fn closedir(&mut self, dirp_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i
}
if let Some(open_dir) = this.machine.dirs.streams.remove(&dirp) {
this.free(open_dir.entry, MiriMemoryKind::Runtime)?;
if let Some(entry) = open_dir.entry {
this.deallocate_ptr(entry, None, MiriMemoryKind::Runtime.into())?;
}
drop(open_dir);
Ok(0)
} else {

View file

@ -107,16 +107,22 @@ pub fn futex<'tcx>(
Some(if wait_bitset {
// FUTEX_WAIT_BITSET uses an absolute timestamp.
if realtime {
Time::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap())
CallbackTime::RealTime(
SystemTime::UNIX_EPOCH.checked_add(duration).unwrap(),
)
} else {
Time::Monotonic(this.machine.clock.anchor().checked_add(duration).unwrap())
CallbackTime::Monotonic(
this.machine.clock.anchor().checked_add(duration).unwrap(),
)
}
} else {
// FUTEX_WAIT uses a relative timestamp.
if realtime {
Time::RealTime(SystemTime::now().checked_add(duration).unwrap())
CallbackTime::RealTime(SystemTime::now().checked_add(duration).unwrap())
} else {
Time::Monotonic(this.machine.clock.now().checked_add(duration).unwrap())
CallbackTime::Monotonic(
this.machine.clock.now().checked_add(duration).unwrap(),
)
}
})
};
@ -169,7 +175,7 @@ pub fn futex<'tcx>(
let futex_val = this.read_scalar_atomic(&addr, AtomicReadOrd::Relaxed)?.to_i32()?;
if val == futex_val {
// The value still matches, so we block the thread and make it wait for FUTEX_WAKE.
this.block_thread(thread);
this.block_thread(thread, BlockReason::Futex { addr: addr_usize });
this.futex_wait(addr_usize, thread, bitset);
// Successfully waking up from FUTEX_WAIT always returns zero.
this.write_scalar(Scalar::from_target_isize(0, this), dest)?;
@ -191,7 +197,10 @@ fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for Callback<'tcx> {
fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
this.unblock_thread(self.thread);
this.unblock_thread(
self.thread,
BlockReason::Futex { addr: self.addr_usize },
);
this.futex_remove_waiter(self.addr_usize, self.thread);
let etimedout = this.eval_libc("ETIMEDOUT");
this.set_last_error(etimedout)?;
@ -249,7 +258,7 @@ fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
#[allow(clippy::arithmetic_side_effects)]
for _ in 0..val {
if let Some(thread) = this.futex_wake(addr_usize, bitset) {
this.unblock_thread(thread);
this.unblock_thread(thread, BlockReason::Futex { addr: addr_usize });
this.unregister_timeout_callback_if_exists(thread);
n += 1;
} else {

View file

@ -1,6 +1,5 @@
use std::time::SystemTime;
use crate::concurrency::sync::CondvarLock;
use crate::concurrency::thread::MachineCallback;
use crate::*;
@ -225,9 +224,10 @@ fn cond_set_clock_id<'mir, 'tcx: 'mir>(
fn reacquire_cond_mutex<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
thread: ThreadId,
condvar: CondvarId,
mutex: MutexId,
) -> InterpResult<'tcx> {
ecx.unblock_thread(thread);
ecx.unblock_thread(thread, BlockReason::Condvar(condvar));
if ecx.mutex_is_locked(mutex) {
ecx.mutex_enqueue_and_block(mutex, thread);
} else {
@ -242,9 +242,10 @@ fn reacquire_cond_mutex<'mir, 'tcx: 'mir>(
fn post_cond_signal<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
thread: ThreadId,
condvar: CondvarId,
mutex: MutexId,
) -> InterpResult<'tcx> {
reacquire_cond_mutex(ecx, thread, mutex)?;
reacquire_cond_mutex(ecx, thread, condvar, mutex)?;
// Waiting for the mutex is not included in the waiting time because we always need
// to acquire the mutex, even if we get a timeout.
ecx.unregister_timeout_callback_if_exists(thread);
@ -256,6 +257,7 @@ fn post_cond_signal<'mir, 'tcx: 'mir>(
fn release_cond_mutex_and_block<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
active_thread: ThreadId,
condvar: CondvarId,
mutex: MutexId,
) -> InterpResult<'tcx> {
if let Some(old_locked_count) = ecx.mutex_unlock(mutex, active_thread) {
@ -265,7 +267,7 @@ fn release_cond_mutex_and_block<'mir, 'tcx: 'mir>(
} else {
throw_ub_format!("awaiting on unlocked or owned by a different thread mutex");
}
ecx.block_thread(active_thread);
ecx.block_thread(active_thread, BlockReason::Condvar(condvar));
Ok(())
}
@ -792,12 +794,8 @@ fn pthread_cond_init(
fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = cond_get_id(this, cond_op)?;
if let Some((thread, lock)) = this.condvar_signal(id) {
if let CondvarLock::Mutex(mutex) = lock {
post_cond_signal(this, thread, mutex)?;
} else {
panic!("condvar should not have an rwlock on unix");
}
if let Some((thread, mutex)) = this.condvar_signal(id) {
post_cond_signal(this, thread, id, mutex)?;
}
Ok(0)
@ -810,12 +808,8 @@ fn pthread_cond_broadcast(
let this = self.eval_context_mut();
let id = cond_get_id(this, cond_op)?;
while let Some((thread, lock)) = this.condvar_signal(id) {
if let CondvarLock::Mutex(mutex) = lock {
post_cond_signal(this, thread, mutex)?;
} else {
panic!("condvar should not have an rwlock on unix");
}
while let Some((thread, mutex)) = this.condvar_signal(id) {
post_cond_signal(this, thread, id, mutex)?;
}
Ok(0)
@ -832,8 +826,8 @@ fn pthread_cond_wait(
let mutex_id = mutex_get_id(this, mutex_op)?;
let active_thread = this.get_active_thread();
release_cond_mutex_and_block(this, active_thread, mutex_id)?;
this.condvar_wait(id, active_thread, CondvarLock::Mutex(mutex_id));
release_cond_mutex_and_block(this, active_thread, id, mutex_id)?;
this.condvar_wait(id, active_thread, mutex_id);
Ok(0)
}
@ -866,15 +860,15 @@ fn pthread_cond_timedwait(
let timeout_time = if clock_id == this.eval_libc_i32("CLOCK_REALTIME") {
this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
Time::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap())
CallbackTime::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap())
} else if clock_id == this.eval_libc_i32("CLOCK_MONOTONIC") {
Time::Monotonic(this.machine.clock.anchor().checked_add(duration).unwrap())
CallbackTime::Monotonic(this.machine.clock.anchor().checked_add(duration).unwrap())
} else {
throw_unsup_format!("unsupported clock id: {}", clock_id);
};
release_cond_mutex_and_block(this, active_thread, mutex_id)?;
this.condvar_wait(id, active_thread, CondvarLock::Mutex(mutex_id));
release_cond_mutex_and_block(this, active_thread, id, mutex_id)?;
this.condvar_wait(id, active_thread, mutex_id);
// We return success for now and override it in the timeout callback.
this.write_scalar(Scalar::from_i32(0), dest)?;
@ -897,7 +891,7 @@ impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for Callback<'tcx> {
fn call(&self, ecx: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
// We are not waiting for the condvar any more, wait for the
// mutex instead.
reacquire_cond_mutex(ecx, self.active_thread, self.mutex_id)?;
reacquire_cond_mutex(ecx, self.active_thread, self.id, self.mutex_id)?;
// Remove the thread from the conditional variable.
ecx.condvar_remove_waiter(self.id, self.active_thread);

View file

@ -8,6 +8,7 @@
use rustc_target::abi::Size;
use rustc_target::spec::abi::Abi;
use crate::shims::alloc::EvalContextExt as _;
use crate::shims::os_str::bytes_to_os_str;
use crate::*;
use shims::foreign_items::EmulateForeignItemResult;

View file

@ -170,7 +170,7 @@ fn WaitOnAddress(
None
} else {
let duration = Duration::from_millis(timeout_ms.into());
Some(Time::Monotonic(this.machine.clock.now().checked_add(duration).unwrap()))
Some(CallbackTime::Monotonic(this.machine.clock.now().checked_add(duration).unwrap()))
};
// See the Linux futex implementation for why this fence exists.
@ -183,7 +183,7 @@ fn WaitOnAddress(
if futex_val == compare_val {
// If the values are the same, we have to block.
this.block_thread(thread);
this.block_thread(thread, BlockReason::Futex { addr });
this.futex_wait(addr, thread, u32::MAX);
if let Some(timeout_time) = timeout_time {
@ -202,7 +202,7 @@ fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for Callback<'tcx> {
fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
this.unblock_thread(self.thread);
this.unblock_thread(self.thread, BlockReason::Futex { addr: self.addr });
this.futex_remove_waiter(self.addr, self.thread);
let error_timeout = this.eval_windows("c", "ERROR_TIMEOUT");
this.set_last_error(error_timeout)?;
@ -233,8 +233,9 @@ fn WakeByAddressSingle(&mut self, ptr_op: &OpTy<'tcx, Provenance>) -> InterpResu
// See the Linux futex implementation for why this fence exists.
this.atomic_fence(AtomicFenceOrd::SeqCst)?;
if let Some(thread) = this.futex_wake(ptr.addr().bytes(), u32::MAX) {
this.unblock_thread(thread);
let addr = ptr.addr().bytes();
if let Some(thread) = this.futex_wake(addr, u32::MAX) {
this.unblock_thread(thread, BlockReason::Futex { addr });
this.unregister_timeout_callback_if_exists(thread);
}
@ -248,8 +249,9 @@ fn WakeByAddressAll(&mut self, ptr_op: &OpTy<'tcx, Provenance>) -> InterpResult<
// See the Linux futex implementation for why this fence exists.
this.atomic_fence(AtomicFenceOrd::SeqCst)?;
while let Some(thread) = this.futex_wake(ptr.addr().bytes(), u32::MAX) {
this.unblock_thread(thread);
let addr = ptr.addr().bytes();
while let Some(thread) = this.futex_wake(addr, u32::MAX) {
this.unblock_thread(thread, BlockReason::Futex { addr });
this.unregister_timeout_callback_if_exists(thread);
}

View file

@ -3,5 +3,5 @@ running 2 tests
test test::dev_dependency ... ok
test test::exported_symbol ... ok
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -1,11 +1,11 @@
running 2 tests
..
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME
imported main
running 6 tests
...i..
test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out
test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -1,13 +1,13 @@
running 2 tests
..
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME
imported main
running 6 tests
...i..
test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out
test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out; finished in $TIME
running 5 tests

View file

@ -1,12 +1,12 @@
running 0 tests
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in $TIME
imported main
running 1 test
test simple ... ok
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 5 filtered out
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 5 filtered out; finished in $TIME

View file

@ -1,14 +1,14 @@
running 0 tests
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in $TIME
imported main
running 1 test
test simple ... ok
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 5 filtered out
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 5 filtered out; finished in $TIME
running 0 tests

View file

@ -1,6 +1,6 @@
running 0 tests
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME
subcrate testing

View file

@ -7,5 +7,5 @@ test does_not_work_on_miri ... ignored
test fail_index_check - should panic ... ok
test simple ... ok
test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out
test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -1,6 +1,7 @@
//@only-target-windows: Uses win32 api functions
// We are making scheduler assumptions here.
//@compile-flags: -Zmiri-preemption-rate=0
//@error-in-other-file: deadlock
// On windows, joining main is not UB, but it will block a thread forever.

View file

@ -8,7 +8,28 @@ LL | assert_eq!(WaitForSingleObject(MAIN_THREAD, INFINITE), WAIT_OBJ
= note: inside closure at RUSTLIB/core/src/macros/mod.rs:LL:CC
= note: this error originates in the macro `assert_eq` (in Nightly builds, run with -Z macro-backtrace for more info)
error: deadlock: the evaluated program deadlocked
--> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
|
LL | let rc = unsafe { c::WaitForSingleObject(self.handle.as_raw_handle(), c::INFINITE) };
| ^ the evaluated program deadlocked
|
= note: BACKTRACE:
= note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
= note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
= note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
note: inside `main`
--> $DIR/windows_join_main.rs:LL:CC
|
LL | / thread::spawn(|| {
LL | | unsafe {
LL | | assert_eq!(WaitForSingleObject(MAIN_THREAD, INFINITE), WAIT_OBJECT_0);
LL | | }
LL | | })
LL | | .join()
| |___________^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error
error: aborting due to 2 previous errors

View file

@ -1,6 +1,7 @@
//@only-target-windows: Uses win32 api functions
// We are making scheduler assumptions here.
//@compile-flags: -Zmiri-preemption-rate=0
//@error-in-other-file: deadlock
// On windows, a thread joining itself is not UB, but it will deadlock.

View file

@ -7,7 +7,29 @@ LL | assert_eq!(WaitForSingleObject(native, INFINITE), WAIT_OBJECT_0
= note: BACKTRACE on thread `unnamed-ID`:
= note: inside closure at $DIR/windows_join_self.rs:LL:CC
error: deadlock: the evaluated program deadlocked
--> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
|
LL | let rc = unsafe { c::WaitForSingleObject(self.handle.as_raw_handle(), c::INFINITE) };
| ^ the evaluated program deadlocked
|
= note: BACKTRACE:
= note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
= note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
= note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
note: inside `main`
--> $DIR/windows_join_self.rs:LL:CC
|
LL | / thread::spawn(|| {
LL | | unsafe {
LL | | let native = GetCurrentThread();
LL | | assert_eq!(WaitForSingleObject(native, INFINITE), WAIT_OBJECT_0);
LL | | }
LL | | })
LL | | .join()
| |___________^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error
error: aborting due to 2 previous errors

View file

@ -0,0 +1,10 @@
//@ignore-target-windows: No libc on Windows
fn main() {
unsafe {
let p1 = libc::malloc(20);
// C made this UB...
let p2 = libc::realloc(p1, 0); //~ERROR: `realloc` with a size of zero
assert!(p2.is_null());
}
}

View file

@ -0,0 +1,15 @@
error: Undefined Behavior: `realloc` with a size of zero
--> $DIR/realloc-zero.rs:LL:CC
|
LL | let p2 = libc::realloc(p1, 0);
| ^^^^^^^^^^^^^^^^^^^^ `realloc` with a size of zero
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE:
= note: inside `main` at $DIR/realloc-zero.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -1,4 +1,5 @@
//@ignore-target-windows: No libc on Windows
//@error-in-other-file: deadlock
use std::cell::UnsafeCell;
use std::sync::Arc;

View file

@ -7,7 +7,26 @@ LL | assert_eq!(libc::pthread_mutex_lock(lock_copy.0.get() as *mut _
= note: BACKTRACE on thread `unnamed-ID`:
= note: inside closure at $DIR/libc_pthread_mutex_deadlock.rs:LL:CC
error: deadlock: the evaluated program deadlocked
--> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
|
LL | let ret = libc::pthread_join(self.id, ptr::null_mut());
| ^ the evaluated program deadlocked
|
= note: BACKTRACE:
= note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
= note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
= note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
note: inside `main`
--> $DIR/libc_pthread_mutex_deadlock.rs:LL:CC
|
LL | / thread::spawn(move || {
LL | | assert_eq!(libc::pthread_mutex_lock(lock_copy.0.get() as *mut _), 0);
LL | | })
LL | | .join()
| |_______________^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error
error: aborting due to 2 previous errors

View file

@ -1,4 +1,5 @@
//@ignore-target-windows: No libc on Windows
//@error-in-other-file: deadlock
use std::cell::UnsafeCell;
use std::sync::Arc;

View file

@ -7,7 +7,26 @@ LL | assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mu
= note: BACKTRACE on thread `unnamed-ID`:
= note: inside closure at $DIR/libc_pthread_rwlock_write_read_deadlock.rs:LL:CC
error: deadlock: the evaluated program deadlocked
--> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
|
LL | let ret = libc::pthread_join(self.id, ptr::null_mut());
| ^ the evaluated program deadlocked
|
= note: BACKTRACE:
= note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
= note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
= note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
note: inside `main`
--> $DIR/libc_pthread_rwlock_write_read_deadlock.rs:LL:CC
|
LL | / thread::spawn(move || {
LL | | assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mut _), 0);
LL | | })
LL | | .join()
| |_______________^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error
error: aborting due to 2 previous errors

View file

@ -1,4 +1,5 @@
//@ignore-target-windows: No libc on Windows
//@error-in-other-file: deadlock
use std::cell::UnsafeCell;
use std::sync::Arc;

View file

@ -7,7 +7,26 @@ LL | assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mu
= note: BACKTRACE on thread `unnamed-ID`:
= note: inside closure at $DIR/libc_pthread_rwlock_write_write_deadlock.rs:LL:CC
error: deadlock: the evaluated program deadlocked
--> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
|
LL | let ret = libc::pthread_join(self.id, ptr::null_mut());
| ^ the evaluated program deadlocked
|
= note: BACKTRACE:
= note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
= note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
= note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
note: inside `main`
--> $DIR/libc_pthread_rwlock_write_write_deadlock.rs:LL:CC
|
LL | / thread::spawn(move || {
LL | | assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mut _), 0);
LL | | })
LL | | .join()
| |_______________^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error
error: aborting due to 2 previous errors

View file

@ -0,0 +1,10 @@
//@error-in-other-file: aborted
//@normalize-stderr-test: "unsafe \{ libc::abort\(\) \}|crate::intrinsics::abort\(\);" -> "ABORT();"
//@normalize-stderr-test: "\| +\^+" -> "| ^"
#![feature(allocator_api)]
use std::alloc::*;
fn main() {
handle_alloc_error(Layout::for_value(&0));
}

View file

@ -0,0 +1,24 @@
memory allocation of 4 bytes failed
error: abnormal termination: the program aborted execution
--> RUSTLIB/std/src/sys/pal/PLATFORM/mod.rs:LL:CC
|
LL | ABORT();
| ^ the program aborted execution
|
= note: BACKTRACE:
= note: inside `std::sys::pal::PLATFORM::abort_internal` at RUSTLIB/std/src/sys/pal/PLATFORM/mod.rs:LL:CC
= note: inside `std::process::abort` at RUSTLIB/std/src/process.rs:LL:CC
= note: inside `std::alloc::rust_oom` at RUSTLIB/std/src/alloc.rs:LL:CC
= note: inside `std::alloc::_::__rg_oom` at RUSTLIB/std/src/alloc.rs:LL:CC
= note: inside `std::alloc::handle_alloc_error::rt_error` at RUSTLIB/alloc/src/alloc.rs:LL:CC
= note: inside `std::alloc::handle_alloc_error` at RUSTLIB/alloc/src/alloc.rs:LL:CC
note: inside `main`
--> $DIR/alloc_error_handler.rs:LL:CC
|
LL | handle_alloc_error(Layout::for_value(&0));
| ^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,49 @@
//@compile-flags: -Cpanic=abort
#![feature(start, core_intrinsics)]
#![feature(alloc_error_handler)]
#![feature(allocator_api)]
#![no_std]
extern crate alloc;
use alloc::alloc::*;
use core::fmt::Write;
#[path = "../../utils/mod.no_std.rs"]
mod utils;
#[alloc_error_handler]
fn alloc_error_handler(layout: Layout) -> ! {
let _ = writeln!(utils::MiriStderr, "custom alloc error handler: {layout:?}");
core::intrinsics::abort(); //~ERROR: aborted
}
// rustc requires us to provide some more things that aren't actually used by this test
mod plumbing {
use super::*;
#[panic_handler]
fn panic_handler(_: &core::panic::PanicInfo) -> ! {
loop {}
}
struct NoAlloc;
unsafe impl GlobalAlloc for NoAlloc {
unsafe fn alloc(&self, _: Layout) -> *mut u8 {
unreachable!();
}
unsafe fn dealloc(&self, _: *mut u8, _: Layout) {
unreachable!();
}
}
#[global_allocator]
static GLOBAL: NoAlloc = NoAlloc;
}
#[start]
fn start(_: isize, _: *const *const u8) -> isize {
handle_alloc_error(Layout::for_value(&0));
}

View file

@ -0,0 +1,27 @@
custom alloc error handler: Layout { size: 4, align: 4 (1 << 2) }
error: abnormal termination: the program aborted execution
--> $DIR/alloc_error_handler_custom.rs:LL:CC
|
LL | core::intrinsics::abort();
| ^^^^^^^^^^^^^^^^^^^^^^^^^ the program aborted execution
|
= note: BACKTRACE:
= note: inside `alloc_error_handler` at $DIR/alloc_error_handler_custom.rs:LL:CC
note: inside `_::__rg_oom`
--> $DIR/alloc_error_handler_custom.rs:LL:CC
|
LL | #[alloc_error_handler]
| ---------------------- in this procedural macro expansion
LL | fn alloc_error_handler(layout: Layout) -> ! {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: inside `alloc::alloc::handle_alloc_error::rt_error` at RUSTLIB/alloc/src/alloc.rs:LL:CC
= note: inside `alloc::alloc::handle_alloc_error` at RUSTLIB/alloc/src/alloc.rs:LL:CC
note: inside `start`
--> $DIR/alloc_error_handler_custom.rs:LL:CC
|
LL | handle_alloc_error(Layout::for_value(&0));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: this error originates in the attribute macro `alloc_error_handler` (in Nightly builds, run with -Z macro-backtrace for more info)
error: aborting due to 1 previous error

View file

@ -0,0 +1,47 @@
//@compile-flags: -Cpanic=abort
#![feature(start, core_intrinsics)]
#![feature(alloc_error_handler)]
#![feature(allocator_api)]
#![no_std]
extern crate alloc;
use alloc::alloc::*;
use core::fmt::Write;
#[path = "../../utils/mod.no_std.rs"]
mod utils;
// The default no_std alloc_error_handler is a panic.
#[panic_handler]
fn panic_handler(panic_info: &core::panic::PanicInfo) -> ! {
let _ = writeln!(utils::MiriStderr, "custom panic handler called!");
let _ = writeln!(utils::MiriStderr, "{panic_info}");
core::intrinsics::abort(); //~ERROR: aborted
}
// rustc requires us to provide some more things that aren't actually used by this test
mod plumbing {
use super::*;
struct NoAlloc;
unsafe impl GlobalAlloc for NoAlloc {
unsafe fn alloc(&self, _: Layout) -> *mut u8 {
unreachable!();
}
unsafe fn dealloc(&self, _: *mut u8, _: Layout) {
unreachable!();
}
}
#[global_allocator]
static GLOBAL: NoAlloc = NoAlloc;
}
#[start]
fn start(_: isize, _: *const *const u8) -> isize {
handle_alloc_error(Layout::for_value(&0));
}

View file

@ -0,0 +1,24 @@
custom panic handler called!
panicked at RUSTLIB/alloc/src/alloc.rs:LL:CC:
memory allocation of 4 bytes failed
error: abnormal termination: the program aborted execution
--> $DIR/alloc_error_handler_no_std.rs:LL:CC
|
LL | core::intrinsics::abort();
| ^^^^^^^^^^^^^^^^^^^^^^^^^ the program aborted execution
|
= note: BACKTRACE:
= note: inside `panic_handler` at $DIR/alloc_error_handler_no_std.rs:LL:CC
= note: inside `alloc::alloc::__alloc_error_handler::__rdl_oom` at RUSTLIB/alloc/src/alloc.rs:LL:CC
= note: inside `alloc::alloc::handle_alloc_error::rt_error` at RUSTLIB/alloc/src/alloc.rs:LL:CC
= note: inside `alloc::alloc::handle_alloc_error` at RUSTLIB/alloc/src/alloc.rs:LL:CC
note: inside `start`
--> $DIR/alloc_error_handler_no_std.rs:LL:CC
|
LL | handle_alloc_error(Layout::for_value(&0));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -1,6 +1,8 @@
//! Make sure that a retag acts like a write for the data race model.
//@revisions: stack tree
//@compile-flags: -Zmiri-preemption-rate=0
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
//@[tree]compile-flags: -Zmiri-tree-borrows
#[derive(Copy, Clone)]
struct SendPtr(*mut u8);

View file

@ -1,4 +1,6 @@
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
#![feature(new_uninit)]
use std::mem::MaybeUninit;

View file

@ -1,4 +1,6 @@
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
#![feature(new_uninit)]
use std::ptr::null_mut;

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread::spawn;

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread::spawn;

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread::spawn;

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::mem;
use std::thread::{sleep, spawn};

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::mem;
use std::thread::{sleep, spawn};

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::thread::spawn;

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::thread::spawn;

View file

@ -1,4 +1,6 @@
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, Ordering};

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::thread::spawn;

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::thread::spawn;

View file

@ -1,4 +1,6 @@
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, Ordering};

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::thread::spawn;

View file

@ -1,5 +1,8 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::{fence, AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

View file

@ -1,4 +1,7 @@
//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::{AtomicU16, AtomicU8, Ordering};
use std::thread;

View file

@ -1,4 +1,7 @@
//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::{AtomicU16, AtomicU8, Ordering};
use std::thread;

View file

@ -1,4 +1,7 @@
//@compile-flags: -Zmiri-preemption-rate=0.0
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::{AtomicU16, Ordering};
use std::thread;

View file

@ -1,4 +1,7 @@
//@compile-flags: -Zmiri-preemption-rate=0.0
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::{AtomicU16, Ordering};
use std::thread;

View file

@ -1,5 +1,7 @@
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::thread::spawn;

View file

@ -1,4 +1,6 @@
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, Ordering};

View file

@ -1,4 +1,6 @@
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread::spawn;

View file

@ -1,4 +1,6 @@
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread::{sleep, spawn};

View file

@ -1,4 +1,6 @@
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread::spawn;

View file

@ -1,4 +1,6 @@
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread::spawn;

View file

@ -1,4 +1,7 @@
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::thread;
#[derive(Copy, Clone)]

View file

@ -1,5 +1,7 @@
// We want to control preemption here.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::thread::spawn;

View file

@ -1,4 +1,6 @@
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
// Avoid accidental synchronization via address reuse inside `thread::spawn`.
//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, Ordering};

View file

@ -1,31 +1,11 @@
//@compile-flags: -Cpanic=abort
#![feature(start, core_intrinsics)]
#![no_std]
//@compile-flags: -Cpanic=abort
// windows tls dtors go through libstd right now, thus this test
// cannot pass. When windows tls dtors go through the special magic
// windows linker section, we can run this test on windows again.
//@ignore-target-windows: no-std not supported on Windows
// Plumbing to let us use `writeln!` to host stderr:
extern "Rust" {
    fn miri_write_to_stderr(bytes: &[u8]);
}
struct HostErr;
use core::fmt::Write;
impl Write for HostErr {
    fn write_str(&mut self, s: &str) -> core::fmt::Result {
        unsafe {
            miri_write_to_stderr(s.as_bytes());
        }
        Ok(())
    }
}
// Aaaand the test:
#[path = "../../utils/mod.no_std.rs"]
mod utils;
#[start]
fn start(_: isize, _: *const *const u8) -> isize {
@ -34,6 +14,6 @@ fn start(_: isize, _: *const *const u8) -> isize {
#[panic_handler]
fn panic_handler(panic_info: &core::panic::PanicInfo) -> ! {
    writeln!(HostErr, "{panic_info}").ok();
    writeln!(utils::MiriStderr, "{panic_info}").ok();
    core::intrinsics::abort(); //~ ERROR: the program aborted execution
}

View file

@ -1,4 +1,5 @@
//@compile-flags: -Zmiri-preemption-rate=0
// Avoid accidental synchronization via address reuse.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0
use std::thread;
#[derive(Copy, Clone)]

View file

@ -1,5 +1,6 @@
//! Make sure that a retag acts like a read for the data race model.
//@compile-flags: -Zmiri-preemption-rate=0
// Avoid accidental synchronization via address reuse.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0
#[derive(Copy, Clone)]
struct SendPtr(*mut u8);

View file

@ -15,13 +15,13 @@ note: inside `main`
LL | drop(slice1.cmp(slice2));
| ^^^^^^^^^^^^^^^^^^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
Uninitialized memory occurred at ALLOC[0x4..0x10], in this allocation:
ALLOC (Rust heap, size: 32, align: 8) {
    0x00 │ 41 42 43 44 __ __ __ __ __ __ __ __ __ __ __ __ │ ABCD░░░░░░░░░░░░
    0x10 │ 00 __ __ __ __ __ __ __ __ __ __ __ __ __ __ __ │ .░░░░░░░░░░░░░░░
}
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -15,8 +15,6 @@ note: inside `main`
LL | drop(slice1.cmp(slice2));
| ^^^^^^^^^^^^^^^^^^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
Uninitialized memory occurred at ALLOC[0x4..0x8], in this allocation:
ALLOC (Rust heap, size: 16, align: 8) {
╾42[ALLOC]<TAG> (1 ptr byte)╼ 12 13 ╾43[ALLOC]<TAG> (1 ptr byte)╼ __ __ __ __ __ __ __ __ __ __ __ __ │ ━..━░░░░░░░░░░░░
@ -28,5 +26,7 @@ ALLOC (global (static or const), size: 1, align: 1) {
00 │ .
}
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -1,5 +1,6 @@
// We want to control preemption here.
//@compile-flags: -Zmiri-preemption-rate=0
// Avoid accidental synchronization via address reuse.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0
#![feature(core_intrinsics)]

View file

@ -1,5 +1,6 @@
// We want to control preemption here.
//@compile-flags: -Zmiri-preemption-rate=0
// Avoid accidental synchronization via address reuse.
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0
use std::sync::atomic::Ordering::*;
use std::sync::atomic::{AtomicU16, AtomicU32};

View file

@ -0,0 +1,20 @@
#![feature(allocator_api, alloc_error_hook)]
use std::alloc::*;
struct Bomb;
impl Drop for Bomb {
    fn drop(&mut self) {
        eprintln!("yes we are unwinding!");
    }
}
#[allow(unreachable_code, unused_variables)]
fn main() {
    // This is a particularly tricky hook, since it unwinds, which the default one does not.
    set_alloc_error_hook(|_layout| panic!("alloc error hook called"));
    let bomb = Bomb;
    handle_alloc_error(Layout::for_value(&0));
    std::mem::forget(bomb); // defuse unwinding bomb
}

View file

@ -0,0 +1,5 @@
thread 'main' panicked at $DIR/alloc_error_handler_hook.rs:LL:CC:
alloc error hook called
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
note: in Miri, you may have to set `-Zmiri-env-forward=RUST_BACKTRACE` for the environment variable to have an effect
yes we are unwinding!

View file

@ -0,0 +1,18 @@
//@compile-flags: -Zoom=panic
#![feature(allocator_api)]
use std::alloc::*;
struct Bomb;
impl Drop for Bomb {
    fn drop(&mut self) {
        eprintln!("yes we are unwinding!");
    }
}
#[allow(unreachable_code, unused_variables)]
fn main() {
    let bomb = Bomb;
    handle_alloc_error(Layout::for_value(&0));
    std::mem::forget(bomb); // defuse unwinding bomb
}

View file

@ -0,0 +1,5 @@
thread 'main' panicked at RUSTLIB/std/src/alloc.rs:LL:CC:
memory allocation of 4 bytes failed
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
note: in Miri, you may have to set `-Zmiri-env-forward=RUST_BACKTRACE` for the environment variable to have an effect
yes we are unwinding!

View file

@ -0,0 +1,26 @@
//! Ensure that the MIR validator runs on Miri's input.
//@rustc-env:RUSTC_ICE=0
//@normalize-stderr-test: "\n +[0-9]+:.+" -> ""
//@normalize-stderr-test: "\n +at .+" -> ""
//@normalize-stderr-test: "\n +\[\.\.\. omitted [0-9]+ frames? \.\.\.\].*" -> ""
//@normalize-stderr-test: "\n[ =]*note:.*" -> ""
//@normalize-stderr-test: "DefId\([^()]*\)" -> "DefId"
// Somehow on rustc Windows CI, the "Miri caused an ICE" message is not shown
// and we don't even get a regular panic; rustc aborts with a different exit code instead.
//@ignore-host-windows
#![feature(custom_mir, core_intrinsics)]
use core::intrinsics::mir::*;
#[custom_mir(dialect = "runtime", phase = "optimized")]
pub fn main() {
    mir! {
        let x: i32;
        let tuple: (*mut i32,);
        {
            tuple.0 = core::ptr::addr_of_mut!(x);
            // Deref at the wrong place!
            *(tuple.0) = 1;
            Return()
        }
    }
}

View file

@ -0,0 +1,21 @@
thread 'rustc' panicked at compiler/rustc_const_eval/src/transform/validate.rs:LL:CC:
broken MIR in Item(DefId) (after phase change to runtime-optimized) at bb0[1]:
(*(_2.0: *mut i32)), has deref at the wrong place
stack backtrace:
error: the compiler unexpectedly panicked. this is a bug.
query stack during panic:
#0 [optimized_mir] optimizing MIR for `main`
end of query stack
Miri caused an ICE during evaluation. Here's the interpreter backtrace at the time of the panic:
--> RUSTLIB/core/src/ops/function.rs:LL:CC
|
LL | extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|

Some files were not shown because too many files have changed in this diff Show more