Auto merge of #103392 - RalfJung:miri, r=oli-obk

update Miri

I had to use a hacked version of josh to create this, so let's be careful with merging this and maybe wait a bit to see if the josh issue becomes clearer. But the history looks good to me; we are not adding duplicates of rustc commits that were previously mirrored to Miri.

This also adds some cross-testing of Miri in `x.py`.
bors 2022-10-25 12:33:39 +00:00
commit 85d089b41e
67 changed files with 1522 additions and 470 deletions


@ -2262,6 +2262,7 @@ dependencies = [
"rand 0.8.5",
"regex",
"rustc-workspace-hack",
"rustc_version",
"shell-escape",
"smallvec",
"ui_test",


@ -461,24 +461,30 @@ fn run(self, builder: &Builder<'_>) {
pub struct Miri {
stage: u32,
host: TargetSelection,
target: TargetSelection,
}
impl Step for Miri {
type Output = ();
const ONLY_HOSTS: bool = true;
const ONLY_HOSTS: bool = false;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("src/tools/miri")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Miri { stage: run.builder.top_stage, host: run.target });
run.builder.ensure(Miri {
stage: run.builder.top_stage,
host: run.build_triple(),
target: run.target,
});
}
/// Runs `cargo test` for miri.
fn run(self, builder: &Builder<'_>) {
let stage = self.stage;
let host = self.host;
let target = self.target;
let compiler = builder.compiler(stage, host);
// We need the stdlib for the *next* stage, as it was built with this compiler that also built Miri.
// Except if we are at stage 2, the bootstrap loop is complete and we can stick with our current stage.
@ -495,7 +501,7 @@ fn run(self, builder: &Builder<'_>) {
builder.ensure(compile::Std::new(compiler_std, host));
let sysroot = builder.sysroot(compiler_std);
// # Run `cargo miri setup`.
// # Run `cargo miri setup` for the given target.
let mut cargo = tool::prepare_tool_cargo(
builder,
compiler,
@ -508,6 +514,7 @@ fn run(self, builder: &Builder<'_>) {
);
cargo.add_rustc_lib_path(builder, compiler);
cargo.arg("--").arg("miri").arg("setup");
cargo.arg("--target").arg(target.rustc_target_arg());
// Tell `cargo miri setup` where to find the sources.
cargo.env("MIRI_LIB_SRC", builder.src.join("library"));
@ -556,19 +563,56 @@ fn run(self, builder: &Builder<'_>) {
cargo.add_rustc_lib_path(builder, compiler);
// miri tests need to know about the stage sysroot
cargo.env("MIRI_SYSROOT", miri_sysroot);
cargo.env("MIRI_SYSROOT", &miri_sysroot);
cargo.env("MIRI_HOST_SYSROOT", sysroot);
cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
cargo.env("MIRI", miri);
cargo.env("MIRI", &miri);
// propagate --bless
if builder.config.cmd.bless() {
cargo.env("MIRI_BLESS", "Gesundheit");
}
// Set the target.
cargo.env("MIRI_TEST_TARGET", target.rustc_target_arg());
// Forward test filters.
cargo.arg("--").args(builder.config.cmd.test_args());
let mut cargo = Command::from(cargo);
builder.run(&mut cargo);
// # Run `cargo miri test`.
// This is just a smoke test (Miri's own CI invokes this in a bunch of different ways and ensures
// that we get the desired output), but that is sufficient to make sure that the libtest harness
// itself executes properly under Miri.
let mut cargo = tool::prepare_tool_cargo(
builder,
compiler,
Mode::ToolRustc,
host,
"run",
"src/tools/miri/cargo-miri",
SourceType::Submodule,
&[],
);
cargo.add_rustc_lib_path(builder, compiler);
cargo.arg("--").arg("miri").arg("test");
cargo
.arg("--manifest-path")
.arg(builder.src.join("src/tools/miri/test-cargo-miri/Cargo.toml"));
cargo.arg("--target").arg(target.rustc_target_arg());
cargo.arg("--tests"); // don't run doctests, they are too confused by the staging
cargo.arg("--").args(builder.config.cmd.test_args());
// Tell `cargo miri` where to find things.
cargo.env("MIRI_SYSROOT", &miri_sysroot);
cargo.env("MIRI_HOST_SYSROOT", sysroot);
cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
cargo.env("MIRI", &miri);
// Debug things.
cargo.env("RUST_BACKTRACE", "1");
let mut cargo = Command::from(cargo);
builder.run(&mut cargo);
}
}


@ -25,3 +25,8 @@ python3 "$X_PY" test --stage 2 check-tools
python3 "$X_PY" test --stage 2 src/tools/clippy
python3 "$X_PY" test --stage 2 src/tools/rustfmt
python3 "$X_PY" test --stage 2 src/tools/miri
# We natively run this script on x86_64-unknown-linux-gnu and x86_64-pc-windows-msvc.
# Also cover some other targets (on both of these hosts) via cross-testing.
python3 "$X_PY" test --stage 2 src/tools/miri --target i686-pc-windows-msvc
#FIXME(https://github.com/rust-lang/rust/issues/103519): macOS testing is currently disabled
# python3 "$X_PY" test --stage 2 src/tools/miri --target aarch64-apple-darwin


@ -22,6 +22,7 @@ jobs:
RUST_BACKTRACE: 1
HOST_TARGET: ${{ matrix.host_target }}
strategy:
fail-fast: false
matrix:
build: [linux64, macos, win32]
include:
@ -61,7 +62,7 @@ jobs:
restore-keys: ${{ runner.os }}-cargo
- name: Install rustup-toolchain-install-master
if: ${{ steps.cache-npm.outputs.cache-hit != 'true' }}
if: ${{ steps.cache.outputs.cache-hit != 'true' }}
shell: bash
run: |
cargo install -f rustup-toolchain-install-master
@ -89,11 +90,46 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install required toolchain
# We need a toolchain that can actually build Miri; just a nightly won't do.
# This is exactly duplicated from above. GHA is pretty terrible when it comes
# to avoiding code duplication.
# Cache the global cargo directory, but NOT the local `target` directory which
# we cannot reuse anyway when the nightly changes (and it grows quite large
# over time).
- name: Add cache for cargo
id: cache
uses: actions/cache@v3
with:
path: |
# Taken from <https://doc.rust-lang.org/nightly/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci>.
~/.cargo/bin
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git/db
# contains package information of crates installed via `cargo install`.
~/.cargo/.crates.toml
~/.cargo/.crates2.json
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: ${{ runner.os }}-cargo
- name: Install rustup-toolchain-install-master
if: ${{ steps.cache.outputs.cache-hit != 'true' }}
shell: bash
run: |
cargo install -f rustup-toolchain-install-master
- name: Install "master" toolchain
shell: bash
run: |
cargo install rustup-toolchain-install-master # TODO: cache this?
./rustup-toolchain "" -c clippy
- name: Show Rust version
run: |
rustup show
rustc -Vv
cargo -V
- name: rustfmt
run: ./miri fmt --check
- name: clippy


@ -104,7 +104,7 @@ MIRI_LOG=rustc_mir::interpret=info,miri::stacked_borrows ./miri run tests/pass/v
In addition, you can set `MIRI_BACKTRACE=1` to get a backtrace of where an
evaluation error was originally raised.
#### UI testing
### UI testing
We use ui-testing in Miri, meaning we generate `.stderr` and `.stdout` files for the output
produced by Miri. You can use `./miri bless` to automatically (re)generate these files when
@ -257,7 +257,7 @@ Note: When you are working with a locally built rustc or any other toolchain tha
is not the same as the one in `rust-version`, you should not have `.auto-everything` or
`.auto-toolchain` as that will keep resetting your toolchain.
```
```sh
rm -f .auto-everything .auto-toolchain
```
@ -275,3 +275,51 @@ see <https://rustc-dev-guide.rust-lang.org/building/how-to-build-and-run.html>.
With this, you should now have a working development setup! See
[above](#building-and-testing-miri) for how to proceed working on Miri.
## Advanced topic: Syncing with the rustc repo
We use the [`josh` proxy](https://github.com/josh-project/josh) to transmit
changes between the rustc and Miri repositories. For now, a fork of josh needs to be built
from source. This downloads and runs josh:
```sh
git clone https://github.com/RalfJung/josh
cd josh
cargo run --release -p josh-proxy -- --local=$(pwd)/local --remote=https://github.com --no-background
```
### Importing changes from the rustc repo
We assume we start on an up-to-date master branch in the Miri repo.
```sh
# Fetch the rustc side of the history. This takes about 5 minutes the first time.
# Do NOT change that commit ID, it needs to be exactly this!
git fetch http://localhost:8000/rust-lang/rust.git:at_commit=75dd959a3a40eb5b4574f8d2e23aa6efbeb33573[:prefix=src/tools/miri]:/src/tools/miri.git master
# Include that history into ours.
git merge FETCH_HEAD -m "merge rustc history"
# Update toolchain reference and apply formatting.
./rustup-toolchain HEAD && ./miri fmt
git commit -am "rustup"
```
Now push this to a new branch in your Miri fork, and create a PR. It is worth
running `./miri test` locally in parallel: the test suite in the Miri repo is
stricter than the one on the rustc side, so some small tweaks might be needed.
### Exporting changes to the rustc repo
We will use the josh proxy to push to your fork of rustc. Make sure that the
master branch of your fork is up-to-date, and that no branch called `miri`
exists in your fork. Then run the following in the Miri repo, assuming we are
on an up-to-date master branch:
```sh
# Push the Miri changes to your rustc fork (substitute your github handle for YOUR_NAME).
# Do NOT change that commit ID, it needs to be exactly this!
git push http://localhost:8000/YOUR_NAME/rust.git:at_commit=75dd959a3a40eb5b4574f8d2e23aa6efbeb33573[:prefix=src/tools/miri]:/src/tools/miri.git -o base=master HEAD:miri
```
This will create a new branch in your fork, and the output should include a link
to create a rustc PR that will integrate those changes into the main repository.


@ -320,9 +320,9 @@ dependencies = [
[[package]]
name = "libffi-sys"
version = "2.0.0"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab4106b7f09d7b87d021334d5618fac1dfcfb824d4c5fe111ff0074dfd242e15"
checksum = "84e78d02e5a8eae9c24c38ce6e6026f80e16dff76adcdae4bc5c6c52c2de4a60"
dependencies = [
"cc",
]
@ -419,6 +419,7 @@ dependencies = [
"rand",
"regex",
"rustc-workspace-hack",
"rustc_version",
"shell-escape",
"smallvec",
"ui_test",


@ -31,14 +31,17 @@ smallvec = "1.7"
rustc-workspace-hack = "1.0.0"
measureme = "10.0.0"
[target."cfg(unix)".dependencies]
[target.'cfg(unix)'.dependencies]
libc = "0.2"
[target.'cfg(target_os = "linux")'.dependencies]
libffi = "3.0.0"
libloading = "0.7"
[dev-dependencies]
colored = "2"
ui_test = "0.3.1"
rustc_version = "0.4"
# Features chosen to match those required by env_logger, to avoid rebuilds
regex = { version = "1.5.5", default-features = false, features = ["perf", "std"] }
lazy_static = "1.4.0"


@ -377,6 +377,11 @@ to Miri failing to detect cases of undefined behavior in a program.
* `-Zmiri-retag-fields` changes Stacked Borrows retagging to recurse into fields.
This means that references in fields of structs/enums/tuples/arrays/... are retagged,
and in particular, they are protected when passed as function arguments.
* `-Zmiri-retag-fields=<all|none|scalar>` controls when Stacked Borrows retagging recurses into
fields. `all` means it always recurses (like `-Zmiri-retag-fields`), `none` means it never
recurses (the default), `scalar` means it only recurses for types where we would also emit
`noalias` annotations in the generated LLVM IR (types passed as individual scalars or pairs of
scalars).
* `-Zmiri-tag-gc=<blocks>` configures how often the pointer tag garbage collector runs. The default
is to search for and remove unreachable tags once every `10000` basic blocks. Setting this to
`0` disables the garbage collector, which causes some programs to have explosive memory usage
@ -435,11 +440,10 @@ Moreover, Miri recognizes some environment variables:
purpose.
* `MIRI_NO_STD` (recognized by `cargo miri` and the test suite) makes sure that the target's
sysroot is built without libstd. This allows testing and running no_std programs.
* `MIRI_BLESS` (recognized by the test suite) overwrite all `stderr` and `stdout` files
instead of checking whether the output matches.
* `MIRI_SKIP_UI_CHECKS` (recognized by the test suite) don't check whether the
`stderr` or `stdout` files match the actual output. Useful for the rustc test suite
which has subtle differences that we don't care about.
* `MIRI_BLESS` (recognized by the test suite and `cargo-miri-test/run-test.py`): overwrite all
`stderr` and `stdout` files instead of checking whether the output matches.
* `MIRI_SKIP_UI_CHECKS` (recognized by the test suite): don't check whether the
`stderr` or `stdout` files match the actual output.
The following environment variables are *internal* and must not be used by
anyone but Miri itself. They are used to communicate between different Miri
@ -532,6 +536,27 @@ extern "Rust" {
/// This is internal and unstable and should not be used; we give it here
/// just to be complete.
fn miri_start_panic(payload: *mut u8) -> !;
/// Miri-provided extern function to get the internal unique identifier for the allocation that a pointer
/// points to. This is only useful as an input to `miri_print_stacks`, and it is a separate call because
/// getting a pointer to an allocation at runtime can change the borrow stacks in the allocation.
fn miri_get_alloc_id(ptr: *const ()) -> u64;
/// Miri-provided extern function to print (from the interpreter, not the program) the contents of all
/// borrow stacks in an allocation. The format of what this emits is unstable and may change at any time.
/// In particular, users should be aware that Miri will periodically attempt to garbage collect the
/// contents of all stacks. Callers of this function may wish to pass `-Zmiri-tag-gc=0` to disable the GC.
fn miri_print_stacks(alloc_id: u64);
/// Miri-provided extern function to print (from the interpreter, not the
/// program) the contents of a section of program memory, as bytes. Bytes
/// written using this function will emerge from the interpreter's stdout.
fn miri_write_to_stdout(bytes: &[u8]);
/// Miri-provided extern function to print (from the interpreter, not the
/// program) the contents of a section of program memory, as bytes. Bytes
/// written using this function will emerge from the interpreter's stderr.
fn miri_write_to_stderr(bytes: &[u8]);
}
```
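
For illustration (not part of the diff), a program could exercise the new shims roughly like this. The sketch assumes it is executed under `cargo miri run`; natively, these `extern` symbols would not resolve. Passing `-Zmiri-tag-gc=0` via `MIRIFLAGS` keeps the printed stacks from being garbage-collected mid-run, as the doc comment above suggests.

```rust
// Standalone sketch: dump the borrow stacks of a local variable from inside the program.
extern "Rust" {
    fn miri_get_alloc_id(ptr: *const ()) -> u64;
    fn miri_print_stacks(alloc_id: u64);
    fn miri_write_to_stderr(bytes: &[u8]);
}

fn main() {
    let x = 0u32;
    unsafe {
        // Printed by the interpreter itself, bypassing the program's stderr.
        miri_write_to_stderr(b"inspecting the borrow stacks of `x`\n");
        // Look up the allocation backing `x`, then print its borrow stacks.
        let id = miri_get_alloc_id(&x as *const u32 as *const ());
        miri_print_stacks(id);
    }
}
```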


@ -5,6 +5,7 @@
use std::thread;
#[derive(Deserialize)]
#[allow(unused)]
struct DeriveStruct {
buffer: Vec<i16>,
}


@ -1 +1 @@
acb8934fd57b3c2740c4abac0a5728c2c9b1423b
b1ab3b738ac718da74cd4aa0bb7f362d0adbdf84


@ -32,7 +32,7 @@
};
use rustc_session::{config::CrateType, search_paths::PathKind, CtfeBacktrace};
use miri::{BacktraceStyle, ProvenanceMode};
use miri::{BacktraceStyle, ProvenanceMode, RetagFields};
struct MiriCompilerCalls {
miri_config: miri::MiriConfig,
@ -426,7 +426,14 @@ fn main() {
} else if arg == "-Zmiri-mute-stdout-stderr" {
miri_config.mute_stdout_stderr = true;
} else if arg == "-Zmiri-retag-fields" {
miri_config.retag_fields = true;
miri_config.retag_fields = RetagFields::Yes;
} else if let Some(retag_fields) = arg.strip_prefix("-Zmiri-retag-fields=") {
miri_config.retag_fields = match retag_fields {
"all" => RetagFields::Yes,
"none" => RetagFields::No,
"scalar" => RetagFields::OnlyScalar,
_ => show_error!("`-Zmiri-retag-fields` can only be `all`, `none`, or `scalar`"),
};
} else if arg == "-Zmiri-track-raw-pointers" {
eprintln!(
"WARNING: `-Zmiri-track-raw-pointers` has no effect; it is enabled by default"


@ -0,0 +1,204 @@
use std::collections::VecDeque;
use std::num::NonZeroU32;
use rustc_index::vec::Idx;
use super::sync::EvalContextExtPriv;
use super::thread::MachineCallback;
use super::vector_clock::VClock;
use crate::*;
declare_id!(InitOnceId);
/// A thread waiting on an InitOnce object.
struct InitOnceWaiter<'mir, 'tcx> {
/// The thread that is waiting.
thread: ThreadId,
/// The callback that should be executed, after the thread has been woken up.
callback: Box<dyn MachineCallback<'mir, 'tcx> + 'tcx>,
}
impl<'mir, 'tcx> std::fmt::Debug for InitOnceWaiter<'mir, 'tcx> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("InitOnce")
.field("thread", &self.thread)
.field("callback", &"dyn MachineCallback")
.finish()
}
}
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
/// The current status of a one time initialization.
pub enum InitOnceStatus {
#[default]
Uninitialized,
Begun,
Complete,
}
/// The one time initialization state.
#[derive(Default, Debug)]
pub(super) struct InitOnce<'mir, 'tcx> {
status: InitOnceStatus,
waiters: VecDeque<InitOnceWaiter<'mir, 'tcx>>,
data_race: VClock,
}
impl<'mir, 'tcx> VisitTags for InitOnce<'mir, 'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
for waiter in self.waiters.iter() {
waiter.callback.visit_tags(visit);
}
}
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn init_once_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64,
) -> InterpResult<'tcx, InitOnceId> {
let this = self.eval_context_mut();
this.init_once_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset))
}
/// Provides the closure with the next InitOnceId. Creates that InitOnce if the closure returns None,
/// otherwise returns the value from the closure.
#[inline]
fn init_once_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, InitOnceId>
where
F: FnOnce(
&mut MiriInterpCx<'mir, 'tcx>,
InitOnceId,
) -> InterpResult<'tcx, Option<InitOnceId>>,
{
let this = self.eval_context_mut();
let next_index = this.machine.threads.sync.init_onces.next_index();
if let Some(old) = existing(this, next_index)? {
Ok(old)
} else {
let new_index = this.machine.threads.sync.init_onces.push(Default::default());
assert_eq!(next_index, new_index);
Ok(new_index)
}
}
#[inline]
fn init_once_status(&mut self, id: InitOnceId) -> InitOnceStatus {
let this = self.eval_context_ref();
this.machine.threads.sync.init_onces[id].status
}
/// Put the thread into the queue waiting for the initialization.
#[inline]
fn init_once_enqueue_and_block(
&mut self,
id: InitOnceId,
thread: ThreadId,
callback: Box<dyn MachineCallback<'mir, 'tcx> + 'tcx>,
) {
let this = self.eval_context_mut();
let init_once = &mut this.machine.threads.sync.init_onces[id];
assert_ne!(init_once.status, InitOnceStatus::Complete, "queueing on complete init once");
init_once.waiters.push_back(InitOnceWaiter { thread, callback });
this.block_thread(thread);
}
/// Begin initializing this InitOnce. Must only be called after checking that it is currently
/// uninitialized.
#[inline]
fn init_once_begin(&mut self, id: InitOnceId) {
let this = self.eval_context_mut();
let init_once = &mut this.machine.threads.sync.init_onces[id];
assert_eq!(
init_once.status,
InitOnceStatus::Uninitialized,
"begining already begun or complete init once"
);
init_once.status = InitOnceStatus::Begun;
}
#[inline]
fn init_once_complete(&mut self, id: InitOnceId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let init_once = &mut this.machine.threads.sync.init_onces[id];
assert_eq!(
init_once.status,
InitOnceStatus::Begun,
"completing already complete or uninit init once"
);
init_once.status = InitOnceStatus::Complete;
// Each complete happens-before the end of the wait
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut init_once.data_race, current_thread);
}
// Wake up everyone.
// need to take the queue to avoid having `this` be borrowed multiple times
for waiter in std::mem::take(&mut init_once.waiters) {
// End of the wait happens-before woken-up thread.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(
&this.machine.threads.sync.init_onces[id].data_race,
waiter.thread,
);
}
this.unblock_thread(waiter.thread);
// Call callback, with the woken-up thread as `current`.
this.set_active_thread(waiter.thread);
waiter.callback.call(this)?;
this.set_active_thread(current_thread);
}
Ok(())
}
#[inline]
fn init_once_fail(&mut self, id: InitOnceId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let init_once = &mut this.machine.threads.sync.init_onces[id];
assert_eq!(
init_once.status,
InitOnceStatus::Begun,
"failing already completed or uninit init once"
);
// Each complete happens-before the end of the wait
// FIXME: should this really induce synchronization? If we think of it as a lock, then yes,
// but the docs don't talk about such details.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut init_once.data_race, current_thread);
}
// Wake up one waiting thread, so they can go ahead and try to init this.
if let Some(waiter) = init_once.waiters.pop_front() {
// End of the wait happens-before woken-up thread.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(
&this.machine.threads.sync.init_onces[id].data_race,
waiter.thread,
);
}
this.unblock_thread(waiter.thread);
// Call callback, with the woken-up thread as `current`.
this.set_active_thread(waiter.thread);
waiter.callback.call(this)?;
this.set_active_thread(current_thread);
} else {
// Nobody there to take this, so go back to 'uninit'
init_once.status = InitOnceStatus::Uninitialized;
}
Ok(())
}
}
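
As an aside (not part of the diff), the `status` field above implements a small one-time-initialization state machine. The standalone sketch below models just the transitions asserted by `init_once_begin`, `init_once_complete`, and the no-waiter branch of `init_once_fail`:

```rust
// Standalone model of the InitOnce status transitions (illustrative only).
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
enum Status {
    #[default]
    Uninitialized,
    Begun,
    Complete,
}

struct InitOnce {
    status: Status,
}

impl InitOnce {
    /// Like `init_once_begin`: only an uninitialized InitOnce may be begun.
    fn begin(&mut self) {
        assert_eq!(self.status, Status::Uninitialized);
        self.status = Status::Begun;
    }
    /// Like `init_once_complete`: completion is final; this is where waiters get woken.
    fn complete(&mut self) {
        assert_eq!(self.status, Status::Begun);
        self.status = Status::Complete;
    }
    /// Like the no-waiter branch of `init_once_fail`: reset so another thread can retry.
    fn fail(&mut self) {
        assert_eq!(self.status, Status::Begun);
        self.status = Status::Uninitialized;
    }
}

fn main() {
    let mut once = InitOnce { status: Status::default() };
    once.begin();
    once.fail(); // the first attempt failed, so the state is reset
    once.begin();
    once.complete(); // initialized for good
}
```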


@ -1,6 +1,8 @@
pub mod data_race;
mod range_object_map;
#[macro_use]
pub mod sync;
pub mod init_once;
pub mod thread;
mod vector_clock;
pub mod weak_memory;


@ -7,9 +7,15 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_index::vec::{Idx, IndexVec};
use super::init_once::InitOnce;
use super::vector_clock::VClock;
use crate::*;
pub trait SyncId {
fn from_u32(id: u32) -> Self;
fn to_u32(&self) -> u32;
}
/// We cannot use the `newtype_index!` macro because we have to use 0 as a
/// sentinel value meaning that the identifier is not assigned. This is because
/// the pthreads static initializers initialize memory with zeros (see the
@ -21,11 +27,14 @@ macro_rules! declare_id {
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct $name(NonZeroU32);
impl $name {
impl SyncId for $name {
// Panics if `id == 0`.
pub fn from_u32(id: u32) -> Self {
fn from_u32(id: u32) -> Self {
Self(NonZeroU32::new(id).unwrap())
}
fn to_u32(&self) -> u32 {
self.0.get()
}
}
impl Idx for $name {
@ -151,16 +160,58 @@ struct FutexWaiter {
/// The state of all synchronization variables.
#[derive(Default, Debug)]
pub(crate) struct SynchronizationState {
pub(crate) struct SynchronizationState<'mir, 'tcx> {
mutexes: IndexVec<MutexId, Mutex>,
rwlocks: IndexVec<RwLockId, RwLock>,
condvars: IndexVec<CondvarId, Condvar>,
futexes: FxHashMap<u64, Futex>,
pub(super) init_onces: IndexVec<InitOnceId, InitOnce<'mir, 'tcx>>,
}
impl<'mir, 'tcx> VisitTags for SynchronizationState<'mir, 'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
for init_once in self.init_onces.iter() {
init_once.visit_tags(visit);
}
}
}
// Private extension trait for local helper methods
impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
pub(super) trait EvalContextExtPriv<'mir, 'tcx: 'mir>:
crate::MiriInterpCxExt<'mir, 'tcx>
{
#[inline]
// Miri sync structures contain zero-initialized ids stored at some offset behind a pointer
fn get_or_create_id<Id: SyncId>(
&mut self,
next_id: Id,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64,
) -> InterpResult<'tcx, Option<Id>> {
let this = self.eval_context_mut();
let value_place =
this.deref_operand_and_offset(lock_op, offset, this.machine.layouts.u32)?;
let (old, success) = this
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, this.machine.layouts.u32),
Scalar::from_u32(next_id.to_u32()),
AtomicRwOrd::Relaxed, // deliberately *no* synchronization
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair();
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(Id::from_u32(old.to_u32().expect("layout is u32")))
})
}
/// Take a reader out of the queue waiting for the lock.
/// Returns `true` if some thread got the rwlock.
#[inline]
@ -210,11 +261,31 @@ fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool {
// situations.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
#[inline]
/// Create state for a new mutex.
fn mutex_create(&mut self) -> MutexId {
fn mutex_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64,
) -> InterpResult<'tcx, MutexId> {
let this = self.eval_context_mut();
this.machine.threads.sync.mutexes.push(Default::default())
this.mutex_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset))
}
fn rwlock_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64,
) -> InterpResult<'tcx, RwLockId> {
let this = self.eval_context_mut();
this.rwlock_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset))
}
fn condvar_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64,
) -> InterpResult<'tcx, CondvarId> {
let this = self.eval_context_mut();
this.condvar_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset))
}
#[inline]
@ -301,8 +372,8 @@ fn mutex_unlock(&mut self, id: MutexId, expected_owner: ThreadId) -> Option<usiz
}
}
#[inline]
/// Put the thread into the queue waiting for the mutex.
#[inline]
fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId) {
let this = self.eval_context_mut();
assert!(this.mutex_is_locked(id), "queuing on unlocked mutex");
@ -310,16 +381,9 @@ fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId) {
this.block_thread(thread);
}
#[inline]
/// Create state for a new read write lock.
fn rwlock_create(&mut self) -> RwLockId {
let this = self.eval_context_mut();
this.machine.threads.sync.rwlocks.push(Default::default())
}
#[inline]
/// Provides the closure with the next RwLockId. Creates that RwLock if the closure returns None,
/// otherwise returns the value from the closure
#[inline]
fn rwlock_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, RwLockId>
where
F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, RwLockId) -> InterpResult<'tcx, Option<RwLockId>>,
@ -349,8 +413,8 @@ fn rwlock_is_locked(&self, id: RwLockId) -> bool {
rwlock.writer.is_some() || rwlock.readers.is_empty().not()
}
#[inline]
/// Check if write locked.
#[inline]
fn rwlock_is_write_locked(&self, id: RwLockId) -> bool {
let this = self.eval_context_ref();
let rwlock = &this.machine.threads.sync.rwlocks[id];
@ -407,8 +471,8 @@ fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
true
}
#[inline]
/// Put the reader in the queue waiting for the lock and block it.
#[inline]
fn rwlock_enqueue_and_block_reader(&mut self, id: RwLockId, reader: ThreadId) {
let this = self.eval_context_mut();
assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock");
@ -416,8 +480,8 @@ fn rwlock_enqueue_and_block_reader(&mut self, id: RwLockId, reader: ThreadId) {
this.block_thread(reader);
}
#[inline]
/// Lock by setting the writer that owns the lock.
#[inline]
fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
let this = self.eval_context_mut();
assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
@ -429,8 +493,8 @@ fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
}
}
#[inline]
/// Try to unlock by removing the writer.
#[inline]
fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool {
let this = self.eval_context_mut();
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
@ -467,8 +531,8 @@ fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> b
}
}
#[inline]
/// Put the writer in the queue waiting for the lock.
#[inline]
fn rwlock_enqueue_and_block_writer(&mut self, id: RwLockId, writer: ThreadId) {
let this = self.eval_context_mut();
assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
@ -476,16 +540,9 @@ fn rwlock_enqueue_and_block_writer(&mut self, id: RwLockId, writer: ThreadId) {
this.block_thread(writer);
}
#[inline]
/// Create state for a new conditional variable.
fn condvar_create(&mut self) -> CondvarId {
let this = self.eval_context_mut();
this.machine.threads.sync.condvars.push(Default::default())
}
#[inline]
/// Provides the closure with the next CondvarId. Creates that Condvar if the closure returns None,
/// otherwise returns the value from the closure
#[inline]
fn condvar_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, CondvarId>
where
F: FnOnce(
@ -504,8 +561,8 @@ fn condvar_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, Condva
}
}
#[inline]
/// Is the conditional variable awaited?
#[inline]
fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
let this = self.eval_context_mut();
!this.machine.threads.sync.condvars[id].waiters.is_empty()


@ -30,8 +30,7 @@ pub enum SchedulingAction {
Stop,
}
/// Timeout callbacks can be created by synchronization primitives to tell the
/// scheduler that they should be called once some period of time passes.
/// Trait for callbacks that can be executed when some event happens, such as after a timeout.
pub trait MachineCallback<'mir, 'tcx>: VisitTags {
fn call(&self, ecx: &mut InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>) -> InterpResult<'tcx>;
}
@ -269,7 +268,7 @@ pub struct ThreadManager<'mir, 'tcx> {
threads: IndexVec<ThreadId, Thread<'mir, 'tcx>>,
/// This field is pub(crate) because the synchronization primitives
/// (`crate::sync`) need a way to access it.
pub(crate) sync: SynchronizationState,
pub(crate) sync: SynchronizationState<'mir, 'tcx>,
/// A mapping from a thread-local static to an allocation id of a thread
/// specific allocation.
thread_local_alloc_ids: RefCell<FxHashMap<(DefId, ThreadId), Pointer<Provenance>>>,
@ -303,7 +302,7 @@ fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
timeout_callbacks,
active_thread: _,
yield_active_thread: _,
sync: _,
sync,
} = self;
for thread in threads {
@ -315,6 +314,7 @@ fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
for callback in timeout_callbacks.values() {
callback.callback.visit_tags(visit);
}
sync.visit_tags(visit);
}
}


@ -17,7 +17,7 @@
//! load to the first, as a result of C++20's coherence-ordered before rules.
//!
//! Rust follows the C++20 memory model (except for the Consume ordering and some operations not performable through C++'s
//! std::atomic<T> API). It is therefore possible for this implementation to generate behaviours never observable when the
//! `std::atomic<T>` API). It is therefore possible for this implementation to generate behaviours never observable when the
//! same program is compiled and run natively. Unfortunately, no literature exists at the time of writing which proposes
an implementable and C++20-compatible relaxed memory model that supports all atomic operations existing in Rust. The closest one is
//! A Promising Semantics for Relaxed-Memory Concurrency by Jeehoon Kang et al. (<https://www.cs.tau.ac.il/~orilahav/papers/popl17.pdf>)


@ -126,7 +126,7 @@ pub struct MiriConfig {
/// Report the current instruction being executed every N basic blocks.
pub report_progress: Option<u32>,
/// Whether Stacked Borrows retagging should recurse into fields of datatypes.
pub retag_fields: bool,
pub retag_fields: RetagFields,
/// The location of a shared object file to load when calling external functions
/// FIXME! consider allowing users to specify paths to multiple SO files, or to a directory
pub external_so_file: Option<PathBuf>,
@ -163,7 +163,7 @@ fn default() -> MiriConfig {
mute_stdout_stderr: false,
preemption_rate: 0.01, // 1%
report_progress: None,
retag_fields: false,
retag_fields: RetagFields::No,
external_so_file: None,
gc_interval: 10_000,
num_cpus: 1,


@ -9,6 +9,7 @@
#![feature(is_some_and)]
#![feature(nonzero_ops)]
#![feature(local_key_cell_methods)]
#![feature(is_terminal)]
// Configure clippy and other lints
#![allow(
clippy::collapsible_else_if,
@ -26,6 +27,7 @@
clippy::type_complexity,
clippy::single_element_loop,
clippy::needless_return,
clippy::bool_to_int_with_if,
// We are not implementing queries here so it's fine
rustc::potential_query_instability
)]
@ -82,34 +84,28 @@
pub use crate::clock::{Clock, Instant};
pub use crate::concurrency::{
data_race::{
AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd,
EvalContextExt as DataRaceEvalContextExt,
},
sync::{CondvarId, EvalContextExt as SyncEvalContextExt, MutexId, RwLockId},
thread::{
EvalContextExt as ThreadsEvalContextExt, SchedulingAction, ThreadId, ThreadManager,
ThreadState, Time,
},
data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
init_once::{EvalContextExt as _, InitOnceId},
sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SyncId},
thread::{EvalContextExt as _, SchedulingAction, ThreadId, ThreadManager, ThreadState, Time},
};
pub use crate::diagnostics::{
report_error, EvalContextExt as DiagnosticsEvalContextExt, NonHaltingDiagnostic,
TerminationInfo,
report_error, EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo,
};
pub use crate::eval::{
create_ecx, eval_entry, AlignmentCheck, BacktraceStyle, IsolatedOp, MiriConfig, RejectOpWith,
};
pub use crate::helpers::{CurrentSpan, EvalContextExt as HelpersEvalContextExt};
pub use crate::helpers::{CurrentSpan, EvalContextExt as _};
pub use crate::intptrcast::ProvenanceMode;
pub use crate::machine::{
AllocExtra, FrameData, MiriInterpCx, MiriInterpCxExt, MiriMachine, MiriMemoryKind, Provenance,
ProvenanceExtra, PAGE_SIZE, STACK_ADDR, STACK_SIZE,
};
pub use crate::mono_hash_map::MonoHashMap;
pub use crate::operator::EvalContextExt as OperatorEvalContextExt;
pub use crate::operator::EvalContextExt as _;
pub use crate::range_map::RangeMap;
pub use crate::stacked_borrows::{
CallId, EvalContextExt as StackedBorEvalContextExt, Item, Permission, SbTag, Stack, Stacks,
CallId, EvalContextExt as _, Item, Permission, RetagFields, SbTag, Stack, Stacks,
};
pub use crate::tag_gc::{EvalContextExt as _, VisitTags};


@ -421,8 +421,10 @@ pub struct MiriMachine<'mir, 'tcx> {
pub(crate) basic_block_count: u64,
/// Handle of the optional shared object file for external functions.
#[cfg(unix)]
#[cfg(target_os = "linux")]
pub external_so_lib: Option<(libloading::Library, std::path::PathBuf)>,
#[cfg(not(target_os = "linux"))]
pub external_so_lib: Option<!>,
/// Run a garbage collector for SbTags every N basic blocks.
pub(crate) gc_interval: u32,
@ -485,7 +487,7 @@ pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>)
report_progress: config.report_progress,
basic_block_count: 0,
clock: Clock::new(config.isolated_op == IsolatedOp::Allow),
#[cfg(unix)]
#[cfg(target_os = "linux")]
external_so_lib: config.external_so_file.as_ref().map(|lib_file_path| {
let target_triple = layout_cx.tcx.sess.opts.target_triple.triple();
// Check if host target == the session target.
@ -507,6 +509,10 @@ pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>)
lib_file_path.clone(),
)
}),
#[cfg(not(target_os = "linux"))]
external_so_lib: config.external_so_file.as_ref().map(|_| {
panic!("loading external .so files is only supported on Linux")
}),
gc_interval: config.gc_interval,
since_gc: 0,
num_cpus: config.num_cpus,
@ -648,7 +654,6 @@ fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
preemption_rate: _,
report_progress: _,
basic_block_count: _,
#[cfg(unix)]
external_so_lib: _,
gc_interval: _,
since_gc: _,


@ -91,6 +91,10 @@ pub fn iter_mut_all(&mut self) -> impl Iterator<Item = &mut T> {
self.v.iter_mut().map(|elem| &mut elem.data)
}
pub fn iter_all(&self) -> impl Iterator<Item = (ops::Range<u64>, &T)> {
self.v.iter().map(|elem| (elem.range.clone(), &elem.data))
}
// Splits the element situated at the given `index`, such that the 2nd one starts at offset
// `split_offset`. Do nothing if the element already starts there.
// Returns whether a split was necessary.


@ -144,7 +144,7 @@ fn GetEnvironmentVariableW(
name_op: &OpTy<'tcx, Provenance>, // LPCWSTR
buf_op: &OpTy<'tcx, Provenance>, // LPWSTR
size_op: &OpTy<'tcx, Provenance>, // DWORD
) -> InterpResult<'tcx, u32> {
) -> InterpResult<'tcx, Scalar<Provenance>> {
// ^ Returns DWORD (u32 on Windows)
let this = self.eval_context_mut();
@ -165,12 +165,14 @@ fn GetEnvironmentVariableW(
let buf_ptr = this.read_pointer(buf_op)?;
// `buf_size` represents the size in characters.
let buf_size = u64::from(this.read_scalar(size_op)?.to_u32()?);
windows_check_buffer_size(this.write_os_str_to_wide_str(&var, buf_ptr, buf_size)?)
Scalar::from_u32(windows_check_buffer_size(
this.write_os_str_to_wide_str(&var, buf_ptr, buf_size)?,
))
}
None => {
let envvar_not_found = this.eval_windows("c", "ERROR_ENVVAR_NOT_FOUND")?;
this.set_last_error(envvar_not_found)?;
0 // return zero upon failure
Scalar::from_u32(0) // return zero upon failure
}
})
}
@ -200,14 +202,14 @@ fn GetEnvironmentStringsW(&mut self) -> InterpResult<'tcx, Pointer<Option<Proven
fn FreeEnvironmentStringsW(
&mut self,
env_block_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, i32> {
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
this.assert_target_os("windows", "FreeEnvironmentStringsW");
let env_block_ptr = this.read_pointer(env_block_op)?;
let result = this.deallocate_ptr(env_block_ptr, None, MiriMemoryKind::Runtime.into());
// If the function succeeds, the return value is nonzero.
Ok(i32::from(result.is_ok()))
Ok(Scalar::from_i32(i32::from(result.is_ok())))
}
fn setenv(
@ -249,7 +251,7 @@ fn SetEnvironmentVariableW(
&mut self,
name_op: &OpTy<'tcx, Provenance>, // LPCWSTR
value_op: &OpTy<'tcx, Provenance>, // LPCWSTR
) -> InterpResult<'tcx, i32> {
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
this.assert_target_os("windows", "SetEnvironmentVariableW");
@ -272,7 +274,7 @@ fn SetEnvironmentVariableW(
this.deallocate_ptr(var, None, MiriMemoryKind::Runtime.into())?;
this.update_environ()?;
}
Ok(1) // return non-zero on success
Ok(this.eval_windows("c", "TRUE")?)
} else {
let value = this.read_os_str_from_wide_str(value_ptr)?;
let var_ptr = alloc_env_var_as_wide_str(&name, &value, this)?;
@ -280,7 +282,7 @@ fn SetEnvironmentVariableW(
this.deallocate_ptr(var, None, MiriMemoryKind::Runtime.into())?;
}
this.update_environ()?;
Ok(1) // return non-zero on success
Ok(this.eval_windows("c", "TRUE")?)
}
}
@ -347,7 +349,7 @@ fn GetCurrentDirectoryW(
&mut self,
size_op: &OpTy<'tcx, Provenance>, // DWORD
buf_op: &OpTy<'tcx, Provenance>, // LPTSTR
) -> InterpResult<'tcx, u32> {
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
this.assert_target_os("windows", "GetCurrentDirectoryW");
@ -357,16 +359,18 @@ fn GetCurrentDirectoryW(
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`GetCurrentDirectoryW`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied)?;
return Ok(0);
return Ok(Scalar::from_u32(0));
}
// If we cannot get the current directory, we return 0
match env::current_dir() {
Ok(cwd) =>
return Ok(windows_check_buffer_size(this.write_path_to_wide_str(&cwd, buf, size)?)),
return Ok(Scalar::from_u32(windows_check_buffer_size(
this.write_path_to_wide_str(&cwd, buf, size)?,
))),
Err(e) => this.set_last_error_from_io_error(e.kind())?,
}
Ok(0)
Ok(Scalar::from_u32(0))
}
fn chdir(&mut self, path_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32> {
@ -395,7 +399,7 @@ fn chdir(&mut self, path_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32>
fn SetCurrentDirectoryW(
&mut self,
path_op: &OpTy<'tcx, Provenance>, // LPCTSTR
) -> InterpResult<'tcx, i32> {
) -> InterpResult<'tcx, Scalar<Provenance>> {
// ^ Returns BOOL (i32 on Windows)
let this = self.eval_context_mut();
@ -407,14 +411,14 @@ fn SetCurrentDirectoryW(
this.reject_in_isolation("`SetCurrentDirectoryW`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied)?;
return Ok(0);
return this.eval_windows("c", "FALSE");
}
match env::set_current_dir(path) {
Ok(()) => Ok(1),
Ok(()) => this.eval_windows("c", "TRUE"),
Err(e) => {
this.set_last_error_from_io_error(e.kind())?;
Ok(0)
this.eval_windows("c", "FALSE")
}
}
}


@ -183,9 +183,7 @@ fn get_func_ptr_explicitly_from_lib(&mut self, link_name: Symbol) -> Option<Code
// from: https://docs.rs/libloading/0.7.3/src/libloading/os/unix/mod.rs.html#411
// using the `libc` crate where this interface is public.
// No `libc::dladdr` on windows.
#[cfg(unix)]
let mut info = std::mem::MaybeUninit::<libc::Dl_info>::uninit();
#[cfg(unix)]
unsafe {
if libc::dladdr(*func.deref() as *const _, info.as_mut_ptr()) != 0 {
if std::ffi::CStr::from_ptr(info.assume_init().dli_fname).to_str().unwrap()


@ -1,4 +1,4 @@
use std::{collections::hash_map::Entry, iter};
use std::{collections::hash_map::Entry, io::Write, iter};
use log::trace;
@ -23,8 +23,6 @@
use super::backtrace::EvalContextExt as _;
use crate::helpers::{convert::Truncate, target_os_is_unix};
#[cfg(unix)]
use crate::shims::ffi_support::EvalContextExt as _;
use crate::*;
/// Returned by `emulate_foreign_item_by_name`.
@ -372,8 +370,9 @@ fn emulate_foreign_item_by_name(
let this = self.eval_context_mut();
// First deal with any external C functions in linked .so file.
#[cfg(unix)]
#[cfg(target_os = "linux")]
if this.machine.external_so_lib.as_ref().is_some() {
use crate::shims::ffi_support::EvalContextExt as _;
// An Ok(false) here means that the function being called was not exported
// by the specified `.so` file; we should continue and check if it corresponds to
// a provided shim.
@ -418,6 +417,19 @@ fn emulate_foreign_item_by_name(
// shim, add it to the corresponding submodule.
match link_name.as_str() {
// Miri-specific extern functions
"miri_get_alloc_id" => {
let [ptr] = this.check_shim(abi, Abi::Rust, link_name, args)?;
let ptr = this.read_pointer(ptr)?;
let (alloc_id, _, _) = this.ptr_get_alloc_id(ptr)?;
this.write_scalar(Scalar::from_u64(alloc_id.0.get()), dest)?;
}
"miri_print_stacks" => {
let [id] = this.check_shim(abi, Abi::Rust, link_name, args)?;
let id = this.read_scalar(id)?.to_u64()?;
if let Some(id) = std::num::NonZeroU64::new(id) {
this.print_stacks(AllocId(id))?;
}
}
"miri_static_root" => {
let [ptr] = this.check_shim(abi, Abi::Rust, link_name, args)?;
let ptr = this.read_pointer(ptr)?;
@ -450,6 +462,23 @@ fn emulate_foreign_item_by_name(
this.handle_miri_resolve_frame_names(abi, link_name, args)?;
}
// Writes some bytes to the interpreter's stdout/stderr. See the
// README for details.
"miri_write_to_stdout" | "miri_write_to_stderr" => {
let [bytes] = this.check_shim(abi, Abi::Rust, link_name, args)?;
let (ptr, len) = this.read_immediate(bytes)?.to_scalar_pair();
let ptr = ptr.to_pointer(this)?;
let len = len.to_machine_usize(this)?;
let msg = this.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
// Note: we're ignoring errors writing to host stdout/stderr.
let _ignore = match link_name.as_str() {
"miri_write_to_stdout" => std::io::stdout().write_all(msg),
"miri_write_to_stderr" => std::io::stderr().write_all(msg),
_ => unreachable!(),
};
}
// Standard C allocation
"malloc" => {
let [size] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;


@ -1,7 +1,7 @@
#![warn(clippy::integer_arithmetic)]
mod backtrace;
#[cfg(unix)]
#[cfg(target_os = "linux")]
pub mod ffi_support;
pub mod foreign_items;
pub mod intrinsics;


@ -119,7 +119,7 @@ fn GetSystemTimeAsFileTime(
fn QueryPerformanceCounter(
&mut self,
lpPerformanceCount_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, i32> {
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
this.assert_target_os("windows", "QueryPerformanceCounter");
@ -134,14 +134,14 @@ fn QueryPerformanceCounter(
Scalar::from_i64(qpc),
&this.deref_operand(lpPerformanceCount_op)?.into(),
)?;
Ok(-1) // return non-zero on success
Ok(Scalar::from_i32(-1)) // return non-zero on success
}
#[allow(non_snake_case)]
fn QueryPerformanceFrequency(
&mut self,
lpFrequency_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, i32> {
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
this.assert_target_os("windows", "QueryPerformanceFrequency");
@ -155,7 +155,7 @@ fn QueryPerformanceFrequency(
Scalar::from_i64(1_000_000_000),
&this.deref_operand(lpFrequency_op)?.into(),
)?;
Ok(-1) // Return non-zero on success
Ok(Scalar::from_i32(-1)) // Return non-zero on success
}
fn mach_absolute_time(&self) -> InterpResult<'tcx, Scalar<Provenance>> {


@ -42,7 +42,7 @@ fn call_dlsym(
);
}
let &[ref _sig, ref _func] = check_arg_count(args)?;
let [_sig, _func] = check_arg_count(args)?;
this.write_null(dest)?;
}
}


@ -452,7 +452,7 @@ fn emulate_foreign_item_by_name(
"isatty" => {
let [fd] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
let result = this.isatty(fd)?;
this.write_scalar(Scalar::from_i32(result), dest)?;
this.write_scalar(result, dest)?;
}
"pthread_atfork" => {
let [prepare, parent, child] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;


@ -4,7 +4,7 @@
use std::fs::{
read_dir, remove_dir, remove_file, rename, DirBuilder, File, FileType, OpenOptions, ReadDir,
};
use std::io::{self, ErrorKind, Read, Seek, SeekFrom, Write};
use std::io::{self, ErrorKind, IsTerminal, Read, Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::time::SystemTime;
@ -65,6 +65,8 @@ fn close<'tcx>(
fn dup(&mut self) -> io::Result<Box<dyn FileDescriptor>>;
fn is_tty(&self) -> bool;
#[cfg(unix)]
fn as_unix_host_fd(&self) -> Option<i32> {
None
@ -143,6 +145,10 @@ fn as_unix_host_fd(&self) -> Option<i32> {
use std::os::unix::io::AsRawFd;
Some(self.file.as_raw_fd())
}
fn is_tty(&self) -> bool {
self.file.is_terminal()
}
}
impl FileDescriptor for io::Stdin {
@ -170,6 +176,10 @@ fn dup(&mut self) -> io::Result<Box<dyn FileDescriptor>> {
fn as_unix_host_fd(&self) -> Option<i32> {
Some(libc::STDIN_FILENO)
}
fn is_tty(&self) -> bool {
self.is_terminal()
}
}
impl FileDescriptor for io::Stdout {
@ -202,6 +212,10 @@ fn dup(&mut self) -> io::Result<Box<dyn FileDescriptor>> {
fn as_unix_host_fd(&self) -> Option<i32> {
Some(libc::STDOUT_FILENO)
}
fn is_tty(&self) -> bool {
self.is_terminal()
}
}
impl FileDescriptor for io::Stderr {
@ -227,12 +241,16 @@ fn dup(&mut self) -> io::Result<Box<dyn FileDescriptor>> {
fn as_unix_host_fd(&self) -> Option<i32> {
Some(libc::STDERR_FILENO)
}
fn is_tty(&self) -> bool {
self.is_terminal()
}
}
#[derive(Debug)]
struct DummyOutput;
struct NullOutput;
impl FileDescriptor for DummyOutput {
impl FileDescriptor for NullOutput {
fn name(&self) -> &'static str {
"stderr and stdout"
}
@ -247,7 +265,11 @@ fn write<'tcx>(
}
fn dup(&mut self) -> io::Result<Box<dyn FileDescriptor>> {
Ok(Box::new(DummyOutput))
Ok(Box::new(NullOutput))
}
fn is_tty(&self) -> bool {
false
}
}
@ -267,8 +289,8 @@ pub(crate) fn new(mute_stdout_stderr: bool) -> FileHandler {
let mut handles: BTreeMap<_, Box<dyn FileDescriptor>> = BTreeMap::new();
handles.insert(0i32, Box::new(io::stdin()));
if mute_stdout_stderr {
handles.insert(1i32, Box::new(DummyOutput));
handles.insert(2i32, Box::new(DummyOutput));
handles.insert(1i32, Box::new(NullOutput));
handles.insert(2i32, Box::new(NullOutput));
} else {
handles.insert(1i32, Box::new(io::stdout()));
handles.insert(2i32, Box::new(io::stderr()));
@ -1073,7 +1095,7 @@ fn linux_statx(
mask |= this.eval_libc("STATX_ATIME")?.to_u32()?;
InterpResult::Ok(tup)
})
.unwrap_or(Ok((0, 0)))?;
.unwrap_or_else(|| Ok((0, 0)))?;
let (created_sec, created_nsec) = metadata
.created
@ -1081,7 +1103,7 @@ fn linux_statx(
mask |= this.eval_libc("STATX_BTIME")?.to_u32()?;
InterpResult::Ok(tup)
})
.unwrap_or(Ok((0, 0)))?;
.unwrap_or_else(|| Ok((0, 0)))?;
let (modified_sec, modified_nsec) = metadata
.modified
@ -1089,7 +1111,7 @@ fn linux_statx(
mask |= this.eval_libc("STATX_MTIME")?.to_u32()?;
InterpResult::Ok(tup)
})
.unwrap_or(Ok((0, 0)))?;
.unwrap_or_else(|| Ok((0, 0)))?;
// Now we write everything to `statxbuf`. We write a zero for the unavailable fields.
this.write_int_fields_named(
@ -1662,35 +1684,23 @@ fn readlink(
}
#[cfg_attr(not(unix), allow(unused))]
fn isatty(&mut self, miri_fd: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32> {
fn isatty(
&mut self,
miri_fd: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
#[cfg(unix)]
// "returns 1 if fd is an open file descriptor referring to a terminal;
// otherwise 0 is returned, and errno is set to indicate the error"
if matches!(this.machine.isolated_op, IsolatedOp::Allow) {
let miri_fd = this.read_scalar(miri_fd)?.to_i32()?;
if let Some(host_fd) =
this.machine.file_handler.handles.get(&miri_fd).and_then(|fd| fd.as_unix_host_fd())
{
// "returns 1 if fd is an open file descriptor referring to a terminal;
// otherwise 0 is returned, and errno is set to indicate the error"
// SAFETY: isatty has no preconditions
let is_tty = unsafe { libc::isatty(host_fd) };
if is_tty == 0 {
let errno = std::io::Error::last_os_error()
.raw_os_error()
.map(Scalar::from_i32)
.unwrap();
this.set_last_error(errno)?;
}
return Ok(is_tty);
let fd = this.read_scalar(miri_fd)?.to_i32()?;
if this.machine.file_handler.handles.get(&fd).map(|fd| fd.is_tty()) == Some(true) {
return Ok(Scalar::from_i32(1));
}
}
// We are attempting to use a Unix interface on a non-Unix platform, or we are on a Unix
// platform and the passed file descriptor is not open, or isolation is enabled
// FIXME: It should be possible to emulate this at least on Windows by using
// GetConsoleMode.
// Fallback when the FD was not found or isolation is enabled.
let enotty = this.eval_libc("ENOTTY")?;
this.set_last_error(enotty)?;
Ok(0)
Ok(Scalar::from_i32(0))
}
fn realpath(


@ -19,6 +19,10 @@
/// in `pthread_mutexattr_settype` function.
const PTHREAD_MUTEX_NORMAL_FLAG: i32 = 0x8000000;
const MUTEX_ID_OFFSET: u64 = 4;
const RWLOCK_ID_OFFSET: u64 = 4;
const CONDVAR_ID_OFFSET: u64 = 4;
fn is_mutex_kind_default<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
kind: Scalar<Provenance>,
@ -108,33 +112,6 @@ fn mutex_set_id<'mir, 'tcx: 'mir>(
)
}
fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
mutex_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, MutexId> {
let value_place = ecx.deref_operand_and_offset(mutex_op, 4, ecx.machine.layouts.u32)?;
ecx.mutex_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar(),
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair();
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(MutexId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}
// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
// Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
@ -149,33 +126,6 @@ fn rwlock_get_id<'mir, 'tcx: 'mir>(
ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOrd::Relaxed)
}
fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
rwlock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, RwLockId> {
let value_place = ecx.deref_operand_and_offset(rwlock_op, 4, ecx.machine.layouts.u32)?;
ecx.rwlock_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar(),
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair();
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}
// pthread_condattr_t
// Our chosen memory layout for emulation (does not have to match the platform layout!):
@ -232,33 +182,6 @@ fn cond_set_id<'mir, 'tcx: 'mir>(
)
}
fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
cond_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, CondvarId> {
let value_place = ecx.deref_operand_and_offset(cond_op, 4, ecx.machine.layouts.u32)?;
ecx.condvar_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar(),
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair();
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(CondvarId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}
fn cond_get_clock_id<'mir, 'tcx: 'mir>(
ecx: &MiriInterpCx<'mir, 'tcx>,
cond_op: &OpTy<'tcx, Provenance>,
@ -435,7 +358,7 @@ fn pthread_mutex_lock(&mut self, mutex_op: &OpTy<'tcx, Provenance>) -> InterpRes
let this = self.eval_context_mut();
let kind = mutex_get_kind(this, mutex_op)?;
let id = mutex_get_or_create_id(this, mutex_op)?;
let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.mutex_is_locked(id) {
@ -475,7 +398,7 @@ fn pthread_mutex_trylock(
let this = self.eval_context_mut();
let kind = mutex_get_kind(this, mutex_op)?;
let id = mutex_get_or_create_id(this, mutex_op)?;
let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.mutex_is_locked(id) {
@ -511,7 +434,7 @@ fn pthread_mutex_unlock(
let this = self.eval_context_mut();
let kind = mutex_get_kind(this, mutex_op)?;
let id = mutex_get_or_create_id(this, mutex_op)?;
let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread) {
@ -545,7 +468,7 @@ fn pthread_mutex_destroy(
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = mutex_get_or_create_id(this, mutex_op)?;
let id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
if this.mutex_is_locked(id) {
throw_ub_format!("destroyed a locked mutex");
@ -568,7 +491,7 @@ fn pthread_rwlock_rdlock(
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = rwlock_get_or_create_id(this, rwlock_op)?;
let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_write_locked(id) {
@ -586,7 +509,7 @@ fn pthread_rwlock_tryrdlock(
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = rwlock_get_or_create_id(this, rwlock_op)?;
let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_write_locked(id) {
@ -603,7 +526,7 @@ fn pthread_rwlock_wrlock(
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = rwlock_get_or_create_id(this, rwlock_op)?;
let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_locked(id) {
@ -633,7 +556,7 @@ fn pthread_rwlock_trywrlock(
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = rwlock_get_or_create_id(this, rwlock_op)?;
let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_locked(id) {
@ -650,7 +573,7 @@ fn pthread_rwlock_unlock(
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = rwlock_get_or_create_id(this, rwlock_op)?;
let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
#[allow(clippy::if_same_then_else)]
@ -669,7 +592,7 @@ fn pthread_rwlock_destroy(
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = rwlock_get_or_create_id(this, rwlock_op)?;
let id = this.rwlock_get_or_create_id(rwlock_op, RWLOCK_ID_OFFSET)?;
if this.rwlock_is_locked(id) {
throw_ub_format!("destroyed a locked rwlock");
@ -772,7 +695,7 @@ fn pthread_cond_init(
fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = cond_get_or_create_id(this, cond_op)?;
let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
if let Some((thread, mutex)) = this.condvar_signal(id) {
post_cond_signal(this, thread, mutex)?;
}
@ -785,7 +708,7 @@ fn pthread_cond_broadcast(
cond_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = cond_get_or_create_id(this, cond_op)?;
let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
while let Some((thread, mutex)) = this.condvar_signal(id) {
post_cond_signal(this, thread, mutex)?;
@ -801,8 +724,8 @@ fn pthread_cond_wait(
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = cond_get_or_create_id(this, cond_op)?;
let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
let mutex_id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
let active_thread = this.get_active_thread();
release_cond_mutex_and_block(this, active_thread, mutex_id)?;
@ -822,8 +745,8 @@ fn pthread_cond_timedwait(
this.check_no_isolation("`pthread_cond_timedwait`")?;
let id = cond_get_or_create_id(this, cond_op)?;
let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
let mutex_id = this.mutex_get_or_create_id(mutex_op, MUTEX_ID_OFFSET)?;
let active_thread = this.get_active_thread();
// Extract the timeout.
@ -899,7 +822,7 @@ fn pthread_cond_destroy(
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let id = cond_get_or_create_id(this, cond_op)?;
let id = this.condvar_get_or_create_id(cond_op, CONDVAR_ID_OFFSET)?;
if this.condvar_is_awaited(id) {
throw_ub_format!("destroying an awaited conditional variable");
}
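
The pthread shims above now go through shared `mutex_get_or_create_id` / `rwlock_get_or_create_id` / `condvar_get_or_create_id` helpers that take an offset (`MUTEX_ID_OFFSET` etc.) into the user-visible sync object. Miri's actual helpers operate on interpreter memory via `deref_operand_and_offset` and `atomic_compare_exchange_scalar` (see the removed `srwlock_get_or_create_id` further down); the following standalone sketch only illustrates the underlying idea of lazily installing an ID into a zero-initialized lock word with a compare-exchange, and is not the real implementation:
use std::sync::atomic::{AtomicU32, Ordering};
// Hand out fresh IDs; in Miri these index interpreter-side lock state, here they
// are just increasing integers starting at 1 (0 means "no ID assigned yet").
fn next_id(counter: &AtomicU32) -> u32 {
    counter.fetch_add(1, Ordering::Relaxed) + 1
}
// Read the ID stored in the lock word, installing a fresh one on first use.
fn get_or_create_id(lock_word: &AtomicU32, counter: &AtomicU32) -> u32 {
    let new = next_id(counter);
    match lock_word.compare_exchange(0, new, Ordering::Relaxed, Ordering::Relaxed) {
        Ok(_) => new,    // we won the race and installed a fresh ID
        Err(old) => old, // an ID was already installed; reuse it (ours is wasted)
    }
}
fn main() {
    let counter = AtomicU32::new(0);
    let lock = AtomicU32::new(0); // pthread objects start out all-zero
    let a = get_or_create_id(&lock, &counter);
    let b = get_or_create_id(&lock, &counter);
    assert_eq!(a, b);
    println!("lock id = {a}");
}
The per-kind `*_ID_OFFSET` constants say where inside the pthread object that lock word lives.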

View file

@ -37,13 +37,13 @@ fn emulate_foreign_item_by_name(
let [name, buf, size] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.GetEnvironmentVariableW(name, buf, size)?;
this.write_scalar(Scalar::from_u32(result), dest)?;
this.write_scalar(result, dest)?;
}
"SetEnvironmentVariableW" => {
let [name, value] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.SetEnvironmentVariableW(name, value)?;
this.write_scalar(Scalar::from_i32(result), dest)?;
this.write_scalar(result, dest)?;
}
"GetEnvironmentStringsW" => {
let [] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
@ -54,19 +54,19 @@ fn emulate_foreign_item_by_name(
let [env_block] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.FreeEnvironmentStringsW(env_block)?;
this.write_scalar(Scalar::from_i32(result), dest)?;
this.write_scalar(result, dest)?;
}
"GetCurrentDirectoryW" => {
let [size, buf] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.GetCurrentDirectoryW(size, buf)?;
this.write_scalar(Scalar::from_u32(result), dest)?;
this.write_scalar(result, dest)?;
}
"SetCurrentDirectoryW" => {
let [path] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.SetCurrentDirectoryW(path)?;
this.write_scalar(Scalar::from_i32(result), dest)?;
this.write_scalar(result, dest)?;
}
// Allocation
@ -218,14 +218,14 @@ fn emulate_foreign_item_by_name(
let [lpPerformanceCount] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.QueryPerformanceCounter(lpPerformanceCount)?;
this.write_scalar(Scalar::from_i32(result), dest)?;
this.write_scalar(result, dest)?;
}
"QueryPerformanceFrequency" => {
#[allow(non_snake_case)]
let [lpFrequency] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.QueryPerformanceFrequency(lpFrequency)?;
this.write_scalar(Scalar::from_i32(result), dest)?;
this.write_scalar(result, dest)?;
}
"Sleep" => {
let [timeout] =
@ -246,7 +246,7 @@ fn emulate_foreign_item_by_name(
"TryAcquireSRWLockExclusive" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let ret = this.TryAcquireSRWLockExclusive(ptr)?;
this.write_scalar(Scalar::from_u8(ret), dest)?;
this.write_scalar(ret, dest)?;
}
"AcquireSRWLockShared" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
@ -259,7 +259,19 @@ fn emulate_foreign_item_by_name(
"TryAcquireSRWLockShared" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let ret = this.TryAcquireSRWLockShared(ptr)?;
this.write_scalar(Scalar::from_u8(ret), dest)?;
this.write_scalar(ret, dest)?;
}
"InitOnceBeginInitialize" => {
let [ptr, flags, pending, context] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.InitOnceBeginInitialize(ptr, flags, pending, context)?;
this.write_scalar(result, dest)?;
}
"InitOnceComplete" => {
let [ptr, flags, context] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.InitOnceComplete(ptr, flags, context)?;
this.write_scalar(result, dest)?;
}
// Dynamic symbol loading
@ -331,16 +343,6 @@ fn emulate_foreign_item_by_name(
// FIXME: we should set last_error, but to what?
this.write_null(dest)?;
}
"GetConsoleMode" => {
// Windows "isatty" (in libtest) needs this, so we fake it.
let [console, mode] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
this.read_scalar(console)?.to_machine_isize(this)?;
this.deref_operand(mode)?;
// Indicate an error.
// FIXME: we should set last_error, but to what?
this.write_null(dest)?;
}
"GetStdHandle" => {
let [which] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
@ -392,14 +394,14 @@ fn emulate_foreign_item_by_name(
let [] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
// Just fake a HANDLE
// It's fine to not use the Handle type here because it's a stub
this.write_scalar(Scalar::from_machine_isize(1, this), dest)?;
this.write_int(1, dest)?;
}
"GetModuleHandleA" if this.frame_in_std() => {
#[allow(non_snake_case)]
let [_lpModuleName] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
// We need to return something non-null here to make `compat_fn!` work.
this.write_scalar(Scalar::from_machine_isize(1, this), dest)?;
this.write_int(1, dest)?;
}
"SetConsoleTextAttribute" if this.frame_in_std() => {
#[allow(non_snake_case)]
@ -408,24 +410,46 @@ fn emulate_foreign_item_by_name(
// Pretend these do not exist / nothing happened, by returning zero.
this.write_null(dest)?;
}
"GetConsoleMode" if this.frame_in_std() => {
let [console, mode] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
this.read_scalar(console)?.to_machine_isize(this)?;
this.deref_operand(mode)?;
// Indicate an error.
this.write_null(dest)?;
}
"GetFileInformationByHandleEx" if this.frame_in_std() => {
#[allow(non_snake_case)]
let [_hFile, _FileInformationClass, _lpFileInformation, _dwBufferSize] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
// Just make it fail.
this.write_null(dest)?;
}
"GetFileType" if this.frame_in_std() => {
#[allow(non_snake_case)]
let [_hFile] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
// Return unknown file type.
this.write_null(dest)?;
}
"AddVectoredExceptionHandler" if this.frame_in_std() => {
#[allow(non_snake_case)]
let [_First, _Handler] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
// Any non zero value works for the stdlib. This is just used for stack overflows anyway.
this.write_scalar(Scalar::from_machine_usize(1, this), dest)?;
this.write_int(1, dest)?;
}
"SetThreadStackGuarantee" if this.frame_in_std() => {
#[allow(non_snake_case)]
let [_StackSizeInBytes] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
// Any non zero value works for the stdlib. This is just used for stack overflows anyway.
this.write_scalar(Scalar::from_u32(1), dest)?;
this.write_int(1, dest)?;
}
"GetCurrentProcessId" if this.frame_in_std() => {
let [] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.GetCurrentProcessId()?;
this.write_scalar(Scalar::from_u32(result), dest)?;
this.write_int(result, dest)?;
}
// this is only callable from std because we know that std ignores the return value
"SwitchToThread" if this.frame_in_std() => {

View file

@ -1,41 +1,17 @@
use crate::concurrency::init_once::InitOnceStatus;
use crate::concurrency::thread::MachineCallback;
use crate::*;
// Locks are pointer-sized pieces of data, initialized to 0.
// We use the first 4 bytes to store the RwLockId.
fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
lock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, RwLockId> {
let value_place = ecx.deref_operand_and_offset(lock_op, 0, ecx.machine.layouts.u32)?;
ecx.rwlock_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar(),
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair();
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}
const SRWLOCK_ID_OFFSET: u64 = 0;
const INIT_ONCE_ID_OFFSET: u64 = 0;
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
#[allow(non_snake_case)]
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
#[allow(non_snake_case)]
fn AcquireSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_locked(id) {
@ -54,28 +30,26 @@ fn AcquireSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> Inter
Ok(())
}
#[allow(non_snake_case)]
fn TryAcquireSRWLockExclusive(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, u8> {
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_locked(id) {
// Lock is already held.
Ok(0)
Ok(Scalar::from_u8(0))
} else {
this.rwlock_writer_lock(id, active_thread);
Ok(1)
Ok(Scalar::from_u8(1))
}
}
#[allow(non_snake_case)]
fn ReleaseSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if !this.rwlock_writer_unlock(id, active_thread) {
@ -88,10 +62,9 @@ fn ReleaseSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> Inter
Ok(())
}
#[allow(non_snake_case)]
fn AcquireSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_write_locked(id) {
@ -103,27 +76,25 @@ fn AcquireSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpRe
Ok(())
}
#[allow(non_snake_case)]
fn TryAcquireSRWLockShared(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, u8> {
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_write_locked(id) {
Ok(0)
Ok(Scalar::from_u8(0))
} else {
this.rwlock_reader_lock(id, active_thread);
Ok(1)
Ok(Scalar::from_u8(1))
}
}
#[allow(non_snake_case)]
fn ReleaseSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let id = this.rwlock_get_or_create_id(lock_op, SRWLOCK_ID_OFFSET)?;
let active_thread = this.get_active_thread();
if !this.rwlock_reader_unlock(id, active_thread) {
@ -135,4 +106,119 @@ fn ReleaseSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpRe
Ok(())
}
fn InitOnceBeginInitialize(
&mut self,
init_once_op: &OpTy<'tcx, Provenance>,
flags_op: &OpTy<'tcx, Provenance>,
pending_op: &OpTy<'tcx, Provenance>,
context_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let active_thread = this.get_active_thread();
let id = this.init_once_get_or_create_id(init_once_op, INIT_ONCE_ID_OFFSET)?;
let flags = this.read_scalar(flags_op)?.to_u32()?;
let pending_place = this.deref_operand(pending_op)?.into();
let context = this.read_pointer(context_op)?;
if flags != 0 {
throw_unsup_format!("unsupported `dwFlags` {flags} in `InitOnceBeginInitialize`");
}
if !this.ptr_is_null(context)? {
throw_unsup_format!("non-null `lpContext` in `InitOnceBeginInitialize`");
}
match this.init_once_status(id) {
InitOnceStatus::Uninitialized => {
this.init_once_begin(id);
this.write_scalar(this.eval_windows("c", "TRUE")?, &pending_place)?;
}
InitOnceStatus::Begun => {
// Someone else is already on it.
// Block this thread until they are done.
// When we are woken up, set the `pending` flag accordingly.
struct Callback<'tcx> {
init_once_id: InitOnceId,
pending_place: PlaceTy<'tcx, Provenance>,
}
impl<'tcx> VisitTags for Callback<'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
let Callback { init_once_id: _, pending_place } = self;
pending_place.visit_tags(visit);
}
}
impl<'mir, 'tcx> MachineCallback<'mir, 'tcx> for Callback<'tcx> {
fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
let pending = match this.init_once_status(self.init_once_id) {
InitOnceStatus::Uninitialized =>
unreachable!(
"status should have either been set to begun or complete"
),
InitOnceStatus::Begun => this.eval_windows("c", "TRUE")?,
InitOnceStatus::Complete => this.eval_windows("c", "FALSE")?,
};
this.write_scalar(pending, &self.pending_place)?;
Ok(())
}
}
this.init_once_enqueue_and_block(
id,
active_thread,
Box::new(Callback { init_once_id: id, pending_place }),
)
}
InitOnceStatus::Complete =>
this.write_scalar(this.eval_windows("c", "FALSE")?, &pending_place)?,
}
// This always succeeds (even if the thread is blocked, we will succeed if we ever unblock).
this.eval_windows("c", "TRUE")
}
fn InitOnceComplete(
&mut self,
init_once_op: &OpTy<'tcx, Provenance>,
flags_op: &OpTy<'tcx, Provenance>,
context_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let id = this.init_once_get_or_create_id(init_once_op, INIT_ONCE_ID_OFFSET)?;
let flags = this.read_scalar(flags_op)?.to_u32()?;
let context = this.read_pointer(context_op)?;
let success = if flags == 0 {
true
} else if flags == this.eval_windows("c", "INIT_ONCE_INIT_FAILED")?.to_u32()? {
false
} else {
throw_unsup_format!("unsupported `dwFlags` {flags} in `InitOnceBeginInitialize`");
};
if !this.ptr_is_null(context)? {
throw_unsup_format!("non-null `lpContext` in `InitOnceBeginInitialize`");
}
if this.init_once_status(id) != InitOnceStatus::Begun {
// The docs do not say anything about this case, but it seems better to not allow it.
throw_ub_format!(
"calling InitOnceComplete on a one time initialization that has not begun or is already completed"
);
}
if success {
this.init_once_complete(id)?;
} else {
this.init_once_fail(id)?;
}
this.eval_windows("c", "TRUE")
}
}
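
As a rough, hypothetical model of the `InitOnceStatus` transitions the two shims above implement (the real code additionally blocks waiting threads and writes the `pending` flag from a `MachineCallback` when they wake up):
// Minimal model of the InitOnce state machine; not Miri's actual implementation.
#[derive(Clone, Copy, Debug, PartialEq)]
enum InitOnceStatus {
    Uninitialized,
    Begun,
    Complete,
}
struct InitOnce {
    status: InitOnceStatus,
}
impl InitOnce {
    // Returns the `pending` flag handed back to the caller.
    fn begin(&mut self) -> bool {
        match self.status {
            InitOnceStatus::Uninitialized => {
                self.status = InitOnceStatus::Begun;
                true // caller must run the initialization
            }
            // The real shim blocks here until the initializing thread calls
            // InitOnceComplete; this sketch just reports the current state.
            InitOnceStatus::Begun => true,
            InitOnceStatus::Complete => false,
        }
    }
    fn complete(&mut self, success: bool) {
        assert_eq!(self.status, InitOnceStatus::Begun);
        self.status = if success {
            InitOnceStatus::Complete
        } else {
            // INIT_ONCE_INIT_FAILED: let the next caller retry.
            InitOnceStatus::Uninitialized
        };
    }
}
fn main() {
    let mut once = InitOnce { status: InitOnceStatus::Uninitialized };
    assert!(once.begin());  // first caller initializes
    once.complete(false);   // initialization failed
    assert!(once.begin());  // next caller retries
    once.complete(true);
    assert!(!once.begin()); // already complete
}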

View file

@ -17,6 +17,7 @@
Ty,
};
use rustc_span::DUMMY_SP;
use rustc_target::abi::Abi;
use rustc_target::abi::Size;
use smallvec::SmallVec;
@ -45,6 +46,7 @@ pub fn new(i: u64) -> Option<Self> {
}
// The default to be used when SB is disabled
#[allow(clippy::should_implement_trait)]
pub fn default() -> Self {
Self::new(1).unwrap()
}
@ -113,7 +115,18 @@ pub struct GlobalStateInner {
/// The call ids to trace
tracked_call_ids: FxHashSet<CallId>,
/// Whether to recurse into datatypes when searching for pointers to retag.
retag_fields: bool,
retag_fields: RetagFields,
}
#[derive(Copy, Clone, Debug)]
pub enum RetagFields {
/// Don't retag any fields.
No,
/// Retag all fields.
Yes,
/// Only retag fields of types with Scalar and ScalarPair layout,
/// to match the LLVM `noalias` we generate.
OnlyScalar,
}
impl VisitTags for GlobalStateInner {
@ -172,7 +185,7 @@ impl GlobalStateInner {
pub fn new(
tracked_pointer_tags: FxHashSet<SbTag>,
tracked_call_ids: FxHashSet<CallId>,
retag_fields: bool,
retag_fields: RetagFields,
) -> Self {
GlobalStateInner {
next_ptr_tag: SbTag(NonZeroU64::new(1).unwrap()),
@ -998,7 +1011,7 @@ struct RetagVisitor<'ecx, 'mir, 'tcx> {
ecx: &'ecx mut MiriInterpCx<'mir, 'tcx>,
kind: RetagKind,
retag_cause: RetagCause,
retag_fields: bool,
retag_fields: RetagFields,
}
impl<'ecx, 'mir, 'tcx> RetagVisitor<'ecx, 'mir, 'tcx> {
#[inline(always)] // yes this helps in our benchmarks
@ -1045,6 +1058,18 @@ fn visit_value(&mut self, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tc
return Ok(());
}
let recurse_for_fields = || {
match self.retag_fields {
RetagFields::No => false,
RetagFields::Yes => true,
RetagFields::OnlyScalar => {
// Matching `ArgAbi::new` at the time of writing, only fields of
// `Scalar` and `ScalarPair` ABI are considered.
matches!(place.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..))
}
}
};
if let Some((ref_kind, protector)) = qualify(place.layout.ty, self.kind) {
self.retag_place(place, ref_kind, self.retag_cause, protector)?;
} else if matches!(place.layout.ty.kind(), ty::RawPtr(..)) {
@ -1053,7 +1078,7 @@ fn visit_value(&mut self, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tc
// Do *not* recurse into them.
// (No need to worry about wide references, those always "qualify". And Boxes
// are handled specially by the visitor anyway.)
} else if self.retag_fields
} else if recurse_for_fields()
|| place.layout.ty.ty_adt_def().is_some_and(|adt| adt.is_box())
{
// Recurse deeper. Need to always recurse for `Box` to even hit `visit_box`.
@ -1122,4 +1147,19 @@ fn expose_tag(&mut self, alloc_id: AllocId, tag: SbTag) -> InterpResult<'tcx> {
}
Ok(())
}
fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let alloc_extra = this.get_alloc_extra(alloc_id)?;
let stacks = alloc_extra.stacked_borrows.as_ref().unwrap().borrow();
for (range, stack) in stacks.stacks.iter_all() {
print!("{:?}: [", range);
for i in 0..stack.len() {
let item = stack.get(i).unwrap();
print!(" {:?}{:?}", item.perm(), item.tag());
}
println!(" ]");
}
Ok(())
}
}
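
The new `RetagFields` mode is selected by the `-Zmiri-retag-fields` flag; only the `=scalar` spelling shows up in the tests below, and the flag parsing itself lives in Miri's driver rather than in this file. A hypothetical sketch of that mapping, with the `none`/`all` spellings being assumptions, could look like:
#[derive(Copy, Clone, Debug, PartialEq)]
enum RetagFields {
    No,
    Yes,
    OnlyScalar,
}
// Hypothetical mapping from a `-Zmiri-retag-fields[=<mode>]` value to the enum;
// only the `scalar` spelling is visible in this diff, the rest is assumed.
fn parse_retag_fields(value: Option<&str>) -> Result<RetagFields, String> {
    match value {
        None | Some("all") => Ok(RetagFields::Yes),
        Some("none") => Ok(RetagFields::No),
        Some("scalar") => Ok(RetagFields::OnlyScalar),
        Some(other) => Err(format!("unknown -Zmiri-retag-fields mode: {other}")),
    }
}
fn main() {
    assert_eq!(parse_retag_fields(Some("scalar")), Ok(RetagFields::OnlyScalar));
    assert_eq!(parse_retag_fields(None), Ok(RetagFields::Yes));
}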

View file

@ -43,10 +43,14 @@ impl Stack {
pub fn retain(&mut self, tags: &FxHashSet<SbTag>) {
let mut first_removed = None;
// For stacks with a known bottom, we never consider removing the bottom-most tag, because
// that is the base tag which exists whether or not there are any pointers to the
// allocation.
let mut read_idx = if self.unknown_bottom.is_some() { 0 } else { 1 };
// We never consider removing the bottom-most tag. For stacks without an unknown
// bottom this preserves the base tag.
// Note that the algorithm below is based on considering the tag at read_idx - 1,
// so precisely considering the tag at index 0 for removal when we have an unknown
// bottom would complicate the implementation. The simplification of not considering
// it does not have a significant impact on the degree to which the GC mitigates
// memory growth.
let mut read_idx = 1;
let mut write_idx = read_idx;
while read_idx < self.borrows.len() {
let left = self.borrows[read_idx - 1];
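
The hunk above only shows the start of the rewritten `retain`. As a generic illustration of the read-index/write-index compaction pattern it uses (starting at index 1, so the bottom-most element is never considered for removal), here is a standalone sketch; the real `retain` also inspects the element at `read_idx - 1`, which this simplified version ignores:
use std::collections::HashSet;
// Generic compaction sketch: drop elements that are not in `keep`, but never
// consider index 0 for removal (it plays the role of the base tag above).
fn retain_from_one(v: &mut Vec<u64>, keep: &HashSet<u64>) {
    let mut read_idx = 1;
    let mut write_idx = 1;
    while read_idx < v.len() {
        let item = v[read_idx];
        if keep.contains(&item) {
            v[write_idx] = item;
            write_idx += 1;
        }
        read_idx += 1;
    }
    v.truncate(write_idx);
}
fn main() {
    let mut stack = vec![10, 20, 30, 20, 40];
    let keep: HashSet<u64> = [10, 30, 40].into_iter().collect();
    retain_from_one(&mut stack, &keep);
    assert_eq!(stack, vec![10, 30, 40]);
}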

View file

@ -33,10 +33,13 @@ def normalize_stderr(str):
return str
def check_output(actual, path, name):
if 'MIRI_BLESS' in os.environ:
open(path, mode='w').write(actual)
return True
expected = open(path).read()
if expected == actual:
return True
print(f"{path} did not match reference!")
print(f"{name} output did not match reference in {path}!")
print(f"--- BEGIN diff {name} ---")
for text in difflib.unified_diff(expected.split("\n"), actual.split("\n")):
print(text)

View file

@ -8,6 +8,12 @@ fn miri_path() -> PathBuf {
PathBuf::from(option_env!("MIRI").unwrap_or(env!("CARGO_BIN_EXE_miri")))
}
fn get_host() -> String {
rustc_version::VersionMeta::for_command(std::process::Command::new(miri_path()))
.expect("failed to parse rustc version info")
.host
}
// Build the shared object file for testing external C function calls.
fn build_so_for_c_ffi_tests() -> PathBuf {
let cc = option_env!("CC").unwrap_or("cc");
@ -37,14 +43,9 @@ fn build_so_for_c_ffi_tests() -> PathBuf {
so_file_path
}
fn run_tests(
mode: Mode,
path: &str,
target: Option<String>,
with_dependencies: bool,
) -> Result<()> {
fn run_tests(mode: Mode, path: &str, target: &str, with_dependencies: bool) -> Result<()> {
let mut config = Config {
target,
target: Some(target.to_owned()),
stderr_filters: STDERR.clone(),
stdout_filters: STDOUT.clone(),
root_dir: PathBuf::from(path),
@ -138,6 +139,8 @@ macro_rules! regexes {
STDOUT:
// Windows file paths
r"\\" => "/",
// erase Stacked Borrows tags
"<[0-9]+>" => "<TAG>",
}
regexes! {
@ -179,13 +182,8 @@ enum Dependencies {
use Dependencies::*;
fn ui(mode: Mode, path: &str, with_dependencies: Dependencies) -> Result<()> {
let target = get_target();
let msg = format!(
"## Running ui tests in {path} against miri for {}",
target.as_deref().unwrap_or("host")
);
fn ui(mode: Mode, path: &str, target: &str, with_dependencies: Dependencies) -> Result<()> {
let msg = format!("## Running ui tests in {path} against miri for {target}");
eprintln!("{}", msg.green().bold());
let with_dependencies = match with_dependencies {
@ -195,25 +193,31 @@ fn ui(mode: Mode, path: &str, with_dependencies: Dependencies) -> Result<()> {
run_tests(mode, path, target, with_dependencies)
}
fn get_target() -> Option<String> {
env::var("MIRI_TEST_TARGET").ok()
fn get_target() -> String {
env::var("MIRI_TEST_TARGET").ok().unwrap_or_else(get_host)
}
fn main() -> Result<()> {
ui_test::color_eyre::install()?;
let target = get_target();
// Add a test env var to do environment communication tests.
env::set_var("MIRI_ENV_VAR_TEST", "0");
// Let the tests know where to store temp files (they might run for a different target, which can make this hard to find).
env::set_var("MIRI_TEMP", env::temp_dir());
ui(Mode::Pass, "tests/pass", WithoutDependencies)?;
ui(Mode::Pass, "tests/pass-dep", WithDependencies)?;
ui(Mode::Panic, "tests/panic", WithDependencies)?;
ui(Mode::Fail { require_patterns: true }, "tests/fail", WithDependencies)?;
ui(Mode::Pass, "tests/pass", &target, WithoutDependencies)?;
ui(Mode::Pass, "tests/pass-dep", &target, WithDependencies)?;
ui(Mode::Panic, "tests/panic", &target, WithDependencies)?;
ui(Mode::Fail { require_patterns: true }, "tests/fail", &target, WithDependencies)?;
if cfg!(target_os = "linux") {
ui(Mode::Pass, "tests/extern-so/pass", WithoutDependencies)?;
ui(Mode::Fail { require_patterns: true }, "tests/extern-so/fail", WithoutDependencies)?;
ui(Mode::Pass, "tests/extern-so/pass", &target, WithoutDependencies)?;
ui(
Mode::Fail { require_patterns: true },
"tests/extern-so/fail",
&target,
WithoutDependencies,
)?;
}
Ok(())

View file

@ -2,7 +2,6 @@
// Make sure we find these even with many checks disabled.
//@compile-flags: -Zmiri-disable-alignment-check -Zmiri-disable-stacked-borrows -Zmiri-disable-validation
fn main() {
let b = unsafe { std::mem::transmute::<u8, bool>(2) };
let _x = b == std::hint::black_box(true); //~ ERROR: interpreting an invalid 8-bit value as a bool

View file

@ -0,0 +1,41 @@
#![feature(lang_items, start, core_intrinsics)]
#![no_std]
// windows tls dtors go through libstd right now, thus this test
// cannot pass. When windows tls dtors go through the special magic
// windows linker section, we can run this test on windows again.
//@ignore-target-windows
// Plumbing to let us use `writeln!` to host stderr:
extern "Rust" {
fn miri_write_to_stderr(bytes: &[u8]);
}
struct HostErr;
use core::fmt::Write;
impl Write for HostErr {
fn write_str(&mut self, s: &str) -> core::fmt::Result {
unsafe {
miri_write_to_stderr(s.as_bytes());
}
Ok(())
}
}
// Aaaand the test:
#[start]
fn start(_: isize, _: *const *const u8) -> isize {
panic!("blarg I am dead")
}
#[panic_handler]
fn panic_handler(panic_info: &core::panic::PanicInfo) -> ! {
writeln!(HostErr, "{panic_info}").ok();
core::intrinsics::abort(); //~ ERROR: the program aborted execution
}
#[lang = "eh_personality"]
fn eh_personality() {}

View file

@ -0,0 +1,19 @@
panicked at 'blarg I am dead', $DIR/no_std.rs:LL:CC
error: abnormal termination: the program aborted execution
--> $DIR/no_std.rs:LL:CC
|
LL | core::intrinsics::abort();
| ^^^^^^^^^^^^^^^^^^^^^^^^^ the program aborted execution
|
= note: inside `panic_handler` at $DIR/no_std.rs:LL:CC
note: inside `start` at RUSTLIB/core/src/panic.rs:LL:CC
--> $DIR/no_std.rs:LL:CC
|
LL | panic!("blarg I am dead")
| ^^^^^^^^^^^^^^^^^^^^^^^^^
= note: this error originates in the macro `$crate::panic::panic_2015` which comes from the expansion of the macro `panic` (in Nightly builds, run with -Z macro-backtrace for more info)
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to previous error

View file

@ -0,0 +1,19 @@
//@compile-flags: -Zmiri-retag-fields=scalar
//@error-pattern: which is protected
struct Newtype<'a>(&'a mut i32, i32);
fn dealloc_while_running(_n: Newtype<'_>, dealloc: impl FnOnce()) {
dealloc();
}
// Make sure that we protect references inside structs that are passed as ScalarPair.
fn main() {
let ptr = Box::into_raw(Box::new(0i32));
#[rustfmt::skip] // I like my newlines
unsafe {
dealloc_while_running(
Newtype(&mut *ptr, 0),
|| drop(Box::from_raw(ptr)),
)
};
}

View file

@ -0,0 +1,44 @@
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is protected because it is an argument of call ID
--> RUSTLIB/alloc/src/boxed.rs:LL:CC
|
LL | Box(unsafe { Unique::new_unchecked(raw) }, alloc)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is protected because it is an argument of call ID
|
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
help: <TAG> was created by a SharedReadWrite retag at offsets [0x0..0x4]
--> $DIR/newtype_pair_retagging.rs:LL:CC
|
LL | let ptr = Box::into_raw(Box::new(0i32));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
help: <TAG> is this argument
--> $DIR/newtype_pair_retagging.rs:LL:CC
|
LL | fn dealloc_while_running(_n: Newtype<'_>, dealloc: impl FnOnce()) {
| ^^
= note: BACKTRACE:
= note: inside `std::boxed::Box::<i32>::from_raw_in` at RUSTLIB/alloc/src/boxed.rs:LL:CC
= note: inside `std::boxed::Box::<i32>::from_raw` at RUSTLIB/alloc/src/boxed.rs:LL:CC
note: inside closure at $DIR/newtype_pair_retagging.rs:LL:CC
--> $DIR/newtype_pair_retagging.rs:LL:CC
|
LL | || drop(Box::from_raw(ptr)),
| ^^^^^^^^^^^^^^^^^^
note: inside `dealloc_while_running::<[closure@$DIR/newtype_pair_retagging.rs:LL:CC]>` at $DIR/newtype_pair_retagging.rs:LL:CC
--> $DIR/newtype_pair_retagging.rs:LL:CC
|
LL | dealloc();
| ^^^^^^^^^
note: inside `main` at $DIR/newtype_pair_retagging.rs:LL:CC
--> $DIR/newtype_pair_retagging.rs:LL:CC
|
LL | / dealloc_while_running(
LL | | Newtype(&mut *ptr, 0),
LL | | || drop(Box::from_raw(ptr)),
LL | | )
| |_________^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to previous error

View file

@ -1,4 +1,4 @@
//@compile-flags: -Zmiri-retag-fields
//@compile-flags: -Zmiri-retag-fields=scalar
//@error-pattern: which is protected
struct Newtype<'a>(&'a mut i32);

View file

@ -7,7 +7,7 @@
fn main() {
// Try many times as this might work by chance.
for _ in 0..10 {
for _ in 0..20 {
let buf = [0u32; 256];
// `buf` is sufficiently aligned for `layout.align` on a `dyn Debug`, but not
// for the actual alignment required by `MuchAlign`.

View file

@ -11,7 +11,7 @@ struct Foo {
fn main() {
// Try many times as this might work by chance.
for _ in 0..10 {
for _ in 0..20 {
let foo = Foo { x: 42, y: 99 };
let p = &foo.x;
let i = *p; //~ERROR: alignment 4 is required

View file

@ -3,7 +3,7 @@
fn main() {
// Try many times as this might work by chance.
for _ in 0..10 {
for _ in 0..20 {
let x = [2u16, 3, 4, 5]; // Make it big enough so we don't get an out-of-bounds error.
let x = &x[0] as *const _ as *const *const u8; // cast to ptr-to-ptr, so that we load a ptr
// This must fail because alignment is violated. Test specifically for loading pointers,

View file

@ -6,7 +6,7 @@ fn main() {
// (This would be missed if u8 allocations are *always* at odd addresses.)
//
// Try many times as this might work by chance.
for _ in 0..10 {
for _ in 0..20 {
let x = [0u8; 4];
let ptr = x.as_ptr().wrapping_offset(1).cast::<u16>();
let _val = unsafe { *ptr }; //~ERROR: but alignment

View file

@ -4,7 +4,7 @@
fn main() {
// Try many times as this might work by chance.
for i in 0..10 {
for i in 0..20 {
let x = i as u8;
let x = &x as *const _ as *const [u32; 0];
// This must fail because alignment is violated. Test specifically for loading ZST.

View file

@ -0,0 +1,28 @@
//@ignore-target-windows: no libc on Windows
//@compile-flags: -Zmiri-isolation-error=warn-nobacktrace
//@normalize-stderr-test: "(stat(x)?)" -> "$$STAT"
use std::ffi::CString;
use std::fs;
use std::io::{Error, ErrorKind};
fn main() {
// test `fcntl`
unsafe {
assert_eq!(libc::fcntl(1, libc::F_DUPFD, 0), -1);
assert_eq!(Error::last_os_error().raw_os_error(), Some(libc::EPERM));
}
// test `readlink`
let symlink_c_str = CString::new("foo.txt").unwrap();
let mut buf = vec![0; "foo_link.txt".len() + 1];
unsafe {
assert_eq!(libc::readlink(symlink_c_str.as_ptr(), buf.as_mut_ptr(), buf.len()), -1);
assert_eq!(Error::last_os_error().raw_os_error(), Some(libc::EACCES));
}
// test `stat`
assert_eq!(fs::metadata("foo.txt").unwrap_err().kind(), ErrorKind::PermissionDenied);
// check that it is the right kind of `PermissionDenied`
assert_eq!(Error::last_os_error().raw_os_error(), Some(libc::EACCES));
}

View file

@ -0,0 +1,6 @@
warning: `fcntl` was made to return an error due to isolation
warning: `readlink` was made to return an error due to isolation
warning: `$STAT` was made to return an error due to isolation

View file

@ -0,0 +1,137 @@
//@ignore-target-windows: no libc on Windows
//@compile-flags: -Zmiri-disable-isolation
#![feature(io_error_more)]
#![feature(io_error_uncategorized)]
use std::convert::TryInto;
use std::ffi::CString;
use std::fs::{canonicalize, remove_file, File};
use std::io::{Error, ErrorKind, Write};
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
fn main() {
test_dup_stdout_stderr();
test_canonicalize_too_long();
test_readlink();
test_file_open_unix_allow_two_args();
test_file_open_unix_needs_three_args();
test_file_open_unix_extra_third_arg();
}
fn tmp() -> PathBuf {
std::env::var("MIRI_TEMP")
.map(|tmp| {
// MIRI_TEMP is set outside of our emulated
// program, so it may have path separators that don't
// correspond to our target platform. We normalize them here
// before constructing a `PathBuf`
#[cfg(windows)]
return PathBuf::from(tmp.replace("/", "\\"));
#[cfg(not(windows))]
return PathBuf::from(tmp.replace("\\", "/"));
})
.unwrap_or_else(|_| std::env::temp_dir())
}
/// Prepare: compute filename and make sure the file does not exist.
fn prepare(filename: &str) -> PathBuf {
let path = tmp().join(filename);
// Clean the paths for robustness.
remove_file(&path).ok();
path
}
/// Prepare like above, and also write some initial content to the file.
fn prepare_with_content(filename: &str, content: &[u8]) -> PathBuf {
let path = prepare(filename);
let mut file = File::create(&path).unwrap();
file.write(content).unwrap();
path
}
fn test_file_open_unix_allow_two_args() {
let path = prepare_with_content("test_file_open_unix_allow_two_args.txt", &[]);
let mut name = path.into_os_string();
name.push("\0");
let name_ptr = name.as_bytes().as_ptr().cast::<libc::c_char>();
let _fd = unsafe { libc::open(name_ptr, libc::O_RDONLY) };
}
fn test_file_open_unix_needs_three_args() {
let path = prepare_with_content("test_file_open_unix_needs_three_args.txt", &[]);
let mut name = path.into_os_string();
name.push("\0");
let name_ptr = name.as_bytes().as_ptr().cast::<libc::c_char>();
let _fd = unsafe { libc::open(name_ptr, libc::O_CREAT, 0o666) };
}
fn test_file_open_unix_extra_third_arg() {
let path = prepare_with_content("test_file_open_unix_extra_third_arg.txt", &[]);
let mut name = path.into_os_string();
name.push("\0");
let name_ptr = name.as_bytes().as_ptr().cast::<libc::c_char>();
let _fd = unsafe { libc::open(name_ptr, libc::O_RDONLY, 42) };
}
fn test_dup_stdout_stderr() {
let bytes = b"hello dup fd\n";
unsafe {
let new_stdout = libc::fcntl(1, libc::F_DUPFD, 0);
let new_stderr = libc::fcntl(2, libc::F_DUPFD, 0);
libc::write(new_stdout, bytes.as_ptr() as *const libc::c_void, bytes.len());
libc::write(new_stderr, bytes.as_ptr() as *const libc::c_void, bytes.len());
}
}
fn test_canonicalize_too_long() {
// Make sure we get an error for long paths.
let too_long = "x/".repeat(libc::PATH_MAX.try_into().unwrap());
assert!(canonicalize(too_long).is_err());
}
fn test_readlink() {
let bytes = b"Hello, World!\n";
let path = prepare_with_content("miri_test_fs_link_target.txt", bytes);
let expected_path = path.as_os_str().as_bytes();
let symlink_path = prepare("miri_test_fs_symlink.txt");
std::os::unix::fs::symlink(&path, &symlink_path).unwrap();
// Test that the expected string gets written to a buffer of proper
// length, and that a trailing null byte is not written.
let symlink_c_str = CString::new(symlink_path.as_os_str().as_bytes()).unwrap();
let symlink_c_ptr = symlink_c_str.as_ptr();
// Make the buf one byte larger than it needs to be,
// and check that the last byte is not overwritten.
let mut large_buf = vec![0xFF; expected_path.len() + 1];
let res =
unsafe { libc::readlink(symlink_c_ptr, large_buf.as_mut_ptr().cast(), large_buf.len()) };
// Check that the resolved path was properly written into the buf.
assert_eq!(&large_buf[..(large_buf.len() - 1)], expected_path);
assert_eq!(large_buf.last(), Some(&0xFF));
assert_eq!(res, large_buf.len() as isize - 1);
// Test that the resolved path is truncated if the provided buffer
// is too small.
let mut small_buf = [0u8; 2];
let res =
unsafe { libc::readlink(symlink_c_ptr, small_buf.as_mut_ptr().cast(), small_buf.len()) };
assert_eq!(small_buf, &expected_path[..small_buf.len()]);
assert_eq!(res, small_buf.len() as isize);
// Test that we report a proper error for a missing path.
let bad_path = CString::new("MIRI_MISSING_FILE_NAME").unwrap();
let res = unsafe {
libc::readlink(bad_path.as_ptr(), small_buf.as_mut_ptr().cast(), small_buf.len())
};
assert_eq!(res, -1);
assert_eq!(Error::last_os_error().kind(), ErrorKind::NotFound);
}

View file

@ -0,0 +1 @@
hello dup fd

View file

@ -22,7 +22,7 @@ fn align_to() {
fn main() {
// Do this a couple times in a loop because it may work "by chance".
for _ in 0..10 {
for _ in 0..20 {
manual_alignment();
align_to();
}

View file

@ -0,0 +1,138 @@
//@only-target-windows: Uses win32 api functions
// We are making scheduler assumptions here.
//@compile-flags: -Zmiri-preemption-rate=0
use std::ffi::c_void;
use std::ptr::null_mut;
use std::thread;
#[derive(Copy, Clone)]
struct SendPtr<T>(*mut T);
unsafe impl<T> Send for SendPtr<T> {}
extern "system" {
fn InitOnceBeginInitialize(
init: *mut *mut c_void,
flags: u32,
pending: *mut i32,
context: *mut c_void,
) -> i32;
fn InitOnceComplete(init: *mut *mut c_void, flags: u32, context: *mut c_void) -> i32;
}
const TRUE: i32 = 1;
const FALSE: i32 = 0;
const INIT_ONCE_INIT_FAILED: u32 = 4;
fn single_thread() {
let mut init_once = null_mut();
let mut pending = 0;
unsafe {
assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, TRUE);
assert_eq!(InitOnceComplete(&mut init_once, 0, null_mut()), TRUE);
assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, FALSE);
}
let mut init_once = null_mut();
unsafe {
assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, TRUE);
assert_eq!(InitOnceComplete(&mut init_once, INIT_ONCE_INIT_FAILED, null_mut()), TRUE);
assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, TRUE);
}
}
fn block_until_complete() {
let mut init_once = null_mut();
let mut pending = 0;
unsafe {
assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, TRUE);
}
let init_once_ptr = SendPtr(&mut init_once);
let waiter = move || unsafe {
let mut pending = 0;
assert_eq!(InitOnceBeginInitialize(init_once_ptr.0, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, FALSE);
println!("finished waiting for initialization");
};
let waiter1 = thread::spawn(waiter);
let waiter2 = thread::spawn(waiter);
// this yield ensures `waiter1` & `waiter2` are blocked on the main thread
thread::yield_now();
println!("completing initialization");
unsafe {
assert_eq!(InitOnceComplete(init_once_ptr.0, 0, null_mut()), TRUE);
}
waiter1.join().unwrap();
waiter2.join().unwrap();
}
fn retry_on_fail() {
let mut init_once = null_mut();
let mut pending = 0;
unsafe {
assert_eq!(InitOnceBeginInitialize(&mut init_once, 0, &mut pending, null_mut()), TRUE);
assert_eq!(pending, TRUE);
}
let init_once_ptr = SendPtr(&mut init_once);
let waiter = move || unsafe {
let mut pending = 0;
assert_eq!(InitOnceBeginInitialize(init_once_ptr.0, 0, &mut pending, null_mut()), TRUE);
if pending == 1 {
println!("retrying initialization");
assert_eq!(InitOnceComplete(init_once_ptr.0, 0, null_mut()), TRUE);
} else {
println!("finished waiting for initialization");
}
};
let waiter1 = thread::spawn(waiter);
let waiter2 = thread::spawn(waiter);
// this yield ensures `waiter1` & `waiter2` are blocked on the main thread
thread::yield_now();
println!("failing initialization");
unsafe {
assert_eq!(InitOnceComplete(init_once_ptr.0, INIT_ONCE_INIT_FAILED, null_mut()), TRUE);
}
waiter1.join().unwrap();
waiter2.join().unwrap();
}
fn main() {
single_thread();
block_until_complete();
retry_on_fail();
}

View file

@ -0,0 +1,6 @@
completing initialization
finished waiting for initialization
finished waiting for initialization
failing initialization
retrying initialization
finished waiting for initialization

View file

@ -0,0 +1,57 @@
//@compile-flags: -Zmiri-permissive-provenance
#![deny(unsafe_op_in_unsafe_fn)]
//! This does some tricky ptr-int-casting.
use core::alloc::{GlobalAlloc, Layout};
use std::alloc::System;
/// # Safety
/// `ptr` must be valid for writes of `len` bytes
unsafe fn volatile_write_zeroize_mem(ptr: *mut u8, len: usize) {
for i in 0..len {
// ptr as usize + i can't overflow because `ptr` is valid for writes of `len`
let ptr_new: *mut u8 = ((ptr as usize) + i) as *mut u8;
// SAFETY: `ptr` is valid for writes of `len` bytes, so `ptr_new` is valid for a
// byte write
unsafe {
core::ptr::write_volatile(ptr_new, 0u8);
}
}
}
pub struct ZeroizeAlloc;
unsafe impl GlobalAlloc for ZeroizeAlloc {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// SAFETY: upheld by caller
unsafe { System.alloc(layout) }
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// securely wipe the deallocated memory
// SAFETY: `ptr` is valid for writes of `layout.size()` bytes since it was
// previously successfully allocated (by the safety assumption on this function)
// and not yet deallocated
unsafe {
volatile_write_zeroize_mem(ptr, layout.size());
}
// SAFETY: upheld by caller
unsafe { System.dealloc(ptr, layout) }
}
unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
// SAFETY: upheld by caller
unsafe { System.alloc_zeroed(layout) }
}
}
#[global_allocator]
static GLOBAL: ZeroizeAlloc = ZeroizeAlloc;
fn main() {
let layout = Layout::new::<[u8; 16]>();
let ptr = unsafe { std::alloc::alloc_zeroed(layout) };
unsafe {
std::alloc::dealloc(ptr, layout);
}
}

View file

@ -1,6 +1,8 @@
#![feature(type_alias_impl_trait)]
trait T { type Item; }
trait T {
type Item;
}
type Alias<'a> = impl T<Item = &'a ()>;

View file

@ -5,10 +5,30 @@
// windows linker section, we can run this test on windows again.
//@ignore-target-windows
// Plumbing to let us use `writeln!` to host stdout:
extern "Rust" {
fn miri_write_to_stdout(bytes: &[u8]);
}
struct Host;
use core::fmt::Write;
impl Write for Host {
fn write_str(&mut self, s: &str) -> core::fmt::Result {
unsafe {
miri_write_to_stdout(s.as_bytes());
}
Ok(())
}
}
// Aaaand the test:
#[start]
fn start(_: isize, _: *const *const u8) -> isize {
for _ in 0..10 {}
writeln!(Host, "hello, world!").unwrap();
0
}

View file

@ -0,0 +1 @@
hello, world!

View file

@ -2,21 +2,14 @@
//@compile-flags: -Zmiri-isolation-error=warn-nobacktrace
//@normalize-stderr-test: "(stat(x)?)" -> "$$STAT"
use std::ffi::CString;
use std::fs::{self, File};
use std::io::{Error, ErrorKind};
use std::io::ErrorKind;
use std::os::unix;
fn main() {
// test `open`
assert_eq!(File::create("foo.txt").unwrap_err().kind(), ErrorKind::PermissionDenied);
// test `fcntl`
unsafe {
assert_eq!(libc::fcntl(1, libc::F_DUPFD, 0), -1);
assert_eq!(Error::last_os_error().raw_os_error(), Some(libc::EPERM));
}
// test `unlink`
assert_eq!(fs::remove_file("foo.txt").unwrap_err().kind(), ErrorKind::PermissionDenied);
@ -26,17 +19,8 @@ fn main() {
ErrorKind::PermissionDenied
);
// test `readlink`
let symlink_c_str = CString::new("foo.txt").unwrap();
let mut buf = vec![0; "foo_link.txt".len() + 1];
unsafe {
assert_eq!(libc::readlink(symlink_c_str.as_ptr(), buf.as_mut_ptr(), buf.len()), -1);
assert_eq!(Error::last_os_error().raw_os_error(), Some(libc::EACCES));
}
// test `stat`
assert_eq!(fs::metadata("foo.txt").unwrap_err().kind(), ErrorKind::PermissionDenied);
assert_eq!(Error::last_os_error().raw_os_error(), Some(libc::EACCES));
// test `rename`
assert_eq!(fs::rename("a.txt", "b.txt").unwrap_err().kind(), ErrorKind::PermissionDenied);
@ -49,5 +33,4 @@ fn main() {
// test `opendir`
assert_eq!(fs::read_dir("foo/bar").unwrap_err().kind(), ErrorKind::PermissionDenied);
assert_eq!(Error::last_os_error().raw_os_error(), Some(libc::EACCES));
}

View file

@ -1,13 +1,9 @@
warning: `open` was made to return an error due to isolation
warning: `fcntl` was made to return an error due to isolation
warning: `unlink` was made to return an error due to isolation
warning: `symlink` was made to return an error due to isolation
warning: `readlink` was made to return an error due to isolation
warning: `$STAT` was made to return an error due to isolation
warning: `rename` was made to return an error due to isolation

View file

@ -3,14 +3,15 @@
#![feature(io_error_more)]
#![feature(io_error_uncategorized)]
#![feature(is_terminal)]
use std::collections::HashMap;
use std::ffi::{CString, OsString};
use std::ffi::OsString;
use std::fs::{
create_dir, read_dir, read_link, remove_dir, remove_dir_all, remove_file, rename, File,
OpenOptions,
canonicalize, create_dir, read_dir, read_link, remove_dir, remove_dir_all, remove_file, rename,
File, OpenOptions,
};
use std::io::{Error, ErrorKind, Read, Result, Seek, SeekFrom, Write};
use std::io::{Error, ErrorKind, IsTerminal, Read, Result, Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
fn main() {
@ -26,13 +27,7 @@ fn main() {
test_rename();
test_directory();
test_canonicalize();
test_dup_stdout_stderr();
test_from_raw_os_error();
// These all require unix, if the test is changed to no longer `ignore-windows`, move these to a unix test
test_file_open_unix_allow_two_args();
test_file_open_unix_needs_three_args();
test_file_open_unix_extra_third_arg();
}
fn tmp() -> PathBuf {
@ -97,43 +92,12 @@ fn test_file() {
file.read_to_end(&mut contents).unwrap();
assert_eq!(bytes, contents.as_slice());
assert!(!file.is_terminal());
// Removing file should succeed.
remove_file(&path).unwrap();
}
fn test_file_open_unix_allow_two_args() {
use std::os::unix::ffi::OsStrExt;
let path = prepare_with_content("test_file_open_unix_allow_two_args.txt", &[]);
let mut name = path.into_os_string();
name.push("\0");
let name_ptr = name.as_bytes().as_ptr().cast::<libc::c_char>();
let _fd = unsafe { libc::open(name_ptr, libc::O_RDONLY) };
}
fn test_file_open_unix_needs_three_args() {
use std::os::unix::ffi::OsStrExt;
let path = prepare_with_content("test_file_open_unix_needs_three_args.txt", &[]);
let mut name = path.into_os_string();
name.push("\0");
let name_ptr = name.as_bytes().as_ptr().cast::<libc::c_char>();
let _fd = unsafe { libc::open(name_ptr, libc::O_CREAT, 0o666) };
}
fn test_file_open_unix_extra_third_arg() {
use std::os::unix::ffi::OsStrExt;
let path = prepare_with_content("test_file_open_unix_extra_third_arg.txt", &[]);
let mut name = path.into_os_string();
name.push("\0");
let name_ptr = name.as_bytes().as_ptr().cast::<libc::c_char>();
let _fd = unsafe { libc::open(name_ptr, libc::O_RDONLY, 42) };
}
fn test_file_clone() {
let bytes = b"Hello, World!\n";
let path = prepare_with_content("miri_test_fs_file_clone.txt", bytes);
@ -279,46 +243,6 @@ fn test_symlink() {
symlink_file.read_to_end(&mut contents).unwrap();
assert_eq!(bytes, contents.as_slice());
#[cfg(unix)]
{
use std::os::unix::ffi::OsStrExt;
let expected_path = path.as_os_str().as_bytes();
// Test that the expected string gets written to a buffer of proper
// length, and that a trailing null byte is not written.
let symlink_c_str = CString::new(symlink_path.as_os_str().as_bytes()).unwrap();
let symlink_c_ptr = symlink_c_str.as_ptr();
// Make the buf one byte larger than it needs to be,
// and check that the last byte is not overwritten.
let mut large_buf = vec![0xFF; expected_path.len() + 1];
let res = unsafe {
libc::readlink(symlink_c_ptr, large_buf.as_mut_ptr().cast(), large_buf.len())
};
// Check that the resolved path was properly written into the buf.
assert_eq!(&large_buf[..(large_buf.len() - 1)], expected_path);
assert_eq!(large_buf.last(), Some(&0xFF));
assert_eq!(res, large_buf.len() as isize - 1);
// Test that the resolved path is truncated if the provided buffer
// is too small.
let mut small_buf = [0u8; 2];
let res = unsafe {
libc::readlink(symlink_c_ptr, small_buf.as_mut_ptr().cast(), small_buf.len())
};
assert_eq!(small_buf, &expected_path[..small_buf.len()]);
assert_eq!(res, small_buf.len() as isize);
// Test that we report a proper error for a missing path.
let bad_path = CString::new("MIRI_MISSING_FILE_NAME").unwrap();
let res = unsafe {
libc::readlink(bad_path.as_ptr(), small_buf.as_mut_ptr().cast(), small_buf.len())
};
assert_eq!(res, -1);
assert_eq!(Error::last_os_error().kind(), ErrorKind::NotFound);
}
// Test that metadata of a symbolic link (i.e., the file it points to) is correct.
check_metadata(bytes, &symlink_path).unwrap();
// Test that the metadata of a symbolic link is correct when not following it.
@ -369,7 +293,6 @@ fn test_rename() {
}
fn test_canonicalize() {
use std::fs::canonicalize;
let dir_path = prepare_dir("miri_test_fs_dir");
create_dir(&dir_path).unwrap();
let path = dir_path.join("test_file");
@ -379,11 +302,6 @@ fn test_canonicalize() {
assert_eq!(p.to_string_lossy().find('.'), None);
remove_dir_all(&dir_path).unwrap();
// Make sure we get an error for long paths.
use std::convert::TryInto;
let too_long = "x/".repeat(libc::PATH_MAX.try_into().unwrap());
assert!(canonicalize(too_long).is_err());
}
fn test_directory() {
@ -440,16 +358,6 @@ fn test_directory() {
remove_dir_all(&dir_path).unwrap();
}
fn test_dup_stdout_stderr() {
let bytes = b"hello dup fd\n";
unsafe {
let new_stdout = libc::fcntl(1, libc::F_DUPFD, 0);
let new_stderr = libc::fcntl(2, libc::F_DUPFD, 0);
libc::write(new_stdout, bytes.as_ptr() as *const libc::c_void, bytes.len());
libc::write(new_stderr, bytes.as_ptr() as *const libc::c_void, bytes.len());
}
}
fn test_from_raw_os_error() {
let code = 6; // not a code that std or Miri know
let error = Error::from_raw_os_error(code);

View file

@ -0,0 +1,9 @@
#![feature(is_terminal)]
use std::io::IsTerminal;
fn main() {
// We can't really assume that this is truly a terminal, and anyway on Windows Miri will always
// return `false` here, but we can check that the call succeeds.
std::io::stdout().is_terminal();
}

View file

@ -0,0 +1,19 @@
//@compile-flags: -Zmiri-retag-fields=scalar
struct Newtype<'a>(&'a mut i32, i32, i32);
fn dealloc_while_running(_n: Newtype<'_>, dealloc: impl FnOnce()) {
dealloc();
}
// Make sure that with -Zmiri-retag-fields=scalar, we do *not* retag the fields of `Newtype`.
fn main() {
let ptr = Box::into_raw(Box::new(0i32));
#[rustfmt::skip] // I like my newlines
unsafe {
dealloc_while_running(
Newtype(&mut *ptr, 0, 0),
|| drop(Box::from_raw(ptr)),
)
};
}

View file

@ -0,0 +1,29 @@
use std::{
alloc::{self, Layout},
mem::ManuallyDrop,
};
extern "Rust" {
fn miri_get_alloc_id(ptr: *const u8) -> u64;
fn miri_print_stacks(alloc_id: u64);
}
fn main() {
let ptr = unsafe { alloc::alloc(Layout::new::<u8>()) };
let alloc_id = unsafe { miri_get_alloc_id(ptr) };
unsafe { miri_print_stacks(alloc_id) };
assert!(!ptr.is_null());
unsafe { miri_print_stacks(alloc_id) };
unsafe { *ptr = 42 };
unsafe { miri_print_stacks(alloc_id) };
let _b = unsafe { ManuallyDrop::new(Box::from_raw(ptr)) };
unsafe { miri_print_stacks(alloc_id) };
let _ptr = unsafe { &*ptr };
unsafe { miri_print_stacks(alloc_id) };
unsafe { alloc::dealloc(ptr, Layout::new::<u8>()) };
}

View file

@ -0,0 +1,5 @@
0..1: [ SharedReadWrite<TAG> ]
0..1: [ SharedReadWrite<TAG> ]
0..1: [ SharedReadWrite<TAG> ]
0..1: [ SharedReadWrite<TAG> Unique<TAG> Unique<TAG> Unique<TAG> Unique<TAG> Unique<TAG> ]
0..1: [ SharedReadWrite<TAG> Disabled<TAG> Disabled<TAG> Disabled<TAG> Disabled<TAG> Disabled<TAG> SharedReadOnly<TAG> ]

View file

@ -0,0 +1,21 @@
//@compile-flags: -Zmiri-permissive-provenance
#![feature(strict_provenance)]
use std::ptr;
fn main() {
let mut v = 1u8;
let ptr = &mut v as *mut u8;
// Expose the allocation and use the exposed pointer, creating an unknown bottom
unsafe {
let p: *mut u8 = ptr::from_exposed_addr::<u8>(ptr.expose_addr()) as *mut u8;
*p = 1;
}
// Pile on a lot of SharedReadOnly at the top of the stack
let r = &v;
for _ in 0..1024 {
let _x = &*r;
}
}