Auto merge of #107009 - cjgillot:jump-threading, r=pnkfelix

Implement jump threading MIR opt

This pass is an attempt to generalize `ConstGoto` and `SeparateConstSwitch` passes into a more complete jump threading pass.

This pass is rather heavy, as it performs a truncated backwards DFS on MIR starting from each `SwitchInt` terminator. This backwards DFS remains very limited, as it only walks through `Goto` terminators.

It is built to support constants and discriminants, and propagating them through a very limited set of operations.

The pass successfully manages to disentangle the `Some(x?)` use case and the DFA use case. It still needs a few tests before being ready.
This commit is contained in:
bors 2023-10-23 18:05:44 +00:00
commit 1322f92634
31 changed files with 2799 additions and 139 deletions

View file

@ -4279,6 +4279,7 @@ dependencies = [
"coverage_test_macros",
"either",
"itertools",
"rustc_arena",
"rustc_ast",
"rustc_attr",
"rustc_const_eval",

View file

@ -28,6 +28,15 @@ pub fn static_if(value: u128, then: BasicBlock, else_: BasicBlock) -> Self {
Self { values: smallvec![value], targets: smallvec![then, else_] }
}
/// Inverse of `SwitchTargets::static_if`.
pub fn as_static_if(&self) -> Option<(u128, BasicBlock, BasicBlock)> {
if let &[value] = &self.values[..] && let &[then, else_] = &self.targets[..] {
Some((value, then, else_))
} else {
None
}
}
/// Returns the fallback target that is jumped to when none of the values match the operand.
pub fn otherwise(&self) -> BasicBlock {
*self.targets.last().unwrap()

View file

@ -463,7 +463,19 @@ fn clone_from(&mut self, source: &Self) {
}
}
impl<V: Clone + HasTop + HasBottom> State<V> {
impl<V: Clone> State<V> {
/// Create a reachable state assigning `init` to every value tracked by `map`.
pub fn new(init: V, map: &Map) -> State<V> {
    State(StateData::Reachable(IndexVec::from_elem_n(init, map.value_count)))
}
/// Whether `f` holds for every tracked value. Vacuously true when unreachable.
pub fn all(&self, f: impl Fn(&V) -> bool) -> bool {
    match &self.0 {
        StateData::Unreachable => true,
        StateData::Reachable(values) => values.iter().all(f),
    }
}
/// Whether the state is still considered reachable.
pub fn is_reachable(&self) -> bool {
    match self.0 {
        StateData::Reachable(_) => true,
        StateData::Unreachable => false,
    }
}
@ -472,7 +484,10 @@ pub fn mark_unreachable(&mut self) {
self.0 = StateData::Unreachable;
}
pub fn flood_all(&mut self) {
/// Assign `TOP` to every tracked value, discarding all information.
pub fn flood_all(&mut self)
where
    V: HasTop,
{
    self.flood_all_with(V::TOP)
}
@ -481,28 +496,52 @@ pub fn flood_all_with(&mut self, value: V) {
values.raw.fill(value);
}
/// Assign `value` to all places that are contained in `place` or may alias one.
pub fn flood_with(&mut self, place: PlaceRef<'_>, map: &Map, value: V) {
let StateData::Reachable(values) = &mut self.0 else { return };
map.for_each_aliasing_place(place, None, &mut |vi| {
values[vi] = value.clone();
});
self.flood_with_tail_elem(place, None, map, value)
}
pub fn flood(&mut self, place: PlaceRef<'_>, map: &Map) {
/// Assign `TOP` to all places that are contained in `place` or may alias one.
///
/// Shorthand for `flood_with(place, map, V::TOP)`.
pub fn flood(&mut self, place: PlaceRef<'_>, map: &Map)
where
    V: HasTop,
{
    self.flood_with(place, map, V::TOP)
}
/// Assign `value` to the discriminant of `place` and all places that may alias it.
pub fn flood_discr_with(&mut self, place: PlaceRef<'_>, map: &Map, value: V) {
let StateData::Reachable(values) = &mut self.0 else { return };
map.for_each_aliasing_place(place, Some(TrackElem::Discriminant), &mut |vi| {
values[vi] = value.clone();
});
self.flood_with_tail_elem(place, Some(TrackElem::Discriminant), map, value)
}
pub fn flood_discr(&mut self, place: PlaceRef<'_>, map: &Map) {
/// Assign `TOP` to the discriminant of `place` and all places that may alias it.
///
/// Shorthand for `flood_discr_with(place, map, V::TOP)`.
pub fn flood_discr(&mut self, place: PlaceRef<'_>, map: &Map)
where
    V: HasTop,
{
    self.flood_discr_with(place, map, V::TOP)
}
/// This method is the most general version of the `flood_*` methods.
///
/// Assign `value` to the given place and all places that may alias it. In particular,
/// when the given place has a variant downcast, the aliasing places include all the
/// other variants.
///
/// `tail_elem` allows supporting discriminants that are not a place in MIR, but that
/// we track as such.
pub fn flood_with_tail_elem(
    &mut self,
    place: PlaceRef<'_>,
    tail_elem: Option<TrackElem>,
    map: &Map,
    value: V,
) {
    // An unreachable state carries no values, so there is nothing to overwrite.
    match &mut self.0 {
        StateData::Reachable(values) => {
            map.for_each_aliasing_place(place, tail_elem, &mut |vi| values[vi] = value.clone())
        }
        StateData::Unreachable => {}
    }
}
/// Low-level method that assigns to a place.
/// This does nothing if the place is not tracked.
///
@ -553,7 +592,10 @@ pub fn insert_place_idx(&mut self, target: PlaceIndex, source: PlaceIndex, map:
}
/// Helper method to interpret `target = result`.
pub fn assign(&mut self, target: PlaceRef<'_>, result: ValueOrPlace<V>, map: &Map) {
pub fn assign(&mut self, target: PlaceRef<'_>, result: ValueOrPlace<V>, map: &Map)
where
V: HasTop,
{
self.flood(target, map);
if let Some(target) = map.find(target) {
self.insert_idx(target, result, map);
@ -561,36 +603,93 @@ pub fn assign(&mut self, target: PlaceRef<'_>, result: ValueOrPlace<V>, map: &Ma
}
/// Helper method for assignments to a discriminant.
pub fn assign_discr(&mut self, target: PlaceRef<'_>, result: ValueOrPlace<V>, map: &Map) {
/// Helper method to interpret an assignment to the discriminant of `target`.
pub fn assign_discr(&mut self, target: PlaceRef<'_>, result: ValueOrPlace<V>, map: &Map)
where
    V: HasTop,
{
    // Invalidate anything that may alias the discriminant before writing it.
    self.flood_discr(target, map);
    if let Some(target) = map.find_discr(target) {
        self.insert_idx(target, result, map);
    }
}
/// Retrieve the value stored for a place, or if it is not tracked.
pub fn get(&self, place: PlaceRef<'_>, map: &Map) -> V {
map.find(place).map(|place| self.get_idx(place, map)).unwrap_or(V::TOP)
/// Retrieve the value stored for a place, or `None` if it is not tracked.
pub fn try_get(&self, place: PlaceRef<'_>, map: &Map) -> Option<V> {
    self.try_get_idx(map.find(place)?, map)
}
/// Retrieve the value stored for a place, or if it is not tracked.
pub fn get_discr(&self, place: PlaceRef<'_>, map: &Map) -> V {
match map.find_discr(place) {
Some(place) => self.get_idx(place, map),
None => V::TOP,
/// Retrieve the discriminant stored for a place, or `None` if it is not tracked.
pub fn try_get_discr(&self, place: PlaceRef<'_>, map: &Map) -> Option<V> {
    map.find_discr(place).and_then(|idx| self.try_get_idx(idx, map))
}
/// Retrieve the slice length stored for a place, or `None` if it is not tracked.
pub fn try_get_len(&self, place: PlaceRef<'_>, map: &Map) -> Option<V> {
    map.find_len(place).and_then(|idx| self.try_get_idx(idx, map))
}
/// Retrieve the value stored for a place index, or `None` if it is not tracked
/// or the state is unreachable.
pub fn try_get_idx(&self, place: PlaceIndex, map: &Map) -> Option<V> {
    if let StateData::Reachable(values) = &self.0 {
        let value_index = map.places[place].value_index?;
        Some(values[value_index].clone())
    } else {
        None
    }
}
/// Retrieve the value stored for a place, or if it is not tracked.
pub fn get_len(&self, place: PlaceRef<'_>, map: &Map) -> V {
match map.find_len(place) {
Some(place) => self.get_idx(place, map),
None => V::TOP,
/// Retrieve the value stored for a place, or ⊤ if it is not tracked.
///
/// This method returns ⊥ if the state is unreachable.
pub fn get(&self, place: PlaceRef<'_>, map: &Map) -> V
where
    V: HasBottom + HasTop,
{
    match &self.0 {
        StateData::Reachable(_) => self.try_get(place, map).unwrap_or(V::TOP),
        // Because this is unreachable, we can return any value we want.
        StateData::Unreachable => V::BOTTOM,
    }
}
/// Retrieve the discriminant stored for a place, or ⊤ if it is not tracked.
///
/// This method returns ⊥ if the current state is unreachable.
pub fn get_discr(&self, place: PlaceRef<'_>, map: &Map) -> V
where
    V: HasBottom + HasTop,
{
    match &self.0 {
        StateData::Reachable(_) => self.try_get_discr(place, map).unwrap_or(V::TOP),
        // Because this is unreachable, we can return any value we want.
        StateData::Unreachable => V::BOTTOM,
    }
}
/// Retrieve the slice length stored for a place, or ⊤ if it is not tracked.
///
/// This method returns ⊥ if the current state is unreachable.
pub fn get_len(&self, place: PlaceRef<'_>, map: &Map) -> V
where
    V: HasBottom + HasTop,
{
    match &self.0 {
        StateData::Reachable(_) => self.try_get_len(place, map).unwrap_or(V::TOP),
        // Because this is unreachable, we can return any value we want.
        StateData::Unreachable => V::BOTTOM,
    }
}
/// Retrieve the value stored for a place index, or if it is not tracked.
pub fn get_idx(&self, place: PlaceIndex, map: &Map) -> V {
///
/// This method returns ⊥ the current state is unreachable.
pub fn get_idx(&self, place: PlaceIndex, map: &Map) -> V
where
V: HasBottom + HasTop,
{
match &self.0 {
StateData::Reachable(values) => {
map.places[place].value_index.map(|v| values[v].clone()).unwrap_or(V::TOP)

View file

@ -11,6 +11,7 @@ smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
tracing = "0.1"
either = "1"
rustc_ast = { path = "../rustc_ast" }
rustc_arena = { path = "../rustc_arena" }
rustc_attr = { path = "../rustc_attr" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }

View file

@ -0,0 +1,98 @@
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt};
/// Cost of an ordinary statement or cheap terminator, in abstract units.
const INSTR_COST: usize = 5;
/// Cost of a call-like terminator (calls, asserts, drops that need dropping).
const CALL_PENALTY: usize = 25;
/// Extra cost when a terminator has a cleanup (landing pad) edge.
const LANDINGPAD_PENALTY: usize = 50;
/// Cost of an `UnwindResume` terminator.
const RESUME_PENALTY: usize = 45;
/// Verify that the callee body is compatible with the caller.
#[derive(Clone)]
pub(crate) struct CostChecker<'b, 'tcx> {
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    // Accumulated cost, in the abstract units defined by the consts above.
    cost: usize,
    // The body whose statements and terminators are being costed.
    callee_body: &'b Body<'tcx>,
    // When `Some`, types are instantiated through this instance before being
    // queried (see `instantiate_ty`); `None` leaves types as-is.
    instance: Option<ty::Instance<'tcx>>,
}
impl<'b, 'tcx> CostChecker<'b, 'tcx> {
    /// Create a checker with zero accumulated cost.
    pub fn new(
        tcx: TyCtxt<'tcx>,
        param_env: ParamEnv<'tcx>,
        instance: Option<ty::Instance<'tcx>>,
        callee_body: &'b Body<'tcx>,
    ) -> CostChecker<'b, 'tcx> {
        CostChecker { tcx, param_env, callee_body, instance, cost: 0 }
    }

    /// The total cost accumulated so far.
    pub fn cost(&self) -> usize {
        self.cost
    }

    /// Instantiate `v` through `self.instance` when one is provided;
    /// otherwise return `v` unchanged.
    fn instantiate_ty(&self, v: Ty<'tcx>) -> Ty<'tcx> {
        match self.instance {
            Some(instance) => instance.instantiate_mir(self.tcx, ty::EarlyBinder::bind(&v)),
            None => v,
        }
    }
}
impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
    fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
        // Don't count StorageLive/StorageDead in the inlining cost.
        match statement.kind {
            StatementKind::StorageLive(_)
            | StatementKind::StorageDead(_)
            | StatementKind::Deinit(_)
            | StatementKind::Nop => {}
            _ => self.cost += INSTR_COST,
        }
    }

    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
        let tcx = self.tcx;
        match terminator.kind {
            TerminatorKind::Drop { ref place, unwind, .. } => {
                // If the place doesn't actually need dropping, treat it like a regular goto.
                let ty = self.instantiate_ty(place.ty(self.callee_body, tcx).ty);
                if ty.needs_drop(tcx, self.param_env) {
                    self.cost += CALL_PENALTY;
                    // A cleanup edge means an extra landing pad in the caller.
                    if let UnwindAction::Cleanup(_) = unwind {
                        self.cost += LANDINGPAD_PENALTY;
                    }
                } else {
                    self.cost += INSTR_COST;
                }
            }
            TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => {
                let fn_ty = self.instantiate_ty(f.const_.ty());
                self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
                    // Don't give intrinsics the extra penalty for calls
                    INSTR_COST
                } else {
                    CALL_PENALTY
                };
                if let UnwindAction::Cleanup(_) = unwind {
                    self.cost += LANDINGPAD_PENALTY;
                }
            }
            TerminatorKind::Assert { unwind, .. } => {
                // Asserts lower to a call to the panic machinery on failure.
                self.cost += CALL_PENALTY;
                if let UnwindAction::Cleanup(_) = unwind {
                    self.cost += LANDINGPAD_PENALTY;
                }
            }
            TerminatorKind::UnwindResume => self.cost += RESUME_PENALTY,
            TerminatorKind::InlineAsm { unwind, .. } => {
                self.cost += INSTR_COST;
                if let UnwindAction::Cleanup(_) = unwind {
                    self.cost += LANDINGPAD_PENALTY;
                }
            }
            // Everything else counts as a single cheap instruction.
            _ => self.cost += INSTR_COST,
        }
    }
}

View file

@ -14,6 +14,7 @@
use rustc_target::abi::FieldIdx;
use rustc_target::spec::abi::Abi;
use crate::cost_checker::CostChecker;
use crate::simplify::{remove_dead_blocks, CfgSimplifier};
use crate::util;
use crate::MirPass;
@ -22,11 +23,6 @@
pub(crate) mod cycle;
const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const LANDINGPAD_PENALTY: usize = 50;
const RESUME_PENALTY: usize = 45;
const TOP_DOWN_DEPTH_LIMIT: usize = 5;
pub struct Inline;
@ -479,13 +475,8 @@ fn check_mir_body(
// FIXME: Give a bonus to functions with only a single caller
let mut checker = CostChecker {
tcx: self.tcx,
param_env: self.param_env,
instance: callsite.callee,
callee_body,
cost: 0,
};
let mut checker =
CostChecker::new(self.tcx, self.param_env, Some(callsite.callee), callee_body);
// Traverse the MIR manually so we can account for the effects of inlining on the CFG.
let mut work_list = vec![START_BLOCK];
@ -530,7 +521,7 @@ fn check_mir_body(
// That attribute is often applied to very large functions that exceed LLVM's (very
// generous) inlining threshold. Such functions are very poor MIR inlining candidates.
// Always inlining #[inline(always)] functions in MIR, on net, slows down the compiler.
let cost = checker.cost;
let cost = checker.cost();
if cost <= threshold {
debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
Ok(())
@ -803,81 +794,6 @@ fn new_call_temp(
}
}
/// Verify that the callee body is compatible with the caller.
///
/// This visitor mostly computes the inlining cost,
/// but also needs to verify that types match because of normalization failure.
struct CostChecker<'b, 'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
cost: usize,
callee_body: &'b Body<'tcx>,
instance: ty::Instance<'tcx>,
}
impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
// Don't count StorageLive/StorageDead in the inlining cost.
match statement.kind {
StatementKind::StorageLive(_)
| StatementKind::StorageDead(_)
| StatementKind::Deinit(_)
| StatementKind::Nop => {}
_ => self.cost += INSTR_COST,
}
}
fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
let tcx = self.tcx;
match terminator.kind {
TerminatorKind::Drop { ref place, unwind, .. } => {
// If the place doesn't actually need dropping, treat it like a regular goto.
let ty = self.instance.instantiate_mir(
tcx,
ty::EarlyBinder::bind(&place.ty(self.callee_body, tcx).ty),
);
if ty.needs_drop(tcx, self.param_env) {
self.cost += CALL_PENALTY;
if let UnwindAction::Cleanup(_) = unwind {
self.cost += LANDINGPAD_PENALTY;
}
} else {
self.cost += INSTR_COST;
}
}
TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => {
let fn_ty =
self.instance.instantiate_mir(tcx, ty::EarlyBinder::bind(&f.const_.ty()));
self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind()
&& tcx.is_intrinsic(def_id)
{
// Don't give intrinsics the extra penalty for calls
INSTR_COST
} else {
CALL_PENALTY
};
if let UnwindAction::Cleanup(_) = unwind {
self.cost += LANDINGPAD_PENALTY;
}
}
TerminatorKind::Assert { unwind, .. } => {
self.cost += CALL_PENALTY;
if let UnwindAction::Cleanup(_) = unwind {
self.cost += LANDINGPAD_PENALTY;
}
}
TerminatorKind::UnwindResume => self.cost += RESUME_PENALTY,
TerminatorKind::InlineAsm { unwind, .. } => {
self.cost += INSTR_COST;
if let UnwindAction::Cleanup(_) = unwind {
self.cost += LANDINGPAD_PENALTY;
}
}
_ => self.cost += INSTR_COST,
}
}
}
/**
* Integrator.
*

View file

@ -0,0 +1,759 @@
//! A jump threading optimization.
//!
//! This optimization seeks to replace join-then-switch control flow patterns by straight jumps
//! X = 0 X = 0
//! ------------\ /-------- ------------
//! X = 1 X----X SwitchInt(X) => X = 1
//! ------------/ \-------- ------------
//!
//!
//! We proceed by walking the cfg backwards starting from each `SwitchInt` terminator,
//! looking for assignments that will turn the `SwitchInt` into a simple `Goto`.
//!
//! The algorithm maintains a set of replacement conditions:
//! - `conditions[place]` contains `Condition { value, polarity: Eq, target }`
//! if assigning `value` to `place` turns the `SwitchInt` into `Goto { target }`.
//! - `conditions[place]` contains `Condition { value, polarity: Ne, target }`
//! if assigning anything different from `value` to `place` turns the `SwitchInt`
//! into `Goto { target }`.
//!
//! In this file, we denote as `place ?= value` the existence of a replacement condition
//! on `place` with given `value`, irrespective of the polarity and target of that
//! replacement condition.
//!
//! We then walk the CFG backwards transforming the set of conditions.
//! When we find a fulfilling assignment, we record a `ThreadingOpportunity`.
//! All `ThreadingOpportunity`s are applied to the body, by duplicating blocks if required.
//!
//! The optimization search can be very heavy, as it performs a DFS on MIR starting from
//! each `SwitchInt` terminator. To manage the complexity, we:
//! - bound the maximum depth by a constant `MAX_BACKTRACK`;
//! - we only traverse `Goto` terminators.
//!
//! We try to avoid creating irreducible control-flow by not threading through a loop header.
//!
//! Likewise, applying the optimisation can create a lot of new MIR, so we bound the instruction
//! cost by `MAX_COST`.
use rustc_arena::DroplessArena;
use rustc_data_structures::fx::FxHashSet;
use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
use rustc_mir_dataflow::value_analysis::{Map, PlaceIndex, State, TrackElem};
use crate::cost_checker::CostChecker;
use crate::MirPass;
pub struct JumpThreading;

/// Maximum number of blocks the backward search walks from a `SwitchInt`.
const MAX_BACKTRACK: usize = 5;
/// Maximum accumulated `CostChecker` cost before the search is abandoned.
const MAX_COST: usize = 100;
/// Maximum number of places tracked by the `Map`.
const MAX_PLACES: usize = 100;
impl<'tcx> MirPass<'tcx> for JumpThreading {
    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
        sess.mir_opt_level() >= 4
    }

    // FIX: `skip_all level = "debug"` is missing the comma separating the two
    // meta items; tracing-attributes requires `skip_all, level = "debug"`.
    #[instrument(skip_all, level = "debug")]
    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        let def_id = body.source.def_id();
        debug!(?def_id);

        let param_env = tcx.param_env_reveal_all_normalized(def_id);
        let map = Map::new(tcx, body, Some(MAX_PLACES));
        let loop_headers = loop_headers(body);

        let arena = DroplessArena::default();
        let mut finder = TOFinder {
            tcx,
            param_env,
            body,
            arena: &arena,
            map: &map,
            loop_headers: &loop_headers,
            opportunities: Vec::new(),
        };

        // Seed a backward search from each eligible `SwitchInt` terminator.
        for (bb, bbdata) in body.basic_blocks.iter_enumerated() {
            debug!(?bb, term = ?bbdata.terminator());
            // Never start from a cleanup block or a loop header.
            if bbdata.is_cleanup || loop_headers.contains(bb) {
                continue;
            }
            let Some((discr, targets)) = bbdata.terminator().kind.as_switch() else { continue };
            let Some(discr) = discr.place() else { continue };
            debug!(?discr, ?bb);

            let discr_ty = discr.ty(body, tcx).ty;
            let Ok(discr_layout) = tcx.layout_of(param_env.and(discr_ty)) else { continue };

            // The discriminant must be a tracked place for the search to be useful.
            let Some(discr) = finder.map.find(discr.as_ref()) else { continue };
            debug!(?discr);

            let cost = CostChecker::new(tcx, param_env, None, body);

            let mut state = State::new(ConditionSet::default(), &finder.map);

            // Build the initial condition set: for a two-way `if`, knowing the value
            // equals OR differs from the tested constant fully determines the target;
            // otherwise only equality with each arm's value does.
            let conds = if let Some((value, then, else_)) = targets.as_static_if() {
                let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else {
                    continue;
                };
                arena.alloc_from_iter([
                    Condition { value, polarity: Polarity::Eq, target: then },
                    Condition { value, polarity: Polarity::Ne, target: else_ },
                ])
            } else {
                arena.alloc_from_iter(targets.iter().filter_map(|(value, target)| {
                    let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
                    Some(Condition { value, polarity: Polarity::Eq, target })
                }))
            };
            let conds = ConditionSet(conds);
            state.insert_value_idx(discr, conds, &finder.map);

            finder.find_opportunity(bb, state, cost, 0);
        }

        let opportunities = finder.opportunities;
        debug!(?opportunities);
        if opportunities.is_empty() {
            return;
        }

        // Verify that we do not thread through a loop header.
        for to in opportunities.iter() {
            assert!(to.chain.iter().all(|&block| !loop_headers.contains(block)));
        }

        OpportunitySet::new(body, opportunities).apply(body);
    }
}
/// A found opportunity: the `SwitchInt` reached through `chain` can be replaced
/// by a direct jump to `target`.
#[derive(Debug)]
struct ThreadingOpportunity {
    /// The list of `BasicBlock`s from the one that found the opportunity to the `SwitchInt`.
    chain: Vec<BasicBlock>,
    /// The `SwitchInt` will be replaced by `Goto { target }`.
    target: BasicBlock,
}
/// Worker that performs the backward search for `ThreadingOpportunity`s ("TO"s).
struct TOFinder<'tcx, 'a> {
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    body: &'a Body<'tcx>,
    map: &'a Map,
    // Blocks that head a loop; the search refuses to thread through them.
    loop_headers: &'a BitSet<BasicBlock>,
    /// We use an arena to avoid cloning the slices when cloning `state`.
    arena: &'a DroplessArena,
    // Accumulated opportunities, in discovery order.
    opportunities: Vec<ThreadingOpportunity>,
}
/// Represent the following statement. If we can prove that the current local is equal/not-equal
/// to `value`, jump to `target`.
#[derive(Copy, Clone, Debug)]
struct Condition {
    value: ScalarInt,
    polarity: Polarity,
    target: BasicBlock,
}

/// Whether a `Condition` is fulfilled by equality (`Eq`) or inequality (`Ne`)
/// with its `value`.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum Polarity {
    Ne,
    Eq,
}
impl Condition {
fn matches(&self, value: ScalarInt) -> bool {
(self.value == value) == (self.polarity == Polarity::Eq)
}
fn inv(mut self) -> Self {
self.polarity = match self.polarity {
Polarity::Eq => Polarity::Ne,
Polarity::Ne => Polarity::Eq,
};
self
}
}
/// An arena-allocated set of `Condition`s attached to a single tracked place.
/// `Copy` because the slice lives in the arena, so cloning a `State` is cheap.
#[derive(Copy, Clone, Debug, Default)]
struct ConditionSet<'a>(&'a [Condition]);

impl<'a> ConditionSet<'a> {
    fn iter(self) -> impl Iterator<Item = Condition> + 'a {
        self.0.iter().copied()
    }

    /// Iterate over the conditions fulfilled by observing `value`.
    fn iter_matches(self, value: ScalarInt) -> impl Iterator<Item = Condition> + 'a {
        self.iter().filter(move |c| c.matches(value))
    }

    /// Build a new set by applying `f` to every condition, allocating in `arena`.
    fn map(self, arena: &'a DroplessArena, f: impl Fn(Condition) -> Condition) -> ConditionSet<'a> {
        ConditionSet(arena.alloc_from_iter(self.iter().map(f)))
    }
}
impl<'tcx, 'a> TOFinder<'tcx, 'a> {
/// Whether no tracked place carries any condition: nothing left to fulfill.
fn is_empty(&self, state: &State<ConditionSet<'a>>) -> bool {
    state.all(|cs| cs.0.is_empty())
}
/// Recursion entry point to find threading opportunities.
#[instrument(level = "trace", skip(self, cost), ret)]
fn find_opportunity(
    &mut self,
    bb: BasicBlock,
    mut state: State<ConditionSet<'a>>,
    mut cost: CostChecker<'_, 'tcx>,
    depth: usize,
) {
    // Do not thread through loop headers.
    if self.loop_headers.contains(bb) {
        return;
    }
    debug!(cost = ?cost.cost());
    // Walk the block's statements backwards, transforming the condition set.
    for (statement_index, stmt) in
        self.body.basic_blocks[bb].statements.iter().enumerate().rev()
    {
        // No conditions left: nothing can be fulfilled further up.
        if self.is_empty(&state) {
            return;
        }
        // Account for the cost of duplicating this statement if we thread.
        cost.visit_statement(stmt, Location { block: bb, statement_index });
        if cost.cost() > MAX_COST {
            return;
        }
        // Attempt to turn the `current_condition` on `lhs` into a condition on another place.
        self.process_statement(bb, stmt, &mut state);
        // When a statement mutates a place, assignments to that place that happen
        // above the mutation cannot fulfill a condition.
        //   _1 = 5 // Whatever happens here, it won't change the result of a `SwitchInt`.
        //   _1 = 6
        if let Some((lhs, tail)) = self.mutated_statement(stmt) {
            state.flood_with_tail_elem(lhs.as_ref(), tail, self.map, ConditionSet::default());
        }
    }
    // Stop when there is nothing left to prove or the depth bound is hit.
    if self.is_empty(&state) || depth >= MAX_BACKTRACK {
        return;
    }
    let last_non_rec = self.opportunities.len();
    let predecessors = &self.body.basic_blocks.predecessors()[bb];
    if let &[pred] = &predecessors[..] && bb != START_BLOCK {
        let term = self.body.basic_blocks[pred].terminator();
        match term.kind {
            // A unique predecessor ending in `SwitchInt` tells us which value the
            // discriminant had when entering `bb`; exploit that before recursing.
            TerminatorKind::SwitchInt { ref discr, ref targets } => {
                self.process_switch_int(discr, targets, bb, &mut state);
                self.find_opportunity(pred, state, cost, depth + 1);
            }
            _ => self.recurse_through_terminator(pred, &state, &cost, depth),
        }
    } else {
        // Multiple predecessors: explore each with a cloned state.
        for &pred in predecessors {
            self.recurse_through_terminator(pred, &state, &cost, depth);
        }
    }
    let new_tos = &mut self.opportunities[last_non_rec..];
    debug!(?new_tos);
    // Try to deduplicate threading opportunities.
    if new_tos.len() > 1
        && new_tos.len() == predecessors.len()
        && predecessors
            .iter()
            .zip(new_tos.iter())
            .all(|(&pred, to)| to.chain == &[pred] && to.target == new_tos[0].target)
    {
        // All predecessors have a threading opportunity, and they all point to the same block.
        debug!(?new_tos, "dedup");
        let first = &mut new_tos[0];
        *first = ThreadingOpportunity { chain: vec![bb], target: first.target };
        self.opportunities.truncate(last_non_rec + 1);
        return;
    }
    // Record that the newly-found opportunities pass through `bb`.
    for op in self.opportunities[last_non_rec..].iter_mut() {
        op.chain.push(bb);
    }
}
/// Extract the mutated place from a statement.
///
/// This method returns the `Place` so we can flood the state in case of a partial assignment.
///   (_1 as Ok).0 = _5;
///   (_1 as Err).0 = _6;
/// We want to ensure that a `SwitchInt((_1 as Ok).0)` does not see the first assignment, as
/// the value may have been mangled by the second assignment.
///
/// In case we assign to a discriminant, we return `Some(TrackElem::Discriminant)`, so we can
/// stop at flooding the discriminant, and preserve the variant fields.
///   (_1 as Some).0 = _6;
///   SetDiscriminant(_1, 1);
///   switchInt((_1 as Some).0)
#[instrument(level = "trace", skip(self), ret)]
fn mutated_statement(
    &self,
    stmt: &Statement<'tcx>,
) -> Option<(Place<'tcx>, Option<TrackElem>)> {
    match stmt.kind {
        StatementKind::Assign(box (place, _))
        | StatementKind::Deinit(box place) => Some((place, None)),
        StatementKind::SetDiscriminant { box place, variant_index: _ } => {
            Some((place, Some(TrackElem::Discriminant)))
        }
        // Storage markers invalidate everything known about the local.
        StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
            Some((Place::from(local), None))
        }
        StatementKind::Retag(..)
        | StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(..))
        // copy_nonoverlapping takes pointers and mutates the pointed-to value.
        | StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(..))
        | StatementKind::AscribeUserType(..)
        | StatementKind::Coverage(..)
        | StatementKind::FakeRead(..)
        | StatementKind::ConstEvalCounter
        | StatementKind::PlaceMention(..)
        | StatementKind::Nop => None,
    }
}
/// Transfer the conditions expected on `lhs` through an assignment `lhs = rhs`.
///
/// The `Option<!>` return is only used for early exit via `?`; it is always `None`.
#[instrument(level = "trace", skip(self))]
fn process_operand(
    &mut self,
    bb: BasicBlock,
    lhs: PlaceIndex,
    rhs: &Operand<'tcx>,
    state: &mut State<ConditionSet<'a>>,
) -> Option<!> {
    let register_opportunity = |c: Condition| {
        debug!(?bb, ?c.target, "register");
        self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
    };
    match rhs {
        // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
        Operand::Constant(constant) => {
            let conditions = state.try_get_idx(lhs, self.map)?;
            let constant =
                constant.const_.normalize(self.tcx, self.param_env).try_to_scalar_int()?;
            conditions.iter_matches(constant).for_each(register_opportunity);
        }
        // Transfer the conditions on the copied rhs.
        Operand::Move(rhs) | Operand::Copy(rhs) => {
            let rhs = self.map.find(rhs.as_ref())?;
            state.insert_place_idx(rhs, lhs, self.map);
        }
    }
    None
}
/// Transfer the conditions in `state` backwards through `stmt`, registering any
/// opportunity the statement fulfills.
///
/// The `Option<!>` return is only used for early exit via `?`; it is always `None`.
#[instrument(level = "trace", skip(self))]
fn process_statement(
    &mut self,
    bb: BasicBlock,
    stmt: &Statement<'tcx>,
    state: &mut State<ConditionSet<'a>>,
) -> Option<!> {
    let register_opportunity = |c: Condition| {
        debug!(?bb, ?c.target, "register");
        self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
    };
    // Below, `lhs` is the return value of `mutated_statement`,
    // the place to which `conditions` apply.
    // Build a constant operand holding the discriminant of `enum_ty`'s
    // `variant_index` variant, if it can be represented as a `ScalarInt`.
    let discriminant_for_variant = |enum_ty: Ty<'tcx>, variant_index| {
        let discr = enum_ty.discriminant_for_variant(self.tcx, variant_index)?;
        let discr_layout = self.tcx.layout_of(self.param_env.and(discr.ty)).ok()?;
        let scalar = ScalarInt::try_from_uint(discr.val, discr_layout.size)?;
        Some(Operand::const_from_scalar(
            self.tcx,
            discr.ty,
            scalar.into(),
            rustc_span::DUMMY_SP,
        ))
    };
    match &stmt.kind {
        // If we expect `discriminant(place) ?= A`,
        // we have an opportunity if `variant_index ?= A`.
        StatementKind::SetDiscriminant { box place, variant_index } => {
            let discr_target = self.map.find_discr(place.as_ref())?;
            let enum_ty = place.ty(self.body, self.tcx).ty;
            let discr = discriminant_for_variant(enum_ty, *variant_index)?;
            self.process_operand(bb, discr_target, &discr, state)?;
        }
        // If we expect `lhs ?= true`, we have an opportunity if we assume `lhs == true`.
        StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(
            Operand::Copy(place) | Operand::Move(place),
        )) => {
            let conditions = state.try_get(place.as_ref(), self.map)?;
            conditions.iter_matches(ScalarInt::TRUE).for_each(register_opportunity);
        }
        StatementKind::Assign(box (lhs_place, rhs)) => {
            if let Some(lhs) = self.map.find(lhs_place.as_ref()) {
                match rhs {
                    Rvalue::Use(operand) => self.process_operand(bb, lhs, operand, state)?,
                    // Transfer the conditions on the copy rhs.
                    Rvalue::CopyForDeref(rhs) => {
                        self.process_operand(bb, lhs, &Operand::Copy(*rhs), state)?
                    }
                    Rvalue::Discriminant(rhs) => {
                        let rhs = self.map.find_discr(rhs.as_ref())?;
                        state.insert_place_idx(rhs, lhs, self.map);
                    }
                    // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
                    Rvalue::Aggregate(box ref kind, ref operands) => {
                        let agg_ty = lhs_place.ty(self.body, self.tcx).ty;
                        let lhs = match kind {
                            // Do not support unions.
                            AggregateKind::Adt(.., Some(_)) => return None,
                            AggregateKind::Adt(_, variant_index, ..) if agg_ty.is_enum() => {
                                // Writing an enum aggregate also writes its discriminant.
                                if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant)
                                    && let Some(discr_value) = discriminant_for_variant(agg_ty, *variant_index)
                                {
                                    self.process_operand(bb, discr_target, &discr_value, state);
                                }
                                self.map.apply(lhs, TrackElem::Variant(*variant_index))?
                            }
                            _ => lhs,
                        };
                        // Each field operand assigns the corresponding tracked field.
                        for (field_index, operand) in operands.iter_enumerated() {
                            if let Some(field) =
                                self.map.apply(lhs, TrackElem::Field(field_index))
                            {
                                self.process_operand(bb, field, operand, state);
                            }
                        }
                    }
                    // Transfer the conditions on the copy rhs, after inversing polarity.
                    Rvalue::UnaryOp(UnOp::Not, Operand::Move(place) | Operand::Copy(place)) => {
                        let conditions = state.try_get_idx(lhs, self.map)?;
                        let place = self.map.find(place.as_ref())?;
                        let conds = conditions.map(self.arena, Condition::inv);
                        state.insert_value_idx(place, conds, self.map);
                    }
                    // We expect `lhs ?= A`. We found `lhs = Eq(rhs, B)`.
                    // Create a condition on `rhs ?= B`.
                    Rvalue::BinaryOp(
                        op,
                        box (
                            Operand::Move(place) | Operand::Copy(place),
                            Operand::Constant(value),
                        )
                        | box (
                            Operand::Constant(value),
                            Operand::Move(place) | Operand::Copy(place),
                        ),
                    ) => {
                        let conditions = state.try_get_idx(lhs, self.map)?;
                        let place = self.map.find(place.as_ref())?;
                        let equals = match op {
                            BinOp::Eq => ScalarInt::TRUE,
                            BinOp::Ne => ScalarInt::FALSE,
                            _ => return None,
                        };
                        let value = value
                            .const_
                            .normalize(self.tcx, self.param_env)
                            .try_to_scalar_int()?;
                        let conds = conditions.map(self.arena, |c| Condition {
                            value,
                            polarity: if c.matches(equals) {
                                Polarity::Eq
                            } else {
                                Polarity::Ne
                            },
                            ..c
                        });
                        state.insert_value_idx(place, conds, self.map);
                    }
                    _ => {}
                }
            }
        }
        _ => {}
    }
    None
}
/// Continue the backward search through predecessor `bb`'s terminator,
/// flooding any place the terminator may overwrite.
#[instrument(level = "trace", skip(self, cost))]
fn recurse_through_terminator(
    &mut self,
    bb: BasicBlock,
    state: &State<ConditionSet<'a>>,
    cost: &CostChecker<'_, 'tcx>,
    depth: usize,
) {
    let register_opportunity = |c: Condition| {
        debug!(?bb, ?c.target, "register");
        self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
    };
    let term = self.body.basic_blocks[bb].terminator();
    let place_to_flood = match term.kind {
        // We come from a target, so those are not possible.
        TerminatorKind::UnwindResume
        | TerminatorKind::UnwindTerminate(_)
        | TerminatorKind::Return
        | TerminatorKind::Unreachable
        | TerminatorKind::CoroutineDrop => bug!("{term:?} has no terminators"),
        // Disallowed during optimizations.
        TerminatorKind::FalseEdge { .. }
        | TerminatorKind::FalseUnwind { .. }
        | TerminatorKind::Yield { .. } => bug!("{term:?} invalid"),
        // Cannot reason about inline asm.
        TerminatorKind::InlineAsm { .. } => return,
        // `SwitchInt` is handled specially.
        TerminatorKind::SwitchInt { .. } => return,
        // We can recurse, no thing particular to do.
        TerminatorKind::Goto { .. } => None,
        // Flood the overwritten place, and progress through.
        TerminatorKind::Drop { place: destination, .. }
        | TerminatorKind::Call { destination, .. } => Some(destination),
        // Treat as an `assume(cond == expected)`.
        TerminatorKind::Assert { ref cond, expected, .. } => {
            if let Some(place) = cond.place()
                && let Some(conditions) = state.try_get(place.as_ref(), self.map)
            {
                let expected = if expected { ScalarInt::TRUE } else { ScalarInt::FALSE };
                conditions.iter_matches(expected).for_each(register_opportunity);
            }
            None
        }
    };
    // We can recurse through this terminator.
    let mut state = state.clone();
    if let Some(place_to_flood) = place_to_flood {
        // Drop any condition on a place the terminator may overwrite.
        state.flood_with(place_to_flood.as_ref(), self.map, ConditionSet::default());
    }
    self.find_opportunity(bb, state, cost.clone(), depth + 1);
}
/// Exploit the fact that `target_bb`'s unique predecessor ends in
/// `SwitchInt(discr)`: being inside `target_bb` constrains `discr`'s value.
///
/// The `Option<!>` return is only used for early exit via `?`; it is always `None`.
#[instrument(level = "trace", skip(self))]
fn process_switch_int(
    &mut self,
    discr: &Operand<'tcx>,
    targets: &SwitchTargets,
    target_bb: BasicBlock,
    state: &mut State<ConditionSet<'a>>,
) -> Option<!> {
    debug_assert_ne!(target_bb, START_BLOCK);
    debug_assert_eq!(self.body.basic_blocks.predecessors()[target_bb].len(), 1);
    let discr = discr.place()?;
    let discr_ty = discr.ty(self.body, self.tcx).ty;
    let discr_layout = self.tcx.layout_of(self.param_env.and(discr_ty)).ok()?;
    let conditions = state.try_get(discr.as_ref(), self.map)?;
    if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) {
        let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
        debug_assert_eq!(targets.iter().filter(|&(_, target)| target == target_bb).count(), 1);
        // We are inside `target_bb`. Since we have a single predecessor, we know we passed
        // through the `SwitchInt` before arriving here. Therefore, we know that
        // `discr == value`. If one condition can be fulfilled by `discr == value`,
        // that's an opportunity.
        for c in conditions.iter_matches(value) {
            debug!(?target_bb, ?c.target, "register");
            self.opportunities.push(ThreadingOpportunity { chain: vec![], target: c.target });
        }
    } else if let Some((value, _, else_bb)) = targets.as_static_if()
        && target_bb == else_bb
    {
        let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
        // We only know that `discr != value`. That's much weaker information than
        // the equality we had in the previous arm. All we can conclude is that
        // the replacement condition `discr != value` can be threaded, and nothing else.
        for c in conditions.iter() {
            if c.value == value && c.polarity == Polarity::Ne {
                debug!(?target_bb, ?c.target, "register");
                self.opportunities
                    .push(ThreadingOpportunity { chain: vec![], target: c.target });
            }
        }
    }
    None
}
}
/// The set of threading opportunities (TOs) found by the analysis, together with the
/// bookkeeping needed to apply them one by one while keeping the CFG consistent.
struct OpportunitySet {
    opportunities: Vec<ThreadingOpportunity>,
    /// For each bb, give the TOs in which it appears. The pair corresponds to the index
    /// in `opportunities` and the index in `ThreadingOpportunity::chain`.
    involving_tos: IndexVec<BasicBlock, Vec<(usize, usize)>>,
    /// Cache the number of predecessors for each block, as we clear the basic block cache.
    predecessors: IndexVec<BasicBlock, usize>,
}
impl OpportunitySet {
    /// Build the set from the TOs found by the analysis, indexing each basic block by the
    /// TOs that mention it so `apply_once` can cheaply patch later TOs.
    fn new(body: &Body<'_>, opportunities: Vec<ThreadingOpportunity>) -> OpportunitySet {
        let mut involving_tos = IndexVec::from_elem(Vec::new(), &body.basic_blocks);
        for (index, to) in opportunities.iter().enumerate() {
            for (ibb, &bb) in to.chain.iter().enumerate() {
                involving_tos[bb].push((index, ibb));
            }
            // The target counts as position `chain.len()` in the chain.
            involving_tos[to.target].push((index, to.chain.len()));
        }
        let predecessors = predecessor_count(body);
        OpportunitySet { opportunities, involving_tos, predecessors }
    }

    /// Apply the opportunities on the graph.
    fn apply(&mut self, body: &mut Body<'_>) {
        for i in 0..self.opportunities.len() {
            self.apply_once(i, body);
        }
    }

    /// Apply TO number `index`: walk its chain, duplicating every multi-predecessor block
    /// so the threaded path becomes exclusive, then redirect the final terminator to the
    /// TO's target. Later TOs referencing duplicated edges are renamed on the fly.
    #[instrument(level = "trace", skip(self, body))]
    fn apply_once(&mut self, index: usize, body: &mut Body<'_>) {
        debug!(?self.predecessors);
        debug!(?self.involving_tos);

        // Check that `predecessors` satisfies its invariant.
        debug_assert_eq!(self.predecessors, predecessor_count(body));

        // Remove the TO from the vector to allow modifying the other ones later.
        let op = &mut self.opportunities[index];
        debug!(?op);
        let op_chain = std::mem::take(&mut op.chain);
        let op_target = op.target;
        // The chain must not contain duplicate blocks.
        debug_assert_eq!(op_chain.len(), op_chain.iter().collect::<FxHashSet<_>>().len());

        let Some((current, chain)) = op_chain.split_first() else { return };
        let basic_blocks = body.basic_blocks.as_mut();

        // Invariant: the control-flow is well-formed at the end of each iteration.
        let mut current = *current;
        for &succ in chain {
            debug!(?current, ?succ);

            // `succ` must be a successor of `current`. If it is not, this means this TO is not
            // satisfiable and a previous TO erased this edge, so we bail out.
            if !basic_blocks[current].terminator().successors().any(|s| s == succ) {
                debug!("impossible");
                return;
            }

            // Fast path: `succ` is only used once, so we can reuse it directly.
            if self.predecessors[succ] == 1 {
                debug!("single");
                current = succ;
                continue;
            }

            let new_succ = basic_blocks.push(basic_blocks[succ].clone());
            debug!(?new_succ);

            // Replace `succ` by `new_succ` where it appears.
            let mut num_edges = 0;
            for s in basic_blocks[current].terminator_mut().successors_mut() {
                if *s == succ {
                    *s = new_succ;
                    num_edges += 1;
                }
            }

            // Update predecessors with the new block.
            let _new_succ = self.predecessors.push(num_edges);
            debug_assert_eq!(new_succ, _new_succ);
            self.predecessors[succ] -= num_edges;
            self.update_predecessor_count(basic_blocks[new_succ].terminator(), Update::Incr);

            // Replace the `current -> succ` edge by `current -> new_succ` in all the following
            // TOs. This is necessary to avoid trying to thread through a non-existing edge. We
            // use `involving_tos` here to avoid traversing the full set of TOs on each iteration.
            let mut new_involved = Vec::new();
            for &(to_index, in_to_index) in &self.involving_tos[current] {
                // That TO has already been applied, do nothing.
                if to_index <= index {
                    continue;
                }

                let other_to = &mut self.opportunities[to_index];
                if other_to.chain.get(in_to_index) != Some(&current) {
                    continue;
                }
                // The block after `current` in the other TO's chain, or its target if
                // `current` is the last chain element.
                let s = other_to.chain.get_mut(in_to_index + 1).unwrap_or(&mut other_to.target);
                if *s == succ {
                    // `other_to` references the `current -> succ` edge, so replace `succ`.
                    *s = new_succ;
                    new_involved.push((to_index, in_to_index + 1));
                }
            }

            // The TOs that we just updated now reference `new_succ`. Update `involving_tos`
            // in case we need to duplicate an edge starting at `new_succ` later.
            let _new_succ = self.involving_tos.push(new_involved);
            debug_assert_eq!(new_succ, _new_succ);

            current = new_succ;
        }

        // Redirect the end of the (now exclusive) chain straight to the TO's target.
        let current = &mut basic_blocks[current];
        self.update_predecessor_count(current.terminator(), Update::Decr);
        current.terminator_mut().kind = TerminatorKind::Goto { target: op_target };
        self.predecessors[op_target] += 1;
    }

    /// Adjust `self.predecessors` for every successor of `terminator`, in the given
    /// direction. Used when a terminator is duplicated (`Incr`) or replaced (`Decr`).
    fn update_predecessor_count(&mut self, terminator: &Terminator<'_>, incr: Update) {
        match incr {
            Update::Incr => {
                for s in terminator.successors() {
                    self.predecessors[s] += 1;
                }
            }
            Update::Decr => {
                for s in terminator.successors() {
                    self.predecessors[s] -= 1;
                }
            }
        }
    }
}
/// Number of incoming CFG edges for each basic block, counting the implicit edge
/// into `START_BLOCK` from the function's caller.
fn predecessor_count(body: &Body<'_>) -> IndexVec<BasicBlock, usize> {
    let mut counts: IndexVec<BasicBlock, usize> =
        body.basic_blocks.predecessors().iter().map(|preds| preds.len()).collect();
    // The entry block is additionally reachable from outside the CFG.
    counts[START_BLOCK] += 1;
    counts
}
/// Direction in which `OpportunitySet::update_predecessor_count` adjusts the cached
/// predecessor counts.
enum Update {
    Incr,
    Decr,
}
/// Compute the set of loop headers in the given body. We define a loop header as a block
/// which has at least one predecessor which it dominates. This definition is only correct
/// for reducible CFGs. But if the CFG is already irreducible, there is no point in trying
/// much harder.
fn loop_headers(body: &Body<'_>) -> BitSet<BasicBlock> {
    let mut loop_headers = BitSet::new_empty(body.basic_blocks.len());
    let dominators = body.basic_blocks.dominators();
    // Only visit reachable blocks.
    for (bb, bbdata) in traversal::preorder(body) {
        for succ in bbdata.terminator().successors() {
            // A back-edge: `succ` dominates one of its own predecessors.
            if dominators.dominates(succ, bb) {
                loop_headers.insert(succ);
            }
        }
    }
    loop_headers
}

View file

@ -62,6 +62,7 @@
mod const_prop_lint;
mod copy_prop;
mod coroutine;
mod cost_checker;
mod coverage;
mod cross_crate_inline;
mod ctfe_limit;
@ -81,6 +82,7 @@
mod gvn;
pub mod inline;
mod instsimplify;
mod jump_threading;
mod large_enums;
mod lower_intrinsics;
mod lower_slice_len;
@ -571,6 +573,7 @@ fn o1<T>(x: T) -> WithMinOptLevel<T> {
&dataflow_const_prop::DataflowConstProp,
&const_debuginfo::ConstDebugInfo,
&o1(simplify_branches::SimplifyConstCondition::AfterConstProp),
&jump_threading::JumpThreading,
&early_otherwise_branch::EarlyOtherwiseBranch,
&simplify_comparison_integral::SimplifyComparisonIntegral,
&dead_store_elimination::DeadStoreElimination,

View file

@ -0,0 +1,57 @@
- // MIR for `custom_discr` before JumpThreading
+ // MIR for `custom_discr` after JumpThreading
fn custom_discr(_1: bool) -> u8 {
debug x => _1;
let mut _0: u8;
let mut _2: CustomDiscr;
let mut _3: bool;
let mut _4: u8;
bb0: {
StorageLive(_2);
StorageLive(_3);
_3 = _1;
switchInt(move _3) -> [0: bb2, otherwise: bb1];
}
bb1: {
_2 = CustomDiscr::A;
- goto -> bb3;
+ goto -> bb7;
}
bb2: {
_2 = CustomDiscr::B;
goto -> bb3;
}
bb3: {
StorageDead(_3);
_4 = discriminant(_2);
- switchInt(move _4) -> [35: bb5, otherwise: bb4];
+ goto -> bb4;
}
bb4: {
_0 = const 13_u8;
goto -> bb6;
}
bb5: {
_0 = const 5_u8;
goto -> bb6;
}
bb6: {
StorageDead(_2);
return;
+ }
+
+ bb7: {
+ StorageDead(_3);
+ _4 = discriminant(_2);
+ goto -> bb5;
}
}

View file

@ -0,0 +1,57 @@
- // MIR for `custom_discr` before JumpThreading
+ // MIR for `custom_discr` after JumpThreading
fn custom_discr(_1: bool) -> u8 {
debug x => _1;
let mut _0: u8;
let mut _2: CustomDiscr;
let mut _3: bool;
let mut _4: u8;
bb0: {
StorageLive(_2);
StorageLive(_3);
_3 = _1;
switchInt(move _3) -> [0: bb2, otherwise: bb1];
}
bb1: {
_2 = CustomDiscr::A;
- goto -> bb3;
+ goto -> bb7;
}
bb2: {
_2 = CustomDiscr::B;
goto -> bb3;
}
bb3: {
StorageDead(_3);
_4 = discriminant(_2);
- switchInt(move _4) -> [35: bb5, otherwise: bb4];
+ goto -> bb4;
}
bb4: {
_0 = const 13_u8;
goto -> bb6;
}
bb5: {
_0 = const 5_u8;
goto -> bb6;
}
bb6: {
StorageDead(_2);
return;
+ }
+
+ bb7: {
+ StorageDead(_3);
+ _4 = discriminant(_2);
+ goto -> bb5;
}
}

View file

@ -0,0 +1,68 @@
- // MIR for `dfa` before JumpThreading
+ // MIR for `dfa` after JumpThreading
fn dfa() -> () {
let mut _0: ();
let mut _1: DFA;
let mut _2: !;
let mut _3: ();
let mut _4: isize;
let mut _5: DFA;
let mut _6: DFA;
let mut _7: DFA;
let mut _8: !;
scope 1 {
debug state => _1;
}
bb0: {
StorageLive(_1);
_1 = DFA::A;
StorageLive(_2);
goto -> bb1;
}
bb1: {
_4 = discriminant(_1);
switchInt(move _4) -> [0: bb4, 1: bb5, 2: bb6, 3: bb2, otherwise: bb3];
}
bb2: {
_0 = const ();
StorageDead(_2);
StorageDead(_1);
return;
}
bb3: {
unreachable;
}
bb4: {
StorageLive(_5);
_5 = DFA::B;
_1 = move _5;
_3 = const ();
StorageDead(_5);
goto -> bb1;
}
bb5: {
StorageLive(_6);
_6 = DFA::C;
_1 = move _6;
_3 = const ();
StorageDead(_6);
goto -> bb1;
}
bb6: {
StorageLive(_7);
_7 = DFA::D;
_1 = move _7;
_3 = const ();
StorageDead(_7);
goto -> bb1;
}
}

View file

@ -0,0 +1,68 @@
- // MIR for `dfa` before JumpThreading
+ // MIR for `dfa` after JumpThreading
fn dfa() -> () {
let mut _0: ();
let mut _1: DFA;
let mut _2: !;
let mut _3: ();
let mut _4: isize;
let mut _5: DFA;
let mut _6: DFA;
let mut _7: DFA;
let mut _8: !;
scope 1 {
debug state => _1;
}
bb0: {
StorageLive(_1);
_1 = DFA::A;
StorageLive(_2);
goto -> bb1;
}
bb1: {
_4 = discriminant(_1);
switchInt(move _4) -> [0: bb4, 1: bb5, 2: bb6, 3: bb2, otherwise: bb3];
}
bb2: {
_0 = const ();
StorageDead(_2);
StorageDead(_1);
return;
}
bb3: {
unreachable;
}
bb4: {
StorageLive(_5);
_5 = DFA::B;
_1 = move _5;
_3 = const ();
StorageDead(_5);
goto -> bb1;
}
bb5: {
StorageLive(_6);
_6 = DFA::C;
_1 = move _6;
_3 = const ();
StorageDead(_6);
goto -> bb1;
}
bb6: {
StorageLive(_7);
_7 = DFA::D;
_1 = move _7;
_3 = const ();
StorageDead(_7);
goto -> bb1;
}
}

View file

@ -0,0 +1,59 @@
- // MIR for `disappearing_bb` before JumpThreading
+ // MIR for `disappearing_bb` after JumpThreading
fn disappearing_bb(_1: u8) -> u8 {
let mut _0: u8;
let mut _2: bool;
let mut _3: bool;
bb0: {
_2 = const true;
_3 = const true;
switchInt(_1) -> [0: bb3, 1: bb3, 2: bb1, otherwise: bb2];
}
bb1: {
_3 = const false;
- goto -> bb4;
+ goto -> bb9;
}
bb2: {
unreachable;
}
bb3: {
_2 = const false;
goto -> bb4;
}
bb4: {
switchInt(_3) -> [0: bb5, otherwise: bb7];
}
bb5: {
switchInt(_2) -> [0: bb6, otherwise: bb8];
}
bb6: {
return;
}
bb7: {
- goto -> bb5;
+ goto -> bb10;
}
bb8: {
+ goto -> bb6;
+ }
+
+ bb9: {
+ goto -> bb5;
+ }
+
+ bb10: {
goto -> bb6;
}
}

View file

@ -0,0 +1,59 @@
- // MIR for `disappearing_bb` before JumpThreading
+ // MIR for `disappearing_bb` after JumpThreading
fn disappearing_bb(_1: u8) -> u8 {
let mut _0: u8;
let mut _2: bool;
let mut _3: bool;
bb0: {
_2 = const true;
_3 = const true;
switchInt(_1) -> [0: bb3, 1: bb3, 2: bb1, otherwise: bb2];
}
bb1: {
_3 = const false;
- goto -> bb4;
+ goto -> bb9;
}
bb2: {
unreachable;
}
bb3: {
_2 = const false;
goto -> bb4;
}
bb4: {
switchInt(_3) -> [0: bb5, otherwise: bb7];
}
bb5: {
switchInt(_2) -> [0: bb6, otherwise: bb8];
}
bb6: {
return;
}
bb7: {
- goto -> bb5;
+ goto -> bb10;
}
bb8: {
+ goto -> bb6;
+ }
+
+ bb9: {
+ goto -> bb5;
+ }
+
+ bb10: {
goto -> bb6;
}
}

View file

@ -0,0 +1,45 @@
- // MIR for `duplicate_chain` before JumpThreading
+ // MIR for `duplicate_chain` after JumpThreading
fn duplicate_chain(_1: bool) -> u8 {
let mut _0: u8;
let mut _2: u8;
let mut _3: i32;
let mut _4: i32;
bb0: {
switchInt(_1) -> [1: bb1, otherwise: bb2];
}
bb1: {
_2 = const 5_u8;
goto -> bb3;
}
bb2: {
_2 = const 5_u8;
goto -> bb3;
}
bb3: {
_3 = const 13_i32;
goto -> bb4;
}
bb4: {
_4 = const 15_i32;
- switchInt(_2) -> [5: bb5, otherwise: bb6];
+ goto -> bb5;
}
bb5: {
_0 = const 7_u8;
return;
}
bb6: {
_0 = const 9_u8;
return;
}
}

View file

@ -0,0 +1,45 @@
- // MIR for `duplicate_chain` before JumpThreading
+ // MIR for `duplicate_chain` after JumpThreading
fn duplicate_chain(_1: bool) -> u8 {
let mut _0: u8;
let mut _2: u8;
let mut _3: i32;
let mut _4: i32;
bb0: {
switchInt(_1) -> [1: bb1, otherwise: bb2];
}
bb1: {
_2 = const 5_u8;
goto -> bb3;
}
bb2: {
_2 = const 5_u8;
goto -> bb3;
}
bb3: {
_3 = const 13_i32;
goto -> bb4;
}
bb4: {
_4 = const 15_i32;
- switchInt(_2) -> [5: bb5, otherwise: bb6];
+ goto -> bb5;
}
bb5: {
_0 = const 7_u8;
return;
}
bb6: {
_0 = const 9_u8;
return;
}
}

View file

@ -0,0 +1,139 @@
- // MIR for `identity` before JumpThreading
+ // MIR for `identity` after JumpThreading
fn identity(_1: Result<i32, i32>) -> Result<i32, i32> {
debug x => _1;
let mut _0: std::result::Result<i32, i32>;
let mut _2: i32;
let mut _3: std::ops::ControlFlow<std::result::Result<std::convert::Infallible, i32>, i32>;
let mut _4: std::result::Result<i32, i32>;
let mut _5: isize;
let _6: std::result::Result<std::convert::Infallible, i32>;
let mut _7: !;
let mut _8: std::result::Result<std::convert::Infallible, i32>;
let _9: i32;
scope 1 {
debug residual => _6;
scope 2 {
scope 8 (inlined #[track_caller] <Result<i32, i32> as FromResidual<Result<Infallible, i32>>>::from_residual) {
debug residual => _8;
let _14: i32;
let mut _15: i32;
scope 9 {
debug e => _14;
scope 10 (inlined <i32 as From<i32>>::from) {
debug t => _14;
}
}
}
}
}
scope 3 {
debug val => _9;
scope 4 {
}
}
scope 5 (inlined <Result<i32, i32> as Try>::branch) {
debug self => _4;
let mut _10: isize;
let _11: i32;
let _12: i32;
let mut _13: std::result::Result<std::convert::Infallible, i32>;
scope 6 {
debug v => _11;
}
scope 7 {
debug e => _12;
}
}
bb0: {
StorageLive(_2);
StorageLive(_3);
StorageLive(_4);
_4 = _1;
StorageLive(_10);
StorageLive(_11);
StorageLive(_12);
_10 = discriminant(_4);
switchInt(move _10) -> [0: bb8, 1: bb6, otherwise: bb7];
}
bb1: {
StorageDead(_12);
StorageDead(_11);
StorageDead(_10);
StorageDead(_4);
_5 = discriminant(_3);
- switchInt(move _5) -> [0: bb2, 1: bb4, otherwise: bb3];
+ goto -> bb2;
}
bb2: {
StorageLive(_9);
_9 = ((_3 as Continue).0: i32);
_2 = _9;
StorageDead(_9);
_0 = Result::<i32, i32>::Ok(move _2);
StorageDead(_2);
StorageDead(_3);
goto -> bb5;
}
bb3: {
unreachable;
}
bb4: {
StorageLive(_6);
_6 = ((_3 as Break).0: std::result::Result<std::convert::Infallible, i32>);
StorageLive(_8);
_8 = _6;
StorageLive(_14);
_14 = move ((_8 as Err).0: i32);
StorageLive(_15);
_15 = move _14;
_0 = Result::<i32, i32>::Err(move _15);
StorageDead(_15);
StorageDead(_14);
StorageDead(_8);
StorageDead(_6);
StorageDead(_2);
StorageDead(_3);
goto -> bb5;
}
bb5: {
return;
}
bb6: {
_12 = move ((_4 as Err).0: i32);
StorageLive(_13);
_13 = Result::<Infallible, i32>::Err(move _12);
_3 = ControlFlow::<Result<Infallible, i32>, i32>::Break(move _13);
StorageDead(_13);
- goto -> bb1;
+ goto -> bb9;
}
bb7: {
unreachable;
}
bb8: {
_11 = move ((_4 as Ok).0: i32);
_3 = ControlFlow::<Result<Infallible, i32>, i32>::Continue(move _11);
goto -> bb1;
+ }
+
+ bb9: {
+ StorageDead(_12);
+ StorageDead(_11);
+ StorageDead(_10);
+ StorageDead(_4);
+ _5 = discriminant(_3);
+ goto -> bb4;
}
}

View file

@ -0,0 +1,139 @@
- // MIR for `identity` before JumpThreading
+ // MIR for `identity` after JumpThreading
fn identity(_1: Result<i32, i32>) -> Result<i32, i32> {
debug x => _1;
let mut _0: std::result::Result<i32, i32>;
let mut _2: i32;
let mut _3: std::ops::ControlFlow<std::result::Result<std::convert::Infallible, i32>, i32>;
let mut _4: std::result::Result<i32, i32>;
let mut _5: isize;
let _6: std::result::Result<std::convert::Infallible, i32>;
let mut _7: !;
let mut _8: std::result::Result<std::convert::Infallible, i32>;
let _9: i32;
scope 1 {
debug residual => _6;
scope 2 {
scope 8 (inlined #[track_caller] <Result<i32, i32> as FromResidual<Result<Infallible, i32>>>::from_residual) {
debug residual => _8;
let _14: i32;
let mut _15: i32;
scope 9 {
debug e => _14;
scope 10 (inlined <i32 as From<i32>>::from) {
debug t => _14;
}
}
}
}
}
scope 3 {
debug val => _9;
scope 4 {
}
}
scope 5 (inlined <Result<i32, i32> as Try>::branch) {
debug self => _4;
let mut _10: isize;
let _11: i32;
let _12: i32;
let mut _13: std::result::Result<std::convert::Infallible, i32>;
scope 6 {
debug v => _11;
}
scope 7 {
debug e => _12;
}
}
bb0: {
StorageLive(_2);
StorageLive(_3);
StorageLive(_4);
_4 = _1;
StorageLive(_10);
StorageLive(_11);
StorageLive(_12);
_10 = discriminant(_4);
switchInt(move _10) -> [0: bb8, 1: bb6, otherwise: bb7];
}
bb1: {
StorageDead(_12);
StorageDead(_11);
StorageDead(_10);
StorageDead(_4);
_5 = discriminant(_3);
- switchInt(move _5) -> [0: bb2, 1: bb4, otherwise: bb3];
+ goto -> bb2;
}
bb2: {
StorageLive(_9);
_9 = ((_3 as Continue).0: i32);
_2 = _9;
StorageDead(_9);
_0 = Result::<i32, i32>::Ok(move _2);
StorageDead(_2);
StorageDead(_3);
goto -> bb5;
}
bb3: {
unreachable;
}
bb4: {
StorageLive(_6);
_6 = ((_3 as Break).0: std::result::Result<std::convert::Infallible, i32>);
StorageLive(_8);
_8 = _6;
StorageLive(_14);
_14 = move ((_8 as Err).0: i32);
StorageLive(_15);
_15 = move _14;
_0 = Result::<i32, i32>::Err(move _15);
StorageDead(_15);
StorageDead(_14);
StorageDead(_8);
StorageDead(_6);
StorageDead(_2);
StorageDead(_3);
goto -> bb5;
}
bb5: {
return;
}
bb6: {
_12 = move ((_4 as Err).0: i32);
StorageLive(_13);
_13 = Result::<Infallible, i32>::Err(move _12);
_3 = ControlFlow::<Result<Infallible, i32>, i32>::Break(move _13);
StorageDead(_13);
- goto -> bb1;
+ goto -> bb9;
}
bb7: {
unreachable;
}
bb8: {
_11 = move ((_4 as Ok).0: i32);
_3 = ControlFlow::<Result<Infallible, i32>, i32>::Continue(move _11);
goto -> bb1;
+ }
+
+ bb9: {
+ StorageDead(_12);
+ StorageDead(_11);
+ StorageDead(_10);
+ StorageDead(_4);
+ _5 = discriminant(_3);
+ goto -> bb4;
}
}

View file

@ -0,0 +1,54 @@
- // MIR for `multiple_match` before JumpThreading
+ // MIR for `multiple_match` after JumpThreading
fn multiple_match(_1: u8) -> u8 {
let mut _0: u8;
let mut _2: u8;
let mut _3: u8;
bb0: {
switchInt(_1) -> [3: bb1, otherwise: bb2];
}
bb1: {
_2 = _1;
- switchInt(_2) -> [3: bb3, otherwise: bb4];
+ goto -> bb3;
}
bb2: {
_3 = _1;
- switchInt(_3) -> [3: bb5, otherwise: bb6];
+ goto -> bb6;
}
bb3: {
_0 = const 5_u8;
return;
}
bb4: {
_0 = const 7_u8;
return;
}
bb5: {
_0 = const 9_u8;
return;
}
bb6: {
switchInt(_3) -> [1: bb7, otherwise: bb8];
}
bb7: {
_0 = const 9_u8;
return;
}
bb8: {
_0 = const 11_u8;
return;
}
}

View file

@ -0,0 +1,54 @@
- // MIR for `multiple_match` before JumpThreading
+ // MIR for `multiple_match` after JumpThreading
fn multiple_match(_1: u8) -> u8 {
let mut _0: u8;
let mut _2: u8;
let mut _3: u8;
bb0: {
switchInt(_1) -> [3: bb1, otherwise: bb2];
}
bb1: {
_2 = _1;
- switchInt(_2) -> [3: bb3, otherwise: bb4];
+ goto -> bb3;
}
bb2: {
_3 = _1;
- switchInt(_3) -> [3: bb5, otherwise: bb6];
+ goto -> bb6;
}
bb3: {
_0 = const 5_u8;
return;
}
bb4: {
_0 = const 7_u8;
return;
}
bb5: {
_0 = const 9_u8;
return;
}
bb6: {
switchInt(_3) -> [1: bb7, otherwise: bb8];
}
bb7: {
_0 = const 9_u8;
return;
}
bb8: {
_0 = const 11_u8;
return;
}
}

View file

@ -0,0 +1,56 @@
- // MIR for `mutable_ref` before JumpThreading
+ // MIR for `mutable_ref` after JumpThreading
fn mutable_ref() -> bool {
let mut _0: bool;
let mut _1: i32;
let _3: ();
let mut _4: bool;
let mut _5: i32;
scope 1 {
debug x => _1;
let _2: *mut i32;
scope 2 {
debug a => _2;
scope 3 {
}
}
}
bb0: {
StorageLive(_1);
_1 = const 5_i32;
StorageLive(_2);
_2 = &raw mut _1;
_1 = const 7_i32;
StorageLive(_3);
(*_2) = const 8_i32;
_3 = const ();
StorageDead(_3);
StorageLive(_4);
StorageLive(_5);
_5 = _1;
_4 = Eq(move _5, const 7_i32);
switchInt(move _4) -> [0: bb2, otherwise: bb1];
}
bb1: {
StorageDead(_5);
_0 = const true;
goto -> bb3;
}
bb2: {
StorageDead(_5);
_0 = const false;
goto -> bb3;
}
bb3: {
StorageDead(_4);
StorageDead(_2);
StorageDead(_1);
return;
}
}

View file

@ -0,0 +1,56 @@
- // MIR for `mutable_ref` before JumpThreading
+ // MIR for `mutable_ref` after JumpThreading
fn mutable_ref() -> bool {
let mut _0: bool;
let mut _1: i32;
let _3: ();
let mut _4: bool;
let mut _5: i32;
scope 1 {
debug x => _1;
let _2: *mut i32;
scope 2 {
debug a => _2;
scope 3 {
}
}
}
bb0: {
StorageLive(_1);
_1 = const 5_i32;
StorageLive(_2);
_2 = &raw mut _1;
_1 = const 7_i32;
StorageLive(_3);
(*_2) = const 8_i32;
_3 = const ();
StorageDead(_3);
StorageLive(_4);
StorageLive(_5);
_5 = _1;
_4 = Eq(move _5, const 7_i32);
switchInt(move _4) -> [0: bb2, otherwise: bb1];
}
bb1: {
StorageDead(_5);
_0 = const true;
goto -> bb3;
}
bb2: {
StorageDead(_5);
_0 = const false;
goto -> bb3;
}
bb3: {
StorageDead(_4);
StorageDead(_2);
StorageDead(_1);
return;
}
}

View file

@ -0,0 +1,26 @@
- // MIR for `mutate_discriminant` before JumpThreading
+ // MIR for `mutate_discriminant` after JumpThreading
fn mutate_discriminant() -> u8 {
let mut _0: u8;
let mut _1: std::option::Option<NonZeroUsize>;
let mut _2: isize;
bb0: {
discriminant(_1) = 1;
(((_1 as variant#1).0: NonZeroUsize).0: usize) = const 0_usize;
_2 = discriminant(_1);
switchInt(_2) -> [0: bb1, otherwise: bb2];
}
bb1: {
_0 = const 1_u8;
return;
}
bb2: {
_0 = const 2_u8;
unreachable;
}
}

View file

@ -0,0 +1,26 @@
- // MIR for `mutate_discriminant` before JumpThreading
+ // MIR for `mutate_discriminant` after JumpThreading
fn mutate_discriminant() -> u8 {
let mut _0: u8;
let mut _1: std::option::Option<NonZeroUsize>;
let mut _2: isize;
bb0: {
discriminant(_1) = 1;
(((_1 as variant#1).0: NonZeroUsize).0: usize) = const 0_usize;
_2 = discriminant(_1);
switchInt(_2) -> [0: bb1, otherwise: bb2];
}
bb1: {
_0 = const 1_u8;
return;
}
bb2: {
_0 = const 2_u8;
unreachable;
}
}

View file

@ -0,0 +1,57 @@
- // MIR for `renumbered_bb` before JumpThreading
+ // MIR for `renumbered_bb` after JumpThreading
fn renumbered_bb(_1: bool) -> u8 {
let mut _0: u8;
let mut _2: bool;
let mut _3: bool;
bb0: {
_3 = const false;
switchInt(_1) -> [1: bb1, otherwise: bb2];
}
bb1: {
_2 = const false;
- goto -> bb3;
+ goto -> bb8;
}
bb2: {
_2 = _1;
_3 = _1;
goto -> bb3;
}
bb3: {
switchInt(_2) -> [0: bb4, otherwise: bb5];
}
bb4: {
switchInt(_3) -> [0: bb6, otherwise: bb7];
}
bb5: {
_0 = const 7_u8;
return;
}
bb6: {
_0 = const 9_u8;
return;
}
bb7: {
_0 = const 11_u8;
return;
+ }
+
+ bb8: {
+ goto -> bb9;
+ }
+
+ bb9: {
+ goto -> bb6;
}
}

View file

@ -0,0 +1,57 @@
- // MIR for `renumbered_bb` before JumpThreading
+ // MIR for `renumbered_bb` after JumpThreading
fn renumbered_bb(_1: bool) -> u8 {
let mut _0: u8;
let mut _2: bool;
let mut _3: bool;
bb0: {
_3 = const false;
switchInt(_1) -> [1: bb1, otherwise: bb2];
}
bb1: {
_2 = const false;
- goto -> bb3;
+ goto -> bb8;
}
bb2: {
_2 = _1;
_3 = _1;
goto -> bb3;
}
bb3: {
switchInt(_2) -> [0: bb4, otherwise: bb5];
}
bb4: {
switchInt(_3) -> [0: bb6, otherwise: bb7];
}
bb5: {
_0 = const 7_u8;
return;
}
bb6: {
_0 = const 9_u8;
return;
}
bb7: {
_0 = const 11_u8;
return;
+ }
+
+ bb8: {
+ goto -> bb9;
+ }
+
+ bb9: {
+ goto -> bb6;
}
}

View file

@ -0,0 +1,480 @@
// unit-test: JumpThreading
// compile-flags: -Zmir-enable-passes=+Inline
// EMIT_MIR_FOR_EACH_PANIC_STRATEGY
#![feature(control_flow_enum)]
#![feature(try_trait_v2)]
#![feature(custom_mir, core_intrinsics, rustc_attrs)]
use std::intrinsics::mir::*;
use std::ops::ControlFlow;
/// Exercise threading through the nested-`match` shape of `ControlFlow`-style control
/// flow: the CHECK lines expect the second `switchInt` (on the `ControlFlow`
/// discriminant) to be replaced by `goto`s.
fn too_complex(x: Result<i32, usize>) -> Option<i32> {
    // CHECK-LABEL: fn too_complex(
    // CHECK: bb0: {
    // CHECK: switchInt(move {{_.*}}) -> [0: bb3, 1: bb1, otherwise: bb2];
    // CHECK: bb1: {
    // CHECK: [[controlflow:_.*]] = ControlFlow::<usize, i32>::Break(
    // CHECK: goto -> bb8;
    // CHECK: bb2: {
    // CHECK: unreachable;
    // CHECK: bb3: {
    // CHECK: [[controlflow]] = ControlFlow::<usize, i32>::Continue(
    // CHECK: goto -> bb4;
    // CHECK: bb4: {
    // CHECK: goto -> bb6;
    // CHECK: bb5: {
    // CHECK: {{_.*}} = (([[controlflow]] as Break).0: usize);
    // CHECK: _0 = Option::<i32>::None;
    // CHECK: goto -> bb7;
    // CHECK: bb6: {
    // CHECK: {{_.*}} = (([[controlflow]] as Continue).0: i32);
    // CHECK: _0 = Option::<i32>::Some(
    // CHECK: goto -> bb7;
    // CHECK: bb7: {
    // CHECK: return;
    // CHECK: bb8: {
    // CHECK: goto -> bb5;
    match {
        match x {
            Ok(v) => ControlFlow::Continue(v),
            Err(r) => ControlFlow::Break(r),
        }
    } {
        ControlFlow::Continue(v) => Some(v),
        ControlFlow::Break(r) => None,
    }
}
/// Same pattern as `too_complex`, but through the real `?` operator on `Result`
/// (with `<Result as Try>::branch` inlined).
fn identity(x: Result<i32, i32>) -> Result<i32, i32> {
    // CHECK-LABEL: fn identity(
    // CHECK: bb0: {
    // CHECK: [[x:_.*]] = _1;
    // CHECK: switchInt(move {{_.*}}) -> [0: bb8, 1: bb6, otherwise: bb7];
    // CHECK: bb1: {
    // CHECK: goto -> bb2;
    // CHECK: bb2: {
    // CHECK: {{_.*}} = (([[controlflow:_.*]] as Continue).0: i32);
    // CHECK: _0 = Result::<i32, i32>::Ok(
    // CHECK: goto -> bb5;
    // CHECK: bb3: {
    // CHECK: unreachable;
    // CHECK: bb4: {
    // CHECK: {{_.*}} = (([[controlflow]] as Break).0: std::result::Result<std::convert::Infallible, i32>);
    // CHECK: _0 = Result::<i32, i32>::Err(
    // CHECK: goto -> bb5;
    // CHECK: bb5: {
    // CHECK: return;
    // CHECK: bb6: {
    // CHECK: {{_.*}} = move (([[x]] as Err).0: i32);
    // CHECK: [[controlflow]] = ControlFlow::<Result<Infallible, i32>, i32>::Break(
    // CHECK: goto -> bb9;
    // CHECK: bb7: {
    // CHECK: unreachable;
    // CHECK: bb8: {
    // CHECK: {{_.*}} = move (([[x]] as Ok).0: i32);
    // CHECK: [[controlflow]] = ControlFlow::<Result<Infallible, i32>, i32>::Continue(
    // CHECK: goto -> bb1;
    // CHECK: bb9: {
    // CHECK: goto -> bb4;
    Ok(x?)
}
/// The states of the state machine in the `dfa` test below.
enum DFA {
    A,
    B,
    C,
    D,
}
/// Check that we do not thread through a loop header,
/// to avoid creating an irreducible CFG.
fn dfa() {
    // CHECK-LABEL: fn dfa(
    // CHECK: bb0: {
    // CHECK: {{_.*}} = DFA::A;
    // CHECK: goto -> bb1;
    // bb1 is the loop header: its `switchInt` must remain.
    // CHECK: bb1: {
    // CHECK: switchInt({{.*}}) -> [0: bb4, 1: bb5, 2: bb6, 3: bb2, otherwise: bb3];
    // CHECK: bb2: {
    // CHECK: return;
    // CHECK: bb3: {
    // CHECK: unreachable;
    // CHECK: bb4: {
    // CHECK: {{_.*}} = DFA::B;
    // CHECK: goto -> bb1;
    // CHECK: bb5: {
    // CHECK: {{_.*}} = DFA::C;
    // CHECK: goto -> bb1;
    // CHECK: bb6: {
    // CHECK: {{_.*}} = DFA::D;
    // CHECK: goto -> bb1;
    let mut state = DFA::A;
    loop {
        match state {
            DFA::A => state = DFA::B,
            DFA::B => state = DFA::C,
            DFA::C => state = DFA::D,
            DFA::D => return,
        }
    }
}
/// Discriminant values deliberately differ from the variant indices
/// (`A` is variant index 0 but has discriminant value 35).
#[repr(u8)]
enum CustomDiscr {
    A = 35,
    B = 73,
    C = 99,
}
/// Verify that we correctly match the discriminant value, and not its index.
fn custom_discr(x: bool) -> u8 {
    // CHECK-LABEL: fn custom_discr(
    // CHECK: bb0: {
    // CHECK: switchInt({{.*}}) -> [0: bb2, otherwise: bb1];
    // CHECK: bb1: {
    // CHECK: {{_.*}} = CustomDiscr::A;
    // CHECK: goto -> bb7;
    // CHECK: bb2: {
    // CHECK: {{_.*}} = CustomDiscr::B;
    // CHECK: goto -> bb3;
    // CHECK: bb3: {
    // CHECK: goto -> bb4;
    // CHECK: bb4: {
    // CHECK: _0 = const 13_u8;
    // CHECK: goto -> bb6;
    // CHECK: bb5: {
    // CHECK: _0 = const 5_u8;
    // CHECK: goto -> bb6;
    // CHECK: bb6: {
    // CHECK: return;
    // CHECK: bb7: {
    // CHECK: goto -> bb5;
    // The `CustomDiscr::A` arm switches on discriminant value 35, not index 0.
    match if x { CustomDiscr::A } else { CustomDiscr::B } {
        CustomDiscr::A => 5,
        _ => 13,
    }
}
/// Thread through two successive switches on copies of `x`: the first switch fixes
/// `x`'s value (or excludes one), which resolves the second switch in each arm.
#[custom_mir(dialect = "runtime", phase = "post-cleanup")]
fn multiple_match(x: u8) -> u8 {
    // CHECK-LABEL: fn multiple_match(
    mir!(
        {
            // CHECK: bb0: {
            // CHECK: switchInt([[x:_.*]]) -> [3: bb1, otherwise: bb2];
            match x { 3 => bb1, _ => bb2 }
        }
        bb1 = {
            // We know `x == 3`, so we can take `bb3`.
            // CHECK: bb1: {
            // CHECK: {{_.*}} = [[x]];
            // CHECK: goto -> bb3;
            let y = x;
            match y { 3 => bb3, _ => bb4 }
        }
        bb2 = {
            // We know `x != 3`, so we can take `bb6`.
            // CHECK: bb2: {
            // CHECK: [[z:_.*]] = [[x]];
            // CHECK: goto -> bb6;
            let z = x;
            match z { 3 => bb5, _ => bb6 }
        }
        bb3 = {
            // CHECK: bb3: {
            // CHECK: _0 = const 5_u8;
            // CHECK: return;
            RET = 5;
            Return()
        }
        bb4 = {
            // CHECK: bb4: {
            // CHECK: _0 = const 7_u8;
            // CHECK: return;
            RET = 7;
            Return()
        }
        bb5 = {
            // CHECK: bb5: {
            // CHECK: _0 = const 9_u8;
            // CHECK: return;
            RET = 9;
            Return()
        }
        bb6 = {
            // We know `z != 3`, so we CANNOT take `bb7`.
            // CHECK: bb6: {
            // CHECK: switchInt([[z]]) -> [1: bb7, otherwise: bb8];
            match z { 1 => bb7, _ => bb8 }
        }
        bb7 = {
            // CHECK: bb7: {
            // CHECK: _0 = const 9_u8;
            // CHECK: return;
            RET = 9;
            Return()
        }
        bb8 = {
            // CHECK: bb8: {
            // CHECK: _0 = const 11_u8;
            // CHECK: return;
            RET = 11;
            Return()
        }
    )
}
/// Both 1-3-4 and 2-3-4 are threadable. As 1 and 2 are the only predecessors of 3,
/// verify that we only thread the 3-4 part.
#[custom_mir(dialect = "runtime", phase = "post-cleanup")]
fn duplicate_chain(x: bool) -> u8 {
    // CHECK-LABEL: fn duplicate_chain(
    mir!(
        let a: u8;
        {
            // CHECK: bb0: {
            // CHECK: switchInt({{.*}}) -> [1: bb1, otherwise: bb2];
            match x { true => bb1, _ => bb2 }
        }
        bb1 = {
            // Both branches assign the same value, so `a == 5` holds on both paths.
            // CHECK: bb1: {
            // CHECK: [[a:_.*]] = const 5_u8;
            // CHECK: goto -> bb3;
            a = 5;
            Goto(bb3)
        }
        bb2 = {
            // CHECK: bb2: {
            // CHECK: [[a]] = const 5_u8;
            // CHECK: goto -> bb3;
            a = 5;
            Goto(bb3)
        }
        bb3 = {
            // CHECK: bb3: {
            // CHECK: {{_.*}} = const 13_i32;
            // CHECK: goto -> bb4;
            let b = 13;
            Goto(bb4)
        }
        bb4 = {
            // CHECK: bb4: {
            // CHECK: {{_.*}} = const 15_i32;
            // CHECK-NOT: switchInt(
            // CHECK: goto -> bb5;
            let c = 15;
            match a { 5 => bb5, _ => bb6 }
        }
        bb5 = {
            // CHECK: bb5: {
            // CHECK: _0 = const 7_u8;
            // CHECK: return;
            RET = 7;
            Return()
        }
        bb6 = {
            // CHECK: bb6: {
            // CHECK: _0 = const 9_u8;
            // CHECK: return;
            RET = 9;
            Return()
        }
    )
}
/// A usize whose valid values start at 1, so `Option<NonZeroUsize>` stores its
/// discriminant in the zero niche (see `mutate_discriminant`).
#[rustc_layout_scalar_valid_range_start(1)]
#[rustc_nonnull_optimization_guaranteed]
struct NonZeroUsize(usize);
/// Verify that we correctly discard threads that may mutate a discriminant by aliasing.
#[custom_mir(dialect = "runtime", phase = "post-cleanup")]
fn mutate_discriminant() -> u8 {
    // CHECK-LABEL: fn mutate_discriminant(
    // CHECK-NOT: goto -> {{bb.*}};
    // CHECK: switchInt(
    // CHECK-NOT: goto -> {{bb.*}};
    mir!(
        let x: Option<NonZeroUsize>;
        {
            SetDiscriminant(x, 1);
            // This assignment overwrites the niche in which the discriminant is stored.
            place!(Field(Field(Variant(x, 1), 0), 0)) = 0_usize;
            // So we cannot know the value of this discriminant.
            let a = Discriminant(x);
            match a {
                0 => bb1,
                _ => bad,
            }
        }
        bb1 = {
            RET = 1;
            Return()
        }
        bad = {
            RET = 2;
            Unreachable()
        }
    )
}
/// Verify that we do not try to reason when there are mutable pointers involved.
fn mutable_ref() -> bool {
    // CHECK-LABEL: fn mutable_ref(
    // CHECK-NOT: goto -> {{bb.*}};
    // CHECK: switchInt(
    // CHECK: goto -> [[bbret:bb.*]];
    // CHECK: goto -> [[bbret]];
    // CHECK: [[bbret]]: {
    // CHECK-NOT: {{bb.*}}: {
    // CHECK: return;
    let mut x = 5;
    let a = std::ptr::addr_of_mut!(x);
    // `x` is written through `a`, so the pass must not assume `x == 7` below.
    x = 7;
    unsafe { *a = 8 };
    if x == 7 {
        true
    } else {
        false
    }
}
/// This function has 2 TOs: 1-3-4 and 0-1-3-4-6.
/// We verify that the second TO does not modify 3 once the first has been applied.
#[custom_mir(dialect = "runtime", phase = "post-cleanup")]
fn renumbered_bb(x: bool) -> u8 {
    // CHECK-LABEL: fn renumbered_bb(
    mir!(
        let a: bool;
        let b: bool;
        {
            // CHECK: bb0: {
            // CHECK: switchInt({{.*}}) -> [1: bb1, otherwise: bb2];
            b = false;
            match x { true => bb1, _ => bb2 }
        }
        bb1 = {
            // CHECK: bb1: {
            // CHECK: goto -> bb8;
            a = false;
            Goto(bb3)
        }
        bb2 = {
            // CHECK: bb2: {
            // CHECK: goto -> bb3;
            a = x;
            b = x;
            Goto(bb3)
        }
        bb3 = {
            // CHECK: bb3: {
            // CHECK: switchInt({{.*}}) -> [0: bb4, otherwise: bb5];
            match a { false => bb4, _ => bb5 }
        }
        bb4 = {
            // CHECK: bb4: {
            // CHECK: switchInt({{.*}}) -> [0: bb6, otherwise: bb7];
            match b { false => bb6, _ => bb7 }
        }
        bb5 = {
            // CHECK: bb5: {
            // CHECK: _0 = const 7_u8;
            RET = 7;
            Return()
        }
        bb6 = {
            // CHECK: bb6: {
            // CHECK: _0 = const 9_u8;
            RET = 9;
            Return()
        }
        bb7 = {
            // CHECK: bb7: {
            // CHECK: _0 = const 11_u8;
            RET = 11;
            Return()
        }
        // Duplicate of bb3.
        // CHECK: bb8: {
        // CHECK-NEXT: goto -> bb9;
        // Duplicate of bb4.
        // CHECK: bb9: {
        // CHECK-NEXT: goto -> bb6;
    )
}
/// This function has 3 TOs: 1-4-5, 0-1-4-7-5-8 and 3-4-7-5-6
/// After applying the first TO, we create bb9 to replace 4, and rename 1-4 edge by 1-9. The
/// second TO may try to thread non-existing edge 9-4.
/// This test verifies that we preserve semantics by bailing out of this second TO.
///
/// ("TO" = threading opportunity.)  The duplicated blocks bb9/bb10 are checked at the
/// bottom; the key property is that the pass notices the 1-4 edge no longer exists after
/// the first application and does not thread through a stale edge.
#[custom_mir(dialect = "runtime", phase = "post-cleanup")]
fn disappearing_bb(x: u8) -> u8 {
// CHECK-LABEL: fn disappearing_bb(
mir!(
let a: bool;
let b: bool;
{
a = true;
b = true;
match x { 0 => bb3, 1 => bb3, 2 => bb1, _ => bb2 }
}
bb1 = {
// CHECK: bb1: {
// CHECK: goto -> bb9;
// `b == false` is known here, so the switch in bb4 is threaded via duplicate bb9.
b = false;
Goto(bb4)
}
bb2 = {
// Catch-all arm of the entry switch; never taken for the tested inputs.
Unreachable()
}
bb3 = {
// CHECK: bb3: {
// CHECK: goto -> bb10;
// `a == false` (and `b == true`) is known here: threaded via duplicate bb10.
a = false;
Goto(bb4)
}
bb4 = {
// Merge point on `b`; duplicated by the first threading opportunity.
match b { false => bb5, _ => bb7 }
}
bb5 = {
match a { false => bb6, _ => bb8 }
}
bb6 = {
Return()
}
bb7 = {
Goto(bb5)
}
bb8 = {
Goto(bb6)
}
// CHECK: bb9: {
// CHECK: goto -> bb5;
// CHECK: bb10: {
// CHECK: goto -> bb6;
)
}
// Entry point: call every test function once so each is codegenned and its MIR is
// emitted for the EMIT_MIR / FileCheck comparisons below.  Return values are ignored;
// only the generated MIR matters.
fn main() {
too_complex(Ok(0));
identity(Ok(0));
custom_discr(false);
dfa();
multiple_match(5);
duplicate_chain(false);
mutate_discriminant();
mutable_ref();
renumbered_bb(true);
disappearing_bb(7);
}
// EMIT_MIR jump_threading.too_complex.JumpThreading.diff
// EMIT_MIR jump_threading.identity.JumpThreading.diff
// EMIT_MIR jump_threading.custom_discr.JumpThreading.diff
// EMIT_MIR jump_threading.dfa.JumpThreading.diff
// EMIT_MIR jump_threading.multiple_match.JumpThreading.diff
// EMIT_MIR jump_threading.duplicate_chain.JumpThreading.diff
// EMIT_MIR jump_threading.mutate_discriminant.JumpThreading.diff
// EMIT_MIR jump_threading.mutable_ref.JumpThreading.diff
// EMIT_MIR jump_threading.renumbered_bb.JumpThreading.diff
// EMIT_MIR jump_threading.disappearing_bb.JumpThreading.diff

View file

@ -0,0 +1,98 @@
- // MIR for `too_complex` before JumpThreading
+ // MIR for `too_complex` after JumpThreading
fn too_complex(_1: Result<i32, usize>) -> Option<i32> {
debug x => _1;
let mut _0: std::option::Option<i32>;
let mut _2: std::ops::ControlFlow<usize, i32>;
let mut _3: isize;
let _4: i32;
let mut _5: i32;
let _6: usize;
let mut _7: usize;
let mut _8: isize;
let _9: i32;
let mut _10: i32;
let _11: usize;
scope 1 {
debug v => _4;
}
scope 2 {
debug r => _6;
}
scope 3 {
debug v => _9;
}
scope 4 {
debug r => _11;
}
bb0: {
StorageLive(_2);
_3 = discriminant(_1);
switchInt(move _3) -> [0: bb3, 1: bb1, otherwise: bb2];
}
bb1: {
StorageLive(_6);
_6 = ((_1 as Err).0: usize);
StorageLive(_7);
_7 = _6;
_2 = ControlFlow::<usize, i32>::Break(move _7);
StorageDead(_7);
StorageDead(_6);
- goto -> bb4;
+ goto -> bb8;
}
bb2: {
unreachable;
}
bb3: {
StorageLive(_4);
_4 = ((_1 as Ok).0: i32);
StorageLive(_5);
_5 = _4;
_2 = ControlFlow::<usize, i32>::Continue(move _5);
StorageDead(_5);
StorageDead(_4);
goto -> bb4;
}
bb4: {
_8 = discriminant(_2);
- switchInt(move _8) -> [0: bb6, 1: bb5, otherwise: bb2];
+ goto -> bb6;
}
bb5: {
StorageLive(_11);
_11 = ((_2 as Break).0: usize);
_0 = Option::<i32>::None;
StorageDead(_11);
goto -> bb7;
}
bb6: {
StorageLive(_9);
_9 = ((_2 as Continue).0: i32);
StorageLive(_10);
_10 = _9;
_0 = Option::<i32>::Some(move _10);
StorageDead(_10);
StorageDead(_9);
goto -> bb7;
}
bb7: {
StorageDead(_2);
return;
+ }
+
+ bb8: {
+ _8 = discriminant(_2);
+ goto -> bb5;
}
}

View file

@ -0,0 +1,98 @@
- // MIR for `too_complex` before JumpThreading
+ // MIR for `too_complex` after JumpThreading
fn too_complex(_1: Result<i32, usize>) -> Option<i32> {
debug x => _1;
let mut _0: std::option::Option<i32>;
let mut _2: std::ops::ControlFlow<usize, i32>;
let mut _3: isize;
let _4: i32;
let mut _5: i32;
let _6: usize;
let mut _7: usize;
let mut _8: isize;
let _9: i32;
let mut _10: i32;
let _11: usize;
scope 1 {
debug v => _4;
}
scope 2 {
debug r => _6;
}
scope 3 {
debug v => _9;
}
scope 4 {
debug r => _11;
}
bb0: {
StorageLive(_2);
_3 = discriminant(_1);
switchInt(move _3) -> [0: bb3, 1: bb1, otherwise: bb2];
}
bb1: {
StorageLive(_6);
_6 = ((_1 as Err).0: usize);
StorageLive(_7);
_7 = _6;
_2 = ControlFlow::<usize, i32>::Break(move _7);
StorageDead(_7);
StorageDead(_6);
- goto -> bb4;
+ goto -> bb8;
}
bb2: {
unreachable;
}
bb3: {
StorageLive(_4);
_4 = ((_1 as Ok).0: i32);
StorageLive(_5);
_5 = _4;
_2 = ControlFlow::<usize, i32>::Continue(move _5);
StorageDead(_5);
StorageDead(_4);
goto -> bb4;
}
bb4: {
_8 = discriminant(_2);
- switchInt(move _8) -> [0: bb6, 1: bb5, otherwise: bb2];
+ goto -> bb6;
}
bb5: {
StorageLive(_11);
_11 = ((_2 as Break).0: usize);
_0 = Option::<i32>::None;
StorageDead(_11);
goto -> bb7;
}
bb6: {
StorageLive(_9);
_9 = ((_2 as Continue).0: i32);
StorageLive(_10);
_10 = _9;
_0 = Option::<i32>::Some(move _10);
StorageDead(_10);
StorageDead(_9);
goto -> bb7;
}
bb7: {
StorageDead(_2);
return;
+ }
+
+ bb8: {
+ _8 = discriminant(_2);
+ goto -> bb5;
}
}

View file

@ -11,18 +11,6 @@ fn while_loop(_1: bool) -> () {
}
bb0: {
goto -> bb1;
}
bb1: {
switchInt(_1) -> [0: bb3, otherwise: bb2];
}
bb2: {
switchInt(_1) -> [0: bb1, otherwise: bb3];
}
bb3: {
return;
}
}

View file

@ -11,18 +11,6 @@ fn while_loop(_1: bool) -> () {
}
bb0: {
goto -> bb1;
}
bb1: {
switchInt(_1) -> [0: bb3, otherwise: bb2];
}
bb2: {
switchInt(_1) -> [0: bb1, otherwise: bb3];
}
bb3: {
return;
}
}