Auto merge of #126541 - scottmcm:more-ptr-metadata-gvn, r=cjgillot

More ptr metadata gvn

There are basically three parts to this PR.

1. Allow references as arguments to `UnOp::PtrMetadata`

This is a MIR semantics addition, so
r? mir

Rather than just raw pointers, also allow references to be passed to `PtrMetadata`.  That means the length of a slice can be just `PtrMetadata(_1)` instead of also needing a ref-to-pointer statement (`_2 = &raw *_1` + `PtrMetadata(_2)`).

AFAIK there should be no provenance or tagging implications of looking at the *metadata* of a pointer. The code in the backends already supported it anyway (other than a debug assert, since they don't really care about pointer vs. reference here), so we might as well allow it.
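As a concrete illustration, here is a hand-written sketch that mirrors the `manual_slice_mut_len` test added in this PR (the resulting MIR quoted in the comment comes from that test's GVN diff further down):

```rust
#![feature(core_intrinsics)]

// Mirrors the `manual_slice_mut_len` test added in this PR.  After GVN the
// whole body becomes `_0 = PtrMetadata(_1)`, using the reference `_1`
// directly as the operand -- no `_2 = &raw mut (*_1)` statement needed.
fn manual_slice_mut_len(x: &mut [i32]) -> usize {
    let x: *mut [i32] = x;
    let x: *const [i32] = x;
    std::intrinsics::ptr_metadata(x)
}
```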

2. Simplify the argument to `PtrMetadata` in GVN

Because the specific kind of pointer-like thing isn't that important, GVN can simplify all those details away.  Things like `*const`-to-`*mut` casts and `&mut`-to-`&` reborrows are irrelevant, and skipping them lets it see more interesting things.

cc `@cjgillot`

Notably, unsizing casts for arrays.  GVN supported that for `Len`, and now it sees it for `PtrMetadata` as well, allowing `PtrMetadata(pointer)` to become a constant if that pointer came from an array-to-slice unsizing, even through a bunch of other possible steps.
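The `array_len` test added in this PR exercises exactly this case; roughly (under the same `#![feature(core_intrinsics)]` gate as the sketch above):

```rust
// Mirrors the `array_len` test added in this PR.  The `&mut [i32; 42]` to
// `&[i32]` unsizing coercion records the length in the fat pointer's
// metadata, so GVN can fold the final statement to `_0 = const 42_usize`.
fn array_len(x: &mut [i32; 42]) -> usize {
    let x: &[i32] = x; // array-to-slice unsizing
    std::intrinsics::ptr_metadata(x)
}
```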

3. Replace `NormalizeArrayLen` with GVN

The `NormalizeArrayLen` pass hasn't been running even in optimized builds for well over a year, and it turns out that GVN -- which *is* on in optimized builds -- can do everything it was trying to do.

So the code for the pass is deleted, but its tests are kept, just updated to run GVN instead.

As part of this, `LowerSliceLen` was changed to emit `PtrMetadata(_1)` instead of `Len(*_1)`, a small step on the road to eventually eliminating `Rvalue::Len`.
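For reference, a minimal sketch of what the new lowering looks like on an ordinary `.len()` call (the function is made up; the MIR in the comment is quoted from the pass's own before/after comment in the diff below):

```rust
// Any `<[_]>::len` call is a candidate for LowerSliceLenCalls.  After this
// PR the call terminator
//     _5 = core::slice::<impl [u8]>::len(move _6) -> bb1
// is rewritten to
//     _5 = PtrMetadata(move _6);
//     goto -> bb1;
// instead of the old `_5 = Len(*_6)`.
pub fn slice_len_via_method(s: &[u8]) -> usize {
    s.len()
}
```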
bors · 2024-06-21 07:12:50 +00:00 · commit e32ea4822b
51 changed files with 622 additions and 323 deletions


@ -639,7 +639,9 @@ pub fn codegen_rvalue_operand(
(OperandValue::Immediate(llval), operand.layout)
}
mir::UnOp::PtrMetadata => {
debug_assert!(operand.layout.ty.is_unsafe_ptr());
debug_assert!(
operand.layout.ty.is_unsafe_ptr() || operand.layout.ty.is_ref(),
);
let (_, meta) = operand.val.pointer_parts();
assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
if let Some(meta) = meta {


@ -460,7 +460,7 @@ pub fn unary_op(
let res = ScalarInt::truncate_from_uint(res, layout.size).0;
Ok(ImmTy::from_scalar(res.into(), layout))
}
ty::RawPtr(..) => {
ty::RawPtr(..) | ty::Ref(..) => {
assert_eq!(un_op, PtrMetadata);
let (_, meta) = val.to_scalar_and_meta();
Ok(match meta {


@ -1446,10 +1446,12 @@ pub enum UnOp {
Not,
/// The `-` operator for negation
Neg,
/// Get the metadata `M` from a `*const/mut impl Pointee<Metadata = M>`.
/// Gets the metadata `M` from a `*const`/`*mut`/`&`/`&mut` to
/// `impl Pointee<Metadata = M>`.
///
/// For example, this will give a `()` from `*const i32`, a `usize` from
/// `*mut [u8]`, or a pointer to a vtable from a `*const dyn Foo`.
/// `&mut [u8]`, or a `ptr::DynMetadata<dyn Foo>` (internally a pointer)
/// from a `*mut dyn Foo`.
///
/// Allowed only in [`MirPhase::Runtime`]; earlier it's an intrinsic.
PtrMetadata,


@ -193,6 +193,10 @@ fn parse_rvalue(&self, expr_id: ExprId) -> PResult<Rvalue<'tcx>> {
let source = self.parse_operand(args[0])?;
Ok(Rvalue::Cast(CastKind::Transmute, source, expr.ty))
},
@call(mir_cast_ptr_to_ptr, args) => {
let source = self.parse_operand(args[0])?;
Ok(Rvalue::Cast(CastKind::PtrToPtr, source, expr.ty))
},
@call(mir_checked, args) => {
parse_by_kind!(self, args[0], _, "binary op",
ExprKind::Binary { op, lhs, rhs } => {


@ -836,12 +836,8 @@ fn simplify_rvalue(
}
Value::BinaryOp(op, lhs, rhs)
}
Rvalue::UnaryOp(op, ref mut arg) => {
let arg = self.simplify_operand(arg, location)?;
if let Some(value) = self.simplify_unary(op, arg) {
return Some(value);
}
Value::UnaryOp(op, arg)
Rvalue::UnaryOp(op, ref mut arg_op) => {
return self.simplify_unary(op, arg_op, location);
}
Rvalue::Discriminant(ref mut place) => {
let place = self.simplify_place_value(place, location)?;
@ -949,13 +945,8 @@ fn simplify_aggregate(
was_updated = true;
}
if was_updated {
if let Some(const_) = self.try_as_constant(fields[0]) {
field_ops[FieldIdx::ZERO] = Operand::Constant(Box::new(const_));
} else if let Some(local) = self.try_as_local(fields[0], location) {
field_ops[FieldIdx::ZERO] = Operand::Copy(Place::from(local));
self.reused_locals.insert(local);
}
if was_updated && let Some(op) = self.try_as_operand(fields[0], location) {
field_ops[FieldIdx::ZERO] = op;
}
}
@ -965,11 +956,8 @@ fn simplify_aggregate(
let first = fields[0];
if fields.iter().all(|&v| v == first) {
let len = ty::Const::from_target_usize(self.tcx, fields.len().try_into().unwrap());
if let Some(const_) = self.try_as_constant(first) {
*rvalue = Rvalue::Repeat(Operand::Constant(Box::new(const_)), len);
} else if let Some(local) = self.try_as_local(first, location) {
*rvalue = Rvalue::Repeat(Operand::Copy(local.into()), len);
self.reused_locals.insert(local);
if let Some(op) = self.try_as_operand(first, location) {
*rvalue = Rvalue::Repeat(op, len);
}
return Some(self.insert(Value::Repeat(first, len)));
}
@ -979,8 +967,71 @@ fn simplify_aggregate(
}
#[instrument(level = "trace", skip(self), ret)]
fn simplify_unary(&mut self, op: UnOp, value: VnIndex) -> Option<VnIndex> {
let value = match (op, self.get(value)) {
fn simplify_unary(
&mut self,
op: UnOp,
arg_op: &mut Operand<'tcx>,
location: Location,
) -> Option<VnIndex> {
let mut arg_index = self.simplify_operand(arg_op, location)?;
// PtrMetadata doesn't care about *const vs *mut vs & vs &mut,
// so start by removing those distinctions so we can update the `Operand`
if op == UnOp::PtrMetadata {
let mut was_updated = false;
loop {
match self.get(arg_index) {
// Pointer casts that preserve metadata, such as
// `*const [i32]` <-> `*mut [i32]` <-> `*mut [f32]`.
// It's critical that this not eliminate cases like
// `*const [T]` -> `*const T` which remove metadata.
// We run on potentially-generic MIR, though, so unlike codegen
// we can't always know exactly what the metadata are.
// Thankfully, equality on `ptr_metadata_ty_or_tail` gives us
// what we need: `Ok(meta_ty)` if the metadata is known, or
// `Err(tail_ty)` if not. Matching metadata is ok, but if
// that's not known, then matching tail types is also ok,
// allowing things like `*mut (?A, ?T)` <-> `*mut (?B, ?T)`.
// FIXME: Would it be worth trying to normalize, rather than
// passing the identity closure? Or are the types in the
// Cast realistically about as normalized as we can get anyway?
Value::Cast { kind: CastKind::PtrToPtr, value: inner, from, to }
if from
.builtin_deref(true)
.unwrap()
.ptr_metadata_ty_or_tail(self.tcx, |t| t)
== to
.builtin_deref(true)
.unwrap()
.ptr_metadata_ty_or_tail(self.tcx, |t| t) =>
{
arg_index = *inner;
was_updated = true;
continue;
}
// `&mut *p`, `&raw *p`, etc don't change metadata.
Value::Address { place, kind: _, provenance: _ }
if let PlaceRef { local, projection: [PlaceElem::Deref] } =
place.as_ref()
&& let Some(local_index) = self.locals[local] =>
{
arg_index = local_index;
was_updated = true;
continue;
}
_ => {
if was_updated && let Some(op) = self.try_as_operand(arg_index, location) {
*arg_op = op;
}
break;
}
}
}
}
let value = match (op, self.get(arg_index)) {
(UnOp::Not, Value::UnaryOp(UnOp::Not, inner)) => return Some(*inner),
(UnOp::Neg, Value::UnaryOp(UnOp::Neg, inner)) => return Some(*inner),
(UnOp::Not, Value::BinaryOp(BinOp::Eq, lhs, rhs)) => {
@ -992,9 +1043,26 @@ fn simplify_unary(&mut self, op: UnOp, value: VnIndex) -> Option<VnIndex> {
(UnOp::PtrMetadata, Value::Aggregate(AggregateTy::RawPtr { .. }, _, fields)) => {
return Some(fields[1]);
}
_ => return None,
// We have an unsizing cast, which assigns the length to fat pointer metadata.
(
UnOp::PtrMetadata,
Value::Cast {
kind: CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize),
from,
to,
..
},
) if let ty::Slice(..) = to.builtin_deref(true).unwrap().kind()
&& let ty::Array(_, len) = from.builtin_deref(true).unwrap().kind() =>
{
return self.insert_constant(Const::from_ty_const(
*len,
self.tcx.types.usize,
self.tcx,
));
}
_ => Value::UnaryOp(op, arg_index),
};
Some(self.insert(value))
}
@ -1174,13 +1242,8 @@ fn simplify_cast(
}
}
if was_updated {
if let Some(const_) = self.try_as_constant(value) {
*operand = Operand::Constant(Box::new(const_));
} else if let Some(local) = self.try_as_local(value, location) {
*operand = Operand::Copy(local.into());
self.reused_locals.insert(local);
}
if was_updated && let Some(op) = self.try_as_operand(value, location) {
*operand = op;
}
Some(self.insert(Value::Cast { kind: *kind, value, from, to }))
@ -1296,6 +1359,19 @@ fn op_to_prop_const<'tcx>(
}
impl<'tcx> VnState<'_, 'tcx> {
/// If either [`Self::try_as_constant`] or [`Self::try_as_local`] succeeds,
/// returns that result as an [`Operand`].
fn try_as_operand(&mut self, index: VnIndex, location: Location) -> Option<Operand<'tcx>> {
if let Some(const_) = self.try_as_constant(index) {
Some(Operand::Constant(Box::new(const_)))
} else if let Some(local) = self.try_as_local(index, location) {
self.reused_locals.insert(local);
Some(Operand::Copy(local.into()))
} else {
None
}
}
/// If `index` is a `Value::Constant`, return the `Constant` to be put in the MIR.
fn try_as_constant(&mut self, index: VnIndex) -> Option<ConstOperand<'tcx>> {
// This was already constant in MIR, do not change it.


@ -88,7 +88,6 @@
mod match_branches;
mod mentioned_items;
mod multiple_return_terminators;
mod normalize_array_len;
mod nrvo;
mod prettify;
mod promote_consts;
@ -581,9 +580,6 @@ fn o1<T>(x: T) -> WithMinOptLevel<T> {
&o1(simplify::SimplifyCfg::AfterUnreachableEnumBranching),
// Inlining may have introduced a lot of redundant code and a large move pattern.
// Now, we need to shrink the generated MIR.
// Has to run after `slice::len` lowering
&normalize_array_len::NormalizeArrayLen,
&ref_prop::ReferencePropagation,
&sroa::ScalarReplacementOfAggregates,
&match_branches::MatchBranchSimplification,


@ -1,10 +1,9 @@
//! This pass lowers calls to core::slice::len to just Len op.
//! This pass lowers calls to core::slice::len to just PtrMetadata op.
//! It should run before inlining!
use rustc_hir::def_id::DefId;
use rustc_index::IndexSlice;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, TyCtxt};
use rustc_middle::ty::TyCtxt;
pub struct LowerSliceLenCalls;
@ -29,16 +28,11 @@ pub fn lower_slice_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let basic_blocks = body.basic_blocks.as_mut_preserves_cfg();
for block in basic_blocks {
// lower `<[_]>::len` calls
lower_slice_len_call(tcx, block, &body.local_decls, slice_len_fn_item_def_id);
lower_slice_len_call(block, slice_len_fn_item_def_id);
}
}
fn lower_slice_len_call<'tcx>(
tcx: TyCtxt<'tcx>,
block: &mut BasicBlockData<'tcx>,
local_decls: &IndexSlice<Local, LocalDecl<'tcx>>,
slice_len_fn_item_def_id: DefId,
) {
fn lower_slice_len_call<'tcx>(block: &mut BasicBlockData<'tcx>, slice_len_fn_item_def_id: DefId) {
let terminator = block.terminator();
if let TerminatorKind::Call {
func,
@ -50,19 +44,17 @@ fn lower_slice_len_call<'tcx>(
} = &terminator.kind
// some heuristics for fast rejection
&& let [arg] = &args[..]
&& let Some(arg) = arg.node.place()
&& let ty::FnDef(fn_def_id, _) = func.ty(local_decls, tcx).kind()
&& *fn_def_id == slice_len_fn_item_def_id
&& let Some((fn_def_id, _)) = func.const_fn_def()
&& fn_def_id == slice_len_fn_item_def_id
{
// perform modifications from something like:
// _5 = core::slice::<impl [u8]>::len(move _6) -> bb1
// into:
// _5 = Len(*_6)
// _5 = PtrMetadata(move _6)
// goto bb1
// make new RValue for Len
let deref_arg = tcx.mk_place_deref(arg);
let r_value = Rvalue::Len(deref_arg);
let r_value = Rvalue::UnaryOp(UnOp::PtrMetadata, arg.node.clone());
let len_statement_kind = StatementKind::Assign(Box::new((*destination, r_value)));
let add_statement =
Statement { kind: len_statement_kind, source_info: terminator.source_info };


@ -1,103 +0,0 @@
//! This pass eliminates casting of arrays into slices when their length
//! is taken using `.len()` method. Handy to preserve information in MIR for const prop
use crate::ssa::SsaLocals;
use rustc_index::IndexVec;
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, TyCtxt};
pub struct NormalizeArrayLen;
impl<'tcx> MirPass<'tcx> for NormalizeArrayLen {
fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
sess.mir_opt_level() >= 3
}
#[instrument(level = "trace", skip(self, tcx, body))]
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
debug!(def_id = ?body.source.def_id());
normalize_array_len_calls(tcx, body)
}
}
fn normalize_array_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
let ssa = SsaLocals::new(tcx, body, param_env);
let slice_lengths = compute_slice_length(tcx, &ssa, body);
debug!(?slice_lengths);
Replacer { tcx, slice_lengths }.visit_body_preserves_cfg(body);
}
fn compute_slice_length<'tcx>(
tcx: TyCtxt<'tcx>,
ssa: &SsaLocals,
body: &Body<'tcx>,
) -> IndexVec<Local, Option<ty::Const<'tcx>>> {
let mut slice_lengths = IndexVec::from_elem(None, &body.local_decls);
for (local, rvalue, _) in ssa.assignments(body) {
match rvalue {
Rvalue::Cast(
CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize),
operand,
cast_ty,
) => {
let operand_ty = operand.ty(body, tcx);
debug!(?operand_ty);
if let Some(operand_ty) = operand_ty.builtin_deref(true)
&& let ty::Array(_, len) = operand_ty.kind()
&& let Some(cast_ty) = cast_ty.builtin_deref(true)
&& let ty::Slice(..) = cast_ty.kind()
{
slice_lengths[local] = Some(*len);
}
}
// The length information is stored in the fat pointer, so we treat `operand` as a value.
Rvalue::Use(operand) => {
if let Some(rhs) = operand.place()
&& let Some(rhs) = rhs.as_local()
{
slice_lengths[local] = slice_lengths[rhs];
}
}
// The length information is stored in the fat pointer.
// Reborrowing copies length information from one pointer to the other.
Rvalue::Ref(_, _, rhs) | Rvalue::AddressOf(_, rhs) => {
if let [PlaceElem::Deref] = rhs.projection[..] {
slice_lengths[local] = slice_lengths[rhs.local];
}
}
_ => {}
}
}
slice_lengths
}
struct Replacer<'tcx> {
tcx: TyCtxt<'tcx>,
slice_lengths: IndexVec<Local, Option<ty::Const<'tcx>>>,
}
impl<'tcx> MutVisitor<'tcx> for Replacer<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn visit_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>, loc: Location) {
if let Rvalue::Len(place) = rvalue
&& let [PlaceElem::Deref] = &place.projection[..]
&& let Some(len) = self.slice_lengths[place.local]
{
*rvalue = Rvalue::Use(Operand::Constant(Box::new(ConstOperand {
span: rustc_span::DUMMY_SP,
user_ty: None,
const_: Const::from_ty_const(len, self.tcx.types.usize, self.tcx),
})));
}
self.super_rvalue(rvalue, loc);
}
}


@ -1116,12 +1116,17 @@ macro_rules! check_kinds {
UnOp::PtrMetadata => {
if !matches!(self.mir_phase, MirPhase::Runtime(_)) {
// It would probably be fine to support this in earlier phases,
// but at the time of writing it's only ever introduced from intrinsic lowering,
// but at the time of writing it's only ever introduced from intrinsic lowering
// or other runtime-phase optimization passes,
// so earlier things can just `bug!` on it.
self.fail(location, "PtrMetadata should be in runtime MIR only");
}
check_kinds!(a, "Cannot PtrMetadata non-pointer type {:?}", ty::RawPtr(..));
check_kinds!(
a,
"Cannot PtrMetadata non-pointer non-reference type {:?}",
ty::RawPtr(..) | ty::Ref(..)
);
}
}
}


@ -1175,6 +1175,7 @@
mir_assume,
mir_basic_block,
mir_call,
mir_cast_ptr_to_ptr,
mir_cast_transmute,
mir_checked,
mir_copy_for_deref,


@ -444,6 +444,13 @@ fn Variant<T>(place: T, index: u32) -> ()
/// generated via the normal `mem::transmute`.
fn CastTransmute<T, U>(operand: T) -> U
);
define!(
"mir_cast_ptr_to_ptr",
/// Emits a `CastKind::PtrToPtr` cast.
///
/// This allows bypassing normal validation to generate strange casts.
fn CastPtrToPtr<T, U>(operand: T) -> U
);
define!(
"mir_make_place",
#[doc(hidden)]


@ -22,19 +22,19 @@ Number of file 0 mappings: 5
- Code(Counter(0)) at (prev + 3, 1) to (start + 0, 2)
Function name: closure_macro::main::{closure#0}
Raw bytes (35): 0x[01, 01, 03, 01, 05, 05, 0b, 09, 00, 05, 01, 10, 1c, 03, 21, 05, 04, 11, 01, 27, 02, 03, 11, 00, 16, 00, 00, 17, 00, 1e, 07, 02, 09, 00, 0a]
Raw bytes (35): 0x[01, 01, 03, 01, 05, 05, 0b, 09, 0d, 05, 01, 10, 1c, 03, 21, 05, 04, 11, 01, 27, 02, 03, 11, 00, 16, 0d, 00, 17, 00, 1e, 07, 02, 09, 00, 0a]
Number of files: 1
- file 0 => global file 1
Number of expressions: 3
- expression 0 operands: lhs = Counter(0), rhs = Counter(1)
- expression 1 operands: lhs = Counter(1), rhs = Expression(2, Add)
- expression 2 operands: lhs = Counter(2), rhs = Zero
- expression 2 operands: lhs = Counter(2), rhs = Counter(3)
Number of file 0 mappings: 5
- Code(Counter(0)) at (prev + 16, 28) to (start + 3, 33)
- Code(Counter(1)) at (prev + 4, 17) to (start + 1, 39)
- Code(Expression(0, Sub)) at (prev + 3, 17) to (start + 0, 22)
= (c0 - c1)
- Code(Zero) at (prev + 0, 23) to (start + 0, 30)
- Code(Counter(3)) at (prev + 0, 23) to (start + 0, 30)
- Code(Expression(1, Add)) at (prev + 2, 9) to (start + 0, 10)
= (c1 + (c2 + Zero))
= (c1 + (c2 + c3))


@ -30,19 +30,19 @@ Number of file 0 mappings: 5
- Code(Counter(0)) at (prev + 3, 1) to (start + 0, 2)
Function name: closure_macro_async::test::{closure#0}::{closure#0}
Raw bytes (35): 0x[01, 01, 03, 01, 05, 05, 0b, 09, 00, 05, 01, 12, 1c, 03, 21, 05, 04, 11, 01, 27, 02, 03, 11, 00, 16, 00, 00, 17, 00, 1e, 07, 02, 09, 00, 0a]
Raw bytes (35): 0x[01, 01, 03, 01, 05, 05, 0b, 09, 0d, 05, 01, 12, 1c, 03, 21, 05, 04, 11, 01, 27, 02, 03, 11, 00, 16, 0d, 00, 17, 00, 1e, 07, 02, 09, 00, 0a]
Number of files: 1
- file 0 => global file 1
Number of expressions: 3
- expression 0 operands: lhs = Counter(0), rhs = Counter(1)
- expression 1 operands: lhs = Counter(1), rhs = Expression(2, Add)
- expression 2 operands: lhs = Counter(2), rhs = Zero
- expression 2 operands: lhs = Counter(2), rhs = Counter(3)
Number of file 0 mappings: 5
- Code(Counter(0)) at (prev + 18, 28) to (start + 3, 33)
- Code(Counter(1)) at (prev + 4, 17) to (start + 1, 39)
- Code(Expression(0, Sub)) at (prev + 3, 17) to (start + 0, 22)
= (c0 - c1)
- Code(Zero) at (prev + 0, 23) to (start + 0, 30)
- Code(Counter(3)) at (prev + 0, 23) to (start + 0, 30)
- Code(Expression(1, Add)) at (prev + 2, 9) to (start + 0, 10)
= (c1 + (c2 + Zero))
= (c1 + (c2 + c3))


@ -0,0 +1,31 @@
- // MIR for `array_len` before GVN
+ // MIR for `array_len` after GVN
fn array_len(_1: &mut [i32; 42]) -> usize {
debug x => _1;
let mut _0: usize;
let _2: &[i32];
let mut _3: &[i32; 42];
let mut _4: *const [i32];
scope 1 {
debug x => _2;
}
bb0: {
- StorageLive(_2);
+ nop;
StorageLive(_3);
_3 = &(*_1);
_2 = move _3 as &[i32] (PointerCoercion(Unsize));
StorageDead(_3);
StorageLive(_4);
_4 = &raw const (*_2);
- _0 = PtrMetadata(move _4);
+ _0 = const 42_usize;
StorageDead(_4);
- StorageDead(_2);
+ nop;
return;
}
}


@ -0,0 +1,31 @@
- // MIR for `array_len` before GVN
+ // MIR for `array_len` after GVN
fn array_len(_1: &mut [i32; 42]) -> usize {
debug x => _1;
let mut _0: usize;
let _2: &[i32];
let mut _3: &[i32; 42];
let mut _4: *const [i32];
scope 1 {
debug x => _2;
}
bb0: {
- StorageLive(_2);
+ nop;
StorageLive(_3);
_3 = &(*_1);
_2 = move _3 as &[i32] (PointerCoercion(Unsize));
StorageDead(_3);
StorageLive(_4);
_4 = &raw const (*_2);
- _0 = PtrMetadata(move _4);
+ _0 = const 42_usize;
StorageDead(_4);
- StorageDead(_2);
+ nop;
return;
}
}


@ -8,10 +8,10 @@
let mut _3: fn(u8) -> u8;
let _5: ();
let mut _6: fn(u8) -> u8;
let mut _9: {closure@$DIR/gvn.rs:612:19: 612:21};
let mut _9: {closure@$DIR/gvn.rs:614:19: 614:21};
let _10: ();
let mut _11: fn();
let mut _13: {closure@$DIR/gvn.rs:612:19: 612:21};
let mut _13: {closure@$DIR/gvn.rs:614:19: 614:21};
let _14: ();
let mut _15: fn();
scope 1 {
@ -19,7 +19,7 @@
let _4: fn(u8) -> u8;
scope 2 {
debug g => _4;
let _7: {closure@$DIR/gvn.rs:612:19: 612:21};
let _7: {closure@$DIR/gvn.rs:614:19: 614:21};
scope 3 {
debug closure => _7;
let _8: fn();
@ -62,16 +62,16 @@
StorageDead(_6);
StorageDead(_5);
- StorageLive(_7);
- _7 = {closure@$DIR/gvn.rs:612:19: 612:21};
- _7 = {closure@$DIR/gvn.rs:614:19: 614:21};
- StorageLive(_8);
+ nop;
+ _7 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21};
+ _7 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21};
+ nop;
StorageLive(_9);
- _9 = _7;
- _8 = move _9 as fn() (PointerCoercion(ClosureFnPointer(Safe)));
+ _9 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21};
+ _8 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21} as fn() (PointerCoercion(ClosureFnPointer(Safe)));
+ _9 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21};
+ _8 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21} as fn() (PointerCoercion(ClosureFnPointer(Safe)));
StorageDead(_9);
StorageLive(_10);
StorageLive(_11);
@ -88,8 +88,8 @@
StorageLive(_13);
- _13 = _7;
- _12 = move _13 as fn() (PointerCoercion(ClosureFnPointer(Safe)));
+ _13 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21};
+ _12 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21} as fn() (PointerCoercion(ClosureFnPointer(Safe)));
+ _13 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21};
+ _12 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21} as fn() (PointerCoercion(ClosureFnPointer(Safe)));
StorageDead(_13);
StorageLive(_14);
StorageLive(_15);


@ -8,10 +8,10 @@
let mut _3: fn(u8) -> u8;
let _5: ();
let mut _6: fn(u8) -> u8;
let mut _9: {closure@$DIR/gvn.rs:612:19: 612:21};
let mut _9: {closure@$DIR/gvn.rs:614:19: 614:21};
let _10: ();
let mut _11: fn();
let mut _13: {closure@$DIR/gvn.rs:612:19: 612:21};
let mut _13: {closure@$DIR/gvn.rs:614:19: 614:21};
let _14: ();
let mut _15: fn();
scope 1 {
@ -19,7 +19,7 @@
let _4: fn(u8) -> u8;
scope 2 {
debug g => _4;
let _7: {closure@$DIR/gvn.rs:612:19: 612:21};
let _7: {closure@$DIR/gvn.rs:614:19: 614:21};
scope 3 {
debug closure => _7;
let _8: fn();
@ -62,16 +62,16 @@
StorageDead(_6);
StorageDead(_5);
- StorageLive(_7);
- _7 = {closure@$DIR/gvn.rs:612:19: 612:21};
- _7 = {closure@$DIR/gvn.rs:614:19: 614:21};
- StorageLive(_8);
+ nop;
+ _7 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21};
+ _7 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21};
+ nop;
StorageLive(_9);
- _9 = _7;
- _8 = move _9 as fn() (PointerCoercion(ClosureFnPointer(Safe)));
+ _9 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21};
+ _8 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21} as fn() (PointerCoercion(ClosureFnPointer(Safe)));
+ _9 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21};
+ _8 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21} as fn() (PointerCoercion(ClosureFnPointer(Safe)));
StorageDead(_9);
StorageLive(_10);
StorageLive(_11);
@ -88,8 +88,8 @@
StorageLive(_13);
- _13 = _7;
- _12 = move _13 as fn() (PointerCoercion(ClosureFnPointer(Safe)));
+ _13 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21};
+ _12 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21} as fn() (PointerCoercion(ClosureFnPointer(Safe)));
+ _13 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21};
+ _12 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21} as fn() (PointerCoercion(ClosureFnPointer(Safe)));
StorageDead(_13);
StorageLive(_14);
StorageLive(_15);


@ -0,0 +1,38 @@
- // MIR for `generic_cast_metadata` before GVN
+ // MIR for `generic_cast_metadata` after GVN
fn generic_cast_metadata(_1: *const [T], _2: *const A, _3: *const B) -> () {
let mut _0: ();
let mut _4: *const T;
let mut _5: ();
let mut _6: *const (&A, [T]);
let mut _7: usize;
let mut _8: *const (T, B);
let mut _9: <B as std::ptr::Pointee>::Metadata;
let mut _10: *const (T, A);
let mut _11: <A as std::ptr::Pointee>::Metadata;
let mut _12: *mut A;
let mut _13: <A as std::ptr::Pointee>::Metadata;
let mut _14: *mut B;
let mut _15: <B as std::ptr::Pointee>::Metadata;
bb0: {
_4 = _1 as *const T (PtrToPtr);
_5 = PtrMetadata(_4);
_6 = _1 as *const (&A, [T]) (PtrToPtr);
- _7 = PtrMetadata(_6);
+ _7 = PtrMetadata(_1);
_8 = _2 as *const (T, B) (PtrToPtr);
_9 = PtrMetadata(_8);
_10 = _2 as *const (T, A) (PtrToPtr);
- _11 = PtrMetadata(_10);
+ _11 = PtrMetadata(_2);
_12 = _3 as *mut A (PtrToPtr);
_13 = PtrMetadata(_12);
_14 = _3 as *mut B (PtrToPtr);
- _15 = PtrMetadata(_14);
+ _15 = PtrMetadata(_3);
return;
}
}


@ -0,0 +1,38 @@
- // MIR for `generic_cast_metadata` before GVN
+ // MIR for `generic_cast_metadata` after GVN
fn generic_cast_metadata(_1: *const [T], _2: *const A, _3: *const B) -> () {
let mut _0: ();
let mut _4: *const T;
let mut _5: ();
let mut _6: *const (&A, [T]);
let mut _7: usize;
let mut _8: *const (T, B);
let mut _9: <B as std::ptr::Pointee>::Metadata;
let mut _10: *const (T, A);
let mut _11: <A as std::ptr::Pointee>::Metadata;
let mut _12: *mut A;
let mut _13: <A as std::ptr::Pointee>::Metadata;
let mut _14: *mut B;
let mut _15: <B as std::ptr::Pointee>::Metadata;
bb0: {
_4 = _1 as *const T (PtrToPtr);
_5 = PtrMetadata(_4);
_6 = _1 as *const (&A, [T]) (PtrToPtr);
- _7 = PtrMetadata(_6);
+ _7 = PtrMetadata(_1);
_8 = _2 as *const (T, B) (PtrToPtr);
_9 = PtrMetadata(_8);
_10 = _2 as *const (T, A) (PtrToPtr);
- _11 = PtrMetadata(_10);
+ _11 = PtrMetadata(_2);
_12 = _3 as *mut A (PtrToPtr);
_13 = PtrMetadata(_12);
_14 = _3 as *mut B (PtrToPtr);
- _15 = PtrMetadata(_14);
+ _15 = PtrMetadata(_3);
return;
}
}


@ -0,0 +1,41 @@
- // MIR for `manual_slice_mut_len` before GVN
+ // MIR for `manual_slice_mut_len` after GVN
fn manual_slice_mut_len(_1: &mut [i32]) -> usize {
debug x => _1;
let mut _0: usize;
let _2: *mut [i32];
let mut _4: *mut [i32];
let mut _5: *const [i32];
scope 1 {
debug x => _2;
let _3: *const [i32];
scope 2 {
debug x => _3;
}
}
bb0: {
- StorageLive(_2);
+ nop;
_2 = &raw mut (*_1);
- StorageLive(_3);
+ nop;
StorageLive(_4);
_4 = _2;
- _3 = move _4 as *const [i32] (PtrToPtr);
+ _3 = _2 as *const [i32] (PtrToPtr);
StorageDead(_4);
StorageLive(_5);
_5 = _3;
- _0 = PtrMetadata(move _5);
+ _0 = PtrMetadata(_1);
StorageDead(_5);
- StorageDead(_3);
- StorageDead(_2);
+ nop;
+ nop;
return;
}
}


@ -0,0 +1,41 @@
- // MIR for `manual_slice_mut_len` before GVN
+ // MIR for `manual_slice_mut_len` after GVN
fn manual_slice_mut_len(_1: &mut [i32]) -> usize {
debug x => _1;
let mut _0: usize;
let _2: *mut [i32];
let mut _4: *mut [i32];
let mut _5: *const [i32];
scope 1 {
debug x => _2;
let _3: *const [i32];
scope 2 {
debug x => _3;
}
}
bb0: {
- StorageLive(_2);
+ nop;
_2 = &raw mut (*_1);
- StorageLive(_3);
+ nop;
StorageLive(_4);
_4 = _2;
- _3 = move _4 as *const [i32] (PtrToPtr);
+ _3 = _2 as *const [i32] (PtrToPtr);
StorageDead(_4);
StorageLive(_5);
_5 = _3;
- _0 = PtrMetadata(move _5);
+ _0 = PtrMetadata(_1);
StorageDead(_5);
- StorageDead(_3);
- StorageDead(_2);
+ nop;
+ nop;
return;
}
}


@ -7,7 +7,9 @@
#![feature(custom_mir)]
#![feature(core_intrinsics)]
#![feature(freeze)]
#![allow(ambiguous_wide_pointer_comparisons)]
#![allow(unconditional_panic)]
#![allow(unused)]
use std::intrinsics::mir::*;
use std::marker::Freeze;
@ -816,6 +818,71 @@ fn casts_before_aggregate_raw_ptr(x: *const u32) -> *const [u8] {
std::intrinsics::aggregate_raw_ptr(x, 4)
}
fn manual_slice_mut_len(x: &mut [i32]) -> usize {
// CHECK-LABEL: fn manual_slice_mut_len
// CHECK: _0 = PtrMetadata(_1);
let x: *mut [i32] = x;
let x: *const [i32] = x;
std::intrinsics::ptr_metadata(x)
}
// `.len()` on arrays ends up being something like this
fn array_len(x: &mut [i32; 42]) -> usize {
// CHECK-LABEL: fn array_len
// CHECK: _0 = const 42_usize;
let x: &[i32] = x;
std::intrinsics::ptr_metadata(x)
}
#[custom_mir(dialect = "runtime")]
fn generic_cast_metadata<T, A: ?Sized, B: ?Sized>(ps: *const [T], pa: *const A, pb: *const B) {
// CHECK-LABEL: fn generic_cast_metadata
mir! {
{
// These tests check that we correctly do or don't elide casts
// when the pointee metadata do or don't match, respectively.
// Metadata usize -> (), do not optimize.
// CHECK: [[T:_.+]] = _1 as
// CHECK-NEXT: PtrMetadata([[T]])
let t1 = CastPtrToPtr::<_, *const T>(ps);
let m1 = PtrMetadata(t1);
// `(&A, [T])` has `usize` metadata, same as `[T]`, yes optimize.
// CHECK: [[T:_.+]] = _1 as
// CHECK-NEXT: PtrMetadata(_1)
let t2 = CastPtrToPtr::<_, *const (&A, [T])>(ps);
let m2 = PtrMetadata(t2);
// Tail `A` and tail `B`, do not optimize.
// CHECK: [[T:_.+]] = _2 as
// CHECK-NEXT: PtrMetadata([[T]])
let t3 = CastPtrToPtr::<_, *const (T, B)>(pa);
let m3 = PtrMetadata(t3);
// Both have tail `A`, yes optimize.
// CHECK: [[T:_.+]] = _2 as
// CHECK-NEXT: PtrMetadata(_2)
let t4 = CastPtrToPtr::<_, *const (T, A)>(pa);
let m4 = PtrMetadata(t4);
// Tail `B` and tail `A`, do not optimize.
// CHECK: [[T:_.+]] = _3 as
// CHECK-NEXT: PtrMetadata([[T]])
let t5 = CastPtrToPtr::<_, *mut A>(pb);
let m5 = PtrMetadata(t5);
// Both have tail `B`, yes optimize.
// CHECK: [[T:_.+]] = _3 as
// CHECK-NEXT: PtrMetadata(_3)
let t6 = CastPtrToPtr::<_, *mut B>(pb);
let m6 = PtrMetadata(t6);
Return()
}
}
}
fn main() {
subexpression_elimination(2, 4, 5);
wrap_unwrap(5);
@ -880,3 +947,6 @@ fn identity<T>(x: T) -> T {
// EMIT_MIR gvn.meta_of_ref_to_slice.GVN.diff
// EMIT_MIR gvn.slice_from_raw_parts_as_ptr.GVN.diff
// EMIT_MIR gvn.casts_before_aggregate_raw_ptr.GVN.diff
// EMIT_MIR gvn.manual_slice_mut_len.GVN.diff
// EMIT_MIR gvn.array_len.GVN.diff
// EMIT_MIR gvn.generic_cast_metadata.GVN.diff


@ -1,6 +1,5 @@
// skip-filecheck
// EMIT_MIR_FOR_EACH_PANIC_STRATEGY
//@ compile-flags: -Zmir-enable-passes=-NormalizeArrayLen
// Check that we do not insert StorageDead at each target if StorageDead was never seen
use std::fmt::Debug;


@ -1,5 +1,5 @@
- // MIR for `array_bound` before NormalizeArrayLen
+ // MIR for `array_bound` after NormalizeArrayLen
- // MIR for `array_bound` before GVN
+ // MIR for `array_bound` after GVN
fn array_bound(_1: usize, _2: &[u8; N]) -> u8 {
debug index => _1;
@ -24,14 +24,15 @@
_7 = &(*_2);
_6 = move _7 as &[u8] (PointerCoercion(Unsize));
StorageDead(_7);
- _5 = Len((*_6));
- _5 = PtrMetadata(move _6);
+ _5 = const N;
goto -> bb1;
}
bb1: {
StorageDead(_6);
_3 = Lt(move _4, move _5);
- _3 = Lt(move _4, move _5);
+ _3 = Lt(_1, move _5);
switchInt(move _3) -> [0: bb4, otherwise: bb2];
}
@ -40,13 +41,17 @@
StorageDead(_4);
StorageLive(_8);
_8 = _1;
_9 = Len((*_2));
_10 = Lt(_8, _9);
assert(move _10, "index out of bounds: the length is {} but the index is {}", move _9, _8) -> [success: bb3, unwind unreachable];
- _9 = Len((*_2));
- _10 = Lt(_8, _9);
- assert(move _10, "index out of bounds: the length is {} but the index is {}", move _9, _8) -> [success: bb3, unwind unreachable];
+ _9 = const N;
+ _10 = Lt(_1, const N);
+ assert(move _10, "index out of bounds: the length is {} but the index is {}", const N, _1) -> [success: bb3, unwind unreachable];
}
bb3: {
_0 = (*_2)[_8];
- _0 = (*_2)[_8];
+ _0 = (*_2)[_1];
StorageDead(_8);
goto -> bb5;
}


@ -1,5 +1,5 @@
- // MIR for `array_bound` before NormalizeArrayLen
+ // MIR for `array_bound` after NormalizeArrayLen
- // MIR for `array_bound` before GVN
+ // MIR for `array_bound` after GVN
fn array_bound(_1: usize, _2: &[u8; N]) -> u8 {
debug index => _1;
@ -24,14 +24,15 @@
_7 = &(*_2);
_6 = move _7 as &[u8] (PointerCoercion(Unsize));
StorageDead(_7);
- _5 = Len((*_6));
- _5 = PtrMetadata(move _6);
+ _5 = const N;
goto -> bb1;
}
bb1: {
StorageDead(_6);
_3 = Lt(move _4, move _5);
- _3 = Lt(move _4, move _5);
+ _3 = Lt(_1, move _5);
switchInt(move _3) -> [0: bb4, otherwise: bb2];
}
@ -40,13 +41,17 @@
StorageDead(_4);
StorageLive(_8);
_8 = _1;
_9 = Len((*_2));
_10 = Lt(_8, _9);
assert(move _10, "index out of bounds: the length is {} but the index is {}", move _9, _8) -> [success: bb3, unwind continue];
- _9 = Len((*_2));
- _10 = Lt(_8, _9);
- assert(move _10, "index out of bounds: the length is {} but the index is {}", move _9, _8) -> [success: bb3, unwind continue];
+ _9 = const N;
+ _10 = Lt(_1, const N);
+ assert(move _10, "index out of bounds: the length is {} but the index is {}", const N, _1) -> [success: bb3, unwind continue];
}
bb3: {
_0 = (*_2)[_8];
- _0 = (*_2)[_8];
+ _0 = (*_2)[_1];
StorageDead(_8);
goto -> bb5;
}


@ -1,5 +1,5 @@
- // MIR for `array_bound_mut` before NormalizeArrayLen
+ // MIR for `array_bound_mut` after NormalizeArrayLen
- // MIR for `array_bound_mut` before GVN
+ // MIR for `array_bound_mut` after GVN
fn array_bound_mut(_1: usize, _2: &mut [u8; N]) -> u8 {
debug index => _1;
@ -27,14 +27,15 @@
_7 = &(*_2);
_6 = move _7 as &[u8] (PointerCoercion(Unsize));
StorageDead(_7);
- _5 = Len((*_6));
- _5 = PtrMetadata(move _6);
+ _5 = const N;
goto -> bb1;
}
bb1: {
StorageDead(_6);
_3 = Lt(move _4, move _5);
- _3 = Lt(move _4, move _5);
+ _3 = Lt(_1, move _5);
switchInt(move _3) -> [0: bb4, otherwise: bb2];
}
@ -43,13 +44,17 @@
StorageDead(_4);
StorageLive(_8);
_8 = _1;
_9 = Len((*_2));
_10 = Lt(_8, _9);
assert(move _10, "index out of bounds: the length is {} but the index is {}", move _9, _8) -> [success: bb3, unwind unreachable];
- _9 = Len((*_2));
- _10 = Lt(_8, _9);
- assert(move _10, "index out of bounds: the length is {} but the index is {}", move _9, _8) -> [success: bb3, unwind unreachable];
+ _9 = const N;
+ _10 = Lt(_1, const N);
+ assert(move _10, "index out of bounds: the length is {} but the index is {}", const N, _1) -> [success: bb3, unwind unreachable];
}
bb3: {
_0 = (*_2)[_8];
- _0 = (*_2)[_8];
+ _0 = (*_2)[_1];
StorageDead(_8);
goto -> bb6;
}
@ -59,13 +64,17 @@
StorageDead(_4);
StorageLive(_11);
_11 = const 0_usize;
_12 = Len((*_2));
_13 = Lt(_11, _12);
assert(move _13, "index out of bounds: the length is {} but the index is {}", move _12, _11) -> [success: bb5, unwind unreachable];
- _12 = Len((*_2));
- _13 = Lt(_11, _12);
- assert(move _13, "index out of bounds: the length is {} but the index is {}", move _12, _11) -> [success: bb5, unwind unreachable];
+ _12 = const N;
+ _13 = Lt(const 0_usize, const N);
+ assert(move _13, "index out of bounds: the length is {} but the index is {}", const N, const 0_usize) -> [success: bb5, unwind unreachable];
}
bb5: {
(*_2)[_11] = const 42_u8;
- (*_2)[_11] = const 42_u8;
+ (*_2)[0 of 1] = const 42_u8;
StorageDead(_11);
_0 = const 42_u8;
goto -> bb6;


@ -1,5 +1,5 @@
- // MIR for `array_bound_mut` before NormalizeArrayLen
+ // MIR for `array_bound_mut` after NormalizeArrayLen
- // MIR for `array_bound_mut` before GVN
+ // MIR for `array_bound_mut` after GVN
fn array_bound_mut(_1: usize, _2: &mut [u8; N]) -> u8 {
debug index => _1;
@ -27,14 +27,15 @@
_7 = &(*_2);
_6 = move _7 as &[u8] (PointerCoercion(Unsize));
StorageDead(_7);
- _5 = Len((*_6));
- _5 = PtrMetadata(move _6);
+ _5 = const N;
goto -> bb1;
}
bb1: {
StorageDead(_6);
_3 = Lt(move _4, move _5);
- _3 = Lt(move _4, move _5);
+ _3 = Lt(_1, move _5);
switchInt(move _3) -> [0: bb4, otherwise: bb2];
}
@ -43,13 +44,17 @@
StorageDead(_4);
StorageLive(_8);
_8 = _1;
_9 = Len((*_2));
_10 = Lt(_8, _9);
assert(move _10, "index out of bounds: the length is {} but the index is {}", move _9, _8) -> [success: bb3, unwind continue];
- _9 = Len((*_2));
- _10 = Lt(_8, _9);
- assert(move _10, "index out of bounds: the length is {} but the index is {}", move _9, _8) -> [success: bb3, unwind continue];
+ _9 = const N;
+ _10 = Lt(_1, const N);
+ assert(move _10, "index out of bounds: the length is {} but the index is {}", const N, _1) -> [success: bb3, unwind continue];
}
bb3: {
_0 = (*_2)[_8];
- _0 = (*_2)[_8];
+ _0 = (*_2)[_1];
StorageDead(_8);
goto -> bb6;
}
@ -59,13 +64,17 @@
StorageDead(_4);
StorageLive(_11);
_11 = const 0_usize;
_12 = Len((*_2));
_13 = Lt(_11, _12);
assert(move _13, "index out of bounds: the length is {} but the index is {}", move _12, _11) -> [success: bb5, unwind continue];
- _12 = Len((*_2));
- _13 = Lt(_11, _12);
- assert(move _13, "index out of bounds: the length is {} but the index is {}", move _12, _11) -> [success: bb5, unwind continue];
+ _12 = const N;
+ _13 = Lt(const 0_usize, const N);
+ assert(move _13, "index out of bounds: the length is {} but the index is {}", const N, const 0_usize) -> [success: bb5, unwind continue];
}
bb5: {
(*_2)[_11] = const 42_u8;
- (*_2)[_11] = const 42_u8;
+ (*_2)[0 of 1] = const 42_u8;
StorageDead(_11);
_0 = const 42_u8;
goto -> bb6;


@ -1,5 +1,5 @@
- // MIR for `array_len` before NormalizeArrayLen
+ // MIR for `array_len` after NormalizeArrayLen
- // MIR for `array_len` before GVN
+ // MIR for `array_len` after GVN
fn array_len(_1: &[u8; N]) -> usize {
debug arr => _1;
@ -13,7 +13,7 @@
_3 = &(*_1);
_2 = move _3 as &[u8] (PointerCoercion(Unsize));
StorageDead(_3);
- _0 = Len((*_2));
- _0 = PtrMetadata(move _2);
+ _0 = const N;
goto -> bb1;
}


@ -1,5 +1,5 @@
- // MIR for `array_len` before NormalizeArrayLen
+ // MIR for `array_len` after NormalizeArrayLen
- // MIR for `array_len` before GVN
+ // MIR for `array_len` after GVN
fn array_len(_1: &[u8; N]) -> usize {
debug arr => _1;
@ -13,7 +13,7 @@
_3 = &(*_1);
_2 = move _3 as &[u8] (PointerCoercion(Unsize));
StorageDead(_3);
- _0 = Len((*_2));
- _0 = PtrMetadata(move _2);
+ _0 = const N;
goto -> bb1;
}


@ -1,5 +1,5 @@
- // MIR for `array_len_by_value` before NormalizeArrayLen
+ // MIR for `array_len_by_value` after NormalizeArrayLen
- // MIR for `array_len_by_value` before GVN
+ // MIR for `array_len_by_value` after GVN
fn array_len_by_value(_1: [u8; N]) -> usize {
debug arr => _1;
@ -13,7 +13,7 @@
_3 = &_1;
_2 = move _3 as &[u8] (PointerCoercion(Unsize));
StorageDead(_3);
- _0 = Len((*_2));
- _0 = PtrMetadata(move _2);
+ _0 = const N;
goto -> bb1;
}


@ -1,5 +1,5 @@
- // MIR for `array_len_by_value` before NormalizeArrayLen
+ // MIR for `array_len_by_value` after NormalizeArrayLen
- // MIR for `array_len_by_value` before GVN
+ // MIR for `array_len_by_value` after GVN
fn array_len_by_value(_1: [u8; N]) -> usize {
debug arr => _1;
@ -13,7 +13,7 @@
_3 = &_1;
_2 = move _3 as &[u8] (PointerCoercion(Unsize));
StorageDead(_3);
- _0 = Len((*_2));
- _0 = PtrMetadata(move _2);
+ _0 = const N;
goto -> bb1;
}


@ -1,5 +1,5 @@
- // MIR for `array_len_raw` before NormalizeArrayLen
+ // MIR for `array_len_raw` after NormalizeArrayLen
- // MIR for `array_len_raw` before GVN
+ // MIR for `array_len_raw` after GVN
fn array_len_raw(_1: [u8; N]) -> usize {
debug arr => _1;
@ -18,7 +18,8 @@
}
bb0: {
StorageLive(_2);
- StorageLive(_2);
+ nop;
StorageLive(_3);
StorageLive(_4);
_4 = &_1;
@ -32,7 +33,7 @@
StorageLive(_7);
_7 = &(*_5);
_6 = &(*_7);
- _0 = Len((*_6));
- _0 = PtrMetadata(move _6);
+ _0 = const N;
goto -> bb1;
}
@ -40,7 +41,8 @@
bb1: {
StorageDead(_6);
StorageDead(_5);
StorageDead(_2);
- StorageDead(_2);
+ nop;
StorageDead(_7);
return;
}


@ -1,5 +1,5 @@
- // MIR for `array_len_raw` before NormalizeArrayLen
+ // MIR for `array_len_raw` after NormalizeArrayLen
- // MIR for `array_len_raw` before GVN
+ // MIR for `array_len_raw` after GVN
fn array_len_raw(_1: [u8; N]) -> usize {
debug arr => _1;
@ -18,7 +18,8 @@
}
bb0: {
StorageLive(_2);
- StorageLive(_2);
+ nop;
StorageLive(_3);
StorageLive(_4);
_4 = &_1;
@ -32,7 +33,7 @@
StorageLive(_7);
_7 = &(*_5);
_6 = &(*_7);
- _0 = Len((*_6));
- _0 = PtrMetadata(move _6);
+ _0 = const N;
goto -> bb1;
}
@ -40,7 +41,8 @@
bb1: {
StorageDead(_6);
StorageDead(_5);
StorageDead(_2);
- StorageDead(_2);
+ nop;
StorageDead(_7);
return;
}


@ -1,5 +1,5 @@
- // MIR for `array_len_reborrow` before NormalizeArrayLen
+ // MIR for `array_len_reborrow` after NormalizeArrayLen
- // MIR for `array_len_reborrow` before GVN
+ // MIR for `array_len_reborrow` after GVN
fn array_len_reborrow(_1: [u8; N]) -> usize {
debug arr => _1;
@ -17,7 +17,8 @@
}
bb0: {
StorageLive(_2);
- StorageLive(_2);
+ nop;
StorageLive(_3);
StorageLive(_4);
_4 = &mut _1;
@ -29,7 +30,7 @@
_5 = &(*_2);
StorageLive(_6);
_6 = &(*_5);
- _0 = Len((*_6));
- _0 = PtrMetadata(move _6);
+ _0 = const N;
goto -> bb1;
}
@ -37,7 +38,8 @@
bb1: {
StorageDead(_6);
StorageDead(_5);
StorageDead(_2);
- StorageDead(_2);
+ nop;
return;
}
}


@ -1,5 +1,5 @@
- // MIR for `array_len_reborrow` before NormalizeArrayLen
+ // MIR for `array_len_reborrow` after NormalizeArrayLen
- // MIR for `array_len_reborrow` before GVN
+ // MIR for `array_len_reborrow` after GVN
fn array_len_reborrow(_1: [u8; N]) -> usize {
debug arr => _1;
@ -17,7 +17,8 @@
}
bb0: {
StorageLive(_2);
- StorageLive(_2);
+ nop;
StorageLive(_3);
StorageLive(_4);
_4 = &mut _1;
@ -29,7 +30,7 @@
_5 = &(*_2);
StorageLive(_6);
_6 = &(*_5);
- _0 = Len((*_6));
- _0 = PtrMetadata(move _6);
+ _0 = const N;
goto -> bb1;
}
@ -37,7 +38,8 @@
bb1: {
StorageDead(_6);
StorageDead(_5);
StorageDead(_2);
- StorageDead(_2);
+ nop;
return;
}
}


@ -1,20 +1,20 @@
// EMIT_MIR_FOR_EACH_PANIC_STRATEGY
//@ test-mir-pass: NormalizeArrayLen
//@ test-mir-pass: GVN
//@ compile-flags: -Zmir-enable-passes=+LowerSliceLenCalls
// EMIT_MIR lower_array_len.array_bound.NormalizeArrayLen.diff
// EMIT_MIR lower_array_len.array_bound.GVN.diff
pub fn array_bound<const N: usize>(index: usize, slice: &[u8; N]) -> u8 {
// CHECK-LABEL: fn array_bound(
// CHECK: [[len:_.*]] = const N;
// CHECK: Lt(move {{_.*}}, move [[len]]);
// CHECK: Lt(_1, move [[len]]);
if index < slice.len() { slice[index] } else { 42 }
}
// EMIT_MIR lower_array_len.array_bound_mut.NormalizeArrayLen.diff
// EMIT_MIR lower_array_len.array_bound_mut.GVN.diff
pub fn array_bound_mut<const N: usize>(index: usize, slice: &mut [u8; N]) -> u8 {
// CHECK-LABEL: fn array_bound_mut(
// CHECK: [[len:_.*]] = const N;
// CHECK: Lt(move {{_.*}}, move [[len]]);
// CHECK: Lt(_1, move [[len]]);
if index < slice.len() {
slice[index]
} else {
@ -24,21 +24,21 @@
}
}
// EMIT_MIR lower_array_len.array_len.NormalizeArrayLen.diff
// EMIT_MIR lower_array_len.array_len.GVN.diff
pub fn array_len<const N: usize>(arr: &[u8; N]) -> usize {
// CHECK-LABEL: fn array_len(
// CHECK: _0 = const N;
arr.len()
}
// EMIT_MIR lower_array_len.array_len_by_value.NormalizeArrayLen.diff
// EMIT_MIR lower_array_len.array_len_by_value.GVN.diff
pub fn array_len_by_value<const N: usize>(arr: [u8; N]) -> usize {
// CHECK-LABEL: fn array_len_by_value(
// CHECK: _0 = const N;
arr.len()
}
// EMIT_MIR lower_array_len.array_len_reborrow.NormalizeArrayLen.diff
// EMIT_MIR lower_array_len.array_len_reborrow.GVN.diff
pub fn array_len_reborrow<const N: usize>(mut arr: [u8; N]) -> usize {
// CHECK-LABEL: fn array_len_reborrow(
// CHECK: _0 = const N;
@ -47,7 +47,7 @@
arr.len()
}
// EMIT_MIR lower_array_len.array_len_raw.NormalizeArrayLen.diff
// EMIT_MIR lower_array_len.array_len_raw.GVN.diff
pub fn array_len_raw<const N: usize>(arr: [u8; N]) -> usize {
// CHECK-LABEL: fn array_len_raw(
// CHECK: _0 = const N;


@ -21,7 +21,7 @@
StorageLive(_6);
_6 = &(*_2);
- _5 = core::slice::<impl [u8]>::len(move _6) -> [return: bb1, unwind unreachable];
+ _5 = Len((*_6));
+ _5 = PtrMetadata(move _6);
+ goto -> bb1;
}


@ -21,7 +21,7 @@
StorageLive(_6);
_6 = &(*_2);
- _5 = core::slice::<impl [u8]>::len(move _6) -> [return: bb1, unwind continue];
+ _5 = Len((*_6));
+ _5 = PtrMetadata(move _6);
+ goto -> bb1;
}


@ -19,7 +19,7 @@ pub fn slice_index_usize(slice: &[u32], index: usize) -> u32 {
// EMIT_MIR slice_index.slice_get_mut_usize.PreCodegen.after.mir
pub fn slice_get_mut_usize(slice: &mut [u32], index: usize) -> Option<&mut u32> {
// CHECK-LABEL: slice_get_mut_usize
// CHECK: [[LEN:_[0-9]+]] = Len((*_1))
// CHECK: [[LEN:_[0-9]+]] = PtrMetadata(_1)
// CHECK: Lt(_2, move [[LEN]])
// CHECK-NOT: precondition_check
slice.get_mut(index)


@ -23,7 +23,7 @@ fn slice_get_mut_usize(_1: &mut [u32], _2: usize) -> Option<&mut u32> {
StorageLive(_7);
StorageLive(_4);
StorageLive(_3);
_3 = Len((*_1));
_3 = PtrMetadata(_1);
_4 = Lt(_2, move _3);
switchInt(move _4) -> [0: bb1, otherwise: bb2];
}


@ -23,7 +23,7 @@ fn slice_get_mut_usize(_1: &mut [u32], _2: usize) -> Option<&mut u32> {
StorageLive(_7);
StorageLive(_4);
StorageLive(_3);
_3 = Len((*_1));
_3 = PtrMetadata(_1);
_4 = Lt(_2, move _3);
switchInt(move _4) -> [0: bb1, otherwise: bb2];
}


@ -8,25 +8,24 @@ fn slice_get_unchecked_mut_range(_1: &mut [u32], _2: std::ops::Range<usize>) ->
let mut _4: usize;
scope 1 (inlined core::slice::<impl [u32]>::get_unchecked_mut::<std::ops::Range<usize>>) {
let mut _5: *mut [u32];
let mut _12: *mut [u32];
let mut _11: *mut [u32];
scope 2 (inlined <std::ops::Range<usize> as SliceIndex<[u32]>>::get_unchecked_mut) {
let mut _7: usize;
let _8: ();
let _9: usize;
let mut _6: usize;
let _7: ();
let _8: usize;
scope 3 {
scope 6 (inlined core::slice::index::get_offset_len_mut_noubcheck::<u32>) {
let _11: *mut u32;
let _10: *mut u32;
scope 7 {
}
scope 8 (inlined core::slice::index::get_mut_noubcheck::<u32>) {
let _10: *mut u32;
let _9: *mut u32;
scope 9 {
}
}
}
}
scope 4 (inlined std::ptr::mut_ptr::<impl *mut [u32]>::len) {
let mut _6: *const [u32];
scope 5 (inlined std::ptr::metadata::<[u32]>) {
}
}
@ -38,28 +37,25 @@ fn slice_get_unchecked_mut_range(_1: &mut [u32], _2: std::ops::Range<usize>) ->
_4 = move (_2.1: usize);
StorageLive(_5);
_5 = &raw mut (*_1);
StorageLive(_9);
StorageLive(_7);
StorageLive(_8);
StorageLive(_6);
_6 = _5 as *const [u32] (PtrToPtr);
_7 = PtrMetadata(_6);
StorageDead(_6);
_8 = <std::ops::Range<usize> as SliceIndex<[T]>>::get_unchecked_mut::precondition_check(_3, _4, move _7) -> [return: bb1, unwind unreachable];
_6 = PtrMetadata(_1);
_7 = <std::ops::Range<usize> as SliceIndex<[T]>>::get_unchecked_mut::precondition_check(_3, _4, move _6) -> [return: bb1, unwind unreachable];
}
bb1: {
StorageDead(_7);
_9 = SubUnchecked(_4, _3);
StorageLive(_11);
StorageDead(_6);
_8 = SubUnchecked(_4, _3);
StorageLive(_10);
_10 = _5 as *mut u32 (PtrToPtr);
_11 = Offset(_10, _3);
StorageDead(_10);
_12 = *mut [u32] from (_11, _9);
StorageDead(_11);
StorageLive(_9);
_9 = _5 as *mut u32 (PtrToPtr);
_10 = Offset(_9, _3);
StorageDead(_9);
_11 = *mut [u32] from (_10, _8);
StorageDead(_10);
StorageDead(_8);
StorageDead(_5);
_0 = &mut (*_12);
_0 = &mut (*_11);
return;
}
}


@ -8,25 +8,24 @@ fn slice_get_unchecked_mut_range(_1: &mut [u32], _2: std::ops::Range<usize>) ->
let mut _4: usize;
scope 1 (inlined core::slice::<impl [u32]>::get_unchecked_mut::<std::ops::Range<usize>>) {
let mut _5: *mut [u32];
let mut _12: *mut [u32];
let mut _11: *mut [u32];
scope 2 (inlined <std::ops::Range<usize> as SliceIndex<[u32]>>::get_unchecked_mut) {
let mut _7: usize;
let _8: ();
let _9: usize;
let mut _6: usize;
let _7: ();
let _8: usize;
scope 3 {
scope 6 (inlined core::slice::index::get_offset_len_mut_noubcheck::<u32>) {
let _11: *mut u32;
let _10: *mut u32;
scope 7 {
}
scope 8 (inlined core::slice::index::get_mut_noubcheck::<u32>) {
let _10: *mut u32;
let _9: *mut u32;
scope 9 {
}
}
}
}
scope 4 (inlined std::ptr::mut_ptr::<impl *mut [u32]>::len) {
let mut _6: *const [u32];
scope 5 (inlined std::ptr::metadata::<[u32]>) {
}
}
@ -38,28 +37,25 @@ fn slice_get_unchecked_mut_range(_1: &mut [u32], _2: std::ops::Range<usize>) ->
_4 = move (_2.1: usize);
StorageLive(_5);
_5 = &raw mut (*_1);
StorageLive(_9);
StorageLive(_7);
StorageLive(_8);
StorageLive(_6);
_6 = _5 as *const [u32] (PtrToPtr);
_7 = PtrMetadata(_6);
StorageDead(_6);
_8 = <std::ops::Range<usize> as SliceIndex<[T]>>::get_unchecked_mut::precondition_check(_3, _4, move _7) -> [return: bb1, unwind unreachable];
_6 = PtrMetadata(_1);
_7 = <std::ops::Range<usize> as SliceIndex<[T]>>::get_unchecked_mut::precondition_check(_3, _4, move _6) -> [return: bb1, unwind unreachable];
}
bb1: {
StorageDead(_7);
_9 = SubUnchecked(_4, _3);
StorageLive(_11);
StorageDead(_6);
_8 = SubUnchecked(_4, _3);
StorageLive(_10);
_10 = _5 as *mut u32 (PtrToPtr);
_11 = Offset(_10, _3);
StorageDead(_10);
_12 = *mut [u32] from (_11, _9);
StorageDead(_11);
StorageLive(_9);
_9 = _5 as *mut u32 (PtrToPtr);
_10 = Offset(_9, _3);
StorageDead(_9);
_11 = *mut [u32] from (_10, _8);
StorageDead(_10);
StorageDead(_8);
StorageDead(_5);
_0 = &mut (*_12);
_0 = &mut (*_11);
return;
}
}


@ -90,7 +90,7 @@ fn enumerated_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () {
StorageLive(_6);
StorageLive(_4);
StorageLive(_5);
_3 = Len((*_1));
_3 = PtrMetadata(_1);
_4 = &raw const (*_1);
_5 = _4 as *const T (PtrToPtr);
_6 = NonNull::<T> { pointer: _5 };


@ -65,7 +65,7 @@ fn enumerated_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () {
StorageLive(_6);
StorageLive(_4);
StorageLive(_5);
_3 = Len((*_1));
_3 = PtrMetadata(_1);
_4 = &raw const (*_1);
_5 = _4 as *const T (PtrToPtr);
_6 = NonNull::<T> { pointer: _5 };


@ -57,7 +57,7 @@ fn forward_loop(_1: &[T], _2: impl Fn(&T)) -> () {
StorageLive(_6);
StorageLive(_4);
StorageLive(_5);
_3 = Len((*_1));
_3 = PtrMetadata(_1);
_4 = &raw const (*_1);
_5 = _4 as *const T (PtrToPtr);
_6 = NonNull::<T> { pointer: _5 };


@ -57,7 +57,7 @@ fn forward_loop(_1: &[T], _2: impl Fn(&T)) -> () {
StorageLive(_6);
StorageLive(_4);
StorageLive(_5);
_3 = Len((*_1));
_3 = PtrMetadata(_1);
_4 = &raw const (*_1);
_5 = _4 as *const T (PtrToPtr);
_6 = NonNull::<T> { pointer: _5 };


@ -40,7 +40,7 @@ fn range_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () {
}
bb0: {
_3 = Len((*_1));
_3 = PtrMetadata(_1);
StorageLive(_4);
_4 = const 0_usize;
goto -> bb1;


@ -40,7 +40,7 @@ fn range_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () {
}
bb0: {
_3 = Len((*_1));
_3 = PtrMetadata(_1);
StorageLive(_4);
_4 = const 0_usize;
goto -> bb1;


@ -65,7 +65,7 @@ fn reverse_loop(_1: &[T], _2: impl Fn(&T)) -> () {
StorageLive(_6);
StorageLive(_4);
StorageLive(_5);
_3 = Len((*_1));
_3 = PtrMetadata(_1);
_4 = &raw const (*_1);
_5 = _4 as *const T (PtrToPtr);
_6 = NonNull::<T> { pointer: _5 };


@ -65,7 +65,7 @@ fn reverse_loop(_1: &[T], _2: impl Fn(&T)) -> () {
StorageLive(_6);
StorageLive(_4);
StorageLive(_5);
_3 = Len((*_1));
_3 = PtrMetadata(_1);
_4 = &raw const (*_1);
_5 = _4 as *const T (PtrToPtr);
_6 = NonNull::<T> { pointer: _5 };