Merge from rustc

The Miri Conjob Bot 2024-02-17 05:17:43 +00:00
commit d523cab910
10173 changed files with 18873 additions and 18285 deletions

View file

@ -13,7 +13,7 @@
use rustc_session::{RustcVersion, Session};
use rustc_span::hygiene::Transparency;
use rustc_span::{symbol::sym, symbol::Symbol, Span};
use std::num::NonZeroU32;
use std::num::NonZero;
use crate::session_diagnostics::{self, IncorrectReprFormatGenericCause};
@ -113,7 +113,7 @@ pub enum StabilityLevel {
/// Reason for the current stability level.
reason: UnstableReason,
/// Relevant `rust-lang/rust` issue.
issue: Option<NonZeroU32>,
issue: Option<NonZero<u32>>,
is_soft: bool,
/// If part of a feature is stabilized and a new feature is added for the remaining parts,
/// then the `implied_by` attribute is used to indicate which now-stable feature previously
@ -442,7 +442,7 @@ fn parse_unstability(sess: &Session, attr: &Attribute) -> Option<(Symbol, Stabil
// is a name/value pair string literal.
issue_num = match issue.unwrap().as_str() {
"none" => None,
issue => match issue.parse::<NonZeroU32>() {
issue => match issue.parse::<NonZero<u32>>() {
Ok(num) => Some(num),
Err(err) => {
sess.dcx().emit_err(
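These hunks are part of a commit-wide migration from `std::num::NonZeroU32` to the generic `std::num::NonZero<u32>` (the `generic_nonzero` feature, since stabilized). A minimal standalone sketch of the issue-number parsing shown above, with a hypothetical function name and the diagnostic path reduced to `None`:

```rust
use std::num::NonZero;

// "none" and unparseable/zero values both become None; the real code
// emits a diagnostic in the error case instead.
fn parse_issue(s: &str) -> Option<NonZero<u32>> {
    match s {
        "none" => None,
        s => s.parse::<NonZero<u32>>().ok(),
    }
}

fn main() {
    assert_eq!(parse_issue("none"), None);
    assert_eq!(parse_issue("0"), None); // zero is not a valid NonZero
    assert_eq!(parse_issue("27060").map(NonZero::get), Some(27060));
}
```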

View file

@ -7,6 +7,7 @@
#![allow(internal_features)]
#![feature(rustdoc_internals)]
#![doc(rust_logo)]
#![feature(generic_nonzero)]
#![feature(let_chains)]
#[macro_use]

View file

@ -698,7 +698,7 @@ fn is_error_in_trait(&self, local: Local) -> (bool, Option<Span>) {
),
..
}) => {
let hir::Ty { span, .. } = inputs[local.index() - 1];
let hir::Ty { span, .. } = *inputs.get(local.index() - 1)?;
Some(span)
}
_ => None,
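This one-line change swaps a panicking index for `slice::get`, so an unexpected arity becomes a `None` that the surrounding function propagates with `?` (presumably avoiding an ICE) instead of an out-of-bounds panic. A toy model of the difference:

```rust
fn nth_input<T: Copy>(inputs: &[T], local_index: usize) -> Option<T> {
    // `inputs[local_index - 1]` panics when the index is out of range;
    // `get` maps that case to `None` so the caller can bail out with `?`.
    inputs.get(local_index - 1).copied()
}

fn main() {
    let spans = [10, 20];
    assert_eq!(nth_input(&spans, 2), Some(20));
    assert_eq!(nth_input(&spans, 3), None); // previously a panic
}
```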

View file

@ -1666,16 +1666,9 @@ fn check_call_inputs(
let func_ty = func.ty(body, self.infcx.tcx);
if let ty::FnDef(def_id, _) = *func_ty.kind() {
if self.tcx().is_intrinsic(def_id) {
match self.tcx().item_name(def_id) {
sym::simd_shuffle => {
if !matches!(args[2], Spanned { node: Operand::Constant(_), .. }) {
self.tcx()
.dcx()
.emit_err(SimdShuffleLastConst { span: term.source_info.span });
}
}
_ => {}
if let Some(sym::simd_shuffle) = self.tcx().intrinsic(def_id) {
if !matches!(args[2], Spanned { node: Operand::Constant(_), .. }) {
self.tcx().dcx().emit_err(SimdShuffleLastConst { span: term.source_info.span });
}
}
}
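The rewrite relies on the new `tcx.intrinsic(def_id)` query, which returns `Option<Symbol>` and replaces the old boolean `is_intrinsic` plus a separate `item_name` lookup; the same switch appears in several later hunks. A self-contained model with made-up def-ids:

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Symbol { SimdShuffle, Transmute }

// Stand-in for `tcx.intrinsic(def_id)`: Some(name) iff it's an intrinsic.
fn intrinsic(def_id: u32) -> Option<Symbol> {
    match def_id {
        1 => Some(Symbol::SimdShuffle),
        2 => Some(Symbol::Transmute),
        _ => None,
    }
}

fn check_call(def_id: u32, last_arg_is_const: bool) -> Result<(), &'static str> {
    // One `if let` replaces the old `match tcx.item_name(def_id)`.
    if let Some(Symbol::SimdShuffle) = intrinsic(def_id) {
        if !last_arg_is_const {
            return Err("simd_shuffle requires a const shuffle mask");
        }
    }
    Ok(())
}

fn main() {
    assert!(check_call(1, false).is_err());
    assert!(check_call(2, false).is_ok()); // not simd_shuffle: no check
}
```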

View file

@ -387,15 +387,17 @@ pub(crate) fn codegen_terminator_call<'tcx>(
match instance.def {
InstanceDef::Intrinsic(_) => {
crate::intrinsics::codegen_intrinsic_call(
match crate::intrinsics::codegen_intrinsic_call(
fx,
instance,
args,
ret_place,
target,
source_info,
);
return;
) {
Ok(()) => return,
Err(instance) => Some(instance),
}
}
InstanceDef::DropGlue(_, None) => {
// empty drop glue - a nop.

View file

@ -268,7 +268,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
destination: CPlace<'tcx>,
target: Option<BasicBlock>,
source_info: mir::SourceInfo,
) {
) -> Result<(), Instance<'tcx>> {
let intrinsic = fx.tcx.item_name(instance.def_id());
let instance_args = instance.args;
@ -295,8 +295,9 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
destination,
target,
source_info,
);
)?;
}
Ok(())
}
fn codegen_float_intrinsic_call<'tcx>(
@ -430,25 +431,20 @@ fn codegen_regular_intrinsic_call<'tcx>(
ret: CPlace<'tcx>,
destination: Option<BasicBlock>,
source_info: mir::SourceInfo,
) {
) -> Result<(), Instance<'tcx>> {
assert_eq!(generic_args, instance.args);
let usize_layout = fx.layout_of(fx.tcx.types.usize);
match intrinsic {
sym::abort => {
fx.bcx.ins().trap(TrapCode::User(0));
return;
return Ok(());
}
sym::likely | sym::unlikely => {
intrinsic_args!(fx, args => (a); intrinsic);
ret.write_cvalue(fx, a);
}
sym::is_val_statically_known => {
intrinsic_args!(fx, args => (_a); intrinsic);
let res = fx.bcx.ins().iconst(types::I8, 0);
ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
}
sym::breakpoint => {
intrinsic_args!(fx, args => (); intrinsic);
@ -697,7 +693,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
})
});
crate::base::codegen_panic_nounwind(fx, &msg_str, Some(source_info.span));
return;
return Ok(());
}
}
}
@ -792,7 +788,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
// special case for compiler-builtins to avoid having to patch it
crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
return;
return Ok(());
} else {
fx.tcx
.dcx()
@ -802,7 +798,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
return;
return Ok(());
}
}
let clif_ty = fx.clif_type(ty).unwrap();
@ -823,7 +819,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
// special case for compiler-builtins to avoid having to patch it
crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
return;
return Ok(());
} else {
fx.tcx
.dcx()
@ -833,7 +829,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
return;
return Ok(());
}
}
@ -850,7 +846,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return;
return Ok(());
}
}
let ty = fx.clif_type(layout.ty).unwrap();
@ -872,7 +868,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return;
return Ok(());
}
}
@ -895,7 +891,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return;
return Ok(());
}
}
let ty = fx.clif_type(layout.ty).unwrap();
@ -917,7 +913,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return;
return Ok(());
}
}
let ty = fx.clif_type(layout.ty).unwrap();
@ -939,7 +935,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return;
return Ok(());
}
}
let ty = fx.clif_type(layout.ty).unwrap();
@ -960,7 +956,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return;
return Ok(());
}
}
let ty = fx.clif_type(layout.ty).unwrap();
@ -981,7 +977,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return;
return Ok(());
}
}
let ty = fx.clif_type(layout.ty).unwrap();
@ -1002,7 +998,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return;
return Ok(());
}
}
let ty = fx.clif_type(layout.ty).unwrap();
@ -1023,7 +1019,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return;
return Ok(());
}
}
let ty = fx.clif_type(layout.ty).unwrap();
@ -1044,7 +1040,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return;
return Ok(());
}
}
let ty = fx.clif_type(layout.ty).unwrap();
@ -1065,7 +1061,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return;
return Ok(());
}
}
let ty = fx.clif_type(layout.ty).unwrap();
@ -1086,7 +1082,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return;
return Ok(());
}
}
let ty = fx.clif_type(layout.ty).unwrap();
@ -1233,19 +1229,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
ret.write_cvalue(fx, CValue::by_val(cmp, ret.layout()));
}
sym::const_allocate => {
intrinsic_args!(fx, args => (_size, _align); intrinsic);
// returns a null pointer at runtime.
let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
}
sym::const_deallocate => {
intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
// nop at runtime.
}
sym::black_box => {
intrinsic_args!(fx, args => (a); intrinsic);
@ -1261,13 +1244,12 @@ fn codegen_regular_intrinsic_call<'tcx>(
);
}
_ => {
fx.tcx
.dcx()
.span_fatal(source_info.span, format!("unsupported intrinsic {}", intrinsic));
}
// Unimplemented intrinsics must have a fallback body. The fallback body is obtained
// by converting the `InstanceDef::Intrinsic` to an `InstanceDef::Item`.
_ => return Err(Instance::new(instance.def_id(), instance.args)),
}
let ret_block = fx.get_block(destination.unwrap());
fx.bcx.ins().jump(ret_block, &[]);
Ok(())
}
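The cranelift hunks above establish the commit's central protocol: `codegen_intrinsic_call` now returns `Result<(), Instance<'tcx>>`, every early `return` becomes `return Ok(())`, and the catch-all arm returns `Err(Instance::new(..))` so the caller emits an ordinary call to the intrinsic's fallback MIR body. That is also why the hardcoded `is_val_statically_known`, `const_allocate`, and `const_deallocate` arms could simply be deleted. A toy version of the protocol, with a plain struct standing in for `Instance`:

```rust
// Ok(()) means the backend emitted native code; Err carries a descriptor
// of the function whose fallback body should be called instead.
struct Instance { name: &'static str }

fn codegen_intrinsic(name: &'static str) -> Result<(), Instance> {
    match name {
        "abort" | "breakpoint" => Ok(()), // natively lowered
        // Anything else must have a fallback body.
        _ => Err(Instance { name }),
    }
}

fn codegen_call(name: &'static str) -> &'static str {
    match codegen_intrinsic(name) {
        Ok(()) => "native",
        Err(inst) => {
            // The caller lowers this like any ordinary function call.
            assert_eq!(inst.name, name);
            "call-fallback-body"
        }
    }
}

fn main() {
    assert_eq!(codegen_call("abort"), "native");
    assert_eq!(codegen_call("is_val_statically_known"), "call-fallback-body");
}
```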

View file

@ -90,7 +90,7 @@ fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) ->
}
impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) -> Result<(), Instance<'tcx>> {
let tcx = self.tcx;
let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
@ -137,7 +137,7 @@ fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'t
args[2].immediate(),
llresult,
);
return;
return Ok(());
}
sym::breakpoint => {
unimplemented!();
@ -166,12 +166,12 @@ fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'t
sym::volatile_store => {
let dst = args[0].deref(self.cx());
args[1].val.volatile_store(self, dst);
return;
return Ok(());
}
sym::unaligned_volatile_store => {
let dst = args[0].deref(self.cx());
args[1].val.unaligned_volatile_store(self, dst);
return;
return Ok(());
}
sym::prefetch_read_data
| sym::prefetch_write_data
@ -269,7 +269,7 @@ fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'t
},
None => {
tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
return;
return Ok(());
}
}
}
@ -339,7 +339,7 @@ fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'t
extended_asm.set_volatile_flag(true);
// We have copied the value to `result` already.
return;
return Ok(());
}
sym::ptr_mask => {
@ -357,11 +357,12 @@ fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'t
_ if name_str.starts_with("simd_") => {
match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
Ok(llval) => llval,
Err(()) => return,
Err(()) => return Ok(()),
}
}
_ => bug!("unknown intrinsic '{}'", name),
// Fall back to default body
_ => return Err(Instance::new(instance.def_id(), instance.args)),
};
if !fn_abi.ret.is_ignore() {
@ -376,6 +377,7 @@ fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'t
.store(self, result);
}
}
Ok(())
}
fn abort(&mut self) {

View file

@ -86,7 +86,7 @@ fn codegen_intrinsic_call(
args: &[OperandRef<'tcx, &'ll Value>],
llresult: &'ll Value,
span: Span,
) {
) -> Result<(), ty::Instance<'tcx>> {
let tcx = self.tcx;
let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
@ -141,7 +141,7 @@ fn codegen_intrinsic_call(
args[2].immediate(),
llresult,
);
return;
return Ok(());
}
sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]),
sym::va_copy => {
@ -194,17 +194,17 @@ fn codegen_intrinsic_call(
if !result.layout.is_zst() {
self.store(load, result.llval, result.align);
}
return;
return Ok(());
}
sym::volatile_store => {
let dst = args[0].deref(self.cx());
args[1].val.volatile_store(self, dst);
return;
return Ok(());
}
sym::unaligned_volatile_store => {
let dst = args[0].deref(self.cx());
args[1].val.unaligned_volatile_store(self, dst);
return;
return Ok(());
}
sym::prefetch_read_data
| sym::prefetch_write_data
@ -305,7 +305,7 @@ fn codegen_intrinsic_call(
name,
ty,
});
return;
return Ok(());
}
}
}
@ -387,7 +387,7 @@ fn codegen_intrinsic_call(
.unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));
// We have copied the value to `result` already.
return;
return Ok(());
}
_ if name.as_str().starts_with("simd_") => {
@ -395,11 +395,15 @@ fn codegen_intrinsic_call(
self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span,
) {
Ok(llval) => llval,
Err(()) => return,
Err(()) => return Ok(()),
}
}
_ => bug!("unknown intrinsic '{}' -- should it have been lowered earlier?", name),
_ => {
debug!("unknown intrinsic '{}' -- falling back to default body", name);
// Call the fallback body instead of generating the intrinsic code
return Err(ty::Instance::new(instance.def_id(), instance.args));
}
};
if !fn_abi.ret.is_ignore() {
@ -411,6 +415,7 @@ fn codegen_intrinsic_call(
.store(self, result);
}
}
Ok(())
}
fn abort(&mut self) {

View file

@ -787,7 +787,7 @@ fn codegen_call_terminator(
// Handle intrinsics old codegen wants Expr's for, ourselves.
let intrinsic = match def {
Some(ty::InstanceDef::Intrinsic(def_id)) => Some(bx.tcx().item_name(def_id)),
Some(ty::InstanceDef::Intrinsic(def_id)) => Some(bx.tcx().intrinsic(def_id).unwrap()),
_ => None,
};
@ -817,21 +817,16 @@ fn codegen_call_terminator(
// The arguments we'll be passing. Plus one to account for outptr, if used.
let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
let mut llargs = Vec::with_capacity(arg_count);
// Prepare the return value destination
let ret_dest = if target.is_some() {
let is_intrinsic = intrinsic.is_some();
self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
} else {
ReturnDest::Nothing
};
if intrinsic == Some(sym::caller_location) {
return if let Some(target) = target {
let location =
self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
let mut llargs = Vec::with_capacity(arg_count);
let ret_dest =
self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, true, true);
assert_eq!(llargs, []);
if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
location.val.store(bx, tmp);
}
@ -842,9 +837,18 @@ fn codegen_call_terminator(
};
}
match intrinsic {
None | Some(sym::drop_in_place) => {}
let instance = match intrinsic {
None | Some(sym::drop_in_place) => instance,
Some(intrinsic) => {
let mut llargs = Vec::with_capacity(1);
let ret_dest = self.make_return_dest(
bx,
destination,
&fn_abi.ret,
&mut llargs,
true,
target.is_some(),
);
let dest = match ret_dest {
_ if fn_abi.ret.is_indirect() => llargs[0],
ReturnDest::Nothing => bx.const_undef(bx.type_ptr()),
@ -878,27 +882,29 @@ fn codegen_call_terminator(
})
.collect();
Self::codegen_intrinsic_call(
bx,
*instance.as_ref().unwrap(),
fn_abi,
&args,
dest,
span,
);
let instance = *instance.as_ref().unwrap();
match Self::codegen_intrinsic_call(bx, instance, fn_abi, &args, dest, span) {
Ok(()) => {
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
}
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
return if let Some(target) = target {
helper.funclet_br(self, bx, target, mergeable_succ)
} else {
bx.unreachable();
MergingSucc::False
};
}
Err(instance) => Some(instance),
}
return if let Some(target) = target {
helper.funclet_br(self, bx, target, mergeable_succ)
} else {
bx.unreachable();
MergingSucc::False
};
}
}
};
let mut llargs = Vec::with_capacity(arg_count);
let destination = target.as_ref().map(|&target| {
(self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, false, true), target)
});
// Split the rust-call tupled arguments off.
let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
@ -1040,14 +1046,13 @@ fn codegen_call_terminator(
(_, Some(llfn)) => llfn,
_ => span_bug!(span, "no instance or llfn for call"),
};
helper.do_call(
self,
bx,
fn_abi,
fn_ptr,
&llargs,
target.as_ref().map(|&target| (ret_dest, target)),
destination,
unwind,
&copied_constant_arguments,
mergeable_succ,
@ -1632,7 +1637,11 @@ fn make_return_dest(
fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
llargs: &mut Vec<Bx::Value>,
is_intrinsic: bool,
has_target: bool,
) -> ReturnDest<'tcx, Bx::Value> {
if !has_target {
return ReturnDest::Nothing;
}
// If the return is ignored, we can just return a do-nothing `ReturnDest`.
if fn_ret.is_ignore() {
return ReturnDest::Nothing;
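`make_return_dest` grows a `has_target` parameter so the "call diverges, no return slot needed" decision lives in one place; previously each call site checked `target.is_some()` before calling it. A reduced sketch with hypothetical types:

```rust
enum ReturnDest { Nothing, Store(usize) }

// has_target == false means the call never returns, so no return slot is
// prepared; the `Store` slot index here is a placeholder.
fn make_return_dest(has_target: bool, ret_is_ignored: bool) -> ReturnDest {
    if !has_target {
        return ReturnDest::Nothing;
    }
    if ret_is_ignored {
        return ReturnDest::Nothing;
    }
    ReturnDest::Store(0)
}

fn main() {
    assert!(matches!(make_return_dest(false, false), ReturnDest::Nothing));
    assert!(matches!(make_return_dest(true, false), ReturnDest::Store(_)));
}
```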

View file

@ -54,6 +54,7 @@ fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
/// In the `Err` case, returns the instance that should be called instead.
pub fn codegen_intrinsic_call(
bx: &mut Bx,
instance: ty::Instance<'tcx>,
@ -61,7 +62,7 @@ pub fn codegen_intrinsic_call(
args: &[OperandRef<'tcx, Bx::Value>],
llresult: Bx::Value,
span: Span,
) {
) -> Result<(), ty::Instance<'tcx>> {
let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());
let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
@ -81,7 +82,7 @@ pub fn codegen_intrinsic_call(
let llval = match name {
sym::abort => {
bx.abort();
return;
return Ok(());
}
sym::va_start => bx.va_start(args[0].immediate()),
@ -150,7 +151,7 @@ pub fn codegen_intrinsic_call(
args[0].immediate(),
args[2].immediate(),
);
return;
return Ok(());
}
sym::write_bytes => {
memset_intrinsic(
@ -161,7 +162,7 @@ pub fn codegen_intrinsic_call(
args[1].immediate(),
args[2].immediate(),
);
return;
return Ok(());
}
sym::volatile_copy_nonoverlapping_memory => {
@ -174,7 +175,7 @@ pub fn codegen_intrinsic_call(
args[1].immediate(),
args[2].immediate(),
);
return;
return Ok(());
}
sym::volatile_copy_memory => {
copy_intrinsic(
@ -186,7 +187,7 @@ pub fn codegen_intrinsic_call(
args[1].immediate(),
args[2].immediate(),
);
return;
return Ok(());
}
sym::volatile_set_memory => {
memset_intrinsic(
@ -197,17 +198,17 @@ pub fn codegen_intrinsic_call(
args[1].immediate(),
args[2].immediate(),
);
return;
return Ok(());
}
sym::volatile_store => {
let dst = args[0].deref(bx.cx());
args[1].val.volatile_store(bx, dst);
return;
return Ok(());
}
sym::unaligned_volatile_store => {
let dst = args[0].deref(bx.cx());
args[1].val.unaligned_volatile_store(bx, dst);
return;
return Ok(());
}
sym::exact_div => {
let ty = arg_tys[0];
@ -225,7 +226,7 @@ pub fn codegen_intrinsic_call(
name,
ty,
});
return;
return Ok(());
}
}
}
@ -245,7 +246,7 @@ pub fn codegen_intrinsic_call(
name,
ty: arg_tys[0],
});
return;
return Ok(());
}
}
}
@ -256,14 +257,14 @@ pub fn codegen_intrinsic_call(
span,
ty: arg_tys[0],
});
return;
return Ok(());
}
let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
span,
ty: ret_ty,
});
return;
return Ok(());
};
if signed {
bx.fptosi(args[0].immediate(), llret_ty)
@ -280,16 +281,6 @@ pub fn codegen_intrinsic_call(
}
}
sym::const_allocate => {
// returns a null pointer at runtime.
bx.const_null(bx.type_ptr())
}
sym::const_deallocate => {
// nop at runtime.
return;
}
// This requires that atomic intrinsics follow a specific naming pattern:
// "atomic_<operation>[_<ordering>]"
name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
@ -350,10 +341,10 @@ pub fn codegen_intrinsic_call(
bx.store(val, dest.llval, dest.align);
let dest = result.project_field(bx, 1);
bx.store(success, dest.llval, dest.align);
return;
} else {
return invalid_monomorphization(ty);
invalid_monomorphization(ty);
}
return Ok(());
}
"load" => {
@ -383,7 +374,8 @@ pub fn codegen_intrinsic_call(
)
}
} else {
return invalid_monomorphization(ty);
invalid_monomorphization(ty);
return Ok(());
}
}
@ -399,10 +391,10 @@ pub fn codegen_intrinsic_call(
val = bx.ptrtoint(val, bx.type_isize());
}
bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
return;
} else {
return invalid_monomorphization(ty);
invalid_monomorphization(ty);
}
return Ok(());
}
"fence" => {
@ -410,7 +402,7 @@ pub fn codegen_intrinsic_call(
parse_ordering(bx, ordering),
SynchronizationScope::CrossThread,
);
return;
return Ok(());
}
"singlethreadfence" => {
@ -418,7 +410,7 @@ pub fn codegen_intrinsic_call(
parse_ordering(bx, ordering),
SynchronizationScope::SingleThread,
);
return;
return Ok(());
}
// These are all AtomicRMW ops
@ -449,7 +441,8 @@ pub fn codegen_intrinsic_call(
}
bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
} else {
return invalid_monomorphization(ty);
invalid_monomorphization(ty);
return Ok(());
}
}
}
@ -458,7 +451,7 @@ pub fn codegen_intrinsic_call(
sym::nontemporal_store => {
let dst = args[0].deref(bx.cx());
args[1].val.nontemporal_store(bx, dst);
return;
return Ok(());
}
sym::ptr_guaranteed_cmp => {
@ -493,8 +486,7 @@ pub fn codegen_intrinsic_call(
_ => {
// Need to use backend-specific things in the implementation.
bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
return;
return bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
}
};
@ -507,6 +499,7 @@ pub fn codegen_intrinsic_call(
.store(bx, result);
}
}
Ok(())
}
}
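The shared `rustc_codegen_ssa` layer now forwards its wildcard arm with `return bx.codegen_intrinsic_call(...)`, so a backend's `Err(fallback)` propagates unchanged up to the terminator codegen. A toy two-layer dispatch:

```rust
// The shared layer handles target-independent intrinsics once for all
// backends and forwards the rest, passing any Err(fallback) straight
// through to the caller.
fn backend_intrinsic(name: &'static str) -> Result<(), &'static str> {
    match name {
        "sqrtf32" => Ok(()),  // backend-specific lowering
        other => Err(other),  // fall back to the default body
    }
}

fn shared_intrinsic(name: &'static str) -> Result<(), &'static str> {
    match name {
        "abort" | "volatile_store" => Ok(()), // handled in the shared layer
        other => backend_intrinsic(other),
    }
}

fn main() {
    assert!(shared_intrinsic("abort").is_ok());
    assert!(shared_intrinsic("sqrtf32").is_ok());
    assert_eq!(shared_intrinsic("unknown"), Err("unknown"));
}
```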

View file

@ -8,6 +8,8 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
/// Remember to add all intrinsics here, in `compiler/rustc_hir_analysis/src/check/mod.rs`,
/// and in `library/core/src/intrinsics.rs`; if you need access to any LLVM intrinsics,
/// add them to `compiler/rustc_codegen_llvm/src/context.rs`.
/// Returns `Err` if another instance should be called instead. This is used to invoke
/// intrinsic default bodies in case an intrinsic is not implemented by the backend.
fn codegen_intrinsic_call(
&mut self,
instance: ty::Instance<'tcx>,
@ -15,7 +17,7 @@ fn codegen_intrinsic_call(
args: &[OperandRef<'tcx, Self::Value>],
llresult: Self::Value,
span: Span,
);
) -> Result<(), ty::Instance<'tcx>>;
fn abort(&mut self);
fn assume(&mut self, val: Self::Value);

View file

@ -453,7 +453,7 @@ const_eval_validation_invalid_fn_ptr = {$front_matter}: encountered {$value}, bu
const_eval_validation_invalid_ref_meta = {$front_matter}: encountered invalid reference metadata: total size is bigger than largest supported object
const_eval_validation_invalid_ref_slice_meta = {$front_matter}: encountered invalid reference metadata: slice is bigger than largest supported object
const_eval_validation_invalid_vtable_ptr = {$front_matter}: encountered {$value}, but expected a vtable pointer
const_eval_validation_mutable_ref_in_const = {$front_matter}: encountered mutable reference in a `const` or `static`
const_eval_validation_mutable_ref_in_const_or_static = {$front_matter}: encountered mutable reference in a `const` or `static`
const_eval_validation_mutable_ref_to_immutable = {$front_matter}: encountered mutable reference or box pointing to read-only memory
const_eval_validation_never_val = {$front_matter}: encountered a value of the never type `!`
const_eval_validation_null_box = {$front_matter}: encountered a null box

View file

@ -49,7 +49,7 @@ fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) => {
// Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
// foreign items cannot be evaluated at compile-time.
let is_const = if tcx.is_intrinsic(def_id) {
let is_const = if tcx.intrinsic(def_id).is_some() {
tcx.lookup_const_stability(def_id).is_some()
} else {
false

View file

@ -603,18 +603,18 @@ fn diagnostic_message(&self) -> DiagnosticMessage {
PtrToUninhabited { ptr_kind: PointerKind::Box, .. } => {
const_eval_validation_box_to_uninhabited
}
PtrToUninhabited { ptr_kind: PointerKind::Ref, .. } => {
PtrToUninhabited { ptr_kind: PointerKind::Ref(_), .. } => {
const_eval_validation_ref_to_uninhabited
}
PtrToStatic { ptr_kind: PointerKind::Box } => const_eval_validation_box_to_static,
PtrToStatic { ptr_kind: PointerKind::Ref } => const_eval_validation_ref_to_static,
PtrToStatic { ptr_kind: PointerKind::Ref(_) } => const_eval_validation_ref_to_static,
PointerAsInt { .. } => const_eval_validation_pointer_as_int,
PartialPointer => const_eval_validation_partial_pointer,
ConstRefToMutable => const_eval_validation_const_ref_to_mutable,
ConstRefToExtern => const_eval_validation_const_ref_to_extern,
MutableRefInConst => const_eval_validation_mutable_ref_in_const,
MutableRefInConstOrStatic => const_eval_validation_mutable_ref_in_const_or_static,
MutableRefToImmutable => const_eval_validation_mutable_ref_to_immutable,
NullFnPtr => const_eval_validation_null_fn_ptr,
NeverVal => const_eval_validation_never_val,
@ -630,37 +630,39 @@ fn diagnostic_message(&self) -> DiagnosticMessage {
InvalidMetaSliceTooLarge { ptr_kind: PointerKind::Box } => {
const_eval_validation_invalid_box_slice_meta
}
InvalidMetaSliceTooLarge { ptr_kind: PointerKind::Ref } => {
InvalidMetaSliceTooLarge { ptr_kind: PointerKind::Ref(_) } => {
const_eval_validation_invalid_ref_slice_meta
}
InvalidMetaTooLarge { ptr_kind: PointerKind::Box } => {
const_eval_validation_invalid_box_meta
}
InvalidMetaTooLarge { ptr_kind: PointerKind::Ref } => {
InvalidMetaTooLarge { ptr_kind: PointerKind::Ref(_) } => {
const_eval_validation_invalid_ref_meta
}
UnalignedPtr { ptr_kind: PointerKind::Ref, .. } => const_eval_validation_unaligned_ref,
UnalignedPtr { ptr_kind: PointerKind::Ref(_), .. } => {
const_eval_validation_unaligned_ref
}
UnalignedPtr { ptr_kind: PointerKind::Box, .. } => const_eval_validation_unaligned_box,
NullPtr { ptr_kind: PointerKind::Box } => const_eval_validation_null_box,
NullPtr { ptr_kind: PointerKind::Ref } => const_eval_validation_null_ref,
NullPtr { ptr_kind: PointerKind::Ref(_) } => const_eval_validation_null_ref,
DanglingPtrNoProvenance { ptr_kind: PointerKind::Box, .. } => {
const_eval_validation_dangling_box_no_provenance
}
DanglingPtrNoProvenance { ptr_kind: PointerKind::Ref, .. } => {
DanglingPtrNoProvenance { ptr_kind: PointerKind::Ref(_), .. } => {
const_eval_validation_dangling_ref_no_provenance
}
DanglingPtrOutOfBounds { ptr_kind: PointerKind::Box } => {
const_eval_validation_dangling_box_out_of_bounds
}
DanglingPtrOutOfBounds { ptr_kind: PointerKind::Ref } => {
DanglingPtrOutOfBounds { ptr_kind: PointerKind::Ref(_) } => {
const_eval_validation_dangling_ref_out_of_bounds
}
DanglingPtrUseAfterFree { ptr_kind: PointerKind::Box } => {
const_eval_validation_dangling_box_use_after_free
}
DanglingPtrUseAfterFree { ptr_kind: PointerKind::Ref } => {
DanglingPtrUseAfterFree { ptr_kind: PointerKind::Ref(_) } => {
const_eval_validation_dangling_ref_use_after_free
}
InvalidBool { .. } => const_eval_validation_invalid_bool,
@ -766,7 +768,7 @@ fn add_range_arg<G: EmissionGuarantee>(
}
NullPtr { .. }
| PtrToStatic { .. }
| MutableRefInConst
| MutableRefInConstOrStatic
| ConstRefToMutable
| ConstRefToExtern
| MutableRefToImmutable
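`PointerKind::Ref` now carries the reference's `Mutability`, which is why every match arm in this file gains a `(_)` payload. A small model of the payload being read back where validation needs it:

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum Mutability { Not, Mut }

enum PointerKind { Box, Ref(Mutability) }

// The expected mutability comes straight from the variant payload instead
// of being re-derived from the pointee type at each use site.
fn expected_mutbl(kind: &PointerKind) -> Mutability {
    match kind {
        PointerKind::Box => Mutability::Mut,
        PointerKind::Ref(m) => *m,
    }
}

fn main() {
    assert_eq!(expected_mutbl(&PointerKind::Ref(Mutability::Not)), Mutability::Not);
    assert_eq!(expected_mutbl(&PointerKind::Box), Mutability::Mut);
}
```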

View file

@ -526,7 +526,7 @@ pub(crate) fn eval_fn_call(
match instance.def {
ty::InstanceDef::Intrinsic(def_id) => {
assert!(self.tcx.is_intrinsic(def_id));
assert!(self.tcx.intrinsic(def_id).is_some());
// FIXME: Should `InPlace` arguments be reset to uninit?
M::call_intrinsic(
self,

View file

@ -5,7 +5,7 @@
//! to be const-safe.
use std::fmt::Write;
use std::num::NonZeroUsize;
use std::num::NonZero;
use either::{Left, Right};
@ -445,22 +445,22 @@ fn check_safe_pointer(
// Determine whether this pointer expects to be pointing to something mutable.
let ptr_expected_mutbl = match ptr_kind {
PointerKind::Box => Mutability::Mut,
PointerKind::Ref => {
let tam = value.layout.ty.builtin_deref(false).unwrap();
// ZST never require mutability. We do not take into account interior mutability
// here since we cannot know if there really is an `UnsafeCell` inside
// `Option<UnsafeCell>` -- so we check that in the recursive descent behind this
// reference.
if size == Size::ZERO { Mutability::Not } else { tam.mutbl }
PointerKind::Ref(mutbl) => {
// We do not take into account interior mutability here since we cannot know if
// there really is an `UnsafeCell` inside `Option<UnsafeCell>` -- so we check
// that in the recursive descent behind this reference (controlled by
// `allow_immutable_unsafe_cell`).
mutbl
}
};
// Proceed recursively even for ZST, no reason to skip them!
// `!` is a ZST and we want to validate it.
if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr()) {
let mut skip_recursive_check = false;
// Let's see what kind of memory this points to.
// `unwrap` since dangling pointers have already been handled.
let alloc_kind = self.ecx.tcx.try_get_global_alloc(alloc_id).unwrap();
match alloc_kind {
let alloc_actual_mutbl = match alloc_kind {
GlobalAlloc::Static(did) => {
// Special handling for pointers to statics (irrespective of their type).
assert!(!self.ecx.tcx.is_thread_local_static(did));
@ -474,12 +474,6 @@ fn check_safe_pointer(
.no_bound_vars()
.expect("statics should not have generic parameters")
.is_freeze(*self.ecx.tcx, ty::ParamEnv::reveal_all());
// Mutability check.
if ptr_expected_mutbl == Mutability::Mut {
if !is_mut {
throw_validation_failure!(self.path, MutableRefToImmutable);
}
}
// Mode-specific checks
match self.ctfe_mode {
Some(
@ -494,15 +488,9 @@ fn check_safe_pointer(
// trigger cycle errors if we try to compute the value of the other static
// and that static refers back to us (potentially through a promoted).
// This could miss some UB, but that's fine.
return Ok(());
skip_recursive_check = true;
}
Some(CtfeValidationMode::Const { .. }) => {
// For consts on the other hand we have to recursively check;
// pattern matching assumes a valid value. However we better make
// sure this is not mutable.
if is_mut {
throw_validation_failure!(self.path, ConstRefToMutable);
}
// We can't recursively validate `extern static`, so we better reject them.
if self.ecx.tcx.is_foreign_item(did) {
throw_validation_failure!(self.path, ConstRefToExtern);
@ -510,25 +498,38 @@ fn check_safe_pointer(
}
None => {}
}
// Return alloc mutability
if is_mut { Mutability::Mut } else { Mutability::Not }
}
GlobalAlloc::Memory(alloc) => {
if alloc.inner().mutability == Mutability::Mut
&& matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. }))
{
throw_validation_failure!(self.path, ConstRefToMutable);
}
if ptr_expected_mutbl == Mutability::Mut
&& alloc.inner().mutability == Mutability::Not
{
throw_validation_failure!(self.path, MutableRefToImmutable);
}
}
GlobalAlloc::Memory(alloc) => alloc.inner().mutability,
GlobalAlloc::Function(..) | GlobalAlloc::VTable(..) => {
// These are immutable, we better don't allow mutable pointers here.
if ptr_expected_mutbl == Mutability::Mut {
throw_validation_failure!(self.path, MutableRefToImmutable);
}
Mutability::Not
}
};
// Mutability check.
// If this allocation has size zero, there is no actual mutability here.
let (size, _align, _alloc_kind) = self.ecx.get_alloc_info(alloc_id);
if size != Size::ZERO {
if ptr_expected_mutbl == Mutability::Mut
&& alloc_actual_mutbl == Mutability::Not
{
throw_validation_failure!(self.path, MutableRefToImmutable);
}
if ptr_expected_mutbl == Mutability::Mut
&& self.ctfe_mode.is_some_and(|c| !c.may_contain_mutable_ref())
{
throw_validation_failure!(self.path, MutableRefInConstOrStatic);
}
if alloc_actual_mutbl == Mutability::Mut
&& matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. }))
{
throw_validation_failure!(self.path, ConstRefToMutable);
}
}
// Potentially skip recursive check.
if skip_recursive_check {
return Ok(());
}
}
let path = &self.path;
@ -598,16 +599,8 @@ fn try_visit_primitive(
}
Ok(true)
}
ty::Ref(_, ty, mutbl) => {
if self.ctfe_mode.is_some_and(|c| !c.may_contain_mutable_ref())
&& *mutbl == Mutability::Mut
{
let layout = self.ecx.layout_of(*ty)?;
if !layout.is_zst() {
throw_validation_failure!(self.path, MutableRefInConst);
}
}
self.check_safe_pointer(value, PointerKind::Ref)?;
ty::Ref(_, _ty, mutbl) => {
self.check_safe_pointer(value, PointerKind::Ref(*mutbl))?;
Ok(true)
}
ty::FnPtr(_sig) => {
@ -785,7 +778,7 @@ fn visit_variant(
fn visit_union(
&mut self,
op: &OpTy<'tcx, M::Provenance>,
_fields: NonZeroUsize,
_fields: NonZero<usize>,
) -> InterpResult<'tcx> {
// Special check for CTFE validation, preventing `UnsafeCell` inside unions in immutable memory.
if self.ctfe_mode.is_some_and(|c| !c.allow_immutable_unsafe_cell()) {
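The restructuring above computes the allocation's actual mutability once (`alloc_actual_mutbl`), skips the checks for zero-sized allocations, and then applies the failure cases in sequence; the old per-branch checks, and the separate `MutableRefInConst` path in `try_visit_primitive`, fold into it. A condensed, runnable model of the decision order, with hedged parameter names:

```rust
#[derive(Clone, Copy, PartialEq)]
enum Mutability { Not, Mut }

fn check_ref(
    expected: Mutability,          // from PointerKind::Ref(mutbl)
    actual: Mutability,            // from the global allocation
    size: u64,
    forbids_mutable_refs: bool,    // !ctfe_mode.may_contain_mutable_ref()
    const_mode: bool,              // CtfeValidationMode::Const
) -> Result<(), &'static str> {
    if size == 0 {
        return Ok(()); // no actual mutability behind a zero-sized ref
    }
    if expected == Mutability::Mut && actual == Mutability::Not {
        return Err("MutableRefToImmutable");
    }
    if expected == Mutability::Mut && forbids_mutable_refs {
        return Err("MutableRefInConstOrStatic");
    }
    if actual == Mutability::Mut && const_mode {
        return Err("ConstRefToMutable");
    }
    Ok(())
}

fn main() {
    assert!(check_ref(Mutability::Mut, Mutability::Not, 8, true, true).is_err());
    assert!(check_ref(Mutability::Mut, Mutability::Not, 0, true, true).is_ok());
}
```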

View file

@ -7,7 +7,7 @@
use rustc_target::abi::FieldIdx;
use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
use std::num::NonZeroUsize;
use std::num::NonZero;
use super::{InterpCx, MPlaceTy, Machine, Projectable};
@ -43,7 +43,7 @@ fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
}
/// Visits the given value as a union. No automatic recursion can happen here.
#[inline(always)]
fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx> {
fn visit_union(&mut self, _v: &Self::V, _fields: NonZero<usize>) -> InterpResult<'tcx> {
Ok(())
}
/// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.

View file

@ -11,6 +11,7 @@
#![feature(assert_matches)]
#![feature(box_patterns)]
#![feature(decl_macro)]
#![feature(generic_nonzero)]
#![feature(let_chains)]
#![feature(slice_ptr_get)]
#![feature(never_type)]

View file

@ -861,7 +861,7 @@ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location
// We do not use `const` modifiers for intrinsic "functions", as intrinsics are
// `extern` functions, and these have no way to get marked `const`. So instead we
// use `rustc_const_(un)stable` attributes to mean that the intrinsic is `const`
if self.ccx.is_const_stable_const_fn() || tcx.is_intrinsic(callee) {
if self.ccx.is_const_stable_const_fn() || tcx.intrinsic(callee).is_some() {
self.check_op(ops::FnCallUnstable(callee, None));
return;
}

View file

@ -20,6 +20,7 @@
#![feature(cfg_match)]
#![feature(core_intrinsics)]
#![feature(extend_one)]
#![feature(generic_nonzero)]
#![feature(hash_raw_entry)]
#![feature(hasher_prefixfree_extras)]
#![feature(lazy_cell)]

View file

@ -6,6 +6,7 @@
use std::hash::{BuildHasher, Hash, Hasher};
use std::marker::PhantomData;
use std::mem;
use std::num::NonZero;
#[cfg(test)]
mod tests;
@ -338,14 +339,14 @@ impl<CTX, T> HashStable<CTX> for PhantomData<T> {
fn hash_stable(&self, _ctx: &mut CTX, _hasher: &mut StableHasher) {}
}
impl<CTX> HashStable<CTX> for ::std::num::NonZeroU32 {
impl<CTX> HashStable<CTX> for NonZero<u32> {
#[inline]
fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
self.get().hash_stable(ctx, hasher)
}
}
impl<CTX> HashStable<CTX> for ::std::num::NonZeroUsize {
impl<CTX> HashStable<CTX> for NonZero<usize> {
#[inline]
fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
self.get().hash_stable(ctx, hasher)
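Same `NonZero` migration as elsewhere: the two `HashStable` impls remain one per width, just spelled with the generic type. A toy stand-in using a local trait, since `HashStable` itself is rustc-internal:

```rust
use std::hash::{DefaultHasher, Hash, Hasher};
use std::num::NonZero;

trait StableHash {
    fn stable_hash<H: Hasher>(&self, h: &mut H);
}

impl StableHash for NonZero<u32> {
    fn stable_hash<H: Hasher>(&self, h: &mut H) {
        self.get().hash(h) // delegate to the underlying primitive
    }
}

impl StableHash for NonZero<usize> {
    fn stable_hash<H: Hasher>(&self, h: &mut H) {
        self.get().hash(h)
    }
}

fn main() {
    let mut h = DefaultHasher::new();
    NonZero::new(7u32).unwrap().stable_hash(&mut h);
    println!("{:x}", h.finish());
}
```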

View file

@ -1,7 +1,7 @@
use parking_lot::Mutex;
use std::cell::Cell;
use std::cell::OnceCell;
use std::num::NonZeroUsize;
use std::num::NonZero;
use std::ops::Deref;
use std::ptr;
use std::sync::Arc;
@ -31,7 +31,7 @@ fn verify(self) -> usize {
}
struct RegistryData {
thread_limit: NonZeroUsize,
thread_limit: NonZero<usize>,
threads: Mutex<usize>,
}
@ -61,7 +61,7 @@ struct ThreadData {
impl Registry {
/// Creates a registry which can hold up to `thread_limit` threads.
pub fn new(thread_limit: NonZeroUsize) -> Self {
pub fn new(thread_limit: NonZero<usize>) -> Self {
Registry(Arc::new(RegistryData { thread_limit, threads: Mutex::new(0) }))
}
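With `NonZero<usize>` in the registry API, a caller can feed it directly from std, which already hands out non-zero parallelism values. A small usage sketch (the registry itself is rustc-internal, so only the parameter plumbing is shown):

```rust
use std::num::NonZero;
use std::thread;

fn main() {
    // available_parallelism() returns io::Result<NonZero<usize>>, so no
    // zero check is needed before passing it on as a thread limit.
    let limit: NonZero<usize> =
        thread::available_parallelism().unwrap_or(NonZero::new(1).unwrap());
    println!("thread limit: {limit}");
}
```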

View file

@ -4,7 +4,7 @@
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::mem::ManuallyDrop;
use std::num::NonZeroUsize;
use std::num::NonZero;
use std::ops::{Deref, DerefMut};
use std::ptr::NonNull;
@ -134,7 +134,7 @@ fn pack(ptr: NonNull<P::Target>, tag: T) -> NonNull<P::Target> {
ptr.map_addr(|addr| {
// Safety:
// - The pointer is `NonNull` => it's address is `NonZeroUsize`
// - The pointer is `NonNull` => it's address is `NonZero<usize>`
// - `P::BITS` least significant bits are always zero (`Pointer` contract)
// - `T::BITS <= P::BITS` (from `Self::ASSERTION`)
//
@ -143,14 +143,14 @@ fn pack(ptr: NonNull<P::Target>, tag: T) -> NonNull<P::Target> {
// `{non_zero} | packed_tag` can't make the value zero.
let packed = (addr.get() >> T::BITS) | packed_tag;
unsafe { NonZeroUsize::new_unchecked(packed) }
unsafe { NonZero::new_unchecked(packed) }
})
}
/// Retrieves the original raw pointer from `self.packed`.
#[inline]
pub(super) fn pointer_raw(&self) -> NonNull<P::Target> {
self.packed.map_addr(|addr| unsafe { NonZeroUsize::new_unchecked(addr.get() << T::BITS) })
self.packed.map_addr(|addr| unsafe { NonZero::new_unchecked(addr.get() << T::BITS) })
}
/// This provides a reference to the `P` pointer itself, rather than the
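The packing above shifts the address right by `T::BITS` and ORs the tag into the freed high bits; non-zero-ness is preserved by the alignment argument in the safety comment. A toy pack/unpack pair with a made-up `TAG_BITS`, using the safe constructor in place of rustc's `new_unchecked`:

```rust
use std::num::NonZero;

const TAG_BITS: u32 = 2;

fn pack(addr: NonZero<usize>, tag: usize) -> NonZero<usize> {
    assert!(tag < (1usize << TAG_BITS));
    // Shift the address right to free the top TAG_BITS bits, then OR the
    // tag into them. rustc uses `new_unchecked` here, justified by
    // alignment; `new(..).expect(..)` keeps this sketch sound.
    let packed = (addr.get() >> TAG_BITS) | (tag << (usize::BITS - TAG_BITS));
    NonZero::new(packed).expect("packing preserved non-zero-ness")
}

fn unpack(packed: NonZero<usize>) -> (NonZero<usize>, usize) {
    let addr = NonZero::new(packed.get() << TAG_BITS).expect("address survives");
    (addr, packed.get() >> (usize::BITS - TAG_BITS))
}

fn main() {
    let addr = NonZero::new(0x1000usize).unwrap();
    let (a, t) = unpack(pack(addr, 0b10));
    assert_eq!((a.get(), t), (0x1000, 0b10));
}
```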

View file

@ -79,7 +79,7 @@ fn into_diagnostic_arg(self) -> DiagnosticArgValue {
ast::ParamKindOrd,
std::io::Error,
Box<dyn std::error::Error>,
std::num::NonZeroU32,
std::num::NonZero<u32>,
hir::Target,
Edition,
Ident,

View file

@ -16,6 +16,7 @@
#![feature(box_patterns)]
#![feature(error_reporter)]
#![feature(extract_if)]
#![feature(generic_nonzero)]
#![feature(let_chains)]
#![feature(negative_impls)]
#![feature(never_type)]
@ -77,7 +78,7 @@
use std::fmt;
use std::hash::Hash;
use std::io::Write;
use std::num::NonZeroUsize;
use std::num::NonZero;
use std::ops::DerefMut;
use std::panic;
use std::path::{Path, PathBuf};
@ -525,6 +526,7 @@ pub enum StashKey {
MaybeFruTypo,
CallAssocMethod,
TraitMissingMethod,
AssociatedTypeSuggestion,
OpaqueHiddenTypeMismatch,
MaybeForgetReturn,
/// Query cycle detected, stashing in favor of a better error.
@ -546,7 +548,7 @@ pub struct DiagCtxtFlags {
pub can_emit_warnings: bool,
/// If Some, the Nth error-level diagnostic is upgraded to bug-level.
/// (rustc: see `-Z treat-err-as-bug`)
pub treat_err_as_bug: Option<NonZeroUsize>,
pub treat_err_as_bug: Option<NonZero<usize>>,
/// Eagerly emit delayed bugs as errors, so that the compiler debugger may
/// see all of the errors being emitted at once.
pub eagerly_emit_delayed_bugs: bool,

View file

@ -788,6 +788,10 @@ pub struct BuiltinAttribute {
rustc_safe_intrinsic, Normal, template!(Word), WarnFollowing,
"the `#[rustc_safe_intrinsic]` attribute is used internally to mark intrinsics as safe"
),
rustc_attr!(
rustc_intrinsic, Normal, template!(Word), ErrorFollowing,
"the `#[rustc_intrinsic]` attribute is used to declare intrinsics with function bodies",
),
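This registers the attribute the rest of the commit builds on. Roughly, and as an illustration of intent only (the attribute is compiler-internal, so this does not compile in ordinary crates), a declaration modeled on how `core` spells `is_val_statically_known` around this change:

```rust
#[rustc_intrinsic]
pub const fn is_val_statically_known<T: Copy>(_arg: T) -> bool {
    // Fallback body: a backend that returns Err(instance) for this
    // intrinsic ends up calling this conservative answer instead of
    // emitting a native lowering.
    false
}
```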
// ==========================================================================
// Internal attributes, Testing:

View file

@ -12,6 +12,7 @@
//! symbol to the `accepted` or `removed` modules respectively.
#![allow(internal_features)]
#![feature(generic_nonzero)]
#![feature(rustdoc_internals)]
#![doc(rust_logo)]
#![feature(lazy_cell)]
@ -25,13 +26,13 @@
mod tests;
use rustc_span::symbol::Symbol;
use std::num::NonZeroU32;
use std::num::NonZero;
#[derive(Debug, Clone)]
pub struct Feature {
pub name: Symbol,
pub since: &'static str,
issue: Option<NonZeroU32>,
issue: Option<NonZero<u32>>,
}
#[derive(Copy, Clone, Debug)]
@ -85,7 +86,7 @@ pub fn is_nightly_build(&self) -> bool {
}
}
fn find_lang_feature_issue(feature: Symbol) -> Option<NonZeroU32> {
fn find_lang_feature_issue(feature: Symbol) -> Option<NonZero<u32>> {
// Search in all the feature lists.
if let Some(f) = UNSTABLE_FEATURES.iter().find(|f| f.feature.name == feature) {
return f.feature.issue;
@ -99,21 +100,21 @@ fn find_lang_feature_issue(feature: Symbol) -> Option<NonZeroU32> {
panic!("feature `{feature}` is not declared anywhere");
}
const fn to_nonzero(n: Option<u32>) -> Option<NonZeroU32> {
// Can be replaced with `n.and_then(NonZeroU32::new)` if that is ever usable
const fn to_nonzero(n: Option<u32>) -> Option<NonZero<u32>> {
// Can be replaced with `n.and_then(NonZero::new)` if that is ever usable
// in const context. Requires https://github.com/rust-lang/rfcs/pull/2632.
match n {
None => None,
Some(n) => NonZeroU32::new(n),
Some(n) => NonZero::new(n),
}
}
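The helper above is the const-compatible spelling of `n.and_then(NonZero::new)`, since closures cannot be called in a `const fn`. Exercising it standalone shows the useful collapse of a zero issue number:

```rust
use std::num::NonZero;

const fn to_nonzero(n: Option<u32>) -> Option<NonZero<u32>> {
    match n {
        None => None,
        Some(n) => NonZero::new(n),
    }
}

// A zero issue number means "no tracking issue", so Some(0) becomes None.
const NO_ISSUE: Option<NonZero<u32>> = to_nonzero(Some(0));
const ISSUE: Option<NonZero<u32>> = to_nonzero(Some(27060));

fn main() {
    assert!(NO_ISSUE.is_none());
    assert_eq!(ISSUE.map(NonZero::get), Some(27060));
}
```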
pub enum GateIssue {
Language,
Library(Option<NonZeroU32>),
Library(Option<NonZero<u32>>),
}
pub fn find_feature_issue(feature: Symbol, issue: GateIssue) -> Option<NonZeroU32> {
pub fn find_feature_issue(feature: Symbol, issue: GateIssue) -> Option<NonZero<u32>> {
match issue {
GateIssue::Language => find_lang_feature_issue(feature),
GateIssue::Library(lib) => lib,

View file

@ -525,7 +525,18 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) {
DefKind::Enum => {
check_enum(tcx, def_id);
}
DefKind::Fn => {} // entirely within check_item_body
DefKind::Fn => {
if let Some(name) = tcx.intrinsic(def_id) {
intrinsic::check_intrinsic_type(
tcx,
def_id,
tcx.def_ident_span(def_id).unwrap(),
name,
Abi::Rust,
)
}
// Everything else is checked entirely within check_item_body
}
DefKind::Impl { of_trait } => {
if of_trait && let Some(impl_trait_header) = tcx.impl_trait_header(def_id) {
check_impl_items_against_trait(
@ -590,15 +601,24 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) {
match abi {
Abi::RustIntrinsic => {
for item in items {
let item = tcx.hir().foreign_item(item.id);
intrinsic::check_intrinsic_type(tcx, item);
intrinsic::check_intrinsic_type(
tcx,
item.id.owner_id.def_id,
item.span,
item.ident.name,
abi,
);
}
}
Abi::PlatformIntrinsic => {
for item in items {
let item = tcx.hir().foreign_item(item.id);
intrinsic::check_platform_intrinsic_type(tcx, item);
intrinsic::check_platform_intrinsic_type(
tcx,
item.id.owner_id.def_id,
item.span,
item.ident.name,
);
}
}

View file

@ -7,30 +7,36 @@
WrongNumberOfGenericArgumentsToIntrinsic,
};
use hir::def_id::DefId;
use rustc_errors::{codes::*, struct_span_code_err, DiagnosticMessage};
use rustc_hir as hir;
use rustc_middle::traits::{ObligationCause, ObligationCauseCode};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::{kw, sym};
use rustc_span::{Span, Symbol};
use rustc_target::spec::abi::Abi;
fn equate_intrinsic_type<'tcx>(
tcx: TyCtxt<'tcx>,
it: &hir::ForeignItem<'_>,
span: Span,
def_id: LocalDefId,
n_tps: usize,
n_lts: usize,
n_cts: usize,
sig: ty::PolyFnSig<'tcx>,
) {
let (own_counts, span) = match &it.kind {
hir::ForeignItemKind::Fn(.., generics) => {
let own_counts = tcx.generics_of(it.owner_id.to_def_id()).own_counts();
let (own_counts, span) = match tcx.hir_node_by_def_id(def_id) {
hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, generics, _), .. })
| hir::Node::ForeignItem(hir::ForeignItem {
kind: hir::ForeignItemKind::Fn(.., generics),
..
}) => {
let own_counts = tcx.generics_of(def_id).own_counts();
(own_counts, generics.span)
}
_ => {
struct_span_code_err!(tcx.dcx(), it.span, E0622, "intrinsic must be a function")
.with_span_label(it.span, "expected a function")
struct_span_code_err!(tcx.dcx(), span, E0622, "intrinsic must be a function")
.with_span_label(span, "expected a function")
.emit();
return;
}
@ -54,23 +60,26 @@ fn equate_intrinsic_type<'tcx>(
&& gen_count_ok(own_counts.types, n_tps, "type")
&& gen_count_ok(own_counts.consts, n_cts, "const")
{
let it_def_id = it.owner_id.def_id;
let _ = check_function_signature(
tcx,
ObligationCause::new(it.span, it_def_id, ObligationCauseCode::IntrinsicType),
it_def_id.into(),
ObligationCause::new(span, def_id, ObligationCauseCode::IntrinsicType),
def_id.into(),
sig,
);
}
}
/// Returns the unsafety of the given intrinsic.
pub fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: DefId) -> hir::Unsafety {
let has_safe_attr = match tcx.has_attr(intrinsic_id, sym::rustc_safe_intrinsic) {
true => hir::Unsafety::Normal,
false => hir::Unsafety::Unsafe,
pub fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: LocalDefId) -> hir::Unsafety {
let has_safe_attr = if tcx.has_attr(intrinsic_id, sym::rustc_intrinsic) {
tcx.fn_sig(intrinsic_id).skip_binder().unsafety()
} else {
match tcx.has_attr(intrinsic_id, sym::rustc_safe_intrinsic) {
true => hir::Unsafety::Normal,
false => hir::Unsafety::Unsafe,
}
};
let is_in_list = match tcx.item_name(intrinsic_id) {
let is_in_list = match tcx.item_name(intrinsic_id.into()) {
// When adding a new intrinsic to this list,
// it's usually worth updating that intrinsic's documentation
// to note that it's safe to call, since
@ -112,6 +121,7 @@ pub fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: DefId) -> hir
| sym::forget
| sym::black_box
| sym::variant_count
| sym::is_val_statically_known
| sym::ptr_mask
| sym::debug_assertions => hir::Unsafety::Normal,
_ => hir::Unsafety::Unsafe,
@ -122,7 +132,7 @@ pub fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: DefId) -> hir
tcx.def_span(intrinsic_id),
DiagnosticMessage::from(format!(
"intrinsic safety mismatch between list of intrinsics within the compiler and core library intrinsics for intrinsic `{}`",
tcx.item_name(intrinsic_id)
tcx.item_name(intrinsic_id.into())
)
)).emit();
}
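The unsafety rule changes shape here: for `#[rustc_intrinsic]` functions, safety is read off the written signature, while old-style `extern`-block intrinsics keep the `rustc_safe_intrinsic` allow-list (note `is_val_statically_known` joining the safe list in the hunk above). A condensed model:

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum Unsafety { Normal, Unsafe }

fn operation_unsafety(
    has_rustc_intrinsic_attr: bool,
    signature_unsafety: Unsafety,
    on_safe_list: bool,
) -> Unsafety {
    if has_rustc_intrinsic_attr {
        signature_unsafety // body-bearing intrinsics: trust the signature
    } else if on_safe_list {
        Unsafety::Normal
    } else {
        Unsafety::Unsafe // old-style intrinsics default to unsafe
    }
}

fn main() {
    assert_eq!(operation_unsafety(true, Unsafety::Normal, false), Unsafety::Normal);
    assert_eq!(operation_unsafety(false, Unsafety::Normal, false), Unsafety::Unsafe);
}
```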
@ -132,8 +142,14 @@ pub fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: DefId) -> hir
/// Remember to add all intrinsics here, in `compiler/rustc_codegen_llvm/src/intrinsic.rs`,
/// and in `library/core/src/intrinsics.rs`.
pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
let generics = tcx.generics_of(it.owner_id);
pub fn check_intrinsic_type(
tcx: TyCtxt<'_>,
intrinsic_id: LocalDefId,
span: Span,
intrinsic_name: Symbol,
abi: Abi,
) {
let generics = tcx.generics_of(intrinsic_id);
let param = |n| {
if let Some(&ty::GenericParamDef {
name, kind: ty::GenericParamDefKind::Type { .. }, ..
@ -141,11 +157,9 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
{
Ty::new_param(tcx, n, name)
} else {
Ty::new_error_with_message(tcx, tcx.def_span(it.owner_id), "expected param")
Ty::new_error_with_message(tcx, span, "expected param")
}
};
let intrinsic_id = it.owner_id.to_def_id();
let intrinsic_name = tcx.item_name(intrinsic_id);
let name_str = intrinsic_name.as_str();
let bound_vars = tcx.mk_bound_variable_kinds(&[
@ -169,7 +183,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
})
};
let (n_tps, n_lts, inputs, output, unsafety) = if name_str.starts_with("atomic_") {
let (n_tps, n_lts, n_cts, inputs, output, unsafety) = if name_str.starts_with("atomic_") {
let split: Vec<&str> = name_str.split('_').collect();
assert!(split.len() >= 2, "Atomic intrinsic in an incorrect format");
@ -187,49 +201,51 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
| "umin" => (1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
"fence" | "singlethreadfence" => (0, Vec::new(), Ty::new_unit(tcx)),
op => {
tcx.dcx().emit_err(UnrecognizedAtomicOperation { span: it.span, op });
tcx.dcx().emit_err(UnrecognizedAtomicOperation { span, op });
return;
}
};
(n_tps, 0, inputs, output, hir::Unsafety::Unsafe)
(n_tps, 0, 0, inputs, output, hir::Unsafety::Unsafe)
} else {
let unsafety = intrinsic_operation_unsafety(tcx, intrinsic_id);
let (n_tps, inputs, output) = match intrinsic_name {
sym::abort => (0, Vec::new(), tcx.types.never),
sym::unreachable => (0, Vec::new(), tcx.types.never),
sym::breakpoint => (0, Vec::new(), Ty::new_unit(tcx)),
let (n_tps, n_cts, inputs, output) = match intrinsic_name {
sym::abort => (0, 0, vec![], tcx.types.never),
sym::unreachable => (0, 0, vec![], tcx.types.never),
sym::breakpoint => (0, 0, vec![], Ty::new_unit(tcx)),
sym::size_of | sym::pref_align_of | sym::min_align_of | sym::variant_count => {
(1, Vec::new(), tcx.types.usize)
(1, 0, vec![], tcx.types.usize)
}
sym::size_of_val | sym::min_align_of_val => {
(1, vec![Ty::new_imm_ptr(tcx, param(0))], tcx.types.usize)
(1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], tcx.types.usize)
}
sym::rustc_peek => (1, vec![param(0)], param(0)),
sym::caller_location => (0, vec![], tcx.caller_location_ty()),
sym::rustc_peek => (1, 0, vec![param(0)], param(0)),
sym::caller_location => (0, 0, vec![], tcx.caller_location_ty()),
sym::assert_inhabited
| sym::assert_zero_valid
| sym::assert_mem_uninitialized_valid => (1, Vec::new(), Ty::new_unit(tcx)),
sym::forget => (1, vec![param(0)], Ty::new_unit(tcx)),
sym::transmute | sym::transmute_unchecked => (2, vec![param(0)], param(1)),
| sym::assert_mem_uninitialized_valid => (1, 0, vec![], Ty::new_unit(tcx)),
sym::forget => (1, 0, vec![param(0)], Ty::new_unit(tcx)),
sym::transmute | sym::transmute_unchecked => (2, 0, vec![param(0)], param(1)),
sym::prefetch_read_data
| sym::prefetch_write_data
| sym::prefetch_read_instruction
| sym::prefetch_write_instruction => (
1,
0,
vec![
Ty::new_ptr(tcx, ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
tcx.types.i32,
],
Ty::new_unit(tcx),
),
sym::drop_in_place => (1, vec![Ty::new_mut_ptr(tcx, param(0))], Ty::new_unit(tcx)),
sym::needs_drop => (1, Vec::new(), tcx.types.bool),
sym::drop_in_place => (1, 0, vec![Ty::new_mut_ptr(tcx, param(0))], Ty::new_unit(tcx)),
sym::needs_drop => (1, 0, vec![], tcx.types.bool),
sym::type_name => (1, Vec::new(), Ty::new_static_str(tcx)),
sym::type_id => (1, Vec::new(), tcx.types.u128),
sym::offset => (2, vec![param(0), param(1)], param(0)),
sym::type_name => (1, 0, vec![], Ty::new_static_str(tcx)),
sym::type_id => (1, 0, vec![], tcx.types.u128),
sym::offset => (2, 0, vec![param(0), param(1)], param(0)),
sym::arith_offset => (
1,
0,
vec![
Ty::new_ptr(tcx, ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
tcx.types.isize,
@ -238,6 +254,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
),
sym::ptr_mask => (
1,
0,
vec![
Ty::new_ptr(tcx, ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
tcx.types.usize,
@ -247,6 +264,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
sym::copy | sym::copy_nonoverlapping => (
1,
0,
vec![
Ty::new_ptr(tcx, ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
Ty::new_ptr(tcx, ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Mut }),
@ -256,6 +274,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
),
sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => (
1,
0,
vec![
Ty::new_ptr(tcx, ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Mut }),
Ty::new_ptr(tcx, ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
@ -265,10 +284,11 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
),
sym::compare_bytes => {
let byte_ptr = Ty::new_imm_ptr(tcx, tcx.types.u8);
(0, vec![byte_ptr, byte_ptr, tcx.types.usize], tcx.types.i32)
(0, 0, vec![byte_ptr, byte_ptr, tcx.types.usize], tcx.types.i32)
}
sym::write_bytes | sym::volatile_set_memory => (
1,
0,
vec![
Ty::new_ptr(tcx, ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Mut }),
tcx.types.u8,
@ -276,56 +296,56 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
],
Ty::new_unit(tcx),
),
sym::sqrtf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::sqrtf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::powif32 => (0, vec![tcx.types.f32, tcx.types.i32], tcx.types.f32),
sym::powif64 => (0, vec![tcx.types.f64, tcx.types.i32], tcx.types.f64),
sym::sinf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::sinf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::cosf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::cosf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::powf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
sym::powf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
sym::expf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::expf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::exp2f32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::exp2f64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::logf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::logf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::log10f32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::log10f64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::log2f32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::log2f64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::fmaf32 => (0, vec![tcx.types.f32, tcx.types.f32, tcx.types.f32], tcx.types.f32),
sym::fmaf64 => (0, vec![tcx.types.f64, tcx.types.f64, tcx.types.f64], tcx.types.f64),
sym::fabsf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::fabsf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::minnumf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
sym::minnumf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
sym::maxnumf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
sym::maxnumf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
sym::copysignf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
sym::copysignf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
sym::floorf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::floorf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::ceilf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::ceilf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::truncf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::truncf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::rintf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::rintf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::nearbyintf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::nearbyintf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::roundf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::roundf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::roundevenf32 => (0, vec![tcx.types.f32], tcx.types.f32),
sym::roundevenf64 => (0, vec![tcx.types.f64], tcx.types.f64),
sym::sqrtf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::sqrtf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::powif32 => (0, 0, vec![tcx.types.f32, tcx.types.i32], tcx.types.f32),
sym::powif64 => (0, 0, vec![tcx.types.f64, tcx.types.i32], tcx.types.f64),
sym::sinf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::sinf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::cosf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::cosf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::powf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
sym::powf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
sym::expf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::expf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::exp2f32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::exp2f64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::logf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::logf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::log10f32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::log10f64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::log2f32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::log2f64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::fmaf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32, tcx.types.f32], tcx.types.f32),
sym::fmaf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64, tcx.types.f64], tcx.types.f64),
sym::fabsf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::fabsf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::minnumf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
sym::minnumf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
sym::maxnumf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
sym::maxnumf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
sym::copysignf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
sym::copysignf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
sym::floorf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::floorf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::ceilf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::ceilf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::truncf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::truncf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::rintf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::rintf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::nearbyintf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::nearbyintf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::roundf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::roundf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::roundevenf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
sym::roundevenf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::volatile_load | sym::unaligned_volatile_load => {
(1, vec![Ty::new_imm_ptr(tcx, param(0))], param(0))
(1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], param(0))
}
sym::volatile_store | sym::unaligned_volatile_store => {
(1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], Ty::new_unit(tcx))
(1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], Ty::new_unit(tcx))
}
sym::ctpop
@ -334,62 +354,66 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
| sym::cttz
| sym::cttz_nonzero
| sym::bswap
| sym::bitreverse => (1, vec![param(0)], param(0)),
| sym::bitreverse => (1, 0, vec![param(0)], param(0)),
sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
(1, vec![param(0), param(0)], Ty::new_tup(tcx, &[param(0), tcx.types.bool]))
(1, 0, vec![param(0), param(0)], Ty::new_tup(tcx, &[param(0), tcx.types.bool]))
}
sym::ptr_guaranteed_cmp => (
1,
0,
vec![Ty::new_imm_ptr(tcx, param(0)), Ty::new_imm_ptr(tcx, param(0))],
tcx.types.u8,
),
sym::const_allocate => {
(0, vec![tcx.types.usize, tcx.types.usize], Ty::new_mut_ptr(tcx, tcx.types.u8))
(0, 1, vec![tcx.types.usize, tcx.types.usize], Ty::new_mut_ptr(tcx, tcx.types.u8))
}
sym::const_deallocate => (
0,
1,
vec![Ty::new_mut_ptr(tcx, tcx.types.u8), tcx.types.usize, tcx.types.usize],
Ty::new_unit(tcx),
),
sym::ptr_offset_from => (
1,
0,
vec![Ty::new_imm_ptr(tcx, param(0)), Ty::new_imm_ptr(tcx, param(0))],
tcx.types.isize,
),
sym::ptr_offset_from_unsigned => (
1,
0,
vec![Ty::new_imm_ptr(tcx, param(0)), Ty::new_imm_ptr(tcx, param(0))],
tcx.types.usize,
),
sym::unchecked_div | sym::unchecked_rem | sym::exact_div => {
(1, vec![param(0), param(0)], param(0))
(1, 0, vec![param(0), param(0)], param(0))
}
sym::unchecked_shl | sym::unchecked_shr | sym::rotate_left | sym::rotate_right => {
(1, vec![param(0), param(0)], param(0))
(1, 0, vec![param(0), param(0)], param(0))
}
sym::unchecked_add | sym::unchecked_sub | sym::unchecked_mul => {
(1, vec![param(0), param(0)], param(0))
(1, 0, vec![param(0), param(0)], param(0))
}
sym::wrapping_add | sym::wrapping_sub | sym::wrapping_mul => {
(1, vec![param(0), param(0)], param(0))
(1, 0, vec![param(0), param(0)], param(0))
}
sym::saturating_add | sym::saturating_sub => (1, vec![param(0), param(0)], param(0)),
sym::saturating_add | sym::saturating_sub => (1, 0, vec![param(0), param(0)], param(0)),
sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
(1, vec![param(0), param(0)], param(0))
(1, 0, vec![param(0), param(0)], param(0))
}
sym::float_to_int_unchecked => (2, vec![param(0)], param(1)),
sym::float_to_int_unchecked => (2, 0, vec![param(0)], param(1)),
sym::assume => (0, vec![tcx.types.bool], Ty::new_unit(tcx)),
sym::likely => (0, vec![tcx.types.bool], tcx.types.bool),
sym::unlikely => (0, vec![tcx.types.bool], tcx.types.bool),
sym::assume => (0, 0, vec![tcx.types.bool], Ty::new_unit(tcx)),
sym::likely => (0, 0, vec![tcx.types.bool], tcx.types.bool),
sym::unlikely => (0, 0, vec![tcx.types.bool], tcx.types.bool),
sym::read_via_copy => (1, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
sym::read_via_copy => (1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
sym::write_via_move => {
(1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], Ty::new_unit(tcx))
(1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], Ty::new_unit(tcx))
}
sym::discriminant_value => {
@ -401,6 +425,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon };
(
1,
0,
vec![Ty::new_imm_ref(
tcx,
ty::Region::new_bound(tcx, ty::INNERMOST, br),
@ -427,6 +452,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
Abi::Rust,
));
(
0,
0,
vec![Ty::new_fn_ptr(tcx, try_fn_ty), mut_u8, Ty::new_fn_ptr(tcx, catch_fn_ty)],
tcx.types.i32,
@ -434,61 +460,66 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
}
sym::va_start | sym::va_end => match mk_va_list_ty(hir::Mutability::Mut) {
Some((va_list_ref_ty, _)) => (0, vec![va_list_ref_ty], Ty::new_unit(tcx)),
Some((va_list_ref_ty, _)) => (0, 0, vec![va_list_ref_ty], Ty::new_unit(tcx)),
None => bug!("`va_list` language item needed for C-variadic intrinsics"),
},
sym::va_copy => match mk_va_list_ty(hir::Mutability::Not) {
Some((va_list_ref_ty, va_list_ty)) => {
let va_list_ptr_ty = Ty::new_mut_ptr(tcx, va_list_ty);
(0, vec![va_list_ptr_ty, va_list_ref_ty], Ty::new_unit(tcx))
(0, 0, vec![va_list_ptr_ty, va_list_ref_ty], Ty::new_unit(tcx))
}
None => bug!("`va_list` language item needed for C-variadic intrinsics"),
},
sym::va_arg => match mk_va_list_ty(hir::Mutability::Mut) {
Some((va_list_ref_ty, _)) => (1, vec![va_list_ref_ty], param(0)),
Some((va_list_ref_ty, _)) => (1, 0, vec![va_list_ref_ty], param(0)),
None => bug!("`va_list` language item needed for C-variadic intrinsics"),
},
sym::nontemporal_store => {
(1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], Ty::new_unit(tcx))
(1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], Ty::new_unit(tcx))
}
sym::raw_eq => {
let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon };
let param_ty =
Ty::new_imm_ref(tcx, ty::Region::new_bound(tcx, ty::INNERMOST, br), param(0));
(1, vec![param_ty; 2], tcx.types.bool)
(1, 0, vec![param_ty; 2], tcx.types.bool)
}
sym::black_box => (1, vec![param(0)], param(0)),
sym::black_box => (1, 0, vec![param(0)], param(0)),
sym::is_val_statically_known => (1, vec![param(0)], tcx.types.bool),
sym::is_val_statically_known => (1, 1, vec![param(0)], tcx.types.bool),
sym::const_eval_select => (4, vec![param(0), param(1), param(2)], param(3)),
sym::const_eval_select => (4, 0, vec![param(0), param(1), param(2)], param(3)),
sym::vtable_size | sym::vtable_align => {
(0, vec![Ty::new_imm_ptr(tcx, Ty::new_unit(tcx))], tcx.types.usize)
(0, 0, vec![Ty::new_imm_ptr(tcx, Ty::new_unit(tcx))], tcx.types.usize)
}
sym::debug_assertions => (0, Vec::new(), tcx.types.bool),
sym::debug_assertions => (0, 1, Vec::new(), tcx.types.bool),
other => {
tcx.dcx().emit_err(UnrecognizedIntrinsicFunction { span: it.span, name: other });
tcx.dcx().emit_err(UnrecognizedIntrinsicFunction { span, name: other });
return;
}
};
(n_tps, 0, inputs, output, unsafety)
(n_tps, 0, n_cts, inputs, output, unsafety)
};
let sig = tcx.mk_fn_sig(inputs, output, false, unsafety, Abi::RustIntrinsic);
let sig = tcx.mk_fn_sig(inputs, output, false, unsafety, abi);
let sig = ty::Binder::bind_with_vars(sig, bound_vars);
equate_intrinsic_type(tcx, it, n_tps, n_lts, 0, sig)
equate_intrinsic_type(tcx, span, intrinsic_id, n_tps, n_lts, n_cts, sig)
}
/// Type-check `extern "platform-intrinsic" { ... }` functions.
pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
let generics = tcx.generics_of(it.owner_id);
pub fn check_platform_intrinsic_type(
tcx: TyCtxt<'_>,
intrinsic_id: LocalDefId,
span: Span,
name: Symbol,
) {
let generics = tcx.generics_of(intrinsic_id);
let param = |n| {
if let Some(&ty::GenericParamDef {
name, kind: ty::GenericParamDefKind::Type { .. }, ..
@ -496,12 +527,10 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>)
{
Ty::new_param(tcx, n, name)
} else {
Ty::new_error_with_message(tcx, tcx.def_span(it.owner_id), "expected param")
Ty::new_error_with_message(tcx, span, "expected param")
}
};
let name = it.ident.name;
let (n_tps, n_cts, inputs, output) = match name {
sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
(2, 0, vec![param(0), param(0)], param(1))
@ -574,12 +603,12 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>)
sym::simd_shuffle_generic => (2, 1, vec![param(0), param(0)], param(1)),
_ => {
let msg = format!("unrecognized platform-specific intrinsic function: `{name}`");
tcx.dcx().span_err(it.span, msg);
tcx.dcx().span_err(span, msg);
return;
}
};
let sig = tcx.mk_fn_sig(inputs, output, false, hir::Unsafety::Unsafe, Abi::PlatformIntrinsic);
let sig = ty::Binder::dummy(sig);
equate_intrinsic_type(tcx, it, n_tps, 0, n_cts, sig)
equate_intrinsic_type(tcx, span, intrinsic_id, n_tps, 0, n_cts, sig)
}

View file

@ -74,7 +74,7 @@
pub use check::check_abi;
use std::num::NonZeroU32;
use std::num::NonZero;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::ErrorGuaranteed;
@ -270,7 +270,7 @@ fn default_body_is_unstable(
item_did: DefId,
feature: Symbol,
reason: Option<Symbol>,
issue: Option<NonZeroU32>,
issue: Option<NonZero<u32>>,
) {
let missing_item_name = tcx.associated_item(item_did).name;
let (mut some_note, mut none_note, mut reason_str) = (false, false, String::new());

View file

@ -1651,7 +1651,7 @@ fn compute_sig_of_foreign_fn_decl<'tcx>(
abi: abi::Abi,
) -> ty::PolyFnSig<'tcx> {
let unsafety = if abi == abi::Abi::RustIntrinsic {
intrinsic_operation_unsafety(tcx, def_id.to_def_id())
intrinsic_operation_unsafety(tcx, def_id)
} else {
hir::Unsafety::Unsafe
};

View file

@ -63,6 +63,7 @@
#![feature(rustdoc_internals)]
#![allow(internal_features)]
#![feature(control_flow_enum)]
#![feature(generic_nonzero)]
#![feature(if_let_guard)]
#![feature(is_sorted)]
#![feature(iter_intersperse)]

View file

@ -540,8 +540,7 @@ fn confirm_builtin_call(
if let Some(def_id) = def_id
&& self.tcx.def_kind(def_id) == hir::def::DefKind::Fn
&& self.tcx.is_intrinsic(def_id)
&& self.tcx.item_name(def_id) == sym::const_eval_select
&& matches!(self.tcx.intrinsic(def_id), Some(sym::const_eval_select))
{
let fn_sig = self.resolve_vars_if_possible(fn_sig);
for idx in 0..=1 {

View file

@ -867,7 +867,7 @@ fn coerce_from_fn_item(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
let a_sig = a.fn_sig(self.tcx);
if let ty::FnDef(def_id, _) = *a.kind() {
// Intrinsics are not coercible to function pointers
if self.tcx.is_intrinsic(def_id) {
if self.tcx.intrinsic(def_id).is_some() {
return Err(TypeError::IntrinsicCast);
}

View file

@ -109,6 +109,93 @@ fn is_slice_ty(&self, ty: Ty<'tcx>, span: Span) -> bool {
self.autoderef(span, ty).any(|(ty, _)| matches!(ty.kind(), ty::Slice(..) | ty::Array(..)))
}
fn impl_into_iterator_should_be_iterator(
&self,
ty: Ty<'tcx>,
span: Span,
unsatisfied_predicates: &Vec<(
ty::Predicate<'_>,
Option<ty::Predicate<'_>>,
Option<ObligationCause<'_>>,
)>,
) -> bool {
fn predicate_bounds_generic_param<'tcx>(
predicate: ty::Predicate<'_>,
generics: &'tcx ty::Generics,
generic_param: &ty::GenericParamDef,
tcx: TyCtxt<'tcx>,
) -> bool {
if let ty::PredicateKind::Clause(ty::ClauseKind::Trait(trait_pred)) =
predicate.kind().as_ref().skip_binder()
{
let ty::TraitPredicate { trait_ref: ty::TraitRef { args, .. }, .. } = trait_pred;
if args.is_empty() {
return false;
}
let Some(arg_ty) = args[0].as_type() else {
return false;
};
let ty::Param(param) = arg_ty.kind() else {
return false;
};
// Is `generic_param` the same as the arg for this trait predicate?
generic_param.index == generics.type_param(&param, tcx).index
} else {
false
}
}
fn is_iterator_predicate(predicate: ty::Predicate<'_>, tcx: TyCtxt<'_>) -> bool {
if let ty::PredicateKind::Clause(ty::ClauseKind::Trait(trait_pred)) =
predicate.kind().as_ref().skip_binder()
{
tcx.is_diagnostic_item(sym::Iterator, trait_pred.trait_ref.def_id)
} else {
false
}
}
// Does the `ty` implement `IntoIterator`?
let Some(into_iterator_trait) = self.tcx.get_diagnostic_item(sym::IntoIterator) else {
return false;
};
let trait_ref = ty::TraitRef::new(self.tcx, into_iterator_trait, [ty]);
let cause = ObligationCause::new(span, self.body_id, ObligationCauseCode::MiscObligation);
let obligation = Obligation::new(self.tcx, cause, self.param_env, trait_ref);
if !self.predicate_must_hold_modulo_regions(&obligation) {
return false;
}
match ty.kind() {
ty::Param(param) => {
let generics = self.tcx.generics_of(self.body_id);
let generic_param = generics.type_param(&param, self.tcx);
for unsatisfied in unsatisfied_predicates.iter() {
// The parameter implements `IntoIterator`,
// but the method that was called requires it to implement `Iterator`
if predicate_bounds_generic_param(
unsatisfied.0,
generics,
generic_param,
self.tcx,
) && is_iterator_predicate(unsatisfied.0, self.tcx)
{
return true;
}
}
}
ty::Alias(ty::AliasKind::Opaque, _) => {
for unsatisfied in unsatisfied_predicates.iter() {
if is_iterator_predicate(unsatisfied.0, self.tcx) {
return true;
}
}
}
_ => return false,
}
false
}
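
As an illustration of the user code this new helper targets (a hypothetical example, not part of the diff): `Vec<T>` implements `IntoIterator` but not `Iterator`, so calling an `Iterator` method on it directly fails, and the new diagnostic suggests inserting `.into_iter()` first.

fn main() {
    let v = vec![1, 2, 3];
    // error[E0599]: `Vec<{integer}>` is not an iterator
    // let n = v.count();
    let n = v.into_iter().count(); // the suggested fix
    assert_eq!(n, 3);
}
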
#[instrument(level = "debug", skip(self))]
pub fn report_method_error(
&self,
@ -555,6 +642,15 @@ pub fn report_no_match_method_error(
"`count` is defined on `{iterator_trait}`, which `{rcvr_ty}` does not implement"
));
}
} else if self.impl_into_iterator_should_be_iterator(rcvr_ty, span, unsatisfied_predicates)
{
err.span_label(span, format!("`{rcvr_ty}` is not an iterator"));
err.multipart_suggestion_verbose(
"call `.into_iter()` first",
vec![(span.shrink_to_lo(), format!("into_iter()."))],
Applicability::MaybeIncorrect,
);
return Some(err);
} else if !unsatisfied_predicates.is_empty() && matches!(rcvr_ty.kind(), ty::Param(_)) {
// We special case the situation where we are looking for `_` in
// `<TypeParam as _>::method` because otherwise the machinery will look for blanket

View file

@ -316,7 +316,7 @@ pub(super) fn suggest_function_pointers(
if !self.same_type_modulo_infer(*found_sig, *expected_sig)
|| !sig.is_suggestable(self.tcx, true)
|| self.tcx.is_intrinsic(*did)
|| self.tcx.intrinsic(*did).is_some()
{
return;
}
@ -348,8 +348,8 @@ pub(super) fn suggest_function_pointers(
if !self.same_type_modulo_infer(*found_sig, *expected_sig)
|| !found_sig.is_suggestable(self.tcx, true)
|| !expected_sig.is_suggestable(self.tcx, true)
|| self.tcx.is_intrinsic(*did1)
|| self.tcx.is_intrinsic(*did2)
|| self.tcx.intrinsic(*did1).is_some()
|| self.tcx.intrinsic(*did2).is_some()
{
return;
}

View file

@ -1,5 +1,6 @@
#![feature(decl_macro)]
#![feature(error_iter)]
#![feature(generic_nonzero)]
#![feature(lazy_cell)]
#![feature(let_chains)]
#![feature(thread_spawn_unchecked)]

View file

@ -20,7 +20,7 @@
use rustc_target::spec::{CodeModel, LinkerFlavorCli, MergeFunctions, PanicStrategy, RelocModel};
use rustc_target::spec::{RelroLevel, SanitizerSet, SplitDebuginfo, StackProtector, TlsModel};
use std::collections::{BTreeMap, BTreeSet};
use std::num::NonZeroUsize;
use std::num::NonZero;
use std::path::{Path, PathBuf};
use std::sync::Arc;
@ -827,7 +827,7 @@ macro_rules! tracked {
tracked!(tls_model, Some(TlsModel::GeneralDynamic));
tracked!(translate_remapped_path_to_local_path, false);
tracked!(trap_unreachable, Some(false));
tracked!(treat_err_as_bug, NonZeroUsize::new(1));
tracked!(treat_err_as_bug, NonZero::new(1));
tracked!(tune_cpu, Some(String::from("abc")));
tracked!(uninit_const_chunk_threshold, 123);
tracked!(unleash_the_miri_inside_of_you, true);

View file

@ -107,7 +107,7 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
use rustc_query_impl::QueryCtxt;
use rustc_query_system::query::{deadlock, QueryContext};
let registry = sync::Registry::new(std::num::NonZeroUsize::new(threads).unwrap());
let registry = sync::Registry::new(std::num::NonZero::new(threads).unwrap());
if !sync::is_dyn_thread_safe() {
return run_in_thread_with_globals(edition, || {

View file

@ -1227,7 +1227,7 @@ fn get_transmute_from_to<'tcx>(
}
fn def_id_is_transmute(cx: &LateContext<'_>, def_id: DefId) -> bool {
cx.tcx.is_intrinsic(def_id) && cx.tcx.item_name(def_id) == sym::transmute
matches!(cx.tcx.intrinsic(def_id), Some(sym::transmute))
}
}
}

View file

@ -31,6 +31,7 @@
#![feature(array_windows)]
#![feature(box_patterns)]
#![feature(control_flow_enum)]
#![feature(generic_nonzero)]
#![feature(if_let_guard)]
#![feature(iter_order_by)]
#![feature(let_chains)]

View file

@ -1,7 +1,6 @@
#![allow(rustc::diagnostic_outside_of_impl)]
#![allow(rustc::untranslatable_diagnostic)]
use std::num::NonZeroU32;
use std::num::NonZero;
use crate::errors::RequestedLevel;
use crate::fluent_generated as fluent;
@ -402,7 +401,7 @@ pub struct BuiltinInternalFeatures {
#[derive(Subdiagnostic)]
#[note(lint_note)]
pub struct BuiltinFeatureIssueNote {
pub n: NonZeroU32,
pub n: NonZero<u32>,
}
pub struct BuiltinUnpermittedTypeInit<'a> {

View file

@ -5,6 +5,7 @@
#![feature(decl_macro)]
#![feature(extract_if)]
#![feature(coroutines)]
#![feature(generic_nonzero)]
#![feature(iter_from_coroutine)]
#![feature(let_chains)]
#![feature(if_let_guard)]

View file

@ -327,7 +327,7 @@ fn map_encoded_cnum_to_current(&self, cnum: CrateNum) -> CrateNum {
}
#[inline]
fn read_lazy_offset_then<T>(&mut self, f: impl Fn(NonZeroUsize) -> T) -> T {
fn read_lazy_offset_then<T>(&mut self, f: impl Fn(NonZero<usize>) -> T) -> T {
let distance = self.read_usize();
let position = match self.lazy_state {
LazyState::NoNode => bug!("read_lazy_with_meta: outside of a metadata node"),
@ -338,7 +338,7 @@ fn read_lazy_offset_then<T>(&mut self, f: impl Fn(NonZeroUsize) -> T) -> T {
}
LazyState::Previous(last_pos) => last_pos.get() + distance,
};
let position = NonZeroUsize::new(position).unwrap();
let position = NonZero::new(position).unwrap();
self.lazy_state = LazyState::Previous(position);
f(position)
}
@ -685,15 +685,15 @@ pub(crate) fn is_compatible(&self) -> bool {
}
pub(crate) fn get_rustc_version(&self) -> String {
LazyValue::<String>::from_position(NonZeroUsize::new(METADATA_HEADER.len() + 8).unwrap())
LazyValue::<String>::from_position(NonZero::new(METADATA_HEADER.len() + 8).unwrap())
.decode(self)
}
fn root_pos(&self) -> NonZeroUsize {
fn root_pos(&self) -> NonZero<usize> {
let offset = METADATA_HEADER.len();
let pos_bytes = self.blob()[offset..][..8].try_into().unwrap();
let pos = u64::from_le_bytes(pos_bytes);
NonZeroUsize::new(pos as usize).unwrap()
NonZero::new(pos as usize).unwrap()
}
pub(crate) fn get_header(&self) -> CrateHeader {
@ -1749,8 +1749,8 @@ fn get_attr_flags(self, index: DefIndex) -> AttrFlags {
self.root.tables.attr_flags.get(self, index)
}
fn get_is_intrinsic(self, index: DefIndex) -> bool {
self.root.tables.is_intrinsic.get(self, index)
fn get_intrinsic(self, index: DefIndex) -> Option<Symbol> {
self.root.tables.intrinsic.get(self, index).map(|d| d.decode(self))
}
fn get_doc_link_resolutions(self, index: DefIndex) -> DocLinkResMap {

View file

@ -356,7 +356,7 @@ fn into_args(self) -> (DefId, SimplifiedType) {
cdata.get_stability_implications(tcx).iter().copied().collect()
}
stripped_cfg_items => { cdata.get_stripped_cfg_items(cdata.cnum, tcx) }
is_intrinsic => { cdata.get_is_intrinsic(def_id.index) }
intrinsic => { cdata.get_intrinsic(def_id.index) }
defined_lang_items => { cdata.get_lang_items(tcx) }
diagnostic_items => { cdata.get_diagnostic_items() }
missing_lang_items => { cdata.get_missing_lang_items(tcx) }

View file

@ -421,7 +421,7 @@ macro_rules! record_defaulted_array {
}
impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
fn emit_lazy_distance(&mut self, position: NonZeroUsize) {
fn emit_lazy_distance(&mut self, position: NonZero<usize>) {
let pos = position.get();
let distance = match self.lazy_state {
LazyState::NoNode => bug!("emit_lazy_distance: outside of a metadata node"),
@ -439,7 +439,7 @@ fn emit_lazy_distance(&mut self, position: NonZeroUsize) {
position.get() - last_pos.get()
}
};
self.lazy_state = LazyState::Previous(NonZeroUsize::new(pos).unwrap());
self.lazy_state = LazyState::Previous(NonZero::new(pos).unwrap());
self.emit_usize(distance);
}
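
A minimal model of the relative-offset scheme used by `emit_lazy_distance` and `read_lazy_offset_then` (simplified, not the real rmeta types): the first `Lazy` of a node is encoded backwards from the node start, every later one forwards from its predecessor, so only small distances are written.

use std::num::NonZero;

enum LazyState {
    NoNode,
    NodeStart(NonZero<usize>),
    Previous(NonZero<usize>),
}

fn decode_position(state: &mut LazyState, distance: usize) -> NonZero<usize> {
    let position = match *state {
        LazyState::NoNode => panic!("outside of a metadata node"),
        // the first Lazy points backwards from the node start
        LazyState::NodeStart(start) => start.get() - distance,
        // later Lazys point forwards from the previous one
        LazyState::Previous(last) => last.get() + distance,
    };
    let position = NonZero::new(position).unwrap();
    *state = LazyState::Previous(position);
    position
}

fn main() {
    let mut state = LazyState::NodeStart(NonZero::new(100).unwrap());
    assert_eq!(decode_position(&mut state, 10).get(), 90);
    assert_eq!(decode_position(&mut state, 25).get(), 115);
}
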
@ -447,7 +447,7 @@ fn lazy<T: ParameterizedOverTcx, B: Borrow<T::Value<'tcx>>>(&mut self, value: B)
where
T::Value<'tcx>: Encodable<EncodeContext<'a, 'tcx>>,
{
let pos = NonZeroUsize::new(self.position()).unwrap();
let pos = NonZero::new(self.position()).unwrap();
assert_eq!(self.lazy_state, LazyState::NoNode);
self.lazy_state = LazyState::NodeStart(pos);
@ -466,7 +466,7 @@ fn lazy_array<T: ParameterizedOverTcx, I: IntoIterator<Item = B>, B: Borrow<T::V
where
T::Value<'tcx>: Encodable<EncodeContext<'a, 'tcx>>,
{
let pos = NonZeroUsize::new(self.position()).unwrap();
let pos = NonZero::new(self.position()).unwrap();
assert_eq!(self.lazy_state, LazyState::NoNode);
self.lazy_state = LazyState::NodeStart(pos);
@ -1409,7 +1409,9 @@ fn encode_def_ids(&mut self) {
if let DefKind::Fn | DefKind::AssocFn = def_kind {
self.tables.asyncness.set_some(def_id.index, tcx.asyncness(def_id));
record_array!(self.tables.fn_arg_names[def_id] <- tcx.fn_arg_names(def_id));
self.tables.is_intrinsic.set(def_id.index, tcx.is_intrinsic(def_id));
if let Some(name) = tcx.intrinsic(def_id) {
record!(self.tables.intrinsic[def_id] <- name);
}
}
if let DefKind::TyParam = def_kind {
let default = self.tcx.object_lifetime_default(def_id);

View file

@ -37,7 +37,7 @@
use rustc_target::spec::{PanicStrategy, TargetTriple};
use std::marker::PhantomData;
use std::num::NonZeroUsize;
use std::num::NonZero;
use decoder::DecodeContext;
pub(crate) use decoder::{CrateMetadata, CrateNumMap, MetadataBlob};
@ -83,7 +83,7 @@ pub(crate) fn rustc_version(cfg_version: &'static str) -> String {
/// order than they were encoded in.
#[must_use]
struct LazyValue<T> {
position: NonZeroUsize,
position: NonZero<usize>,
_marker: PhantomData<fn() -> T>,
}
@ -92,7 +92,7 @@ impl<T: ParameterizedOverTcx> ParameterizedOverTcx for LazyValue<T> {
}
impl<T> LazyValue<T> {
fn from_position(position: NonZeroUsize) -> LazyValue<T> {
fn from_position(position: NonZero<usize>) -> LazyValue<T> {
LazyValue { position, _marker: PhantomData }
}
}
@ -108,7 +108,7 @@ fn from_position(position: NonZeroUsize) -> LazyValue<T> {
/// the minimal distance is the length of the sequence, i.e.
/// it's assumed there's no 0-byte element in the sequence.
struct LazyArray<T> {
position: NonZeroUsize,
position: NonZero<usize>,
num_elems: usize,
_marker: PhantomData<fn() -> T>,
}
@ -119,12 +119,12 @@ impl<T: ParameterizedOverTcx> ParameterizedOverTcx for LazyArray<T> {
impl<T> Default for LazyArray<T> {
fn default() -> LazyArray<T> {
LazyArray::from_position_and_num_elems(NonZeroUsize::new(1).unwrap(), 0)
LazyArray::from_position_and_num_elems(NonZero::new(1).unwrap(), 0)
}
}
impl<T> LazyArray<T> {
fn from_position_and_num_elems(position: NonZeroUsize, num_elems: usize) -> LazyArray<T> {
fn from_position_and_num_elems(position: NonZero<usize>, num_elems: usize) -> LazyArray<T> {
LazyArray { position, num_elems, _marker: PhantomData }
}
}
@ -135,7 +135,7 @@ fn from_position_and_num_elems(position: NonZeroUsize, num_elems: usize) -> Lazy
/// `LazyArray<T>`, but without requiring encoding or decoding all the values
/// eagerly and in-order.
struct LazyTable<I, T> {
position: NonZeroUsize,
position: NonZero<usize>,
/// The encoded size of the elements of a table is selected at runtime to drop
/// trailing zeroes. This is the number of bytes used for each table element.
width: usize,
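
For intuition, a sketch of the width selection described above (simplified; the real encoder decides per table while writing): every element is stored in the smallest byte width that fits the largest value, so high-order zero bytes are never written.

fn table_width(values: &[u64]) -> usize {
    let max = values.iter().copied().max().unwrap_or(0);
    let bits = 64 - max.leading_zeros() as usize;
    bits.div_ceil(8).max(1) // bytes per element, at least one
}

fn main() {
    assert_eq!(table_width(&[1, 2, 3]), 1);
    assert_eq!(table_width(&[0x1234, 7]), 2);
}
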
@ -150,7 +150,7 @@ impl<I: 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for LazyTable<I,
impl<I, T> LazyTable<I, T> {
fn from_position_and_encoded_size(
position: NonZeroUsize,
position: NonZero<usize>,
width: usize,
len: usize,
) -> LazyTable<I, T> {
@ -187,11 +187,11 @@ enum LazyState {
/// Inside a metadata node, and before any `Lazy`s.
/// The position is that of the node itself.
NodeStart(NonZeroUsize),
NodeStart(NonZero<usize>),
/// Inside a metadata node, after a previous `Lazy`.
/// The position is where that previous `Lazy` would start.
Previous(NonZeroUsize),
Previous(NonZero<usize>),
}
type SyntaxContextTable = LazyTable<u32, Option<LazyValue<SyntaxContextData>>>;
@ -375,7 +375,7 @@ fn encode(&self, buf: &mut FileEncoder) -> LazyTables {
define_tables! {
- defaulted:
is_intrinsic: Table<DefIndex, bool>,
intrinsic: Table<DefIndex, Option<LazyValue<Symbol>>>,
is_macro_rules: Table<DefIndex, bool>,
is_type_alias_impl_trait: Table<DefIndex, bool>,
type_alias_is_lazy: Table<DefIndex, bool>,

View file

@ -339,7 +339,7 @@ impl<T> FixedSizeEncoding for Option<LazyValue<T>> {
#[inline]
fn from_bytes(b: &[u8; 8]) -> Self {
let position = NonZeroUsize::new(u64::from_bytes(b) as usize)?;
let position = NonZero::new(u64::from_bytes(b) as usize)?;
Some(LazyValue::from_position(position))
}
@ -366,7 +366,7 @@ impl<T> LazyArray<T> {
}
fn from_bytes_impl(position: &[u8; 8], meta: &[u8; 8]) -> Option<LazyArray<T>> {
let position = NonZeroUsize::new(u64::from_bytes(position) as usize)?;
let position = NonZero::new(u64::from_bytes(position) as usize)?;
let len = u64::from_bytes(meta) as usize;
Some(LazyArray::from_position_and_num_elems(position, len))
}
@ -497,7 +497,7 @@ pub(crate) fn encode(&self, buf: &mut FileEncoder) -> LazyTable<I, T> {
}
LazyTable::from_position_and_encoded_size(
NonZeroUsize::new(pos).unwrap(),
NonZero::new(pos).unwrap(),
width,
self.blocks.len(),
)

View file

@ -34,6 +34,7 @@
#![feature(discriminant_kind)]
#![feature(exhaustive_patterns)]
#![feature(coroutines)]
#![feature(generic_nonzero)]
#![feature(if_let_guard)]
#![feature(inline_const)]
#![feature(iter_from_coroutine)]

View file

@ -21,7 +21,7 @@
use rustc_session::Session;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::Span;
use std::num::NonZeroU32;
use std::num::NonZero;
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum StabilityLevel {
@ -102,7 +102,7 @@ pub fn report_unstable(
sess: &Session,
feature: Symbol,
reason: Option<Symbol>,
issue: Option<NonZeroU32>,
issue: Option<NonZero<u32>>,
suggestion: Option<(Span, String, String, Applicability)>,
is_soft: bool,
span: Span,
@ -235,7 +235,7 @@ pub enum EvalResult {
Deny {
feature: Symbol,
reason: Option<Symbol>,
issue: Option<NonZeroU32>,
issue: Option<NonZero<u32>>,
suggestion: Option<(Span, String, String, Applicability)>,
is_soft: bool,
},
@ -433,7 +433,7 @@ pub fn eval_stability_allow_unstable(
// the `-Z force-unstable-if-unmarked` flag present (we're
// compiling a compiler crate), then let this missing feature
// annotation slide.
if feature == sym::rustc_private && issue == NonZeroU32::new(27812) {
if feature == sym::rustc_private && issue == NonZero::new(27812) {
if self.sess.opts.unstable_opts.force_unstable_if_unmarked {
return EvalResult::Allow;
}

View file

@ -12,6 +12,7 @@
use rustc_session::CtfeBacktrace;
use rustc_span::{def_id::DefId, Span, DUMMY_SP};
use rustc_target::abi::{call, Align, Size, VariantIdx, WrappingRange};
use rustc_type_ir::Mutability;
use std::borrow::Cow;
use std::{any::Any, backtrace::Backtrace, fmt};
@ -367,7 +368,7 @@ pub enum UndefinedBehaviorInfo<'tcx> {
#[derive(Debug, Clone, Copy)]
pub enum PointerKind {
Ref,
Ref(Mutability),
Box,
}
@ -375,7 +376,7 @@ impl IntoDiagnosticArg for PointerKind {
fn into_diagnostic_arg(self) -> DiagnosticArgValue {
DiagnosticArgValue::Str(
match self {
Self::Ref => "ref",
Self::Ref(_) => "ref",
Self::Box => "box",
}
.into(),
@ -408,7 +409,7 @@ impl From<PointerKind> for ExpectedKind {
fn from(x: PointerKind) -> ExpectedKind {
match x {
PointerKind::Box => ExpectedKind::Box,
PointerKind::Ref => ExpectedKind::Reference,
PointerKind::Ref(_) => ExpectedKind::Reference,
}
}
}
@ -419,7 +420,7 @@ pub enum ValidationErrorKind<'tcx> {
PartialPointer,
PtrToUninhabited { ptr_kind: PointerKind, ty: Ty<'tcx> },
PtrToStatic { ptr_kind: PointerKind },
MutableRefInConst,
MutableRefInConstOrStatic,
ConstRefToMutable,
ConstRefToExtern,
MutableRefToImmutable,

View file

@ -122,7 +122,7 @@ macro_rules! throw_ub_custom {
use std::fmt;
use std::io;
use std::io::{Read, Write};
use std::num::{NonZeroU32, NonZeroU64};
use std::num::NonZero;
use std::sync::atomic::{AtomicU32, Ordering};
use rustc_ast::LitKind;
@ -206,7 +206,7 @@ pub enum LitToConstError {
}
#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct AllocId(pub NonZeroU64);
pub struct AllocId(pub NonZero<u64>);
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types.
@ -261,7 +261,7 @@ pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>>(
}
// Used to avoid infinite recursion when decoding cyclic allocations.
type DecodingSessionId = NonZeroU32;
type DecodingSessionId = NonZero<u32>;
#[derive(Clone)]
enum State {
@ -501,7 +501,7 @@ pub(crate) fn new() -> Self {
AllocMap {
alloc_map: Default::default(),
dedup: Default::default(),
next_id: AllocId(NonZeroU64::new(1).unwrap()),
next_id: AllocId(NonZero::new(1).unwrap()),
}
}
fn reserve(&mut self) -> AllocId {

View file

@ -3,7 +3,7 @@
use rustc_macros::HashStable;
use rustc_target::abi::{HasDataLayout, Size};
use std::{fmt, num::NonZeroU64};
use std::{fmt, num::NonZero};
////////////////////////////////////////////////////////////////////////////////
// Pointer arithmetic
@ -129,7 +129,7 @@ pub trait Provenance: Copy + fmt::Debug + 'static {
/// The type of provenance in the compile-time interpreter.
/// This is a packed representation of an `AllocId` and an `immutable: bool`.
#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct CtfeProvenance(NonZeroU64);
pub struct CtfeProvenance(NonZero<u64>);
impl From<AllocId> for CtfeProvenance {
fn from(value: AllocId) -> Self {
@ -155,7 +155,7 @@ impl CtfeProvenance {
/// Returns the `AllocId` of this provenance.
#[inline(always)]
pub fn alloc_id(self) -> AllocId {
AllocId(NonZeroU64::new(self.0.get() & !IMMUTABLE_MASK).unwrap())
AllocId(NonZero::new(self.0.get() & !IMMUTABLE_MASK).unwrap())
}
/// Returns whether this provenance is immutable.

View file

@ -241,6 +241,7 @@ impl EraseType for $ty {
Option<rustc_target::abi::FieldIdx>,
Option<rustc_target::spec::PanicStrategy>,
Option<usize>,
Option<rustc_span::Symbol>,
Result<(), rustc_errors::ErrorGuaranteed>,
Result<(), rustc_middle::traits::query::NoSolution>,
Result<rustc_middle::traits::EvaluationResult, rustc_middle::traits::OverflowError>,

View file

@ -1760,8 +1760,8 @@
separate_provide_extern
}
/// Whether the function is an intrinsic
query is_intrinsic(def_id: DefId) -> bool {
desc { |tcx| "checking whether `{}` is an intrinsic", tcx.def_path_str(def_id) }
query intrinsic(def_id: DefId) -> Option<Symbol> {
desc { |tcx| "fetch intrinsic name if `{}` is an intrinsic", tcx.def_path_str(def_id) }
separate_provide_extern
}
/// Returns the lang items defined in another crate by loading it from metadata.

View file

@ -4,7 +4,7 @@
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use rustc_target::abi::Size;
use std::fmt;
use std::num::NonZeroU8;
use std::num::NonZero;
use crate::ty::TyCtxt;
@ -132,7 +132,7 @@ pub struct ScalarInt {
/// The first `size` bytes of `data` are the value.
/// Do not try to read fewer or more bytes than that. The remaining bytes must be 0.
data: u128,
size: NonZeroU8,
size: NonZero<u8>,
}
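
A standalone check of that invariant (illustrative only): a value fits in `size` bytes exactly when shifting those bytes away leaves zero, which is what the `size.truncate(data) == data` comparisons below rely on.

fn fits_in_bytes(data: u128, size: u8) -> bool {
    let bits = u32::from(size) * 8;
    bits >= 128 || data >> bits == 0
}

fn main() {
    assert!(fits_in_bytes(0xFF, 1)); // 255 fits in one byte
    assert!(!fits_in_bytes(0x100, 1)); // 256 does not
    assert!(fits_in_bytes(0x100, 2));
}
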
// Cannot derive these, as the derives take references to the fields, and we
@ -161,14 +161,14 @@ fn decode(d: &mut D) -> ScalarInt {
let mut data = [0u8; 16];
let size = d.read_u8();
data[..size as usize].copy_from_slice(d.read_raw_bytes(size as usize));
ScalarInt { data: u128::from_le_bytes(data), size: NonZeroU8::new(size).unwrap() }
ScalarInt { data: u128::from_le_bytes(data), size: NonZero::new(size).unwrap() }
}
}
impl ScalarInt {
pub const TRUE: ScalarInt = ScalarInt { data: 1_u128, size: NonZeroU8::new(1).unwrap() };
pub const TRUE: ScalarInt = ScalarInt { data: 1_u128, size: NonZero::new(1).unwrap() };
pub const FALSE: ScalarInt = ScalarInt { data: 0_u128, size: NonZeroU8::new(1).unwrap() };
pub const FALSE: ScalarInt = ScalarInt { data: 0_u128, size: NonZero::new(1).unwrap() };
#[inline]
pub fn size(self) -> Size {
@ -196,7 +196,7 @@ fn check_data(self) {
#[inline]
pub fn null(size: Size) -> Self {
Self { data: 0, size: NonZeroU8::new(size.bytes() as u8).unwrap() }
Self { data: 0, size: NonZero::new(size.bytes() as u8).unwrap() }
}
#[inline]
@ -208,7 +208,7 @@ pub fn is_null(self) -> bool {
pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
let data = i.into();
if size.truncate(data) == data {
Some(Self { data, size: NonZeroU8::new(size.bytes() as u8).unwrap() })
Some(Self { data, size: NonZero::new(size.bytes() as u8).unwrap() })
} else {
None
}
@ -220,7 +220,7 @@ pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
// `into` performed sign extension, we have to truncate
let truncated = size.truncate(i as u128);
if size.sign_extend(truncated) as i128 == i {
Some(Self { data: truncated, size: NonZeroU8::new(size.bytes() as u8).unwrap() })
Some(Self { data: truncated, size: NonZero::new(size.bytes() as u8).unwrap() })
} else {
None
}
@ -388,7 +388,7 @@ impl From<$ty> for ScalarInt {
fn from(u: $ty) -> Self {
Self {
data: u128::from(u),
size: NonZeroU8::new(std::mem::size_of::<$ty>() as u8).unwrap(),
size: NonZero::new(std::mem::size_of::<$ty>() as u8).unwrap(),
}
}
}
@ -427,7 +427,7 @@ fn try_from(int: ScalarInt) -> Result<Self, Size> {
impl From<char> for ScalarInt {
#[inline]
fn from(c: char) -> Self {
Self { data: c as u128, size: NonZeroU8::new(std::mem::size_of::<char>() as u8).unwrap() }
Self { data: c as u128, size: NonZero::new(std::mem::size_of::<char>() as u8).unwrap() }
}
}
@ -454,7 +454,7 @@ impl From<Single> for ScalarInt {
#[inline]
fn from(f: Single) -> Self {
// We trust apfloat to give us properly truncated data.
Self { data: f.to_bits(), size: NonZeroU8::new((Single::BITS / 8) as u8).unwrap() }
Self { data: f.to_bits(), size: NonZero::new((Single::BITS / 8) as u8).unwrap() }
}
}
@ -470,7 +470,7 @@ impl From<Double> for ScalarInt {
#[inline]
fn from(f: Double) -> Self {
// We trust apfloat to give us properly truncated data.
Self { data: f.to_bits(), size: NonZeroU8::new((Double::BITS / 8) as u8).unwrap() }
Self { data: f.to_bits(), size: NonZero::new((Double::BITS / 8) as u8).unwrap() }
}
}

View file

@ -18,7 +18,7 @@
use std::cmp::Ordering;
use std::marker::PhantomData;
use std::mem;
use std::num::NonZeroUsize;
use std::num::NonZero;
use std::ops::{ControlFlow, Deref};
use std::ptr::NonNull;
@ -143,9 +143,8 @@ fn from(value: ty::Term<'tcx>) -> Self {
impl<'tcx> GenericArg<'tcx> {
#[inline]
pub fn unpack(self) -> GenericArgKind<'tcx> {
let ptr = unsafe {
self.ptr.map_addr(|addr| NonZeroUsize::new_unchecked(addr.get() & !TAG_MASK))
};
let ptr =
unsafe { self.ptr.map_addr(|addr| NonZero::new_unchecked(addr.get() & !TAG_MASK)) };
// SAFETY: use of `Interned::new_unchecked` here is ok because these
// pointers were originally created from `Interned` types in `pack()`,
// and this is just going in the other direction.
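
A freestanding sketch of the low-bit tagging that `unpack` undoes (toy constants, not the real `GenericArg` layout): alignment keeps the low address bits zero, so they can carry a small tag that masking removes.

use std::num::NonZero;

const TAG_MASK: usize = 0b11; // assumes at least 4-byte alignment

fn pack(addr: NonZero<usize>, tag: usize) -> NonZero<usize> {
    debug_assert_eq!(addr.get() & TAG_MASK, 0);
    debug_assert!(tag <= TAG_MASK);
    NonZero::new(addr.get() | tag).unwrap()
}

fn unpack(packed: NonZero<usize>) -> (NonZero<usize>, usize) {
    // clearing the tag cannot produce zero: the untagged address
    // was non-zero and had its low bits clear
    (NonZero::new(packed.get() & !TAG_MASK).unwrap(), packed.get() & TAG_MASK)
}

fn main() {
    let addr = NonZero::new(0x1000usize).unwrap();
    assert_eq!(unpack(pack(addr, 0b10)), (addr, 0b10));
}
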

View file

@ -20,7 +20,7 @@
use std::cmp;
use std::fmt;
use std::num::NonZeroUsize;
use std::num::NonZero;
use std::ops::Bound;
pub trait IntegerExt {
@ -761,7 +761,7 @@ fn ty_and_layout_for_variant(
};
tcx.mk_layout(LayoutS {
variants: Variants::Single { index: variant_index },
fields: match NonZeroUsize::new(fields) {
fields: match NonZero::new(fields) {
Some(fields) => FieldsShape::Union(fields),
None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() },
},

View file

@ -61,7 +61,7 @@
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::mem;
use std::num::NonZeroUsize;
use std::num::NonZero;
use std::ops::ControlFlow;
use std::ptr::NonNull;
use std::{fmt, str};
@ -617,9 +617,8 @@ fn decode(d: &mut D) -> Self {
impl<'tcx> Term<'tcx> {
#[inline]
pub fn unpack(self) -> TermKind<'tcx> {
let ptr = unsafe {
self.ptr.map_addr(|addr| NonZeroUsize::new_unchecked(addr.get() & !TAG_MASK))
};
let ptr =
unsafe { self.ptr.map_addr(|addr| NonZero::new_unchecked(addr.get() & !TAG_MASK)) };
// SAFETY: use of `Interned::new_unchecked` here is ok because these
// pointers were originally created from `Interned` types in `pack()`,
// and this is just going in the other direction.

View file

@ -18,7 +18,7 @@
use rustc_index::bit_set::GrowableBitSet;
use rustc_macros::HashStable;
use rustc_session::Limit;
use rustc_span::sym;
use rustc_span::{sym, Symbol};
use rustc_target::abi::{Integer, IntegerType, Primitive, Size};
use rustc_target::spec::abi::Abi;
use smallvec::SmallVec;
@ -1552,9 +1552,15 @@ pub fn is_doc_notable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
.any(|items| items.iter().any(|item| item.has_name(sym::notable_trait)))
}
/// Determines whether an item is an intrinsic by Abi.
pub fn is_intrinsic(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
matches!(tcx.fn_sig(def_id).skip_binder().abi(), Abi::RustIntrinsic | Abi::PlatformIntrinsic)
/// Determines whether an item is an intrinsic, either by its ABI or by the `rustc_intrinsic` attribute.
pub fn intrinsic(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<Symbol> {
if matches!(tcx.fn_sig(def_id).skip_binder().abi(), Abi::RustIntrinsic | Abi::PlatformIntrinsic)
|| tcx.has_attr(def_id, sym::rustc_intrinsic)
{
Some(tcx.item_name(def_id.into()))
} else {
None
}
}
pub fn provide(providers: &mut Providers) {
@ -1562,7 +1568,7 @@ pub fn provide(providers: &mut Providers) {
reveal_opaque_types_in_bounds,
is_doc_hidden,
is_doc_notable_trait,
is_intrinsic,
intrinsic,
..*providers
}
}

View file

@ -202,8 +202,7 @@ fn from_terminator<'tcx>(
&terminator.kind
{
if let ty::FnDef(def_id, fn_args) = *func.const_.ty().kind() {
let name = tcx.item_name(def_id);
if !tcx.is_intrinsic(def_id) || name != sym::rustc_peek {
if tcx.intrinsic(def_id)? != sym::rustc_peek {
return None;
}

View file

@ -70,7 +70,7 @@ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => {
let fn_ty = self.instantiate_ty(f.const_.ty());
self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind()
&& tcx.is_intrinsic(def_id)
&& tcx.intrinsic(def_id).is_some()
{
// Don't give intrinsics the extra penalty for calls
INSTR_COST

View file

@ -289,9 +289,9 @@ fn simplify_intrinsic_assert(&self, terminator: &mut Terminator<'tcx>) {
if args.is_empty() {
return;
}
let ty = args.type_at(0);
let known_is_valid = intrinsic_assert_panics(self.tcx, self.param_env, ty, intrinsic_name);
let known_is_valid =
intrinsic_assert_panics(self.tcx, self.param_env, args[0], intrinsic_name);
match known_is_valid {
// We don't know the layout or it's not validity assertion at all, don't touch it
None => {}
@ -310,10 +310,11 @@ fn simplify_intrinsic_assert(&self, terminator: &mut Terminator<'tcx>) {
fn intrinsic_assert_panics<'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
ty: Ty<'tcx>,
arg: ty::GenericArg<'tcx>,
intrinsic_name: Symbol,
) -> Option<bool> {
let requirement = ValidityRequirement::from_intrinsic(intrinsic_name)?;
let ty = arg.expect_ty();
Some(!tcx.check_validity_requirement((requirement, param_env.and(ty))).ok()?)
}
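
For context, the user-visible behaviour these validity checks back (illustrative; the exact message can vary by toolchain): zero-initializing a type with no valid all-zero bit pattern trips `assert_zero_valid` at runtime instead of silently producing undefined behaviour.

use std::mem;

fn main() {
    // aborts with: attempted to zero-initialize type `&u8`, which is invalid
    let _r: &u8 = unsafe { mem::zeroed() };
}
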
@ -322,9 +323,8 @@ fn resolve_rust_intrinsic<'tcx>(
func_ty: Ty<'tcx>,
) -> Option<(Symbol, GenericArgsRef<'tcx>)> {
if let ty::FnDef(def_id, args) = *func_ty.kind() {
if tcx.is_intrinsic(def_id) {
return Some((tcx.item_name(def_id), args));
}
let name = tcx.intrinsic(def_id)?;
return Some((name, args));
}
None
}

View file

@ -161,8 +161,7 @@ fn remap_mir_for_const_eval_select<'tcx>(
fn_span,
..
} if let ty::FnDef(def_id, _) = *const_.ty().kind()
&& tcx.item_name(def_id) == sym::const_eval_select
&& tcx.is_intrinsic(def_id) =>
&& matches!(tcx.intrinsic(def_id), Some(sym::const_eval_select)) =>
{
let [tupled_args, called_in_const, called_at_rt]: [_; 3] =
std::mem::take(args).try_into().unwrap();

View file

@ -14,9 +14,8 @@ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
if let TerminatorKind::Call { func, args, destination, target, .. } =
&mut terminator.kind
&& let ty::FnDef(def_id, generic_args) = *func.ty(local_decls, tcx).kind()
&& tcx.is_intrinsic(def_id)
&& let Some(intrinsic_name) = tcx.intrinsic(def_id)
{
let intrinsic_name = tcx.item_name(def_id);
match intrinsic_name {
sym::unreachable => {
terminator.kind = TerminatorKind::Unreachable;

View file

@ -956,19 +956,24 @@ fn visit_instance_use<'tcx>(
if !should_codegen_locally(tcx, &instance) {
return;
}
// The intrinsics assert_inhabited, assert_zero_valid, and assert_mem_uninitialized_valid will
// be lowered in codegen to nothing or a call to panic_nounwind. So if we encounter any
// of those intrinsics, we need to include a mono item for panic_nounwind, else we may try to
// codegen a call to that function without generating code for the function itself.
if let ty::InstanceDef::Intrinsic(def_id) = instance.def {
let name = tcx.item_name(def_id);
if let Some(_requirement) = ValidityRequirement::from_intrinsic(name) {
// The intrinsics assert_inhabited, assert_zero_valid, and assert_mem_uninitialized_valid will
// be lowered in codegen to nothing or a call to panic_nounwind. So if we encounter any
// of those intrinsics, we need to include a mono item for panic_nounwind, else we may try to
// codegen a call to that function without generating code for the function itself.
let def_id = tcx.lang_items().get(LangItem::PanicNounwind).unwrap();
let panic_instance = Instance::mono(tcx, def_id);
if should_codegen_locally(tcx, &panic_instance) {
output.push(create_fn_mono_item(tcx, panic_instance, source));
}
} else if tcx.has_attr(def_id, sym::rustc_intrinsic) {
// Codegen the fallback body of intrinsics with fallback bodies
let instance = ty::Instance::new(def_id, instance.args);
if should_codegen_locally(tcx, &instance) {
output.push(create_fn_mono_item(tcx, instance, source));
}
}
}

View file

@ -8,6 +8,7 @@
#![doc(rust_logo)]
#![feature(rustdoc_internals)]
#![allow(internal_features)]
#![feature(generic_nonzero)]
#![feature(let_chains)]
#![feature(map_try_insert)]
#![feature(try_blocks)]

View file

@ -27,7 +27,7 @@
use rustc_target::spec::abi::Abi;
use std::mem::replace;
use std::num::NonZeroU32;
use std::num::NonZero;
#[derive(PartialEq)]
enum AnnotationKind {
@ -645,7 +645,7 @@ fn stability_index(tcx: TyCtxt<'_>, (): ()) -> Index {
let stability = Stability {
level: attr::StabilityLevel::Unstable {
reason: UnstableReason::Default,
issue: NonZeroU32::new(27812),
issue: NonZero::new(27812),
is_soft: false,
implied_by: None,
},

View file

@ -3,6 +3,7 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![doc(rust_logo)]
#![feature(rustdoc_internals)]
#![feature(generic_nonzero)]
#![feature(min_specialization)]
#![feature(rustc_attrs)]
#![allow(rustc::potential_query_instability, unused_parens)]

View file

@ -30,7 +30,7 @@
use rustc_serialize::Encodable;
use rustc_session::Limit;
use rustc_span::def_id::LOCAL_CRATE;
use std::num::NonZeroU64;
use std::num::NonZero;
use thin_vec::ThinVec;
#[derive(Copy, Clone)]
@ -68,10 +68,8 @@ impl QueryContext for QueryCtxt<'_> {
#[inline]
fn next_job_id(self) -> QueryJobId {
QueryJobId(
NonZeroU64::new(
self.query_system.jobs.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
)
.unwrap(),
NonZero::new(self.query_system.jobs.fetch_add(1, std::sync::atomic::Ordering::Relaxed))
.unwrap(),
)
}

View file

@ -1,5 +1,6 @@
#![feature(assert_matches)]
#![feature(core_intrinsics)]
#![feature(generic_nonzero)]
#![feature(hash_raw_entry)]
#![feature(min_specialization)]
#![feature(let_chains)]

View file

@ -11,7 +11,7 @@
use std::hash::Hash;
use std::io::Write;
use std::num::NonZeroU64;
use std::num::NonZero;
#[cfg(parallel_compiler)]
use {
@ -36,7 +36,7 @@ pub struct QueryInfo {
/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub struct QueryJobId(pub NonZeroU64);
pub struct QueryJobId(pub NonZero<u64>);
impl QueryJobId {
fn query(self, map: &QueryMap) -> QueryStackFrame {

View file

@ -18,6 +18,7 @@
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
use rustc_errors::{
codes::*, struct_span_code_err, Applicability, DiagnosticArgValue, ErrCode, IntoDiagnosticArg,
StashKey,
};
use rustc_hir::def::Namespace::{self, *};
use rustc_hir::def::{self, CtorKind, DefKind, LifetimeRes, NonMacroAttrKind, PartialRes, PerNS};
@ -3890,6 +3891,23 @@ fn append_result<T, E>(res1: &mut Result<Vec<T>, E>, res2: Result<Vec<T>, E>) {
finalize,
) {
Ok(Some(partial_res)) if let Some(res) = partial_res.full_res() => {
// if we also have an associated type that matches the ident, stash a suggestion
if let Some(items) = self.diagnostic_metadata.current_trait_assoc_items
&& let [Segment { ident, .. }] = path
&& items.iter().any(|item| {
item.ident == *ident && matches!(item.kind, AssocItemKind::Type(_))
})
{
let mut diag = self.r.tcx.dcx().struct_allow("");
diag.span_suggestion_verbose(
path_span.shrink_to_lo(),
"there is an associated type with the same name",
"Self::",
Applicability::MaybeIncorrect,
);
diag.stash(path_span, StashKey::AssociatedTypeSuggestion);
}
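
The shape of user code this stashed suggestion targets (a hypothetical example): a bare associated-type name inside a trait, which resolves once it is prefixed with `Self::`.

trait Parser {
    type Output;

    // error[E0412]: cannot find type `Output` in this scope
    // fn parse(&self) -> Output;
    fn parse(&self) -> Self::Output; // with the `Self::` suggestion applied
}

fn main() {}
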
if source.is_expected(res) || res == Res::Err {
partial_res
} else {

View file

@ -11,6 +11,7 @@
#![feature(associated_type_bounds)]
#![feature(const_option)]
#![feature(core_intrinsics)]
#![feature(generic_nonzero)]
#![feature(inline_const)]
#![feature(min_specialization)]
#![feature(never_type)]

View file

@ -6,6 +6,7 @@
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque};
use std::hash::{BuildHasher, Hash};
use std::marker::PhantomData;
use std::num::NonZero;
use std::path;
use std::rc::Rc;
use std::sync::Arc;
@ -216,15 +217,15 @@ fn decode(_d: &mut D) -> ! {
}
}
impl<S: Encoder> Encodable<S> for ::std::num::NonZeroU32 {
impl<S: Encoder> Encodable<S> for NonZero<u32> {
fn encode(&self, s: &mut S) {
s.emit_u32(self.get());
}
}
impl<D: Decoder> Decodable<D> for ::std::num::NonZeroU32 {
impl<D: Decoder> Decodable<D> for NonZero<u32> {
fn decode(d: &mut D) -> Self {
::std::num::NonZeroU32::new(d.read_u32()).unwrap()
NonZero::new(d.read_u32()).unwrap()
}
}

View file

@ -3230,7 +3230,7 @@ pub(crate) mod dep_tracking {
};
use std::collections::BTreeMap;
use std::hash::{DefaultHasher, Hash};
use std::num::NonZeroUsize;
use std::num::NonZero;
use std::path::PathBuf;
pub trait DepTrackingHash {
@ -3272,7 +3272,7 @@ fn hash(
impl_dep_tracking_hash_via_hash!(
bool,
usize,
NonZeroUsize,
NonZero<usize>,
u64,
Hash64,
String,

View file

@ -1,4 +1,4 @@
use std::num::NonZeroU32;
use std::num::NonZero;
use rustc_ast::token;
use rustc_ast::util::literal::LitError;
@ -27,7 +27,7 @@ fn into_diagnostic(self, dcx: &'a DiagCtxt, level: Level) -> DiagnosticBuilder<'
#[derive(Subdiagnostic)]
#[note(session_feature_diagnostic_for_issue)]
pub struct FeatureDiagnosticForIssue {
pub n: NonZeroU32,
pub n: NonZero<u32>,
}
#[derive(Subdiagnostic)]

View file

@ -1,3 +1,4 @@
#![feature(generic_nonzero)]
#![feature(let_chains)]
#![feature(lazy_cell)]
#![feature(option_get_or_insert_default)]

View file

@ -21,7 +21,7 @@
use std::collections::BTreeMap;
use std::hash::{DefaultHasher, Hasher};
use std::num::{IntErrorKind, NonZeroUsize};
use std::num::{IntErrorKind, NonZero};
use std::path::PathBuf;
use std::str;
@ -617,7 +617,7 @@ pub(crate) fn parse_opt_comma_list(slot: &mut Option<Vec<String>>, v: Option<&st
pub(crate) fn parse_threads(slot: &mut usize, v: Option<&str>) -> bool {
match v.and_then(|s| s.parse().ok()) {
Some(0) => {
*slot = std::thread::available_parallelism().map_or(1, std::num::NonZeroUsize::get);
*slot = std::thread::available_parallelism().map_or(1, NonZero::<usize>::get);
true
}
Some(i) => {
@ -991,7 +991,10 @@ pub(crate) fn parse_instrument_xray(
true
}
pub(crate) fn parse_treat_err_as_bug(slot: &mut Option<NonZeroUsize>, v: Option<&str>) -> bool {
pub(crate) fn parse_treat_err_as_bug(
slot: &mut Option<NonZero<usize>>,
v: Option<&str>,
) -> bool {
match v {
Some(s) => match s.parse() {
Ok(val) => {
@ -1004,7 +1007,7 @@ pub(crate) fn parse_treat_err_as_bug(slot: &mut Option<NonZeroUsize>, v: Option<
}
},
None => {
*slot = NonZeroUsize::new(1);
*slot = NonZero::new(1);
true
}
}
@ -1950,7 +1953,7 @@ pub(crate) fn parse_function_return(slot: &mut FunctionReturn, v: Option<&str>)
"translate remapped paths into local paths when possible (default: yes)"),
trap_unreachable: Option<bool> = (None, parse_opt_bool, [TRACKED],
"generate trap instructions for unreachable intrinsics (default: use target setting, usually yes)"),
treat_err_as_bug: Option<NonZeroUsize> = (None, parse_treat_err_as_bug, [TRACKED],
treat_err_as_bug: Option<NonZero<usize>> = (None, parse_treat_err_as_bug, [TRACKED],
"treat the `val`th error that occurs as bug (default if not specified: 0 - don't treat errors as bugs. \
default if specified without a value: 1 - treat the first error as bug)"),
trim_diagnostic_paths: bool = (true, parse_bool, [UNTRACKED],

View file

@ -1422,6 +1422,7 @@
rustc_if_this_changed,
rustc_inherit_overflow_checks,
rustc_insignificant_dtor,
rustc_intrinsic,
rustc_layout,
rustc_layout_scalar_valid_range_end,
rustc_layout_scalar_valid_range_start,

View file

@ -2993,6 +2993,15 @@ fn note_obligation_cause(&self, err: &mut Diagnostic, obligation: &PredicateObli
&mut Default::default(),
);
self.suggest_unsized_bound_if_applicable(err, obligation);
if let Some(span) = err.span.primary_span()
&& let Some(mut diag) =
self.tcx.dcx().steal_diagnostic(span, StashKey::AssociatedTypeSuggestion)
&& let Ok(ref mut s1) = err.suggestions
&& let Ok(ref mut s2) = diag.suggestions
{
s1.append(s2);
diag.cancel()
}
}
}

View file

@ -28,7 +28,8 @@ fn resolve_instance<'tcx>(
tcx.normalize_erasing_regions(param_env, args),
)
} else {
let def = if matches!(tcx.def_kind(def_id), DefKind::Fn) && tcx.is_intrinsic(def_id) {
let def = if matches!(tcx.def_kind(def_id), DefKind::Fn) && tcx.intrinsic(def_id).is_some()
{
debug!(" => intrinsic");
ty::InstanceDef::Intrinsic(def_id)
} else if Some(def_id) == tcx.lang_items().drop_in_place_fn() {

View file

@ -829,6 +829,11 @@
# target triples containing `-none`, `nvptx`, `switch`, or `-uefi`.
#no-std = <platform-specific> (bool)
# This is an array of the codegen backends that will be compiled for a rustc
# compiled for this target, overriding the global rust.codegen-backends option.
# See that option for more info.
#codegen-backends = rust.codegen-backends (array)
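#
# For example, a hypothetical target section using this option:
#
# [target.aarch64-unknown-linux-gnu]
# codegen-backends = ["llvm", "cranelift"]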
# =============================================================================
# Distribution options
#

View file

@ -147,7 +147,7 @@
use core::fmt;
use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen};
use core::mem::{self, swap, ManuallyDrop};
use core::num::NonZeroUsize;
use core::num::NonZero;
use core::ops::{Deref, DerefMut};
use core::ptr;
@ -296,7 +296,7 @@ pub struct PeekMut<
heap: &'a mut BinaryHeap<T, A>,
// If a set_len + sift_down are required, this is Some. If a &mut T has not
// yet been exposed to peek_mut()'s caller, it's None.
original_len: Option<NonZeroUsize>,
original_len: Option<NonZero<usize>>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
@ -350,7 +350,7 @@ fn deref_mut(&mut self) -> &mut T {
// the standard library as "leak amplification".
unsafe {
// SAFETY: len > 1 so len != 0.
self.original_len = Some(NonZeroUsize::new_unchecked(len));
self.original_len = Some(NonZero::new_unchecked(len));
// SAFETY: len > 1 so all this does for now is leak elements,
// which is safe.
self.heap.data.set_len(1);
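
From the caller's side, the pattern this guards looks like the following (stable API, illustrative): the heap is only guaranteed to be consistent again once the `PeekMut` guard is dropped.

use std::collections::BinaryHeap;

fn main() {
    let mut heap = BinaryHeap::from([3, 1, 2]);
    if let Some(mut top) = heap.peek_mut() {
        *top = 0; // the root is sifted back down when `top` drops
    }
    assert_eq!(heap.peek(), Some(&2));
}
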
@ -1576,8 +1576,8 @@ unsafe fn as_inner(&mut self) -> &mut Self::Source {
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<I, A: Allocator> InPlaceIterable for IntoIter<I, A> {
const EXPAND_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
const MERGE_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
const EXPAND_BY: Option<NonZero<usize>> = NonZero::new(1);
const MERGE_BY: Option<NonZero<usize>> = NonZero::new(1);
}
unsafe impl<I> AsVecIntoIter for IntoIter<I> {

View file

@ -1,5 +1,5 @@
use core::iter::{FusedIterator, TrustedLen};
use core::num::NonZeroUsize;
use core::num::NonZero;
use core::{array, fmt, mem::MaybeUninit, ops::Try, ptr};
use crate::alloc::{Allocator, Global};
@ -54,7 +54,7 @@ fn size_hint(&self) -> (usize, Option<usize>) {
}
#[inline]
fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let len = self.inner.len;
let rem = if len < n {
self.inner.clear();
@ -63,7 +63,7 @@ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.inner.drain(..n);
0
};
NonZeroUsize::new(rem).map_or(Ok(()), Err)
NonZero::new(rem).map_or(Ok(()), Err)
}
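
To make the `Err` convention concrete, a small check of the contract (nightly-only, since `advance_by` is unstable): `Err(k)` reports the non-zero number of steps that could not be taken.

#![feature(iter_advance_by)]
use std::num::NonZero;

fn main() {
    let mut it = [1, 2, 3].into_iter();
    assert_eq!(it.advance_by(2), Ok(()));
    // one element was left, so 4 of the 5 requested steps remain
    assert_eq!(it.advance_by(5), Err(NonZero::new(4).unwrap()));
}
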
#[inline]
@ -183,7 +183,7 @@ fn next_back(&mut self) -> Option<T> {
}
#[inline]
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let len = self.inner.len;
let rem = if len < n {
self.inner.clear();
@ -192,7 +192,7 @@ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.inner.truncate(len - n);
0
};
NonZeroUsize::new(rem).map_or(Ok(()), Err)
NonZero::new(rem).map_or(Ok(()), Err)
}
fn try_rfold<B, F, R>(&mut self, mut init: B, mut f: F) -> R

View file

@ -1,5 +1,5 @@
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use core::num::NonZeroUsize;
use core::num::NonZero;
use core::ops::Try;
use core::{fmt, mem, slice};
@ -56,7 +56,7 @@ fn next(&mut self) -> Option<&'a T> {
}
}
fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let remaining = self.i1.advance_by(n);
match remaining {
Ok(()) => return Ok(()),
@ -128,7 +128,7 @@ fn next_back(&mut self) -> Option<&'a T> {
}
}
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
match self.i2.advance_back_by(n) {
Ok(()) => return Ok(()),
Err(n) => {

View file

@ -1,5 +1,5 @@
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use core::num::NonZeroUsize;
use core::num::NonZero;
use core::ops::Try;
use core::{fmt, mem, slice};
@ -48,7 +48,7 @@ fn next(&mut self) -> Option<&'a mut T> {
}
}
fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
match self.i1.advance_by(n) {
Ok(()) => return Ok(()),
Err(remaining) => {
@ -119,7 +119,7 @@ fn next_back(&mut self) -> Option<&'a mut T> {
}
}
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
match self.i2.advance_back_by(n) {
Ok(()) => return Ok(()),
Err(remaining) => {

View file

@ -128,6 +128,7 @@
#![feature(extend_one)]
#![feature(fmt_internals)]
#![feature(fn_traits)]
#![feature(generic_nonzero)]
#![feature(hasher_prefixfree_extras)]
#![feature(hint_assert_unchecked)]
#![feature(inline_const)]

View file

@ -160,14 +160,14 @@
use core::iter::{InPlaceIterable, SourceIter, TrustedRandomAccessNoCoerce};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, SizedTypeProperties};
use core::num::NonZeroUsize;
use core::num::NonZero;
use core::ptr::{self, NonNull};
use super::{InPlaceDrop, InPlaceDstDataSrcBufDrop, SpecFromIter, SpecFromIterNested, Vec};
const fn in_place_collectible<DEST, SRC>(
step_merge: Option<NonZeroUsize>,
step_expand: Option<NonZeroUsize>,
step_merge: Option<NonZero<usize>>,
step_expand: Option<NonZero<usize>>,
) -> bool {
// Require matching alignments because an alignment-changing realloc is inefficient on many
// system allocators and better implementations would require the unstable Allocator trait.

View file

@ -12,7 +12,7 @@
};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::num::NonZeroUsize;
use core::num::NonZero;
#[cfg(not(no_global_oom_handling))]
use core::ops::Deref;
use core::ptr::{self, NonNull};
@ -234,7 +234,7 @@ fn size_hint(&self) -> (usize, Option<usize>) {
}
#[inline]
fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let step_size = self.len().min(n);
let to_drop = ptr::slice_from_raw_parts_mut(self.ptr.as_ptr(), step_size);
if T::IS_ZST {
@ -248,7 +248,7 @@ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
unsafe {
ptr::drop_in_place(to_drop);
}
NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
NonZero::new(n - step_size).map_or(Ok(()), Err)
}
#[inline]
@ -336,7 +336,7 @@ fn next_back(&mut self) -> Option<T> {
}
#[inline]
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let step_size = self.len().min(n);
if T::IS_ZST {
// SAFETY: same as for advance_by()
@ -350,7 +350,7 @@ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
unsafe {
ptr::drop_in_place(to_drop);
}
NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
NonZero::new(n - step_size).map_or(Ok(()), Err)
}
}
@ -457,8 +457,8 @@ fn drop(&mut self) {
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {
const EXPAND_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
const MERGE_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
const EXPAND_BY: Option<NonZero<usize>> = NonZero::new(1);
const MERGE_BY: Option<NonZero<usize>> = NonZero::new(1);
}
#[unstable(issue = "none", feature = "inplace_iteration")]

View file

@ -13,6 +13,7 @@
#![feature(core_intrinsics)]
#![feature(extract_if)]
#![feature(exact_size_is_empty)]
#![feature(generic_nonzero)]
#![feature(linked_list_cursors)]
#![feature(map_try_insert)]
#![feature(new_uninit)]

View file

@ -1,5 +1,5 @@
use core::alloc::{Allocator, Layout};
use core::num::NonZeroUsize;
use core::num::NonZero;
use core::ptr::NonNull;
use core::{assert_eq, assert_ne};
use std::alloc::System;
@ -1089,9 +1089,9 @@ fn test_into_iter_advance_by() {
assert_eq!(i.advance_back_by(1), Ok(()));
assert_eq!(i.as_slice(), [2, 3, 4]);
assert_eq!(i.advance_back_by(usize::MAX), Err(NonZeroUsize::new(usize::MAX - 3).unwrap()));
assert_eq!(i.advance_back_by(usize::MAX), Err(NonZero::new(usize::MAX - 3).unwrap()));
assert_eq!(i.advance_by(usize::MAX), Err(NonZeroUsize::new(usize::MAX).unwrap()));
assert_eq!(i.advance_by(usize::MAX), Err(NonZero::new(usize::MAX).unwrap()));
assert_eq!(i.advance_by(0), Ok(()));
assert_eq!(i.advance_back_by(0), Ok(()));
@ -1192,7 +1192,7 @@ fn assert_in_place_trait<T: InPlaceIterable>(_: &T) {}
.map(|(a, b)| a + b)
.map_while(Option::Some)
.skip(1)
.map(|e| if e != usize::MAX { Ok(std::num::NonZeroUsize::new(e)) } else { Err(()) });
.map(|e| if e != usize::MAX { Ok(NonZero::new(e)) } else { Err(()) });
assert_in_place_trait(&iter);
let sink = iter.collect::<Result<Vec<_>, _>>().unwrap();
let sinkptr = sink.as_ptr();

View file

@ -1,4 +1,4 @@
use core::num::NonZeroUsize;
use core::num::NonZero;
use std::assert_matches::assert_matches;
use std::collections::TryReserveErrorKind::*;
use std::collections::{vec_deque::Drain, VecDeque};
@ -445,9 +445,9 @@ fn test_into_iter() {
assert_eq!(it.next_back(), Some(3));
let mut it = VecDeque::from(vec![1, 2, 3, 4, 5]).into_iter();
assert_eq!(it.advance_by(10), Err(NonZeroUsize::new(5).unwrap()));
assert_eq!(it.advance_by(10), Err(NonZero::new(5).unwrap()));
let mut it = VecDeque::from(vec![1, 2, 3, 4, 5]).into_iter();
assert_eq!(it.advance_back_by(10), Err(NonZeroUsize::new(5).unwrap()));
assert_eq!(it.advance_back_by(10), Err(NonZero::new(5).unwrap()));
}
}

View file

@ -1,6 +1,6 @@
//! Defines the `IntoIter` owned iterator for arrays.
use crate::num::NonZeroUsize;
use crate::num::NonZero;
use crate::{
fmt,
intrinsics::transmute_unchecked,
@ -280,7 +280,7 @@ fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
// This also moves the start, which marks them as conceptually "dropped",
// so if anything goes bad then our drop impl won't double-free them.
let range_to_drop = self.alive.take_prefix(n);
@ -292,7 +292,7 @@ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(slice));
}
NonZeroUsize::new(remaining).map_or(Ok(()), Err)
NonZero::new(remaining).map_or(Ok(()), Err)
}
#[inline]
@ -335,7 +335,7 @@ fn rfold<Acc, Fold>(mut self, init: Acc, mut rfold: Fold) -> Acc
})
}
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
// This also moves the end, which marks them as conceptually "dropped",
// so if anything goes bad then our drop impl won't double-free them.
let range_to_drop = self.alive.take_suffix(n);
@ -347,7 +347,7 @@ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(slice));
}
NonZeroUsize::new(remaining).map_or(Ok(()), Err)
NonZero::new(remaining).map_or(Ok(()), Err)
}
}
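The comments in this impl describe the drop-safety contract: elements skipped by `advance_by` are dropped exactly once, and any shortfall is reported through `Err`. A consumer-level illustration (nightly only, since `Iterator::advance_by` is still gated behind `iter_advance_by`):

#![feature(iter_advance_by)]

use std::num::NonZero;

fn main() {
    let mut it = [String::from("a"), String::from("b"), String::from("c")].into_iter();
    // The two skipped strings are dropped here, exactly once.
    assert_eq!(it.advance_by(2), Ok(()));
    assert_eq!(it.next().as_deref(), Some("c"));
    // The iterator is now empty, so none of the 2 requested steps can be taken.
    assert_eq!(it.advance_by(2), Err(NonZero::new(2).unwrap()));
}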

View file

@ -12,7 +12,7 @@
use crate::escape;
use crate::fmt;
use crate::iter::FusedIterator;
use crate::num::NonZeroUsize;
use crate::num::NonZero;
mod ascii_char;
#[unstable(feature = "ascii_char", issue = "110998")]
@ -133,7 +133,7 @@ fn last(mut self) -> Option<u8> {
}
#[inline]
fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.0.advance_by(n)
}
}
@ -146,7 +146,7 @@ fn next_back(&mut self) -> Option<u8> {
}
#[inline]
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.0.advance_back_by(n)
}
}

View file

@ -43,7 +43,7 @@
use crate::escape;
use crate::fmt::{self, Write};
use crate::iter::FusedIterator;
use crate::num::NonZeroUsize;
use crate::num::NonZero;
pub(crate) use self::methods::EscapeDebugExtArgs;
@ -185,7 +185,7 @@ fn last(mut self) -> Option<char> {
}
#[inline]
fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.0.advance_by(n)
}
}
@ -260,7 +260,7 @@ fn last(mut self) -> Option<char> {
}
#[inline]
fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.0.advance_by(n)
}
}

View file

@ -33,7 +33,7 @@ unsafe impl BytewiseEq for $t {}
// so we can compare them directly.
is_bytewise_comparable!(bool, char, super::Ordering);
// SAFETY: Similarly, the non-zero types have a niche, but no undef and no pointers,
// SAFETY: Similarly, the `NonZero` type has a niche, but no undef and no pointers,
// and they compare like their underlying numeric type.
is_bytewise_comparable!(
NonZeroU8,
@ -50,7 +50,7 @@ unsafe impl BytewiseEq for $t {}
NonZeroIsize,
);
// SAFETY: The NonZero types have the "null" optimization guaranteed, and thus
// SAFETY: The `NonZero` type has the "null" optimization guaranteed, and thus
// are also safe to equality-compare bitwise inside an `Option`.
// The way `PartialOrd` is defined for `Option` means that this wouldn't work
// for `<` or `>` on the signed types, but since we only do `==` it's fine.
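The niche guarantee these SAFETY comments rely on is observable on stable:

use std::mem::size_of;
use std::num::NonZeroU8;

fn main() {
    // `Option<NonZeroU8>` uses the all-zero bit pattern for `None`, so the
    // wrapper adds no bytes and bitwise `==` agrees with value equality.
    assert_eq!(size_of::<Option<NonZeroU8>>(), size_of::<u8>());
}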

View file

@ -1,7 +1,7 @@
//! Helper code for character escaping.
use crate::ascii;
use crate::num::NonZeroUsize;
use crate::num::NonZero;
use crate::ops::Range;
const HEX_DIGITS: [ascii::Char; 16] = *b"0123456789abcdef".as_ascii().unwrap();
@ -106,11 +106,11 @@ pub fn next_back(&mut self) -> Option<u8> {
self.alive.next_back().map(|i| self.data[usize::from(i)].to_u8())
}
pub fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
pub fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.alive.advance_by(n)
}
pub fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
pub fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.alive.advance_back_by(n)
}
}

View file

@ -2368,32 +2368,6 @@ pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
#[rustc_nounwind]
pub fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8;
/// Allocates a block of memory at compile time.
/// At runtime, just returns a null pointer.
///
/// # Safety
///
/// - The `align` argument must be a power of two.
/// - At compile time, a compile error occurs if this constraint is violated.
/// - At runtime, it is not checked.
#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
#[rustc_nounwind]
pub fn const_allocate(size: usize, align: usize) -> *mut u8;
/// Deallocates memory that was allocated by `intrinsics::const_allocate` at compile time.
/// At runtime, does nothing.
///
/// # Safety
///
/// - The `align` argument must be a power of two.
/// - At compile time, a compile error occurs if this constraint is violated.
/// - At runtime, it is not checked.
/// - If the `ptr` was created in another const, this intrinsic doesn't deallocate it.
/// - If the `ptr` points to a local variable, this intrinsic doesn't deallocate it.
#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
#[rustc_nounwind]
pub fn const_deallocate(ptr: *mut u8, size: usize, align: usize);
/// Determines whether the raw bytes of the two values are equal.
///
/// This is particularly handy for arrays, since it allows things like just
@ -2517,83 +2491,112 @@ pub fn const_eval_select<ARG: Tuple, F, G, RET>(
where
G: FnOnce<ARG, Output = RET>,
F: FnOnce<ARG, Output = RET>;
/// Returns whether the argument's value is statically known at
/// compile-time.
///
/// This is useful when there is a way of writing the code that will
/// be *faster* when some variables have known values, but *slower*
/// in the general case: an `if is_val_statically_known(var)` can be used
/// to select between these two variants. The `if` will be optimized away
/// and only the desired branch remains.
///
/// Formally speaking, this function non-deterministically returns `true`
/// or `false`, and the caller has to ensure sound behavior for both cases.
/// In other words, the following code has *Undefined Behavior*:
///
/// ```no_run
/// #![feature(is_val_statically_known)]
/// #![feature(core_intrinsics)]
/// # #![allow(internal_features)]
/// use std::hint::unreachable_unchecked;
/// use std::intrinsics::is_val_statically_known;
///
/// unsafe {
/// if !is_val_statically_known(0) { unreachable_unchecked(); }
/// }
/// ```
///
/// This also means that the following code's behavior is unspecified; it
/// may panic, or it may not:
///
/// ```no_run
/// #![feature(is_val_statically_known)]
/// #![feature(core_intrinsics)]
/// # #![allow(internal_features)]
/// use std::intrinsics::is_val_statically_known;
///
/// unsafe {
/// assert_eq!(is_val_statically_known(0), is_val_statically_known(0));
/// }
/// ```
///
/// Unsafe code may not rely on `is_val_statically_known` returning any
/// particular value, ever. However, the compiler will generally make it
/// return `true` only if the value of the argument is actually known.
///
/// When calling this in a `const fn`, both paths must be semantically
/// equivalent, that is, the result of the `true` branch and the `false`
/// branch must return the same value and have the same side-effects *no
/// matter what*.
#[rustc_const_unstable(feature = "is_val_statically_known", issue = "none")]
#[rustc_nounwind]
pub fn is_val_statically_known<T: Copy>(arg: T) -> bool;
/// Returns the value of `cfg!(debug_assertions)`, but after monomorphization instead of in
/// macro expansion.
///
/// This always returns `false` in const eval and Miri. The interpreter provides better
/// diagnostics than the checks that this is used to implement. However, this means
/// you should only be using this intrinsic to guard requirements that, if violated,
/// immediately lead to UB. Otherwise, const-eval and Miri will miss out on those
/// checks entirely.
///
/// Since this is evaluated after monomorphization, branching on this value can be used to
/// implement debug assertions that are included in the precompiled standard library, but can
/// be optimized out by builds that monomorphize the standard library code with debug
/// assertions disabled. This intrinsic is primarily used by [`assert_unsafe_precondition`].
#[rustc_const_unstable(feature = "delayed_debug_assertions", issue = "none")]
#[rustc_safe_intrinsic]
#[cfg(not(bootstrap))]
pub(crate) fn debug_assertions() -> bool;
}
#[cfg(bootstrap)]
/// Returns whether the argument's value is statically known at
/// compile-time.
///
/// This is useful when there is a way of writing the code that will
/// be *faster* when some variables have known values, but *slower*
/// in the general case: an `if is_val_statically_known(var)` can be used
/// to select between these two variants. The `if` will be optimized away
/// and only the desired branch remains.
///
/// Formally speaking, this function non-deterministically returns `true`
/// or `false`, and the caller has to ensure sound behavior for both cases.
/// In other words, the following code has *Undefined Behavior*:
///
/// ```no_run
/// #![feature(is_val_statically_known)]
/// #![feature(core_intrinsics)]
/// # #![allow(internal_features)]
/// use std::hint::unreachable_unchecked;
/// use std::intrinsics::is_val_statically_known;
///
/// if !is_val_statically_known(0) { unsafe { unreachable_unchecked(); } }
/// ```
///
/// This also means that the following code's behavior is unspecified; it
/// may panic, or it may not:
///
/// ```no_run
/// #![feature(is_val_statically_known)]
/// #![feature(core_intrinsics)]
/// # #![allow(internal_features)]
/// use std::intrinsics::is_val_statically_known;
///
/// assert_eq!(is_val_statically_known(0), is_val_statically_known(0));
/// ```
///
/// Unsafe code may not rely on `is_val_statically_known` returning any
/// particular value, ever. However, the compiler will generally make it
/// return `true` only if the value of the argument is actually known.
///
/// When calling this in a `const fn`, both paths must be semantically
/// equivalent, that is, the result of the `true` branch and the `false`
/// branch must return the same value and have the same side-effects *no
/// matter what*.
#[rustc_const_unstable(feature = "is_val_statically_known", issue = "none")]
#[rustc_nounwind]
#[unstable(feature = "core_intrinsics", issue = "none")]
#[cfg_attr(not(bootstrap), rustc_intrinsic)]
pub const fn is_val_statically_known<T: Copy>(_arg: T) -> bool {
false
}
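A hedged sketch of the selection pattern this doc comment describes; `mul` is an illustrative function, not taken from the diff, and both branches compute the same value, as the contract requires:

#![feature(core_intrinsics, is_val_statically_known)]
#![allow(internal_features)]

use std::intrinsics::is_val_statically_known;

fn mul(a: u32, b: u32) -> u32 {
    // The intrinsic may return either value, so both branches must agree.
    if is_val_statically_known(b) && b.is_power_of_two() {
        // Fast path the optimizer can keep when `b` is a known power of two.
        a.wrapping_shl(b.trailing_zeros())
    } else {
        a.wrapping_mul(b)
    }
}

fn main() {
    assert_eq!(mul(3, 8), 24);
}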
/// Returns the value of `cfg!(debug_assertions)`, but after monomorphization instead of in
/// macro expansion.
///
/// This always returns `false` in const eval and Miri. The interpreter provides better
/// diagnostics than the checks that this is used to implement. However, this means
/// you should only be using this intrinsic to guard requirements that, if violated,
/// immediately lead to UB. Otherwise, const-eval and Miri will miss out on those
/// checks entirely.
///
/// Since this is evaluated after monomorphization, branching on this value can be used to
/// implement debug assertions that are included in the precompiled standard library, but can
/// be optimized out by builds that monomorphize the standard library code with debug
/// assertions disabled. This intrinsic is primarily used by [`assert_unsafe_precondition`].
#[rustc_const_unstable(feature = "delayed_debug_assertions", issue = "none")]
#[unstable(feature = "core_intrinsics", issue = "none")]
#[cfg_attr(not(bootstrap), rustc_intrinsic)]
pub(crate) const fn debug_assertions() -> bool {
cfg!(debug_assertions)
}
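The intrinsic is `pub(crate)` and cannot be called outside `core`. As a runnable stand-in, the same guarding pattern with `cfg!` at macro-expansion time; the intrinsic differs only in being evaluated after monomorphization:

fn guarded_check(idx: usize, len: usize) {
    // Inside `core` this condition would be `intrinsics::debug_assertions()`.
    if cfg!(debug_assertions) {
        assert!(idx < len, "an out-of-bounds access here would be UB");
    }
}

fn main() {
    guarded_check(1, 4);
}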
/// Allocates a block of memory at compile time.
/// At runtime, just returns a null pointer.
///
/// # Safety
///
/// - The `align` argument must be a power of two.
/// - At compile time, a compile error occurs if this constraint is violated.
/// - At runtime, it is not checked.
#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
#[unstable(feature = "core_intrinsics", issue = "none")]
#[rustc_nounwind]
#[cfg_attr(not(bootstrap), rustc_intrinsic)]
pub const unsafe fn const_allocate(_size: usize, _align: usize) -> *mut u8 {
// const eval overrides this function, but runtime code should always just return null pointers.
crate::ptr::null_mut()
}
/// Deallocates memory that was allocated by `intrinsics::const_allocate` at compile time.
/// At runtime, does nothing.
///
/// # Safety
///
/// - The `align` argument must be a power of two.
/// - At compile time, a compile error occurs if this constraint is violated.
/// - At runtime, it is not checked.
/// - If the `ptr` was created in another const, this intrinsic doesn't deallocate it.
/// - If the `ptr` points to a local variable, this intrinsic doesn't deallocate it.
#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
#[unstable(feature = "core_intrinsics", issue = "none")]
#[rustc_nounwind]
#[cfg_attr(not(bootstrap), rustc_intrinsic)]
pub const unsafe fn const_deallocate(_ptr: *mut u8, _size: usize, _align: usize) {}
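A hedged sketch of the pair in use. The allocation exists only during const evaluation; at runtime `const_allocate` returns null and `const_deallocate` does nothing, as documented above:

#![feature(core_intrinsics, const_heap)]
#![allow(internal_features)]

use std::intrinsics::{const_allocate, const_deallocate};

const _: () = unsafe {
    // Allocate 4 bytes with alignment 4 at compile time, then release them.
    let p = const_allocate(4, 4);
    const_deallocate(p, 4, 4);
};

fn main() {}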
// Some functions are defined here because they accidentally got made
// available in this module on stable. See <https://github.com/rust-lang/rust/issues/15702>.
// (`transmute` also falls into this category, but it cannot be wrapped due to the

View file

@ -233,6 +233,26 @@ pub unsafe fn as_mut(&mut self) -> &mut [MaybeUninit<u8>] {
&mut self.buf.buf[self.buf.filled..]
}
/// Advance the cursor by asserting that `n` bytes have been filled.
///
/// After advancing, the `n` bytes are no longer accessible via the cursor and can only be
/// accessed via the underlying buffer. I.e., the buffer's filled portion grows by `n` elements
/// and its unfilled portion (and the capacity of this cursor) shrinks by `n` elements.
///
/// If fewer than `n` bytes are initialized (from the cursor's point of view), `set_init` should
/// be called first.
///
/// # Panics
///
/// Panics if fewer than `n` bytes are initialized.
#[inline]
pub fn advance(&mut self, n: usize) -> &mut Self {
assert!(self.buf.init >= self.buf.filled + n);
self.buf.filled += n;
self
}
/// Advance the cursor by asserting that `n` bytes have been filled.
///
/// After advancing, the `n` bytes are no longer accessible via the cursor and can only be
@ -244,7 +264,7 @@ pub unsafe fn as_mut(&mut self) -> &mut [MaybeUninit<u8>] {
/// The caller must ensure that the first `n` bytes of the cursor have been properly
/// initialised.
#[inline]
pub unsafe fn advance(&mut self, n: usize) -> &mut Self {
pub unsafe fn advance_unchecked(&mut self, n: usize) -> &mut Self {
self.buf.filled += n;
self.buf.init = cmp::max(self.buf.init, self.buf.filled);
self
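This splits the old unsafe `advance` into the checked, safe `advance` added above and the renamed `advance_unchecked`. A sketch of the safe path (nightly; `BorrowedBuf` is assumed to still sit behind the `core_io_borrowed_buf` gate):

#![feature(core_io_borrowed_buf)]

use std::io::BorrowedBuf;
use std::mem::MaybeUninit;

fn main() {
    let mut storage = [MaybeUninit::<u8>::uninit(); 8];
    let mut buf = BorrowedBuf::from(&mut storage[..]);
    let mut cursor = buf.unfilled();
    // Zero-initialize the remaining capacity so the safe `advance` is valid.
    cursor.ensure_init();
    cursor.advance(4); // would have required `unsafe` before this rename
    assert_eq!(buf.len(), 4);
}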
@ -289,7 +309,7 @@ pub fn append(&mut self, buf: &[u8]) {
// SAFETY: we do not de-initialize any of the elements of the slice
unsafe {
MaybeUninit::write_slice(&mut self.as_mut()[..buf.len()], buf);
MaybeUninit::copy_from_slice(&mut self.as_mut()[..buf.len()], buf);
}
// SAFETY: We just added the entire contents of buf to the filled section.
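The same hunk also picks up the rename of `MaybeUninit::write_slice` to `copy_from_slice`. Consumer-level usage (nightly; assumed to still be under the `maybe_uninit_write_slice` gate):

#![feature(maybe_uninit_write_slice)]

use std::mem::MaybeUninit;

fn main() {
    let mut dst = [MaybeUninit::<u8>::uninit(); 4];
    // Copies the bytes and hands back the now-initialized prefix.
    let init: &mut [u8] = MaybeUninit::copy_from_slice(&mut dst, &[1, 2, 3, 4]);
    assert_eq!(init, [1, 2, 3, 4]);
}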

Some files were not shown because too many files have changed in this diff.