Auto merge of #76964 - RalfJung:rollup-ybn06fs, r=RalfJung

Rollup of 15 pull requests

Successful merges:

 - #76722 (Test and fix Send and Sync traits of BTreeMap artefacts)
 - #76766 (Extract some intrinsics out of rustc_codegen_llvm)
 - #76800 (Don't generate bootstrap usage unless it's needed)
 - #76809 (simplify condition in ItemLowerer::with_trait_impl_ref())
 - #76815 (Fix wording in mir doc)
 - #76818 (Don't compile regex at every function call.)
 - #76821 (Remove redundant nightly features)
 - #76823 (black_box: silence unused_mut warning when building with cfg(miri))
 - #76825 (use `array_windows` instead of `windows` in the compiler)
 - #76827 (fix array_windows docs)
 - #76828 (use strip_prefix over starts_with and manual slicing based on pattern length (clippy::manual_strip))
 - #76840 (Move to intra doc links in core/src/future)
 - #76845 (Use intra docs links in core::{ascii, option, str, pattern, hash::map})
 - #76853 (Use intra-doc links in library/core/src/task/wake.rs)
 - #76871 (support panic=abort in Miri)

Failed merges:

r? `@ghost`
bors 2020-09-20 11:02:36 +00:00
commit 41507ed0d5
51 changed files with 862 additions and 697 deletions

View file

@ -11,7 +11,6 @@
html_root_url = "https://doc.rust-lang.org/nightly/",
test(no_crate_inject, attr(deny(warnings)))
)]
#![feature(core_intrinsics)]
#![feature(dropck_eyepatch)]
#![feature(new_uninit)]
#![feature(maybe_uninit_slice)]
@ -24,7 +23,6 @@
use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::cmp;
use std::intrinsics;
use std::marker::{PhantomData, Send};
use std::mem::{self, MaybeUninit};
use std::ptr;
@ -122,7 +120,7 @@ pub fn alloc(&self, object: T) -> &mut T {
unsafe {
if mem::size_of::<T>() == 0 {
self.ptr.set(intrinsics::arith_offset(self.ptr.get() as *mut u8, 1) as *mut T);
self.ptr.set((self.ptr.get() as *mut u8).wrapping_offset(1) as *mut T);
let ptr = mem::align_of::<T>() as *mut T;
// Don't drop the object. This `write` is equivalent to `forget`.
ptr::write(ptr, object);
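
The change above swaps the unstable `intrinsics::arith_offset` for the stable `wrapping_offset` pointer method. A minimal standalone sketch of the equivalence (not the arena's actual code; the helper name is hypothetical):

fn bump_zst_ptr<T>(ptr: *mut T) -> *mut T {
    // `wrapping_offset` is the stable counterpart of `intrinsics::arith_offset`:
    // it may step past the allocation without UB as long as the result is never
    // dereferenced out of bounds. For a zero-sized `T` the arena only uses this
    // pointer as a counter, so that guarantee is enough.
    (ptr as *mut u8).wrapping_offset(1) as *mut T
}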

View file

@ -5,17 +5,13 @@
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
#![feature(bool_to_option)]
#![feature(box_syntax)]
#![feature(const_fn)] // For the `transmute` in `P::new`
#![feature(const_panic)]
#![feature(const_fn_transmute)]
#![feature(crate_visibility_modifier)]
#![feature(label_break_value)]
#![feature(nll)]
#![feature(or_patterns)]
#![feature(try_trait)]
#![feature(unicode_internals)]
#![recursion_limit = "256"]
#[macro_use]

View file

@ -27,7 +27,7 @@ pub(super) struct ItemLowerer<'a, 'lowering, 'hir> {
impl ItemLowerer<'_, '_, '_> {
fn with_trait_impl_ref(&mut self, impl_ref: &Option<TraitRef>, f: impl FnOnce(&mut Self)) {
let old = self.lctx.is_in_trait_impl;
self.lctx.is_in_trait_impl = if let &None = impl_ref { false } else { true };
self.lctx.is_in_trait_impl = impl_ref.is_some();
f(self);
self.lctx.is_in_trait_impl = old;
}
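
A tiny standalone illustration of the simplification above, using a plain `Option` value in place of the real `&Option<TraitRef>`:

fn main() {
    let impl_ref: Option<&str> = Some("TraitRef");
    // The old pattern match and the new method call compute the same boolean.
    let old = if let None = impl_ref { false } else { true };
    let new = impl_ref.is_some();
    assert_eq!(old, new);
}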

View file

@ -7,15 +7,12 @@
use crate::va_arg::emit_va_arg;
use crate::value::Value;
use rustc_ast as ast;
use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::glue;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_hir as hir;
use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
use rustc_middle::ty::{self, Ty};
@ -71,8 +68,6 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Va
sym::nearbyintf64 => "llvm.nearbyint.f64",
sym::roundf32 => "llvm.round.f32",
sym::roundf64 => "llvm.round.f64",
sym::assume => "llvm.assume",
sym::abort => "llvm.trap",
_ => return None,
};
Some(cx.get_intrinsic(&llvm_name))
@ -112,9 +107,6 @@ fn codegen_intrinsic_call(
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None,
),
sym::unreachable => {
return;
}
sym::likely => {
let expect = self.get_intrinsic(&("llvm.expect.i1"));
self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
@ -137,8 +129,6 @@ fn codegen_intrinsic_call(
let llfn = self.get_intrinsic(&("llvm.debugtrap"));
self.call(llfn, &[], None)
}
sym::va_start => self.va_start(args[0].immediate()),
sym::va_end => self.va_end(args[0].immediate()),
sym::va_copy => {
let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
@ -169,123 +159,7 @@ fn codegen_intrinsic_call(
_ => bug!("the va_arg intrinsic does not work with non-scalar types"),
}
}
sym::size_of_val => {
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (llsize, _) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
llsize
} else {
self.const_usize(self.size_of(tp_ty).bytes())
}
}
sym::min_align_of_val => {
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (_, llalign) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
llalign
} else {
self.const_usize(self.align_of(tp_ty).bytes())
}
}
sym::size_of
| sym::pref_align_of
| sym::min_align_of
| sym::needs_drop
| sym::type_id
| sym::type_name
| sym::variant_count => {
let value = self
.tcx
.const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
.unwrap();
OperandRef::from_const(self, value, ret_ty).immediate_or_packed_pair(self)
}
// Effectively no-op
sym::forget => {
return;
}
sym::offset => {
let ptr = args[0].immediate();
let offset = args[1].immediate();
self.inbounds_gep(ptr, &[offset])
}
sym::arith_offset => {
let ptr = args[0].immediate();
let offset = args[1].immediate();
self.gep(ptr, &[offset])
}
sym::copy_nonoverlapping => {
copy_intrinsic(
self,
false,
false,
substs.type_at(0),
args[1].immediate(),
args[0].immediate(),
args[2].immediate(),
);
return;
}
sym::copy => {
copy_intrinsic(
self,
true,
false,
substs.type_at(0),
args[1].immediate(),
args[0].immediate(),
args[2].immediate(),
);
return;
}
sym::write_bytes => {
memset_intrinsic(
self,
false,
substs.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
);
return;
}
sym::volatile_copy_nonoverlapping_memory => {
copy_intrinsic(
self,
false,
true,
substs.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
);
return;
}
sym::volatile_copy_memory => {
copy_intrinsic(
self,
true,
true,
substs.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
);
return;
}
sym::volatile_set_memory => {
memset_intrinsic(
self,
true,
substs.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
);
return;
}
sym::volatile_load | sym::unaligned_volatile_load => {
let tp_ty = substs.type_at(0);
let mut ptr = args[0].immediate();
@ -343,20 +217,6 @@ fn codegen_intrinsic_call(
| sym::ctpop
| sym::bswap
| sym::bitreverse
| sym::add_with_overflow
| sym::sub_with_overflow
| sym::mul_with_overflow
| sym::wrapping_add
| sym::wrapping_sub
| sym::wrapping_mul
| sym::unchecked_div
| sym::unchecked_rem
| sym::unchecked_shl
| sym::unchecked_shr
| sym::unchecked_add
| sym::unchecked_sub
| sym::unchecked_mul
| sym::exact_div
| sym::rotate_left
| sym::rotate_right
| sym::saturating_add
@ -396,84 +256,6 @@ fn codegen_intrinsic_call(
&[args[0].immediate()],
None,
),
sym::add_with_overflow
| sym::sub_with_overflow
| sym::mul_with_overflow => {
let intrinsic = format!(
"llvm.{}{}.with.overflow.i{}",
if signed { 's' } else { 'u' },
&name_str[..3],
width
);
let llfn = self.get_intrinsic(&intrinsic);
// Convert `i1` to a `bool`, and write it to the out parameter
let pair =
self.call(llfn, &[args[0].immediate(), args[1].immediate()], None);
let val = self.extract_value(pair, 0);
let overflow = self.extract_value(pair, 1);
let overflow = self.zext(overflow, self.type_bool());
let dest = result.project_field(self, 0);
self.store(val, dest.llval, dest.align);
let dest = result.project_field(self, 1);
self.store(overflow, dest.llval, dest.align);
return;
}
sym::wrapping_add => self.add(args[0].immediate(), args[1].immediate()),
sym::wrapping_sub => self.sub(args[0].immediate(), args[1].immediate()),
sym::wrapping_mul => self.mul(args[0].immediate(), args[1].immediate()),
sym::exact_div => {
if signed {
self.exactsdiv(args[0].immediate(), args[1].immediate())
} else {
self.exactudiv(args[0].immediate(), args[1].immediate())
}
}
sym::unchecked_div => {
if signed {
self.sdiv(args[0].immediate(), args[1].immediate())
} else {
self.udiv(args[0].immediate(), args[1].immediate())
}
}
sym::unchecked_rem => {
if signed {
self.srem(args[0].immediate(), args[1].immediate())
} else {
self.urem(args[0].immediate(), args[1].immediate())
}
}
sym::unchecked_shl => self.shl(args[0].immediate(), args[1].immediate()),
sym::unchecked_shr => {
if signed {
self.ashr(args[0].immediate(), args[1].immediate())
} else {
self.lshr(args[0].immediate(), args[1].immediate())
}
}
sym::unchecked_add => {
if signed {
self.unchecked_sadd(args[0].immediate(), args[1].immediate())
} else {
self.unchecked_uadd(args[0].immediate(), args[1].immediate())
}
}
sym::unchecked_sub => {
if signed {
self.unchecked_ssub(args[0].immediate(), args[1].immediate())
} else {
self.unchecked_usub(args[0].immediate(), args[1].immediate())
}
}
sym::unchecked_mul => {
if signed {
self.unchecked_smul(args[0].immediate(), args[1].immediate())
} else {
self.unchecked_umul(args[0].immediate(), args[1].immediate())
}
}
sym::rotate_left | sym::rotate_right => {
let is_left = name == sym::rotate_left;
let val = args[0].immediate();
@ -513,75 +295,6 @@ fn codegen_intrinsic_call(
}
}
}
sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
match float_type_width(arg_tys[0]) {
Some(_width) => match name {
sym::fadd_fast => self.fadd_fast(args[0].immediate(), args[1].immediate()),
sym::fsub_fast => self.fsub_fast(args[0].immediate(), args[1].immediate()),
sym::fmul_fast => self.fmul_fast(args[0].immediate(), args[1].immediate()),
sym::fdiv_fast => self.fdiv_fast(args[0].immediate(), args[1].immediate()),
sym::frem_fast => self.frem_fast(args[0].immediate(), args[1].immediate()),
_ => bug!(),
},
None => {
span_invalid_monomorphization_error(
tcx.sess,
span,
&format!(
"invalid monomorphization of `{}` intrinsic: \
expected basic float type, found `{}`",
name, arg_tys[0]
),
);
return;
}
}
}
sym::float_to_int_unchecked => {
if float_type_width(arg_tys[0]).is_none() {
span_invalid_monomorphization_error(
tcx.sess,
span,
&format!(
"invalid monomorphization of `float_to_int_unchecked` \
intrinsic: expected basic float type, \
found `{}`",
arg_tys[0]
),
);
return;
}
let (width, signed) = match int_type_width_signed(ret_ty, self.cx) {
Some(pair) => pair,
None => {
span_invalid_monomorphization_error(
tcx.sess,
span,
&format!(
"invalid monomorphization of `float_to_int_unchecked` \
intrinsic: expected basic integer type, \
found `{}`",
ret_ty
),
);
return;
}
};
if signed {
self.fptosi(args[0].immediate(), self.cx.type_ix(width))
} else {
self.fptoui(args[0].immediate(), self.cx.type_ix(width))
}
}
sym::discriminant_value => {
if ret_ty.is_integral() {
args[0].deref(self.cx()).codegen_get_discr(self, ret_ty)
} else {
span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
}
}
_ if name_str.starts_with("simd_") => {
match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
@ -589,174 +302,6 @@ fn codegen_intrinsic_call(
Err(()) => return,
}
}
// This requires that atomic intrinsics follow a specific naming pattern:
// "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
name if name_str.starts_with("atomic_") => {
use rustc_codegen_ssa::common::AtomicOrdering::*;
use rustc_codegen_ssa::common::{AtomicRmwBinOp, SynchronizationScope};
let split: Vec<&str> = name_str.split('_').collect();
let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
let (order, failorder) = match split.len() {
2 => (SequentiallyConsistent, SequentiallyConsistent),
3 => match split[2] {
"unordered" => (Unordered, Unordered),
"relaxed" => (Monotonic, Monotonic),
"acq" => (Acquire, Acquire),
"rel" => (Release, Monotonic),
"acqrel" => (AcquireRelease, Acquire),
"failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
"failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
_ => self.sess().fatal("unknown ordering in atomic intrinsic"),
},
4 => match (split[2], split[3]) {
("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
_ => self.sess().fatal("unknown ordering in atomic intrinsic"),
},
_ => self.sess().fatal("Atomic intrinsic not in correct format"),
};
let invalid_monomorphization = |ty| {
span_invalid_monomorphization_error(
tcx.sess,
span,
&format!(
"invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`",
name, ty
),
);
};
match split[1] {
"cxchg" | "cxchgweak" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, self).is_some() {
let weak = split[1] == "cxchgweak";
let pair = self.atomic_cmpxchg(
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
order,
failorder,
weak,
);
let val = self.extract_value(pair, 0);
let success = self.extract_value(pair, 1);
let success = self.zext(success, self.type_bool());
let dest = result.project_field(self, 0);
self.store(val, dest.llval, dest.align);
let dest = result.project_field(self, 1);
self.store(success, dest.llval, dest.align);
return;
} else {
return invalid_monomorphization(ty);
}
}
"load" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, self).is_some() {
let size = self.size_of(ty);
self.atomic_load(args[0].immediate(), order, size)
} else {
return invalid_monomorphization(ty);
}
}
"store" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, self).is_some() {
let size = self.size_of(ty);
self.atomic_store(
args[1].immediate(),
args[0].immediate(),
order,
size,
);
return;
} else {
return invalid_monomorphization(ty);
}
}
"fence" => {
self.atomic_fence(order, SynchronizationScope::CrossThread);
return;
}
"singlethreadfence" => {
self.atomic_fence(order, SynchronizationScope::SingleThread);
return;
}
// These are all AtomicRMW ops
op => {
let atom_op = match op {
"xchg" => AtomicRmwBinOp::AtomicXchg,
"xadd" => AtomicRmwBinOp::AtomicAdd,
"xsub" => AtomicRmwBinOp::AtomicSub,
"and" => AtomicRmwBinOp::AtomicAnd,
"nand" => AtomicRmwBinOp::AtomicNand,
"or" => AtomicRmwBinOp::AtomicOr,
"xor" => AtomicRmwBinOp::AtomicXor,
"max" => AtomicRmwBinOp::AtomicMax,
"min" => AtomicRmwBinOp::AtomicMin,
"umax" => AtomicRmwBinOp::AtomicUMax,
"umin" => AtomicRmwBinOp::AtomicUMin,
_ => self.sess().fatal("unknown atomic operation"),
};
let ty = substs.type_at(0);
if int_type_width_signed(ty, self).is_some() {
self.atomic_rmw(
atom_op,
args[0].immediate(),
args[1].immediate(),
order,
)
} else {
return invalid_monomorphization(ty);
}
}
}
}
sym::nontemporal_store => {
let dst = args[0].deref(self.cx());
args[1].val.nontemporal_store(self, dst);
return;
}
sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
let a = args[0].immediate();
let b = args[1].immediate();
if name == sym::ptr_guaranteed_eq {
self.icmp(IntPredicate::IntEQ, a, b)
} else {
self.icmp(IntPredicate::IntNE, a, b)
}
}
sym::ptr_offset_from => {
let ty = substs.type_at(0);
let pointee_size = self.size_of(ty);
// This is the same sequence that Clang emits for pointer subtraction.
// It can be neither `nsw` nor `nuw` because the input is treated as
// unsigned but then the output is treated as signed, so neither works.
let a = args[0].immediate();
let b = args[1].immediate();
let a = self.ptrtoint(a, self.type_isize());
let b = self.ptrtoint(b, self.type_isize());
let d = self.sub(a, b);
let pointee_size = self.const_usize(pointee_size.bytes());
// this is where the signed magic happens (notice the `s` in `exactsdiv`)
self.exactsdiv(d, pointee_size)
}
_ => bug!("unknown intrinsic '{}'", name),
};
@ -807,39 +352,6 @@ fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
}
}
fn copy_intrinsic(
bx: &mut Builder<'a, 'll, 'tcx>,
allow_overlap: bool,
volatile: bool,
ty: Ty<'tcx>,
dst: &'ll Value,
src: &'ll Value,
count: &'ll Value,
) {
let (size, align) = bx.size_and_align_of(ty);
let size = bx.mul(bx.const_usize(size.bytes()), count);
let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
if allow_overlap {
bx.memmove(dst, align, src, align, size, flags);
} else {
bx.memcpy(dst, align, src, align, size, flags);
}
}
fn memset_intrinsic(
bx: &mut Builder<'a, 'll, 'tcx>,
volatile: bool,
ty: Ty<'tcx>,
dst: &'ll Value,
val: &'ll Value,
count: &'ll Value,
) {
let (size, align) = bx.size_and_align_of(ty);
let size = bx.mul(bx.const_usize(size.bytes()), count);
let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
bx.memset(dst, val, size, align, flags);
}
fn try_intrinsic(
bx: &mut Builder<'a, 'll, 'tcx>,
try_func: &'ll Value,
@ -2205,37 +1717,12 @@ macro_rules! arith {
// stuffs.
fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
match ty.kind() {
ty::Int(t) => Some((
match t {
ast::IntTy::Isize => u64::from(cx.tcx.sess.target.ptr_width),
ast::IntTy::I8 => 8,
ast::IntTy::I16 => 16,
ast::IntTy::I32 => 32,
ast::IntTy::I64 => 64,
ast::IntTy::I128 => 128,
},
true,
)),
ty::Uint(t) => Some((
match t {
ast::UintTy::Usize => u64::from(cx.tcx.sess.target.ptr_width),
ast::UintTy::U8 => 8,
ast::UintTy::U16 => 16,
ast::UintTy::U32 => 32,
ast::UintTy::U64 => 64,
ast::UintTy::U128 => 128,
},
false,
)),
_ => None,
}
}
// Returns the width of a float Ty
// Returns None if the type is not a float
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
match ty.kind() {
ty::Float(t) => Some(t.bit_width()),
ty::Int(t) => {
Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.ptr_width)), true))
}
ty::Uint(t) => {
Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.ptr_width)), false))
}
_ => None,
}
}

View file

@ -8,8 +8,6 @@
#![feature(or_patterns)]
#![feature(trusted_len)]
#![feature(associated_type_bounds)]
#![feature(const_fn)] // for rustc_index::newtype_index
#![feature(const_panic)] // for rustc_index::newtype_index
#![recursion_limit = "256"]
//! This crate contains codegen code that is used by all codegen backends (LLVM and others).

View file

@ -687,7 +687,8 @@ fn codegen_call_terminator(
})
.collect();
bx.codegen_intrinsic_call(
Self::codegen_intrinsic_call(
&mut bx,
*instance.as_ref().unwrap(),
&fn_abi,
&args,

View file

@ -0,0 +1,596 @@
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::FunctionCx;
use crate::common::{span_invalid_monomorphization_error, IntPredicate};
use crate::glue;
use crate::traits::*;
use crate::MemFlags;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::{sym, Span};
use rustc_target::abi::call::{FnAbi, PassMode};
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
allow_overlap: bool,
volatile: bool,
ty: Ty<'tcx>,
dst: Bx::Value,
src: Bx::Value,
count: Bx::Value,
) {
let layout = bx.layout_of(ty);
let size = layout.size;
let align = layout.align.abi;
let size = bx.mul(bx.const_usize(size.bytes()), count);
let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
if allow_overlap {
bx.memmove(dst, align, src, align, size, flags);
} else {
bx.memcpy(dst, align, src, align, size, flags);
}
}
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
volatile: bool,
ty: Ty<'tcx>,
dst: Bx::Value,
val: Bx::Value,
count: Bx::Value,
) {
let layout = bx.layout_of(ty);
let size = layout.size;
let align = layout.align.abi;
let size = bx.mul(bx.const_usize(size.bytes()), count);
let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
bx.memset(dst, val, size, align, flags);
}
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_intrinsic_call(
bx: &mut Bx,
instance: ty::Instance<'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
args: &[OperandRef<'tcx, Bx::Value>],
llresult: Bx::Value,
span: Span,
) {
let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());
let (def_id, substs) = match *callee_ty.kind() {
ty::FnDef(def_id, substs) => (def_id, substs),
_ => bug!("expected fn item type, found {}", callee_ty),
};
let sig = callee_ty.fn_sig(bx.tcx());
let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
let arg_tys = sig.inputs();
let ret_ty = sig.output();
let name = bx.tcx().item_name(def_id);
let name_str = &*name.as_str();
let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
let llval = match name {
sym::assume => {
bx.assume(args[0].immediate());
return;
}
sym::abort => {
bx.abort();
return;
}
sym::unreachable => {
return;
}
sym::va_start => bx.va_start(args[0].immediate()),
sym::va_end => bx.va_end(args[0].immediate()),
sym::size_of_val => {
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llsize
} else {
bx.const_usize(bx.layout_of(tp_ty).size.bytes())
}
}
sym::min_align_of_val => {
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llalign
} else {
bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
}
}
sym::size_of
| sym::pref_align_of
| sym::min_align_of
| sym::needs_drop
| sym::type_id
| sym::type_name
| sym::variant_count => {
let value = bx
.tcx()
.const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
.unwrap();
OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
}
// Effectively no-op
sym::forget => {
return;
}
sym::offset => {
let ptr = args[0].immediate();
let offset = args[1].immediate();
bx.inbounds_gep(ptr, &[offset])
}
sym::arith_offset => {
let ptr = args[0].immediate();
let offset = args[1].immediate();
bx.gep(ptr, &[offset])
}
sym::copy_nonoverlapping => {
copy_intrinsic(
bx,
false,
false,
substs.type_at(0),
args[1].immediate(),
args[0].immediate(),
args[2].immediate(),
);
return;
}
sym::copy => {
copy_intrinsic(
bx,
true,
false,
substs.type_at(0),
args[1].immediate(),
args[0].immediate(),
args[2].immediate(),
);
return;
}
sym::write_bytes => {
memset_intrinsic(
bx,
false,
substs.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
);
return;
}
sym::volatile_copy_nonoverlapping_memory => {
copy_intrinsic(
bx,
false,
true,
substs.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
);
return;
}
sym::volatile_copy_memory => {
copy_intrinsic(
bx,
true,
true,
substs.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
);
return;
}
sym::volatile_set_memory => {
memset_intrinsic(
bx,
true,
substs.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
);
return;
}
sym::volatile_store => {
let dst = args[0].deref(bx.cx());
args[1].val.volatile_store(bx, dst);
return;
}
sym::unaligned_volatile_store => {
let dst = args[0].deref(bx.cx());
args[1].val.unaligned_volatile_store(bx, dst);
return;
}
sym::add_with_overflow
| sym::sub_with_overflow
| sym::mul_with_overflow
| sym::wrapping_add
| sym::wrapping_sub
| sym::wrapping_mul
| sym::unchecked_div
| sym::unchecked_rem
| sym::unchecked_shl
| sym::unchecked_shr
| sym::unchecked_add
| sym::unchecked_sub
| sym::unchecked_mul
| sym::exact_div => {
let ty = arg_tys[0];
match int_type_width_signed(ty, bx.tcx()) {
Some((_width, signed)) => match name {
sym::add_with_overflow
| sym::sub_with_overflow
| sym::mul_with_overflow => {
let op = match name {
sym::add_with_overflow => OverflowOp::Add,
sym::sub_with_overflow => OverflowOp::Sub,
sym::mul_with_overflow => OverflowOp::Mul,
_ => bug!(),
};
let (val, overflow) =
bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
// Convert `i1` to a `bool`, and write it to the out parameter
let val = bx.from_immediate(val);
let overflow = bx.from_immediate(overflow);
let dest = result.project_field(bx, 0);
bx.store(val, dest.llval, dest.align);
let dest = result.project_field(bx, 1);
bx.store(overflow, dest.llval, dest.align);
return;
}
sym::wrapping_add => bx.add(args[0].immediate(), args[1].immediate()),
sym::wrapping_sub => bx.sub(args[0].immediate(), args[1].immediate()),
sym::wrapping_mul => bx.mul(args[0].immediate(), args[1].immediate()),
sym::exact_div => {
if signed {
bx.exactsdiv(args[0].immediate(), args[1].immediate())
} else {
bx.exactudiv(args[0].immediate(), args[1].immediate())
}
}
sym::unchecked_div => {
if signed {
bx.sdiv(args[0].immediate(), args[1].immediate())
} else {
bx.udiv(args[0].immediate(), args[1].immediate())
}
}
sym::unchecked_rem => {
if signed {
bx.srem(args[0].immediate(), args[1].immediate())
} else {
bx.urem(args[0].immediate(), args[1].immediate())
}
}
sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
sym::unchecked_shr => {
if signed {
bx.ashr(args[0].immediate(), args[1].immediate())
} else {
bx.lshr(args[0].immediate(), args[1].immediate())
}
}
sym::unchecked_add => {
if signed {
bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
} else {
bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
}
}
sym::unchecked_sub => {
if signed {
bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
} else {
bx.unchecked_usub(args[0].immediate(), args[1].immediate())
}
}
sym::unchecked_mul => {
if signed {
bx.unchecked_smul(args[0].immediate(), args[1].immediate())
} else {
bx.unchecked_umul(args[0].immediate(), args[1].immediate())
}
}
_ => bug!(),
},
None => {
span_invalid_monomorphization_error(
bx.tcx().sess,
span,
&format!(
"invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`",
name, ty
),
);
return;
}
}
}
sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
match float_type_width(arg_tys[0]) {
Some(_width) => match name {
sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
_ => bug!(),
},
None => {
span_invalid_monomorphization_error(
bx.tcx().sess,
span,
&format!(
"invalid monomorphization of `{}` intrinsic: \
expected basic float type, found `{}`",
name, arg_tys[0]
),
);
return;
}
}
}
sym::float_to_int_unchecked => {
if float_type_width(arg_tys[0]).is_none() {
span_invalid_monomorphization_error(
bx.tcx().sess,
span,
&format!(
"invalid monomorphization of `float_to_int_unchecked` \
intrinsic: expected basic float type, \
found `{}`",
arg_tys[0]
),
);
return;
}
let (_width, signed) = match int_type_width_signed(ret_ty, bx.tcx()) {
Some(pair) => pair,
None => {
span_invalid_monomorphization_error(
bx.tcx().sess,
span,
&format!(
"invalid monomorphization of `float_to_int_unchecked` \
intrinsic: expected basic integer type, \
found `{}`",
ret_ty
),
);
return;
}
};
if signed {
bx.fptosi(args[0].immediate(), llret_ty)
} else {
bx.fptoui(args[0].immediate(), llret_ty)
}
}
sym::discriminant_value => {
if ret_ty.is_integral() {
args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
} else {
span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
}
}
// This requires that atomic intrinsics follow a specific naming pattern:
// "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
name if name_str.starts_with("atomic_") => {
use crate::common::AtomicOrdering::*;
use crate::common::{AtomicRmwBinOp, SynchronizationScope};
let split: Vec<&str> = name_str.split('_').collect();
let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
let (order, failorder) = match split.len() {
2 => (SequentiallyConsistent, SequentiallyConsistent),
3 => match split[2] {
"unordered" => (Unordered, Unordered),
"relaxed" => (Monotonic, Monotonic),
"acq" => (Acquire, Acquire),
"rel" => (Release, Monotonic),
"acqrel" => (AcquireRelease, Acquire),
"failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
"failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
_ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
},
4 => match (split[2], split[3]) {
("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
_ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
},
_ => bx.sess().fatal("Atomic intrinsic not in correct format"),
};
let invalid_monomorphization = |ty| {
span_invalid_monomorphization_error(
bx.tcx().sess,
span,
&format!(
"invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`",
name, ty
),
);
};
match split[1] {
"cxchg" | "cxchgweak" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() {
let weak = split[1] == "cxchgweak";
let pair = bx.atomic_cmpxchg(
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
order,
failorder,
weak,
);
let val = bx.extract_value(pair, 0);
let success = bx.extract_value(pair, 1);
let val = bx.from_immediate(val);
let success = bx.from_immediate(success);
let dest = result.project_field(bx, 0);
bx.store(val, dest.llval, dest.align);
let dest = result.project_field(bx, 1);
bx.store(success, dest.llval, dest.align);
return;
} else {
return invalid_monomorphization(ty);
}
}
"load" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() {
let size = bx.layout_of(ty).size;
bx.atomic_load(args[0].immediate(), order, size)
} else {
return invalid_monomorphization(ty);
}
}
"store" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() {
let size = bx.layout_of(ty).size;
bx.atomic_store(args[1].immediate(), args[0].immediate(), order, size);
return;
} else {
return invalid_monomorphization(ty);
}
}
"fence" => {
bx.atomic_fence(order, SynchronizationScope::CrossThread);
return;
}
"singlethreadfence" => {
bx.atomic_fence(order, SynchronizationScope::SingleThread);
return;
}
// These are all AtomicRMW ops
op => {
let atom_op = match op {
"xchg" => AtomicRmwBinOp::AtomicXchg,
"xadd" => AtomicRmwBinOp::AtomicAdd,
"xsub" => AtomicRmwBinOp::AtomicSub,
"and" => AtomicRmwBinOp::AtomicAnd,
"nand" => AtomicRmwBinOp::AtomicNand,
"or" => AtomicRmwBinOp::AtomicOr,
"xor" => AtomicRmwBinOp::AtomicXor,
"max" => AtomicRmwBinOp::AtomicMax,
"min" => AtomicRmwBinOp::AtomicMin,
"umax" => AtomicRmwBinOp::AtomicUMax,
"umin" => AtomicRmwBinOp::AtomicUMin,
_ => bx.sess().fatal("unknown atomic operation"),
};
let ty = substs.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() {
bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
} else {
return invalid_monomorphization(ty);
}
}
}
}
sym::nontemporal_store => {
let dst = args[0].deref(bx.cx());
args[1].val.nontemporal_store(bx, dst);
return;
}
sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
let a = args[0].immediate();
let b = args[1].immediate();
if name == sym::ptr_guaranteed_eq {
bx.icmp(IntPredicate::IntEQ, a, b)
} else {
bx.icmp(IntPredicate::IntNE, a, b)
}
}
sym::ptr_offset_from => {
let ty = substs.type_at(0);
let pointee_size = bx.layout_of(ty).size;
// This is the same sequence that Clang emits for pointer subtraction.
// It can be neither `nsw` nor `nuw` because the input is treated as
// unsigned but then the output is treated as signed, so neither works.
let a = args[0].immediate();
let b = args[1].immediate();
let a = bx.ptrtoint(a, bx.type_isize());
let b = bx.ptrtoint(b, bx.type_isize());
let d = bx.sub(a, b);
let pointee_size = bx.const_usize(pointee_size.bytes());
// this is where the signed magic happens (notice the `s` in `exactsdiv`)
bx.exactsdiv(d, pointee_size)
}
_ => {
// Need to use backend-specific things in the implementation.
bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
return;
}
};
if !fn_abi.ret.is_ignore() {
if let PassMode::Cast(ty) = fn_abi.ret.mode {
let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
let ptr = bx.pointercast(result.llval, ptr_llty);
bx.store(llval, ptr, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
.val
.store(bx, result);
}
}
}
}
// Returns the width of an int Ty, and if it's signed or not
// Returns None if the type is not an integer
// FIXME: theres multiple of this functions, investigate using some of the already existing
// stuffs.
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
match ty.kind() {
ty::Int(t) => Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.ptr_width)), true)),
ty::Uint(t) => Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.ptr_width)), false)),
_ => None,
}
}
// Returns the width of a float Ty
// Returns None if the type is not a float
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
match ty.kind() {
ty::Float(t) => Some(t.bit_width()),
_ => None,
}
}
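
The new backend-independent implementation above keys atomic intrinsics off their names, as its comment notes: `atomic_<operation>[_<ordering>]`, with SeqCst assumed when the ordering suffix is missing. A hedged standalone sketch of that naming convention (not the compiler's code; it ignores the extra compare-exchange failure orderings the real match also handles):

fn parse_atomic(name: &str) -> Option<(&str, &str)> {
    // Split "atomic_xadd_relaxed" into ("xadd", "relaxed"); a missing ordering
    // suffix means sequentially consistent.
    let rest = name.strip_prefix("atomic_")?;
    Some(match rest.split_once('_') {
        Some((op, ordering)) => (op, ordering),
        None => (rest, "seqcst"),
    })
}

fn main() {
    assert_eq!(parse_atomic("atomic_xadd_relaxed"), Some(("xadd", "relaxed")));
    assert_eq!(parse_atomic("atomic_load"), Some(("load", "seqcst")));
    assert_eq!(parse_atomic("not_atomic"), None);
}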

View file

@ -486,6 +486,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
pub mod constant;
pub mod coverageinfo;
pub mod debuginfo;
mod intrinsic;
pub mod operand;
pub mod place;
mod rvalue;

View file

@ -8,10 +8,10 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
#![allow(incomplete_features)]
#![feature(array_windows)]
#![feature(control_flow_enum)]
#![feature(in_band_lifetimes)]
#![feature(unboxed_closures)]
#![feature(generators)]
#![feature(generator_trait)]
#![feature(fn_traits)]
#![feature(int_bits_const)]
@ -27,7 +27,7 @@
#![feature(thread_id_value)]
#![feature(extend_one)]
#![feature(const_panic)]
#![feature(const_generics)]
#![feature(min_const_generics)]
#![feature(once_cell)]
#![allow(rustc::default_hash_types)]

View file

@ -34,7 +34,7 @@ pub fn new() -> SortedMap<K, V> {
/// and that there are no duplicates.
#[inline]
pub fn from_presorted_elements(elements: Vec<(K, V)>) -> SortedMap<K, V> {
debug_assert!(elements.windows(2).all(|w| w[0].0 < w[1].0));
debug_assert!(elements.array_windows().all(|[fst, snd]| fst.0 < snd.0));
SortedMap { data: elements }
}
@ -159,7 +159,7 @@ pub fn insert_presorted(&mut self, mut elements: Vec<(K, V)>) {
return;
}
debug_assert!(elements.windows(2).all(|w| w[0].0 < w[1].0));
debug_assert!(elements.array_windows().all(|[fst, snd]| fst.0 < snd.0));
let start_index = self.lookup_index_for(&elements[0].0);
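
The `debug_assert!`s above move from `windows(2)`, which yields slices that must be indexed, to `array_windows`, which yields fixed-size arrays that can be destructured. A small standalone sketch of the idiom (the feature was nightly-only at the time of this commit):

#![feature(array_windows)]

fn is_strictly_sorted<T: Ord>(xs: &[T]) -> bool {
    // Each item is a `&[T; 2]`, so the pair can be destructured directly
    // instead of writing `w[0] < w[1]` against an unsized slice.
    xs.array_windows().all(|[a, b]| a < b)
}

fn main() {
    assert!(is_strictly_sorted(&[1, 2, 3]));
    assert!(!is_strictly_sorted(&[3, 1, 2]));
}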

View file

@ -4,8 +4,7 @@
use std::io;
pub fn arg_expand(arg: String) -> Result<Vec<String>, Error> {
if arg.starts_with('@') {
let path = &arg[1..];
if let Some(path) = arg.strip_prefix('@') {
let file = match fs::read_to_string(path) {
Ok(file) => file,
Err(ref err) if err.kind() == io::ErrorKind::InvalidData => {

View file

@ -1,5 +1,4 @@
#![feature(bool_to_option)]
#![feature(cow_is_borrowed)]
#![feature(crate_visibility_modifier)]
#![feature(decl_macro)]
#![feature(or_patterns)]

View file

@ -13,7 +13,6 @@
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(bindings_after_at)]
#![feature(bool_to_option)]
#![feature(box_patterns)]
#![feature(box_syntax)]
@ -23,7 +22,6 @@
#![feature(never_type)]
#![feature(or_patterns)]
#![feature(in_band_lifetimes)]
#![feature(crate_visibility_modifier)]
#![recursion_limit = "512"] // For rustdoc
#[macro_use]

View file

@ -27,6 +27,7 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
#![cfg_attr(test, feature(test))]
#![feature(array_windows)]
#![feature(bool_to_option)]
#![feature(box_syntax)]
#![feature(crate_visibility_modifier)]

View file

@ -70,9 +70,9 @@ fn is_camel_case(name: &str) -> bool {
// ones (some scripts don't have a concept of upper/lowercase)
!name.chars().next().unwrap().is_lowercase()
&& !name.contains("__")
&& !name.chars().collect::<Vec<_>>().windows(2).any(|pair| {
&& !name.chars().collect::<Vec<_>>().array_windows().any(|&[fst, snd]| {
// contains a capitalisable character followed by, or preceded by, an underscore
char_has_case(pair[0]) && pair[1] == '_' || char_has_case(pair[1]) && pair[0] == '_'
char_has_case(fst) && snd == '_' || char_has_case(snd) && fst == '_'
})
}

View file

@ -23,6 +23,7 @@
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(array_windows)]
#![feature(backtrace)]
#![feature(bool_to_option)]
#![feature(box_patterns)]
@ -30,12 +31,9 @@
#![feature(cmp_min_max_by)]
#![feature(const_fn)]
#![feature(const_panic)]
#![feature(const_fn_transmute)]
#![feature(core_intrinsics)]
#![feature(discriminant_kind)]
#![feature(drain_filter)]
#![feature(never_type)]
#![feature(exhaustive_patterns)]
#![feature(extern_types)]
#![feature(nll)]
#![feature(once_cell)]
@ -43,13 +41,11 @@
#![feature(or_patterns)]
#![feature(min_specialization)]
#![feature(trusted_len)]
#![feature(stmt_expr_attributes)]
#![feature(test)]
#![feature(in_band_lifetimes)]
#![feature(crate_visibility_modifier)]
#![feature(associated_type_bounds)]
#![feature(rustc_attrs)]
#![feature(hash_raw_entry)]
#![feature(int_error_matching)]
#![recursion_limit = "512"]

View file

@ -2286,7 +2286,7 @@ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
/// Constants
///
/// Two constants are equal if they are the same constant. Note that
/// this does not necessarily mean that they are `==` in Rust -- in
/// this does not necessarily mean that they are `==` in Rust. In
/// particular, one must be wary of `NaN`!
#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, HashStable)]
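
A short illustration of the caveat in the doc comment above: two occurrences of the same constant can still compare unequal with `==` when the value is a float `NaN`.

fn main() {
    let a = f64::NAN;
    let b = f64::NAN;
    assert_eq!(a.to_bits(), b.to_bits()); // bit-for-bit the same constant
    assert!(a != b);                      // yet `==` is false, because NaN != NaN
}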

View file

@ -2419,7 +2419,7 @@ pub fn intern_existential_predicates(
eps: &[ExistentialPredicate<'tcx>],
) -> &'tcx List<ExistentialPredicate<'tcx>> {
assert!(!eps.is_empty());
assert!(eps.windows(2).all(|w| w[0].stable_cmp(self, &w[1]) != Ordering::Greater));
assert!(eps.array_windows().all(|[a, b]| a.stable_cmp(self, b) != Ordering::Greater));
self._intern_existential_predicates(eps)
}

View file

@ -492,8 +492,8 @@ fn add_move_error_suggestions(&self, err: &mut DiagnosticBuilder<'a>, binds_to:
{
if let Ok(pat_snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(pat_span)
{
if pat_snippet.starts_with('&') {
let pat_snippet = pat_snippet[1..].trim_start();
if let Some(stripped) = pat_snippet.strip_prefix('&') {
let pat_snippet = stripped.trim_start();
let (suggestion, to_remove) = if pat_snippet.starts_with("mut")
&& pat_snippet["mut".len()..].starts_with(rustc_lexer::is_whitespace)
{

View file

@ -631,9 +631,8 @@ fn suggest_ampmut<'tcx>(
let lt_name = &src[1..ws_pos];
let ty = &src[ws_pos..];
return (assignment_rhs_span, format!("&{} mut {}", lt_name, ty));
} else if src.starts_with('&') {
let borrowed_expr = &src[1..];
return (assignment_rhs_span, format!("&mut {}", borrowed_expr));
} else if let Some(stripped) = src.strip_prefix('&') {
return (assignment_rhs_span, format!("&mut {}", stripped));
}
}
}

View file

@ -1,6 +1,7 @@
//! A helpful diagram for debugging dataflow problems.
use std::borrow::Cow;
use std::lazy::SyncOnceCell;
use std::{io, ops, str};
use regex::Regex;
@ -570,6 +571,13 @@ fn visit_terminator_after_primary_effect(
}
}
macro_rules! regex {
($re:literal $(,)?) => {{
static RE: SyncOnceCell<regex::Regex> = SyncOnceCell::new();
RE.get_or_init(|| Regex::new($re).unwrap())
}};
}
fn diff_pretty<T, C>(new: T, old: T, ctxt: &C) -> String
where
T: DebugWithContext<C>,
@ -578,7 +586,7 @@ fn diff_pretty<T, C>(new: T, old: T, ctxt: &C) -> String
return String::new();
}
let re = Regex::new("\t?\u{001f}([+-])").unwrap();
let re = regex!("\t?\u{001f}([+-])");
let raw_diff = format!("{:#?}", DebugDiffWithAdapter { new, old, ctxt });
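
The `regex!` macro above caches the compiled `Regex` in a `SyncOnceCell`, so it is built once rather than on every call to `diff_pretty`. A hedged standalone sketch of the same pattern, using the stable `std::sync::OnceLock` (the successor of the nightly `SyncOnceCell`) and assuming the `regex` crate is available:

use regex::Regex;
use std::sync::OnceLock;

macro_rules! regex {
    ($re:literal $(,)?) => {{
        // One static per expansion site; initialized on first use only.
        static RE: OnceLock<Regex> = OnceLock::new();
        RE.get_or_init(|| Regex::new($re).unwrap())
    }};
}

fn count_markers(s: &str) -> usize {
    // Compiled on the first call, reused on every later one.
    let re = regex!("\t?\u{001f}([+-])");
    re.find_iter(s).count()
}

fn main() {
    assert_eq!(count_markers("a\u{001f}+b\u{001f}-c"), 2);
}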

View file

@ -6,6 +6,7 @@
#![feature(nll)]
#![feature(in_band_lifetimes)]
#![feature(array_windows)]
#![feature(bindings_after_at)]
#![feature(bool_to_option)]
#![feature(box_patterns)]
@ -14,20 +15,18 @@
#![feature(const_panic)]
#![feature(crate_visibility_modifier)]
#![feature(decl_macro)]
#![feature(drain_filter)]
#![feature(exact_size_is_empty)]
#![feature(exhaustive_patterns)]
#![feature(iter_order_by)]
#![feature(never_type)]
#![feature(min_specialization)]
#![feature(trusted_len)]
#![feature(try_blocks)]
#![feature(associated_type_bounds)]
#![feature(associated_type_defaults)]
#![feature(stmt_expr_attributes)]
#![feature(trait_alias)]
#![feature(option_expect_none)]
#![feature(or_patterns)]
#![feature(once_cell)]
#![recursion_limit = "256"]
#[macro_use]

View file

@ -277,14 +277,8 @@ fn assert_symbols_are_distinct<'a, 'tcx, I>(tcx: TyCtxt<'tcx>, mono_items: I)
symbols.sort_by_key(|sym| sym.1);
for pair in symbols.windows(2) {
let sym1 = &pair[0].1;
let sym2 = &pair[1].1;
for &[(mono_item1, ref sym1), (mono_item2, ref sym2)] in symbols.array_windows() {
if sym1 == sym2 {
let mono_item1 = pair[0].0;
let mono_item2 = pair[1].0;
let span1 = mono_item1.local_span(tcx);
let span2 = mono_item2.local_span(tcx);

View file

@ -1,7 +1,7 @@
//! Construction of MIR from HIR.
//!
//! This crate also contains the match exhaustiveness and usefulness checking.
#![feature(array_windows)]
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(const_fn)]

View file

@ -2299,19 +2299,19 @@ fn range_borders(r: IntRange<'_>) -> impl Iterator<Item = Border> {
// interval into a constructor.
split_ctors.extend(
borders
.windows(2)
.filter_map(|window| match (window[0], window[1]) {
(Border::JustBefore(n), Border::JustBefore(m)) => {
.array_windows()
.filter_map(|&pair| match pair {
[Border::JustBefore(n), Border::JustBefore(m)] => {
if n < m {
Some(IntRange { range: n..=(m - 1), ty, span })
} else {
None
}
}
(Border::JustBefore(n), Border::AfterMax) => {
[Border::JustBefore(n), Border::AfterMax] => {
Some(IntRange { range: n..=u128::MAX, ty, span })
}
(Border::AfterMax, _) => None,
[Border::AfterMax, _] => None,
})
.map(IntRange),
);

View file

@ -3,7 +3,6 @@
#![feature(bool_to_option)]
#![feature(crate_visibility_modifier)]
#![feature(bindings_after_at)]
#![feature(try_blocks)]
#![feature(or_patterns)]
use rustc_ast as ast;

View file

@ -11,8 +11,6 @@
)]
#![feature(nll)]
#![feature(or_patterns)]
#![feature(rustc_private)]
#![feature(unicode_internals)]
#![feature(bool_to_option)]
pub use Alignment::*;

View file

@ -1,7 +1,6 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(in_band_lifetimes)]
#![feature(nll)]
#![feature(or_patterns)]
#![recursion_limit = "256"]
use rustc_attr as attr;

View file

@ -1418,9 +1418,9 @@ impl<'tcx> LifetimeContext<'_, 'tcx> {
if snippet.starts_with('&') && !snippet.starts_with("&'") {
introduce_suggestion
.push((param.span, format!("&{} {}", lt_name, &snippet[1..])));
} else if snippet.starts_with("&'_ ") {
} else if let Some(stripped) = snippet.strip_prefix("&'_ ") {
introduce_suggestion
.push((param.span, format!("&{} {}", lt_name, &snippet[4..])));
.push((param.span, format!("&{} {}", lt_name, stripped)));
}
}
}

View file

@ -56,16 +56,16 @@ pub fn matches(&self, kind: PathKind) -> bool {
impl SearchPath {
pub fn from_cli_opt(path: &str, output: config::ErrorOutputType) -> Self {
let (kind, path) = if path.starts_with("native=") {
(PathKind::Native, &path["native=".len()..])
} else if path.starts_with("crate=") {
(PathKind::Crate, &path["crate=".len()..])
} else if path.starts_with("dependency=") {
(PathKind::Dependency, &path["dependency=".len()..])
} else if path.starts_with("framework=") {
(PathKind::Framework, &path["framework=".len()..])
} else if path.starts_with("all=") {
(PathKind::All, &path["all=".len()..])
let (kind, path) = if let Some(stripped) = path.strip_prefix("native=") {
(PathKind::Native, stripped)
} else if let Some(stripped) = path.strip_prefix("crate=") {
(PathKind::Crate, stripped)
} else if let Some(stripped) = path.strip_prefix("dependency=") {
(PathKind::Dependency, stripped)
} else if let Some(stripped) = path.strip_prefix("framework=") {
(PathKind::Framework, stripped)
} else if let Some(stripped) = path.strip_prefix("all=") {
(PathKind::All, stripped)
} else {
(PathKind::All, path)
};
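
A minimal standalone sketch of the idiom adopted above: `strip_prefix` both tests for the prefix and hands back the remainder, so the `&path["native=".len()..]` arithmetic disappears. The `split_kind` helper and its return type are hypothetical, for illustration only:

fn split_kind(path: &str) -> (&str, &str) {
    if let Some(rest) = path.strip_prefix("native=") {
        ("native", rest)
    } else if let Some(rest) = path.strip_prefix("crate=") {
        ("crate", rest)
    } else {
        // No recognised prefix: treat the whole string as the path.
        ("all", path)
    }
}

fn main() {
    assert_eq!(split_kind("native=/usr/lib"), ("native", "/usr/lib"));
    assert_eq!(split_kind("/usr/lib"), ("all", "/usr/lib"));
}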

View file

@ -5,15 +5,14 @@
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(array_windows)]
#![feature(crate_visibility_modifier)]
#![feature(const_fn)]
#![feature(const_panic)]
#![feature(negative_impls)]
#![feature(nll)]
#![feature(optin_builtin_traits)]
#![feature(min_specialization)]
#![feature(option_expect_none)]
#![feature(refcell_take)]
#[macro_use]
extern crate rustc_macros;
@ -1158,7 +1157,12 @@ fn encode(&self, s: &mut S) -> Result<(), S::Error> {
let max_line_length = if lines.len() == 1 {
0
} else {
lines.windows(2).map(|w| w[1] - w[0]).map(|bp| bp.to_usize()).max().unwrap()
lines
.array_windows()
.map(|&[fst, snd]| snd - fst)
.map(|bp| bp.to_usize())
.max()
.unwrap()
};
let bytes_per_diff: u8 = match max_line_length {
@ -1173,7 +1177,7 @@ fn encode(&self, s: &mut S) -> Result<(), S::Error> {
// Encode the first element.
lines[0].encode(s)?;
let diff_iter = (&lines[..]).windows(2).map(|w| (w[1] - w[0]));
let diff_iter = lines[..].array_windows().map(|&[fst, snd]| snd - fst);
match bytes_per_diff {
1 => {

View file

@ -4,7 +4,6 @@
#![feature(crate_visibility_modifier)]
#![feature(in_band_lifetimes)]
#![feature(nll)]
#![feature(or_patterns)]
#![recursion_limit = "256"]
#[macro_use]

View file

@ -5,7 +5,6 @@
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(bool_to_option)]
#![feature(nll)]
#![recursion_limit = "256"]

View file

@ -370,7 +370,11 @@ fn replace_prefix<A, B, C>(&self, s: A, old: B, new: C) -> Option<String>
{
let s = s.as_ref();
let old = old.as_ref();
if s.starts_with(old) { Some(new.as_ref().to_owned() + &s[old.len()..]) } else { None }
if let Some(stripped) = s.strip_prefix(old) {
Some(new.as_ref().to_owned() + stripped)
} else {
None
}
}
/// This function is used to determine potential "simple" improvements or users' errors and

View file

@ -589,10 +589,10 @@ fn check_str_addition(
} else {
msg
},
if lstring.starts_with('&') {
if let Some(stripped) = lstring.strip_prefix('&') {
// let a = String::new();
// let _ = &a + "bar";
lstring[1..].to_string()
stripped.to_string()
} else {
format!("{}.to_owned()", lstring)
},
@ -617,10 +617,10 @@ fn check_str_addition(
is_assign,
) {
(Ok(l), Ok(r), IsAssign::No) => {
let to_string = if l.starts_with('&') {
let to_string = if let Some(stripped) = l.strip_prefix('&') {
// let a = String::new(); let b = String::new();
// let _ = &a + b;
l[1..].to_string()
stripped.to_string()
} else {
format!("{}.to_owned()", l)
};

View file

@ -2343,8 +2343,8 @@ fn from_target_feature(
item.span(),
format!("`{}` is not valid for this target", feature),
);
if feature.starts_with('+') {
let valid = supported_target_features.contains_key(&feature[1..]);
if let Some(stripped) = feature.strip_prefix('+') {
let valid = supported_target_features.contains_key(stripped);
if valid {
err.help("consider removing the leading `+` in the feature name");
}

View file

@ -16,6 +16,9 @@ pub struct DormantMutRef<'a, T> {
_marker: PhantomData<&'a mut T>,
}
unsafe impl<'a, T> Sync for DormantMutRef<'a, T> where &'a mut T: Sync {}
unsafe impl<'a, T> Send for DormantMutRef<'a, T> where &'a mut T: Send {}
impl<'a, T> DormantMutRef<'a, T> {
/// Capture a unique borrow, and immediately reborrow it. For the compiler,
/// the lifetime of the new reference is the same as the lifetime of the

View file

@ -1418,6 +1418,146 @@ fn vals<'a, 'new>(v: Values<'a, (), &'static str>) -> Values<'a, (), &'new str>
}
}
#[test]
#[allow(dead_code)]
fn test_sync() {
fn map<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
v
}
fn into_iter<T: Sync>(v: BTreeMap<T, T>) -> impl Sync {
v.into_iter()
}
fn into_keys<T: Sync + Ord>(v: BTreeMap<T, T>) -> impl Sync {
v.into_keys()
}
fn into_values<T: Sync + Ord>(v: BTreeMap<T, T>) -> impl Sync {
v.into_values()
}
fn drain_filter<T: Sync + Ord>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
v.drain_filter(|_, _| false)
}
fn iter<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
v.iter()
}
fn iter_mut<T: Sync>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
v.iter_mut()
}
fn keys<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
v.keys()
}
fn values<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
v.values()
}
fn values_mut<T: Sync>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
v.values_mut()
}
fn range<T: Sync + Ord>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
v.range(..)
}
fn range_mut<T: Sync + Ord>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
v.range_mut(..)
}
fn entry<T: Sync + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
v.entry(Default::default())
}
fn occupied_entry<T: Sync + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
match v.entry(Default::default()) {
Occupied(entry) => entry,
_ => unreachable!(),
}
}
fn vacant_entry<T: Sync + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
match v.entry(Default::default()) {
Vacant(entry) => entry,
_ => unreachable!(),
}
}
}
#[test]
#[allow(dead_code)]
fn test_send() {
fn map<T: Send>(v: BTreeMap<T, T>) -> impl Send {
v
}
fn into_iter<T: Send>(v: BTreeMap<T, T>) -> impl Send {
v.into_iter()
}
fn into_keys<T: Send + Ord>(v: BTreeMap<T, T>) -> impl Send {
v.into_keys()
}
fn into_values<T: Send + Ord>(v: BTreeMap<T, T>) -> impl Send {
v.into_values()
}
fn drain_filter<T: Send + Ord>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
v.drain_filter(|_, _| false)
}
fn iter<T: Send + Sync>(v: &BTreeMap<T, T>) -> impl Send + '_ {
v.iter()
}
fn iter_mut<T: Send + Sync>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
v.iter_mut()
}
fn keys<T: Send + Sync>(v: &BTreeMap<T, T>) -> impl Send + '_ {
v.keys()
}
fn values<T: Send + Sync>(v: &BTreeMap<T, T>) -> impl Send + '_ {
v.values()
}
fn values_mut<T: Send + Sync>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
v.values_mut()
}
fn range<T: Send + Sync + Ord>(v: &BTreeMap<T, T>) -> impl Send + '_ {
v.range(..)
}
fn range_mut<T: Send + Sync + Ord>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
v.range_mut(..)
}
fn entry<T: Send + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
v.entry(Default::default())
}
fn occupied_entry<T: Send + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
match v.entry(Default::default()) {
Occupied(entry) => entry,
_ => unreachable!(),
}
}
fn vacant_entry<T: Send + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
match v.entry(Default::default()) {
Vacant(entry) => entry,
_ => unreachable!(),
}
}
}
#[test]
fn test_occupied_entry_key() {
let mut a = BTreeMap::new();
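
The new `test_sync` and `test_send` functions above never run any assertions; they are compile-time checks that only type-check if the returned values really implement the auto traits. A minimal standalone version of the same pattern:

use std::collections::BTreeMap;

#[allow(dead_code)]
fn map_is_send<T: Send>(v: BTreeMap<T, T>) -> impl Send {
    // Only compiles if `BTreeMap<T, T>: Send` whenever `T: Send`.
    v
}

#[allow(dead_code)]
fn iter_is_sync<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
    // Only compiles if the borrowing iterator is `Sync` for `Sync` contents.
    v.iter()
}

fn main() {} // nothing to run; the check happens entirely at compile time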

View file

@ -6,8 +6,6 @@
//!
//! The [`escape_default`] function provides an iterator over the bytes of an
//! escaped version of the character given.
//!
//! [`escape_default`]: fn.escape_default.html
#![stable(feature = "core_ascii", since = "1.26.0")]
@ -20,8 +18,6 @@
///
/// This `struct` is created by the [`escape_default`] function. See its
/// documentation for more.
///
/// [`escape_default`]: fn.escape_default.html
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct EscapeDefault {

View file

@ -7,10 +7,8 @@
/// Creates a future which never resolves, representing a computation that never
/// finishes.
///
/// This `struct` is created by the [`pending`] function. See its
/// This `struct` is created by [`pending()`]. See its
/// documentation for more.
///
/// [`pending`]: fn.pending.html
#[stable(feature = "future_readiness_fns", since = "1.48.0")]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Pending<T> {

View file

@ -33,10 +33,8 @@ pub fn poll_fn<T, F>(f: F) -> PollFn<F>
/// A Future that wraps a function returning `Poll`.
///
/// This `struct` is created by the [`poll_fn`] function. See its
/// This `struct` is created by [`poll_fn()`]. See its
/// documentation for more.
///
/// [`poll_fn`]: fn.poll_fn.html
#[must_use = "futures do nothing unless you `.await` or poll them"]
#[unstable(feature = "future_poll_fn", issue = "72302")]
pub struct PollFn<F> {

View file

@ -4,10 +4,8 @@
/// Creates a future that is immediately ready with a value.
///
/// This `struct` is created by the [`ready`] function. See its
/// This `struct` is created by [`ready()`]. See its
/// documentation for more.
///
/// [`ready`]: fn.ready.html
#[stable(feature = "future_readiness_fns", since = "1.48.0")]
#[derive(Debug, Clone)]
#[must_use = "futures do nothing unless you `.await` or poll them"]

View file

@ -111,7 +111,7 @@ pub fn spin_loop() {
#[cfg_attr(not(miri), inline)]
#[cfg_attr(miri, inline(never))]
#[unstable(feature = "test", issue = "50297")]
#[allow(unreachable_code)] // this makes #[cfg] a bit easier below.
#[cfg_attr(miri, allow(unused_mut))]
pub fn black_box<T>(mut dummy: T) -> T {
// We need to "use" the argument in some way LLVM can't introspect, and on
// targets that support it we can typically leverage inline assembly to do
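
The attribute added above only suppresses the lint when compiling for Miri, where the inline-assembly path is unavailable and `dummy` is never mutated. A hedged sketch of how `cfg_attr` scopes a lint override like that (hypothetical function, not the real `black_box` body):

#[cfg_attr(miri, allow(unused_mut))]
pub fn pass_through<T>(mut value: T) -> T {
    #[cfg(not(miri))]
    {
        // In a real implementation this branch would hand `value` to an
        // optimizer-opaque sink that needs a mutable borrow; under Miri the
        // block is compiled out, so `mut` would otherwise warn as unused.
        let _ = &mut value;
    }
    value
}

fn main() {
    assert_eq!(pass_through(7), 7);
}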

View file

@ -1502,8 +1502,6 @@ unsafe impl<A> TrustedLen for IterMut<'_, A> {}
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`Option::into_iter`] function.
///
/// [`Option::into_iter`]: enum.Option.html#method.into_iter
#[derive(Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<A> {

View file

@ -957,7 +957,7 @@ pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> {
///
/// This is the const generic equivalent of [`windows`].
///
/// If `N` is smaller than the size of the array, it will return no windows.
/// If `N` is greater than the size of the slice, it will return no windows.
///
/// # Panics
///
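
The corrected sentence above describes the edge case where `N` exceeds the slice length; a short example of that behaviour (the feature was still nightly-only when this landed):

#![feature(array_windows)]

fn main() {
    let xs = [1, 2];
    // `N` greater than the length: no windows at all.
    assert_eq!(xs.array_windows::<3>().count(), 0);
    // `N` equal to the length: exactly one window.
    assert_eq!(xs.array_windows::<2>().count(), 1);
}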

View file

@ -4,7 +4,7 @@
//!
//! For more details, see the [`std::str`] module.
//!
//! [`std::str`]: self
//! [`std::str`]: ../../std/str/index.html
#![stable(feature = "rust1", since = "1.0.0")]
@ -84,9 +84,6 @@ pub trait FromStr: Sized {
/// when the string is ill-formatted return an error specific to the
/// inside [`Err`]. The error type is specific to implementation of the trait.
///
/// [`Ok`]: ../../std/result/enum.Result.html#variant.Ok
/// [`Err`]: ../../std/result/enum.Result.html#variant.Err
///
/// # Examples
///
/// Basic usage with [`i32`][ithirtytwo], a type that implements `FromStr`:
@ -269,11 +266,9 @@ pub fn error_len(&self) -> Option<usize> {
///
/// If you are sure that the byte slice is valid UTF-8, and you don't want to
/// incur the overhead of the validity check, there is an unsafe version of
/// this function, [`from_utf8_unchecked`][fromutf8u], which has the same
/// this function, [`from_utf8_unchecked`], which has the same
/// behavior but skips the check.
///
/// [fromutf8u]: fn.from_utf8_unchecked.html
///
/// If you need a `String` instead of a `&str`, consider
/// [`String::from_utf8`][string].
///
@ -318,11 +313,9 @@ pub fn error_len(&self) -> Option<usize> {
/// assert!(str::from_utf8(&sparkle_heart).is_err());
/// ```
///
/// See the docs for [`Utf8Error`][error] for more details on the kinds of
/// See the docs for [`Utf8Error`] for more details on the kinds of
/// errors that can be returned.
///
/// [error]: struct.Utf8Error.html
///
/// A "stack allocated string":
///
/// ```
@ -371,10 +364,8 @@ pub fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
///
/// assert!(str::from_utf8_mut(&mut invalid).is_err());
/// ```
/// See the docs for [`Utf8Error`][error] for more details on the kinds of
/// See the docs for [`Utf8Error`] for more details on the kinds of
/// errors that can be returned.
///
/// [error]: struct.Utf8Error.html
#[stable(feature = "str_mut_extras", since = "1.20.0")]
pub fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
run_utf8_validation(v)?;
@ -385,9 +376,7 @@ pub fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
/// Converts a slice of bytes to a string slice without checking
/// that the string contains valid UTF-8.
///
/// See the safe version, [`from_utf8`][fromutf8], for more information.
///
/// [fromutf8]: fn.from_utf8.html
/// See the safe version, [`from_utf8`], for more information.
///
/// # Safety
///

View file

@ -28,7 +28,7 @@
//! assert_eq!(s.find(|c: char| c.is_ascii_punctuation()), Some(35));
//! ```
//!
//! [pattern-impls]: trait.Pattern.html#implementors
//! [pattern-impls]: Pattern#implementors
#![unstable(
feature = "pattern",

View file

@ -8,10 +8,8 @@
///
/// [vtable]: https://en.wikipedia.org/wiki/Virtual_method_table
///
/// It consists of a data pointer and a [virtual function pointer table (vtable)][vtable] that
/// customizes the behavior of the `RawWaker`.
///
/// [`Waker`]: struct.Waker.html
/// It consists of a data pointer and a [virtual function pointer table (vtable)][vtable]
/// that customizes the behavior of the `RawWaker`.
#[derive(PartialEq, Debug)]
#[stable(feature = "futures_api", since = "1.36.0")]
pub struct RawWaker {
@@ -52,12 +50,10 @@ pub const fn new(data: *const (), vtable: &'static RawWakerVTable) -> RawWaker {
/// The pointer passed to all functions inside the vtable is the `data` pointer
/// from the enclosing [`RawWaker`] object.
///
/// The functions inside this struct are only intended be called on the `data`
/// The functions inside this struct are only intended to be called on the `data`
/// pointer of a properly constructed [`RawWaker`] object from inside the
/// [`RawWaker`] implementation. Calling one of the contained functions using
/// any other `data` pointer will cause undefined behavior.
///
/// [`RawWaker`]: struct.RawWaker.html
#[stable(feature = "futures_api", since = "1.36.0")]
#[derive(PartialEq, Copy, Clone, Debug)]
pub struct RawWakerVTable {
@@ -68,9 +64,6 @@ pub struct RawWakerVTable {
/// required for this additional instance of a [`RawWaker`] and associated
/// task. Calling `wake` on the resulting [`RawWaker`] should result in a wakeup
/// of the same task that would have been awoken by the original [`RawWaker`].
///
/// [`Waker`]: struct.Waker.html
/// [`RawWaker`]: struct.RawWaker.html
clone: unsafe fn(*const ()) -> RawWaker,
/// This function will be called when `wake` is called on the [`Waker`].
@@ -79,9 +72,6 @@ pub struct RawWakerVTable {
/// The implementation of this function must make sure to release any
/// resources that are associated with this instance of a [`RawWaker`] and
/// associated task.
///
/// [`Waker`]: struct.Waker.html
/// [`RawWaker`]: struct.RawWaker.html
wake: unsafe fn(*const ()),
/// This function will be called when `wake_by_ref` is called on the [`Waker`].
@@ -89,9 +79,6 @@ pub struct RawWakerVTable {
///
/// This function is similar to `wake`, but must not consume the provided data
/// pointer.
///
/// [`Waker`]: struct.Waker.html
/// [`RawWaker`]: struct.RawWaker.html
wake_by_ref: unsafe fn(*const ()),
/// This function gets called when a [`RawWaker`] gets dropped.
@@ -99,8 +86,6 @@ pub struct RawWakerVTable {
/// The implementation of this function must make sure to release any
/// resources that are associated with this instance of a [`RawWaker`] and
/// associated task.
///
/// [`RawWaker`]: struct.RawWaker.html
drop: unsafe fn(*const ()),
}
@@ -142,9 +127,6 @@ impl RawWakerVTable {
/// The implementation of this function must make sure to release any
/// resources that are associated with this instance of a [`RawWaker`] and
/// associated task.
///
/// [`Waker`]: struct.Waker.html
/// [`RawWaker`]: struct.RawWaker.html
#[rustc_promotable]
#[stable(feature = "futures_api", since = "1.36.0")]
// `rustc_allow_const_fn_ptr` is a hack that should not be used anywhere else
@@ -208,8 +190,6 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
/// executor-specific wakeup behavior.
///
/// Implements [`Clone`], [`Send`], and [`Sync`].
///
/// [`RawWaker`]: struct.RawWaker.html
#[repr(transparent)]
#[stable(feature = "futures_api", since = "1.36.0")]
pub struct Waker {
@@ -275,9 +255,6 @@ pub fn will_wake(&self, other: &Waker) -> bool {
/// The behavior of the returned `Waker` is undefined if the contract defined
/// in [`RawWaker`]'s and [`RawWakerVTable`]'s documentation is not upheld.
/// Therefore this method is unsafe.
///
/// [`RawWaker`]: struct.RawWaker.html
/// [`RawWakerVTable`]: struct.RawWakerVTable.html
#[inline]
#[stable(feature = "futures_api", since = "1.36.0")]
pub unsafe fn from_raw(waker: RawWaker) -> Waker {
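To make the `RawWaker`/`RawWakerVTable` contract described above concrete, here is a minimal no-op waker sketch using only the stable APIs touched in this file; the names `NOOP_VTABLE` and `noop_waker` are illustrative, not from the diff:

```rust
use std::ptr;
use std::task::{Context, RawWaker, RawWakerVTable, Waker};

// Every callback ignores its `data` pointer, so the vtable contract is
// trivially upheld: `clone` rebuilds an identical RawWaker, the rest do nothing.
unsafe fn noop_clone(data: *const ()) -> RawWaker {
    RawWaker::new(data, &NOOP_VTABLE)
}
unsafe fn noop(_data: *const ()) {}

static NOOP_VTABLE: RawWakerVTable = RawWakerVTable::new(noop_clone, noop, noop, noop);

fn noop_waker() -> Waker {
    // Safety: none of the vtable functions above ever dereference `data`.
    unsafe { Waker::from_raw(RawWaker::new(ptr::null(), &NOOP_VTABLE)) }
}

fn main() {
    let waker = noop_waker();
    // This is the kind of Context you hand to `Future::poll` when driving a
    // future by hand, e.g. in a test or a toy executor.
    let _cx = Context::from_waker(&waker);
    waker.wake_by_ref(); // does nothing, by construction
}
```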

View file

@@ -47,7 +47,7 @@ unsafe fn abort() -> ! {
}
__rust_abort();
}
} else if #[cfg(windows)] {
} else if #[cfg(all(windows, not(miri)))] {
// On Windows, use the processor-specific __fastfail mechanism. In Windows 8
// and later, this will terminate the process immediately without running any
// in-process exception handlers. In earlier versions of Windows, this

View file

@ -1298,9 +1298,7 @@ pub struct RawEntryBuilderMut<'a, K: 'a, V: 'a, S: 'a> {
/// This `enum` is constructed through the [`raw_entry_mut`] method on [`HashMap`],
/// followed by calling one of the methods on the resulting [`RawEntryBuilderMut`].
///
/// [`Entry`]: enum.Entry.html
/// [`raw_entry_mut`]: HashMap::raw_entry_mut
/// [`RawEntryBuilderMut`]: struct.RawEntryBuilderMut.html
#[unstable(feature = "hash_raw_entry", issue = "56167")]
pub enum RawEntryMut<'a, K: 'a, V: 'a, S: 'a> {
/// An occupied entry.
@@ -1705,8 +1703,6 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
/// A view into an occupied entry in a `HashMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct OccupiedEntry<'a, K: 'a, V: 'a> {
base: base::RustcOccupiedEntry<'a, K, V>,
@@ -1721,8 +1717,6 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
/// A view into a vacant entry in a `HashMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct VacantEntry<'a, K: 'a, V: 'a> {
base: base::RustcVacantEntry<'a, K, V>,
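For contrast with the nightly-only raw entry API above, the stable `Entry` enum that `OccupiedEntry` and `VacantEntry` belong to is typically used like this (sketch):

```rust
use std::collections::HashMap;

fn main() {
    let mut counts: HashMap<&str, u32> = HashMap::new();
    for &word in ["the", "quick", "the"].iter() {
        // `entry` yields either an occupied or a vacant entry, unified
        // behind the `Entry` enum, so the lookup happens only once.
        *counts.entry(word).or_insert(0) += 1;
    }
    assert_eq!(counts["the"], 2);
}
```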

View file

@@ -98,7 +98,6 @@ fn default() -> Subcommand {
impl Flags {
pub fn parse(args: &[String]) -> Flags {
let mut extra_help = String::new();
let mut subcommand_help = String::from(
"\
Usage: x.py <subcommand> [options] [<paths>...]
@@ -170,16 +169,6 @@ pub fn parse(args: &[String]) -> Flags {
"VALUE",
);
// fn usage()
let usage =
|exit_code: i32, opts: &Options, subcommand_help: &str, extra_help: &str| -> ! {
println!("{}", opts.usage(subcommand_help));
if !extra_help.is_empty() {
println!("{}", extra_help);
}
process::exit(exit_code);
};
// We can't use getopt to parse the options until we have completed specifying which
// options are valid, but under the current implementation, some options are conditional on
// the subcommand. Therefore we must manually identify the subcommand first, so that we can
@@ -263,12 +252,38 @@ pub fn parse(args: &[String]) -> Flags {
_ => {}
};
// fn usage()
let usage = |exit_code: i32, opts: &Options, verbose: bool, subcommand_help: &str| -> ! {
let mut extra_help = String::new();
// All subcommands except `clean` and `fmt` can have an optional "Available paths" section
if verbose {
let config = Config::parse(&["build".to_string()]);
let build = Build::new(config);
let maybe_rules_help = Builder::get_help(&build, subcommand.as_str());
extra_help.push_str(maybe_rules_help.unwrap_or_default().as_str());
} else if !(subcommand.as_str() == "clean" || subcommand.as_str() == "fmt") {
extra_help.push_str(
format!("Run `./x.py {} -h -v` to see a list of available paths.", subcommand)
.as_str(),
);
}
println!("{}", opts.usage(subcommand_help));
if !extra_help.is_empty() {
println!("{}", extra_help);
}
process::exit(exit_code);
};
// Done specifying what options are possible, so do the getopts parsing
let matches = opts.parse(&args[..]).unwrap_or_else(|e| {
// Invalid argument/option format
println!("\n{}\n", e);
usage(1, &opts, &subcommand_help, &extra_help);
usage(1, &opts, false, &subcommand_help);
});
// Extra sanity check to make sure we didn't hit this crazy corner case:
//
// ./x.py --frobulate clean build
@@ -436,24 +451,11 @@ pub fn parse(args: &[String]) -> Flags {
let paths = matches.free[1..].iter().map(|p| p.into()).collect::<Vec<PathBuf>>();
let cfg_file = env::var_os("BOOTSTRAP_CONFIG").map(PathBuf::from);
// All subcommands except `clean` can have an optional "Available paths" section
if matches.opt_present("verbose") {
let config = Config::parse(&["build".to_string()]);
let build = Build::new(config);
let maybe_rules_help = Builder::get_help(&build, subcommand.as_str());
extra_help.push_str(maybe_rules_help.unwrap_or_default().as_str());
} else if !(subcommand.as_str() == "clean" || subcommand.as_str() == "fmt") {
extra_help.push_str(
format!("Run `./x.py {} -h -v` to see a list of available paths.", subcommand)
.as_str(),
);
}
let verbose = matches.opt_present("verbose");
// User passed in -h/--help?
if matches.opt_present("help") {
usage(0, &opts, &subcommand_help, &extra_help);
usage(0, &opts, verbose, &subcommand_help);
}
let cmd = match subcommand.as_str() {
@@ -483,7 +485,7 @@ pub fn parse(args: &[String]) -> Flags {
"clean" => {
if !paths.is_empty() {
println!("\nclean does not take a path argument\n");
usage(1, &opts, &subcommand_help, &extra_help);
usage(1, &opts, verbose, &subcommand_help);
}
Subcommand::Clean { all: matches.opt_present("all") }
@@ -494,12 +496,12 @@ pub fn parse(args: &[String]) -> Flags {
"run" | "r" => {
if paths.is_empty() {
println!("\nrun requires at least a path!\n");
usage(1, &opts, &subcommand_help, &extra_help);
usage(1, &opts, verbose, &subcommand_help);
}
Subcommand::Run { paths }
}
_ => {
usage(1, &opts, &subcommand_help, &extra_help);
usage(1, &opts, verbose, &subcommand_help);
}
};
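The refactor above moves the expensive "Available paths" lookup inside the `usage` closure so it only runs when help is actually printed. A reduced sketch of the same shape, using the `getopts` crate bootstrap already depends on; `build_paths_help` is a placeholder for the costly `Builder::get_help`-style call, not a real bootstrap API:

```rust
use getopts::Options;
use std::{env, process};

// Placeholder for the expensive help computation that should stay off the happy path.
fn build_paths_help(subcommand: &str) -> String {
    format!("Available paths for `{}`: ...", subcommand)
}

fn main() {
    let mut opts = Options::new();
    opts.optflag("v", "verbose", "include the available-paths listing");
    opts.optflag("h", "help", "show usage");

    // Help text is assembled here, at print time, never during normal parsing.
    let usage = |exit_code: i32, opts: &Options, verbose: bool, subcommand: &str| -> ! {
        println!("{}", opts.usage("Usage: tool <subcommand> [options]"));
        if verbose {
            println!("{}", build_paths_help(subcommand));
        }
        process::exit(exit_code);
    };

    let args: Vec<String> = env::args().skip(1).collect();
    let matches = opts.parse(&args).unwrap_or_else(|e| {
        println!("\n{}\n", e);
        usage(1, &opts, false, "build");
    });

    if matches.opt_present("help") {
        usage(0, &opts, matches.opt_present("verbose"), "build");
    }
}
```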