Auto merge of #78697 - JohnTitor:rollup-q0fchpv, r=JohnTitor

Rollup of 8 pull requests

Successful merges:

 - #78376 (Treat trailing semicolon as a statement in macro call)
 - #78400 (Fix unindent in doc comments)
 - #78575 (Add a test for compiletest rustc-env & unset-rustc-env directives)
 - #78616 (Document -Zinstrument-coverage)
 - #78663 (Fix ICE when a future-incompat-report has its command-line level capped)
 - #78664 (Fix intrinsic size_of stable link)
 - #78668 (inliner: Remove redundant loop)
 - #78676 (add mipsel-unknown-none target)

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2020-11-03 06:56:46 +00:00
commit d662f80855
27 changed files with 584 additions and 191 deletions

View file

@ -905,6 +905,13 @@ pub struct Stmt {
}
impl Stmt {
pub fn has_trailing_semicolon(&self) -> bool {
match &self.kind {
StmtKind::Semi(_) => true,
StmtKind::MacCall(mac) => matches!(mac.style, MacStmtStyle::Semicolon),
_ => false,
}
}
pub fn add_trailing_semicolon(mut self) -> Self {
self.kind = match self.kind {
StmtKind::Expr(expr) => StmtKind::Semi(expr),

View file

@ -310,8 +310,44 @@ fn filter_map_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
};
if style == ast::MacStmtStyle::Semicolon {
// Implement the proposal described in
// https://github.com/rust-lang/rust/issues/61733#issuecomment-509626449
//
// The macro invocation expands to the list of statements.
// If the list of statements is empty, then 'parse'
// the trailing semicolon on the original invocation
// as an empty statement. That is:
//
// `empty();` is parsed as a single `StmtKind::Empty`
//
// If the list of statements is non-empty, see if the
// final statement already has a trailing semicolon.
//
// If it doesn't have a semicolon, then 'parse' the trailing semicolon
// from the invocation as part of the final statement,
// using `stmt.add_trailing_semicolon()`
//
// If it does have a semicolon, then 'parse' the trailing semicolon
// from the invocation as a new StmtKind::Empty
// FIXME: We will need to preserve the original
// semicolon token and span as part of #15701
let empty_stmt = ast::Stmt {
id: ast::DUMMY_NODE_ID,
kind: ast::StmtKind::Empty,
span: DUMMY_SP,
tokens: None,
};
if let Some(stmt) = stmts.pop() {
stmts.push(stmt.add_trailing_semicolon());
if stmt.has_trailing_semicolon() {
stmts.push(stmt);
stmts.push(empty_stmt);
} else {
stmts.push(stmt.add_trailing_semicolon());
}
} else {
stmts.push(empty_stmt);
}
}
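
A minimal sketch (not part of this diff) of the behavior the comment above describes, using a hypothetical `empty!` macro:

```rust
// Hypothetical illustration of the new expansion behavior.
macro_rules! empty {
    () => {};
}

fn main() {
    // The macro expands to zero statements, so the invocation's trailing `;`
    // is "parsed" as a `StmtKind::Empty` statement instead of being dropped.
    empty!();
}
```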

View file

@ -74,6 +74,7 @@ fn process_command_line(&mut self, sess: &Session, store: &LintStore) {
for &(ref lint_name, level) in &sess.opts.lint_opts {
store.check_lint_name_cmdline(sess, &lint_name, level);
let orig_level = level;
// If the cap is less than this specified level, e.g., if we've got
// `--cap-lints allow` but we've also got `-D foo` then we ignore
@ -88,7 +89,7 @@ fn process_command_line(&mut self, sess: &Session, store: &LintStore) {
};
for id in ids {
self.check_gated_lint(id, DUMMY_SP);
let src = LintSource::CommandLine(lint_flag_val);
let src = LintSource::CommandLine(lint_flag_val, orig_level);
specs.insert(id, (level, src));
}
}
@ -123,7 +124,7 @@ fn insert_spec(
diag_builder.note(&rationale.as_str());
}
}
LintSource::CommandLine(_) => {
LintSource::CommandLine(_, _) => {
diag_builder.note("`forbid` lint level was set on command line");
}
}
@ -422,7 +423,7 @@ pub(crate) fn push(
let forbidden_lint_name = match forbid_src {
LintSource::Default => id.to_string(),
LintSource::Node(name, _, _) => name.to_string(),
LintSource::CommandLine(name) => name.to_string(),
LintSource::CommandLine(name, _) => name.to_string(),
};
let (lint_attr_name, lint_attr_span) = match *src {
LintSource::Node(name, span, _) => (name, span),
@ -446,7 +447,7 @@ pub(crate) fn push(
diag_builder.note(&rationale.as_str());
}
}
LintSource::CommandLine(_) => {
LintSource::CommandLine(_, _) => {
diag_builder.note("`forbid` lint level was set on command line");
}
}

View file

@ -42,6 +42,11 @@ fn check_block(&mut self, cx: &EarlyContext<'_>, block: &Block) {
fn maybe_lint_redundant_semis(cx: &EarlyContext<'_>, seq: &mut Option<(Span, bool)>) {
if let Some((span, multiple)) = seq.take() {
// FIXME: Find a better way of ignoring the trailing
// semicolon from macro expansion
if span == rustc_span::DUMMY_SP {
return;
}
cx.struct_span_lint(REDUNDANT_SEMICOLONS, span, |lint| {
let (msg, rem) = if multiple {
("unnecessary trailing semicolons", "remove these semicolons")

View file

@ -22,7 +22,9 @@ pub enum LintSource {
Node(Symbol, Span, Option<Symbol> /* RFC 2383 reason */),
/// Lint level was set by a command-line flag.
CommandLine(Symbol),
/// The provided `Level` is the level specified on the command line -
/// the actual level may be lower due to `--cap-lints`
CommandLine(Symbol, Level),
}
impl LintSource {
@ -30,7 +32,7 @@ pub fn name(&self) -> Symbol {
match *self {
LintSource::Default => symbol::kw::Default,
LintSource::Node(name, _, _) => name,
LintSource::CommandLine(name) => name,
LintSource::CommandLine(name, _) => name,
}
}
@ -38,7 +40,7 @@ pub fn span(&self) -> Span {
match *self {
LintSource::Default => DUMMY_SP,
LintSource::Node(_, span, _) => span,
LintSource::CommandLine(_) => DUMMY_SP,
LintSource::CommandLine(_, _) => DUMMY_SP,
}
}
}
@ -279,12 +281,12 @@ fn struct_lint_level_impl(
&format!("`#[{}({})]` on by default", level.as_str(), name),
);
}
LintSource::CommandLine(lint_flag_val) => {
let flag = match level {
LintSource::CommandLine(lint_flag_val, orig_level) => {
let flag = match orig_level {
Level::Warn => "-W",
Level::Deny => "-D",
Level::Forbid => "-F",
Level::Allow => panic!(),
Level::Allow => "-A",
};
let hyphen_case_lint_name = name.replace("_", "-");
if lint_flag_val.as_str() == name {
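
A simplified sketch (with a toy `Level` enum, not the compiler's types) of why matching on the original command-line level avoids the panic when `--cap-lints allow` caps a `-D` flag down to allow:

```rust
// Toy model of the fix above; the real types live in rustc_session/rustc_middle.
#[allow(dead_code)]
enum Level {
    Allow,
    Warn,
    Deny,
    Forbid,
}

// Reconstruct the flag text from the level the user actually passed (`-D foo`),
// not from the effective level after `--cap-lints allow` has capped it.
fn flag_for(orig_level: Level) -> &'static str {
    match orig_level {
        Level::Warn => "-W",
        Level::Deny => "-D",
        Level::Forbid => "-F",
        Level::Allow => "-A",
    }
}

fn main() {
    // `-D warnings --cap-lints allow`: the effective level is Allow, but
    // diagnostics should still point at `-D`.
    assert_eq!(flag_for(Level::Deny), "-D");
}
```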

View file

@ -93,96 +93,79 @@ fn run_pass(&self, caller_body: &mut Body<'tcx>) {
return;
}
let mut local_change;
let mut changed = false;
while let Some(callsite) = callsites.pop_front() {
debug!("checking whether to inline callsite {:?}", callsite);
loop {
local_change = false;
while let Some(callsite) = callsites.pop_front() {
debug!("checking whether to inline callsite {:?}", callsite);
if let InstanceDef::Item(_) = callsite.callee.def {
if !self.tcx.is_mir_available(callsite.callee.def_id()) {
debug!(
"checking whether to inline callsite {:?} - MIR unavailable",
callsite,
);
continue;
}
if let InstanceDef::Item(_) = callsite.callee.def {
if !self.tcx.is_mir_available(callsite.callee.def_id()) {
debug!("checking whether to inline callsite {:?} - MIR unavailable", callsite,);
continue;
}
}
let callee_body = if let Some(callee_def_id) = callsite.callee.def_id().as_local() {
let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id);
// Avoid a cycle here by only using `instance_mir` only if we have
// a lower `HirId` than the callee. This ensures that the callee will
// not inline us. This trick only works without incremental compilation.
// So don't do it if that is enabled. Also avoid inlining into generators,
// since their `optimized_mir` is used for layout computation, which can
// create a cycle, even when no attempt is made to inline the function
// in the other direction.
if !self.tcx.dep_graph.is_fully_enabled()
&& self_hir_id < callee_hir_id
&& caller_body.generator_kind.is_none()
{
self.tcx.instance_mir(callsite.callee.def)
} else {
continue;
}
} else {
// This cannot result in a cycle since the callee MIR is from another crate
// and is already optimized.
let callee_body = if let Some(callee_def_id) = callsite.callee.def_id().as_local() {
let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id);
// Avoid a cycle here by only using `instance_mir` only if we have
// a lower `HirId` than the callee. This ensures that the callee will
// not inline us. This trick only works without incremental compilation.
// So don't do it if that is enabled. Also avoid inlining into generators,
// since their `optimized_mir` is used for layout computation, which can
// create a cycle, even when no attempt is made to inline the function
// in the other direction.
if !self.tcx.dep_graph.is_fully_enabled()
&& self_hir_id < callee_hir_id
&& caller_body.generator_kind.is_none()
{
self.tcx.instance_mir(callsite.callee.def)
};
let callee_body: &Body<'tcx> = &*callee_body;
let callee_body = if self.consider_optimizing(callsite, callee_body) {
self.tcx.subst_and_normalize_erasing_regions(
&callsite.callee.substs,
self.param_env,
callee_body,
)
} else {
continue;
};
// Copy only unevaluated constants from the callee_body into the caller_body.
// Although we are only pushing `ConstKind::Unevaluated` consts to
// `required_consts`, here we may not only have `ConstKind::Unevaluated`
// because we are calling `subst_and_normalize_erasing_regions`.
caller_body.required_consts.extend(
callee_body.required_consts.iter().copied().filter(|&constant| {
matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _))
}),
);
let start = caller_body.basic_blocks().len();
debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body);
if !self.inline_call(callsite, caller_body, callee_body) {
debug!("attempting to inline callsite {:?} - failure", callsite);
continue;
}
debug!("attempting to inline callsite {:?} - success", callsite);
} else {
// This cannot result in a cycle since the callee MIR is from another crate
// and is already optimized.
self.tcx.instance_mir(callsite.callee.def)
};
// Add callsites from inlined function
for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) {
if let Some(new_callsite) =
self.get_valid_function_call(bb, bb_data, caller_body)
{
// Don't inline the same function multiple times.
if callsite.callee != new_callsite.callee {
callsites.push_back(new_callsite);
}
let callee_body: &Body<'tcx> = &*callee_body;
let callee_body = if self.consider_optimizing(callsite, callee_body) {
self.tcx.subst_and_normalize_erasing_regions(
&callsite.callee.substs,
self.param_env,
callee_body,
)
} else {
continue;
};
// Copy only unevaluated constants from the callee_body into the caller_body.
// Although we are only pushing `ConstKind::Unevaluated` consts to
// `required_consts`, here we may not only have `ConstKind::Unevaluated`
// because we are calling `subst_and_normalize_erasing_regions`.
caller_body.required_consts.extend(callee_body.required_consts.iter().copied().filter(
|&constant| matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _)),
));
let start = caller_body.basic_blocks().len();
debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body);
if !self.inline_call(callsite, caller_body, callee_body) {
debug!("attempting to inline callsite {:?} - failure", callsite);
continue;
}
debug!("attempting to inline callsite {:?} - success", callsite);
// Add callsites from inlined function
for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) {
if let Some(new_callsite) = self.get_valid_function_call(bb, bb_data, caller_body) {
// Don't inline the same function multiple times.
if callsite.callee != new_callsite.callee {
callsites.push_back(new_callsite);
}
}
local_change = true;
changed = true;
}
if !local_change {
break;
}
changed = true;
}
// Simplify if we inlined anything.
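
Conceptually, the removed outer `loop`/`local_change` pass was redundant because newly discovered call sites are pushed onto the same queue the `while let` is already draining. A toy sketch of that worklist pattern (unrelated to the real MIR types):

```rust
use std::collections::VecDeque;

// Items discovered while processing are pushed back onto the same queue,
// so a single `while let` drains everything; no outer "did anything change" loop.
fn drain(mut work: VecDeque<u32>) -> Vec<u32> {
    let mut processed = Vec::new();
    while let Some(item) = work.pop_front() {
        if item > 1 {
            work.push_back(item / 2); // follow-up work handled by this same loop
        }
        processed.push(item);
    }
    processed
}

fn main() {
    assert_eq!(drain(VecDeque::from(vec![8])), vec![8, 4, 2, 1]);
}
```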

View file

@ -0,0 +1,42 @@
//! Bare MIPS32r2, little endian, softfloat, O32 calling convention
//!
//! Can be used for MIPS M4K core (e.g. on PIC32MX devices)
use crate::spec::abi::Abi;
use crate::spec::{LinkerFlavor, LldFlavor, RelocModel};
use crate::spec::{PanicStrategy, Target, TargetOptions};
pub fn target() -> Target {
Target {
llvm_target: "mipsel-unknown-none".to_string(),
target_endian: "little".to_string(),
pointer_width: 32,
target_c_int_width: "32".to_string(),
data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
arch: "mips".to_string(),
target_os: "none".to_string(),
target_env: String::new(),
target_vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
options: TargetOptions {
cpu: "mips32r2".to_string(),
features: "+mips32r2,+soft-float,+noabicalls".to_string(),
max_atomic_width: Some(32),
executables: true,
linker: Some("rust-lld".to_owned()),
panic_strategy: PanicStrategy::Abort,
relocation_model: RelocModel::Static,
unsupported_abis: vec![
Abi::Stdcall,
Abi::Fastcall,
Abi::Vectorcall,
Abi::Thiscall,
Abi::Win64,
Abi::SysV64,
],
emit_debug_gdb_scripts: false,
..Default::default()
},
}
}
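
For context, a minimal `#![no_std]` skeleton of the kind of crate one might build for this bare-metal target is sketched below; the `_start` entry symbol and the empty loops are illustrative assumptions, not part of this PR:

```rust
#![no_std]
#![no_main]

use core::panic::PanicInfo;

// Hypothetical entry point; a real PIC32 project would initialize hardware here
// and rely on a proper linker script.
#[no_mangle]
pub extern "C" fn _start() -> ! {
    loop {}
}

#[panic_handler]
fn panic(_info: &PanicInfo) -> ! {
    loop {}
}
```

Since no pre-built standard library ships for this target, such a crate would presumably be built with something like `cargo build --target mipsel-unknown-none -Z build-std=core`.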

View file

@ -653,6 +653,7 @@ fn $module() {
("powerpc64-wrs-vxworks", powerpc64_wrs_vxworks),
("mipsel-sony-psp", mipsel_sony_psp),
("mipsel-unknown-none", mipsel_unknown_none),
("thumbv4t-none-eabi", thumbv4t_none_eabi),
}

View file

@ -764,7 +764,7 @@
/// More specifically, this is the offset in bytes between successive
/// items of the same type, including alignment padding.
///
/// The stabilized version of this intrinsic is [`size_of`].
/// The stabilized version of this intrinsic is [`crate::mem::size_of`].
#[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
pub fn size_of<T>() -> usize;

View file

@ -180,6 +180,7 @@ target | std | host | notes
`i686-wrs-vxworks` | ? | |
`mips-unknown-linux-uclibc` | ✓ | | MIPS Linux with uClibc
`mipsel-unknown-linux-uclibc` | ✓ | | MIPS (LE) Linux with uClibc
`mipsel-unknown-none` | * | | Bare MIPS (LE) softfloat
`mipsel-sony-psp` | * | | MIPS (LE) Sony PlayStation Portable (PSP)
`mipsisa32r6-unknown-linux-gnu` | ? | |
`mipsisa32r6el-unknown-linux-gnu` | ? | |

Binary file not shown (new image added: 407 KiB).

View file

@ -0,0 +1,169 @@
# `source-based-code-coverage`
The feature request for this feature is: [#34701]
The Major Change Proposal (MCP) for this feature is: [#278](https://github.com/rust-lang/compiler-team/issues/278)
------------------------
## Introduction
The Rust compiler includes two code coverage implementations:
* A GCC-compatible, gcov-based coverage implementation, enabled with [`-Zprofile`](profile.md), which operates on DebugInfo.
* A source-based code coverage implementation, enabled with `-Zinstrument-coverage`, which uses LLVM's native coverage instrumentation to generate very precise coverage data.
This document describes how to enable and use the LLVM instrumentation-based coverage, via the `-Zinstrument-coverage` compiler flag.
## How it works
When `-Zinstrument-coverage` is enabled, the Rust compiler enhances Rust-based libraries and binaries by:
* Automatically injecting calls to an LLVM intrinsic ([`llvm.instrprof.increment`]), at functions and branches in compiled code, to increment counters when conditional sections of code are executed.
* Embedding additional information in the data section of each library and binary (using the [LLVM Code Coverage Mapping Format]), to define the code regions (start and end positions in the source code) being counted.
When running a coverage-instrumented program, the counter values are written to a `profraw` file at program termination. LLVM bundles tools that read the counter results, combine those results with the coverage map (embedded in the program binary), and generate coverage reports in multiple formats.
## Enable coverage profiling in the Rust compiler
Rust's source-based code coverage requires the Rust "profiler runtime". Without it, compiling with `-Zinstrument-coverage` generates an error that the profiler runtime is missing.
The Rust `nightly` distribution channel should include the profiler runtime, by default.
*IMPORTANT:* If you are building the Rust compiler from the source distribution, the profiler runtime is *not* enabled in the default `config.toml.example`, and may not be enabled in your `config.toml`. Edit the `config.toml` file, and find the `profiler` feature entry. Uncomment it and set it to `true`:
```toml
# Build the profiler runtime (required when compiling with options that depend
# on this runtime, such as `-C profile-generate` or `-Z instrument-coverage`).
profiler = true
```
Then rebuild the Rust compiler (see [rustc-dev-guide-how-to-build-and-run]).
### Building the demangler
LLVM coverage reporting tools include function names and other symbol references in their results, but the raw coverage data reports those symbols in the compiler's "mangled" form, which can be difficult to interpret. To work around this, the LLVM coverage tools also support a user-specified symbol name demangler.
One option for a Rust demangler is [`rustfilt`](https://crates.io/crates/rustfilt), which can be installed with:
```shell
cargo install rustfilt
```
Another option, if you are building the Rust compiler from source, is the `rust-demangler` tool included in the source distribution, which can be built with:
```shell
$ ./x.py build rust-demangler
```
## Compiling with coverage enabled
Set the `-Zinstrument-coverage` compiler flag in order to enable LLVM source-based code coverage profiling.
With `cargo`, you can instrument your program binary *and* dependencies at the same time.
For example (if your project's `Cargo.toml` builds a binary by default):
```shell
$ cd your-project
$ cargo clean
$ RUSTFLAGS="-Zinstrument-coverage" cargo build
```
If `cargo` is not configured to use your `profiler`-enabled version of `rustc`, set the path explicitly via the `RUSTC` environment variable. Here is another example, using a `stage1` build of `rustc` to compile an `example` binary (from the [`json5format`](https://crates.io/crates/json5format) crate):
```shell
$ RUSTC=$HOME/rust/build/x86_64-unknown-linux-gnu/stage1/bin/rustc \
RUSTFLAGS="-Zinstrument-coverage" \
cargo build --example formatjson5
```
## Running the instrumented binary to generate raw coverage profiling data
In the previous example, `cargo` generated the coverage-instrumented binary `formatjson5`:
```shell
$ echo "{some: 'thing'}" | target/debug/examples/formatjson5 -
```
```json5
{
some: 'thing',
}
```
After running this program, a new file, `default.profraw`, should be in the current working directory. It's often preferable to set a specific file name or path. You can change the output file using the environment variable `LLVM_PROFILE_FILE`:
```shell
$ echo "{some: 'thing'}" \
| LLVM_PROFILE_FILE="formatjson5.profraw" target/debug/examples/formatjson5 -
...
$ ls formatjson5.profraw
formatjson5.profraw
```
If `LLVM_PROFILE_FILE` contains a path to a non-existent directory, the missing directory structure will be created. Additionally, the following special pattern strings are rewritten:
* `%p` - The process ID.
* `%h` - The hostname of the machine running the program.
* `%t` - The value of the TMPDIR environment variable.
* `%Nm` - The instrumented binary's signature: the runtime creates a pool of N raw profiles, used for on-line profile merging. The runtime takes care of selecting a raw profile from the pool, locking it, and updating it before the program exits. `N` must be between `1` and `9`, and defaults to `1` if omitted (with simply `%m`).
* `%c` - Does not add anything to the filename, but enables a mode (on some platforms, including Darwin) in which profile counter updates are continuously synced to a file. This means that if the instrumented program crashes, or is killed by a signal, perfect coverage information can still be recovered.
## Creating coverage reports
LLVM's tools to process coverage data and coverage maps have some version dependencies. If you encounter a version mismatch, try updating your LLVM tools.
If you are building the Rust compiler from source, you can optionally use the bundled LLVM tools, built from source. Those tool binaries can typically be found in your build platform directory at something like: `rust/build/x86_64-unknown-linux-gnu/llvm/bin/llvm-*`. (Look for `llvm-profdata` and `llvm-cov`.)
Raw profiles have to be indexed before they can be used to generate coverage reports. This is done using [`llvm-profdata merge`] (which can combine multiple raw profiles and index them at the same time):
```shell
$ llvm-profdata merge -sparse formatjson5.profraw -o formatjson5.profdata
```
Finally, the `.profdata` file is used, in combination with the coverage map (from the program binary), to generate coverage reports using [`llvm-cov report`] (for coverage summaries) and [`llvm-cov show`] (to see detailed coverage of lines and regions, i.e. character ranges, overlaid on the original source code).
These commands have several display and filtering options. For example:
```shell
$ llvm-cov show -Xdemangler=rustfilt target/debug/examples/formatjson5 \
-instr-profile=formatjson5.profdata \
-show-line-counts-or-regions \
-show-instantiations \
-name=add_quoted_string
```
<img alt="Screenshot of sample `llvm-cov show` result, for function add_quoted_string" src="img/llvm-cov-show-01.png" class="center"/>
<br/>
<br/>
Some of the more notable options in this example include:
* `-Xdemangler=rustfilt` - the command name or path used to demangle Rust symbols (`rustfilt` in the example, but this could also be a path to the `rust-demangler` tool)
* `target/debug/examples/formatjson5` - the instrumented binary (from which to extract the coverage map)
* `-instr-profile=<path-to-file>.profdata` - the location of the `.profdata` file created by `llvm-profdata merge` (from the `.profraw` file generated by the instrumented binary)
* `-name=<exact-function-name>` - to show coverage for a specific function (or, consider using another filter option, such as `-name-regex=<pattern>`)
## Interpreting reports
There are four statistics tracked in a coverage summary:
* Function coverage is the percentage of functions that have been executed at least once. A function is considered to be executed if any of its instantiations are executed.
* Instantiation coverage is the percentage of function instantiations that have been executed at least once. Generic functions and functions generated from macros are two kinds of functions that may have multiple instantiations.
* Line coverage is the percentage of code lines that have been executed at least once. Only executable lines within function bodies are considered to be code lines.
* Region coverage is the percentage of code regions that have been executed at least once. A code region may span multiple lines: for example, in a large function body with no control flow. In other cases, a single line can contain multiple code regions: `return x || (y && z)` has countable code regions for `x` (which may resolve the expression, if `x` is `true`), `|| (y && z)` (executed only if `x` was `false`), and `return` (executed in either situation).
Of these four statistics, function coverage is usually the least granular while region coverage is the most granular. The project-wide totals for each statistic are listed in the summary.
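
As a small, hypothetical example of how these statistics differ, the generic function below is one function with two instantiations, and its last line contains several distinct code regions:

```rust
// Hypothetical example for interpreting coverage summaries; names are made up.
fn describe<T: std::fmt::Display>(value: T) -> String {
    format!("value = {}", value)
}

fn pick(x: bool, y: bool, z: bool) -> bool {
    // A single line with multiple code regions: `x`, `(y && z)`, and the `return`.
    return x || (y && z);
}

fn main() {
    println!("{}", describe("text")); // one instantiation of `describe`
    println!("{}", describe(7));      // a second instantiation
    println!("{}", pick(true, false, false)); // `(y && z)` region never executes
}
```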
## Other references
Rust's implementation and workflow for source-based code coverage is based on the same library and tools used to implement [source-based code coverage in Clang](https://clang.llvm.org/docs/SourceBasedCodeCoverage.html). (This document is partially based on the Clang guide.)
[#34701]: https://github.com/rust-lang/rust/issues/34701
[`llvm.instrprof.increment`]: https://llvm.org/docs/LangRef.html#llvm-instrprof-increment-intrinsic
[LLVM Code Coverage Mapping Format]: https://llvm.org/docs/CoverageMappingFormat.html
[rustc-dev-guide-how-to-build-and-run]: https://rustc-dev-guide.rust-lang.org/building/how-to-build-and-run.html
[`llvm-profdata merge`]: https://llvm.org/docs/CommandGuide/llvm-profdata.html#profdata-merge
[`llvm-cov report`]: https://llvm.org/docs/CommandGuide/llvm-cov.html#llvm-cov-report
[`llvm-cov show`]: https://llvm.org/docs/CommandGuide/llvm-cov.html#llvm-cov-show

View file

@ -1,7 +1,6 @@
use std::cmp;
use std::string::String;
use crate::clean::{self, DocFragment, Item};
use crate::clean::{self, DocFragment, DocFragmentKind, Item};
use crate::core::DocContext;
use crate::fold::{self, DocFolder};
use crate::passes::Pass;
@ -35,65 +34,81 @@ pub fn unindent_doc_comments(&mut self) {
}
fn unindent_fragments(docs: &mut Vec<DocFragment>) {
for fragment in docs {
fragment.doc = unindent(&fragment.doc);
}
}
fn unindent(s: &str) -> String {
let lines = s.lines().collect::<Vec<&str>>();
let mut saw_first_line = false;
let mut saw_second_line = false;
let min_indent = lines.iter().fold(usize::MAX, |min_indent, line| {
// After we see the first non-whitespace line, look at
// the line we have. If it is not whitespace, and therefore
// part of the first paragraph, then ignore the indentation
// level of the first line
let ignore_previous_indents =
saw_first_line && !saw_second_line && !line.chars().all(|c| c.is_whitespace());
let min_indent = if ignore_previous_indents { usize::MAX } else { min_indent };
if saw_first_line {
saw_second_line = true;
}
if line.chars().all(|c| c.is_whitespace()) {
min_indent
} else {
saw_first_line = true;
let mut whitespace = 0;
line.chars().all(|char| {
// Compare against either space or tab, ignoring whether they
// are mixed or not
if char == ' ' || char == '\t' {
whitespace += 1;
true
} else {
false
}
});
cmp::min(min_indent, whitespace)
}
});
if !lines.is_empty() {
let mut unindented = vec![lines[0].trim_start().to_string()];
unindented.extend_from_slice(
&lines[1..]
.iter()
.map(|&line| {
if line.chars().all(|c| c.is_whitespace()) {
line.to_string()
} else {
assert!(line.len() >= min_indent);
line[min_indent..].to_string()
}
})
.collect::<Vec<_>>(),
);
unindented.join("\n")
// `add` is used in case the most common sugared doc syntax is used ("/// "). Lines from
// other fragment kinds never start with a whitespace unless they use some markdown
// formatting that requires it. Therefore, if the doc block has a mix of the two, the
// minimum indent must be reduced by one to take this whitespace into account.
//
// For example:
//
// /// hello!
// #[doc = "another"]
//
// In this case, you want "hello! another" and not "hello!  another" (note the extra space).
let add = if docs.windows(2).any(|arr| arr[0].kind != arr[1].kind)
&& docs.iter().any(|d| d.kind == DocFragmentKind::SugaredDoc)
{
// In case we have a mix of sugared doc comments and "raw" ones, we want the sugared one to
// "decide" how much the minimum indent will be.
1
} else {
s.to_string()
0
};
// `min_indent` is used to know how many whitespace characters must be removed from the
// start of each line. Example:
//
// /// hello!
// #[doc = "another"]
//
// Here, the `min_indent` is 1 (because non-sugared fragments are always counted with a
// minimum of 1 whitespace), meaning that "hello!" will be considered a codeblock because it
// starts with 4 (5 - 1) whitespaces.
let min_indent = match docs
.iter()
.map(|fragment| {
fragment.doc.lines().fold(usize::MAX, |min_indent, line| {
if line.chars().all(|c| c.is_whitespace()) {
min_indent
} else {
// Compare against either space or tab, ignoring whether they are
// mixed or not.
let whitespace = line.chars().take_while(|c| *c == ' ' || *c == '\t').count();
cmp::min(min_indent, whitespace)
+ if fragment.kind == DocFragmentKind::SugaredDoc { 0 } else { add }
}
})
})
.min()
{
Some(x) => x,
None => return,
};
for fragment in docs {
if fragment.doc.lines().count() == 0 {
continue;
}
let min_indent = if fragment.kind != DocFragmentKind::SugaredDoc && min_indent > 0 {
min_indent - add
} else {
min_indent
};
fragment.doc = fragment
.doc
.lines()
.map(|line| {
if line.chars().all(|c| c.is_whitespace()) {
line.to_string()
} else {
assert!(line.len() >= min_indent);
line[min_indent..].to_string()
}
})
.collect::<Vec<_>>()
.join("\n");
}
}
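
As a quick, hypothetical illustration of the case these comments handle (the rustdoc test added later in this rollup exercises more variants), mixing sugared and raw doc fragments should merge into one paragraph rather than turning the text into a code block:

```rust
/// hello!
#[doc = "another"]
// With the fix, this renders as the single paragraph "hello! another";
// the sugared fragment's leading space no longer skews the indent calculation.
pub struct Example;
```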

View file

@ -1,72 +1,63 @@
use super::*;
use rustc_span::source_map::DUMMY_SP;
fn create_doc_fragment(s: &str) -> Vec<DocFragment> {
vec![DocFragment {
line: 0,
span: DUMMY_SP,
parent_module: None,
doc: s.to_string(),
kind: DocFragmentKind::SugaredDoc,
}]
}
#[track_caller]
fn run_test(input: &str, expected: &str) {
let mut s = create_doc_fragment(input);
unindent_fragments(&mut s);
assert_eq!(s[0].doc, expected);
}
#[test]
fn should_unindent() {
let s = " line1\n line2".to_string();
let r = unindent(&s);
assert_eq!(r, "line1\nline2");
run_test(" line1\n line2", "line1\nline2");
}
#[test]
fn should_unindent_multiple_paragraphs() {
let s = " line1\n\n line2".to_string();
let r = unindent(&s);
assert_eq!(r, "line1\n\nline2");
run_test(" line1\n\n line2", "line1\n\nline2");
}
#[test]
fn should_leave_multiple_indent_levels() {
// Line 2 is indented another level beyond the
// base indentation and should be preserved
let s = " line1\n\n line2".to_string();
let r = unindent(&s);
assert_eq!(r, "line1\n\n line2");
run_test(" line1\n\n line2", "line1\n\n line2");
}
#[test]
fn should_ignore_first_line_indent() {
// The first line of the first paragraph may not be indented as
// far due to the way the doc string was written:
//
// #[doc = "Start way over here
// and continue here"]
let s = "line1\n line2".to_string();
let r = unindent(&s);
assert_eq!(r, "line1\nline2");
run_test("line1\n line2", "line1\n line2");
}
#[test]
fn should_not_ignore_first_line_indent_in_a_single_line_para() {
let s = "line1\n\n line2".to_string();
let r = unindent(&s);
assert_eq!(r, "line1\n\n line2");
run_test("line1\n\n line2", "line1\n\n line2");
}
#[test]
fn should_unindent_tabs() {
let s = "\tline1\n\tline2".to_string();
let r = unindent(&s);
assert_eq!(r, "line1\nline2");
run_test("\tline1\n\tline2", "line1\nline2");
}
#[test]
fn should_trim_mixed_indentation() {
let s = "\t line1\n\t line2".to_string();
let r = unindent(&s);
assert_eq!(r, "line1\nline2");
let s = " \tline1\n \tline2".to_string();
let r = unindent(&s);
assert_eq!(r, "line1\nline2");
run_test("\t line1\n\t line2", "line1\nline2");
run_test(" \tline1\n \tline2", "line1\nline2");
}
#[test]
fn should_not_trim() {
let s = "\t line1 \n\t line2".to_string();
let r = unindent(&s);
assert_eq!(r, "line1 \nline2");
let s = " \tline1 \n \tline2".to_string();
let r = unindent(&s);
assert_eq!(r, "line1 \nline2");
run_test("\t line1 \n\t line2", "line1 \nline2");
run_test(" \tline1 \n \tline2", "line1 \nline2");
}

View file

@ -0,0 +1 @@
Just some text.

View file

@ -0,0 +1,64 @@
#![feature(external_doc)]
#![crate_name = "foo"]
// @has foo/struct.Example.html
// @matches - '//pre[@class="rust rust-example-rendered"]' \
// '(?m)let example = Example::new\(\)\n \.first\(\)\n \.second\(\)\n \.build\(\);\Z'
/// ```rust
/// let example = Example::new()
/// .first()
#[cfg_attr(not(feature = "one"), doc = " .second()")]
/// .build();
/// ```
pub struct Example;
// @has foo/struct.F.html
// @matches - '//pre[@class="rust rust-example-rendered"]' \
// '(?m)let example = Example::new\(\)\n \.first\(\)\n \.another\(\)\n \.build\(\);\Z'
///```rust
///let example = Example::new()
/// .first()
#[cfg_attr(not(feature = "one"), doc = " .another()")]
/// .build();
/// ```
pub struct F;
// @has foo/struct.G.html
// @matches - '//pre[@class="rust rust-example-rendered"]' \
// '(?m)let example = Example::new\(\)\n\.first\(\)\n \.another\(\)\n\.build\(\);\Z'
///```rust
///let example = Example::new()
///.first()
#[cfg_attr(not(feature = "one"), doc = " .another()")]
///.build();
///```
pub struct G;
// @has foo/struct.H.html
// @has - '//div[@class="docblock"]/p' 'no whitespace lol'
///no whitespace
#[doc = " lol"]
pub struct H;
// @has foo/struct.I.html
// @matches - '//pre[@class="rust rust-example-rendered"]' '(?m)4 whitespaces!\Z'
/// 4 whitespaces!
#[doc = "something"]
pub struct I;
// @has foo/struct.J.html
// @matches - '//div[@class="docblock"]/p' '(?m)a\nno whitespace\nJust some text.\Z'
///a
///no whitespace
#[doc(include = "unindent.md")]
pub struct J;
// @has foo/struct.K.html
// @matches - '//pre[@class="rust rust-example-rendered"]' '(?m)4 whitespaces!\Z'
///a
///
/// 4 whitespaces!
///
#[doc(include = "unindent.md")]
pub struct K;

View file

@ -0,0 +1,10 @@
// compile-flags: -D warnings --cap-lints allow
// check-pass
// Regression test for issue #78660
// Tests that we don't ICE when a future-incompat-report lint has
// a command-line source, but is capped to allow
fn main() {
["hi"].into_iter();
}

View file

@ -0,0 +1,11 @@
Future incompatibility report: Future breakage date: None, diagnostic:
warning: this method call currently resolves to `<&[T; N] as IntoIterator>::into_iter` (due to autoref coercions), but that might change in the future when `IntoIterator` impls for arrays are added.
--> $DIR/issue-78660-cap-lints-future-compat.rs:9:12
|
LL | ["hi"].into_iter();
| ^^^^^^^^^ help: use `.iter()` instead of `.into_iter()` to avoid ambiguity: `iter`
|
= note: `-D array-into-iter` implied by `-D warnings`
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #66145 <https://github.com/rust-lang/rust/issues/66145>

View file

@ -0,0 +1,10 @@
macro_rules! empty {
() => { }
}
fn foo() -> bool { //~ ERROR mismatched
{ true } //~ ERROR mismatched
empty!();
}
fn main() {}

View file

@ -0,0 +1,17 @@
error[E0308]: mismatched types
--> $DIR/empty-trailing-stmt.rs:6:7
|
LL | { true }
| ^^^^ expected `()`, found `bool`
error[E0308]: mismatched types
--> $DIR/empty-trailing-stmt.rs:5:13
|
LL | fn foo() -> bool {
| --- ^^^^ expected `bool`, found `()`
| |
| implicitly returns `()` as its body has no tail or `return` expression
error: aborting due to 2 previous errors
For more information about this error, try `rustc --explain E0308`.

View file

@ -0,0 +1,9 @@
// Check that aux builds can also use rustc-env, but environment is configured
// separately from the main test case.
//
// rustc-env:COMPILETEST_BAR=bar
pub fn test() {
assert_eq!(option_env!("COMPILETEST_FOO"), None);
assert_eq!(env!("COMPILETEST_BAR"), "bar");
}

View file

@ -1,5 +1,5 @@
error[E0308]: mismatched types
--> $DIR/meta-expected-error-correct-rev.rs:7:18
--> $DIR/expected-error-correct-rev.rs:7:18
|
LL | let x: u32 = 22_usize;
| --- ^^^^^^^^ expected `u32`, found `usize`

View file

@ -1,6 +1,6 @@
// revisions: a
// Counterpart to `meta-expected-error-wrong-rev.rs`
// Counterpart to `expected-error-wrong-rev.rs`
#[cfg(a)]
fn foo() {

View file

@ -1,5 +1,5 @@
// Meta test for compiletest: check that when we give the right error
// patterns, the test passes. See also `meta-revision-bad.rs`.
// patterns, the test passes. See also `revision-bad.rs`.
// run-fail
// revisions: foo bar

View file

@ -0,0 +1,18 @@
// Compiletest meta test checking that rustc-env and unset-rustc-env directives
// can be used to configure environment for rustc.
//
// run-pass
// aux-build:env.rs
// rustc-env:COMPILETEST_FOO=foo
//
// An environment variable that is likely to be set, but should be safe to unset.
// unset-rustc-env:PWD
extern crate env;
fn main() {
assert_eq!(env!("COMPILETEST_FOO"), "foo");
assert_eq!(option_env!("COMPILETEST_BAR"), None);
assert_eq!(option_env!("PWD"), None);
env::test();
}

View file

@ -40,7 +40,7 @@ macro_rules! produce_it
}
}
fn main /* 0#0 */() { }
fn main /* 0#0 */() { ; }
/*
Expansions: