libtest: Print the total time taken to execute a test suite

Jakob Schikowski 2020-11-26 21:15:15 +01:00
parent c922857066
commit 470c059e69
43 changed files with 176 additions and 64 deletions

View file

@ -3,6 +3,7 @@
use std::fs::File;
use std::io;
use std::io::prelude::Write;
use std::time::Instant;
use super::{
bench::fmt_bench_samples,
@ -14,7 +15,7 @@
options::{Options, OutputFormat},
run_tests,
test_result::TestResult,
time::TestExecTime,
time::{TestExecTime, TestSuiteExecTime},
types::{NamePadding, TestDesc, TestDescAndFn},
};
@ -49,6 +50,7 @@ pub struct ConsoleTestState {
pub allowed_fail: usize,
pub filtered_out: usize,
pub measured: usize,
pub exec_time: Option<TestSuiteExecTime>,
pub metrics: MetricMap,
pub failures: Vec<(TestDesc, Vec<u8>)>,
pub not_failures: Vec<(TestDesc, Vec<u8>)>,
@ -72,6 +74,7 @@ pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
allowed_fail: 0,
filtered_out: 0,
measured: 0,
exec_time: None,
metrics: MetricMap::new(),
failures: Vec::new(),
not_failures: Vec::new(),
@ -277,7 +280,14 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
};
let mut st = ConsoleTestState::new(opts)?;
// Prevent the usage of `Instant` in some cases:
// - It's currently not supported for wasm targets.
// - We disable it for miri because it's not available when isolation is enabled.
let is_instant_supported = !cfg!(target_arch = "wasm32") && !cfg!(miri);
let start_time = is_instant_supported.then(Instant::now);
run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?;
st.exec_time = start_time.map(|t| TestSuiteExecTime(t.elapsed()));
assert!(st.current_test_count() == st.total);
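A minimal sketch of the timing pattern introduced here, written outside libtest with illustrative names (`run_suite` and the summary print are stand-ins, not libtest APIs): take a start timestamp only when `Instant` is usable on the target, run the tests, then convert the optional timestamp into an elapsed time.

use std::time::Instant;

// Sketch only: `run_suite` and the summary print are illustrative stand-ins.
fn run_suite(tests: &[fn()]) {
    // `Instant` is unavailable on wasm targets and under Miri with isolation,
    // so only take a timestamp when it is supported.
    let is_instant_supported = !cfg!(target_arch = "wasm32") && !cfg!(miri);
    let start_time = is_instant_supported.then(Instant::now);

    for run_one in tests {
        run_one();
    }

    // Convert the optional start timestamp into an optional elapsed duration.
    if let Some(elapsed) = start_time.map(|t| t.elapsed()) {
        println!("finished in {:.2}s", elapsed.as_secs_f64());
    }
}

On targets where `Instant` is unsupported, `bool::then` yields `None` and the summary simply omits the time, matching the behaviour of the diff above.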

View file

@ -47,7 +47,7 @@ fn write_event(
evt
))?;
if let Some(exec_time) = exec_time {
self.write_message(&*format!(r#", "exec_time": "{}""#, exec_time))?;
self.write_message(&*format!(r#", "exec_time": {}"#, exec_time.0.as_secs_f64()))?;
}
if let Some(stdout) = stdout {
self.write_message(&*format!(r#", "stdout": "{}""#, EscapedString(stdout)))?;
@ -162,7 +162,7 @@ fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
}
fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
self.writeln_message(&*format!(
self.write_message(&*format!(
"{{ \"type\": \"suite\", \
\"event\": \"{}\", \
\"passed\": {}, \
@ -170,16 +170,23 @@ fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
\"allowed_fail\": {}, \
\"ignored\": {}, \
\"measured\": {}, \
\"filtered_out\": {} }}",
\"filtered_out\": {}",
if state.failed == 0 { "ok" } else { "failed" },
state.passed,
state.failed + state.allowed_fail,
state.allowed_fail,
state.ignored,
state.measured,
state.filtered_out
state.filtered_out,
))?;
if let Some(ref exec_time) = state.exec_time {
let time_str = format!(", \"exec_time\": {}", exec_time.0.as_secs_f64());
self.write_message(&time_str)?;
}
self.writeln_message(" }")?;
Ok(state.failed == 0)
}
}
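The change above splits the suite line so `exec_time` can be appended as an optional trailing field before the closing brace. A small, self-contained sketch of that pattern (the function and parameter names here are illustrative, not libtest's):

use std::io::{self, Write};
use std::time::Duration;

// Emit the fixed fields first, append "exec_time" only when a measurement
// exists, then close the JSON object. Seconds are written as a bare JSON
// number, matching `as_secs_f64()` in the diff above.
fn write_suite_line(
    out: &mut dyn Write,
    passed: usize,
    failed: usize,
    exec_time: Option<Duration>,
) -> io::Result<()> {
    write!(out, "{{ \"type\": \"suite\", \"passed\": {}, \"failed\": {}", passed, failed)?;
    if let Some(t) = exec_time {
        write!(out, ", \"exec_time\": {}", t.as_secs_f64())?;
    }
    writeln!(out, " }}")
}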

View file

@ -259,7 +259,7 @@ fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
let s = if state.allowed_fail > 0 {
format!(
". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out\n\n",
". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out",
state.passed,
state.failed + state.allowed_fail,
state.allowed_fail,
@ -269,13 +269,20 @@ fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
)
} else {
format!(
". {} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n",
". {} passed; {} failed; {} ignored; {} measured; {} filtered out",
state.passed, state.failed, state.ignored, state.measured, state.filtered_out
)
};
self.write_plain(&s)?;
if let Some(ref exec_time) = state.exec_time {
let time_str = format!("; finished in {}", exec_time);
self.write_plain(&time_str)?;
}
self.write_plain("\n\n")?;
Ok(success)
}
}

View file

@ -236,7 +236,7 @@ fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
let s = if state.allowed_fail > 0 {
format!(
". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out\n\n",
". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out",
state.passed,
state.failed + state.allowed_fail,
state.allowed_fail,
@ -246,13 +246,20 @@ fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
)
} else {
format!(
". {} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n",
". {} passed; {} failed; {} ignored; {} measured; {} filtered out",
state.passed, state.failed, state.ignored, state.measured, state.filtered_out
)
};
self.write_plain(&s)?;
if let Some(ref exec_time) = state.exec_time {
let time_str = format!("; finished in {}", exec_time);
self.write_plain(&time_str)?;
}
self.write_plain("\n\n")?;
Ok(success)
}
}

View file

@ -669,6 +669,7 @@ fn should_sort_failures_before_printing_them() {
allowed_fail: 0,
filtered_out: 0,
measured: 0,
exec_time: None,
metrics: MetricMap::new(),
failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
options: Options::new(),

View file

@ -1,8 +1,9 @@
//! Module `time` contains everything related to the time measurement of unit tests
//! execution.
//! Two main purposes of this module:
//! The purposes of this module:
//! - Check whether test is timed out.
//! - Provide helpers for `report-time` and `measure-time` options.
//! - Provide newtypes for executions times.
use std::env;
use std::fmt;
@ -60,7 +61,7 @@ pub fn get_default_test_timeout() -> Instant {
Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S)
}
/// The meassured execution time of a unit test.
/// The measured execution time of a unit test.
#[derive(Debug, Clone, PartialEq)]
pub struct TestExecTime(pub Duration);
@ -70,6 +71,16 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
}
}
/// The measured execution time of the whole test suite.
#[derive(Debug, Clone, Default, PartialEq)]
pub struct TestSuiteExecTime(pub Duration);
impl fmt::Display for TestSuiteExecTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:.2}s", self.0.as_secs_f64())
}
}
/// Structure denoting time limits for test execution.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub struct TimeThreshold {
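A hedged illustration of what the `Display` impl above produces: the suite time is rendered as seconds with two decimal places.

use std::time::Duration;

// Illustrative only; assumes access to the `TestSuiteExecTime` newtype above.
// 1234 ms formats as "1.23s" because the impl applies `{:.2}` to `as_secs_f64()`.
fn demo_display() {
    let t = TestSuiteExecTime(Duration::from_millis(1234));
    assert_eq!(t.to_string(), "1.23s");
}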

View file

@ -13,6 +13,6 @@ all:
cat $(OUTPUT_FILE_DEFAULT) | "$(PYTHON)" validate_json.py
cat $(OUTPUT_FILE_STDOUT_SUCCESS) | "$(PYTHON)" validate_json.py
# Compare to output file
diff output-default.json $(OUTPUT_FILE_DEFAULT)
diff output-stdout-success.json $(OUTPUT_FILE_STDOUT_SUCCESS)
# Normalize the actual output and compare to expected output file
cat $(OUTPUT_FILE_DEFAULT) | sed -r 's/\"exec_time\": [0-9]+(\.[0-9]+)?/\"exec_time\": \$$TIME/' | diff output-default.json -
cat $(OUTPUT_FILE_STDOUT_SUCCESS) | sed -r 's/\"exec_time\": [0-9]+(\.[0-9]+)?/\"exec_time\": \$$TIME/' | diff output-stdout-success.json -
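For reference, a rough Rust equivalent of the sed normalization above (assuming the `regex` crate; purely illustrative): the measured seconds are replaced with a stable `$TIME` placeholder so the JSON output can be diffed against the checked-in expectation files.

// Rough equivalent of the sed invocation above; assumes the `regex` crate.
fn normalize_exec_time(line: &str) -> String {
    let re = regex::Regex::new(r#""exec_time": [0-9]+(\.[0-9]+)?"#).unwrap();
    // `$$` escapes to a literal `$` in the replacement string.
    re.replace_all(line, r#""exec_time": $$TIME"#).to_string()
}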

View file

@ -7,4 +7,4 @@
{ "type": "test", "name": "c", "event": "ok" }
{ "type": "test", "event": "started", "name": "d" }
{ "type": "test", "name": "d", "event": "ignored" }
{ "type": "suite", "event": "failed", "passed": 2, "failed": 1, "allowed_fail": 0, "ignored": 1, "measured": 0, "filtered_out": 0 }
{ "type": "suite", "event": "failed", "passed": 2, "failed": 1, "allowed_fail": 0, "ignored": 1, "measured": 0, "filtered_out": 0, "exec_time": $TIME }

View file

@ -7,4 +7,4 @@
{ "type": "test", "name": "c", "event": "ok", "stdout": "thread 'main' panicked at 'assertion failed: false', f.rs:15:5\n" }
{ "type": "test", "event": "started", "name": "d" }
{ "type": "test", "name": "d", "event": "ignored" }
{ "type": "suite", "event": "failed", "passed": 2, "failed": 1, "allowed_fail": 0, "ignored": 1, "measured": 0, "filtered_out": 0 }
{ "type": "suite", "event": "failed", "passed": 2, "failed": 1, "allowed_fail": 0, "ignored": 1, "measured": 0, "filtered_out": 0, "exec_time": $TIME }

View file

@ -1,6 +1,7 @@
// check-pass
// compile-flags:--test --test-args --test-threads=1
// normalize-stdout-test: "src/test/rustdoc-ui" -> "$$DIR"
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// Crates like core have doctests gated on `cfg(not(test))` so we need to make
// sure `cfg(test)` is not active when running `rustdoc --test`.

View file

@ -1,7 +1,7 @@
running 2 tests
test $DIR/cfg-test.rs - Bar (line 26) ... ok
test $DIR/cfg-test.rs - Foo (line 18) ... ok
test $DIR/cfg-test.rs - Bar (line 27) ... ok
test $DIR/cfg-test.rs - Foo (line 19) ... ok
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -1,6 +1,7 @@
// check-pass
// compile-flags:--test
// normalize-stdout-test: "src/test/rustdoc-ui" -> "$$DIR"
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// Make sure `cfg(doctest)` is set when finding doctests but not inside
// the doctests.

View file

@ -1,6 +1,6 @@
running 1 test
test $DIR/doc-test-doctest-feature.rs - Foo (line 8) ... ok
test $DIR/doc-test-doctest-feature.rs - Foo (line 9) ... ok
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -1,6 +1,7 @@
// check-pass
// compile-flags:--test
// normalize-stdout-test: "src/test/rustdoc-ui" -> "$$DIR"
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
#![feature(doc_cfg)]

View file

@ -1,6 +1,6 @@
running 1 test
test $DIR/doc-test-rustdoc-feature.rs - Foo (line 9) ... ok
test $DIR/doc-test-rustdoc-feature.rs - Foo (line 10) ... ok
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -2,6 +2,7 @@
// aux-build:extern_macros.rs
// compile-flags:--test --test-args=--test-threads=1
// normalize-stdout-test: "src/test/rustdoc-ui" -> "$$DIR"
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// check-pass
//! ```

View file

@ -1,8 +1,8 @@
running 3 tests
test $DIR/doctest-output.rs - (line 7) ... ok
test $DIR/doctest-output.rs - ExpandedStruct (line 23) ... ok
test $DIR/doctest-output.rs - foo::bar (line 17) ... ok
test $DIR/doctest-output.rs - (line 8) ... ok
test $DIR/doctest-output.rs - ExpandedStruct (line 24) ... ok
test $DIR/doctest-output.rs - foo::bar (line 18) ... ok
test result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
test result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -3,6 +3,7 @@
// compile-flags:--test
// normalize-stdout-test: "src/test/rustdoc-ui" -> "$$DIR"
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// failure-status: 101
/// ```compile_fail

View file

@ -1,14 +1,14 @@
running 1 test
test $DIR/failed-doctest-compile-fail.rs - Foo (line 8) ... FAILED
test $DIR/failed-doctest-compile-fail.rs - Foo (line 9) ... FAILED
failures:
---- $DIR/failed-doctest-compile-fail.rs - Foo (line 8) stdout ----
---- $DIR/failed-doctest-compile-fail.rs - Foo (line 9) stdout ----
Test compiled successfully, but it's marked `compile_fail`.
failures:
$DIR/failed-doctest-compile-fail.rs - Foo (line 8)
$DIR/failed-doctest-compile-fail.rs - Foo (line 9)
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -3,6 +3,7 @@
// compile-flags:--test
// normalize-stdout-test: "src/test/rustdoc-ui" -> "$$DIR"
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// failure-status: 101
/// ```compile_fail,E0004

View file

@ -1,12 +1,12 @@
running 1 test
test $DIR/failed-doctest-missing-codes.rs - Foo (line 8) ... FAILED
test $DIR/failed-doctest-missing-codes.rs - Foo (line 9) ... FAILED
failures:
---- $DIR/failed-doctest-missing-codes.rs - Foo (line 8) stdout ----
---- $DIR/failed-doctest-missing-codes.rs - Foo (line 9) stdout ----
error[E0308]: mismatched types
--> $DIR/failed-doctest-missing-codes.rs:9:13
--> $DIR/failed-doctest-missing-codes.rs:10:13
|
LL | let x: () = 5i32;
| -- ^^^^ expected `()`, found `i32`
@ -19,7 +19,7 @@ For more information about this error, try `rustc --explain E0308`.
Some expected error codes were not found: ["E0004"]
failures:
$DIR/failed-doctest-missing-codes.rs - Foo (line 8)
$DIR/failed-doctest-missing-codes.rs - Foo (line 9)
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -5,6 +5,7 @@
// compile-flags:--test --test-args --test-threads=1
// rustc-env:RUST_BACKTRACE=0
// normalize-stdout-test: "src/test/rustdoc-ui" -> "$$DIR"
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// failure-status: 101
// doctest fails at runtime

View file

@ -1,13 +1,13 @@
running 2 tests
test $DIR/failed-doctest-output.rs - OtherStruct (line 21) ... FAILED
test $DIR/failed-doctest-output.rs - SomeStruct (line 11) ... FAILED
test $DIR/failed-doctest-output.rs - OtherStruct (line 22) ... FAILED
test $DIR/failed-doctest-output.rs - SomeStruct (line 12) ... FAILED
failures:
---- $DIR/failed-doctest-output.rs - OtherStruct (line 21) stdout ----
---- $DIR/failed-doctest-output.rs - OtherStruct (line 22) stdout ----
error[E0425]: cannot find value `no` in this scope
--> $DIR/failed-doctest-output.rs:22:1
--> $DIR/failed-doctest-output.rs:23:1
|
LL | no
| ^^ not found in this scope
@ -16,7 +16,7 @@ error: aborting due to previous error
For more information about this error, try `rustc --explain E0425`.
Couldn't compile the test.
---- $DIR/failed-doctest-output.rs - SomeStruct (line 11) stdout ----
---- $DIR/failed-doctest-output.rs - SomeStruct (line 12) stdout ----
Test executable failed (exit code 101).
stdout:
@ -32,8 +32,8 @@ note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
failures:
$DIR/failed-doctest-output.rs - OtherStruct (line 21)
$DIR/failed-doctest-output.rs - SomeStruct (line 11)
$DIR/failed-doctest-output.rs - OtherStruct (line 22)
$DIR/failed-doctest-output.rs - SomeStruct (line 12)
test result: FAILED. 0 passed; 2 failed; 0 ignored; 0 measured; 0 filtered out
test result: FAILED. 0 passed; 2 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -3,6 +3,7 @@
// compile-flags:--test
// normalize-stdout-test: "src/test/rustdoc-ui" -> "$$DIR"
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// failure-status: 101
/// ```should_panic

View file

@ -1,14 +1,14 @@
running 1 test
test $DIR/failed-doctest-should-panic.rs - Foo (line 8) ... FAILED
test $DIR/failed-doctest-should-panic.rs - Foo (line 9) ... FAILED
failures:
---- $DIR/failed-doctest-should-panic.rs - Foo (line 8) stdout ----
---- $DIR/failed-doctest-should-panic.rs - Foo (line 9) stdout ----
Test executable succeeded, but it's marked `should_panic`.
failures:
$DIR/failed-doctest-should-panic.rs - Foo (line 8)
$DIR/failed-doctest-should-panic.rs - Foo (line 9)
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -1,5 +1,6 @@
// compile-flags:--test
// normalize-stdout-test: "src/test/rustdoc-ui" -> "$$DIR"
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// check-pass
#![no_std]

View file

@ -1,6 +1,6 @@
running 1 test
test $DIR/test-no_std.rs - f (line 9) ... ok
test $DIR/test-no_std.rs - f (line 10) ... ok
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -1,5 +1,6 @@
// compile-flags: --test
// normalize-stdout-test: "src/test/rustdoc-ui" -> "$$DIR"
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// failure-status: 101
// rustc-env: RUST_BACKTRACE=0

View file

@ -1,12 +1,12 @@
running 1 test
test $DIR/unparseable-doc-test.rs - foo (line 6) ... FAILED
test $DIR/unparseable-doc-test.rs - foo (line 7) ... FAILED
failures:
---- $DIR/unparseable-doc-test.rs - foo (line 6) stdout ----
---- $DIR/unparseable-doc-test.rs - foo (line 7) stdout ----
error[E0765]: unterminated double quote string
--> $DIR/unparseable-doc-test.rs:8:1
--> $DIR/unparseable-doc-test.rs:9:1
|
LL | "unterminated
| ^^^^^^^^^^^^^
@ -17,7 +17,7 @@ For more information about this error, try `rustc --explain E0765`.
Couldn't compile the test.
failures:
$DIR/unparseable-doc-test.rs - foo (line 6)
$DIR/unparseable-doc-test.rs - foo (line 7)
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -4,6 +4,7 @@
// run-fail
// check-run-results
// exec-env:RUST_BACKTRACE=0
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// ignore-wasm no panic or subprocess support
// ignore-emscripten no panic or subprocess support

View file

@ -1,9 +1,9 @@
thread 'main' panicked at 'assertion failed: `(left == right)`
left: `2`,
right: `4`', $DIR/test-panic-abort-nocapture.rs:32:5
right: `4`', $DIR/test-panic-abort-nocapture.rs:33:5
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
thread 'main' panicked at 'assertion failed: `(left == right)`
left: `2`,
right: `4`', $DIR/test-panic-abort-nocapture.rs:26:5
right: `4`', $DIR/test-panic-abort-nocapture.rs:27:5
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
testing321

View file

@ -19,5 +19,5 @@ failures:
failures:
it_fails
test result: FAILED. 3 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out
test result: FAILED. 3 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -4,6 +4,7 @@
// run-fail
// check-run-results
// exec-env:RUST_BACKTRACE=0
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// ignore-wasm no panic or subprocess support
// ignore-emscripten no panic or subprocess support

View file

@ -18,7 +18,7 @@ testing123
testing321
thread 'main' panicked at 'assertion failed: `(left == right)`
left: `2`,
right: `5`', $DIR/test-panic-abort.rs:33:5
right: `5`', $DIR/test-panic-abort.rs:34:5
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
@ -26,5 +26,5 @@ failures:
it_exits
it_fails
test result: FAILED. 3 passed; 2 failed; 0 ignored; 0 measured; 0 filtered out
test result: FAILED. 3 passed; 2 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -0,0 +1,20 @@
// no-prefer-dynamic
// compile-flags: --test
// run-flags: --test-threads=1
// run-pass
// check-run-results
// only-wasm32
// Tests the output of the test harness with only passed tests.
#![cfg(test)]
#[test]
fn it_works() {
assert_eq!(1 + 1, 2);
}
#[test]
fn it_works_too() {
assert_eq!(1 * 0, 0);
}

View file

@ -0,0 +1,7 @@
running 2 tests
test it_works ... ok
test it_works_too ... ok
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out

View file

@ -0,0 +1,21 @@
// no-prefer-dynamic
// compile-flags: --test
// run-flags: --test-threads=1
// run-pass
// check-run-results
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// ignore-wasm32 no support for `Instant`
// Tests the output of the test harness with only passed tests.
#![cfg(test)]
#[test]
fn it_works() {
assert_eq!(1 + 1, 2);
}
#[test]
fn it_works_too() {
assert_eq!(1 * 0, 0);
}

View file

@ -0,0 +1,7 @@
running 2 tests
test it_works ... ok
test it_works_too ... ok
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -3,6 +3,7 @@
// run-flags: --test-threads=1
// check-run-results
// exec-env:RUST_BACKTRACE=0
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// ignore-emscripten no threads support
#[test]

View file

@ -10,12 +10,12 @@ fee
fie
foe
fum
thread 'main' panicked at 'explicit panic', $DIR/test-thread-capture.rs:30:5
thread 'main' panicked at 'explicit panic', $DIR/test-thread-capture.rs:31:5
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
failures:
thready_fail
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -3,6 +3,7 @@
// run-flags: --test-threads=1 --nocapture
// check-run-results
// exec-env:RUST_BACKTRACE=0
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// ignore-emscripten no threads support
#[test]

View file

@ -1,2 +1,2 @@
thread 'main' panicked at 'explicit panic', $DIR/test-thread-nocapture.rs:30:5
thread 'main' panicked at 'explicit panic', $DIR/test-thread-nocapture.rs:31:5
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace

View file

@ -16,5 +16,5 @@ failures:
failures:
thready_fail
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME