chore(tests/specs): ability to have sub tests in file (#23667)

Allows writing named sub-tests. These are:

1. Filterable on the command line via `cargo test ...`
2. Run in parallel
3. Given a fresh temp dir and deno dir for each test (unlike steps)
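For illustration, a spec directory's `__test__.jsonc` can now declare several named sub-tests instead of a single `steps` array. A minimal sketch (the file names and outputs here are hypothetical, not taken from the repo):

```json
{
  "tests": {
    "basic_run": {
      "args": "run main.ts",
      "output": "main.out"
    },
    "exits_with_error": {
      "args": "run error.ts",
      "exitCode": 1,
      "output": "error.out"
    }
  }
}
```

Each sub-test's full name is the directory test's name with `::<sub test name>` appended (see `into_collected_tests` in the runner changes below), so individual sub-tests can be selected with the usual `cargo test` name filter.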
David Sherret 2024-05-03 00:49:42 -04:00 committed by GitHub
parent b7945a218e
commit 3e98ea4e69
13 changed files with 383 additions and 202 deletions

Cargo.lock generated

@@ -2634,9 +2634,9 @@ checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f"
[[package]]
name = "file_test_runner"
version = "0.4.1"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f33b00489de0a5fd03df89aefe9fa55da5da3c1a207ea19cd381d1de7e6204b"
checksum = "cc644d2903f00e5f0e5d34dca805c7a100b09a1d257e07697101d90eb10d3351"
dependencies = [
"anyhow",
"crossbeam-channel",


@@ -43,7 +43,7 @@ deno_lockfile.workspace = true
deno_terminal.workspace = true
deno_tls.workspace = true
fastwebsockets = { workspace = true, features = ["upgrade", "unstable-split"] }
file_test_runner = "0.4.1"
file_test_runner = "0.5.0"
flaky_test = "=0.1.0"
http.workspace = true
http-body-util.workspace = true


@@ -28,7 +28,7 @@ cargo test test_name
## `__test__.json` file
This file describes the test to execute and the steps to execute. A basic
This file describes the test(s) to execute and the steps to execute. A basic
example looks like:
```json
@@ -57,6 +57,23 @@ Or another example that runs multiple steps:
}
```
Or if you want to run several tests at the same time:
```json
{
"tests": {
"ignore_dir": {
"args": "run script.ts",
"output": "script.out"
},
"some_other_test": {
"args": "run other.ts",
"output": "other.out"
}
}
}
```
### Top level properties
- `base` - The base config to use for the test. Options:
@@ -71,13 +88,12 @@ Or another example that runs multiple steps:
### Step properties
When writing a single step, these may be at the top level rather than nested in
a "steps" array.
a "steps" array or "tests" object.
- `args` - A string (that will be split on whitespace into an args array) or an
array of arguments.
- `output` - Path to use to assert the output.
- `cleanDenoDir` (boolean) - Whether to empty the deno_dir before running the
step.
- `output` - Path to a file to assert the output against (must end with an `.out`
extension) _or_ inline text to pattern match against the output.
- `flaky` - Step will be retried until it succeeds, up to a maximum of 3 attempts.
- `if` (`"windows"`, `"linux"`, `"mac"`, `"unix"`) - Whether to run this step.
- `exitCode` (number) - Expected exit code.
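To tie these properties together, here is an illustrative `__test__.jsonc` that combines top-level settings with one single-step sub-test and one multi-step sub-test. This is only a sketch: the file names, env var, and outputs are invented for the example, while the property names follow the schema in this change.

```json
{
  "tempDir": true,
  "envs": {
    "NO_COLOR": "1"
  },
  "tests": {
    "happy_path": {
      "args": "run --quiet main.ts",
      "output": "main.out"
    },
    "retry_then_info": {
      "steps": [{
        "flaky": true,
        "args": "run main.ts",
        "output": "main.out"
      }, {
        "if": "unix",
        "args": "info main.ts",
        "output": "main_info.out"
      }]
    }
  }
}
```

Top-level `tempDir`, `base`, and `envs` are merged into each sub-test that does not set them itself (see `into_collected_tests` in the test runner changes below).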


@@ -1,19 +1,21 @@
{
"steps": [{
"args": "bench --ignore=collect/ignore collect",
"output": "collect.out"
}, {
"cleanDenoDir": true,
"args": "bench --config collect/deno.jsonc collect",
"output": "collect.out"
}, {
"cleanDenoDir": true,
"args": "bench --config collect/deno2.jsonc collect",
"output": "collect2.out"
}, {
"cleanDenoDir": true,
"args": "bench --config collect/deno.malformed.jsonc",
"exitCode": 1,
"output": "collect_with_malformed_config.out"
}]
"tests": {
"ignore": {
"args": "bench --ignore=collect/ignore collect",
"output": "collect.out"
},
"config_sub_dir": {
"args": "bench --config collect/deno.jsonc collect",
"output": "collect.out"
},
"config_sub_dir_with_exclude": {
"args": "bench --config collect/deno2.jsonc collect",
"output": "collect2.out"
},
"config_malformed": {
"args": "bench --config collect/deno.malformed.jsonc",
"exitCode": 1,
"output": "collect_with_malformed_config.out"
}
}
}


@@ -1,28 +1,34 @@
{
"steps": [{
"args": "run --quiet --reload --import-map=import_map.json test.ts",
"output": "run.out"
}, {
"args": "run --quiet --reload --import-map=import_map_invalid.json --config=config.json test.ts",
"output": "flag_has_precedence.out",
"exitCode": 1
}, {
"args": "run --reload --config=config.json test.ts",
"output": "config.out"
}, {
"cleanDenoDir": true,
"args": "cache --quiet --reload --import-map=import_map.json test.ts",
"output": "cache.out"
}, {
"cleanDenoDir": true,
"args": "info --quiet --import-map=import_map.json test.ts",
"output": "info.out"
}, {
"args": "run --quiet --reload --import-map=import_map.json unmapped_bare_specifier.ts",
"output": "unmapped_bare_specifier.out",
"exitCode": 1
}, {
"args": "run --quiet --reload --import-map import_map.json import_data_url.ts",
"output": "import_data_url.out"
}]
"tests": {
"run_import_map": {
"args": "run --quiet --reload --import-map=import_map.json test.ts",
"output": "run.out"
},
"un_invalid_import_map": {
"args": "run --quiet --reload --import-map=import_map_invalid.json --config=config.json test.ts",
"output": "flag_has_precedence.out",
"exitCode": 1
},
"run_config": {
"args": "run --reload --config=config.json test.ts",
"output": "config.out"
},
"cache": {
"args": "cache --quiet --reload --import-map=import_map.json test.ts",
"output": "cache.out"
},
"info": {
"args": "info --quiet --import-map=import_map.json test.ts",
"output": "info.out"
},
"unmapped_bare_specifier": {
"args": "run --quiet --reload --import-map=import_map.json unmapped_bare_specifier.ts",
"output": "unmapped_bare_specifier.out",
"exitCode": 1
},
"data_url": {
"args": "run --quiet --reload --import-map import_map.json import_data_url.ts",
"output": "import_data_url.out"
}
}
}


@@ -1,10 +1,12 @@
{
"steps": [{
"args": "run main.ts",
"output": "main.out"
}, {
"cleanDenoDir": true,
"args": "info main.ts",
"output": "main_info.out"
}]
"tests": {
"run": {
"args": "run main.ts",
"output": "main.out"
},
"info": {
"args": "info main.ts",
"output": "main_info.out"
}
}
}


@@ -1,12 +1,14 @@
{
"steps": [{
"args": "run -A analyzable.ts",
"output": "analyzable.out",
"exitCode": 1
}, {
"cleanDenoDir": true,
"args": "run -A nonanalyzable.ts",
"output": "nonanalyzable.out",
"exitCode": 1
}]
"tests": {
"analyzable": {
"args": "run -A analyzable.ts",
"output": "analyzable.out",
"exitCode": 1
},
"non_analyzable": {
"args": "run -A nonanalyzable.ts",
"output": "nonanalyzable.out",
"exitCode": 1
}
}
}


@@ -1,10 +1,12 @@
{
"steps": [{
"args": "run --log-level=debug main.ts",
"output": "main.out"
}, {
"cleanDenoDir": true,
"args": "info main.ts",
"output": "main_info.out"
}]
"tests": {
"run": {
"args": "run --log-level=debug main.ts",
"output": "main.out"
},
"info": {
"args": "info main.ts",
"output": "main_info.out"
}
}
}


@@ -1,10 +1,12 @@
{
"steps": [{
"args": "run --log-level=debug main.ts",
"output": "main.out"
}, {
"cleanDenoDir": true,
"args": "info main.ts",
"output": "main_info.out"
}]
"tests": {
"run": {
"args": "run --log-level=debug main.ts",
"output": "main.out"
},
"info": {
"args": "info main.ts",
"output": "main_info.out"
}
}
}


@@ -1,13 +1,16 @@
{
"steps": [{
"args": "run main.ts",
"output": "main.out"
}, {
"cleanDenoDir": true,
"args": "info main.ts",
"output": "main_info.out"
}, {
"args": "run --quiet multiple.ts",
"output": "multiple.out"
}]
"tests": {
"single": {
"args": "run main.ts",
"output": "main.out"
},
"single_info": {
"args": "info main.ts",
"output": "main_info.out"
},
"multiple": {
"args": "run --quiet multiple.ts",
"output": "multiple.out"
}
}
}


@@ -1,6 +1,7 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::panic::AssertUnwindSafe;
@@ -10,9 +11,14 @@ use std::sync::Arc;
use deno_core::anyhow::Context;
use deno_core::serde_json;
use file_test_runner::collection::collect_tests_or_exit;
use file_test_runner::collection::strategies::FileTestMapperStrategy;
use file_test_runner::collection::strategies::TestPerDirectoryCollectionStrategy;
use file_test_runner::collection::CollectOptions;
use file_test_runner::collection::CollectTestsError;
use file_test_runner::collection::CollectedCategoryOrTest;
use file_test_runner::collection::CollectedTest;
use file_test_runner::collection::CollectedTestCategory;
use file_test_runner::TestResult;
use serde::Deserialize;
use test_util::tests_path;
use test_util::PathRef;
@@ -27,6 +33,8 @@ enum VecOrString {
String(String),
}
type JsonMap = serde_json::Map<String, serde_json::Value>;
#[derive(Clone, Deserialize)]
#[serde(deny_unknown_fields, rename_all = "camelCase")]
struct MultiTestMetaData {
@@ -40,6 +48,68 @@ struct MultiTestMetaData {
pub base: Option<String>,
#[serde(default)]
pub envs: HashMap<String, String>,
#[serde(default)]
pub tests: BTreeMap<String, JsonMap>,
}
impl MultiTestMetaData {
pub fn into_collected_tests(
mut self,
parent_test: &CollectedTest,
) -> Vec<CollectedTest<serde_json::Value>> {
fn merge_json_value(
multi_test_meta_data: &MultiTestMetaData,
value: &mut JsonMap,
) {
if let Some(base) = &multi_test_meta_data.base {
if !value.contains_key("base") {
value.insert("base".to_string(), base.clone().into());
}
}
if multi_test_meta_data.temp_dir && !value.contains_key("tempDir") {
value.insert("tempDir".to_string(), true.into());
}
if !multi_test_meta_data.envs.is_empty() {
if !value.contains_key("envs") {
value.insert("envs".to_string(), JsonMap::default().into());
}
let envs_obj = value.get_mut("envs").unwrap().as_object_mut().unwrap();
for (key, value) in &multi_test_meta_data.envs {
if !envs_obj.contains_key(key) {
envs_obj.insert(key.into(), value.clone().into());
}
}
}
}
let mut collected_tests = Vec::with_capacity(self.tests.len());
for (name, mut json_data) in std::mem::take(&mut self.tests) {
merge_json_value(&self, &mut json_data);
collected_tests.push(CollectedTest {
name: format!("{}::{}", parent_test.name, name),
path: parent_test.path.clone(),
data: serde_json::Value::Object(json_data),
});
}
collected_tests
}
}
#[derive(Clone, Deserialize)]
#[serde(deny_unknown_fields, rename_all = "camelCase")]
struct MultiStepMetaData {
/// Whether to copy all the non-assertion files in the current
/// test directory to a temporary directory before running the
/// steps.
#[serde(default)]
pub temp_dir: bool,
/// The base environment to use for the test.
#[serde(default)]
pub base: Option<String>,
#[serde(default)]
pub envs: HashMap<String, String>,
#[serde(default)]
pub steps: Vec<StepMetaData>,
}
@@ -55,8 +125,8 @@ struct SingleTestMetaData {
}
impl SingleTestMetaData {
pub fn into_multi(self) -> MultiTestMetaData {
MultiTestMetaData {
pub fn into_multi(self) -> MultiStepMetaData {
MultiStepMetaData {
base: self.base,
temp_dir: self.temp_dir,
envs: Default::default(),
@@ -68,9 +138,6 @@ impl SingleTestMetaData {
#[derive(Clone, Deserialize)]
#[serde(deny_unknown_fields, rename_all = "camelCase")]
struct StepMetaData {
/// Whether to clean the deno_dir before running the step.
#[serde(default)]
pub clean_deno_dir: bool,
/// If the test should be retried multiple times on failure.
#[serde(default)]
pub flaky: bool,
@@ -87,13 +154,17 @@ struct StepMetaData {
}
pub fn main() {
let root_category = collect_tests_or_exit(CollectOptions {
base: tests_path().join("specs").to_path_buf(),
strategy: Box::new(TestPerDirectoryCollectionStrategy {
file_name: MANIFEST_FILE_NAME.to_string(),
}),
filter_override: None,
});
let root_category =
collect_tests_or_exit::<serde_json::Value>(CollectOptions {
base: tests_path().join("specs").to_path_buf(),
strategy: Box::new(FileTestMapperStrategy {
base_strategy: TestPerDirectoryCollectionStrategy {
file_name: MANIFEST_FILE_NAME.to_string(),
},
map: map_test_within_file,
}),
filter_override: None,
});
if root_category.is_empty() {
return; // all tests filtered out
@@ -103,49 +174,106 @@ pub fn main() {
file_test_runner::run_tests(
&root_category,
file_test_runner::RunOptions { parallel: true },
Arc::new(|test| {
let diagnostic_logger = Rc::new(RefCell::new(Vec::<u8>::new()));
let result = file_test_runner::TestResult::from_maybe_panic(
AssertUnwindSafe(|| run_test(test, diagnostic_logger.clone())),
);
match result {
file_test_runner::TestResult::Passed
| file_test_runner::TestResult::Ignored => result,
file_test_runner::TestResult::Failed {
output: panic_output,
} => {
let mut output = diagnostic_logger.borrow().clone();
output.push(b'\n');
output.extend(panic_output);
file_test_runner::TestResult::Failed { output }
}
file_test_runner::TestResult::Steps(_) => unreachable!(),
}
}),
Arc::new(run_test),
);
}
fn run_test(test: &CollectedTest, diagnostic_logger: Rc<RefCell<Vec<u8>>>) {
let metadata_path = PathRef::new(&test.path);
let metadata_value = metadata_path.read_jsonc_value();
/// Maps a __test__.jsonc file to a category of tests if it contains a "tests" object.
fn map_test_within_file(
test: CollectedTest,
) -> Result<CollectedCategoryOrTest<serde_json::Value>, CollectTestsError> {
let test_path = PathRef::new(&test.path);
let metadata_value = test_path.read_jsonc_value();
if metadata_value
.as_object()
.map(|o| o.contains_key("tests"))
.unwrap_or(false)
{
let data: MultiTestMetaData = serde_json::from_value(metadata_value)
.with_context(|| format!("Failed deserializing {}", test_path))
.map_err(CollectTestsError::Other)?;
Ok(CollectedCategoryOrTest::Category(CollectedTestCategory {
children: data
.into_collected_tests(&test)
.into_iter()
.map(CollectedCategoryOrTest::Test)
.collect(),
name: test.name,
path: test.path,
}))
} else {
Ok(CollectedCategoryOrTest::Test(CollectedTest {
name: test.name,
path: test.path,
data: metadata_value,
}))
}
}
fn run_test(test: &CollectedTest<serde_json::Value>) -> TestResult {
let cwd = PathRef::new(&test.path).parent();
let metadata_value = test.data.clone();
let diagnostic_logger = Rc::new(RefCell::new(Vec::<u8>::new()));
let result = TestResult::from_maybe_panic(AssertUnwindSafe(|| {
run_test_inner(metadata_value, &cwd, diagnostic_logger.clone())
}));
match result {
TestResult::Failed {
output: panic_output,
} => {
let mut output = diagnostic_logger.borrow().clone();
output.push(b'\n');
output.extend(panic_output);
TestResult::Failed { output }
}
TestResult::Passed | TestResult::Ignored | TestResult::SubTests(_) => {
result
}
}
}
fn run_test_inner(
metadata_value: serde_json::Value,
cwd: &PathRef,
diagnostic_logger: Rc<RefCell<Vec<u8>>>,
) {
let metadata = deserialize_value(metadata_value);
let context = test_context_from_metadata(&metadata, cwd, diagnostic_logger);
for step in metadata.steps.iter().filter(|s| should_run_step(s)) {
let run_func = || run_step(step, &metadata, cwd, &context);
if step.flaky {
run_flaky(run_func);
} else {
run_func();
}
}
}
fn deserialize_value(metadata_value: serde_json::Value) -> MultiStepMetaData {
// checking for "steps" leads to a more targeted error message
// instead of when deserializing an untagged enum
let metadata = if metadata_value
if metadata_value
.as_object()
.and_then(|o| o.get("steps"))
.is_some()
.map(|o| o.contains_key("steps"))
.unwrap_or(false)
{
serde_json::from_value::<MultiTestMetaData>(metadata_value)
serde_json::from_value::<MultiStepMetaData>(metadata_value)
} else {
serde_json::from_value::<SingleTestMetaData>(metadata_value)
.map(|s| s.into_multi())
}
.with_context(|| format!("Failed to parse {}", metadata_path))
.unwrap();
.context("Failed to parse test spec")
.unwrap()
}
fn test_context_from_metadata(
metadata: &MultiStepMetaData,
cwd: &PathRef,
diagnostic_logger: Rc<RefCell<Vec<u8>>>,
) -> test_util::TestContext {
let mut builder = TestContextBuilder::new();
builder = builder.logging_capture(diagnostic_logger);
let cwd = PathRef::new(test.path.parent().unwrap());
if metadata.temp_dir {
builder = builder.use_temp_cwd();
@@ -171,18 +299,10 @@ fn run_test(test: &CollectedTest, diagnostic_logger: Rc<RefCell<Vec<u8>>>) {
// copy all the files in the cwd to a temp directory
// excluding the metadata and assertion files
let temp_dir = context.temp_dir().path();
let assertion_paths = resolve_test_and_assertion_files(&cwd, &metadata);
let assertion_paths = resolve_test_and_assertion_files(cwd, metadata);
cwd.copy_to_recursive_with_exclusions(temp_dir, &assertion_paths);
}
for step in metadata.steps.iter().filter(|s| should_run_step(s)) {
let run_func = || run_step(step, &metadata, &cwd, &context);
if step.flaky {
run_flaky(run_func);
} else {
run_func();
}
}
context
}
fn should_run_step(step: &StepMetaData) -> bool {
@@ -213,14 +333,10 @@ fn run_flaky(action: impl Fn()) {
fn run_step(
step: &StepMetaData,
metadata: &MultiTestMetaData,
metadata: &MultiStepMetaData,
cwd: &PathRef,
context: &test_util::TestContext,
) {
if step.clean_deno_dir {
context.deno_dir().path().remove_dir_all();
}
let command = context
.new_command()
.envs(metadata.envs.iter().chain(step.envs.iter()));
@@ -248,7 +364,7 @@ fn run_step(
fn resolve_test_and_assertion_files(
dir: &PathRef,
metadata: &MultiTestMetaData,
metadata: &MultiStepMetaData,
) -> HashSet<PathRef> {
let mut result = HashSet::with_capacity(metadata.steps.len() + 1);
result.insert(dir.join(MANIFEST_FILE_NAME));


@@ -1,21 +1,24 @@
{
"steps": [{
"args": "run --allow-read --allow-env main.js",
"output": "main.out"
}, {
"cleanDenoDir": true,
"args": "test --allow-read --allow-env test.js",
"output": "test.out"
}, {
"cleanDenoDir": true,
"args": [
"eval",
"import chalk from 'npm:chalk@5'; console.log(chalk.green('chalk esm loads'));"
],
"output": "main.out"
}, {
"args": "bundle --quiet main.js",
"output": "bundle.out",
"exitCode": 1
}]
"tests": {
"run": {
"args": "run --allow-read --allow-env main.js",
"output": "main.out"
},
"test": {
"args": "test --allow-read --allow-env test.js",
"output": "test.out"
},
"eval": {
"args": [
"eval",
"import chalk from 'npm:chalk@5'; console.log(chalk.green('chalk esm loads'));"
],
"output": "main.out"
},
"bundle": {
"args": "bundle --quiet main.js",
"output": "bundle.out",
"exitCode": 1
}
}
}


@@ -21,9 +21,6 @@
}
}]
},
"cleanDenoDir": {
"type": "boolean"
},
"cwd": {
"type": "string"
},
@@ -55,32 +52,53 @@
"type": "integer"
}
}
}
},
"oneOf": [{
"required": ["steps"],
"properties": {
"tempDir": {
"type": "boolean"
},
"base": {
"type": "string"
},
"envs": {
"type": "object",
"additionalProperties": {
"type": "string"
},
"single_or_multi_step_test": {
"oneOf": [{
"required": ["steps"],
"properties": {
"tempDir": {
"type": "boolean"
},
"base": {
"type": "string"
},
"envs": {
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"steps": {
"type": "array",
"items": {
"$ref": "#/definitions/single_test"
}
}
}
},
"steps": {
"type": "array",
"items": {
}, {
"allOf": [{
"properties": {
"tempDir": {
"type": "boolean"
},
"base": {
"type": "string"
},
"envs": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}, {
"$ref": "#/definitions/single_test"
}
}
}
}, {
"allOf": [{
}]
}]
},
"multi_test": {
"required": ["tests"],
"properties": {
"tempDir": {
"type": "boolean"
@@ -93,10 +111,19 @@
"additionalProperties": {
"type": "string"
}
},
"tests": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/single_or_multi_step_test"
}
}
}
}, {
"$ref": "#/definitions/single_test"
}]
}
},
"oneOf": [{
"$ref": "#/definitions/single_or_multi_step_test"
}, {
"$ref": "#/definitions/multi_test"
}]
}