Refactor unit test runner (#2294)

Properly discovers the permission combinations needed by the unit tests (via testPerm registrations in js/test_util.ts) and runs the test suite once for each discovered combination, instead of hard-coding the permission sets in tools/unit_tests.py.
Bartek Iwańczuk 2019-05-09 01:15:24 +02:00 committed by Ryan Dahl
parent ec9080f34c
commit ac8c6fec5b
7 changed files with 317 additions and 158 deletions
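
As an illustration of the mechanism this commit introduces (a hedged sketch, not part of the diff): a test declares the permissions it needs via testPerm, the declaration is normalized and registered in permissionCombinations, and the runner later starts one deno subprocess per registered combination. A hypothetical test file written against the new js/test_util.ts API might look like this:

// example_test.ts (hypothetical, for illustration only)
import { testPerm, assertEquals } from "./test_util.ts";

// { read: true } normalizes to the combination permR1W0N0E0U0H0 and is
// registered in permissionCombinations; the body only executes when the
// current process was started with --allow-read and no other --allow-* flag.
testPerm({ read: true }, function readDirSyncExample(): void {
  const entries = Deno.readDirSync(".");
  assertEquals(Array.isArray(entries), true);
});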

AppVeyor CI configuration
View file

@@ -218,7 +218,7 @@ after_test:
# listed explicitly in cli\BUILD.gn. This is not an air-tight check.
# TODO: make rollup or another bundler write a depfile.
- ps: |-
$ignore = "test_util.ts", "unit_tests.ts", "*_test.ts"
$ignore = "test_util.ts", "unit_tests.ts", "unit_test_runner.ts", "*_test.ts"
Get-ChildItem "js" -File -Force -Name |
where { $name = $_; -not ($ignore | where { $name -like $_ }) } |
where { -not (Select-String -Pattern $_ -Path cli\BUILD.gn `

js/test_util.ts
View file

@@ -17,10 +17,7 @@ export {
assertEquals
} from "./deps/https/deno.land/std/testing/asserts.ts";
// testing.setFilter must be run before any tests are defined.
testing.setFilter(Deno.args[1]);
interface DenoPermissions {
interface TestPermissions {
read?: boolean;
write?: boolean;
net?: boolean;
@@ -29,7 +26,24 @@ interface DenoPermissions {
highPrecision?: boolean;
}
function permToString(perms: DenoPermissions): string {
const processPerms = Deno.permissions();
function permissionsMatch(
processPerms: Deno.Permissions,
requiredPerms: Deno.Permissions
): boolean {
for (const permName in processPerms) {
if (processPerms[permName] !== requiredPerms[permName]) {
return false;
}
}
return true;
}
export const permissionCombinations: Map<string, Deno.Permissions> = new Map();
function permToString(perms: Deno.Permissions): string {
const r = perms.read ? 1 : 0;
const w = perms.write ? 1 : 0;
const n = perms.net ? 1 : 0;
@@ -39,28 +53,37 @@ function permToString(perms: DenoPermissions): string {
return `permR${r}W${w}N${n}E${e}U${u}H${h}`;
}
function permFromString(s: string): DenoPermissions {
const re = /^permR([01])W([01])N([01])E([01])U([01])H([01])$/;
const found = s.match(re);
if (!found) {
throw Error("Not a permission string");
function registerPermCombination(perms: Deno.Permissions): void {
const key = permToString(perms);
if (!permissionCombinations.has(key)) {
permissionCombinations.set(key, perms);
}
}
function normalizeTestPermissions(perms: TestPermissions): Deno.Permissions {
return {
read: Boolean(Number(found[1])),
write: Boolean(Number(found[2])),
net: Boolean(Number(found[3])),
env: Boolean(Number(found[4])),
run: Boolean(Number(found[5])),
highPrecision: Boolean(Number(found[6]))
read: !!perms.read,
write: !!perms.write,
net: !!perms.net,
run: !!perms.run,
env: !!perms.env,
highPrecision: !!perms.highPrecision
};
}
export function testPerm(
perms: DenoPermissions,
perms: TestPermissions,
fn: testing.TestFunction
): void {
const name = `${fn.name}_${permToString(perms)}`;
testing.test({ fn, name });
const normalizedPerms = normalizeTestPermissions(perms);
registerPermCombination(normalizedPerms);
if (!permissionsMatch(processPerms, normalizedPerms)) {
return;
}
testing.test(fn);
}
export function test(fn: testing.TestFunction): void {
@@ -77,38 +100,160 @@ export function test(fn: testing.TestFunction): void {
);
}
test(function permSerialization(): void {
for (const write of [true, false]) {
for (const net of [true, false]) {
for (const env of [true, false]) {
for (const run of [true, false]) {
for (const read of [true, false]) {
for (const highPrecision of [true, false]) {
const perms: DenoPermissions = {
write,
net,
env,
run,
read,
highPrecision
};
assertEquals(perms, permFromString(permToString(perms)));
}
}
}
}
function extractNumber(re: RegExp, str: string): number | undefined {
const match = str.match(re);
if (match) {
return Number.parseInt(match[1]);
}
}
export function parseUnitTestOutput(
rawOutput: Uint8Array,
print: boolean
): { actual?: number; expected?: number; resultOutput?: string } {
const decoder = new TextDecoder();
const output = decoder.decode(rawOutput);
let expected, actual, result;
for (const line of output.split("\n")) {
if (!expected) {
// expect "running 30 tests"
expected = extractNumber(/running (\d+) tests/, line);
} else if (line.indexOf("test result:") !== -1) {
result = line;
}
if (print) {
console.log(line);
}
}
// Check that the number of expected tests equals what was reported at the
// bottom.
if (result) {
// result should be a string like this:
// "test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; ..."
actual = extractNumber(/(\d+) passed/, result);
}
return { actual, expected, resultOutput: result };
}
test(function permissionsMatches(): void {
assert(
permissionsMatch(
{
read: true,
write: false,
net: false,
env: false,
run: false,
highPrecision: false
},
normalizeTestPermissions({ read: true })
)
);
assert(
permissionsMatch(
{
read: false,
write: false,
net: false,
env: false,
run: false,
highPrecision: false
},
normalizeTestPermissions({})
)
);
assertEquals(
permissionsMatch(
{
read: false,
write: true,
net: true,
env: true,
run: true,
highPrecision: true
},
normalizeTestPermissions({ read: true })
),
false
);
assertEquals(
permissionsMatch(
{
read: true,
write: false,
net: true,
env: false,
run: false,
highPrecision: false
},
normalizeTestPermissions({ read: true })
),
false
);
assert(
permissionsMatch(
{
read: true,
write: true,
net: true,
env: true,
run: true,
highPrecision: true
},
{
read: true,
write: true,
net: true,
env: true,
run: true,
highPrecision: true
}
)
);
});
// To better catch internal errors, permFromString should throw if it gets an
// invalid permission string.
test(function permFromStringThrows(): void {
let threw = false;
try {
permFromString("bad");
} catch (e) {
threw = true;
}
assert(threw);
testPerm({ read: true }, async function parsingUnitTestOutput(): Promise<void> {
const cwd = Deno.cwd();
const testDataPath = `${cwd}/tools/testdata/`;
let result;
// This is an example of a successful unit test output.
result = parseUnitTestOutput(
await Deno.readFile(`${testDataPath}/unit_test_output1.txt`),
false
);
assertEquals(result.actual, 96);
assertEquals(result.expected, 96);
// This is an example of a silently dying unit test.
result = parseUnitTestOutput(
await Deno.readFile(`${testDataPath}/unit_test_output2.txt`),
false
);
assertEquals(result.actual, undefined);
assertEquals(result.expected, 96);
// This is an example of compiling before successful unit tests.
result = parseUnitTestOutput(
await Deno.readFile(`${testDataPath}/unit_test_output3.txt`),
false
);
assertEquals(result.actual, 96);
assertEquals(result.expected, 96);
// Check what happens on empty output.
result = parseUnitTestOutput(new TextEncoder().encode("\n\n\n"), false);
assertEquals(result.actual, undefined);
assertEquals(result.expected, undefined);
});
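For orientation (illustrative values only, not part of the diff), the helpers defined above behave roughly as follows when evaluated inside js/test_util.ts:

// Assumed sample inputs and the values the code above would produce.
normalizeTestPermissions({ read: true, net: true });
// => { read: true, write: false, net: true, env: false, run: false,
//      highPrecision: false }

permToString(normalizeTestPermissions({ read: true, net: true }));
// => "permR1W0N1E0U0H0"

parseUnitTestOutput(
  new TextEncoder().encode(
    "running 2 tests\ntest result: ok. 2 passed; 0 failed;\n"
  ),
  false
);
// => { actual: 2, expected: 2,
//      resultOutput: "test result: ok. 2 passed; 0 failed;" }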

js/unit_test_runner.ts (new executable file, 101 lines)
View file

@@ -0,0 +1,101 @@
#!/usr/bin/env deno run --reload --allow-run
// Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
import "./unit_tests.ts";
import { permissionCombinations, parseUnitTestOutput } from "./test_util.ts";
function permsToCliFlags(perms: Deno.Permissions): string[] {
return Object.keys(perms)
.map(
(key): string => {
if (!perms[key]) return "";
const cliFlag = key.replace(
/\.?([A-Z])/g,
(x, y): string => `-${y.toLowerCase()}`
);
return `--allow-${cliFlag}`;
}
)
.filter((e): boolean => e.length > 0);
}
function fmtPerms(perms: Deno.Permissions): string {
let fmt = permsToCliFlags(perms).join(" ");
if (!fmt) {
fmt = "<no permissions>";
}
return fmt;
}
async function main(): Promise<void> {
console.log(
"Discovered permission combinations for tests:",
permissionCombinations.size
);
for (const perms of permissionCombinations.values()) {
console.log("\t" + fmtPerms(perms));
}
const testResults = new Set();
for (const perms of permissionCombinations.values()) {
const permsFmt = fmtPerms(perms);
console.log(`Running tests for: ${permsFmt}`);
const cliPerms = permsToCliFlags(perms);
// run subsequent tests using same deno executable
const args = [
Deno.execPath,
"run",
"--no-prompt",
...cliPerms,
"js/unit_tests.ts"
];
const p = Deno.run({
args,
stdout: "piped"
});
const { actual, expected, resultOutput } = parseUnitTestOutput(
await p.output(),
true
);
let result = 0;
if (!actual && !expected) {
console.error("Bad js/unit_test.ts output");
result = 1;
} else if (expected !== actual) {
result = 1;
}
testResults.add({
perms: permsFmt,
output: resultOutput,
result
});
}
// If any of the test runs returned a non-zero status, the whole test
// run should fail.
let testsFailed = false;
for (const testResult of testResults.values()) {
console.log(`Summary for ${testResult.perms}`);
console.log(testResult.output + "\n");
testsFailed = testsFailed || testResult.result;
}
if (testsFailed) {
console.error("Unit tests failed");
Deno.exit(1);
}
console.log("Unit tests passed");
}
main();
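As a worked example (not from the diff), the camelCase-to-flag conversion in permsToCliFlags and the resulting subprocess invocation look roughly like this when evaluated inside js/unit_test_runner.ts:

// Assumed sample input; "highPrecision" becomes "high-precision".
permsToCliFlags({ read: true, write: false, net: false,
                  env: false, run: false, highPrecision: true });
// => ["--allow-read", "--allow-high-precision"]
//
// so, for that combination, the runner would spawn roughly:
//   <Deno.execPath> run --no-prompt --allow-read --allow-high-precision js/unit_tests.ts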

js/unit_tests.ts
View file

@@ -51,4 +51,11 @@ import "./version_test.ts";
import "../website/app_test.js";
import "./deps/https/deno.land/std/testing/main.ts";
import { runIfMain } from "./deps/https/deno.land/std/testing/mod.ts";
async function main(): Promise<void> {
// Testing entire test suite serially
runIfMain(import.meta);
}
main();

tools/unit_tests.py
View file

@@ -1,67 +1,27 @@
#!/usr/bin/env python
# Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
import util
import sys
import subprocess
import re
import http_server
def run_unit_test2(cmd):
process = subprocess.Popen(
cmd,
bufsize=1,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(actual, expected) = util.parse_unit_test_output(process.stdout, True)
process.wait()
errcode = process.returncode
if errcode != 0:
sys.exit(errcode)
# To avoid the case where we silently filter out all tests.
assert expected > 0
if actual == None and expected == None:
raise AssertionError("Bad js/unit_test.ts output")
if expected != actual:
print "expected", expected, "actual", actual
raise AssertionError("expected tests did not equal actual")
process.wait()
errcode = process.returncode
if errcode != 0:
sys.exit(errcode)
def run_unit_test(deno_exe, permStr, flags=None):
if flags is None:
flags = []
cmd = [deno_exe, "run"] + flags + ["js/unit_tests.ts", permStr]
run_unit_test2(cmd)
# We want to test many ops in deno which have different behavior depending on
# the permissions set. These tests can specify which permissions they expect,
# which appends a special string like "permW1N0" to the end of the test name.
# Here we run several copies of deno with different permissions, filtering the
# tests by the special string. permW1N0 means allow-write but not allow-net.
# See js/test_util.ts for more details.
def unit_tests(deno_exe):
run_unit_test(deno_exe, "permR0W0N0E0U0H0", ["--reload"])
run_unit_test(deno_exe, "permR1W0N0E0U0H0", ["--allow-read"])
run_unit_test(deno_exe, "permR0W1N0E0U0H0", ["--allow-write"])
run_unit_test(deno_exe, "permR0W0N1E0U0H0", ["--allow-net"])
run_unit_test(deno_exe, "permR1W1N0E0U0H0",
["--allow-read", "--allow-write"])
run_unit_test(deno_exe, "permR0W0N0E1U0H0", ["--allow-env"])
run_unit_test(deno_exe, "permR0W0N0E0U0H1", ["--allow-high-precision"])
run_unit_test(deno_exe, "permR0W0N0E0U1H0", ["--allow-run"])
run_unit_test(deno_exe, "permR0W1N0E0U1H0",
["--allow-run", "--allow-write"])
# TODO We might accidentally miss some. We should be smarter about which we
# run. Maybe we can use the "filtered out" number to check this.
cmd = [
deno_exe, "run", "--reload", "--allow-run", "js/unit_test_runner.ts"
]
process = subprocess.Popen(
cmd, bufsize=1, universal_newlines=True, stderr=subprocess.STDOUT)
process.wait()
errcode = process.returncode
if errcode != 0:
sys.exit(errcode)
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage ./tools/unit_tests.py target/debug/deno"
sys.exit(1)
http_server.spawn()
unit_tests(sys.argv[1])

tools/util.py
View file

@@ -329,28 +329,6 @@ def enable_ansi_colors_win10():
return True
def parse_unit_test_output(output, print_to_stdout):
expected = None
actual = None
result = None
for line in iter(output.readline, ''):
if expected is None:
# expect "running 30 tests"
expected = extract_number(r'running (\d+) tests', line)
elif "test result:" in line:
result = line
if print_to_stdout:
sys.stdout.write(line)
sys.stdout.flush()
# Check that the number of expected tests equals what was reported at the
# bottom.
if result:
# result should be a string like this:
# "test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; ..."
actual = extract_number(r'(\d+) passed', result)
return (actual, expected)
def extract_number(pattern, string):
matches = re.findall(pattern, string)
if len(matches) != 1:

tools/util_test.py
View file

@@ -48,37 +48,6 @@ def shell_quote_win_test():
'a"b""c\\d\\"e\\\\')
def parse_unit_test_output_test():
print "Testing util.parse_unit_test_output()..."
# This is an example of a successful unit test output.
output = open(
os.path.join(util.root_path, "tools/testdata/unit_test_output1.txt"))
(actual, expected) = util.parse_unit_test_output(output, False)
assert actual == 96
assert expected == 96
# This is an example of a silently dying unit test.
output = open(
os.path.join(util.root_path, "tools/testdata/unit_test_output2.txt"))
(actual, expected) = util.parse_unit_test_output(output, False)
assert actual == None
assert expected == 96
# This is an example of compiling before successful unit tests.
output = open(
os.path.join(util.root_path, "tools/testdata/unit_test_output3.txt"))
(actual, expected) = util.parse_unit_test_output(output, False)
assert actual == 96
assert expected == 96
# Check what happens on empty output.
from StringIO import StringIO
output = StringIO("\n\n\n")
(actual, expected) = util.parse_unit_test_output(output, False)
assert actual == None
assert expected == None
def parse_wrk_output_test():
print "Testing util.parse_wrk_output_test()..."
f = open(os.path.join(util.root_path, "tools/testdata/wrk1.txt"))
@@ -101,7 +70,6 @@ def util_test():
pattern_match_test()
parse_exit_code_test()
shell_quote_win_test()
parse_unit_test_output_test()
parse_wrk_output_test()