Refactor unit test runner (#2294)

Properly discovers the permissions needed for each test.
Bartek Iwańczuk, 2019-05-09 01:15:24 +02:00 (committed by Ryan Dahl)
parent ec9080f34c
commit ac8c6fec5b
7 changed files with 317 additions and 158 deletions
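
For orientation, a minimal sketch of how a test opts into the new discovery flow. The test file and test body below are hypothetical; testPerm and permissionCombinations are the helpers this commit adds to js/test_util.ts.

// hypothetical_test.ts -- illustration only, not part of this commit
import { testPerm, permissionCombinations } from "./test_util.ts";

// Declaring the required permissions registers this combination in
// permissionCombinations; the body is only defined as a test when the
// current process already holds exactly these permissions.
testPerm({ read: true, write: true }, function tempDirRoundTrip(): void {
  // ... exercise an op that needs --allow-read and --allow-write ...
});

// js/unit_test_runner.ts then enumerates permissionCombinations and spawns
// one `deno run --no-prompt <flags> js/unit_tests.ts` subprocess per
// discovered combination.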

.appveyor.yml

@@ -218,7 +218,7 @@ after_test:
   # listed explicitly in cli\BUILD.gn. This is not an air-tight check.
   # TODO: make rollup or another bundler write a depfile.
   - ps: |-
-      $ignore = "test_util.ts", "unit_tests.ts", "*_test.ts"
+      $ignore = "test_util.ts", "unit_tests.ts", "unit_test_runner.ts", "*_test.ts"
       Get-ChildItem "js" -File -Force -Name |
         where { $name = $_; -not ($ignore | where { $name -like $_ }) } |
         where { -not (Select-String -Pattern $_ -Path cli\BUILD.gn `

js/test_util.ts

@@ -17,10 +17,7 @@ export {
   assertEquals
 } from "./deps/https/deno.land/std/testing/asserts.ts";

-// testing.setFilter must be run before any tests are defined.
-testing.setFilter(Deno.args[1]);
-
-interface DenoPermissions {
+interface TestPermissions {
   read?: boolean;
   write?: boolean;
   net?: boolean;
@@ -29,7 +26,24 @@ interface DenoPermissions {
   highPrecision?: boolean;
 }

-function permToString(perms: DenoPermissions): string {
+const processPerms = Deno.permissions();
+
+function permissionsMatch(
+  processPerms: Deno.Permissions,
+  requiredPerms: Deno.Permissions
+): boolean {
+  for (const permName in processPerms) {
+    if (processPerms[permName] !== requiredPerms[permName]) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+export const permissionCombinations: Map<string, Deno.Permissions> = new Map();
+
+function permToString(perms: Deno.Permissions): string {
   const r = perms.read ? 1 : 0;
   const w = perms.write ? 1 : 0;
   const n = perms.net ? 1 : 0;
@@ -39,28 +53,37 @@ function permToString(perms: DenoPermissions): string {
   return `permR${r}W${w}N${n}E${e}U${u}H${h}`;
 }

-function permFromString(s: string): DenoPermissions {
-  const re = /^permR([01])W([01])N([01])E([01])U([01])H([01])$/;
-  const found = s.match(re);
-  if (!found) {
-    throw Error("Not a permission string");
+function registerPermCombination(perms: Deno.Permissions): void {
+  const key = permToString(perms);
+  if (!permissionCombinations.has(key)) {
+    permissionCombinations.set(key, perms);
   }
+}
+
+function normalizeTestPermissions(perms: TestPermissions): Deno.Permissions {
   return {
-    read: Boolean(Number(found[1])),
-    write: Boolean(Number(found[2])),
-    net: Boolean(Number(found[3])),
-    env: Boolean(Number(found[4])),
-    run: Boolean(Number(found[5])),
-    highPrecision: Boolean(Number(found[6]))
+    read: !!perms.read,
+    write: !!perms.write,
+    net: !!perms.net,
+    run: !!perms.run,
+    env: !!perms.env,
+    highPrecision: !!perms.highPrecision
   };
 }

 export function testPerm(
-  perms: DenoPermissions,
+  perms: TestPermissions,
   fn: testing.TestFunction
 ): void {
-  const name = `${fn.name}_${permToString(perms)}`;
-  testing.test({ fn, name });
+  const normalizedPerms = normalizeTestPermissions(perms);
+
+  registerPermCombination(normalizedPerms);
+
+  if (!permissionsMatch(processPerms, normalizedPerms)) {
+    return;
+  }
+
+  testing.test(fn);
 }

 export function test(fn: testing.TestFunction): void {
@@ -77,38 +100,160 @@ export function test(fn: testing.TestFunction): void {
   );
 }

-test(function permSerialization(): void {
-  for (const write of [true, false]) {
-    for (const net of [true, false]) {
-      for (const env of [true, false]) {
-        for (const run of [true, false]) {
-          for (const read of [true, false]) {
-            for (const highPrecision of [true, false]) {
-              const perms: DenoPermissions = {
-                write,
-                net,
-                env,
-                run,
-                read,
-                highPrecision
-              };
-              assertEquals(perms, permFromString(permToString(perms)));
-            }
-          }
-        }
-      }
-    }
-  }
-});
-
-// To better catch internal errors, permFromString should throw if it gets an
-// invalid permission string.
-test(function permFromStringThrows(): void {
-  let threw = false;
-  try {
-    permFromString("bad");
-  } catch (e) {
-    threw = true;
-  }
-  assert(threw);
-});
+function extractNumber(re: RegExp, str: string): number | undefined {
+  const match = str.match(re);
+
+  if (match) {
+    return Number.parseInt(match[1]);
+  }
+}
+
+export function parseUnitTestOutput(
+  rawOutput: Uint8Array,
+  print: boolean
+): { actual?: number; expected?: number; resultOutput?: string } {
+  const decoder = new TextDecoder();
+  const output = decoder.decode(rawOutput);
+
+  let expected, actual, result;
+
+  for (const line of output.split("\n")) {
+    if (!expected) {
+      // expect "running 30 tests"
+      expected = extractNumber(/running (\d+) tests/, line);
+    } else if (line.indexOf("test result:") !== -1) {
+      result = line;
+    }
+
+    if (print) {
+      console.log(line);
+    }
+  }
+
+  // Check that the number of expected tests equals what was reported at the
+  // bottom.
+  if (result) {
+    // result should be a string like this:
+    // "test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; ..."
+    actual = extractNumber(/(\d+) passed/, result);
+  }
+
+  return { actual, expected, resultOutput: result };
+}
+
+test(function permissionsMatches(): void {
+  assert(
+    permissionsMatch(
+      {
+        read: true,
+        write: false,
+        net: false,
+        env: false,
+        run: false,
+        highPrecision: false
+      },
+      normalizeTestPermissions({ read: true })
+    )
+  );
+
+  assert(
+    permissionsMatch(
+      {
+        read: false,
+        write: false,
+        net: false,
+        env: false,
+        run: false,
+        highPrecision: false
+      },
+      normalizeTestPermissions({})
+    )
+  );
+
+  assertEquals(
+    permissionsMatch(
+      {
+        read: false,
+        write: true,
+        net: true,
+        env: true,
+        run: true,
+        highPrecision: true
+      },
+      normalizeTestPermissions({ read: true })
+    ),
+    false
+  );
+
+  assertEquals(
+    permissionsMatch(
+      {
+        read: true,
+        write: false,
+        net: true,
+        env: false,
+        run: false,
+        highPrecision: false
+      },
+      normalizeTestPermissions({ read: true })
+    ),
+    false
+  );
+
+  assert(
+    permissionsMatch(
+      {
+        read: true,
+        write: true,
+        net: true,
+        env: true,
+        run: true,
+        highPrecision: true
+      },
+      {
+        read: true,
+        write: true,
+        net: true,
+        env: true,
+        run: true,
+        highPrecision: true
+      }
+    )
+  );
+});
+
+testPerm({ read: true }, async function parsingUnitTestOutput(): Promise<void> {
+  const cwd = Deno.cwd();
+  const testDataPath = `${cwd}/tools/testdata/`;
+
+  let result;
+
+  // This is an example of a successful unit test output.
+  result = parseUnitTestOutput(
+    await Deno.readFile(`${testDataPath}/unit_test_output1.txt`),
+    false
+  );
+  assertEquals(result.actual, 96);
+  assertEquals(result.expected, 96);
+
+  // This is an example of a silently dying unit test.
+  result = parseUnitTestOutput(
+    await Deno.readFile(`${testDataPath}/unit_test_output2.txt`),
+    false
+  );
+  assertEquals(result.actual, undefined);
+  assertEquals(result.expected, 96);
+
+  // This is an example of compiling before successful unit tests.
+  result = parseUnitTestOutput(
+    await Deno.readFile(`${testDataPath}/unit_test_output3.txt`),
+    false
+  );
+  assertEquals(result.actual, 96);
+  assertEquals(result.expected, 96);
+
+  // Check what happens on empty output.
+  result = parseUnitTestOutput(new TextEncoder().encode("\n\n\n"), false);
+  assertEquals(result.actual, undefined);
+  assertEquals(result.expected, undefined);
+});

js/unit_test_runner.ts (new executable file)

@@ -0,0 +1,101 @@
#!/usr/bin/env deno run --reload --allow-run
// Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
import "./unit_tests.ts";
import { permissionCombinations, parseUnitTestOutput } from "./test_util.ts";

function permsToCliFlags(perms: Deno.Permissions): string[] {
  return Object.keys(perms)
    .map(
      (key): string => {
        if (!perms[key]) return "";

        const cliFlag = key.replace(
          /\.?([A-Z])/g,
          (x, y): string => `-${y.toLowerCase()}`
        );
        return `--allow-${cliFlag}`;
      }
    )
    .filter((e): boolean => e.length > 0);
}

function fmtPerms(perms: Deno.Permissions): string {
  let fmt = permsToCliFlags(perms).join(" ");

  if (!fmt) {
    fmt = "<no permissions>";
  }

  return fmt;
}

async function main(): Promise<void> {
  console.log(
    "Discovered permission combinations for tests:",
    permissionCombinations.size
  );

  for (const perms of permissionCombinations.values()) {
    console.log("\t" + fmtPerms(perms));
  }

  const testResults = new Set();

  for (const perms of permissionCombinations.values()) {
    const permsFmt = fmtPerms(perms);
    console.log(`Running tests for: ${permsFmt}`);
    const cliPerms = permsToCliFlags(perms);
    // run subsequent tests using same deno executable
    const args = [
      Deno.execPath,
      "run",
      "--no-prompt",
      ...cliPerms,
      "js/unit_tests.ts"
    ];

    const p = Deno.run({
      args,
      stdout: "piped"
    });

    const { actual, expected, resultOutput } = parseUnitTestOutput(
      await p.output(),
      true
    );

    let result = 0;

    if (!actual && !expected) {
      console.error("Bad js/unit_test.ts output");
      result = 1;
    } else if (expected !== actual) {
      result = 1;
    }

    testResults.add({
      perms: permsFmt,
      output: resultOutput,
      result
    });
  }

  // if any run tests returned non-zero status then whole test
  // run should fail
  let testsFailed = false;

  for (const testResult of testResults.values()) {
    console.log(`Summary for ${testResult.perms}`);
    console.log(testResult.output + "\n");
    testsFailed = testsFailed || testResult.result;
  }

  if (testsFailed) {
    console.error("Unit tests failed");
    Deno.exit(1);
  }

  console.log("Unit tests passed");
}

main();

js/unit_tests.ts

@@ -51,4 +51,11 @@ import "./version_test.ts";
 import "../website/app_test.js";

-import "./deps/https/deno.land/std/testing/main.ts";
+import { runIfMain } from "./deps/https/deno.land/std/testing/mod.ts";
+
+async function main(): Promise<void> {
+  // Testing entire test suite serially
+  runIfMain(import.meta);
+}
+
+main();

tools/unit_tests.py

@@ -1,67 +1,27 @@
 #!/usr/bin/env python
 # Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
-import util
 import sys
 import subprocess
-import re
-
-
-def run_unit_test2(cmd):
-    process = subprocess.Popen(
-        cmd,
-        bufsize=1,
-        universal_newlines=True,
-        stdout=subprocess.PIPE,
-        stderr=subprocess.STDOUT)
-    (actual, expected) = util.parse_unit_test_output(process.stdout, True)
-    process.wait()
-    errcode = process.returncode
-    if errcode != 0:
-        sys.exit(errcode)
-    # To avoid the case where we silently filter out all tests.
-    assert expected > 0
-    if actual == None and expected == None:
-        raise AssertionError("Bad js/unit_test.ts output")
-    if expected != actual:
-        print "expected", expected, "actual", actual
-        raise AssertionError("expected tests did not equal actual")
-    process.wait()
-    errcode = process.returncode
-    if errcode != 0:
-        sys.exit(errcode)
-
-
-def run_unit_test(deno_exe, permStr, flags=None):
-    if flags is None:
-        flags = []
-    cmd = [deno_exe, "run"] + flags + ["js/unit_tests.ts", permStr]
-    run_unit_test2(cmd)
-
-
-# We want to test many ops in deno which have different behavior depending on
-# the permissions set. These tests can specify which permissions they expect,
-# which appends a special string like "permW1N0" to the end of the test name.
-# Here we run several copies of deno with different permissions, filtering the
-# tests by the special string. permW1N0 means allow-write but not allow-net.
-# See js/test_util.ts for more details.
+import http_server
+
+
 def unit_tests(deno_exe):
-    run_unit_test(deno_exe, "permR0W0N0E0U0H0", ["--reload"])
-    run_unit_test(deno_exe, "permR1W0N0E0U0H0", ["--allow-read"])
-    run_unit_test(deno_exe, "permR0W1N0E0U0H0", ["--allow-write"])
-    run_unit_test(deno_exe, "permR0W0N1E0U0H0", ["--allow-net"])
-    run_unit_test(deno_exe, "permR1W1N0E0U0H0",
-                  ["--allow-read", "--allow-write"])
-    run_unit_test(deno_exe, "permR0W0N0E1U0H0", ["--allow-env"])
-    run_unit_test(deno_exe, "permR0W0N0E0U0H1", ["--allow-high-precision"])
-    run_unit_test(deno_exe, "permR0W0N0E0U1H0", ["--allow-run"])
-    run_unit_test(deno_exe, "permR0W1N0E0U1H0",
-                  ["--allow-run", "--allow-write"])
-    # TODO We might accidentally miss some. We should be smarter about which we
-    # run. Maybe we can use the "filtered out" number to check this.
+    cmd = [
+        deno_exe, "run", "--reload", "--allow-run", "js/unit_test_runner.ts"
+    ]
+    process = subprocess.Popen(
+        cmd, bufsize=1, universal_newlines=True, stderr=subprocess.STDOUT)
+
+    process.wait()
+    errcode = process.returncode
+    if errcode != 0:
+        sys.exit(errcode)


 if __name__ == '__main__':
     if len(sys.argv) < 2:
         print "Usage ./tools/unit_tests.py target/debug/deno"
         sys.exit(1)
+    http_server.spawn()
     unit_tests(sys.argv[1])

tools/util.py

@@ -329,28 +329,6 @@ def enable_ansi_colors_win10():
     return True


-def parse_unit_test_output(output, print_to_stdout):
-    expected = None
-    actual = None
-    result = None
-    for line in iter(output.readline, ''):
-        if expected is None:
-            # expect "running 30 tests"
-            expected = extract_number(r'running (\d+) tests', line)
-        elif "test result:" in line:
-            result = line
-        if print_to_stdout:
-            sys.stdout.write(line)
-            sys.stdout.flush()
-    # Check that the number of expected tests equals what was reported at the
-    # bottom.
-    if result:
-        # result should be a string like this:
-        # "test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; ..."
-        actual = extract_number(r'(\d+) passed', result)
-    return (actual, expected)
-
-
 def extract_number(pattern, string):
     matches = re.findall(pattern, string)
     if len(matches) != 1:

tools/util_test.py

@@ -48,37 +48,6 @@ def shell_quote_win_test():
         'a"b""c\\d\\"e\\\\')


-def parse_unit_test_output_test():
-    print "Testing util.parse_unit_test_output()..."
-    # This is an example of a successful unit test output.
-    output = open(
-        os.path.join(util.root_path, "tools/testdata/unit_test_output1.txt"))
-    (actual, expected) = util.parse_unit_test_output(output, False)
-    assert actual == 96
-    assert expected == 96
-    # This is an example of a silently dying unit test.
-    output = open(
-        os.path.join(util.root_path, "tools/testdata/unit_test_output2.txt"))
-    (actual, expected) = util.parse_unit_test_output(output, False)
-    assert actual == None
-    assert expected == 96
-    # This is an example of compiling before successful unit tests.
-    output = open(
-        os.path.join(util.root_path, "tools/testdata/unit_test_output3.txt"))
-    (actual, expected) = util.parse_unit_test_output(output, False)
-    assert actual == 96
-    assert expected == 96
-    # Check what happens on empty output.
-    from StringIO import StringIO
-    output = StringIO("\n\n\n")
-    (actual, expected) = util.parse_unit_test_output(output, False)
-    assert actual == None
-    assert expected == None
-
-
 def parse_wrk_output_test():
     print "Testing util.parse_wrk_output_test()..."
     f = open(os.path.join(util.root_path, "tools/testdata/wrk1.txt"))
@@ -101,7 +70,6 @@ def util_test():
     pattern_match_test()
     parse_exit_code_test()
     shell_quote_win_test()
-    parse_unit_test_output_test()
     parse_wrk_output_test()