bpo-44708: Only re-run test methods that match names of previously failing test methods (GH-27287)

* Move to a static argparse.Namespace subclass
* Roughly annotate runtest.py
* Refactor libregrtest to use lossless test result objects
* Only re-run test methods that match names of previously failing test methods
* Adapt tests to cover test method name matching

Co-authored-by: Pablo Galindo Salgado <Pablogsal@gmail.com>
Łukasz Langa 2021-07-22 20:25:58 +02:00 committed by GitHub
parent 50ffbe3daf
commit f1afef5e0d
7 changed files with 341 additions and 174 deletions

Lib/test/libregrtest/cmdline.py

@@ -140,6 +140,39 @@
# default (see bpo-30822).
RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata')
class Namespace(argparse.Namespace):
def __init__(self, **kwargs) -> None:
self.testdir = None
self.verbose = 0
self.quiet = False
self.exclude = False
self.single = False
self.randomize = False
self.fromfile = None
self.findleaks = 1
self.fail_env_changed = False
self.use_resources = None
self.trace = False
self.coverdir = 'coverage'
self.runleaks = False
self.huntrleaks = False
self.verbose2 = False
self.verbose3 = False
self.print_slow = False
self.random_seed = None
self.use_mp = None
self.forever = False
self.header = False
self.failfast = False
self.match_tests = None
self.ignore_tests = None
self.pgo = False
self.pgo_extended = False
super().__init__(**kwargs)
class _ArgParser(argparse.ArgumentParser):
def error(self, message):
@@ -320,13 +353,7 @@ def resources_list(string):
def _parse_args(args, **kwargs):
# Defaults
ns = argparse.Namespace(testdir=None, verbose=0, quiet=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=1, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False, failfast=False, match_tests=None, ignore_tests=None,
pgo=False)
ns = Namespace()
for k, v in kwargs.items():
if not hasattr(ns, k):
raise TypeError('%r is an invalid keyword argument '
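
The move from an inline argparse.Namespace(...) default block to a dedicated subclass keeps every default in one place while still letting keyword arguments override them: argparse.Namespace.__init__ applies each of its **kwargs via setattr() after the defaults are assigned. A minimal sketch of the pattern (trimmed to two fields; the real class above sets them all):

```python
import argparse

class Namespace(argparse.Namespace):
    def __init__(self, **kwargs) -> None:
        # Defaults first; the real class initializes ~25 attributes.
        self.verbose = 0
        self.match_tests = None
        # argparse.Namespace.__init__ setattr()s each kwarg, so explicit
        # keyword arguments override the defaults assigned above.
        super().__init__(**kwargs)

ns = Namespace(verbose=2)
assert ns.verbose == 2 and ns.match_tests is None
```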

Lib/test/libregrtest/main.py

@@ -11,10 +11,10 @@
import unittest
from test.libregrtest.cmdline import _parse_args
from test.libregrtest.runtest import (
findtests, runtest, get_abs_module,
STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED,
INTERRUPTED, CHILD_ERROR, TEST_DID_NOT_RUN, TIMEOUT,
PROGRESS_MIN_TIME, format_test_result, is_failed)
findtests, runtest, get_abs_module, is_failed,
STDTESTS, NOTTESTS, PROGRESS_MIN_TIME,
Passed, Failed, EnvChanged, Skipped, ResourceDenied, Interrupted,
ChildError, DidNotRun)
from test.libregrtest.setup import setup_tests
from test.libregrtest.pgo import setup_pgo_tests
from test.libregrtest.utils import removepy, count, format_duration, printlist
@@ -99,34 +99,32 @@ def get_executed(self):
| set(self.run_no_tests))
def accumulate_result(self, result, rerun=False):
test_name = result.test_name
ok = result.result
test_name = result.name
if ok not in (CHILD_ERROR, INTERRUPTED) and not rerun:
self.test_times.append((result.test_time, test_name))
if not isinstance(result, (ChildError, Interrupted)) and not rerun:
self.test_times.append((result.duration_sec, test_name))
if ok == PASSED:
if isinstance(result, Passed):
self.good.append(test_name)
elif ok in (FAILED, CHILD_ERROR):
if not rerun:
self.bad.append(test_name)
elif ok == ENV_CHANGED:
self.environment_changed.append(test_name)
elif ok == SKIPPED:
self.skipped.append(test_name)
elif ok == RESOURCE_DENIED:
elif isinstance(result, ResourceDenied):
self.skipped.append(test_name)
self.resource_denieds.append(test_name)
elif ok == TEST_DID_NOT_RUN:
elif isinstance(result, Skipped):
self.skipped.append(test_name)
elif isinstance(result, EnvChanged):
self.environment_changed.append(test_name)
elif isinstance(result, Failed):
if not rerun:
self.bad.append(test_name)
self.rerun.append(result)
elif isinstance(result, DidNotRun):
self.run_no_tests.append(test_name)
elif ok == INTERRUPTED:
elif isinstance(result, Interrupted):
self.interrupted = True
elif ok == TIMEOUT:
self.bad.append(test_name)
else:
raise ValueError("invalid test result: %r" % ok)
raise ValueError("invalid test result: %r" % result)
if rerun and ok not in {FAILED, CHILD_ERROR, INTERRUPTED}:
if rerun and not isinstance(result, (Failed, Interrupted)):
self.bad.remove(test_name)
xml_data = result.xml_data
@@ -314,15 +312,31 @@ def rerun_failed_tests(self):
self.log()
self.log("Re-running failed tests in verbose mode")
self.rerun = self.bad[:]
for test_name in self.rerun:
self.log(f"Re-running {test_name} in verbose mode")
rerun_list = self.rerun[:]
self.rerun = []
for result in rerun_list:
test_name = result.name
errors = result.errors or []
failures = result.failures or []
error_names = [test_full_name.split(" ")[0] for (test_full_name, *_) in errors]
failure_names = [test_full_name.split(" ")[0] for (test_full_name, *_) in failures]
self.ns.verbose = True
orig_match_tests = self.ns.match_tests
if errors or failures:
if self.ns.match_tests is None:
self.ns.match_tests = []
self.ns.match_tests.extend(error_names)
self.ns.match_tests.extend(failure_names)
matching = "matching: " + ", ".join(self.ns.match_tests)
self.log(f"Re-running {test_name} in verbose mode ({matching})")
else:
self.log(f"Re-running {test_name} in verbose mode")
result = runtest(self.ns, test_name)
self.ns.match_tests = orig_match_tests
self.accumulate_result(result, rerun=True)
if result.result == INTERRUPTED:
if isinstance(result, Interrupted):
break
if self.bad:
@@ -383,7 +397,7 @@ def display_result(self):
if self.rerun:
print()
print("%s:" % count(len(self.rerun), "re-run test"))
printlist(self.rerun)
printlist(r.name for r in self.rerun)
if self.run_no_tests:
print()
@@ -423,14 +437,14 @@ def run_tests_sequential(self):
result = runtest(self.ns, test_name)
self.accumulate_result(result)
if result.result == INTERRUPTED:
if isinstance(result, Interrupted):
break
previous_test = format_test_result(result)
previous_test = str(result)
test_time = time.monotonic() - start_time
if test_time >= PROGRESS_MIN_TIME:
previous_test = "%s in %s" % (previous_test, format_duration(test_time))
elif result.result == PASSED:
elif isinstance(result, Passed):
# be quiet: say nothing if the test passed shortly
previous_test = None
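
The key step in rerun_failed_tests() above is turning unittest's full test IDs into bare method names: entries in result.errors and result.failures are (full_name, traceback) pairs, and the full name renders as "test_bug (test.test_foo.Tests)", so split(" ")[0] recovers the method name that --match-tests understands. A small sketch with illustrative sample data:

```python
# (full_name, traceback_text) pairs, mirroring result.errors / result.failures.
failures = [
    ("test_bug (test.test_foo.Tests)", "Traceback (most recent call last): ..."),
    ("test_other (test.test_foo.Tests)", "Traceback (most recent call last): ..."),
]
failure_names = [full_name.split(" ")[0] for (full_name, *_) in failures]
assert failure_names == ["test_bug", "test_other"]

# These names extend ns.match_tests, so the re-run executes only the
# previously failing methods instead of the whole test file.
```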

Lib/test/libregrtest/runtest.py

@@ -1,4 +1,3 @@
import collections
import faulthandler
import functools
import gc
@@ -12,33 +11,109 @@
from test import support
from test.support import os_helper
from test.libregrtest.utils import clear_caches
from test.libregrtest.cmdline import Namespace
from test.libregrtest.save_env import saved_test_environment
from test.libregrtest.utils import format_duration, print_warning
from test.libregrtest.utils import clear_caches, format_duration, print_warning
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5 # error in a child process
TEST_DID_NOT_RUN = -6
TIMEOUT = -7
class TestResult:
def __init__(
self,
name: str,
duration_sec: float = 0.0,
xml_data: list[str] | None = None,
) -> None:
self.name = name
self.duration_sec = duration_sec
self.xml_data = xml_data
def __str__(self) -> str:
return f"{self.name} finished"
class Passed(TestResult):
def __str__(self) -> str:
return f"{self.name} passed"
class Failed(TestResult):
def __init__(
self,
name: str,
duration_sec: float = 0.0,
xml_data: list[str] | None = None,
errors: list[tuple[str, str]] | None = None,
failures: list[tuple[str, str]] | None = None,
) -> None:
super().__init__(name, duration_sec=duration_sec, xml_data=xml_data)
self.errors = errors
self.failures = failures
def __str__(self) -> str:
if self.errors and self.failures:
le = len(self.errors)
lf = len(self.failures)
error_s = "error" + ("s" if le > 1 else "")
failure_s = "failure" + ("s" if lf > 1 else "")
return f"{self.name} failed ({le} {error_s}, {lf} {failure_s})"
if self.errors:
le = len(self.errors)
error_s = "error" + ("s" if le > 1 else "")
return f"{self.name} failed ({le} {error_s})"
if self.failures:
lf = len(self.failures)
failure_s = "failure" + ("s" if lf > 1 else "")
return f"{self.name} failed ({lf} {failure_s})"
return f"{self.name} failed"
class UncaughtException(Failed):
def __str__(self) -> str:
return f"{self.name} failed (uncaught exception)"
class EnvChanged(Failed):
def __str__(self) -> str:
return f"{self.name} failed (env changed)"
class RefLeak(Failed):
def __str__(self) -> str:
return f"{self.name} failed (reference leak)"
class Skipped(TestResult):
def __str__(self) -> str:
return f"{self.name} skipped"
class ResourceDenied(Skipped):
def __str__(self) -> str:
return f"{self.name} skipped (resource denied)"
class Interrupted(TestResult):
def __str__(self) -> str:
return f"{self.name} interrupted"
class ChildError(Failed):
def __str__(self) -> str:
return f"{self.name} crashed"
class DidNotRun(TestResult):
def __str__(self) -> str:
return f"{self.name} ran no tests"
class Timeout(Failed):
def __str__(self) -> str:
return f"{self.name} timed out ({format_duration(self.duration_sec)})"
_FORMAT_TEST_RESULT = {
PASSED: '%s passed',
FAILED: '%s failed',
ENV_CHANGED: '%s failed (env changed)',
SKIPPED: '%s skipped',
RESOURCE_DENIED: '%s skipped (resource denied)',
INTERRUPTED: '%s interrupted',
CHILD_ERROR: '%s crashed',
TEST_DID_NOT_RUN: '%s run no tests',
TIMEOUT: '%s timed out',
}
# Minimum duration of a test to display its duration or to mention that
# the test is running in background
@@ -67,21 +142,10 @@
FOUND_GARBAGE = []
def is_failed(result, ns):
ok = result.result
if ok in (PASSED, RESOURCE_DENIED, SKIPPED, TEST_DID_NOT_RUN):
return False
if ok == ENV_CHANGED:
def is_failed(result: TestResult, ns: Namespace) -> bool:
if isinstance(result, EnvChanged):
return ns.fail_env_changed
return True
def format_test_result(result):
fmt = _FORMAT_TEST_RESULT.get(result.result, "%s")
text = fmt % result.test_name
if result.result == TIMEOUT:
text = '%s (%s)' % (text, format_duration(result.test_time))
return text
return isinstance(result, Failed)
def findtestdir(path=None):
@@ -101,7 +165,7 @@ def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
return stdtests + sorted(tests)
def get_abs_module(ns, test_name):
def get_abs_module(ns: Namespace, test_name: str) -> str:
if test_name.startswith('test.') or ns.testdir:
return test_name
else:
@@ -109,10 +173,7 @@ def get_abs_module(ns, test_name):
return 'test.' + test_name
TestResult = collections.namedtuple('TestResult',
'test_name result test_time xml_data')
def _runtest(ns, test_name):
def _runtest(ns: Namespace, test_name: str) -> TestResult:
# Handle faulthandler timeout, capture stdout+stderr, XML serialization
# and measure time.
@@ -140,7 +201,7 @@ def _runtest(ns, test_name):
sys.stderr = stream
result = _runtest_inner(ns, test_name,
display_failure=False)
if result != PASSED:
if not isinstance(result, Passed):
output = stream.getvalue()
orig_stderr.write(output)
orig_stderr.flush()
@@ -156,36 +217,26 @@
if xml_list:
import xml.etree.ElementTree as ET
xml_data = [ET.tostring(x).decode('us-ascii') for x in xml_list]
else:
xml_data = None
result.xml_data = [
ET.tostring(x).decode('us-ascii')
for x in xml_list
]
test_time = time.perf_counter() - start_time
return TestResult(test_name, result, test_time, xml_data)
result.duration_sec = time.perf_counter() - start_time
return result
finally:
if use_timeout:
faulthandler.cancel_dump_traceback_later()
support.junit_xml_list = None
def runtest(ns, test_name):
def runtest(ns: Namespace, test_name: str) -> TestResult:
"""Run a single test.
ns -- regrtest namespace of options
test_name -- the name of the test
Returns the tuple (result, test_time, xml_data), where result is one
of the constants:
INTERRUPTED KeyboardInterrupt
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
EMPTY_TEST_SUITE test ran no subtests.
TIMEOUT test timed out.
Returns a TestResult sub-class depending on the kind of result received.
If ns.xmlpath is not None, xml_data is a list containing each
generated testsuite element.
@@ -197,7 +248,7 @@ def runtest(ns, test_name):
msg = traceback.format_exc()
print(f"test {test_name} crashed -- {msg}",
file=sys.stderr, flush=True)
return TestResult(test_name, FAILED, 0.0, None)
return Failed(test_name)
def _test_module(the_module):
@@ -210,11 +261,11 @@ def _test_module(the_module):
support.run_unittest(tests)
def save_env(ns, test_name):
def save_env(ns: Namespace, test_name: str):
return saved_test_environment(test_name, ns.verbose, ns.quiet, pgo=ns.pgo)
def _runtest_inner2(ns, test_name):
def _runtest_inner2(ns: Namespace, test_name: str) -> bool:
# Load the test function, run the test function, handle huntrleaks
# and findleaks to detect leaks
@@ -265,7 +316,9 @@ def _runtest_inner2(ns, test_name):
return refleak
def _runtest_inner(ns, test_name, display_failure=True):
def _runtest_inner(
ns: Namespace, test_name: str, display_failure: bool = True
) -> TestResult:
# Detect environment changes, handle exceptions.
# Reset the environment_altered flag to detect if a test altered
@@ -283,37 +336,43 @@ def _runtest_inner(ns, test_name, display_failure=True):
except support.ResourceDenied as msg:
if not ns.quiet and not ns.pgo:
print(f"{test_name} skipped -- {msg}", flush=True)
return RESOURCE_DENIED
return ResourceDenied(test_name)
except unittest.SkipTest as msg:
if not ns.quiet and not ns.pgo:
print(f"{test_name} skipped -- {msg}", flush=True)
return SKIPPED
return Skipped(test_name)
except support.TestFailedWithDetails as exc:
msg = f"test {test_name} failed"
if display_failure:
msg = f"{msg} -- {exc}"
print(msg, file=sys.stderr, flush=True)
return Failed(test_name, errors=exc.errors, failures=exc.failures)
except support.TestFailed as exc:
msg = f"test {test_name} failed"
if display_failure:
msg = f"{msg} -- {exc}"
print(msg, file=sys.stderr, flush=True)
return FAILED
return Failed(test_name)
except support.TestDidNotRun:
return TEST_DID_NOT_RUN
return DidNotRun(test_name)
except KeyboardInterrupt:
print()
return INTERRUPTED
return Interrupted(test_name)
except:
if not ns.pgo:
msg = traceback.format_exc()
print(f"test {test_name} crashed -- {msg}",
file=sys.stderr, flush=True)
return FAILED
return UncaughtException(test_name)
if refleak:
return FAILED
return RefLeak(test_name)
if support.environment_altered:
return ENV_CHANGED
return PASSED
return EnvChanged(test_name)
return Passed(test_name)
def cleanup_test_droppings(test_name, verbose):
def cleanup_test_droppings(test_name: str, verbose: int) -> None:
# First kill any dangling references to open files etc.
# This can also issue some ResourceWarnings which would otherwise get
# triggered during the following test run, and possibly produce failures.
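
With results modeled as classes, callers branch on isinstance() and the display string comes from __str__ instead of the old _FORMAT_TEST_RESULT lookup. A hedged usage sketch (assumes a CPython checkout that includes this change, so the classes import from test.libregrtest.runtest):

```python
from test.libregrtest.runtest import Failed, Passed

result = Failed(
    "test_foo",
    duration_sec=1.5,
    errors=[("test_a (test.test_foo.Tests)", "tb")],
    failures=[("test_b (test.test_foo.Tests)", "tb")],
)
print(result)                      # test_foo failed (1 error, 1 failure)
print(isinstance(result, Passed))  # False
```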

Lib/test/libregrtest/runtest_mp.py

@@ -9,13 +9,15 @@
import threading
import time
import traceback
import types
from typing import NamedTuple, NoReturn, Literal, Any
from test import support
from test.support import os_helper
from test.libregrtest.cmdline import Namespace
from test.libregrtest.main import Regrtest
from test.libregrtest.runtest import (
runtest, INTERRUPTED, CHILD_ERROR, PROGRESS_MIN_TIME,
format_test_result, TestResult, is_failed, TIMEOUT)
runtest, is_failed, TestResult, Interrupted, Timeout, ChildError, PROGRESS_MIN_TIME)
from test.libregrtest.setup import setup_tests
from test.libregrtest.utils import format_duration, print_warning
@@ -36,21 +38,21 @@
USE_PROCESS_GROUP = (hasattr(os, "setsid") and hasattr(os, "killpg"))
def must_stop(result, ns):
if result.result == INTERRUPTED:
def must_stop(result: TestResult, ns: Namespace) -> bool:
if isinstance(result, Interrupted):
return True
if ns.failfast and is_failed(result, ns):
return True
return False
def parse_worker_args(worker_args):
def parse_worker_args(worker_args) -> tuple[Namespace, str]:
ns_dict, test_name = json.loads(worker_args)
ns = types.SimpleNamespace(**ns_dict)
ns = Namespace(**ns_dict)
return (ns, test_name)
def run_test_in_subprocess(testname, ns):
def run_test_in_subprocess(testname: str, ns: Namespace) -> subprocess.Popen:
ns_dict = vars(ns)
worker_args = (ns_dict, testname)
worker_args = json.dumps(worker_args)
@@ -75,15 +77,15 @@ def run_test_in_subprocess(testname, ns):
**kw)
def run_tests_worker(ns, test_name):
def run_tests_worker(ns: Namespace, test_name: str) -> NoReturn:
setup_tests(ns)
result = runtest(ns, test_name)
print() # Force a newline (just in case)
# Serialize TestResult as list in JSON
print(json.dumps(list(result)), flush=True)
# Serialize TestResult as dict in JSON
print(json.dumps(result, cls=EncodeTestResult), flush=True)
sys.exit(0)
@@ -110,15 +112,23 @@ def stop(self):
self.tests_iter = None
MultiprocessResult = collections.namedtuple('MultiprocessResult',
'result stdout stderr error_msg')
class MultiprocessResult(NamedTuple):
result: TestResult
stdout: str
stderr: str
error_msg: str
ExcStr = str
QueueOutput = tuple[Literal[False], MultiprocessResult] | tuple[Literal[True], ExcStr]
class ExitThread(Exception):
pass
class TestWorkerProcess(threading.Thread):
def __init__(self, worker_id, runner):
def __init__(self, worker_id: int, runner: "MultiprocessTestRunner") -> None:
super().__init__()
self.worker_id = worker_id
self.pending = runner.pending
@@ -132,7 +142,7 @@ def __init__(self, worker_id, runner):
self._killed = False
self._stopped = False
def __repr__(self):
def __repr__(self) -> str:
info = [f'TestWorkerProcess #{self.worker_id}']
if self.is_alive():
info.append("running")
@@ -148,7 +158,7 @@ def __repr__(self):
f'time={format_duration(dt)}'))
return '<%s>' % ' '.join(info)
def _kill(self):
def _kill(self) -> None:
popen = self._popen
if popen is None:
return
@@ -176,18 +186,22 @@ def _kill(self):
except OSError as exc:
print_warning(f"Failed to kill {what}: {exc!r}")
def stop(self):
def stop(self) -> None:
# Method called from a different thread to stop this thread
self._stopped = True
self._kill()
def mp_result_error(self, test_name, error_type, stdout='', stderr='',
err_msg=None):
test_time = time.monotonic() - self.start_time
result = TestResult(test_name, error_type, test_time, None)
return MultiprocessResult(result, stdout, stderr, err_msg)
def mp_result_error(
self,
test_result: TestResult,
stdout: str = '',
stderr: str = '',
err_msg=None
) -> MultiprocessResult:
test_result.duration_sec = time.monotonic() - self.start_time
return MultiprocessResult(test_result, stdout, stderr, err_msg)
def _run_process(self, test_name):
def _run_process(self, test_name: str) -> tuple[int, str, str]:
self.start_time = time.monotonic()
self.current_test_name = test_name
@@ -246,11 +260,11 @@ def _run_process(self, test_name):
self._popen = None
self.current_test_name = None
def _runtest(self, test_name):
def _runtest(self, test_name: str) -> MultiprocessResult:
retcode, stdout, stderr = self._run_process(test_name)
if retcode is None:
return self.mp_result_error(test_name, TIMEOUT, stdout, stderr)
return self.mp_result_error(Timeout(test_name), stdout, stderr)
err_msg = None
if retcode != 0:
@@ -263,18 +277,17 @@ def _runtest(self, test_name):
else:
try:
# deserialize run_tests_worker() output
result = json.loads(result)
result = TestResult(*result)
result = json.loads(result, object_hook=decode_test_result)
except Exception as exc:
err_msg = "Failed to parse worker JSON: %s" % exc
if err_msg is not None:
return self.mp_result_error(test_name, CHILD_ERROR,
return self.mp_result_error(ChildError(test_name),
stdout, stderr, err_msg)
return MultiprocessResult(result, stdout, stderr, err_msg)
def run(self):
def run(self) -> None:
while not self._stopped:
try:
try:
@@ -293,7 +306,7 @@ def run(self):
self.output.put((True, traceback.format_exc()))
break
def _wait_completed(self):
def _wait_completed(self) -> None:
popen = self._popen
# stdout and stderr must be closed to ensure that communicate()
@@ -308,7 +321,7 @@ def _wait_completed(self):
f"(timeout={format_duration(JOIN_TIMEOUT)}): "
f"{exc!r}")
def wait_stopped(self, start_time):
def wait_stopped(self, start_time: float) -> None:
# bpo-38207: MultiprocessTestRunner.stop_workers() called self.stop()
# which killed the process. Sometimes, killing the process from the
# main thread does not interrupt popen.communicate() in
@@ -332,7 +345,7 @@ def wait_stopped(self, start_time):
break
def get_running(workers):
def get_running(workers: list[TestWorkerProcess]) -> list[TestWorkerProcess]:
running = []
for worker in workers:
current_test_name = worker.current_test_name
@@ -346,11 +359,11 @@ def get_running(workers):
class MultiprocessTestRunner:
def __init__(self, regrtest):
def __init__(self, regrtest: Regrtest) -> None:
self.regrtest = regrtest
self.log = self.regrtest.log
self.ns = regrtest.ns
self.output = queue.Queue()
self.output: queue.Queue[QueueOutput] = queue.Queue()
self.pending = MultiprocessIterator(self.regrtest.tests)
if self.ns.timeout is not None:
# Rely on faulthandler to kill a worker process. This timeout is
@@ -362,7 +375,7 @@ def __init__(self, regrtest):
self.worker_timeout = None
self.workers = None
def start_workers(self):
def start_workers(self) -> None:
self.workers = [TestWorkerProcess(index, self)
for index in range(1, self.ns.use_mp + 1)]
msg = f"Run tests in parallel using {len(self.workers)} child processes"
@@ -374,14 +387,14 @@ def start_workers(self):
for worker in self.workers:
worker.start()
def stop_workers(self):
def stop_workers(self) -> None:
start_time = time.monotonic()
for worker in self.workers:
worker.stop()
for worker in self.workers:
worker.wait_stopped(start_time)
def _get_result(self):
def _get_result(self) -> QueueOutput | None:
if not any(worker.is_alive() for worker in self.workers):
# all worker threads are done: consume pending results
try:
@@ -407,21 +420,22 @@ def _get_result(self):
if running and not self.ns.pgo:
self.log('running: %s' % ', '.join(running))
def display_result(self, mp_result):
def display_result(self, mp_result: MultiprocessResult) -> None:
result = mp_result.result
text = format_test_result(result)
text = str(result)
if mp_result.error_msg is not None:
# CHILD_ERROR
text += ' (%s)' % mp_result.error_msg
elif (result.test_time >= PROGRESS_MIN_TIME and not self.ns.pgo):
text += ' (%s)' % format_duration(result.test_time)
elif (result.duration_sec >= PROGRESS_MIN_TIME and not self.ns.pgo):
text += ' (%s)' % format_duration(result.duration_sec)
running = get_running(self.workers)
if running and not self.ns.pgo:
text += ' -- running: %s' % ', '.join(running)
self.regrtest.display_progress(self.test_index, text)
def _process_result(self, item):
def _process_result(self, item: QueueOutput) -> bool:
"""Returns True if test runner must stop."""
if item[0]:
# Thread got an exception
format_exc = item[1]
@@ -443,7 +457,7 @@ def _process_result(self, item):
return False
def run_tests(self):
def run_tests(self) -> None:
self.start_workers()
self.test_index = 0
@@ -469,5 +483,41 @@ def run_tests(self):
self.stop_workers()
def run_tests_multiprocess(regrtest):
def run_tests_multiprocess(regrtest: Regrtest) -> None:
MultiprocessTestRunner(regrtest).run_tests()
class EncodeTestResult(json.JSONEncoder):
"""Encode a TestResult (sub)class object into a JSON dict."""
def default(self, o: Any) -> dict[str, Any]:
if isinstance(o, TestResult):
result = vars(o)
result["__test_result__"] = o.__class__.__name__
return result
return super().default(o)
def decode_test_result(d: dict[str, Any]) -> TestResult | dict[str, Any]:
"""Decode a TestResult (sub)class object from a JSON dict."""
if "__test_result__" not in d:
return d
cls_name = d.pop("__test_result__")
for cls in get_all_test_result_classes():
if cls.__name__ == cls_name:
return cls(**d)
def get_all_test_result_classes() -> set[type[TestResult]]:
prev_count = 0
classes = {TestResult}
while len(classes) > prev_count:
prev_count = len(classes)
to_add = []
for cls in classes:
to_add.extend(cls.__subclasses__())
classes.update(to_add)
return classes
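
EncodeTestResult and decode_test_result give a lossless wire format: the encoder tags vars(result) with the concrete class name, and the decoder walks every TestResult subclass to rebuild the same type. A self-contained round-trip sketch (classes trimmed to two; the real decoder discovers them via get_all_test_result_classes()):

```python
import json

class TestResult:
    def __init__(self, name, duration_sec=0.0, xml_data=None):
        self.name = name
        self.duration_sec = duration_sec
        self.xml_data = xml_data

class Failed(TestResult):
    def __init__(self, name, duration_sec=0.0, xml_data=None,
                 errors=None, failures=None):
        super().__init__(name, duration_sec, xml_data)
        self.errors = errors
        self.failures = failures

class EncodeTestResult(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, TestResult):
            d = dict(vars(o))                      # copy instance attributes
            d["__test_result__"] = type(o).__name__  # tag the concrete class
            return d
        return super().default(o)

def decode_test_result(d):
    if "__test_result__" not in d:
        return d
    cls_name = d.pop("__test_result__")
    for cls in (TestResult, Failed):  # stand-in for the subclass walk
        if cls.__name__ == cls_name:
            return cls(**d)

wire = json.dumps(Failed("test_os", errors=[["test_a (T)", "tb"]]),
                  cls=EncodeTestResult)
result = json.loads(wire, object_hook=decode_test_result)
assert type(result) is Failed and result.errors == [["test_a (T)", "tb"]]
```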

Lib/test/support/__init__.py

@@ -105,6 +105,17 @@ class Error(Exception):
class TestFailed(Error):
"""Test failed."""
class TestFailedWithDetails(TestFailed):
"""Test failed."""
def __init__(self, msg, errors, failures):
self.msg = msg
self.errors = errors
self.failures = failures
super().__init__(msg, errors, failures)
def __str__(self):
return self.msg
class TestDidNotRun(Error):
"""Test did not run any subtests."""
@@ -980,7 +991,9 @@ def _run_suite(suite):
else:
err = "multiple errors occurred"
if not verbose: err += "; run in verbose mode for details"
raise TestFailed(err)
errors = [(str(tc), exc_str) for tc, exc_str in result.errors]
failures = [(str(tc), exc_str) for tc, exc_str in result.failures]
raise TestFailedWithDetails(err, errors, failures)
# By default, don't filter tests
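
TestFailedWithDetails keeps the short human-readable message for display (its __str__ returns msg alone) while carrying the structured (test_id, traceback) pairs that rerun_failed_tests() later mines for method names. A brief sketch with illustrative data (assumes a CPython checkout that includes this change, so the class imports from test.support):

```python
from test.support import TestFailedWithDetails

exc = TestFailedWithDetails(
    "multiple errors occurred",
    errors=[("test_a (test.test_foo.Tests)", "Traceback ...")],
    failures=[("test_b (test.test_foo.Tests)", "Traceback ...")],
)
print(exc)              # multiple errors occurred
print(exc.failures[0])  # ('test_b (test.test_foo.Tests)', 'Traceback ...')
```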

Lib/test/test_regrtest.py

@@ -15,6 +15,7 @@
import sysconfig
import tempfile
import textwrap
import time
import unittest
from test import libregrtest
from test import support
@@ -414,7 +415,7 @@ def parse_executed_tests(self, output):
def check_executed_tests(self, output, tests, skipped=(), failed=(),
env_changed=(), omitted=(),
rerun=(), no_test_ran=(),
rerun={}, no_test_ran=(),
randomize=False, interrupted=False,
fail_env_changed=False):
if isinstance(tests, str):
@@ -427,8 +428,6 @@ def check_executed_tests(self, output, tests, skipped=(), failed=(),
env_changed = [env_changed]
if isinstance(omitted, str):
omitted = [omitted]
if isinstance(rerun, str):
rerun = [rerun]
if isinstance(no_test_ran, str):
no_test_ran = [no_test_ran]
@@ -466,12 +465,12 @@ def list_regex(line_format, tests):
self.check_line(output, regex)
if rerun:
regex = list_regex('%s re-run test%s', rerun)
regex = list_regex('%s re-run test%s', rerun.keys())
self.check_line(output, regex)
regex = LOG_PREFIX + r"Re-running failed tests in verbose mode"
self.check_line(output, regex)
for test_name in rerun:
regex = LOG_PREFIX + f"Re-running {test_name} in verbose mode"
for name, match in rerun.items():
regex = LOG_PREFIX + f"Re-running {name} in verbose mode \\(matching: {match}\\)"
self.check_line(output, regex)
if no_test_ran:
@@ -549,11 +548,10 @@ def run_python(self, args, **kw):
class CheckActualTests(BaseTestCase):
"""
Check that regrtest appears to find the expected set of tests.
"""
def test_finds_expected_number_of_tests(self):
"""
Check that regrtest appears to find the expected set of tests.
"""
args = ['-Wd', '-E', '-bb', '-m', 'test.regrtest', '--list-tests']
output = self.run_python(args)
rough_number_of_tests_found = len(output.splitlines())
@@ -1081,15 +1079,18 @@ def test_rerun_fail(self):
import unittest
class Tests(unittest.TestCase):
def test_bug(self):
# test always fail
def test_succeed(self):
return
def test_fail_always(self):
# test that always fails
self.fail("bug")
""")
testname = self.create_test(code=code)
output = self.run_tests("-w", testname, exitcode=2)
self.check_executed_tests(output, [testname],
failed=testname, rerun=testname)
failed=testname, rerun={testname: "test_fail_always"})
def test_rerun_success(self):
# FAILURE then SUCCESS
@@ -1098,7 +1099,8 @@ def test_rerun_success(self):
import unittest
class Tests(unittest.TestCase):
failed = False
def test_succeed(self):
return
def test_fail_once(self):
if not hasattr(builtins, '_test_failed'):
@@ -1109,7 +1111,7 @@ def test_fail_once(self):
output = self.run_tests("-w", testname, exitcode=0)
self.check_executed_tests(output, [testname],
rerun=testname)
rerun={testname: "test_fail_once"})
def test_no_tests_ran(self):
code = textwrap.dedent("""

Misc/NEWS.d (new NEWS entry)

@@ -0,0 +1,2 @@
Regression tests, when run with -w, are now re-running only the affected
test methods instead of re-running the entire test file.
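
For reference, this is the behavior behind regrtest's -w option; a hedged illustration of driving it programmatically (equivalent to running ./python -m test -w test_os from a CPython checkout; test_os is just a sample test name):

```python
import subprocess
import sys

# Run one test file; if it fails, regrtest re-runs only the failing
# test methods in verbose mode ("matching: <method names>" in the log).
subprocess.run([sys.executable, "-m", "test", "-w", "test_os"])
```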