mirror of
https://gitlab.com/qemu-project/qemu
synced 2024-11-02 21:32:52 +00:00
4156325cd3
The mtest2make.py script passes the arg '-t 0' to 'meson test' which disables all test timeouts. This is a major source of pain when running in GitLab CI and a test gets stuck. It will stall until GitLab kills the CI job. This leaves us with little easily consumable information about the stalled test. The TAP format doesn't show the test name until it is completed, and TAP output from multiple tests is interleaved. So we have to analyse the log to figure out what tests had un-finished TAP output present and thus infer which test case caused the hang. This is very time consuming and error prone. By allowing meson to kill stalled tests, we get a direct display of what test program got stuck, which lets us more directly focus in on what specific test case within the test program hung. The other issue with disabling meson test timeouts by default is that it makes it more likely that maintainers inadvertently introduce slowdowns. For example the recent-ish change that accidentally made migrate-test take 15-20 minutes instead of around 1 minute. The main risk of this change is that the individual test timeouts might be too short to allow completion in high load scenarios. Thus, there is likely to be some short term pain where we have to bump the timeouts for certain tests to make them reliable enough. The preceding few patches raised the timeouts for all failures that were immediately apparent in GitLab CI. Even with the possible short term instability, this should still be a net win for debuggability of failed CI pipelines over the long term. Signed-off-by: Daniel P. Berrangé <berrange@redhat.com> Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> Message-ID: <20230717182859.707658-13-berrange@redhat.com> Signed-off-by: Thomas Huth <thuth@redhat.com> Message-Id: <20231215070357.10888-17-thuth@redhat.com> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
115 lines
4.1 KiB
Python
115 lines
4.1 KiB
Python
#! /usr/bin/env python3
|
|
|
|
# Create Makefile targets to run tests, from Meson's test introspection data.
|
|
#
|
|
# Author: Paolo Bonzini <pbonzini@redhat.com>
|
|
|
|
from collections import defaultdict
|
|
import itertools
|
|
import json
|
|
import os
|
|
import shlex
|
|
import sys
|
|
|
|
class Suite:
    """Aggregated information about one named test suite.

    Collects the ninja build dependencies of every test belonging to the
    suite, and the set of speeds ('quick', plus optionally 'slow' and/or
    'thorough') the suite can be run at.
    """
    def __init__(self):
        # Files (ninja goals) that must be built before the suite can run.
        self.deps = set()
        # 'quick' is always available; process_tests() appends the others.
        self.speeds = ['quick']

    def names(self, base):
        """Return the meson suite names for *base* at every known speed.

        The quick variant keeps the plain name; other speeds get a
        '-SPEED' suffix (e.g. 'qtest', 'qtest-slow').
        """
        return [base if speed == 'quick' else f'{base}-{speed}' for speed in self.speeds]
|
|
|
# Emit the static preamble of the generated Makefile: the SPEED knob and the
# .speed.* helpers that select suites by speed, the TIMEOUT_MULTIPLIER passed
# to 'meson test -t', forwarding of make's -jN to --num-processes, and the
# base argument lists for "check" and "bench" invocations of meson test.
print('''
SPEED = quick

.speed.quick = $(foreach s,$(sort $(filter-out %-slow %-thorough, $1)), --suite $s)
.speed.slow = $(foreach s,$(sort $(filter-out %-thorough, $1)), --suite $s)
.speed.thorough = $(foreach s,$(sort $1), --suite $s)

TIMEOUT_MULTIPLIER = 1
.mtestargs = --no-rebuild -t $(TIMEOUT_MULTIPLIER)
ifneq ($(SPEED), quick)
.mtestargs += --setup $(SPEED)
endif
.mtestargs += $(subst -j,--num-processes , $(filter-out -j, $(lastword -j1 $(filter -j%, $(MAKEFLAGS)))))

.check.mtestargs = $(MTESTARGS) $(.mtestargs) $(if $(V),--verbose,--print-errorlogs)
.bench.mtestargs = $(MTESTARGS) $(.mtestargs) --benchmark --verbose''')
|
# The build system pipes Meson's introspection JSON (targets, tests,
# benchmarks) to this script on stdin.
introspect = json.load(sys.stdin)
|
def process_tests(test, targets, suites):
    """Fold one introspected test into the per-suite table.

    :param test: one entry of Meson's test/benchmark introspection data
    :param targets: dict mapping target ids to their output file lists
    :param suites: defaultdict of Suite objects, updated in place

    Note: the original code also computed ``os.path.relpath(test['cmd'][0])``
    under a bare ``except: pass``, but the result was never used; that dead
    code (and the over-broad exception handler) has been dropped.
    """
    # Flatten the output files of every ninja target this test depends on.
    deps = list(itertools.chain.from_iterable(
        targets.get(x, []) for x in test['depends']))

    test_suites = test['suite'] or ['default']
    for s in test_suites:
        # The suite name in the introspection info is "PROJECT" or "PROJECT:SUITE"
        if ':' in s:
            s = s.split(':')[1]
        # A bare "slow"/"thorough" tag is a speed marker, not a suite.
        if s == 'slow' or s == 'thorough':
            continue
        # "FOO-slow" / "FOO-thorough" mean suite FOO also runs at that speed.
        if s.endswith('-slow'):
            s = s[:-5]
            suites[s].speeds.append('slow')
        if s.endswith('-thorough'):
            s = s[:-9]
            suites[s].speeds.append('thorough')
        suites[s].deps.update(deps)
|
def emit_prolog(suites, prefix):
|
|
all_targets = ' '.join((f'{prefix}-{k}' for k in suites.keys()))
|
|
all_xml = ' '.join((f'{prefix}-report-{k}.junit.xml' for k in suites.keys()))
|
|
print()
|
|
print(f'all-{prefix}-targets = {all_targets}')
|
|
print(f'all-{prefix}-xml = {all_xml}')
|
|
print(f'.PHONY: {prefix} do-meson-{prefix} {prefix}-report.junit.xml $(all-{prefix}-targets) $(all-{prefix}-xml)')
|
|
print(f'ifeq ($(filter {prefix}, $(MAKECMDGOALS)),)')
|
|
print(f'.{prefix}.mtestargs += $(call .speed.$(SPEED), $(.{prefix}.mtest-suites))')
|
|
print(f'endif')
|
|
print(f'{prefix}-build: run-ninja')
|
|
print(f'{prefix} $(all-{prefix}-targets): do-meson-{prefix}')
|
|
print(f'do-meson-{prefix}: run-ninja; $(if $(MAKE.n),,+)$(MESON) test $(.{prefix}.mtestargs)')
|
|
print(f'{prefix}-report.junit.xml $(all-{prefix}-xml): {prefix}-report%.junit.xml: run-ninja')
|
|
print(f'\t$(MAKE) {prefix}$* MTESTARGS="$(MTESTARGS) --logbase {prefix}-report$*" && ln -f meson-logs/$@ .')
|
|
|
|
def emit_suite_deps(name, suite, prefix):
    """Print the ninja-goal dependency variables for suite *name*."""
    dep_list = ' '.join(suite.deps)
    print()
    print(f'.{prefix}-{name}.deps = {dep_list}')
    # Every make goal that can run this suite must also build its deps.
    for target in (f'{prefix}-{name}',
                   f'{prefix}-report-{name}.junit.xml',
                   prefix,
                   f'{prefix}-report.junit.xml',
                   f'{prefix}-build'):
        print(f'.ninja-goals.{target} += $(.{prefix}-{name}.deps)')
|
def emit_suite(name, suite, prefix):
    """Print the Makefile fragment enabling suite *name* for *prefix*."""
    emit_suite_deps(name, suite, prefix)
    # Only add "--suite" selections when one of these goals was requested.
    goals = ' '.join([f'{prefix}-{name}',
                      f'{prefix}-report-{name}.junit.xml',
                      prefix,
                      f'{prefix}-report.junit.xml'])
    suite_names = ' '.join(suite.names(name))
    print(f'ifneq ($(filter {goals}, $(MAKECMDGOALS)),)')
    print(f'.{prefix}.mtest-suites += {suite_names}')
    print('endif')
|
# Map each Meson target id to the (repo-relative) files it produces, so a
# test's 'depends' entries can be turned into concrete ninja goals.
targets = {t['id']: [os.path.relpath(f) for f in t['filename']]
           for t in introspect['targets']}

# "make check" and friends: one suite table built from the tests.
testsuites = defaultdict(Suite)
for test in introspect['tests']:
    process_tests(test, targets, testsuites)
emit_prolog(testsuites, 'check')
for name, suite in testsuites.items():
    emit_suite(name, suite, 'check')

# "make bench" and friends: same machinery, fed from the benchmarks.
benchsuites = defaultdict(Suite)
for test in introspect['benchmarks']:
    process_tests(test, targets, benchsuites)
emit_prolog(benchsuites, 'bench')
for name, suite in benchsuites.items():
    emit_suite(name, suite, 'bench')