diff --git a/.github/workflows/GnuTests.yml b/.github/workflows/GnuTests.yml
index e57204213..eef8567c7 100644
--- a/.github/workflows/GnuTests.yml
+++ b/.github/workflows/GnuTests.yml
@@ -32,7 +32,8 @@ jobs:
         TEST_FILESET_PREFIX='test-fileset-IDs.sha1#'
         TEST_FILESET_SUFFIX='.txt'
         TEST_SUMMARY_FILE='gnu-result.json'
-        outputs SUITE_LOG_FILE TEST_FILESET_PREFIX TEST_FILESET_SUFFIX TEST_LOGS_GLOB TEST_SUMMARY_FILE
+        TEST_FULL_SUMMARY_FILE='gnu-full-result.json'
+        outputs SUITE_LOG_FILE TEST_FILESET_PREFIX TEST_FILESET_SUFFIX TEST_LOGS_GLOB TEST_SUMMARY_FILE TEST_FULL_SUMMARY_FILE
     - name: Checkout code (uutil)
       uses: actions/checkout@v2
       with:
@@ -92,6 +93,11 @@ jobs:
         path_GNU='${{ steps.vars.outputs.path_GNU }}'
         path_UUTILS='${{ steps.vars.outputs.path_UUTILS }}'
         bash "${path_UUTILS}/util/run-gnu-test.sh"
+    - name: Extract testing info into JSON
+      shell: bash
+      run: |
+        path_UUTILS='${{ steps.vars.outputs.path_UUTILS }}'
+        python ${path_UUTILS}/util/gnu-json-result.py ${{ steps.vars.outputs.path_GNU_tests }} > ${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}
     - name: Extract/summarize testing info
       id: summary
       shell: bash
@@ -146,6 +152,11 @@
       with:
         name: test-logs
         path: "${{ steps.vars.outputs.TEST_LOGS_GLOB }}"
+    - name: Upload full json results
+      uses: actions/upload-artifact@v2
+      with:
+        name: gnu-full-result.json
+        path: ${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}
     - name: Compare test failures VS reference
       shell: bash
       run: |
diff --git a/docs/src/test_coverage.css b/docs/src/test_coverage.css
new file mode 100644
index 000000000..37a658695
--- /dev/null
+++ b/docs/src/test_coverage.css
@@ -0,0 +1,46 @@
+:root {
+    --PASS: #44AF69;
+    --ERROR: #F8333C;
+    --FAIL: #F8333C;
+    --SKIP: #d3c994;
+}
+.PASS {
+    color: var(--PASS);
+}
+.ERROR {
+    color: var(--ERROR);
+}
+.FAIL {
+    color: var(--FAIL);
+}
+.SKIP {
+    color: var(--SKIP);
+}
+.testSummary {
+    display: inline-flex;
+    align-items: center;
+    justify-content: space-between;
+    width: 90%;
+}
+.progress {
+    width: 80%;
+    display: flex;
+    justify-content: right;
+    align-items: center;
+}
+.progress-bar {
+    height: 10px;
+    width: calc(100% - 15ch);
+    border-radius: 5px;
+}
+.result {
+    font-weight: bold;
+    width: 7ch;
+    display: inline-block;
+}
+.result-line {
+    margin: 8px;
+}
+.counts {
+    margin-right: 10px;
+}
\ No newline at end of file
diff --git a/docs/src/test_coverage.js b/docs/src/test_coverage.js
new file mode 100644
index 000000000..814eef6da
--- /dev/null
+++ b/docs/src/test_coverage.js
@@ -0,0 +1,77 @@
+// spell-checker:ignore hljs
+function progressBar(totals) {
+    const bar = document.createElement("div");
+    bar.className = "progress-bar";
+    let totalTests = 0;
+    for (const [key, value] of Object.entries(totals)) {
+        totalTests += value;
+    }
+    const passPercentage = Math.round(100 * totals["PASS"] / totalTests);
+    const skipPercentage = passPercentage + Math.round(100 * totals["SKIP"] / totalTests);
+    bar.style = `background: linear-gradient(
+        to right,
+        var(--PASS) ${passPercentage}%,
+        var(--SKIP) ${passPercentage}%,
+        var(--SKIP) ${skipPercentage}%,
+        var(--FAIL) 0)`;
+
+    const progress = document.createElement("div");
+    progress.className = "progress";
+    progress.innerHTML = `
+        <span class="counts">
+            <span class="PASS">${totals["PASS"]}</span>
+            /
+            <span class="SKIP">${totals["SKIP"]}</span>
+            /
+            <span class="FAIL">${totals["FAIL"] + totals["ERROR"]}</span>
+        </span>
+    `;
+    progress.appendChild(bar);
+    return progress;
+}
+
+function parse_result(parent, obj) {
+    const totals = {
+        PASS: 0,
+        SKIP: 0,
+        FAIL: 0,
+        ERROR: 0,
+    };
+    for (const [category, content] of Object.entries(obj)) {
+        if (typeof content === "string") {
+            const p = document.createElement("p");
+            p.className = "result-line";
+            totals[content]++;
+            p.innerHTML = `<span class="result ${content}">${content}</span> ${category}`;
+            parent.appendChild(p);
+        } else {
+            const categoryName = document.createElement("code");
+            categoryName.innerHTML = category;
+            categoryName.className = "hljs";
+
+            const details = document.createElement("details");
+            const subtotals = parse_result(details, content);
+            for (const [subtotal, count] of Object.entries(subtotals)) {
+                totals[subtotal] += count;
+            }
+            const summaryDiv = document.createElement("div");
+            summaryDiv.className = "testSummary";
+            summaryDiv.appendChild(categoryName);
+            summaryDiv.appendChild(progressBar(subtotals));
+
+            const summary = document.createElement("summary");
+            summary.appendChild(summaryDiv);
+
+            details.appendChild(summary);
+            parent.appendChild(details);
+        }
+    }
+    return totals;
+}
+
+fetch("https://raw.githubusercontent.com/uutils/coreutils-tracking/main/gnu-full-result.json")
+    .then((r) => r.json())
+    .then((obj) => {
+        let parent = document.getElementById("test-cov");
+        parse_result(parent, obj);
+    });
diff --git a/docs/src/test_coverage.md b/docs/src/test_coverage.md
new file mode 100644
index 000000000..bf4c72129
--- /dev/null
+++ b/docs/src/test_coverage.md
@@ -0,0 +1,19 @@
+# GNU Test Coverage
+
+uutils is actively tested against the GNU coreutils test suite. The results
+below are automatically updated every day.
+
+## Coverage per category
+
+Click on the categories to see the names of the tests. Green indicates a passing
+test, yellow indicates a skipped test, and red means that the test either failed
+or resulted in an error.
+
+<link rel="stylesheet" href="test_coverage.css">
+<script src="test_coverage.js"></script>
+<div id="test-cov"></div>
+
+## Progress over time
+
+
diff --git a/src/bin/uudoc.rs b/src/bin/uudoc.rs
index 412a2dd48..71bbb2684 100644
--- a/src/bin/uudoc.rs
+++ b/src/bin/uudoc.rs
@@ -26,6 +26,7 @@ fn main() -> io::Result<()> {
         [Introduction](index.md)\n\
         * [Installation](installation.md)\n\
         * [Contributing](contributing.md)\n\
+        * [GNU test coverage](test_coverage.md)\n\
         \n\
         # Reference\n\
         * [Multi-call binary](multicall.md)\n",
diff --git a/util/gnu-json-result.py b/util/gnu-json-result.py
new file mode 100644
index 000000000..a51aa7d94
--- /dev/null
+++ b/util/gnu-json-result.py
@@ -0,0 +1,27 @@
+"""
+Extract the GNU logs into a JSON file.
+"""
+
+import json
+from pathlib import Path
+import sys
+from os import environ
+
+out = {}
+
+test_dir = Path(sys.argv[1])
+for filepath in test_dir.glob("**/*.log"):
+    path = Path(filepath)
+    current = out
+    for key in path.parent.relative_to(test_dir).parts:
+        if key not in current:
+            current[key] = {}
+        current = current[key]
+    try:
+        with open(path) as f:
+            content = f.read()
+            current[path.name] = content.split("\n")[-2].split(" ")[0]
+    except:
+        pass
+
+print(json.dumps(out, indent=2, sort_keys=True))
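
For reference, the following minimal sketch (not part of the patch) illustrates the log-to-JSON mapping that `util/gnu-json-result.py` performs. The sample log tail and the `cp/basic.sh.log` path are hypothetical; the script simply takes the first word of the second-to-last line of each `*.log` file and nests the result by directory.

```python
import json

# Hypothetical tail of a GNU test log; only the second-to-last line matters.
sample_log = "...test output...\nPASS cp/basic.sh (exit status: 0)\n"

# Same extraction rule as util/gnu-json-result.py: the first word of the
# second-to-last line is the result (PASS, FAIL, SKIP, or ERROR).
status = sample_log.split("\n")[-2].split(" ")[0]

# The script nests results by directory, so a log at <tests>/cp/basic.sh.log
# ends up as {"cp": {"basic.sh.log": "PASS"}}; test_coverage.js walks this
# structure, treating string leaves as results and objects as categories.
result = {"cp": {"basic.sh.log": status}}
print(json.dumps(result, indent=2, sort_keys=True))
```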
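
On the rendering side, the gradient-stop arithmetic in `progressBar()` from `docs/src/test_coverage.js` can be sketched the same way; the counts below are made up for illustration.

```python
# Hypothetical totals as rolled up by parse_result() for one category.
totals = {"PASS": 120, "SKIP": 30, "FAIL": 10, "ERROR": 2}

total_tests = sum(totals.values())
pass_pct = round(100 * totals["PASS"] / total_tests)
skip_pct = pass_pct + round(100 * totals["SKIP"] / total_tests)

# linear-gradient stops: PASS up to pass_pct%, SKIP from pass_pct% to
# skip_pct%, and FAIL/ERROR fill the remainder of the bar.
print(f"PASS stop: {pass_pct}%, SKIP stop: {skip_pct}%")
```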