author     Tamara Dahlgren <35777542+tldahlgren@users.noreply.github.com>  2023-05-17 07:03:21 -0700
committer  GitHub <noreply@github.com>  2023-05-17 16:03:21 +0200
commit     86b9ce1c88164013473b39773bb63e8e6eb45e57 (patch)
tree       64e896a31f39195354ac6c6cdffa40387bcd8619
parent     05232034f5808b344f7e977d4fe3f441f175702c (diff)
spack test: fix stand-alone test suite status reporting (#37602)
* Fix reporting of packageless specs as having no tests
* Add test_test_output_multiple_specs along with an update to simple-standalone-test (and its tests)
* Refactor the test status summary; add more tests and checks
-rw-r--r--  lib/spack/spack/install_test.py                                    109
-rw-r--r--  lib/spack/spack/test/cmd/test.py                                    14
-rw-r--r--  lib/spack/spack/test/installer.py                                   11
-rw-r--r--  lib/spack/spack/test/test_suite.py                                  93
-rw-r--r--  var/spack/repos/builtin.mock/packages/simple-standalone-test/package.py  14
5 files changed, 198 insertions, 43 deletions
diff --git a/lib/spack/spack/install_test.py b/lib/spack/spack/install_test.py
index 13f7a2402f..ef9caab055 100644
--- a/lib/spack/spack/install_test.py
+++ b/lib/spack/spack/install_test.py
@@ -215,6 +215,31 @@ def print_message(logger: LogType, msg: str, verbose: bool = False):
tty.info(msg, format="g")
+def overall_status(current_status: "TestStatus", substatuses: List["TestStatus"]) -> "TestStatus":
+ """Determine the overall status based on the current and associated sub status values.
+
+ Args:
+ current_status: current overall status, assumed to default to PASSED
+ substatuses: status of each test part or overall status of each test spec
+ Returns:
+ test status encompassing the main test and all subtests
+ """
+ if current_status in [TestStatus.SKIPPED, TestStatus.NO_TESTS, TestStatus.FAILED]:
+ return current_status
+
+ skipped = 0
+ for status in substatuses:
+ if status == TestStatus.FAILED:
+ return status
+ elif status == TestStatus.SKIPPED:
+ skipped += 1
+
+ if skipped and skipped == len(substatuses):
+ return TestStatus.SKIPPED
+
+ return current_status
+
+
class PackageTest:
"""The class that manages stand-alone (post-install) package tests."""
@@ -308,14 +333,12 @@ class PackageTest:
# to start with the same name) may not have PASSED. This extra
# check is used to ensure the containing test part is not claiming
# to have passed when at least one subpart failed.
- if status == TestStatus.PASSED:
- for pname, substatus in self.test_parts.items():
- if pname != part_name and pname.startswith(part_name):
- if substatus == TestStatus.FAILED:
- print(f"{substatus}: {part_name}{extra}")
- self.test_parts[part_name] = substatus
- self.counts[substatus] += 1
- return
+ substatuses = []
+ for pname, substatus in self.test_parts.items():
+ if pname != part_name and pname.startswith(part_name):
+ substatuses.append(substatus)
+ if substatuses:
+ status = overall_status(status, substatuses)
print(f"{status}: {part_name}{extra}")
self.test_parts[part_name] = status
@@ -420,6 +443,25 @@ class PackageTest:
lines.append(f"{totals:=^80}")
return lines
+ def write_tested_status(self):
+ """Write the overall status to the tested file.
+
+ If there are any test part failures, then the tests failed. If all test
+ parts are skipped, then the tests were skipped. If any tests passed, then
+ the tests passed; otherwise, no tests were executed.
+ """
+ status = TestStatus.NO_TESTS
+ if self.counts[TestStatus.FAILED] > 0:
+ status = TestStatus.FAILED
+ else:
+ skipped = self.counts[TestStatus.SKIPPED]
+ if skipped and self.parts() == skipped:
+ status = TestStatus.SKIPPED
+ elif self.counts[TestStatus.PASSED] > 0:
+ status = TestStatus.PASSED
+
+ _add_msg_to_file(self.tested_file, f"{status.value}")
+
@contextlib.contextmanager
def test_part(pkg: Pb, test_name: str, purpose: str, work_dir: str = ".", verbose: bool = False):
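write_tested_status condenses the per-part counts into a single value written to the tested file. Its priority rule (any failure wins, then all-skipped, then any pass, otherwise no tests ran) can be restated standalone; the sketch below uses a hypothetical summarize helper for illustration and is not part of Spack itself:

from spack.install_test import TestStatus

def summarize(counts, total_parts):
    # FAILED wins, then all-skipped, then any pass; otherwise nothing ran.
    if counts.get(TestStatus.FAILED, 0) > 0:
        return TestStatus.FAILED
    skipped = counts.get(TestStatus.SKIPPED, 0)
    if skipped and skipped == total_parts:
        return TestStatus.SKIPPED
    if counts.get(TestStatus.PASSED, 0) > 0:
        return TestStatus.PASSED
    return TestStatus.NO_TESTS

assert summarize({TestStatus.PASSED: 1, TestStatus.SKIPPED: 1}, 2) == TestStatus.PASSED
assert summarize({TestStatus.SKIPPED: 2}, 2) == TestStatus.SKIPPED
assert summarize({}, 0) == TestStatus.NO_TESTS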
@@ -654,8 +696,9 @@ def process_test_parts(pkg: Pb, test_specs: List[spack.spec.Spec], verbose: bool
try:
tests = test_functions(spec.package_class)
except spack.repo.UnknownPackageError:
- # some virtuals don't have a package
- tests = []
+ # Some virtuals don't have a package, so we don't want to report
+ # them as not having tests when that isn't appropriate.
+ continue
if len(tests) == 0:
tester.status(spec.name, TestStatus.NO_TESTS)
@@ -682,7 +725,7 @@ def process_test_parts(pkg: Pb, test_specs: List[spack.spec.Spec], verbose: bool
finally:
if tester.ran_tests():
- fs.touch(tester.tested_file)
+ tester.write_tested_status()
# log one more test message to provide a completion timestamp
# for CDash reporting
@@ -889,20 +932,15 @@ class TestSuite:
if remove_directory:
shutil.rmtree(test_dir)
- tested = os.path.exists(self.tested_file_for_spec(spec))
- if tested:
- status = TestStatus.PASSED
- else:
- self.ensure_stage()
- if spec.external and not externals:
- status = TestStatus.SKIPPED
- elif not spec.installed:
- status = TestStatus.SKIPPED
- else:
- status = TestStatus.NO_TESTS
+ status = self.test_status(spec, externals)
self.counts[status] += 1
-
self.write_test_result(spec, status)
+
+ except SkipTest:
+ status = TestStatus.SKIPPED
+ self.counts[status] += 1
+ self.write_test_result(spec, TestStatus.SKIPPED)
+
except BaseException as exc:
status = TestStatus.FAILED
self.counts[status] += 1
@@ -939,6 +977,31 @@ class TestSuite:
if failures:
raise TestSuiteFailure(failures)
+ def test_status(self, spec: spack.spec.Spec, externals: bool) -> Optional[TestStatus]:
+ """Determine the overall test results status for the spec.
+
+ Args:
+ spec: instance of the spec under test
+ externals: ``True`` if externals are to be tested, else ``False``
+
+ Returns:
+ the spec's test status if available or ``None``
+ """
+ tests_status_file = self.tested_file_for_spec(spec)
+ if not os.path.exists(tests_status_file):
+ self.ensure_stage()
+ if spec.external and not externals:
+ status = TestStatus.SKIPPED
+ elif not spec.installed:
+ status = TestStatus.SKIPPED
+ else:
+ status = TestStatus.NO_TESTS
+ return status
+
+ with open(tests_status_file, "r") as f:
+ value = (f.read()).strip("\n")
+ return TestStatus(int(value)) if value else TestStatus.NO_TESTS
+
def ensure_stage(self):
"""Ensure the test suite stage directory exists."""
if not os.path.exists(self.stage):
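With this change the tested file records the TestStatus enum value rather than merely existing. A sketch of the round trip between write_tested_status and TestSuite.test_status, using a temporary file as a stand-in for the real tested file and assuming the file ends up holding the integer value followed by a newline:

import os
import tempfile

from spack.install_test import TestStatus

fd, status_file = tempfile.mkstemp(suffix="-tested.txt")
os.close(fd)

# Roughly what write_tested_status() records for an all-skipped spec.
with open(status_file, "w") as f:
    f.write(f"{TestStatus.SKIPPED.value}\n")

# Roughly what TestSuite.test_status() reads back.
with open(status_file, "r") as f:
    value = f.read().strip("\n")
recorded = TestStatus(int(value)) if value else TestStatus.NO_TESTS
assert recorded == TestStatus.SKIPPED

os.remove(status_file)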
diff --git a/lib/spack/spack/test/cmd/test.py b/lib/spack/spack/test/cmd/test.py
index 8a7147a2c9..20b54ea8c2 100644
--- a/lib/spack/spack/test/cmd/test.py
+++ b/lib/spack/spack/test/cmd/test.py
@@ -319,3 +319,17 @@ def test_report_filename_for_cdash(install_mockery_mutable_config, mock_fetch):
spack.cmd.common.arguments.sanitize_reporter_options(args)
filename = spack.cmd.test.report_filename(args, suite)
assert filename != "https://blahblah/submit.php?project=debugging"
+
+
+def test_test_output_multiple_specs(
+ mock_test_stage, mock_packages, mock_archive, mock_fetch, install_mockery_mutable_config
+):
+ """Ensure proper reporting for suite with skipped, failing, and passed tests."""
+ install("test-error", "simple-standalone-test@0.9", "simple-standalone-test@1.0")
+ out = spack_test("run", "test-error", "simple-standalone-test", fail_on_error=False)
+
+ # Note that a spec with passing *and* skipped tests is still considered
+ # to have passed at this level. If you want to see the spec-specific
+ # part result summaries, you'll have to look at the "test-out.txt" files
+ # for each spec.
+ assert "1 failed, 2 passed of 3 specs" in out
diff --git a/lib/spack/spack/test/installer.py b/lib/spack/spack/test/installer.py
index 667c6cc1fc..7b9c2b5184 100644
--- a/lib/spack/spack/test/installer.py
+++ b/lib/spack/spack/test/installer.py
@@ -1399,17 +1399,24 @@ def test_print_install_test_log_skipped(install_mockery, mock_packages, capfd, r
assert out == ""
-def test_print_install_test_log_missing(
+def test_print_install_test_log_failures(
tmpdir, install_mockery, mock_packages, ensure_debug, capfd
):
- """Confirm expected error on attempt to print missing test log file."""
+ """Confirm expected outputs when there are test failures."""
name = "trivial-install-test-package"
s = spack.spec.Spec(name).concretized()
pkg = s.package
+ # Missing test log is an error
pkg.run_tests = True
pkg.tester.test_log_file = str(tmpdir.join("test-log.txt"))
pkg.tester.add_failure(AssertionError("test"), "test-failure")
spack.installer.print_install_test_log(pkg)
err = capfd.readouterr()[1]
assert "no test log file" in err
+
+ # Having test log results in path being output
+ fs.touch(pkg.tester.test_log_file)
+ spack.installer.print_install_test_log(pkg)
+ out = capfd.readouterr()[0]
+ assert "See test results at" in out
diff --git a/lib/spack/spack/test/test_suite.py b/lib/spack/spack/test/test_suite.py
index 06b492deff..b3057e9c3d 100644
--- a/lib/spack/spack/test/test_suite.py
+++ b/lib/spack/spack/test/test_suite.py
@@ -12,6 +12,7 @@ from llnl.util.filesystem import join_path, mkdirp, touch
import spack.install_test
import spack.spec
+from spack.install_test import TestStatus
from spack.util.executable import which
@@ -20,7 +21,7 @@ def _true(*args, **kwargs):
return True
-def ensure_results(filename, expected):
+def ensure_results(filename, expected, present=True):
assert os.path.exists(filename)
with open(filename, "r") as fd:
lines = fd.readlines()
@@ -29,7 +30,10 @@ def ensure_results(filename, expected):
if expected in line:
have = True
break
- assert have
+ if present:
+ assert have, f"Expected '{expected}' in the file"
+ else:
+ assert not have, f"Expected '{expected}' NOT to be in the file"
def test_test_log_name(mock_packages, config):
@@ -78,8 +82,8 @@ def test_write_test_result(mock_packages, mock_test_stage):
assert spec.name in msg
-def test_test_uninstalled(mock_packages, install_mockery, mock_test_stage):
- """Attempt to perform stand-alone test for uninstalled package."""
+def test_test_not_installed(mock_packages, install_mockery, mock_test_stage):
+ """Attempt to perform stand-alone test for not_installed package."""
spec = spack.spec.Spec("trivial-smoke-test").concretized()
test_suite = spack.install_test.TestSuite([spec])
@@ -91,10 +95,7 @@ def test_test_uninstalled(mock_packages, install_mockery, mock_test_stage):
@pytest.mark.parametrize(
"arguments,status,msg",
- [
- ({}, spack.install_test.TestStatus.SKIPPED, "Skipped"),
- ({"externals": True}, spack.install_test.TestStatus.NO_TESTS, "No tests"),
- ],
+ [({}, TestStatus.SKIPPED, "Skipped"), ({"externals": True}, TestStatus.NO_TESTS, "No tests")],
)
def test_test_external(
mock_packages, install_mockery, mock_test_stage, monkeypatch, arguments, status, msg
@@ -156,6 +157,7 @@ def test_test_spec_passes(mock_packages, install_mockery, mock_test_stage, monke
ensure_results(test_suite.results_file, "PASSED")
ensure_results(test_suite.log_file_for_spec(spec), "simple stand-alone")
+ ensure_results(test_suite.log_file_for_spec(spec), "standalone-ifc", present=False)
def test_get_test_suite():
@@ -212,8 +214,10 @@ def test_test_functions_pkgless(mock_packages, install_mockery, ensure_debug, ca
spec = spack.spec.Spec("simple-standalone-test").concretized()
fns = spack.install_test.test_functions(spec.package, add_virtuals=True)
out = capsys.readouterr()
- assert len(fns) == 1, "Expected only one test function"
- assert "does not appear to have a package file" in out[1]
+ assert len(fns) == 2, "Expected two test functions"
+ for f in fns:
+ assert f[1].__name__ in ["test_echo", "test_skip"]
+ assert "virtual does not appear to have a package file" in out[1]
# TODO: This test should go away when compilers as dependencies is supported
@@ -301,7 +305,7 @@ def test_test_part_fail(tmpdir, install_mockery_mutable_config, mock_fetch, mock
for part_name, status in pkg.tester.test_parts.items():
assert part_name.endswith(name)
- assert status == spack.install_test.TestStatus.FAILED
+ assert status == TestStatus.FAILED
def test_test_part_pass(install_mockery_mutable_config, mock_fetch, mock_test_stage):
@@ -317,7 +321,7 @@ def test_test_part_pass(install_mockery_mutable_config, mock_fetch, mock_test_st
for part_name, status in pkg.tester.test_parts.items():
assert part_name.endswith(name)
- assert status == spack.install_test.TestStatus.PASSED
+ assert status == TestStatus.PASSED
def test_test_part_skip(install_mockery_mutable_config, mock_fetch, mock_test_stage):
@@ -331,7 +335,7 @@ def test_test_part_skip(install_mockery_mutable_config, mock_fetch, mock_test_st
for part_name, status in pkg.tester.test_parts.items():
assert part_name.endswith(name)
- assert status == spack.install_test.TestStatus.SKIPPED
+ assert status == TestStatus.SKIPPED
def test_test_part_missing_exe_fail_fast(
@@ -354,7 +358,7 @@ def test_test_part_missing_exe_fail_fast(
assert len(test_parts) == 1
for part_name, status in test_parts.items():
assert part_name.endswith(name)
- assert status == spack.install_test.TestStatus.FAILED
+ assert status == TestStatus.FAILED
def test_test_part_missing_exe(
@@ -375,7 +379,66 @@ def test_test_part_missing_exe(
assert len(test_parts) == 1
for part_name, status in test_parts.items():
assert part_name.endswith(name)
- assert status == spack.install_test.TestStatus.FAILED
+ assert status == TestStatus.FAILED
+
+
+# TODO (embedded test parts): Update this once embedded test part tracking
+# TODO (embedded test parts): properly handles the nested context managers.
+@pytest.mark.parametrize(
+ "current,substatuses,expected",
+ [
+ (TestStatus.PASSED, [TestStatus.PASSED, TestStatus.PASSED], TestStatus.PASSED),
+ (TestStatus.FAILED, [TestStatus.PASSED, TestStatus.PASSED], TestStatus.FAILED),
+ (TestStatus.SKIPPED, [TestStatus.PASSED, TestStatus.PASSED], TestStatus.SKIPPED),
+ (TestStatus.NO_TESTS, [TestStatus.PASSED, TestStatus.PASSED], TestStatus.NO_TESTS),
+ (TestStatus.PASSED, [TestStatus.PASSED, TestStatus.SKIPPED], TestStatus.PASSED),
+ (TestStatus.PASSED, [TestStatus.PASSED, TestStatus.FAILED], TestStatus.FAILED),
+ (TestStatus.PASSED, [TestStatus.SKIPPED, TestStatus.SKIPPED], TestStatus.SKIPPED),
+ ],
+)
+def test_embedded_test_part_status(
+ install_mockery_mutable_config, mock_fetch, mock_test_stage, current, substatuses, expected
+):
+ """Check to ensure the status of the enclosing test part reflects summary of embedded parts."""
+
+ s = spack.spec.Spec("trivial-smoke-test").concretized()
+ pkg = s.package
+ base_name = "test_example"
+ part_name = f"{pkg.__class__.__name__}::{base_name}"
+
+ pkg.tester.test_parts[part_name] = current
+ for i, status in enumerate(substatuses):
+ pkg.tester.test_parts[f"{part_name}_{i}"] = status
+
+ pkg.tester.status(base_name, current)
+ assert pkg.tester.test_parts[part_name] == expected
+
+
+@pytest.mark.parametrize(
+ "statuses,expected",
+ [
+ ([TestStatus.PASSED, TestStatus.PASSED], TestStatus.PASSED),
+ ([TestStatus.PASSED, TestStatus.SKIPPED], TestStatus.PASSED),
+ ([TestStatus.PASSED, TestStatus.FAILED], TestStatus.FAILED),
+ ([TestStatus.SKIPPED, TestStatus.SKIPPED], TestStatus.SKIPPED),
+ ([], TestStatus.NO_TESTS),
+ ],
+)
+def test_write_tested_status(
+ tmpdir, install_mockery_mutable_config, mock_fetch, mock_test_stage, statuses, expected
+):
+ """Check to ensure the status of the enclosing test part reflects summary of embedded parts."""
+ s = spack.spec.Spec("trivial-smoke-test").concretized()
+ pkg = s.package
+ for i, status in enumerate(statuses):
+ pkg.tester.test_parts[f"test_{i}"] = status
+ pkg.tester.counts[status] += 1
+
+ pkg.tester.tested_file = tmpdir.join("test-log.txt")
+ pkg.tester.write_tested_status()
+ with open(pkg.tester.tested_file, "r") as f:
+ status = int(f.read().strip("\n"))
+ assert TestStatus(status) == expected
def test_check_special_outputs(tmpdir):
diff --git a/var/spack/repos/builtin.mock/packages/simple-standalone-test/package.py b/var/spack/repos/builtin.mock/packages/simple-standalone-test/package.py
index 9c65773aa5..1adfb9507a 100644
--- a/var/spack/repos/builtin.mock/packages/simple-standalone-test/package.py
+++ b/var/spack/repos/builtin.mock/packages/simple-standalone-test/package.py
@@ -7,16 +7,24 @@ from spack.package import *
class SimpleStandaloneTest(Package):
- """This package has a simple stand-alone test features."""
+ """This package has simple stand-alone test features."""
homepage = "http://www.example.com/simple_test"
url = "http://www.unit-test-should-replace-this-url/simple_test-1.0.tar.gz"
- version("1.0", md5="0123456789abcdef0123456789abcdef")
+ version("1.0", md5="123456789abcdef0123456789abcdefg")
+ version("0.9", md5="0123456789abcdef0123456789abcdef")
- provides("standalone-test")
+ provides("standalone-ifc")
def test_echo(self):
"""simple stand-alone test"""
echo = which("echo")
echo("testing echo", output=str.split, error=str.split)
+
+ def test_skip(self):
+ """simple skip test"""
+ if self.spec.satisfies("@1.0:"):
+ raise SkipTest("This test is not available from v1.0 on")
+
+ print("Ran test_skip")