author     Tamara Dahlgren <35777542+tldahlgren@users.noreply.github.com>  2023-05-10 02:34:54 -0700
committer  GitHub <noreply@github.com>  2023-05-10 11:34:54 +0200
commit     9a37c8fcb16f8e1f7e228991f0dc61102d8ccc7e (patch)
tree       22635371a365263622cdc987ba2b20c91dfa81a8
parent     49677b9be5135c5c0aed9a2ebdd27c7683b69997 (diff)
Stand-alone testing: make recipe support and processing spack-/pytest-like (#34236)
This is a refactor of Spack's stand-alone test process to be more spack- and pytest-like. It is more spack-like in that test parts are no longer "hidden" in a package's run_test() method, and pytest-like in that any package method whose name starts with test_ (i.e., a "test" method) is a test part. We also support the ability to embed test parts in a test method when that makes sense.

Test methods are now implicit test parts. The docstring is the purpose for the test part. The name of the method is the name of the test part. The working directory is the active spec's test stage directory. You can embed test parts using the test_part context manager.

Functionality added by this commit:

* Adds support for multiple test_* stand-alone package test methods, each of which is an implicit test_part for execution and reporting purposes;
* Deprecates package use of run_test();
* Exposes some functionality from run_test() as optional helper methods;
* Adds a SkipTest exception that can be used to flag stand-alone tests as being skipped;
* Updates the packaging guide section on stand-alone tests to provide more examples;
* Restores the ability to run tests "inherited" from provided virtual packages;
* Prints the test log path (like we currently do for build log paths);
* Times and reports the post-install process (since it can include post-install tests);
* Corrects context-related error messages to distinguish test recipes from build recipes.
-rw-r--r--  lib/spack/docs/conf.py  1
-rw-r--r--  lib/spack/docs/packaging_guide.rst  820
-rw-r--r--  lib/spack/spack/audit.py  11
-rw-r--r--  lib/spack/spack/build_environment.py  23
-rw-r--r--  lib/spack/spack/build_systems/_checks.py  10
-rw-r--r--  lib/spack/spack/builder.py  6
-rw-r--r--  lib/spack/spack/ci.py  13
-rw-r--r--  lib/spack/spack/cmd/info.py  40
-rw-r--r--  lib/spack/spack/cmd/test.py  25
-rw-r--r--  lib/spack/spack/install_test.py  825
-rw-r--r--  lib/spack/spack/installer.py  54
-rw-r--r--  lib/spack/spack/package.py  10
-rw-r--r--  lib/spack/spack/package_base.py  430
-rw-r--r--  lib/spack/spack/repo.py  21
-rw-r--r--  lib/spack/spack/report.py  3
-rw-r--r--  lib/spack/spack/reporters/cdash.py  58
-rw-r--r--  lib/spack/spack/reporters/extract.py  101
-rw-r--r--  lib/spack/spack/spec.py  4
-rw-r--r--  lib/spack/spack/test/audit.py  2
-rw-r--r--  lib/spack/spack/test/builder.py  32
-rw-r--r--  lib/spack/spack/test/ci.py  10
-rw-r--r--  lib/spack/spack/test/cmd/env.py  7
-rw-r--r--  lib/spack/spack/test/cmd/install.py  9
-rw-r--r--  lib/spack/spack/test/cmd/module.py  2
-rw-r--r--  lib/spack/spack/test/cmd/pkg.py  10
-rw-r--r--  lib/spack/spack/test/cmd/test.py  130
-rwxr-xr-x  lib/spack/spack/test/data/test/test_stage/gavrxt67t7yaiwfek7dds7lgokmoaiin/printing-package-1.0-hzgcoow-test-out.txt  11
-rw-r--r--  lib/spack/spack/test/data/unparse/legion.txt  4
-rw-r--r--  lib/spack/spack/test/data/unparse/mfem.txt  6
-rw-r--r--  lib/spack/spack/test/install.py  5
-rw-r--r--  lib/spack/spack/test/installer.py  29
-rw-r--r--  lib/spack/spack/test/package_class.py  161
-rw-r--r--  lib/spack/spack/test/packages.py  8
-rw-r--r--  lib/spack/spack/test/repo.py  15
-rw-r--r--  lib/spack/spack/test/reporters.py  128
-rw-r--r--  lib/spack/spack/test/test_suite.py  262
-rw-r--r--  lib/spack/spack/test/util/package_hash.py  8
-rw-r--r--  lib/spack/spack/verify.py  5
-rw-r--r--  var/spack/repos/builtin.mock/packages/fail-test-audit/package.py  8
-rw-r--r--  var/spack/repos/builtin.mock/packages/mpi/package.py  16
-rw-r--r--  var/spack/repos/builtin.mock/packages/mpich/package.py  3
-rw-r--r--  var/spack/repos/builtin.mock/packages/printing-package/package.py  9
-rw-r--r--  var/spack/repos/builtin.mock/packages/py-test-callback/package.py  29
-rw-r--r--  var/spack/repos/builtin.mock/packages/simple-standalone-test/package.py  9
-rw-r--r--  var/spack/repos/builtin.mock/packages/test-error/package.py  6
-rw-r--r--  var/spack/repos/builtin.mock/packages/test-fail/package.py  6
46 files changed, 2359 insertions, 1026 deletions
diff --git a/lib/spack/docs/conf.py b/lib/spack/docs/conf.py
index dafe318275..8ba6717147 100644
--- a/lib/spack/docs/conf.py
+++ b/lib/spack/docs/conf.py
@@ -217,6 +217,7 @@ nitpick_ignore = [
# Spack classes that intersphinx is unable to resolve
("py:class", "spack.version.StandardVersion"),
("py:class", "spack.spec.DependencySpec"),
+ ("py:class", "spack.install_test.Pb"),
]
# The reST default role (used for this markup: `text`) to use for all documents.
diff --git a/lib/spack/docs/packaging_guide.rst b/lib/spack/docs/packaging_guide.rst
index 2a4d03bc72..586fdcec6e 100644
--- a/lib/spack/docs/packaging_guide.rst
+++ b/lib/spack/docs/packaging_guide.rst
@@ -18,14 +18,16 @@ have at least some familiarity with Python, and that you've read the
There are two key parts of Spack:
#. **Specs**: expressions for describing builds of software, and
-#. **Packages**: Python modules that describe how to build
- software according to a spec.
+#. **Packages**: Python modules that describe how to build and
+ test software according to a spec.
Specs allow a user to describe a *particular* build in a way that a
package author can understand. Packages allow the packager to
encapsulate the build logic for different versions, compilers,
options, platforms, and dependency combinations in one place.
-Essentially, a package translates a spec into build logic.
+Essentially, a package translates a spec into build logic. It
+also allows the packager to write spec-specific tests of the
+installed software.
Packages in Spack are written in pure Python, so you can do anything
in Spack that you can do in Python. Python was chosen as the
@@ -40,7 +42,7 @@ easy.
The only exception is for proprietary software (e.g., vendor compilers).
If a special build system needs to be added in order to support building
- a package from source, then the associated code and recipe need to be added
+ a package from source, then the associated code and recipe should be added
first.
@@ -1464,6 +1466,8 @@ Go cannot be used to fetch a particular commit or branch, it always
downloads the head of the repository. This download method is untrusted,
and is not recommended. Use another fetch strategy whenever possible.
+.. _variants:
+
--------
Variants
--------
@@ -4096,6 +4100,8 @@ special parameters to ``configure``, like
need to supply special compiler flags depending on the compiler. All
of this information is available in the spec.
+.. _testing-specs:
+
^^^^^^^^^^^^^^^^^^^^^^^^
Testing spec constraints
^^^^^^^^^^^^^^^^^^^^^^^^
@@ -4963,106 +4969,294 @@ be left in the build stage directory as illustrated below:
.. _cmd-spack-test:
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Stand-alone (or smoke) tests
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^
+Stand-alone tests
+^^^^^^^^^^^^^^^^^
-While build-time tests are integrated with the installation process,
-stand-alone tests are independent of that process. Consequently, such
-tests can be performed days, even weeks, after the software is installed.
+While build-time tests are integrated with the build process, stand-alone
+tests are expected to run days, weeks, even months after the software is
+installed. The goal is to provide a mechanism for gaining confidence that
+packages work as installed **and** *continue* to work as the underlying
+software evolves. Packages can add and inherit stand-alone tests. The
+``spack test`` command is used to manage stand-alone testing.
-Stand-alone tests are checks that should run relatively quickly -- as
-in on the order of at most a few minutes -- and ideally execute all
-aspects of the installed software, or at least key functionality.
+.. note::
+
+ Execution speed is important since these tests are intended to quickly
+ assess whether installed specs work on the system. Consequently, they
+ should run relatively quickly -- as in on the order of at most a few
+ minutes -- while ideally executing all, or at least key, aspects of the
+ installed software.
.. note::
- Execution speed is important because these tests are intended
- to quickly assess whether the installed software works on the
- system.
+ Failing stand-alone tests indicate problems with the installation and,
+ therefore, there is no reason to proceed with more resource-intensive
+ tests until those have been investigated.
- Failing stand-alone tests indicate that there is no reason to
- proceed with more resource-intensive tests.
+ Passing stand-alone tests indicate that more thorough testing, such
+ as running extensive unit or regression tests, or tests that run at
+ scale, can proceed without wasting resources on a problematic installation.
- Passing stand-alone (or smoke) tests can lead to more thorough
- testing, such as extensive unit or regression tests, or tests
- that run at scale.
+Tests are defined in the package using methods with names beginning ``test_``.
+This allows Spack to support multiple independent checks, or parts. Files
+needed for testing, such as source, data, and expected outputs, may be saved
+from the build and/or stored with the package in the repository. Regardless
+of origin, these files are automatically copied to the spec's test stage
+directory prior to execution of the test method(s). Spack also provides some
+helper functions to facilitate processing.
-Stand-alone tests have their own test stage directory, which can be
-configured. These tests can compile or build software with the compiler
-used to build the package. They can use files cached from the build for
-testing the installation. Custom files, such as source, data, or expected
-outputs can be added for use in these tests.
+.. _configure-test-stage:
""""""""""""""""""""""""""""""""""""
Configuring the test stage directory
""""""""""""""""""""""""""""""""""""
-Stand-alone tests rely on a stage directory for building, running,
-and tracking results.
-The default directory, ``~/.spack/test``, is defined in
-:ref:`etc/spack/defaults/config.yaml <config-yaml>`.
-You can configure the location in the high-level ``config`` by adding
-or changing the ``test_stage`` path in the appropriate ``config.yaml``
-file such that:
+Stand-alone tests utilize a test stage directory for building, running,
+and tracking results in the same way Spack uses a build stage directory.
+The default test stage root directory, ``~/.spack/test``, is defined in
+:ref:`etc/spack/defaults/config.yaml <config-yaml>`. This location is
+customizable by adding or changing the ``test_stage`` path in the high-level
+``config`` of the appropriate ``config.yaml`` file such that:
.. code-block:: yaml
config:
- test_stage: /path/to/stage
+ test_stage: /path/to/test/stage
-The package can access this path **during test processing** using
-`self.test_suite.stage`.
+Packages can use the ``self.test_suite.stage`` property to access this setting.
+Other package properties that provide access to spec-specific subdirectories
+and files are described in :ref:`accessing staged files <accessing-files>`.
.. note::
- The test stage path is established for the entire suite. That
- means it is the root directory for all specs being installed
- with the same `spack test run` command. Each spec gets its
- own stage subdirectory.
+ The test stage path is the root directory for the **entire suite**.
+ In other words, it is the root directory for **all specs** being
+ tested by the ``spack test run`` command. Each spec gets its own
+ stage subdirectory. Use ``self.test_suite.test_dir_for_spec(self.spec)``
+ to access the spec-specific test stage directory.
+
+
+.. _adding-standalone-tests:
+
+""""""""""""""""""""""""
+Adding stand-alone tests
+""""""""""""""""""""""""
+
+Test recipes are defined in the package using methods with names beginning
+``test_``. This allows for the implementation of multiple independent tests.
+Each method has access to the information Spack tracks on the package, such
+as options, compilers, and dependencies, supporting the customization of tests
+to the build. Standard Python ``assert`` statements and other error reporting
+mechanisms are available; any exceptions they raise are automatically caught
+and reported as test failures.
+
+Each test method is an implicit test part named by the method and whose
+purpose is the method's docstring. Providing a purpose gives context that
+can aid debugging. A test method may contain embedded test parts. Spack
+outputs the test name and purpose prior to running each test method and
+any embedded test parts. For example, ``MyPackage`` below provides two basic
+examples of installation tests: ``test_always_fails`` and ``test_example``.
+As the name indicates, the first always fails. The second simply runs the
+installed example.
+
+.. code-block:: python
+
+ class MyPackage(Package):
+ ...
+
+ def test_always_fails(self):
+ """use assert to always fail"""
+ assert False
+
+ def test_example(self):
+ """run installed example"""
+ example = which(self.prefix.bin.example)
+ example()
+
+Output showing the identification of each test part after running the tests
+is illustrated below.
+
+.. code-block:: console
+
+ $ spack test run --alias mypackage mypackage@1.0
+ ==> Spack test mypackage
+ ...
+ $ spack test results -l mypackage
+ ==> Results for test suite 'mypackage':
+ ...
+ ==> [2023-03-10-16:03:56.625204] test: test_always_fails: use assert to always fail
+ ...
+ FAILED
+ ==> [2023-03-10-16:03:56.625439] test: test_example: run installed example
+ ...
+ PASSED
+
+
+.. note::
+
+ If ``MyPackage`` were a recipe for a library, the tests should build
+ an example or test program that is then executed.
+
+A test method can include test parts using the ``test_part`` context manager.
+Each part is treated as an independent check to allow subsequent test parts
+to execute even after a test part fails.
+
+.. _test-part:
+
+The signature for ``test_part`` is:
+
+.. code-block:: python
+
+ def test_part(pkg, test_name, purpose, work_dir=".", verbose=False):
+
+where each argument has the following meaning:
+
+* ``pkg`` is an instance of the package for the spec under test.
+
+* ``test_name`` is the name of the test part, which must start with ``test_``.
+
+* ``purpose`` is a brief description used as a heading for the test part.
+
+ Output from the test is written to a test log file allowing the test name
+ and purpose to be searched for test part confirmation and debugging.
+
+* ``work_dir`` is the path to the directory in which the test will run.
+
+ The default of ``None``, or ``"."``, corresponds to the spec's test
+ stage (i.e., ``self.test_suite.test_dir_for_spec(self.spec)``).
+
+.. admonition:: Tests should **not** run under the installation directory.
+
+ Use of the package spec's installation directory for building and running
+ tests is **strongly** discouraged. Doing so causes permission errors for
+ shared spack instances *and* facilities that install the software in
+ read-only file systems or directories.
+
+Suppose ``MyPackage`` actually installs two examples we want to use for tests.
+These checks can be implemented as separate checks or, as illustrated below,
+embedded test parts.
+
+.. code-block:: python
+
+ class MyPackage(Package):
+ ...
+
+ def test_example(self):
+ """run installed examples"""
+ for example in ["ex1", "ex2"]:
+ with test_part(
+ self,
+ "test_example_{0}".format(example),
+ purpose="run installed {0}".format(example),
+ ):
+ exe = which(join_path(self.prefix.bin, example))
+ exe()
+
+In this case, there will be an implicit test part for ``test_example``
+and separate sub-parts for ``ex1`` and ``ex2``. The second sub-part
+will be executed regardless of whether the first passes. The test
+log for a run where the first executable fails and the second passes
+is illustrated below.
+
+.. code-block:: console
+
+ $ spack test run --alias mypackage mypackage@1.0
+ ==> Spack test mypackage
+ ...
+ $ spack test results -l mypackage
+ ==> Results for test suite 'mypackage':
+ ...
+ ==> [2023-03-10-16:03:56.625204] test: test_example: run installed examples
+ ==> [2023-03-10-16:03:56.625439] test: test_example_ex1: run installed ex1
+ ...
+ FAILED
+ ==> [2023-03-10-16:03:56.625555] test: test_example_ex2: run installed ex2
+ ...
+ PASSED
+ ...
+
+.. warning::
+
+ Test results reporting requires that each test method and embedded
+ test part for a package have a unique name.
+
+Stand-alone tests run in an environment that provides access to information
+Spack has on how the software was built, such as build options, dependencies,
+and compilers. Build options and dependencies are accessed with the normal
+spec checks. Examples of checking :ref:`variant settings <variants>` and
+:ref:`spec constraints <testing-specs>` can be found at the provided links.
+Accessing compilers in stand-alone tests that are used by the build requires
+setting a package property as described :ref:`below <test-compilation>`.
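+
+As a minimal sketch (the package, variant, dependency, and executable names
+below are illustrative, not from an actual recipe, and ``SkipTest`` -- added
+by this change -- is assumed to be available in the package namespace), a
+test method might check a variant, skip when it is absent, and use a
+dependency's prefix:
+
+.. code-block:: python
+
+   class MyPackage(Package):
+       ...
+
+       def test_mpi_example(self):
+           """run the installed example under mpirun when built with +mpi"""
+           if "+mpi" not in self.spec:
+               raise SkipTest("Test requires the +mpi variant")
+
+           # mpirun is taken from the mpi dependency's installation prefix
+           mpirun = which(self.spec["mpi"].prefix.bin.mpirun)
+           mpirun("-np", "1", self.prefix.bin.example)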
+
+
+.. _test-compilation:
"""""""""""""""""""""""""
Enabling test compilation
"""""""""""""""""""""""""
-Some stand-alone tests will require access to the compiler with which
-the package was built, especially for library-only packages. You must
-enable loading the package's compiler configuration by setting the
-``test_requires_compiler`` property to ``True`` for your package.
-For example:
+If you want to build and run binaries in tests, then you'll need to tell
+Spack to load the package's compiler configuration. This is accomplished
+by setting the package's ``test_requires_compiler`` property to ``True``.
+
+Setting the property to ``True`` ensures access to the compiler through
+canonical environment variables (e.g., ``CC``, ``CXX``, ``FC``, ``F77``).
+It also gives access to build dependencies like ``cmake`` through their
+spec objects (e.g., ``self.spec["cmake"].prefix.bin.cmake``).
+
+.. note::
+
+ The ``test_requires_compiler`` property should be added at the top of
+ the package near other attributes, such as the ``homepage`` and ``url``.
+
+The example below illustrates using this feature to compile and run an example program.
.. code-block:: python
- class MyPackage(Package):
+ class MyLibrary(Package):
...
test_requires_compiler = True
+ ...
-Setting this property to ``True`` makes the compiler available in the
-test environment through the canonical environment variables (e.g.,
-``CC``, ``CXX``, ``FC``, ``F77``).
-
-.. note::
+ def test_cxx_example(self):
+ """build and run cxx-example"""
+ exe = "cxx-example"
+ ...
+ cxx = which(os.environ["CXX"])
+ cxx(
+ "-L{0}".format(self.prefix.lib),
+ "-I{0}".format(self.prefix.include),
+ "{0}.cpp".format(exe),
+ "-o",
+ exe
+ )
+ cxx_example = which(exe)
+ cxx_example()
- We recommend adding the property at the top of the package with the
- other attributes, such as ``homepage`` and ``url``.
.. _cache_extra_test_sources:
"""""""""""""""""""""""
-Adding build-time files
+Saving build-time files
"""""""""""""""""""""""
.. note::
- We highly recommend re-using build-time tests and input files
- for testing installed software. These files are easier to keep
- synchronized since they reside within the software's repository
- than maintaining custom install test files with the Spack package.
+ We highly recommend re-using build-time test sources and pared down
+ input files for testing installed software. These files are easier
+ to keep synchronized with software capabilities since they reside
+ within the software's repository.
+
+ If that is not possible, you can add test-related files to the package
+ repository (see :ref:`adding custom files <cache_custom_files>`). It
+ will be important to maintain them so they work across listed or supported
+ versions of the package.
You can use the ``cache_extra_test_sources`` method to copy directories
-and or files from the build stage directory to the package's installation
-directory.
+and/or files from the source build stage directory to the package's
+installation directory.
The signature for ``cache_extra_test_sources`` is:
@@ -5070,54 +5264,73 @@ The signature for ``cache_extra_test_sources`` is:
def cache_extra_test_sources(self, srcs):
-where ``srcs`` is a string or a list of strings corresponding to
-the paths for the files and or subdirectories, relative to the staged
-source, that are to be copied to the corresponding relative test path
-under the prefix. All of the contents within each subdirectory will
-also be copied.
+where ``srcs`` is a string *or* a list of strings corresponding to the
+paths of subdirectories and/or files needed for stand-alone testing.
+The paths must be relative to the staged source directory. Contents of
+subdirectories and files are copied to a special test cache subdirectory
+of the installation prefix. They are automatically copied to the appropriate
+relative paths under the test stage directory prior to executing stand-alone
+tests.
For example, a package method for copying everything in the ``tests``
subdirectory plus the ``foo.c`` and ``bar.c`` files from ``examples``
-can be implemented as shown below.
-
-.. note::
-
- The method name ``copy_test_sources`` here is for illustration
- purposes. You are free to use a name that is more suited to your
- package.
-
- The key to copying the files at build time for stand-alone testing
- is use of the ``run_after`` directive, which ensures the associated
- files are copied **after** the provided build stage.
+and using ``foo.c`` in a test method is illustrated below.
.. code-block:: python
- class MyPackage(Package):
+ class MyLibPackage(Package):
...
@run_after("install")
- def copy_test_sources(self):
+ def copy_test_files(self):
srcs = ["tests",
join_path("examples", "foo.c"),
join_path("examples", "bar.c")]
self.cache_extra_test_sources(srcs)
+ def test_foo(self):
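+ """build and run foo"""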
+ exe = "foo"
+ src_dir = join_path(
+ self.test_suite.current_test_cache_dir, "examples"
+ )
+ with working_dir(src_dir):
+ cc = which(os.environ["CC"])
+ cc(
+ "-L{0}".format(self.prefix.lib),
+ "-I{0}".format(self.prefix.include),
+ "{0}.c".format(exe),
+ "-o",
+ exe
+ )
+ foo = which(exe)
+ foo()
+
In this case, the method copies the associated files from the build
-stage **after** the software is installed to the package's metadata
-directory. The result is the directory and files will be cached in
-a special test subdirectory under the installation prefix.
-
-These paths are **automatically copied** to the test stage directory
-during stand-alone testing. The package's ``test`` method can access
-them using the ``self.test_suite.current_test_cache_dir`` property.
-In our example, the method would use the following paths to reference
+stage, **after** the software is installed, to the package's test
+cache directory. Then ``test_foo`` builds ``foo`` using ``foo.c``
+before running the program.
+
+.. note::
+
+ The method name ``copy_test_files`` here is for illustration purposes.
+ You are free to use a name that is more suited to your package.
+
+ The key to copying files for stand-alone testing at build time is use
+ of the ``run_after`` directive, which ensures the associated files are
+ copied **after** the provided build stage where the files **and**
+ installation prefix are available.
+
+These paths are **automatically copied** from cache to the test stage
+directory prior to the execution of any stand-alone tests. Tests access
+the files using the ``self.test_suite.current_test_cache_dir`` property.
+In our example above, test methods can use the following paths to reference
the copy of each entry listed in ``srcs``, respectively:
* ``join_path(self.test_suite.current_test_cache_dir, "tests")``
* ``join_path(self.test_suite.current_test_cache_dir, "examples", "foo.c")``
* ``join_path(self.test_suite.current_test_cache_dir, "examples", "bar.c")``
-.. note::
+.. admonition:: Library packages should build stand-alone tests
Library developers will want to build the associated tests
against their **installed** libraries before running them.
@@ -5125,10 +5338,22 @@ the copy of each entry listed in ``srcs``, respectively:
.. note::
While source and input files are generally recommended, binaries
- **may** also be cached by the build process for install testing.
- Only you, as the package writer or maintainer, know whether these
- would be appropriate for ensuring the installed software continues
- to work as the underlying system evolves.
+ **may** also be cached by the build process. Only you, as the package
+ writer or maintainer, know whether these files would be appropriate
+ for testing the installed software weeks to months later.
+
+.. note::
+
+ If one or more of the copied files needs to be modified to reference
+ the installed software, it is recommended that those changes be made
+ to the cached files **once** in the ``copy_test_files`` method and
+ **after** the call to ``self.cache_extra_test_sources()``. This will
+ reduce the amount of unnecessary work in the test method **and** avoid
+ problems testing in shared instances and facility deployments.
+
+ The ``filter_file`` function can be quite useful for such changes.
+ See :ref:`file manipulation <file-manipulation>`.
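+
+As a sketch of such a fixup (the file name and pattern are illustrative, and
+the assumption that cached sources land under the prefix's ``.spack/test``
+directory is based on the log shown later in this section), the cached file
+could be rewritten right after caching:
+
+.. code-block:: python
+
+   @run_after("install")
+   def copy_test_files(self):
+       self.cache_extra_test_sources(["tests"])
+
+       # Illustrative: point the cached makefile at the installed prefix
+       makefile = join_path(self.prefix, ".spack", "test", "tests", "Makefile")
+       filter_file(r"^PREFIX\s*=.*", "PREFIX = {0}".format(self.prefix), makefile)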
+
.. _cache_custom_files:
@@ -5136,21 +5361,46 @@ the copy of each entry listed in ``srcs``, respectively:
Adding custom files
"""""""""""""""""""
-Some tests may require additional files not available from the build.
-Examples include:
+In some cases it is useful to have additional files for building tests or
+checking their results. Examples include:
- test source files
- test input files
- test build scripts
-- expected test output
+- expected test outputs
+
+While obtaining such files from the software repository is preferred (see
+:ref:`adding build-time files <cache_extra_test_sources>`), there are
+circumstances where that is not feasible (e.g., the software is not being
+actively maintained). When test files can't be obtained from the repository
+or as a supplement to files that can, Spack supports the inclusion of
+additional files under the ``test`` subdirectory of the package in the
+Spack repository.
+
+Spack **automatically copies** the contents of that directory to the
+test staging directory prior to running stand-alone tests. Test methods
+access those files using the ``self.test_suite.current_test_data_dir``
+property as shown below.
-These extra files should be added to the ``test`` subdirectory of the
-package in the Spack repository.
+.. code-block:: python
+
+ class MyLibrary(Package):
+ ...
+
+ test_requires_compiler = True
+ ...
+
+ def test_example(self):
+ """build and run custom-example"""
+ data_dir = self.test_suite.current_test_data_dir
+ exe = "custom-example"
+ src = data_dir.join("{0}.cpp".format(exe))
+ ...
+ # TODO: Build custom-example using src and exe
+ ...
+ custom_example = which(exe)
+ custom_example()
-Spack will **automatically copy** the contents of that directory to the
-test staging directory for stand-alone testing. The ``test`` method can
-access those files using the ``self.test_suite.current_test_data_dir``
-property.
.. _expected_test_output_from_file:
@@ -5160,7 +5410,8 @@ Reading expected output from a file
The helper function ``get_escaped_text_output`` is available for packages
to retrieve and properly format the text from a file that contains the
-output that is expected when an executable is run using ``self.run_test``.
+expected output from running an executable, where the output may contain
+special characters.
The signature for ``get_escaped_text_output`` is:
@@ -5171,194 +5422,84 @@ The signature for ``get_escaped_text_output`` is:
where ``filename`` is the path to the file containing the expected output.
The ``filename`` for a :ref:`custom file <cache_custom_files>` can be
-accessed and used as illustrated by a simplified version of an ``sqlite``
-package check:
+accessed by tests using the ``self.test_suite.current_test_data_dir``
+property. The example below illustrates how to read a file that was
+added to the package's ``test`` subdirectory.
.. code-block:: python
+ import re
+
class Sqlite(AutotoolsPackage):
...
- def test(self):
+ def test_example(self):
+ """check example table dump"""
test_data_dir = self.test_suite.current_test_data_dir
db_filename = test_data_dir.join("packages.db")
..
-
expected = get_escaped_text_output(test_data_dir.join("dump.out"))
- self.run_test("sqlite3",
- [db_filename, ".dump"],
- expected,
- installed=True,
- purpose="test: checking dump output",
- skip_missing=False)
-
-Expected outputs do not have to be stored with the Spack package.
-Maintaining them with the source is actually preferable.
+ sqlite3 = which(self.prefix.bin.sqlite3)
+ out = sqlite3(
+ db_filename, ".dump", output=str.split, error=str.split
+ )
+ for exp in expected:
+ assert re.search(exp, out), "Expected '{0}' in output".format(exp)
-Suppose a package's source has ``examples/foo.c`` and ``examples/foo.out``
-files that are copied for stand-alone test purposes using
-:ref:`cache_extra_test_sources <cache_extra_test_sources>` and the
-`run_test` method builds the executable ``examples/foo``. The package
-can retrieve the expected output from ``examples/foo.out`` using:
+If the file was instead copied from the ``tests`` subdirectory of the staged
+source code, the path would be obtained as shown below.
.. code-block:: python
- class MyFooPackage(Package):
- ...
-
- def test(self):
- ..
- filename = join_path(self.test_suite.current_test_cache_dir,
- "examples", "foo.out")
- expected = get_escaped_text_output(filename)
- ..
+ def test_example(self):
+ """check example table dump"""
+ test_cache_dir = self.test_suite.current_test_cache_dir
+ db_filename = test_cache_dir.join("packages.db")
-Alternatively, suppose ``MyFooPackage`` installs tests in ``share/tests``
-and their outputs in ``share/tests/outputs``. The expected output for
-``foo``, assuming it is still called ``foo.out``, can be retrieved as
+Alternatively, if the file was copied to the ``share/tests`` subdirectory
+as part of the installation process, the test could access the path as
follows:
.. code-block:: python
- class MyFooPackage(Package):
- ...
+ def test_example(self):
+ """check example table dump"""
+ db_filename = join_path(self.prefix.share.tests, "packages.db")
- def test(self):
- ..
- filename = join_path(self.prefix.share.tests.outputs, "foo.out")
- expected = get_escaped_text_output(filename)
- ..
+.. _check_outputs:
-""""""""""""""""""""""""
-Adding stand-alone tests
-""""""""""""""""""""""""
-
-Stand-alone tests are defined in the package's ``test`` method. The
-default ``test`` method is a no-op so you'll want to override it to
-implement the tests.
-
-.. note::
+""""""""""""""""""""""""""""""""""""
+Comparing expected to actual outputs
+""""""""""""""""""""""""""""""""""""
- Any package method named ``test`` is automatically executed by
- Spack when the ``spack test run`` command is performed.
+The helper function ``check_outputs`` is available for packages to ensure
+the expected outputs from running an executable are contained within the
+actual outputs.
-For example, the ``MyPackage`` package below provides a skeleton for
-the test method.
+The signature for ``check_outputs`` is:
.. code-block:: python
- class MyPackage(Package):
- ...
-
- def test(self):
- # TODO: Add quick checks of the installed software
- pass
+ def check_outputs(expected, actual):
-Stand-alone tests run in an environment that provides access to the
-package and all of its dependencies, including ``test``-type
-dependencies.
+where each argument has the expected type and meaning:
-Standard python ``assert`` statements and other error reporting
-mechanisms can be used in the ``test`` method. Spack will report
-such errors as test failures.
+* ``expected`` is a string or list of strings containing the expected (raw)
+ output.
-You can implement multiple tests (or test parts) within the ``test``
-method using the ``run_test`` method. Each invocation is run separately
-in a manner that allows testing to continue after failures.
+* ``actual`` is a string containing the actual output from executing the command.
-The signature for ``run_test`` is:
+Invoking the method is the equivalent of:
.. code-block:: python
- def run_test(self, exe, options=[], expected=[], status=0,
- installed=False, purpose="", skip_missing=False,
- work_dir=None):
-
-where each argument has the following meaning:
-
-* ``exe`` is the executable to run.
-
- If a name, the ``exe`` is required to be found in one of the paths
- in the ``PATH`` environment variable **unless** ``skip_missing`` is
- ``True``. Alternatively, a relative (to ``work_dir``) or fully
- qualified path for the executable can be provided in ``exe``.
-
- The test will fail if the resulting path is not within the prefix
- of the package being tested **unless** ``installed`` is ``False``.
-
-* ``options`` is a list of the command line options.
-
- Options are a list of strings to be passed to the executable when
- it runs.
-
- The default is ``[]``, which means no options are provided to the
- executable.
-
-* ``expected`` is an optional list of expected output strings.
-
- Spack requires every string in ``expected`` to be a regex matching
- part of the output from the test run (e.g.,
- ``expected=["completed successfully", "converged in"]``). The
- output can also include expected failure outputs (e.g.,
- ``expected=["failed to converge"]``).
-
- The expected output can be :ref:`read from a file
- <expected_test_output_from_file>`.
-
- The default is ``expected=[]``, so Spack will not check the output.
-
-* ``status`` is the optional expected return code(s).
-
- A list of return codes corresponding to successful execution can
- be provided (e.g., ``status=[0,3,7]``). Support for non-zero return
- codes allows for basic **expected failure** tests as well as different
- return codes across versions of the software.
+ for check in expected:
+ if not re.search(check, actual):
+ raise RuntimeError("Expected '{0}' in output '{1}'".format(check, actual))
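+
+A hypothetical test part might combine ``check_outputs`` with
+``get_escaped_text_output`` (a sketch only; the executable and the
+``version.out`` data file are illustrative):
+
+.. code-block:: python
+
+   def test_version(self):
+       """ensure the installed example reports the expected version"""
+       expected = get_escaped_text_output(
+           self.test_suite.current_test_data_dir.join("version.out")
+       )
+       example = which(self.prefix.bin.example)
+       actual = example("--version", output=str.split, error=str.split)
+       check_outputs(expected, actual)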
- The default is ``status=[0]``, which corresponds to **successful**
- execution in the sense that the executable does not exit with a
- failure code or raise an exception.
-
-* ``installed`` is used to require ``exe`` to be within the package
- prefix.
-
- If ``True``, then the path for ``exe`` is required to be within the
- package prefix; otherwise, the path is not constrained.
-
- The default is ``False``, so the fully qualified path for ``exe``
- does **not** need to be within the installation directory.
-
-* ``purpose`` is an optional heading describing the the test part.
-
- Output from the test is written to a test log file so this argument
- serves as a searchable heading in text logs to highlight the start
- of the test part. Having a description can be helpful when debugging
- failing tests.
-
-* ``skip_missing`` is used to determine if the test should be skipped.
-
- If ``True``, then the test part should be skipped if the executable
- is missing; otherwise, the executable must exist. This option can
- be useful when test executables are removed or change as the software
- evolves in subsequent versions.
-
- The default is ``False``, which means the test executable must be
- present for any installable version of the software.
-
-* ``work_dir`` is the path to the directory from which the executable
- will run.
-
- The default of ``None`` corresponds to the current directory (``"."``).
- Each call starts with the working directory set to the spec's test stage
- directory (i.e., ``self.test_suite.test_dir_for_spec(self.spec)``).
-
-.. warning::
-
- Use of the package spec's installation directory for building and running
- tests is **strongly** discouraged. Doing so has caused permission errors
- for shared spack instances *and* for facilities that install the software
- in read-only file systems or directories.
+.. _accessing-files:
"""""""""""""""""""""""""""""""""""""""""
Accessing package- and test-related files
@@ -5366,11 +5507,14 @@ Accessing package- and test-related files
You may need to access files from one or more locations when writing
stand-alone tests. This can happen if the software's repository does not
-include test source files or includes files but has no way to build the
-executables using the installed headers and libraries. In these cases,
-you may need to reference the files relative to one or more root
-directory. The properties containing package- (or spec-) and test-related
-directory paths are provided in the table below.
+include test source files or includes them but has no way to build the
+executables using the installed headers and libraries. In these cases,
+you may need to reference the files relative to one or more root directories.
+The table below lists relevant path properties and provides additional
+examples of their use.
+:ref:`Reading expected output <expected_test_output_from_file>` provides
+examples of accessing files saved from the software repository, package
+repository, and installation.
.. list-table:: Directory-to-property mapping
:header-rows: 1
@@ -5388,7 +5532,7 @@ directory paths are provided in the table below.
- ``self.test_suite.stage``
- ``join_path(self.test_suite.stage, "results.txt")``
* - Spec's Test Stage
- - ``self.test_suite.test_dir_for_spec``
+ - ``self.test_suite.test_dir_for_spec(<spec>)``
- ``self.test_suite.test_dir_for_spec(self.spec)``
* - Current Spec's Build-time Files
- ``self.test_suite.current_test_cache_dir``
@@ -5397,15 +5541,20 @@ directory paths are provided in the table below.
- ``self.test_suite.current_test_data_dir``
- ``join_path(self.test_suite.current_test_data_dir, "hello.f90")``
+
+.. _inheriting-tests:
+
""""""""""""""""""""""""""""
Inheriting stand-alone tests
""""""""""""""""""""""""""""
Stand-alone tests defined in parent (e.g., :ref:`build-systems`) and
-virtual (e.g., :ref:`virtual-dependencies`) packages are available to
-packages that inherit from or provide interfaces for those packages,
-respectively. The table below summarizes the tests that will be included
-with those provided in the package itself when executing stand-alone tests.
+virtual (e.g., :ref:`virtual-dependencies`) packages are executed by
+packages that inherit from or provide interface implementations for those
+packages, respectively.
+
+The table below summarizes the stand-alone tests that will be executed along
+with those implemented in the package itself.
.. list-table:: Inherited/provided stand-alone tests
:header-rows: 1
@@ -5425,86 +5574,64 @@ with those provided in the package itself when executing stand-alone tests.
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/mpi>`_
- Compiles and runs ``mpi_hello`` (``c``, ``fortran``)
* - :ref:`PythonPackage <pythonpackage>`
- - Imports installed modules
+ - Imports modules listed in the ``self.import_modules`` property with defaults derived from the tarball
+ * - :ref:`SipPackage <sippackage>`
+ - Imports modules listed in the ``self.import_modules`` property with defaults derived from the tarball
-These tests are very generic so it is important that package
-developers and maintainers provide additional stand-alone tests
-customized to the package.
+These tests are very basic so it is important that package developers and
+maintainers provide additional stand-alone tests customized to the package.
-One example of a package that adds its own stand-alone (or smoke)
-tests is the `Openmpi package
-<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/openmpi/package.py>`_.
-The preliminary set of tests for the package performed the
-following checks:
+.. warning::
-- installed binaries with the ``--version`` option return the expected
- version;
-- outputs from (selected) installed binaries match expectations;
-- ``make all`` succeeds when building examples that were copied from the
- source directory during package installation; and
-- outputs from running the copied and built examples match expectations.
+ Any package that implements a test method with the same name as an
+ inherited method overrides the inherited method. If that is not the
+ goal, and you are not explicitly calling the inherited method and adding
+ functionality to it, then make sure that all test methods and embedded
+ test parts have unique names.
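+
+If extending an inherited check *is* the goal, a sketch of an override that
+calls the parent implementation first might look like the following (the
+``test_imports`` method name and the extra module are assumptions for
+illustration):
+
+.. code-block:: python
+
+   class PyMyPackage(PythonPackage):
+       ...
+
+       def test_imports(self):
+           """standard import checks plus a package-specific module"""
+           # Run the inherited import checks first
+           super().test_imports()
+
+           # Illustrative extra check using the python dependency
+           python = self.spec["python"].command
+           python("-c", "import my_package.extras")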
-Below is an example of running and viewing the stand-alone tests,
-where only the outputs for the first of each set are shown:
+One example of a package that adds its own stand-alone tests to those
+"inherited" by the virtual package it provides an implementation for is
+the `Openmpi package
+<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/openmpi/package.py>`_.
-.. code-block:: console
+Below are snippets from running and viewing the stand-alone test results
+for ``openmpi``:
- $ spack test run --alias openmpi-4.0.5 openmpi@4.0.5
- ==> Spack test openmpi-4.0.5
- ==> Testing package openmpi-4.0.5-eygjgve
- $ spack test results -l openmpi-4.0.5
- ==> Spack test openmpi-4.0.5
- ==> Testing package openmpi-4.0.5-eygjgve
- ==> Results for test suite "openmpi-4.0.5":
- ==> openmpi-4.0.5-eygjgve PASSED
- ==> Testing package openmpi-4.0.5-eygjgve
- ==> [2021-04-26-17:35:20.259650] test: ensuring version of mpiCC is 8.3.1
- ==> [2021-04-26-17:35:20.260155] "$SPACK_ROOT/opt/spack/linux-rhel7-broadwell/gcc-8.3.1/openmpi-4.0.5-eygjgvek35awfor2qaljltjind2oa67r/bin/mpiCC" "--version"
- g++ (GCC) 8.3.1 20190311 (Red Hat 8.3.1-3)
- Copyright (C) 2018 Free Software Foundation, Inc.
- This is free software; see the source for copying conditions. There is NO
- warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+.. code-block:: console
- PASSED
+ $ spack test run --alias openmpi openmpi@4.1.4
+ ==> Spack test openmpi
+ ==> Testing package openmpi-4.1.4-ubmrigj
+ ============================== 1 passed of 1 spec ==============================
+
+ $ spack test results -l openmpi
+ ==> Results for test suite 'openmpi':
+ ==> test specs:
+ ==> openmpi-4.1.4-ubmrigj PASSED
+ ==> Testing package openmpi-4.1.4-ubmrigj
+ ==> [2023-03-10-16:03:56.160361] Installing $spack/opt/spack/linux-rhel7-broadwell/gcc-8.3.1/openmpi-4.1.4-ubmrigjrqcafh3hffqcx7yz2nc5jstra/.spack/test to $test_stage/xez37ekynfbi4e7h4zdndfemzufftnym/openmpi-4.1.4-ubmrigj/cache/openmpi
+ ==> [2023-03-10-16:03:56.625204] test: test_bin: test installed binaries
+ ==> [2023-03-10-16:03:56.625439] test: test_bin_mpirun: run and check output of mpirun
+ ==> [2023-03-10-16:03:56.629807] '$spack/opt/spack/linux-rhel7-broadwell/gcc-8.3.1/openmpi-4.1.4-ubmrigjrqcafh3hffqcx7yz2nc5jstra/bin/mpirun' '-n' '1' 'ls' '..'
+ openmpi-4.1.4-ubmrigj repo
+ openmpi-4.1.4-ubmrigj-test-out.txt test_suite.lock
+ PASSED: test_bin_mpirun
...
- ==> [2021-04-26-17:35:20.493921] test: checking mpirun output
- ==> [2021-04-26-17:35:20.494461] "$SPACK_ROOT/opt/spack/linux-rhel7-broadwell/gcc-8.3.1/openmpi-4.0.5-eygjgvek35awfor2qaljltjind2oa67r/bin/mpirun" "-n" "1" "ls" ".."
- openmpi-4.0.5-eygjgve repo test_suite.lock
- openmpi-4.0.5-eygjgve-test-out.txt results.txt
- PASSED
+ ==> [2023-03-10-16:04:01.486977] test: test_version_oshcc: ensure version of oshcc is 8.3.1
+ SKIPPED: test_version_oshcc: oshcc is not installed
...
- ==> [2021-04-26-17:35:20.630452] test: ensuring ability to build the examples
- ==> [2021-04-26-17:35:20.630943] "/usr/bin/make" "all"
- mpicc -g hello_c.c -o hello_c
- mpicc -g ring_c.c -o ring_c
- mpicc -g connectivity_c.c -o connectivity_c
- mpicc -g spc_example.c -o spc_example
+ ==> [2023-03-10-16:04:02.215227] Completed testing
+ ==> [2023-03-10-16:04:02.215597]
+ ======================== SUMMARY: openmpi-4.1.4-ubmrigj ========================
+ Openmpi::test_bin_mpirun .. PASSED
+ Openmpi::test_bin_ompi_info .. PASSED
+ Openmpi::test_bin_oshmem_info .. SKIPPED
+ Openmpi::test_bin_oshrun .. SKIPPED
+ Openmpi::test_bin_shmemrun .. SKIPPED
+ Openmpi::test_bin .. PASSED
...
- PASSED
- ==> [2021-04-26-17:35:23.291214] test: checking hello_c example output and status (0)
- ==> [2021-04-26-17:35:23.291841] "./hello_c"
- Hello, world, I am 0 of 1, (Open MPI v4.0.5, package: Open MPI dahlgren@quartz2300 Distribution, ident: 4.0.5, repo rev: v4.0.5, Aug 26, 2020, 114)
- PASSED
- ...
- ==> [2021-04-26-17:35:24.603152] test: ensuring copied examples cleaned up
- ==> [2021-04-26-17:35:24.603807] "/usr/bin/make" "clean"
- rm -f hello_c hello_cxx hello_mpifh hello_usempi hello_usempif08 hello_oshmem hello_oshmemcxx hello_oshmemfh Hello.class ring_c ring_cxx ring_mpifh ring_usempi ring_usempif08 ring_oshmem ring_oshmemfh Ring.class connectivity_c oshmem_shmalloc oshmem_circular_shift oshmem_max_reduction oshmem_strided_puts oshmem_symmetric_data spc_example *~ *.o
- PASSED
- ==> [2021-04-26-17:35:24.643360] test: mpicc: expect command status in [0]
- ==> [2021-04-26-17:35:24.643834] "$SPACK_ROOT/opt/spack/linux-rhel7-broadwell/gcc-8.3.1/openmpi-4.0.5-eygjgvek35awfor2qaljltjind2oa67r/bin/mpicc" "-o" "mpi_hello_c" "$HOME/.spack/test/hyzq5eqlqfog6fawlzxwg3prqy5vjhms/openmpi-4.0.5-eygjgve/data/mpi/mpi_hello.c"
- PASSED
- ==> [2021-04-26-17:35:24.776765] test: mpirun: expect command status in [0]
- ==> [2021-04-26-17:35:24.777194] "$SPACK_ROOT/opt/spack/linux-rhel7-broadwell/gcc-8.3.1/openmpi-4.0.5-eygjgvek35awfor2qaljltjind2oa67r/bin/mpirun" "-np" "1" "mpi_hello_c"
- Hello world! From rank 0 of 1
- PASSED
- ...
-
+ ============================== 1 passed of 1 spec ==============================
-.. warning::
-
- The API for adding and running stand-alone tests is not yet considered
- stable and may change drastically in future releases. Packages with
- stand-alone tests will be refactored to match changes to the API.
.. _cmd-spack-test-list:
@@ -5514,11 +5641,11 @@ where only the outputs for the first of each set are shown:
Packages available for install testing can be found using the
``spack test list`` command. The command outputs all installed
-packages that have defined ``test`` methods.
+packages that have defined stand-alone test methods.
Alternatively you can use the ``--all`` option to get a list of
-all packages that have defined ``test`` methods even if they are
-not installed.
+all packages that have stand-alone test methods even if the packages
+are not installed.
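+
+For example, both forms can be run directly from the command line:
+
+.. code-block:: console
+
+   $ spack test list
+   $ spack test list --all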
For more information, refer to `spack test list
<https://spack.readthedocs.io/en/latest/command_index.html#spack-test-list>`_.
@@ -5530,22 +5657,22 @@ For more information, refer to `spack test list
""""""""""""""""""
Install tests can be run for one or more installed packages using
-the ``spack test run`` command. A ``test suite`` is created from
-the provided specs. If no specs are provided it will test all specs
-in the active environment or all specs installed in Spack if no
-environment is active.
+the ``spack test run`` command. A ``test suite`` is created for all
+of the provided specs. The command accepts the same arguments provided
+to ``spack install`` (see :ref:`sec-specs`). If no specs are provided
+the command tests all specs in the active environment or all specs
+installed in the Spack instance if no environment is active.
Test suites can be named using the ``--alias`` option. Unaliased
-Test suites will use the content hash of their specs as their name.
+test suites use the content hash of their specs as their name.
Some of the more commonly used debugging options are:
- ``--fail-fast`` stops testing each package after the first failure
- ``--fail-first`` stops testing packages after the first failure
-Test output is written to a text log file by default but ``junit``
-and ``cdash`` are outputs are available through the ``--log-format``
-option.
+Test output is written to a text log file by default, though ``junit``
+and ``cdash`` outputs are available through the ``--log-format`` option.
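+
+For example, a run that names the suite and requests JUnit output might look
+like the following (the alias and spec are illustrative):
+
+.. code-block:: console
+
+   $ spack test run --alias mypackage --log-format junit mypackage@1.0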
For more information, refer to `spack test run
<https://spack.readthedocs.io/en/latest/command_index.html#spack-test-run>`_.
@@ -5558,8 +5685,8 @@ For more information, refer to `spack test run
""""""""""""""""""""""
The ``spack test results`` command shows results for all completed
-test suites. Providing the alias or content hash limits reporting
-to the corresponding test suite.
+test suites by default. The alias or content hash can be provided to
+limit reporting to the corresponding test suite.
The ``--logs`` option includes the output generated by the associated
test(s) to facilitate debugging.
@@ -5589,12 +5716,13 @@ For more information, refer to `spack test find
"""""""""""""""""""""
The ``spack test remove`` command removes test suites to declutter
-the test results directory. You are prompted to confirm the removal
+the test stage directory. You are prompted to confirm the removal
of each test suite **unless** you use the ``--yes-to-all`` option.
For more information, refer to `spack test remove
<https://spack.readthedocs.io/en/latest/command_index.html#spack-test-remove>`_.
+
.. _file-manipulation:
---------------------------
diff --git a/lib/spack/spack/audit.py b/lib/spack/spack/audit.py
index de570efa97..327bb6ae4b 100644
--- a/lib/spack/spack/audit.py
+++ b/lib/spack/spack/audit.py
@@ -289,9 +289,14 @@ def _check_build_test_callbacks(pkgs, error_cls):
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
test_callbacks = getattr(pkg_cls, "build_time_test_callbacks", None)
- if test_callbacks and "test" in test_callbacks:
- msg = '{0} package contains "test" method in ' "build_time_test_callbacks"
- instr = 'Remove "test" from: [{0}]'.format(", ".join(test_callbacks))
+ # TODO (post-34236): "test*"->"test_*" once remove deprecated methods
+ # TODO (post-34236): "test"->"test_" once remove deprecated methods
+ has_test_method = test_callbacks and any([m.startswith("test") for m in test_callbacks])
+ if has_test_method:
+ msg = '{0} package contains "test*" method(s) in ' "build_time_test_callbacks"
+ instr = 'Remove all methods whose names start with "test" from: [{0}]'.format(
+ ", ".join(test_callbacks)
+ )
errors.append(error_cls(msg.format(pkg_name), [instr]))
return errors
diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py
index 14195f7c25..870ac86fc9 100644
--- a/lib/spack/spack/build_environment.py
+++ b/lib/spack/spack/build_environment.py
@@ -43,6 +43,7 @@ import types
from typing import List, Tuple
import llnl.util.tty as tty
+from llnl.util.filesystem import join_path
from llnl.util.lang import dedupe
from llnl.util.symlink import symlink
from llnl.util.tty.color import cescape, colorize
@@ -53,7 +54,6 @@ import spack.build_systems.meson
import spack.build_systems.python
import spack.builder
import spack.config
-import spack.install_test
import spack.main
import spack.package_base
import spack.paths
@@ -66,6 +66,7 @@ import spack.user_environment
import spack.util.path
import spack.util.pattern
from spack.error import NoHeadersError, NoLibrariesError
+from spack.install_test import spack_install_test_log
from spack.installer import InstallError
from spack.util.cpus import cpus_available
from spack.util.environment import (
@@ -1075,19 +1076,18 @@ def _setup_pkg_and_run(
# 'pkg' is not defined yet
pass
elif context == "test":
- logfile = os.path.join(
- pkg.test_suite.stage, spack.install_test.TestSuite.test_log_name(pkg.spec)
- )
+ logfile = os.path.join(pkg.test_suite.stage, pkg.test_suite.test_log_name(pkg.spec))
error_msg = str(exc)
if isinstance(exc, (spack.multimethod.NoSuchMethodError, AttributeError)):
+ process = "test the installation" if context == "test" else "build from sources"
error_msg = (
- "The '{}' package cannot find an attribute while trying to build "
- "from sources. This might be due to a change in Spack's package format "
+ "The '{}' package cannot find an attribute while trying to {}. "
+ "This might be due to a change in Spack's package format "
"to support multiple build-systems for a single package. You can fix this "
- "by updating the build recipe, and you can also report the issue as a bug. "
+ "by updating the {} recipe, and you can also report the issue as a bug. "
"More information at https://spack.readthedocs.io/en/latest/packaging_guide.html#installation-procedure"
- ).format(pkg.name)
+ ).format(pkg.name, process, context)
error_msg = colorize("@*R{{{}}}".format(error_msg))
error_msg = "{}\n\n{}".format(str(exc), error_msg)
@@ -1360,6 +1360,13 @@ class ChildError(InstallError):
out.write("See {0} log for details:\n".format(self.log_type))
out.write(" {0}\n".format(self.log_name))
+ # Also output the test log path IF it exists
+ if self.context != "test":
+ test_log = join_path(os.path.dirname(self.log_name), spack_install_test_log)
+ if os.path.isfile(test_log):
+ out.write("\nSee test log for details:\n")
+ out.write(" {0}\n".format(test_log))
+
return out.getvalue()
def __str__(self):
diff --git a/lib/spack/spack/build_systems/_checks.py b/lib/spack/spack/build_systems/_checks.py
index be92262f48..94c59aaa05 100644
--- a/lib/spack/spack/build_systems/_checks.py
+++ b/lib/spack/spack/build_systems/_checks.py
@@ -108,7 +108,10 @@ def execute_build_time_tests(builder: spack.builder.Builder):
builder: builder prescribing the test callbacks. The name of the callbacks is
stored as a list of strings in the ``build_time_test_callbacks`` attribute.
"""
- builder.pkg.run_test_callbacks(builder, builder.build_time_test_callbacks, "build")
+ if not builder.pkg.run_tests or not builder.build_time_test_callbacks:
+ return
+
+ builder.pkg.tester.phase_tests(builder, "build", builder.build_time_test_callbacks)
def execute_install_time_tests(builder: spack.builder.Builder):
@@ -118,7 +121,10 @@ def execute_install_time_tests(builder: spack.builder.Builder):
builder: builder prescribing the test callbacks. The name of the callbacks is
stored as a list of strings in the ``install_time_test_callbacks`` attribute.
"""
- builder.pkg.run_test_callbacks(builder, builder.install_time_test_callbacks, "install")
+ if not builder.pkg.run_tests or not builder.install_time_test_callbacks:
+ return
+
+ builder.pkg.tester.phase_tests(builder, "install", builder.install_time_test_callbacks)
class BaseBuilder(spack.builder.Builder):
diff --git a/lib/spack/spack/builder.py b/lib/spack/spack/builder.py
index db6ca7aa4b..70ff4e45ae 100644
--- a/lib/spack/spack/builder.py
+++ b/lib/spack/spack/builder.py
@@ -130,9 +130,11 @@ def _create(pkg):
bases,
{
"run_tests": property(lambda x: x.wrapped_package_object.run_tests),
- "test_log_file": property(lambda x: x.wrapped_package_object.test_log_file),
- "test_failures": property(lambda x: x.wrapped_package_object.test_failures),
+ "test_requires_compiler": property(
+ lambda x: x.wrapped_package_object.test_requires_compiler
+ ),
"test_suite": property(lambda x: x.wrapped_package_object.test_suite),
+ "tester": property(lambda x: x.wrapped_package_object.tester),
},
)
new_cls.__module__ = package_cls.__module__
diff --git a/lib/spack/spack/ci.py b/lib/spack/spack/ci.py
index d7966c13f2..41df54a5d1 100644
--- a/lib/spack/spack/ci.py
+++ b/lib/spack/spack/ci.py
@@ -2456,7 +2456,16 @@ class CDashHandler(object):
msg = "Error response code ({0}) in populate_buildgroup".format(response_code)
tty.warn(msg)
- def report_skipped(self, spec, directory_name, reason):
+ def report_skipped(self, spec: spack.spec.Spec, report_dir: str, reason: Optional[str]):
+ """Explicitly report skipping testing of a spec (e.g., it's CI
+ configuration identifies it as known to have broken tests or
+ the CI installation failed).
+
+ Args:
+ spec: spec being tested
+ report_dir: directory where the report will be written
+ reason: reason the test is being skipped
+ """
configuration = CDashConfiguration(
upload_url=self.upload_url,
packages=[spec.name],
@@ -2466,7 +2475,7 @@ class CDashHandler(object):
track=None,
)
reporter = CDash(configuration=configuration)
- reporter.test_skipped_report(directory_name, spec, reason)
+ reporter.test_skipped_report(report_dir, spec, reason)
def translate_deprecated_config(config):
diff --git a/lib/spack/spack/cmd/info.py b/lib/spack/spack/cmd/info.py
index 9e9ae219cc..acca8d3a8a 100644
--- a/lib/spack/spack/cmd/info.py
+++ b/lib/spack/spack/cmd/info.py
@@ -5,7 +5,6 @@
from __future__ import print_function
-import inspect
import textwrap
from itertools import zip_longest
@@ -15,9 +14,10 @@ from llnl.util.tty.colify import colify
import spack.cmd.common.arguments as arguments
import spack.fetch_strategy as fs
+import spack.install_test
import spack.repo
import spack.spec
-from spack.package_base import has_test_method, preferred_version
+from spack.package_base import preferred_version
description = "get detailed information on a particular package"
section = "basic"
@@ -261,41 +261,7 @@ def print_tests(pkg):
# if it has been overridden and, therefore, assumed to be implemented.
color.cprint("")
color.cprint(section_title("Stand-Alone/Smoke Test Methods:"))
- names = []
- pkg_cls = pkg if inspect.isclass(pkg) else pkg.__class__
- if has_test_method(pkg_cls):
- pkg_base = spack.package_base.PackageBase
- test_pkgs = [
- str(cls.test)
- for cls in inspect.getmro(pkg_cls)
- if issubclass(cls, pkg_base) and cls.test != pkg_base.test
- ]
- test_pkgs = list(set(test_pkgs))
- names.extend([(test.split()[1]).lower() for test in test_pkgs])
-
- # TODO Refactor START
- # Use code from package_base.py's test_process IF this functionality is
- # accepted.
- v_names = list(set([vspec.name for vspec in pkg.virtuals_provided]))
-
- # hack for compilers that are not dependencies (yet)
- # TODO: this all eventually goes away
- c_names = ("gcc", "intel", "intel-parallel-studio", "pgi")
- if pkg.name in c_names:
- v_names.extend(["c", "cxx", "fortran"])
- if pkg.spec.intersects("llvm+clang"):
- v_names.extend(["c", "cxx"])
- # TODO Refactor END
-
- v_specs = [spack.spec.Spec(v_name) for v_name in v_names]
- for v_spec in v_specs:
- try:
- pkg_cls = spack.repo.path.get_pkg_class(v_spec.name)
- if has_test_method(pkg_cls):
- names.append("{0}.test".format(pkg_cls.name.lower()))
- except spack.repo.UnknownPackageError:
- pass
-
+ names = spack.install_test.test_function_names(pkg, add_virtuals=True)
if names:
colify(sorted(names), indent=4)
else:
diff --git a/lib/spack/spack/cmd/test.py b/lib/spack/spack/cmd/test.py
index 0baab069eb..518cd70bbb 100644
--- a/lib/spack/spack/cmd/test.py
+++ b/lib/spack/spack/cmd/test.py
@@ -11,6 +11,7 @@ import os
import re
import shutil
import sys
+from collections import Counter
from llnl.util import lang, tty
from llnl.util.tty import colify
@@ -236,9 +237,8 @@ def test_list(args):
tagged = set(spack.repo.path.packages_with_tags(*args.tag)) if args.tag else set()
def has_test_and_tags(pkg_class):
- return spack.package_base.has_test_method(pkg_class) and (
- not args.tag or pkg_class.name in tagged
- )
+ tests = spack.install_test.test_functions(pkg_class)
+ return len(tests) and (not args.tag or pkg_class.name in tagged)
if args.list_all:
report_packages = [
@@ -358,18 +358,17 @@ def _report_suite_results(test_suite, args, constraints):
tty.msg("test specs:")
- failed, skipped, untested = 0, 0, 0
+ counts = Counter()
for pkg_id in test_specs:
if pkg_id in results:
status = results[pkg_id]
- if status == "FAILED":
- failed += 1
- elif status == "NO-TESTS":
- untested += 1
- elif status == "SKIPPED":
- skipped += 1
-
- if args.failed and status != "FAILED":
+ # Backward-compatibility: NO-TESTS => NO_TESTS
+ status = "NO_TESTS" if status == "NO-TESTS" else status
+
+ status = spack.install_test.TestStatus[status]
+ counts[status] += 1
+
+ if args.failed and status != spack.install_test.TestStatus.FAILED:
continue
msg = " {0} {1}".format(pkg_id, status)
@@ -381,7 +380,7 @@ def _report_suite_results(test_suite, args, constraints):
msg += "\n{0}".format("".join(f.readlines()))
tty.msg(msg)
- spack.install_test.write_test_summary(failed, skipped, untested, len(test_specs))
+ spack.install_test.write_test_summary(counts)
else:
msg = "Test %s has no results.\n" % test_suite.name
msg += " Check if it is running with "
diff --git a/lib/spack/spack/install_test.py b/lib/spack/spack/install_test.py
index 4b63a366c7..13f7a2402f 100644
--- a/lib/spack/spack/install_test.py
+++ b/lib/spack/spack/install_test.py
@@ -3,34 +3,77 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import base64
+import contextlib
+import enum
import hashlib
+import inspect
+import io
import os
import re
import shutil
+import sys
+from collections import Counter, OrderedDict
+from typing import Callable, List, Optional, Tuple, Type, TypeVar, Union
import llnl.util.filesystem as fs
import llnl.util.tty as tty
+from llnl.util.lang import nullcontext
+from llnl.util.tty.color import colorize
import spack.error
import spack.paths
import spack.util.spack_json as sjson
+from spack.installer import InstallError
from spack.spec import Spec
from spack.util.prefix import Prefix
+from spack.util.string import plural
+#: Stand-alone test failure info type
+TestFailureType = Tuple[BaseException, str]
+
+#: Name of the test suite's (JSON) lock file
test_suite_filename = "test_suite.lock"
+
+#: Name of the test suite results (summary) file
results_filename = "results.txt"
+#: Name of the Spack install phase-time test log file
+spack_install_test_log = "install-time-test-log.txt"
+
+
+ListOrStringType = Union[str, List[str]]
+LogType = Union["tty.log.nixlog", "tty.log.winlog"]
+
+Pb = TypeVar("Pb", bound="spack.package_base.PackageBase")
+PackageObjectOrClass = Union[Pb, Type[Pb]]
+
+
+class TestStatus(enum.Enum):
+ """Names of different stand-alone test states."""
+
+ NO_TESTS = -1
+ SKIPPED = 0
+ FAILED = 1
+ PASSED = 2
+
+ def __str__(self):
+ return f"{self.name}"
+
+ def lower(self):
+ name = f"{self.name}"
+ return name.lower()
-def get_escaped_text_output(filename):
+
+def get_escaped_text_output(filename: str) -> List[str]:
"""Retrieve and escape the expected text output from the file
Args:
- filename (str): path to the file
+ filename: path to the file
Returns:
- list: escaped text lines read from the file
+ escaped text lines read from the file
"""
- with open(filename, "r") as f:
+ with open(filename) as f:
# Ensure special characters are escaped as needed
expected = f.read()
@@ -52,6 +95,651 @@ def get_test_stage_dir():
)
+def cache_extra_test_sources(pkg: Pb, srcs: ListOrStringType):
+ """Copy relative source paths to the corresponding install test subdir
+
+ This routine is intended as an optional install test setup helper for
+ grabbing source files/directories during the installation process and
+ copying them to the installation test subdirectory for subsequent use
+ during install testing.
+
+ Args:
+ pkg: package being tested
+ srcs: relative path(s) for file(s) and/or subdirectory(ies) located in
+ the staged source path that are to be copied to the corresponding
+ location(s) under the install testing directory.
+
+ Raises:
+ spack.installer.InstallError: if any of the source paths are absolute
+ or do not exist under the build stage
+ """
+ errors = []
+ paths = [srcs] if isinstance(srcs, str) else srcs
+ for path in paths:
+ pre = f"Source path ('{path}')"
+ src_path = os.path.join(pkg.stage.source_path, path)
+ dest_path = os.path.join(install_test_root(pkg), path)
+ if os.path.isabs(path):
+ errors.append(f"{pre} must be relative to the build stage directory.")
+ continue
+
+ if os.path.isdir(src_path):
+ fs.install_tree(src_path, dest_path)
+ elif os.path.exists(src_path):
+ fs.mkdirp(os.path.dirname(dest_path))
+ fs.copy(src_path, dest_path)
+ else:
+ errors.append(f"{pre} for the copy does not exist")
+
+ if errors:
+ raise InstallError("\n".join(errors), pkg=pkg)
+
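As an illustrative sketch (not part of this diff), a hypothetical package could cache extra sources at install time for later stand-alone testing; the package name and the 'examples' path are assumptions:

from spack.package import *


class Demo(AutotoolsPackage):
    """Hypothetical package that caches example sources for stand-alone tests."""

    @run_after("install")
    def cache_test_sources(self):
        # Copy the staged 'examples' subdirectory into the install test root
        # so stand-alone tests can rebuild and run the examples later.
        cache_extra_test_sources(self, ["examples"])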
+
+def check_outputs(expected: Union[list, set, str], actual: str):
+ """Ensure the expected outputs are contained in the actual outputs.
+
+ Args:
+ expected: expected raw output string(s)
+ actual: actual output string
+
+ Raises:
+ RuntimeError: the expected output is not found in the actual output
+ """
+ expected = expected if isinstance(expected, (list, set)) else [expected]
+ errors = []
+ for check in expected:
+ if not re.search(check, actual):
+ errors.append(f"Expected '{check}' in output '{actual}'")
+ if errors:
+ raise RuntimeError("\n ".join(errors))
+
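A small usage sketch (assumed values, not part of this diff) of check_outputs, which is typically paired with get_escaped_text_output on a file of expected results:

from spack.install_test import check_outputs

expected = [r"Usage: demo", r"version 1\.0"]
actual = "Usage: demo [options]\ndemo version 1.0\n"

# Raises RuntimeError listing every pattern not found in `actual`
check_outputs(expected, actual)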
+
+def find_required_file(
+ root: str, filename: str, expected: int = 1, recursive: bool = True
+) -> ListOrStringType:
+ """Find the required file(s) under the root directory.
+
+ Args:
+ root: root directory for the search
+ filename: name of the file being located
+ expected: expected number of files to be found under the directory
+ (default is 1)
+ recursive: ``True`` if subdirectories are to be recursively searched,
+ else ``False`` (default is ``True``)
+
+ Returns: the path(s), relative to root, to the required file(s)
+
+ Raises:
+ Exception: SkipTest when the number of files detected does not match the expected number
+ """
+ paths = fs.find(root, filename, recursive=recursive)
+ num_paths = len(paths)
+ if num_paths != expected:
+ files = ": {}".format(", ".join(paths)) if num_paths else ""
+ raise SkipTest(
+ "Expected {} of {} under {} but {} found{}".format(
+ plural(expected, "copy", "copies"),
+ filename,
+ root,
+ plural(num_paths, "copy", "copies"),
+ files,
+ )
+ )
+
+ return paths[0] if expected == 1 else paths
+
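An illustrative sketch (hypothetical package and file names) of a test method using find_required_file; a mismatch in the expected count raises SkipTest, which is reported as SKIPPED rather than FAILED:

from spack.package import *


class Demo(Package):
    """Hypothetical package using find_required_file in a stand-alone test."""

    def test_header_installed(self):
        """check that exactly one demo.h header was installed"""
        find_required_file(self.prefix.include, "demo.h", expected=1, recursive=True)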
+
+def install_test_root(pkg: Pb):
+ """The install test root directory.
+
+ Args:
+ pkg: package being tested
+ """
+ return os.path.join(pkg.metadata_dir, "test")
+
+
+def print_message(logger: LogType, msg: str, verbose: bool = False):
+ """Print the message to the log, optionally echoing.
+
+ Args:
+ logger: instance of the output logger (e.g. nixlog or winlog)
+ msg: message being output
+ verbose: ``True`` displays verbose output, ``False`` suppresses
+ it (``False`` is default)
+ """
+ if verbose:
+ with logger.force_echo():
+ tty.info(msg, format="g")
+ else:
+ tty.info(msg, format="g")
+
+
+class PackageTest:
+ """The class that manages stand-alone (post-install) package tests."""
+
+ def __init__(self, pkg: Pb):
+ """
+ Args:
+ pkg: package being tested
+
+ Raises:
+ ValueError: if the package is not concrete
+ """
+ if not pkg.spec.concrete:
+ raise ValueError("Stand-alone tests require a concrete package")
+
+ self.counts: "Counter" = Counter() # type: ignore[attr-defined]
+ self.pkg = pkg
+ self.test_failures: List[TestFailureType] = []
+ self.test_parts: OrderedDict[str, "TestStatus"] = OrderedDict()
+ self.test_log_file: str
+ self.pkg_id: str
+
+ if pkg.test_suite:
+ # Running stand-alone tests
+ self.test_log_file = pkg.test_suite.log_file_for_spec(pkg.spec)
+ self.tested_file = pkg.test_suite.tested_file_for_spec(pkg.spec)
+ self.pkg_id = pkg.test_suite.test_pkg_id(pkg.spec)
+ else:
+ # Running phase-time tests for a single package whose results are
+ # retained in the package's stage directory.
+ pkg.test_suite = TestSuite([pkg.spec])
+ self.test_log_file = fs.join_path(pkg.stage.path, spack_install_test_log)
+ self.pkg_id = pkg.spec.format("{name}-{version}-{hash:7}")
+
+ # Internal logger for test part processing
+ self._logger = None
+
+ @property
+ def logger(self) -> Optional[LogType]:
+ """The current logger or, if none, sets to one."""
+ if not self._logger:
+ self._logger = tty.log.log_output(self.test_log_file)
+
+ return self._logger
+
+ @contextlib.contextmanager
+ def test_logger(self, verbose: bool = False, externals: bool = False):
+ """Context manager for setting up the test logger
+
+ Args:
+ verbose: Display verbose output, including echoing to stdout,
+ otherwise suppress it
+ externals: ``True`` to run tests even for an external package,
+ ``False`` to skip them
+ """
+ fs.touch(self.test_log_file) # Otherwise log_parse complains
+ fs.set_install_permissions(self.test_log_file)
+
+ with tty.log.log_output(self.test_log_file, verbose) as self._logger:
+ with self.logger.force_echo(): # type: ignore[union-attr]
+ tty.msg("Testing package " + colorize(r"@*g{" + self.pkg_id + r"}"))
+
+ # use debug print levels for log file to record commands
+ old_debug = tty.is_debug()
+ tty.set_debug(True)
+
+ try:
+ yield self.logger
+ finally:
+ # reset debug level
+ tty.set_debug(old_debug)
+
+ @property
+ def archived_install_test_log(self) -> str:
+ return fs.join_path(self.pkg.metadata_dir, spack_install_test_log)
+
+ def archive_install_test_log(self, dest_dir: str):
+ if os.path.exists(self.test_log_file):
+ fs.install(self.test_log_file, self.archived_install_test_log)
+
+ def add_failure(self, exception: Exception, msg: str):
+ """Add the failure details to the current list."""
+ self.test_failures.append((exception, msg))
+
+ def status(self, name: str, status: "TestStatus", msg: Optional[str] = None):
+ """Track and print the test status for the test part name."""
+ part_name = f"{self.pkg.__class__.__name__}::{name}"
+ extra = "" if msg is None else f": {msg}"
+
+ # Handle the special case of a test part consisting of subparts.
+ # The containing test part can be PASSED while sub-parts (assumed
+ # to start with the same name) may not have PASSED. This extra
+ # check is used to ensure the containing test part is not claiming
+ # to have passed when at least one subpart failed.
+ if status == TestStatus.PASSED:
+ for pname, substatus in self.test_parts.items():
+ if pname != part_name and pname.startswith(part_name):
+ if substatus == TestStatus.FAILED:
+ print(f"{substatus}: {part_name}{extra}")
+ self.test_parts[part_name] = substatus
+ self.counts[substatus] += 1
+ return
+
+ print(f"{status}: {part_name}{extra}")
+ self.test_parts[part_name] = status
+ self.counts[status] += 1
+
+ def phase_tests(
+ self, builder: spack.builder.Builder, phase_name: str, method_names: List[str]
+ ):
+ """Execute the builder's package phase-time tests.
+
+ Args:
+ builder: builder for package being tested
+ phase_name: the name of the build-time phase (e.g., ``build``, ``install``)
+ method_names: phase-specific callback method names
+ """
+ verbose = tty.is_verbose()
+ fail_fast = spack.config.get("config:fail_fast", False)
+
+ with self.test_logger(verbose=verbose, externals=False) as logger:
+ # Report running each of the methods in the build log
+ print_message(logger, f"Running {phase_name}-time tests", verbose)
+ builder.pkg.test_suite.current_test_spec = builder.pkg.spec
+ builder.pkg.test_suite.current_base_spec = builder.pkg.spec
+
+ # TODO (post-34236): "test"->"test_" once remove deprecated methods
+ have_tests = any(name.startswith("test") for name in method_names)
+ if have_tests:
+ copy_test_files(builder.pkg, builder.pkg.spec)
+
+ for name in method_names:
+ try:
+ # Prefer the method in the package over the builder's.
+ # We need this primarily to pick up arbitrarily named test
+ # methods but also some build-time checks.
+ fn = getattr(builder.pkg, name, getattr(builder, name))
+
+ msg = f"RUN-TESTS: {phase_name}-time tests [{name}]"
+ print_message(logger, msg, verbose)
+
+ fn()
+
+ except AttributeError as e:
+ msg = f"RUN-TESTS: method not implemented [{name}]"
+ print_message(logger, msg, verbose)
+
+ self.add_failure(e, msg)
+ if fail_fast:
+ break
+
+ if have_tests:
+ print_message(logger, "Completed testing", verbose)
+
+ # Raise any collected failures here
+ if self.test_failures:
+ raise TestFailure(self.test_failures)
+
+ def stand_alone_tests(self, kwargs):
+ """Run the package's stand-alone tests.
+
+ Args:
+ kwargs (dict): arguments to be used by the test process
+ """
+ import spack.build_environment
+
+ spack.build_environment.start_build_process(self.pkg, test_process, kwargs)
+
+ def parts(self) -> int:
+ """The total number of (checked) test parts."""
+ try:
+ # New in Python 3.10
+ total = self.counts.total() # type: ignore[attr-defined]
+ except AttributeError:
+ nums = [n for _, n in self.counts.items()]
+ total = sum(nums)
+ return total
+
+ def print_log_path(self):
+ """Print the test log file path."""
+ log = self.archived_install_test_log
+ if not os.path.isfile(log):
+ log = self.test_log_file
+ if not (log and os.path.isfile(log)):
+ tty.debug("There is no test log file (staged or installed)")
+ return
+
+ print(f"\nSee test results at:\n {log}")
+
+ def ran_tests(self) -> bool:
+ """``True`` if ran tests, ``False`` otherwise."""
+ return self.parts() > self.counts[TestStatus.NO_TESTS]
+
+ def summarize(self):
+ """Collect test results summary lines for this spec."""
+ lines = []
+ lines.append("{:=^80}".format(f" SUMMARY: {self.pkg_id} "))
+ for name, status in self.test_parts.items():
+ msg = f"{name} .. {status}"
+ lines.append(msg)
+
+ summary = [f"{n} {s.lower()}" for s, n in self.counts.items() if n > 0]
+ totals = " {} of {} parts ".format(", ".join(summary), self.parts())
+ lines.append(f"{totals:=^80}")
+ return lines
+
+
+@contextlib.contextmanager
+def test_part(pkg: Pb, test_name: str, purpose: str, work_dir: str = ".", verbose: bool = False):
+ """Context manager for running, tracking, and reporting an individual test part.
+
+ Args:
+ pkg: package being tested
+ test_name: name of the test part, which must start with 'test'
+ purpose: short description of what the test part checks
+ work_dir: path to the directory in which the test part is run
+ verbose: ``True`` echoes the output, ``False`` suppresses it
+ """
+ wdir = "." if work_dir is None else work_dir
+ tester = pkg.tester
+ # TODO (post-34236): "test"->"test_" once remove deprecated methods
+ assert test_name and test_name.startswith(
+ "test"
+ ), f"Test name must start with 'test' but {test_name} was provided"
+
+ if test_name == "test":
+ tty.warn(
+ "{}: the 'test' method is deprecated. Convert stand-alone "
+ "test(s) to methods with names starting 'test_'.".format(pkg.name)
+ )
+
+ title = "test: {}: {}".format(test_name, purpose or "unspecified purpose")
+ with fs.working_dir(wdir, create=True):
+ try:
+ context = tester.logger.force_echo if verbose else nullcontext
+ with context():
+ tty.info(title, format="g")
+ yield
+ tester.status(test_name, TestStatus.PASSED)
+
+ except SkipTest as e:
+ tester.status(test_name, TestStatus.SKIPPED, str(e))
+
+ except (AssertionError, BaseException) as e:
+ # print a summary of the error to the log file
+ # so that cdash and junit reporters know about it
+ exc_type, _, tb = sys.exc_info()
+ tester.status(test_name, TestStatus.FAILED, str(e))
+
+ import traceback
+
+ # remove the current call frame to exclude the extract_stack
+ # call from the error
+ stack = traceback.extract_stack()[:-1]
+
+ # Package files have a line added at import time, so we re-read
+ # the file to make line numbers match. We have to subtract two
+ # from the line number because the original line number is
+ # inflated once by the import statement and the lines are
+ # displaced one by the import statement.
+ for i, entry in enumerate(stack):
+ filename, lineno, function, text = entry
+ if spack.repo.is_package_file(filename):
+ with open(filename) as f:
+ lines = f.readlines()
+ new_lineno = lineno - 2
+ text = lines[new_lineno]
+ if isinstance(entry, tuple):
+ new_entry = (filename, new_lineno, function, text)
+ stack[i] = new_entry # type: ignore[call-overload]
+ elif isinstance(entry, list):
+ stack[i][1] = new_lineno # type: ignore[index]
+
+ # Format and print the stack
+ out = traceback.format_list(stack)
+ for line in out:
+ print(line.rstrip("\n"))
+
+ if exc_type is spack.util.executable.ProcessError or exc_type is TypeError:
+ iostr = io.StringIO()
+ spack.build_environment.write_log_summary(
+ iostr, "test", tester.test_log_file, last=1
+ ) # type: ignore[assignment]
+ m = iostr.getvalue()
+ else:
+ # We're below the package context, so get context from
+ # stack instead of from traceback.
+ # The traceback is truncated here, so we can't use it to
+ # traverse the stack.
+ m = "\n".join(spack.build_environment.get_package_context(tb))
+
+ exc = e # e is deleted after this block
+
+ # If we fail fast, raise another error
+ if spack.config.get("config:fail_fast", False):
+ raise TestFailure([(exc, m)])
+ else:
+ tester.add_failure(exc, m)
+
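To illustrate the new style (hypothetical package, executables, and example names), each test_ method is an implicit test part, and test_part() embeds named sub-parts within one method:

from spack.package import *


class Demo(Package):
    """Hypothetical package with stand-alone test methods."""

    def test_version(self):
        """check that the installed binary reports the expected version"""
        demo = which(self.prefix.bin.demo)
        out = demo("--version", output=str.split, error=str.split)
        assert str(self.spec.version) in out

    def test_examples(self):
        """run each installed example as a separate sub-part"""
        for name in ["hello", "goodbye"]:
            with test_part(self, f"test_examples_{name}", purpose=f"run {name}"):
                exe = which(join_path(self.prefix.bin, name))
                exe()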
+
+def copy_test_files(pkg: Pb, test_spec: spack.spec.Spec):
+ """Copy the spec's cached and custom test files to the test stage directory.
+
+ Args:
+ pkg: package being tested
+ test_spec: spec being tested, where the spec may be virtual
+
+ Raises:
+ TestSuiteError: package must be part of an active test suite
+ """
+ if pkg is None or pkg.test_suite is None:
+ base = "Cannot copy test files"
+ msg = (
+ f"{base} without a package"
+ if pkg is None
+ else f"{pkg.name}: {base}: test suite is missing"
+ )
+ raise TestSuiteError(msg)
+
+ # copy installed test sources cache into test stage dir
+ if test_spec.concrete:
+ cache_source = install_test_root(test_spec.package)
+ cache_dir = pkg.test_suite.current_test_cache_dir
+ if os.path.isdir(cache_source) and not os.path.exists(cache_dir):
+ fs.install_tree(cache_source, cache_dir)
+
+ # copy test data into test stage data dir
+ try:
+ pkg_cls = test_spec.package_class
+ except spack.repo.UnknownPackageError:
+ tty.debug(f"{test_spec.name}: skipping test data copy since no package class found")
+ return
+
+ data_source = Prefix(pkg_cls.package_dir).test
+ data_dir = pkg.test_suite.current_test_data_dir
+ if os.path.isdir(data_source) and not os.path.exists(data_dir):
+ # We assume data dir is used read-only
+ # maybe enforce this later
+ shutil.copytree(data_source, data_dir)
+
+
+def test_function_names(pkg: PackageObjectOrClass, add_virtuals: bool = False) -> List[str]:
+ """Grab the names of all non-empty test functions.
+
+ Args:
+ pkg: package or package class of interest
+ add_virtuals: ``True`` also adds the test methods of the package's
+ provided virtuals, ``False`` only returns test functions of the package
+
+ Returns:
+ names of non-empty test functions
+
+ Raises:
+ ValueError: if pkg is not a package instance or package class
+ """
+ fns = test_functions(pkg, add_virtuals)
+ return [f"{cls_name}.{fn.__name__}" for (cls_name, fn) in fns]
+
+
+def test_functions(
+ pkg: PackageObjectOrClass, add_virtuals: bool = False
+) -> List[Tuple[str, Callable]]:
+ """Grab all non-empty test functions.
+
+ Args:
+ pkg: package or package class of interest
+ add_virtuals: ``True`` also adds the test methods of the package's
+ provided virtuals, ``False`` only returns test functions of the package
+
+ Returns:
+ list of (class name, function) tuples for the non-empty test functions
+
+ Raises:
+ ValueError: if pkg is not a package instance or package class
+ """
+ instance = isinstance(pkg, spack.package_base.PackageBase)
+ if not (instance or issubclass(pkg, spack.package_base.PackageBase)): # type: ignore[arg-type]
+ raise ValueError(f"Expected a package (class), not {pkg} ({type(pkg)})")
+
+ pkg_cls = pkg.__class__ if instance else pkg
+ classes = [pkg_cls]
+ if add_virtuals:
+ vpkgs = virtuals(pkg)
+ for vname in vpkgs:
+ try:
+ classes.append((Spec(vname)).package_class)
+ except spack.repo.UnknownPackageError:
+ tty.debug(f"{vname}: virtual does not appear to have a package file")
+
+ # TODO (post-34236): Remove if removing empty test method check
+ def skip(line):
+ # This should match the lines in the deprecated test() method
+ ln = line.strip()
+ return ln.startswith("#") or ("warn" in ln and "deprecated" in ln)
+
+ doc_regex = r'\s+("""[\w\s\(\)\-\,\;\:]+""")'
+ tests = []
+ for clss in classes:
+ methods = inspect.getmembers(clss, predicate=lambda x: inspect.isfunction(x))
+ for name, test_fn in methods:
+ # TODO (post-34236): "test"->"test_" once remove deprecated methods
+ if not name.startswith("test"):
+ continue
+
+ # TODO (post-34236): Could remove empty method check once remove
+ # TODO (post-34236): deprecated methods though some use cases,
+ # TODO (post-34236): such as checking packages have actual, non-
+ # TODO (post-34236): empty tests, may want this check to remain.
+ source = re.sub(doc_regex, r"", inspect.getsource(test_fn)).splitlines()[1:]
+ lines = [ln.strip() for ln in source if not skip(ln)]
+ if not lines:
+ continue
+
+ tests.append((clss.__name__, test_fn)) # type: ignore[union-attr]
+
+ return tests
+
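A quick sketch (the package name is only an example) of how callers such as `spack info` can now list test methods, including those inherited from provided virtuals:

import spack.install_test
import spack.repo

# 'mpich' is just an example; any package with test_ methods will do
pkg_cls = spack.repo.path.get_pkg_class("mpich")
names = spack.install_test.test_function_names(pkg_cls, add_virtuals=True)
print("\n".join(sorted(names)))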
+
+def process_test_parts(pkg: Pb, test_specs: List[spack.spec.Spec], verbose: bool = False):
+ """Process test parts associated with the package.
+
+ Args:
+ pkg: package being tested
+ test_specs: list of test specs
+ verbose: Display verbose output (suppressed by default)
+
+ Raises:
+ TestSuiteError: package must be part of an active test suite
+ """
+ if pkg is None or pkg.test_suite is None:
+ base = "Cannot process tests"
+ msg = (
+ f"{base} without a package"
+ if pkg is None
+ else f"{pkg.name}: {base}: test suite is missing"
+ )
+ raise TestSuiteError(msg)
+
+ test_suite = pkg.test_suite
+ tester = pkg.tester
+ try:
+ work_dir = test_suite.test_dir_for_spec(pkg.spec)
+ for spec in test_specs:
+ test_suite.current_test_spec = spec
+
+ # grab test functions associated with the spec, which may be virtual
+ try:
+ tests = test_functions(spec.package_class)
+ except spack.repo.UnknownPackageError:
+ # some virtuals don't have a package
+ tests = []
+
+ if len(tests) == 0:
+ tester.status(spec.name, TestStatus.NO_TESTS)
+ continue
+
+ # copy custom and cached test files to the test stage directory
+ copy_test_files(pkg, spec)
+
+ # Run the tests
+ for _, test_fn in tests:
+ with test_part(
+ pkg,
+ test_fn.__name__,
+ purpose=getattr(test_fn, "__doc__"),
+ work_dir=work_dir,
+ verbose=verbose,
+ ):
+ test_fn(pkg)
+
+ # If fail-fast was on, we error out above
+ # If we collect errors, raise them in batch here
+ if tester.test_failures:
+ raise TestFailure(tester.test_failures)
+
+ finally:
+ if tester.ran_tests():
+ fs.touch(tester.tested_file)
+
+ # log one more test message to provide a completion timestamp
+ # for CDash reporting
+ tty.msg("Completed testing")
+
+ lines = tester.summarize()
+ tty.msg("\n{}".format("\n".join(lines)))
+
+ if tester.test_failures:
+ # Print the test log file path
+ tty.msg(f"\n\nSee test results at:\n {tester.test_log_file}")
+ else:
+ tty.msg("No tests to run")
+
+
+def test_process(pkg: Pb, kwargs):
+ verbose = kwargs.get("verbose", True)
+ externals = kwargs.get("externals", False)
+
+ with pkg.tester.test_logger(verbose, externals) as logger:
+ if pkg.spec.external and not externals:
+ print_message(logger, "Skipped tests for external package", verbose)
+ pkg.tester.status(pkg.spec.name, TestStatus.SKIPPED)
+ return
+
+ if not pkg.spec.installed:
+ print_message(logger, "Skipped not installed package", verbose)
+ pkg.tester.status(pkg.spec.name, TestStatus.SKIPPED)
+ return
+
+ # run test methods from the package and all virtuals it provides
+ v_names = virtuals(pkg)
+ test_specs = [pkg.spec] + [spack.spec.Spec(v_name) for v_name in sorted(v_names)]
+ process_test_parts(pkg, test_specs, verbose)
+
+
+def virtuals(pkg):
+ """Return a list of unique virtuals for the package.
+
+ Args:
+ pkg: package of interest
+
+ Returns: names of unique virtual packages
+ """
+ # provided virtuals have to be deduped by name
+ v_names = list({vspec.name for vspec in pkg.virtuals_provided})
+
+ # hack for compilers that are not dependencies (yet)
+ # TODO: this all eventually goes away
+ c_names = ("gcc", "intel", "intel-parallel-studio", "pgi")
+ if pkg.name in c_names:
+ v_names.extend(["c", "cxx", "fortran"])
+ if pkg.spec.satisfies("llvm+clang"):
+ v_names.extend(["c", "cxx"])
+ return v_names
+
+
def get_all_test_suites():
"""Retrieves all validly staged TestSuites
@@ -83,7 +771,7 @@ def get_named_test_suites(name):
list: a list of matching TestSuite instances, which may be empty if none
Raises:
- TestSuiteNameError: If no name is provided
+ Exception: TestSuiteNameError if no name is provided
"""
if not name:
raise TestSuiteNameError("Test suite name is required.")
@@ -92,22 +780,22 @@ def get_named_test_suites(name):
return [ts for ts in test_suites if ts.name == name]
-def get_test_suite(name):
+def get_test_suite(name: str) -> Optional["TestSuite"]:
"""Ensure there is only one matching test suite with the provided name.
Returns:
- str or None: the name if one matching test suite, else None
+ the matching test suite if exactly one is found, else None
Raises:
- TestSuiteNameError: If there is more than one matching TestSuite
+ TestSuiteNameError: if more than one matching TestSuite is found
"""
- names = get_named_test_suites(name)
- if len(names) > 1:
- raise TestSuiteNameError('Too many suites named "{0}". May shadow hash.'.format(name))
+ suites = get_named_test_suites(name)
+ if len(suites) > 1:
+ raise TestSuiteNameError(f"Too many suites named '{name}'. May shadow hash.")
- if not names:
+ if not suites:
return None
- return names[0]
+ return suites[0]
def write_test_suite_file(suite):
@@ -116,24 +804,25 @@ def write_test_suite_file(suite):
sjson.dump(suite.to_dict(), stream=f)
-def write_test_summary(num_failed, num_skipped, num_untested, num_specs):
- """Write a well formatted summary of the totals for each relevant status
- category."""
- failed = "{0} failed, ".format(num_failed) if num_failed else ""
- skipped = "{0} skipped, ".format(num_skipped) if num_skipped else ""
- no_tests = "{0} no-tests, ".format(num_untested) if num_untested else ""
- num_passed = num_specs - num_failed - num_untested - num_skipped
+def write_test_summary(counts: "Counter"):
+ """Write summary of the totals for each relevant status category.
- print(
- "{:=^80}".format(
- " {0}{1}{2}{3} passed of {4} specs ".format(
- failed, no_tests, skipped, num_passed, num_specs
- )
- )
- )
+ Args:
+ counts: counts of the occurrences of relevant test status types
+ """
+ summary = [f"{n} {s.lower()}" for s, n in counts.items() if n > 0]
+ try:
+ # New in Python 3.10
+ total = counts.total() # type: ignore[attr-defined]
+ except AttributeError:
+ nums = [n for _, n in counts.items()]
+ total = sum(nums)
+ if total:
+ print("{:=^80}".format(" {} of {} ".format(", ".join(summary), plural(total, "spec"))))
-class TestSuite(object):
+
+class TestSuite:
"""The class that manages specs for ``spack test run`` execution."""
def __init__(self, specs, alias=None):
@@ -147,7 +836,7 @@ class TestSuite(object):
self._hash = None
self._stage = None
- self.fails = 0
+ self.counts: "Counter" = Counter()
@property
def name(self):
@@ -173,12 +862,11 @@ class TestSuite(object):
fail_first = kwargs.get("fail_first", False)
externals = kwargs.get("externals", False)
- skipped, untested = 0, 0
for spec in self.specs:
try:
if spec.package.test_suite:
raise TestSuiteSpecError(
- "Package {0} cannot be run in two test suites at once".format(
+ "Package {} cannot be run in two test suites at once".format(
spec.package.name
)
)
@@ -201,45 +889,55 @@ class TestSuite(object):
if remove_directory:
shutil.rmtree(test_dir)
- # Log test status based on whether any non-pass-only test
- # functions were called
tested = os.path.exists(self.tested_file_for_spec(spec))
if tested:
- status = "PASSED"
+ status = TestStatus.PASSED
else:
self.ensure_stage()
if spec.external and not externals:
- status = "SKIPPED"
- skipped += 1
+ status = TestStatus.SKIPPED
elif not spec.installed:
- status = "SKIPPED"
- skipped += 1
+ status = TestStatus.SKIPPED
else:
- status = "NO-TESTS"
- untested += 1
+ status = TestStatus.NO_TESTS
+ self.counts[status] += 1
self.write_test_result(spec, status)
except BaseException as exc:
- self.fails += 1
- tty.debug("Test failure: {0}".format(str(exc)))
+ status = TestStatus.FAILED
+ self.counts[status] += 1
+ tty.debug(f"Test failure: {str(exc)}")
+
if isinstance(exc, (SyntaxError, TestSuiteSpecError)):
# Create the test log file and report the error.
self.ensure_stage()
- msg = "Testing package {0}\n{1}".format(self.test_pkg_id(spec), str(exc))
+ msg = f"Testing package {self.test_pkg_id(spec)}\n{str(exc)}"
_add_msg_to_file(self.log_file_for_spec(spec), msg)
- self.write_test_result(spec, "FAILED")
+ msg = f"Test failure: {str(exc)}"
+ _add_msg_to_file(self.log_file_for_spec(spec), msg)
+ self.write_test_result(spec, TestStatus.FAILED)
if fail_first:
break
+
finally:
spec.package.test_suite = None
self.current_test_spec = None
self.current_base_spec = None
- write_test_summary(self.fails, skipped, untested, len(self.specs))
+ write_test_summary(self.counts)
+
+ if self.counts[TestStatus.FAILED]:
+ for spec in self.specs:
+ print(
+ "\nSee {} test results at:\n {}".format(
+ spec.format("{name}-{version}-{hash:7}"), self.log_file_for_spec(spec)
+ )
+ )
- if self.fails:
- raise TestSuiteFailure(self.fails)
+ failures = self.counts[TestStatus.FAILED]
+ if failures:
+ raise TestSuiteFailure(failures)
def ensure_stage(self):
"""Ensure the test suite stage directory exists."""
@@ -272,7 +970,7 @@ class TestSuite(object):
"""The standard install test package identifier.
Args:
- spec (spack.spec.Spec): instance of the spec under test
+ spec: instance of the spec under test
Returns:
str: the install test package identifier
@@ -379,7 +1077,7 @@ class TestSuite(object):
spec (spack.spec.Spec): instance of the spec under test
result (str): result from the spec's test execution (e.g, PASSED)
"""
- msg = "{0} {1}".format(self.test_pkg_id(spec), result)
+ msg = f"{self.test_pkg_id(spec)} {result}"
_add_msg_to_file(self.results_file, msg)
def write_reproducibility_data(self):
@@ -419,9 +1117,8 @@ class TestSuite(object):
specs: list of the test suite's specs in dictionary form
alias: the test suite alias
-
Returns:
- TestSuite: Instance of TestSuite created from the specs
+ TestSuite: Instance created from the specs
"""
specs = [Spec.from_dict(spec_dict) for spec_dict in d["specs"]]
alias = d.get("alias", None)
@@ -435,16 +1132,19 @@ class TestSuite(object):
Args:
filename (str): The path to the JSON file containing the test
suite specs and optional alias.
+
+ Raises:
+ BaseException: sjson.SpackJSONError if there is a problem parsing the file
"""
try:
- with open(filename, "r") as f:
+ with open(filename) as f:
data = sjson.load(f)
test_suite = TestSuite.from_dict(data)
content_hash = os.path.basename(os.path.dirname(filename))
test_suite._hash = content_hash
return test_suite
except Exception as e:
- raise sjson.SpackJSONError("error parsing JSON TestSuite:", str(e)) from e
+ raise sjson.SpackJSONError("error parsing JSON TestSuite:", e)
def _add_msg_to_file(filename, msg):
@@ -455,20 +1155,29 @@ def _add_msg_to_file(filename, msg):
msg (str): message to be appended to the file
"""
with open(filename, "a+") as f:
- f.write("{0}\n".format(msg))
+ f.write(f"{msg}\n")
+
+
+class SkipTest(Exception):
+ """Raised when a test (part) is being skipped."""
class TestFailure(spack.error.SpackError):
"""Raised when package tests have failed for an installation."""
- def __init__(self, failures):
+ def __init__(self, failures: List[TestFailureType]):
# Failures are all exceptions
- msg = "%d tests failed.\n" % len(failures)
+ num = len(failures)
+ msg = "{} failed.\n".format(plural(num, "test"))
for failure, message in failures:
msg += "\n\n%s\n" % str(failure)
msg += "\n%s\n" % message
- super(TestFailure, self).__init__(msg)
+ super().__init__(msg)
+
+
+class TestSuiteError(spack.error.SpackError):
+ """Raised when there is an error with the test suite."""
class TestSuiteFailure(spack.error.SpackError):
@@ -477,7 +1186,7 @@ class TestSuiteFailure(spack.error.SpackError):
def __init__(self, num_failures):
msg = "%d test(s) in the suite failed.\n" % num_failures
- super(TestSuiteFailure, self).__init__(msg)
+ super().__init__(msg)
class TestSuiteSpecError(spack.error.SpackError):
diff --git a/lib/spack/spack/installer.py b/lib/spack/spack/installer.py
index 40018ee609..3329b1a1ca 100644
--- a/lib/spack/spack/installer.py
+++ b/lib/spack/spack/installer.py
@@ -278,6 +278,19 @@ def _print_installed_pkg(message):
print(colorize("@*g{[+]} ") + spack.util.path.debug_padded_filter(message))
+def print_install_test_log(pkg: "spack.package_base.PackageBase"):
+ """Output install test log file path but only if have test failures.
+
+ Args:
+ pkg: instance of the package under test
+ """
+ if not pkg.run_tests or not (pkg.tester and pkg.tester.test_failures):
+ # The tests were not run or there were no test failures
+ return
+
+ pkg.tester.print_log_path()
+
+
def _print_timer(pre, pkg_id, timer):
phases = ["{}: {}.".format(p.capitalize(), _hms(timer.duration(p))) for p in timer.phases]
phases.append("Total: {}".format(_hms(timer.duration())))
@@ -536,6 +549,25 @@ def install_msg(name, pid):
return pre + colorize("@*{Installing} @*g{%s}" % name)
+def archive_install_logs(pkg, phase_log_dir):
+ """
+ Copy install logs to their destination directory(ies)
+ Args:
+ pkg (spack.package_base.PackageBase): the package that was built and installed
+ phase_log_dir (str): path to the archive directory
+ """
+ # Archive the whole stdout + stderr for the package
+ fs.install(pkg.log_path, pkg.install_log_path)
+
+ # Archive all phase log paths
+ for phase_log in pkg.phase_log_files:
+ log_file = os.path.basename(phase_log)
+ fs.install(phase_log, os.path.join(phase_log_dir, log_file))
+
+ # Archive the install-phase test log, if present
+ pkg.archive_install_test_log()
+
+
def log(pkg):
"""
Copy provenance into the install directory on success
@@ -553,22 +585,11 @@ def log(pkg):
# FIXME : this potentially catches too many things...
tty.debug(e)
- # Archive the whole stdout + stderr for the package
- fs.install(pkg.log_path, pkg.install_log_path)
-
- # Archive all phase log paths
- for phase_log in pkg.phase_log_files:
- log_file = os.path.basename(phase_log)
- log_file = os.path.join(os.path.dirname(packages_dir), log_file)
- fs.install(phase_log, log_file)
+ archive_install_logs(pkg, os.path.dirname(packages_dir))
# Archive the environment modifications for the build.
fs.install(pkg.env_mods_path, pkg.install_env_path)
- # Archive the install-phase test log, if present
- if pkg.test_install_log_path and os.path.exists(pkg.test_install_log_path):
- fs.install(pkg.test_install_log_path, pkg.install_test_install_log_path)
-
if os.path.exists(pkg.configure_args_path):
# Archive the args used for the build
fs.install(pkg.configure_args_path, pkg.install_configure_args_path)
@@ -1932,14 +1953,17 @@ class BuildProcessInstaller(object):
self._real_install()
+ # Run post install hooks before build stage is removed.
+ self.timer.start("post-install")
+ spack.hooks.post_install(self.pkg.spec, self.explicit)
+ self.timer.stop("post-install")
+
# Stop the timer and save results
self.timer.stop()
with open(self.pkg.times_log_path, "w") as timelog:
self.timer.write_json(timelog)
- # Run post install hooks before build stage is removed.
- spack.hooks.post_install(self.pkg.spec, self.explicit)
-
+ print_install_test_log(self.pkg)
_print_timer(pre=self.pre, pkg_id=self.pkg_id, timer=self.timer)
_print_installed_pkg(self.pkg.prefix)
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 2d53b72112..6005543709 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -69,7 +69,15 @@ from spack.build_systems.xorg import XorgPackage
from spack.builder import run_after, run_before
from spack.dependency import all_deptypes
from spack.directives import *
-from spack.install_test import get_escaped_text_output
+from spack.install_test import (
+ SkipTest,
+ cache_extra_test_sources,
+ check_outputs,
+ find_required_file,
+ get_escaped_text_output,
+ install_test_root,
+ test_part,
+)
from spack.installer import (
ExternalPackageError,
InstallError,
diff --git a/lib/spack/spack/package_base.py b/lib/spack/spack/package_base.py
index 2da5380153..e1741e9fc0 100644
--- a/lib/spack/spack/package_base.py
+++ b/lib/spack/spack/package_base.py
@@ -25,13 +25,12 @@ import sys
import textwrap
import time
import traceback
-import types
import warnings
-from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
+from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, TypeVar
import llnl.util.filesystem as fsys
import llnl.util.tty as tty
-from llnl.util.lang import classproperty, memoized, nullcontext
+from llnl.util.lang import classproperty, memoized
from llnl.util.link_tree import LinkTree
import spack.compilers
@@ -55,12 +54,18 @@ import spack.util.environment
import spack.util.path
import spack.util.web
from spack.filesystem_view import YamlFilesystemView
-from spack.install_test import TestFailure, TestSuite
+from spack.install_test import (
+ PackageTest,
+ TestFailure,
+ TestStatus,
+ TestSuite,
+ cache_extra_test_sources,
+ install_test_root,
+)
from spack.installer import InstallError, PackageInstaller
from spack.stage import ResourceStage, Stage, StageComposite, compute_stage_name
from spack.util.executable import ProcessError, which
from spack.util.package_hash import package_hash
-from spack.util.prefix import Prefix
from spack.util.web import FetchError
from spack.version import GitVersion, StandardVersion, Version
@@ -73,24 +78,21 @@ FLAG_HANDLER_TYPE = Callable[[str, Iterable[str]], FLAG_HANDLER_RETURN_TYPE]
_ALLOWED_URL_SCHEMES = ["http", "https", "ftp", "file", "git"]
-# Filename for the Spack build/install log.
+#: Filename for the Spack build/install log.
_spack_build_logfile = "spack-build-out.txt"
-# Filename for the Spack build/install environment file.
+#: Filename for the Spack build/install environment file.
_spack_build_envfile = "spack-build-env.txt"
-# Filename for the Spack build/install environment modifications file.
+#: Filename for the Spack build/install environment modifications file.
_spack_build_envmodsfile = "spack-build-env-mods.txt"
-# Filename for the Spack install phase-time test log.
-_spack_install_test_log = "install-time-test-log.txt"
-
-# Filename of json with total build and phase times (seconds)
-_spack_times_log = "install_times.json"
-
-# Filename for the Spack configure args file.
+#: Filename for the Spack configure args file.
_spack_configure_argsfile = "spack-configure-args.txt"
+#: Filename of json with total build and phase times (seconds)
+spack_times_log = "install_times.json"
+
def deprecated_version(pkg, version):
"""Return True if the version is deprecated, False otherwise.
@@ -181,8 +183,7 @@ class DetectablePackageMeta(object):
def __init__(cls, name, bases, attr_dict):
if hasattr(cls, "executables") and hasattr(cls, "libraries"):
msg = "a package can have either an 'executables' or 'libraries' attribute"
- msg += " [package '{0.name}' defines both]"
- raise spack.error.SpackError(msg.format(cls))
+ raise spack.error.SpackError(f"{msg} [package '{name}' defines both]")
# On windows, extend the list of regular expressions to look for
# filenames ending with ".exe"
@@ -423,17 +424,7 @@ class PackageViewMixin(object):
view.remove_files(merge_map.values())
-def test_log_pathname(test_stage, spec):
- """Build the pathname of the test log file
-
- Args:
- test_stage (str): path to the test stage directory
- spec (spack.spec.Spec): instance of the spec under test
-
- Returns:
- (str): the pathname of the test log file
- """
- return os.path.join(test_stage, "test-{0}-out.txt".format(TestSuite.test_pkg_id(spec)))
+Pb = TypeVar("Pb", bound="PackageBase")
class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
@@ -638,19 +629,13 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
"tags",
]
- #: Boolean. If set to ``True``, the smoke/install test requires a compiler.
- #: This is currently used by smoke tests to ensure a compiler is available
- #: to build a custom test code.
- test_requires_compiler = False
-
- #: List of test failures encountered during a smoke/install test run.
- test_failures = None
+ #: Set to ``True`` to indicate the stand-alone test requires a compiler.
+ #: It is used to ensure a compiler and build dependencies like 'cmake'
+ #: are available to build custom test code.
+ test_requires_compiler: bool = False
- #: TestSuite instance used to manage smoke/install tests for one or more specs.
- test_suite = None
-
- #: Path to the log file used for tests
- test_log_file = None
+ #: TestSuite instance used to manage stand-alone tests for 1+ specs.
+ test_suite: Optional["TestSuite"] = None
def __init__(self, spec):
# this determines how the package should be built.
@@ -672,6 +657,7 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
# init internal variables
self._stage = None
self._fetcher = None
+ self._tester: Optional["PackageTest"] = None
# Set up timing variables
self._fetch_time = 0.0
@@ -736,9 +722,9 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
for name, conditions in cls.dependencies.items():
# check whether this dependency could be of the type asked for
- types = [dep.type for cond, dep in conditions.items()]
- types = set.union(*types)
- if not any(d in types for d in deptype):
+ deptypes = [dep.type for cond, dep in conditions.items()]
+ deptypes = set.union(*deptypes)
+ if not any(d in deptypes for d in deptype):
continue
# expand virtuals if enabled, otherwise just stop at virtuals
@@ -1149,29 +1135,40 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
return os.path.join(self.stage.path, _spack_configure_argsfile)
@property
- def test_install_log_path(self):
- """Return the install phase-time test log file path, if set."""
- return getattr(self, "test_log_file", None)
-
- @property
- def install_test_install_log_path(self):
- """Return the install location for the install phase-time test log."""
- return fsys.join_path(self.metadata_dir, _spack_install_test_log)
-
- @property
def times_log_path(self):
"""Return the times log json file."""
- return os.path.join(self.metadata_dir, _spack_times_log)
+ return os.path.join(self.metadata_dir, spack_times_log)
@property
def install_configure_args_path(self):
"""Return the configure args file path on successful installation."""
return os.path.join(self.metadata_dir, _spack_configure_argsfile)
+ # TODO (post-34236): Update tests and all packages that use this as a
+ # TODO (post-34236): package method to the function already available
+ # TODO (post-34236): to packages. Once done, remove this property.
@property
def install_test_root(self):
"""Return the install test root directory."""
- return os.path.join(self.metadata_dir, "test")
+ tty.warn(
+ "The 'pkg.install_test_root' property is deprecated with removal "
+ "expected v0.21. Use 'install_test_root(pkg)' instead."
+ )
+ return install_test_root(self)
+
+ def archive_install_test_log(self):
+ """Archive the install-phase test log, if present."""
+ if getattr(self, "tester", None):
+ self.tester.archive_install_test_log(self.metadata_dir)
+
+ @property
+ def tester(self):
+ if not self.spec.versions.concrete:
+ raise ValueError("Cannot retrieve tester for package without concrete version.")
+
+ if not self._tester:
+ self._tester = PackageTest(self)
+ return self._tester
@property
def installed(self):
@@ -1208,7 +1205,7 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
@property
def fetcher(self):
if not self.spec.versions.concrete:
- raise ValueError("Cannot retrieve fetcher for" " package without concrete version.")
+ raise ValueError("Cannot retrieve fetcher for package without concrete version.")
if not self._fetcher:
self._fetcher = self._make_fetcher()
return self._fetcher
@@ -1842,6 +1839,9 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
builder = PackageInstaller([(self, kwargs)])
builder.install()
+ # TODO (post-34236): Update tests and all packages that use this as a
+ # TODO (post-34236): package method to the routine made available to
+ # TODO (post-34236): packages. Once done, remove this method.
def cache_extra_test_sources(self, srcs):
"""Copy relative source paths to the corresponding install test subdir
@@ -1856,45 +1856,13 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
be copied to the corresponding location(s) under the install
testing directory.
"""
- paths = [srcs] if isinstance(srcs, str) else srcs
-
- for path in paths:
- src_path = os.path.join(self.stage.source_path, path)
- dest_path = os.path.join(self.install_test_root, path)
- if os.path.isdir(src_path):
- fsys.install_tree(src_path, dest_path)
- else:
- fsys.mkdirp(os.path.dirname(dest_path))
- fsys.copy(src_path, dest_path)
-
- @contextlib.contextmanager
- def _setup_test(self, verbose, externals):
- self.test_failures = []
- if self.test_suite:
- self.test_log_file = self.test_suite.log_file_for_spec(self.spec)
- self.tested_file = self.test_suite.tested_file_for_spec(self.spec)
- pkg_id = self.test_suite.test_pkg_id(self.spec)
- else:
- self.test_log_file = fsys.join_path(self.stage.path, _spack_install_test_log)
- self.test_suite = TestSuite([self.spec])
- self.test_suite.stage = self.stage.path
- pkg_id = self.spec.format("{name}-{version}-{hash:7}")
-
- fsys.touch(self.test_log_file) # Otherwise log_parse complains
-
- with tty.log.log_output(self.test_log_file, verbose) as logger:
- with logger.force_echo():
- tty.msg("Testing package {0}".format(pkg_id))
-
- # use debug print levels for log file to record commands
- old_debug = tty.is_debug()
- tty.set_debug(True)
-
- try:
- yield logger
- finally:
- # reset debug level
- tty.set_debug(old_debug)
+ msg = (
+ "'pkg.cache_extra_test_sources(srcs) is deprecated with removal "
+ "expected in v0.21. Use 'cache_extra_test_sources(pkg, srcs)' "
+ "instead."
+ )
+ warnings.warn(msg)
+ cache_extra_test_sources(self, srcs)
def do_test(self, dirty=False, externals=False):
if self.test_requires_compiler:
@@ -1909,15 +1877,31 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
)
return
- kwargs = {"dirty": dirty, "fake": False, "context": "test", "externals": externals}
- if tty.is_verbose():
- kwargs["verbose"] = True
- spack.build_environment.start_build_process(self, test_process, kwargs)
+ kwargs = {
+ "dirty": dirty,
+ "fake": False,
+ "context": "test",
+ "externals": externals,
+ "verbose": tty.is_verbose(),
+ }
+
+ self.tester.stand_alone_tests(kwargs)
+ # TODO (post-34236): Remove this deprecated method when eliminate test,
+ # TODO (post-34236): run_test, etc.
+ @property
+ def _test_deprecated_warning(self):
+ alt = f"Use any name starting with 'test_' instead in {self.spec.name}."
+ return f"The 'test' method is deprecated. {alt}"
+
+ # TODO (post-34236): Remove this deprecated method when eliminate test,
+ # TODO (post-34236): run_test, etc.
def test(self):
# Defer tests to virtual and concrete packages
- pass
+ warnings.warn(self._test_deprecated_warning)
+ # TODO (post-34236): Remove this deprecated method when eliminate test,
+ # TODO (post-34236): run_test, etc.
def run_test(
self,
exe,
@@ -1925,7 +1909,7 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
expected=[],
status=0,
installed=False,
- purpose="",
+ purpose=None,
skip_missing=False,
work_dir=None,
):
@@ -1947,22 +1931,56 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
in the install prefix bin directory or the provided work_dir
work_dir (str or None): path to the smoke test directory
"""
+
+ def test_title(purpose, test_name):
+ if not purpose:
+ return f"test: {test_name}: execute {test_name}"
+
+ match = re.search(r"test: ([^:]*): (.*)", purpose)
+ if match:
+ # The test title has all the expected parts
+ return purpose
+
+ match = re.search(r"test: (.*)", purpose)
+ if match:
+ reason = match.group(1)
+ return f"test: {test_name}: {reason}"
+
+ return f"test: {test_name}: {purpose}"
+
+ base_exe = os.path.basename(exe)
+ alternate = f"Use 'test_part' instead for {self.spec.name} to process {base_exe}."
+ warnings.warn(f"The 'run_test' method is deprecated. {alternate}")
+
+ extra = re.compile(r"[\s,\- ]")
+ details = (
+ [extra.sub("", options)]
+ if isinstance(options, str)
+ else [extra.sub("", os.path.basename(opt)) for opt in options]
+ )
+ details = "_".join([""] + details) if details else ""
+ test_name = f"test_{base_exe}{details}"
+ tty.info(test_title(purpose, test_name), format="g")
+
wdir = "." if work_dir is None else work_dir
with fsys.working_dir(wdir, create=True):
try:
runner = which(exe)
if runner is None and skip_missing:
+ self.tester.status(test_name, TestStatus.SKIPPED, f"{exe} is missing")
return
- assert runner is not None, "Failed to find executable '{0}'".format(exe)
+ assert runner is not None, f"Failed to find executable '{exe}'"
self._run_test_helper(runner, options, expected, status, installed, purpose)
- print("PASSED")
+ self.tester.status(test_name, TestStatus.PASSED, None)
return True
- except BaseException as e:
+ except (AssertionError, BaseException) as e:
# print a summary of the error to the log file
# so that cdash and junit reporters know about it
exc_type, _, tb = sys.exc_info()
- print("FAILED: {0}".format(e))
+
+ self.tester.status(test_name, TestStatus.FAILED, str(e))
+
import traceback
# remove the current call frame to exclude the extract_stack
@@ -1991,7 +2009,7 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
if exc_type is spack.util.executable.ProcessError:
out = io.StringIO()
spack.build_environment.write_log_summary(
- out, "test", self.test_log_file, last=1
+ out, "test", self.tester.test_log_file, last=1
)
m = out.getvalue()
else:
@@ -2007,28 +2025,27 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
if spack.config.get("config:fail_fast", False):
raise TestFailure([(exc, m)])
else:
- self.test_failures.append((exc, m))
+ self.tester.add_failure(exc, m)
return False
+ # TODO (post-34236): Remove this deprecated method when eliminate test,
+ # TODO (post-34236): run_test, etc.
def _run_test_helper(self, runner, options, expected, status, installed, purpose):
status = [status] if isinstance(status, int) else status
expected = [expected] if isinstance(expected, str) else expected
options = [options] if isinstance(options, str) else options
- if purpose:
- tty.msg(purpose)
- else:
- tty.debug("test: {0}: expect command status in {1}".format(runner.name, status))
-
if installed:
- msg = "Executable '{0}' expected in prefix".format(runner.name)
- msg += ", found in {0} instead".format(runner.path)
+ msg = f"Executable '{runner.name}' expected in prefix, "
+ msg += f"found in {runner.path} instead"
assert runner.path.startswith(self.spec.prefix), msg
+ tty.msg(f"Expecting return code in {status}")
+
try:
output = runner(*options, output=str.split, error=str.split)
- assert 0 in status, "Expected {0} execution to fail".format(runner.name)
+ assert 0 in status, f"Expected {runner.name} execution to fail"
except ProcessError as err:
output = str(err)
match = re.search(r"exited with status ([0-9]+)", output)
@@ -2037,8 +2054,8 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
for check in expected:
cmd = " ".join([runner.name] + options)
- msg = "Expected '{0}' to match output of `{1}`".format(check, cmd)
- msg += "\n\nOutput: {0}".format(output)
+ msg = f"Expected '{check}' to match output of `{cmd}`"
+ msg += f"\n\nOutput: {output}"
assert re.search(check, output), msg
def unit_test_check(self):
@@ -2068,21 +2085,23 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
return self.install_log_path if self.spec.installed else self.log_path
@classmethod
- def inject_flags(cls: Type, name: str, flags: Iterable[str]) -> FLAG_HANDLER_RETURN_TYPE:
+ def inject_flags(cls: Type[Pb], name: str, flags: Iterable[str]) -> FLAG_HANDLER_RETURN_TYPE:
"""
flag_handler that injects all flags through the compiler wrapper.
"""
return flags, None, None
@classmethod
- def env_flags(cls: Type, name: str, flags: Iterable[str]):
+ def env_flags(cls: Type[Pb], name: str, flags: Iterable[str]) -> FLAG_HANDLER_RETURN_TYPE:
"""
flag_handler that adds all flags to canonical environment variables.
"""
return None, flags, None
@classmethod
- def build_system_flags(cls: Type, name: str, flags: Iterable[str]) -> FLAG_HANDLER_RETURN_TYPE:
+ def build_system_flags(
+ cls: Type[Pb], name: str, flags: Iterable[str]
+ ) -> FLAG_HANDLER_RETURN_TYPE:
"""
flag_handler that passes flags to the build system arguments. Any
package using `build_system_flags` must also implement
@@ -2170,7 +2189,7 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
return self._flag_handler
@flag_handler.setter
- def flag_handler(self, var: FLAG_HANDLER_TYPE):
+ def flag_handler(self, var: FLAG_HANDLER_TYPE) -> None:
self._flag_handler = var
# The flag handler method is called for each of the allowed compiler flags.
@@ -2417,165 +2436,6 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
def builder(self):
return spack.builder.create(self)
- @staticmethod
- def run_test_callbacks(builder, method_names, callback_type="install"):
- """Tries to call all of the listed methods, returning immediately
- if the list is None."""
- if not builder.pkg.run_tests or method_names is None:
- return
-
- fail_fast = spack.config.get("config:fail_fast", False)
- with builder.pkg._setup_test(verbose=False, externals=False) as logger:
- # Report running each of the methods in the build log
- print_test_message(logger, "Running {0}-time tests".format(callback_type), True)
- builder.pkg.test_suite.current_test_spec = builder.pkg.spec
- builder.pkg.test_suite.current_base_spec = builder.pkg.spec
-
- if "test" in method_names:
- _copy_cached_test_files(builder.pkg, builder.pkg.spec)
-
- for name in method_names:
- try:
- fn = getattr(builder, name)
-
- msg = "RUN-TESTS: {0}-time tests [{1}]".format(callback_type, name)
- print_test_message(logger, msg, True)
-
- fn()
- except AttributeError as e:
- msg = "RUN-TESTS: method not implemented [{0}]".format(name)
- print_test_message(logger, msg, True)
-
- builder.pkg.test_failures.append((e, msg))
- if fail_fast:
- break
-
- # Raise any collected failures here
- if builder.pkg.test_failures:
- raise TestFailure(builder.pkg.test_failures)
-
-
-def has_test_method(pkg):
- """Determine if the package defines its own stand-alone test method.
-
- Args:
- pkg (str): the package being checked
-
- Returns:
- (bool): ``True`` if the package overrides the default method; else
- ``False``
- """
- if not inspect.isclass(pkg):
- tty.die("{0}: is not a class, it is {1}".format(pkg, type(pkg)))
-
- return (issubclass(pkg, PackageBase) and pkg.test != PackageBase.test) or (
- isinstance(pkg, PackageBase) and pkg.test.__func__ != PackageBase.test
- )
-
-
-def print_test_message(logger, msg, verbose):
- if verbose:
- with logger.force_echo():
- tty.msg(msg)
- else:
- tty.msg(msg)
-
-
-def _copy_cached_test_files(pkg, spec):
- """Copy any cached stand-alone test-related files."""
-
- # copy installed test sources cache into test cache dir
- if spec.concrete:
- cache_source = spec.package.install_test_root
- cache_dir = pkg.test_suite.current_test_cache_dir
- if os.path.isdir(cache_source) and not os.path.exists(cache_dir):
- fsys.install_tree(cache_source, cache_dir)
-
- # copy test data into test data dir
- data_source = Prefix(spec.package.package_dir).test
- data_dir = pkg.test_suite.current_test_data_dir
- if os.path.isdir(data_source) and not os.path.exists(data_dir):
- # We assume data dir is used read-only
- # maybe enforce this later
- shutil.copytree(data_source, data_dir)
-
-
-def test_process(pkg, kwargs):
- verbose = kwargs.get("verbose", False)
- externals = kwargs.get("externals", False)
-
- with pkg._setup_test(verbose, externals) as logger:
- if pkg.spec.external and not externals:
- print_test_message(logger, "Skipped tests for external package", verbose)
- return
-
- if not pkg.spec.installed:
- print_test_message(logger, "Skipped not installed package", verbose)
- return
-
- # run test methods from the package and all virtuals it
- # provides virtuals have to be deduped by name
- v_names = list(set([vspec.name for vspec in pkg.virtuals_provided]))
-
- # hack for compilers that are not dependencies (yet)
- # TODO: this all eventually goes away
- c_names = ("gcc", "intel", "intel-parallel-studio", "pgi")
- if pkg.name in c_names:
- v_names.extend(["c", "cxx", "fortran"])
- if pkg.spec.satisfies("llvm+clang"):
- v_names.extend(["c", "cxx"])
-
- test_specs = [pkg.spec] + [spack.spec.Spec(v_name) for v_name in sorted(v_names)]
-
- ran_actual_test_function = False
- try:
- with fsys.working_dir(pkg.test_suite.test_dir_for_spec(pkg.spec)):
- for spec in test_specs:
- pkg.test_suite.current_test_spec = spec
- # Fail gracefully if a virtual has no package/tests
- try:
- spec_pkg = spec.package
- except spack.repo.UnknownPackageError:
- continue
-
- _copy_cached_test_files(pkg, spec)
-
- # grab the function for each method so we can call
- # it with the package
- test_fn = spec_pkg.__class__.test
- if not isinstance(test_fn, types.FunctionType):
- test_fn = test_fn.__func__
-
- # Skip any test methods consisting solely of 'pass'
- # since they do not contribute to package testing.
- source = (inspect.getsource(test_fn)).splitlines()[1:]
- lines = (ln.strip() for ln in source)
- statements = [ln for ln in lines if not ln.startswith("#")]
- if len(statements) > 0 and statements[0] == "pass":
- continue
-
- # Run the tests
- ran_actual_test_function = True
- context = logger.force_echo if verbose else nullcontext
- with context():
- test_fn(pkg)
-
- # If fail-fast was on, we error out above
- # If we collect errors, raise them in batch here
- if pkg.test_failures:
- raise TestFailure(pkg.test_failures)
-
- finally:
- # flag the package as having been tested (i.e., ran one or more
- # non-pass-only methods
- if ran_actual_test_function:
- fsys.touch(pkg.tested_file)
- # log one more test message to provide a completion timestamp
- # for CDash reporting
- tty.msg("Completed testing")
- else:
- print_test_message(logger, "No tests to run", verbose)
-
inject_flags = PackageBase.inject_flags
env_flags = PackageBase.env_flags
@@ -2663,16 +2523,6 @@ class PackageError(spack.error.SpackError):
super(PackageError, self).__init__(message, long_msg)
-class PackageVersionError(PackageError):
- """Raised when a version URL cannot automatically be determined."""
-
- def __init__(self, version):
- super(PackageVersionError, self).__init__(
- "Cannot determine a URL automatically for version %s" % version,
- "Please provide a url for this version in the package.py file.",
- )
-
-
class NoURLError(PackageError):
"""Raised when someone tries to build a URL for a package with no URLs."""
diff --git a/lib/spack/spack/repo.py b/lib/spack/spack/repo.py
index f68ce7ebdc..21bfeb8474 100644
--- a/lib/spack/spack/repo.py
+++ b/lib/spack/spack/repo.py
@@ -1063,14 +1063,21 @@ class Repo(object):
"Repository %s does not contain package %s." % (self.namespace, spec.fullname)
)
- # Install patch files needed by the package.
+ package_path = self.filename_for_package_name(spec.name)
+ if not os.path.exists(package_path):
+ # Spec has no files (e.g., package, patches) to copy
+ tty.debug(f"{spec.name} does not have a package to dump")
+ return
+
+ # Install patch files needed by the (concrete) package.
fs.mkdirp(path)
- for patch in itertools.chain.from_iterable(spec.package.patches.values()):
- if patch.path:
- if os.path.exists(patch.path):
- fs.install(patch.path, path)
- else:
- tty.warn("Patch file did not exist: %s" % patch.path)
+ if spec.concrete:
+ for patch in itertools.chain.from_iterable(spec.package.patches.values()):
+ if patch.path:
+ if os.path.exists(patch.path):
+ fs.install(patch.path, path)
+ else:
+ tty.warn("Patch file did not exist: %s" % patch.path)
# Install the package.py file itself.
fs.install(self.filename_for_package_name(spec.name), path)
diff --git a/lib/spack/spack/report.py b/lib/spack/spack/report.py
index 00fe7eb5cf..f7d16b5e29 100644
--- a/lib/spack/spack/report.py
+++ b/lib/spack/spack/report.py
@@ -133,8 +133,9 @@ class InfoCollector:
# Everything else is an error (the installation
# failed outside of the child process)
package["result"] = "error"
- package["stdout"] = self.fetch_log(pkg)
package["message"] = str(exc) or "Unknown error"
+ package["stdout"] = self.fetch_log(pkg)
+ package["stdout"] += package["message"]
package["exception"] = traceback.format_exc()
raise
diff --git a/lib/spack/spack/reporters/cdash.py b/lib/spack/spack/reporters/cdash.py
index 582d6cd4df..4517530751 100644
--- a/lib/spack/spack/reporters/cdash.py
+++ b/lib/spack/spack/reporters/cdash.py
@@ -12,7 +12,7 @@ import re
import socket
import time
import xml.sax.saxutils
-from typing import Dict
+from typing import Dict, Optional
from urllib.parse import urlencode
from urllib.request import HTTPHandler, Request, build_opener
@@ -113,14 +113,14 @@ class CDash(Reporter):
else self.base_buildname
)
- def build_report_for_package(self, directory_name, package, duration):
+ def build_report_for_package(self, report_dir, package, duration):
if "stdout" not in package:
- # Skip reporting on packages that did not generate any output.
+ # Skip reporting on packages that do not generate output.
return
self.current_package_name = package["name"]
self.buildname = self.report_build_name(self.current_package_name)
- report_data = self.initialize_report(directory_name)
+ report_data = self.initialize_report(report_dir)
for phase in CDASH_PHASES:
report_data[phase] = {}
report_data[phase]["loglines"] = []
@@ -215,7 +215,7 @@ class CDash(Reporter):
report_file_name = package["name"] + "_" + report_name
else:
report_file_name = report_name
- phase_report = os.path.join(directory_name, report_file_name)
+ phase_report = os.path.join(report_dir, report_file_name)
with codecs.open(phase_report, "w", "utf-8") as f:
env = spack.tengine.make_environment()
@@ -231,7 +231,7 @@ class CDash(Reporter):
f.write(t.render(report_data))
self.upload(phase_report)
- def build_report(self, directory_name, specs):
+ def build_report(self, report_dir, specs):
# Do an initial scan to determine if we are generating reports for more
# than one package. When we're only reporting on a single package we
# do not explicitly include the package's name in the CDash build name.
@@ -260,7 +260,7 @@ class CDash(Reporter):
if "time" in spec:
duration = int(spec["time"])
for package in spec["packages"]:
- self.build_report_for_package(directory_name, package, duration)
+ self.build_report_for_package(report_dir, package, duration)
self.finalize_report()
def extract_standalone_test_data(self, package, phases, report_data):
@@ -273,13 +273,13 @@ class CDash(Reporter):
testing["generator"] = self.generator
testing["parts"] = extract_test_parts(package["name"], package["stdout"].splitlines())
- def report_test_data(self, directory_name, package, phases, report_data):
+ def report_test_data(self, report_dir, package, phases, report_data):
"""Generate and upload the test report(s) for the package."""
for phase in phases:
# Write the report.
report_name = phase.capitalize() + ".xml"
- report_file_name = package["name"] + "_" + report_name
- phase_report = os.path.join(directory_name, report_file_name)
+ report_file_name = "_".join([package["name"], package["id"], report_name])
+ phase_report = os.path.join(report_dir, report_file_name)
with codecs.open(phase_report, "w", "utf-8") as f:
env = spack.tengine.make_environment()
@@ -297,7 +297,7 @@ class CDash(Reporter):
tty.debug("Preparing to upload {0}".format(phase_report))
self.upload(phase_report)
- def test_report_for_package(self, directory_name, package, duration):
+ def test_report_for_package(self, report_dir, package, duration):
if "stdout" not in package:
# Skip reporting on packages that did not generate any output.
tty.debug("Skipping report for {0}: No generated output".format(package["name"]))
@@ -311,14 +311,14 @@ class CDash(Reporter):
self.buildname = self.report_build_name(self.current_package_name)
self.starttime = self.endtime - duration
- report_data = self.initialize_report(directory_name)
+ report_data = self.initialize_report(report_dir)
report_data["hostname"] = socket.gethostname()
phases = ["testing"]
self.extract_standalone_test_data(package, phases, report_data)
- self.report_test_data(directory_name, package, phases, report_data)
+ self.report_test_data(report_dir, package, phases, report_data)
- def test_report(self, directory_name, specs):
+ def test_report(self, report_dir, specs):
"""Generate reports for each package in each spec."""
tty.debug("Processing test report")
for spec in specs:
@@ -326,21 +326,33 @@ class CDash(Reporter):
if "time" in spec:
duration = int(spec["time"])
for package in spec["packages"]:
- self.test_report_for_package(directory_name, package, duration)
+ self.test_report_for_package(report_dir, package, duration)
self.finalize_report()
- def test_skipped_report(self, directory_name, spec, reason=None):
+ def test_skipped_report(
+ self, report_dir: str, spec: spack.spec.Spec, reason: Optional[str] = None
+ ):
+ """Explicitly report spec as being skipped (e.g., CI).
+
+ Examples include a failed installation or a package known to have
+ broken tests.
+
+ Args:
+ report_dir: directory where the report is to be written
+ spec: spec being tested
+ reason: optional reason the test is being skipped
+ """
output = "Skipped {0} package".format(spec.name)
if reason:
output += "\n{0}".format(reason)
package = {"name": spec.name, "id": spec.dag_hash(), "result": "skipped", "stdout": output}
- self.test_report_for_package(directory_name, package, duration=0.0)
+ self.test_report_for_package(report_dir, package, duration=0.0)
- def concretization_report(self, directory_name, msg):
+ def concretization_report(self, report_dir, msg):
self.buildname = self.base_buildname
- report_data = self.initialize_report(directory_name)
+ report_data = self.initialize_report(report_dir)
report_data["update"] = {}
report_data["update"]["starttime"] = self.endtime
report_data["update"]["endtime"] = self.endtime
@@ -350,7 +362,7 @@ class CDash(Reporter):
env = spack.tengine.make_environment()
update_template = posixpath.join(self.template_dir, "Update.xml")
t = env.get_template(update_template)
- output_filename = os.path.join(directory_name, "Update.xml")
+ output_filename = os.path.join(report_dir, "Update.xml")
with open(output_filename, "w") as f:
f.write(t.render(report_data))
# We don't have a current package when reporting on concretization
@@ -360,9 +372,9 @@ class CDash(Reporter):
self.success = False
self.finalize_report()
- def initialize_report(self, directory_name):
- if not os.path.exists(directory_name):
- os.mkdir(directory_name)
+ def initialize_report(self, report_dir):
+ if not os.path.exists(report_dir):
+ os.mkdir(report_dir)
report_data = {}
report_data["buildname"] = self.buildname
report_data["buildstamp"] = self.buildstamp
diff --git a/lib/spack/spack/reporters/extract.py b/lib/spack/spack/reporters/extract.py
index e31f8d951a..5b912610f0 100644
--- a/lib/spack/spack/reporters/extract.py
+++ b/lib/spack/spack/reporters/extract.py
@@ -9,17 +9,23 @@ from datetime import datetime
import llnl.util.tty as tty
+from spack.install_test import TestStatus
+
# The keys here represent the only recognized (ctest/cdash) status values
-completed = {"failed": "Completed", "passed": "Completed", "notrun": "No tests to run"}
+completed = {
+ "failed": "Completed",
+ "passed": "Completed",
+ "skipped": "Completed",
+ "notrun": "No tests to run",
+}
log_regexp = re.compile(r"^==> \[([0-9:.\-]*)(?:, [0-9]*)?\] (.*)")
returns_regexp = re.compile(r"\[([0-9 ,]*)\]")
-skip_msgs = ["Testing package", "Results for", "Detected the following"]
+skip_msgs = ["Testing package", "Results for", "Detected the following", "Warning:"]
skip_regexps = [re.compile(r"{0}".format(msg)) for msg in skip_msgs]
-status_values = ["FAILED", "PASSED", "NO-TESTS"]
-status_regexps = [re.compile(r"^({0})".format(stat)) for stat in status_values]
+status_regexps = [re.compile(r"^({0})".format(str(stat))) for stat in TestStatus]
def add_part_output(part, line):
@@ -36,12 +42,14 @@ def elapsed(current, previous):
return diff.total_seconds()
+# TODO (post-34236): Remove along with the deprecated test methods since there
+# TODO (post-34236): is no XFAIL mechanism with the new test_part() approach.
def expected_failure(line):
if not line:
return False
match = returns_regexp.search(line)
- xfail = "0" not in match.group(0) if match else False
+ xfail = "0" not in match.group(1) if match else False
return xfail
@@ -54,12 +62,12 @@ def new_part():
"name": None,
"loglines": [],
"output": None,
- "status": "passed",
+ "status": None,
}
+# TODO (post-34236): Remove this when the deprecated methods are removed
def part_name(source):
- # TODO: Should be passed the package prefix and only remove it
elements = []
for e in source.replace("'", "").split(" "):
elements.append(os.path.basename(e) if os.sep in e else e)
@@ -73,10 +81,14 @@ def process_part_end(part, curr_time, last_time):
stat = part["status"]
if stat in completed:
+ # TODO (post-34236): remove the expected failure mapping when the
+ # TODO (post-34236): deprecated test methods are removed.
if stat == "passed" and expected_failure(part["desc"]):
part["completed"] = "Expected to fail"
elif part["completed"] == "Unknown":
part["completed"] = completed[stat]
+ elif stat is None or stat == "unknown":
+ part["status"] = "passed"
part["output"] = "\n".join(part["loglines"])
@@ -96,16 +108,16 @@ def status(line):
match = regex.search(line)
if match:
stat = match.group(0)
- stat = "notrun" if stat == "NO-TESTS" else stat
+ stat = "notrun" if stat == "NO_TESTS" else stat
return stat.lower()
def extract_test_parts(default_name, outputs):
parts = []
part = {}
- testdesc = ""
last_time = None
curr_time = None
+
for line in outputs:
line = line.strip()
if not line:
@@ -115,12 +127,16 @@ def extract_test_parts(default_name, outputs):
if skip(line):
continue
- # Skipped tests start with "Skipped" and end with "package"
+ # The spec was explicitly reported as skipped (e.g., installation
+ # failed, package known to have failing tests, won't test external
+ # package).
if line.startswith("Skipped") and line.endswith("package"):
+ stat = "skipped"
part = new_part()
part["command"] = "Not Applicable"
- part["completed"] = line
+ part["completed"] = completed[stat]
part["elapsed"] = 0.0
+ part["loglines"].append(line)
part["name"] = default_name
part["status"] = "notrun"
parts.append(part)
@@ -137,40 +153,53 @@ def extract_test_parts(default_name, outputs):
if msg.startswith("Installing"):
continue
- # New command means the start of a new test part
- if msg.startswith("'") and msg.endswith("'"):
+ # TODO (post-34236): Remove this check when run_test(), etc. are
+ # TODO (post-34236): removed since expected failures are no longer
+ # TODO (post-34236): supported.
+ if msg.startswith("Expecting return code"):
+ if part:
+ part["desc"] += f"; {msg}"
+ continue
+
+ # Terminate without further parsing if no more test messages
+ if "Completed testing" in msg:
+ # Process last lingering part IF it didn't generate status
+ process_part_end(part, curr_time, last_time)
+ return parts
+
+ # New test parts start with "test: <name>: <desc>".
+ if msg.startswith("test: "):
# Update the last part processed
process_part_end(part, curr_time, last_time)
part = new_part()
- part["command"] = msg
- part["name"] = part_name(msg)
+ desc = msg.split(":")
+ part["name"] = desc[1].strip()
+ part["desc"] = ":".join(desc[2:]).strip()
parts.append(part)
- # Save off the optional test description if it was
- # tty.debuged *prior to* the command and reset
- if testdesc:
- part["desc"] = testdesc
- testdesc = ""
+ # There is no guarantee of a 1-to-1 mapping of a test part and
+ # a (single) command (or executable) since the introduction of
+ # PR 34236.
+ #
+ # Note that tests where the package does not save the output
+ # (e.g., output=str.split, error=str.split) will not have
+ # a command printed to the test log.
+ elif msg.startswith("'") and msg.endswith("'"):
+ if part:
+ if part["command"]:
+ part["command"] += "; " + msg.replace("'", "")
+ else:
+ part["command"] = msg.replace("'", "")
+ else:
+ part = new_part()
+ part["command"] = msg.replace("'", "")
else:
# Update the last part processed since a new log message
# means a non-test action
process_part_end(part, curr_time, last_time)
- if testdesc:
- # We had a test description but no command so treat
- # as a new part (e.g., some import tests)
- part = new_part()
- part["name"] = "_".join(testdesc.split())
- part["command"] = "unknown"
- part["desc"] = testdesc
- parts.append(part)
- process_part_end(part, curr_time, curr_time)
-
- # Assuming this is a description for the next test part
- testdesc = msg
-
else:
tty.debug("Did not recognize test output '{0}'".format(line))
@@ -197,12 +226,14 @@ def extract_test_parts(default_name, outputs):
# If no parts, create a skeleton to flag that the tests are not run
if not parts:
part = new_part()
- stat = "notrun"
- part["command"] = "Not Applicable"
+ stat = "failed" if outputs[0].startswith("Cannot open log") else "notrun"
+
+ part["command"] = "unknown"
part["completed"] = completed[stat]
part["elapsed"] = 0.0
part["name"] = default_name
part["status"] = stat
+ part["output"] = "\n".join(outputs)
parts.append(part)
return parts
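Tying the parsing rules above together, a minimal round trip through extract_test_parts might look like the sketch below; the package name, timestamps, and command path are placeholders, and the expectations mirror the reporter unit tests elsewhere in this change.

from spack.reporters.extract import extract_test_parts

# A 'test: <name>: <desc>' line opens a part, a quoted command line attaches
# to it, a status line closes it, and 'Completed testing' ends parsing.
outputs = """
==> Testing package example-1.0-abcdefg
==> [2023-05-10-10:00:00.000000] test: test_version: check version output
==> [2023-05-10-10:00:00.100000] '/path/to/bin/example' '--version'
PASSED: test_version
==> [2023-05-10-10:00:01.000000] Completed testing
""".splitlines()

parts = extract_test_parts("example", outputs)
assert len(parts) == 1
assert parts[0]["name"] == "test_version"
assert parts[0]["desc"] == "check version output"
assert parts[0]["status"] == "passed"
assert parts[0]["command"] == "/path/to/bin/example --version"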
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 43f73ab991..97ae6b020a 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -1632,7 +1632,9 @@ class Spec(object):
@property
def package(self):
- assert self.concrete, "Spec.package can only be called on concrete specs"
+ assert self.concrete, "{0}: Spec.package can only be called on concrete specs".format(
+ self.name
+ )
if not self._package:
self._package = spack.repo.path.get(self)
return self._package
diff --git a/lib/spack/spack/test/audit.py b/lib/spack/spack/test/audit.py
index 624821d989..2efc2bbd88 100644
--- a/lib/spack/spack/test/audit.py
+++ b/lib/spack/spack/test/audit.py
@@ -21,7 +21,7 @@ import spack.config
(["wrong-variant-in-depends-on"], ["PKG-DIRECTIVES", "PKG-PROPERTIES"]),
# This package has a GitHub patch URL without full_index=1
(["invalid-github-patch-url"], ["PKG-DIRECTIVES", "PKG-PROPERTIES"]),
- # This package has a stand-alone 'test' method in build-time callbacks
+ # This package has a stand-alone 'test*' method in build-time callbacks
(["fail-test-audit"], ["PKG-DIRECTIVES", "PKG-PROPERTIES"]),
# This package has no issues
(["mpileaks"], None),
diff --git a/lib/spack/spack/test/builder.py b/lib/spack/spack/test/builder.py
index 40d32c6e79..96944dc3ac 100644
--- a/lib/spack/spack/test/builder.py
+++ b/lib/spack/spack/test/builder.py
@@ -7,6 +7,8 @@ import sys
import pytest
+from llnl.util.filesystem import touch
+
import spack.paths
@@ -125,6 +127,7 @@ def test_build_time_tests_are_executed_from_default_builder():
@pytest.mark.regression("34518")
@pytest.mark.usefixtures("builder_test_repository", "config", "working_env")
def test_monkey_patching_wrapped_pkg():
+ """Confirm 'run_tests' is accessible through wrappers."""
s = spack.spec.Spec("old-style-autotools").concretized()
builder = spack.builder.create(s.package)
assert s.package.run_tests is False
@@ -139,12 +142,29 @@ def test_monkey_patching_wrapped_pkg():
@pytest.mark.regression("34440")
@pytest.mark.usefixtures("builder_test_repository", "config", "working_env")
def test_monkey_patching_test_log_file():
+ """Confirm 'test_log_file' is accessible through wrappers."""
s = spack.spec.Spec("old-style-autotools").concretized()
builder = spack.builder.create(s.package)
- assert s.package.test_log_file is None
- assert builder.pkg.test_log_file is None
- assert builder.pkg_with_dispatcher.test_log_file is None
- s.package.test_log_file = "/some/file"
- assert builder.pkg.test_log_file == "/some/file"
- assert builder.pkg_with_dispatcher.test_log_file == "/some/file"
+ s.package.tester.test_log_file = "/some/file"
+ assert builder.pkg.tester.test_log_file == "/some/file"
+ assert builder.pkg_with_dispatcher.tester.test_log_file == "/some/file"
+
+
+# Windows context manager's __exit__ fails with ValueError ("I/O operation
+# on closed file").
+@pytest.mark.skipif(sys.platform == "win32", reason="Does not run on windows")
+def test_install_time_test_callback(tmpdir, config, mock_packages, mock_stage):
+ """Confirm able to run stand-alone test as a post-install callback."""
+ s = spack.spec.Spec("py-test-callback").concretized()
+ builder = spack.builder.create(s.package)
+ builder.pkg.run_tests = True
+ s.package.tester.test_log_file = tmpdir.join("install_test.log")
+ touch(s.package.tester.test_log_file)
+
+ for phase_fn in builder:
+ phase_fn.execute()
+
+ with open(s.package.tester.test_log_file, "r") as f:
+ results = f.read().replace("\n", " ")
+ assert "PyTestCallback test" in results
diff --git a/lib/spack/spack/test/ci.py b/lib/spack/spack/test/ci.py
index 0c2bde0028..4002ecdb3d 100644
--- a/lib/spack/spack/test/ci.py
+++ b/lib/spack/spack/test/ci.py
@@ -566,8 +566,7 @@ def test_ci_run_standalone_tests_not_installed_cdash(
ci.run_standalone_tests(**args)
out = capfd.readouterr()[0]
# CDash *and* log file output means log file ignored
- assert "xml option is ignored" in out
- assert "0 passed of 0" in out
+ assert "xml option is ignored with CDash" in out
# copy test results (though none)
artifacts_dir = tmp_path / "artifacts"
@@ -595,9 +594,10 @@ def test_ci_skipped_report(tmpdir, mock_packages, config):
reason = "Testing skip"
handler.report_skipped(spec, tmpdir.strpath, reason=reason)
- report = fs.join_path(tmpdir, "{0}_Testing.xml".format(pkg))
- expected = "Skipped {0} package".format(pkg)
- with open(report, "r") as f:
+ reports = [name for name in tmpdir.listdir() if str(name).endswith("Testing.xml")]
+ assert len(reports) == 1
+ expected = f"Skipped {pkg} package"
+ with open(reports[0], "r") as f:
have = [0, 0]
for line in f:
if expected in line:
diff --git a/lib/spack/spack/test/cmd/env.py b/lib/spack/spack/test/cmd/env.py
index 93ce112215..0bde94a45b 100644
--- a/lib/spack/spack/test/cmd/env.py
+++ b/lib/spack/spack/test/cmd/env.py
@@ -701,6 +701,7 @@ spack:
def test_with_config_bad_include(environment_from_manifest):
+ """Confirm missing include paths raise expected exception and error."""
e = environment_from_manifest(
"""
spack:
@@ -709,14 +710,10 @@ spack:
- no/such/file.yaml
"""
)
- with pytest.raises(spack.config.ConfigFileError) as exc:
+ with pytest.raises(spack.config.ConfigFileError, match="2 missing include path"):
with e:
e.concretize()
- err = str(exc)
- assert "missing include" in err
- assert "/no/such/directory" in err
- assert os.path.join("no", "such", "file.yaml") in err
assert ev.active_environment() is None
diff --git a/lib/spack/spack/test/cmd/install.py b/lib/spack/spack/test/cmd/install.py
index 70e73b6412..9bda13ca05 100644
--- a/lib/spack/spack/test/cmd/install.py
+++ b/lib/spack/spack/test/cmd/install.py
@@ -1072,11 +1072,18 @@ def test_install_empty_env(
],
)
def test_installation_fail_tests(install_mockery, mock_fetch, name, method):
+ """Confirm build-time tests with unknown methods fail."""
output = install("--test=root", "--no-cache", name, fail_on_error=False)
+ # Check that there is a single test failure reported
+ assert output.count("TestFailure: 1 test failed") == 1
+
+ # Check that the method name appears twice: in the attribute error and in the message
assert output.count(method) == 2
assert output.count("method not implemented") == 1
- assert output.count("TestFailure: 1 tests failed") == 1
+
+ # Check that the path to the test log file is also output
+ assert "See test log for details" in output
def test_install_use_buildcache(
diff --git a/lib/spack/spack/test/cmd/module.py b/lib/spack/spack/test/cmd/module.py
index e60d3ab3c7..e893ca1d89 100644
--- a/lib/spack/spack/test/cmd/module.py
+++ b/lib/spack/spack/test/cmd/module.py
@@ -41,7 +41,7 @@ def _module_files(module_type, *specs):
["rm", "doesnotexist"], # Try to remove a non existing module
["find", "mpileaks"], # Try to find a module with multiple matches
["find", "doesnotexist"], # Try to find a module with no matches
- ["find", "--unkown_args"], # Try to give an unknown argument
+ ["find", "--unknown_args"], # Try to give an unknown argument
]
)
def failure_args(request):
diff --git a/lib/spack/spack/test/cmd/pkg.py b/lib/spack/spack/test/cmd/pkg.py
index 7c5aae590e..cd62991331 100644
--- a/lib/spack/spack/test/cmd/pkg.py
+++ b/lib/spack/spack/test/cmd/pkg.py
@@ -85,7 +85,15 @@ def mock_pkg_git_repo(git, tmpdir_factory):
@pytest.fixture(scope="module")
def mock_pkg_names():
repo = spack.repo.path.get_repo("builtin.mock")
- names = set(name for name in repo.all_package_names() if not name.startswith("pkg-"))
+
+ # Be sure to include virtual packages since packages with stand-alone
+ # tests may inherit additional tests from the virtuals they provide,
+ # such as packages that implement `mpi`.
+ names = set(
+ name
+ for name in repo.all_package_names(include_virtuals=True)
+ if not name.startswith("pkg-")
+ )
return names
diff --git a/lib/spack/spack/test/cmd/test.py b/lib/spack/spack/test/cmd/test.py
index ed36d711f8..8a7147a2c9 100644
--- a/lib/spack/spack/test/cmd/test.py
+++ b/lib/spack/spack/test/cmd/test.py
@@ -16,6 +16,7 @@ import spack.config
import spack.package_base
import spack.paths
import spack.store
+from spack.install_test import TestStatus
from spack.main import SpackCommand
install = SpackCommand("install")
@@ -59,15 +60,14 @@ def test_test_dup_alias(
"""Ensure re-using an alias fails with suggestion to change."""
install("libdwarf")
- # Run the tests with the alias once
- out = spack_test("run", "--alias", "libdwarf", "libdwarf")
- assert "Spack test libdwarf" in out
+ # Run the (no) tests with the alias once
+ spack_test("run", "--alias", "libdwarf", "libdwarf")
# Try again with the alias but don't let it fail on the error
with capfd.disabled():
out = spack_test("run", "--alias", "libdwarf", "libdwarf", fail_on_error=False)
- assert "already exists" in out
+ assert "already exists" in out and "Try another alias" in out
def test_test_output(
@@ -83,51 +83,39 @@ def test_test_output(
# Grab test stage directory contents
testdir = os.path.join(mock_test_stage, stage_files[0])
testdir_files = os.listdir(testdir)
+ testlogs = [name for name in testdir_files if str(name).endswith("out.txt")]
+ assert len(testlogs) == 1
- # Grab the output from the test log
- testlog = list(filter(lambda x: x.endswith("out.txt") and x != "results.txt", testdir_files))
- outfile = os.path.join(testdir, testlog[0])
+ # Grab the output from the test log to confirm expected result
+ outfile = os.path.join(testdir, testlogs[0])
with open(outfile, "r") as f:
output = f.read()
- assert "BEFORE TEST" in output
- assert "true: expect command status in [" in output
- assert "AFTER TEST" in output
- assert "FAILED" not in output
+ assert "test_print" in output
+ assert "PASSED" in output
-def test_test_output_on_error(
- mock_packages, mock_archive, mock_fetch, install_mockery_mutable_config, capfd, mock_test_stage
+@pytest.mark.parametrize(
+ "pkg_name,failure", [("test-error", "exited with status 1"), ("test-fail", "not callable")]
+)
+def test_test_output_fails(
+ mock_packages,
+ mock_archive,
+ mock_fetch,
+ install_mockery_mutable_config,
+ mock_test_stage,
+ pkg_name,
+ failure,
):
- install("test-error")
- # capfd interferes with Spack's capturing
- with capfd.disabled():
- out = spack_test("run", "test-error", fail_on_error=False)
-
- assert "TestFailure" in out
- assert "Command exited with status 1" in out
-
+ """Confirm stand-alone test failure with expected outputs."""
+ install(pkg_name)
+ out = spack_test("run", pkg_name, fail_on_error=False)
-def test_test_output_on_failure(
- mock_packages, mock_archive, mock_fetch, install_mockery_mutable_config, capfd, mock_test_stage
-):
- install("test-fail")
- with capfd.disabled():
- out = spack_test("run", "test-fail", fail_on_error=False)
+ # Confirm package-specific failure is in the output
+ assert failure in out
- assert "Expected 'not in the output' to match output of `true`" in out
+ # Confirm standard failure tagging AND test log reference also output
assert "TestFailure" in out
-
-
-def test_show_log_on_error(
- mock_packages, mock_archive, mock_fetch, install_mockery_mutable_config, capfd, mock_test_stage
-):
- """Make sure spack prints location of test log on failure."""
- install("test-error")
- with capfd.disabled():
- out = spack_test("run", "test-error", fail_on_error=False)
-
- assert "See test log" in out
- assert mock_test_stage in out
+ assert "See test log for details" in out
@pytest.mark.usefixtures(
@@ -136,11 +124,12 @@ def test_show_log_on_error(
@pytest.mark.parametrize(
"pkg_name,msgs",
[
- ("test-error", ["FAILED: Command exited", "TestFailure"]),
- ("test-fail", ["FAILED: Expected", "TestFailure"]),
+ ("test-error", ["exited with status 1", "TestFailure"]),
+ ("test-fail", ["not callable", "TestFailure"]),
],
)
def test_junit_output_with_failures(tmpdir, mock_test_stage, pkg_name, msgs):
+ """Confirm stand-alone test failure expected outputs in JUnit reporting."""
install(pkg_name)
with tmpdir.as_cwd():
spack_test(
@@ -173,6 +162,7 @@ def test_cdash_output_test_error(
mock_test_stage,
capfd,
):
+ """Confirm stand-alone test error expected outputs in CDash reporting."""
install("test-error")
with tmpdir.as_cwd():
spack_test(
@@ -183,12 +173,10 @@ def test_cdash_output_test_error(
fail_on_error=False,
)
report_dir = tmpdir.join("cdash_reports")
- print(tmpdir.listdir())
- assert report_dir in tmpdir.listdir()
- report_file = report_dir.join("test-error_Testing.xml")
- assert report_file in report_dir.listdir()
- content = report_file.open().read()
- assert "FAILED: Command exited with status 1" in content
+ reports = [name for name in report_dir.listdir() if str(name).endswith("Testing.xml")]
+ assert len(reports) == 1
+ content = reports[0].open().read()
+ assert "Command exited with status 1" in content
def test_cdash_upload_clean_test(
@@ -203,10 +191,12 @@ def test_cdash_upload_clean_test(
with tmpdir.as_cwd():
spack_test("run", "--log-file=cdash_reports", "--log-format=cdash", "printing-package")
report_dir = tmpdir.join("cdash_reports")
- assert report_dir in tmpdir.listdir()
- report_file = report_dir.join("printing-package_Testing.xml")
- assert report_file in report_dir.listdir()
- content = report_file.open().read()
+ reports = [name for name in report_dir.listdir() if str(name).endswith("Testing.xml")]
+ assert len(reports) == 1
+ content = reports[0].open().read()
+ assert "passed" in content
+ assert "Running test_print" in content, "Expected first command output"
+ assert "second command" in content, "Expected second command output"
assert "</Test>" in content
assert "<Text>" not in content
@@ -226,17 +216,19 @@ def test_test_help_cdash(mock_test_stage):
def test_test_list_all(mock_packages):
- """make sure `spack test list --all` returns all packages with tests"""
+ """Confirm `spack test list --all` returns all packages with test methods"""
pkgs = spack_test("list", "--all").strip().split()
assert set(pkgs) == set(
[
+ "fail-test-audit",
+ "mpich",
"printing-package",
"py-extension1",
"py-extension2",
+ "py-test-callback",
"simple-standalone-test",
"test-error",
"test-fail",
- "fail-test-audit",
]
)
@@ -248,15 +240,6 @@ def test_test_list(mock_packages, mock_archive, mock_fetch, install_mockery_muta
assert pkg_with_tests in output
-@pytest.mark.skipif(sys.platform == "win32", reason="Not supported on Windows (yet)")
-def test_has_test_method_fails(capsys):
- with pytest.raises(SystemExit):
- spack.package_base.has_test_method("printing-package")
-
- captured = capsys.readouterr()[1]
- assert "is not a class" in captured
-
-
def test_read_old_results(mock_packages, mock_test_stage):
"""Take test data generated before the switch to full hash everywhere
and make sure we can still read it in"""
@@ -276,7 +259,7 @@ def test_read_old_results(mock_packages, mock_test_stage):
# The results command should still print the old test results
results_output = spack_test("results")
- assert "PASSED" in results_output
+ assert str(TestStatus.PASSED) in results_output
def test_test_results_none(mock_packages, mock_test_stage):
@@ -291,15 +274,10 @@ def test_test_results_none(mock_packages, mock_test_stage):
@pytest.mark.parametrize(
- "status,expected",
- [
- ("FAILED", "1 failed"),
- ("NO-TESTS", "1 no-tests"),
- ("SKIPPED", "1 skipped"),
- ("PASSED", "1 passed"),
- ],
+ "status", [TestStatus.FAILED, TestStatus.NO_TESTS, TestStatus.SKIPPED, TestStatus.PASSED]
)
-def test_test_results_status(mock_packages, mock_test_stage, status, expected):
+def test_test_results_status(mock_packages, mock_test_stage, status):
+ """Confirm 'spack test results' returns expected status."""
name = "trivial"
spec = spack.spec.Spec("trivial-smoke-test").concretized()
suite = spack.install_test.TestSuite([spec], name)
@@ -313,11 +291,11 @@ def test_test_results_status(mock_packages, mock_test_stage, status, expected):
args.insert(1, opt)
results = spack_test(*args)
- if opt == "--failed" and status != "FAILED":
- assert status not in results
+ if opt == "--failed" and status != TestStatus.FAILED:
+ assert str(status) not in results
else:
- assert status in results
- assert expected in results
+ assert str(status) in results
+ assert "1 {0}".format(status.lower()) in results
@pytest.mark.regression("35337")
diff --git a/lib/spack/spack/test/data/test/test_stage/gavrxt67t7yaiwfek7dds7lgokmoaiin/printing-package-1.0-hzgcoow-test-out.txt b/lib/spack/spack/test/data/test/test_stage/gavrxt67t7yaiwfek7dds7lgokmoaiin/printing-package-1.0-hzgcoow-test-out.txt
index de19fa57e4..00ce05620b 100755
--- a/lib/spack/spack/test/data/test/test_stage/gavrxt67t7yaiwfek7dds7lgokmoaiin/printing-package-1.0-hzgcoow-test-out.txt
+++ b/lib/spack/spack/test/data/test/test_stage/gavrxt67t7yaiwfek7dds7lgokmoaiin/printing-package-1.0-hzgcoow-test-out.txt
@@ -1,6 +1,7 @@
==> Testing package printing-package-1.0-hzgcoow
-BEFORE TEST
-==> [2022-02-28-20:21:46.510616] test: true: expect command status in [0]
-==> [2022-02-28-20:21:46.510937] '/bin/true'
-PASSED
-AFTER TEST
+==> [2022-12-06-20:21:46.550943] test: test_print: Test python print example.
+==> [2022-12-06-20:21:46.553219] '/usr/tce/bin/python' '-c' 'print("Running test_print")'
+Running test_print
+==> [2022-12-06-20:21:46.721077] '/usr/tce/bin/python' '-c' 'print("Running test_print")'
+PASSED: test_print
+==> [2022-12-06-20:21:46.822608] Completed testing
diff --git a/lib/spack/spack/test/data/unparse/legion.txt b/lib/spack/spack/test/data/unparse/legion.txt
index e0c24db18a..21800f95f0 100644
--- a/lib/spack/spack/test/data/unparse/legion.txt
+++ b/lib/spack/spack/test/data/unparse/legion.txt
@@ -34,7 +34,7 @@ class Legion(CMakePackage):
homepage = "https://legion.stanford.edu/"
git = "https://github.com/StanfordLegion/legion.git"
- maintainers = ['pmccormick', 'streichler']
+ maintainers('pmccormick', 'streichler')
tags = ['e4s']
version('21.03.0', tag='legion-21.03.0')
version('stable', branch='stable')
@@ -355,7 +355,7 @@ class Legion(CMakePackage):
def cache_test_sources(self):
"""Copy the example source files after the package is installed to an
install test subdirectory for use during `spack test run`."""
- self.cache_extra_test_sources([join_path('examples', 'local_function_tasks')])
+ cache_extra_test_sources(self, [join_path('examples', 'local_function_tasks')])
def run_local_function_tasks_test(self):
"""Run stand alone test: local_function_tasks"""
diff --git a/lib/spack/spack/test/data/unparse/mfem.txt b/lib/spack/spack/test/data/unparse/mfem.txt
index 2028fd90c6..d8d11c7dd7 100644
--- a/lib/spack/spack/test/data/unparse/mfem.txt
+++ b/lib/spack/spack/test/data/unparse/mfem.txt
@@ -27,8 +27,7 @@ class Mfem(Package, CudaPackage, ROCmPackage):
homepage = 'http://www.mfem.org'
git = 'https://github.com/mfem/mfem.git'
- maintainers = ['v-dobrev', 'tzanio', 'acfisher',
- 'goxberry', 'markcmiller86']
+ maintainers('v-dobrev', 'tzanio', 'acfisher', 'goxberry', 'markcmiller86')
test_requires_compiler = True
@@ -815,8 +814,7 @@ class Mfem(Package, CudaPackage, ROCmPackage):
def cache_test_sources(self):
"""Copy the example source files after the package is installed to an
install test subdirectory for use during `spack test run`."""
- self.cache_extra_test_sources([self.examples_src_dir,
- self.examples_data_dir])
+ cache_extra_test_sources(self, [self.examples_src_dir, self.examples_data_dir])
def test(self):
test_dir = join_path(
diff --git a/lib/spack/spack/test/install.py b/lib/spack/spack/test/install.py
index fb1e910f98..f8e8fd1909 100644
--- a/lib/spack/spack/test/install.py
+++ b/lib/spack/spack/test/install.py
@@ -23,6 +23,7 @@ from spack.package_base import (
_spack_build_envfile,
_spack_build_logfile,
_spack_configure_argsfile,
+ spack_times_log,
)
from spack.spec import Spec
@@ -243,7 +244,7 @@ def test_install_times(install_mockery, mock_fetch, mutable_mock_repo):
spec.package.do_install()
# Ensure dependency directory exists after the installation.
- install_times = os.path.join(spec.package.prefix, ".spack", "install_times.json")
+ install_times = os.path.join(spec.package.prefix, ".spack", spack_times_log)
assert os.path.isfile(install_times)
# Ensure the phases are included
@@ -252,7 +253,7 @@ def test_install_times(install_mockery, mock_fetch, mutable_mock_repo):
# The order should be maintained
phases = [x["name"] for x in times["phases"]]
- assert phases == ["stage", "one", "two", "three", "install"]
+ assert phases == ["stage", "one", "two", "three", "install", "post-install"]
assert all(isinstance(x["seconds"], float) for x in times["phases"])
diff --git a/lib/spack/spack/test/installer.py b/lib/spack/spack/test/installer.py
index 91f02efbbc..667c6cc1fc 100644
--- a/lib/spack/spack/test/installer.py
+++ b/lib/spack/spack/test/installer.py
@@ -1384,3 +1384,32 @@ def test_single_external_implicit_install(install_mockery, explicit_args, is_exp
s.external_path = "/usr"
create_installer([(s, explicit_args)]).install()
assert spack.store.db.get_record(pkg).explicit == is_explicit
+
+
+@pytest.mark.parametrize("run_tests", [True, False])
+def test_print_install_test_log_skipped(install_mockery, mock_packages, capfd, run_tests):
+ """Confirm printing of install log skipped if not run/no failures."""
+ name = "trivial-install-test-package"
+ s = spack.spec.Spec(name).concretized()
+ pkg = s.package
+
+ pkg.run_tests = run_tests
+ spack.installer.print_install_test_log(pkg)
+ out = capfd.readouterr()[0]
+ assert out == ""
+
+
+def test_print_install_test_log_missing(
+ tmpdir, install_mockery, mock_packages, ensure_debug, capfd
+):
+ """Confirm expected error on attempt to print missing test log file."""
+ name = "trivial-install-test-package"
+ s = spack.spec.Spec(name).concretized()
+ pkg = s.package
+
+ pkg.run_tests = True
+ pkg.tester.test_log_file = str(tmpdir.join("test-log.txt"))
+ pkg.tester.add_failure(AssertionError("test"), "test-failure")
+ spack.installer.print_install_test_log(pkg)
+ err = capfd.readouterr()[1]
+ assert "no test log file" in err
diff --git a/lib/spack/spack/test/package_class.py b/lib/spack/spack/test/package_class.py
index 6f5d650231..a9df2750d4 100644
--- a/lib/spack/spack/test/package_class.py
+++ b/lib/spack/spack/test/package_class.py
@@ -17,8 +17,11 @@ import pytest
import llnl.util.filesystem as fs
+import spack.install_test
import spack.package_base
import spack.repo
+from spack.build_systems.generic import Package
+from spack.installer import InstallError
@pytest.fixture(scope="module")
@@ -117,14 +120,14 @@ def test_possible_dependencies_with_multiple_classes(mock_packages, mpileaks_pos
assert expected == spack.package_base.possible_dependencies(*pkgs)
-def setup_install_test(source_paths, install_test_root):
+def setup_install_test(source_paths, test_root):
"""
Set up the install test by creating sources and install test roots.
The convention used here is to create an empty file if the path name
ends with an extension otherwise, a directory is created.
"""
- fs.mkdirp(install_test_root)
+ fs.mkdirp(test_root)
for path in source_paths:
if os.path.splitext(path)[1]:
fs.touchp(path)
@@ -159,10 +162,11 @@ def test_cache_extra_sources(install_mockery, spec, sources, extras, expect):
"""Test the package's cache extra test sources helper function."""
s = spack.spec.Spec(spec).concretized()
s.package.spec.concretize()
- source_path = s.package.stage.source_path
+ source_path = s.package.stage.source_path
srcs = [fs.join_path(source_path, src) for src in sources]
- setup_install_test(srcs, s.package.install_test_root)
+ test_root = spack.install_test.install_test_root(s.package)
+ setup_install_test(srcs, test_root)
emsg_dir = "Expected {0} to be a directory"
emsg_file = "Expected {0} to be a file"
@@ -173,10 +177,10 @@ def test_cache_extra_sources(install_mockery, spec, sources, extras, expect):
else:
assert os.path.isdir(src), emsg_dir.format(src)
- s.package.cache_extra_test_sources(extras)
+ spack.install_test.cache_extra_test_sources(s.package, extras)
- src_dests = [fs.join_path(s.package.install_test_root, src) for src in sources]
- exp_dests = [fs.join_path(s.package.install_test_root, e) for e in expect]
+ src_dests = [fs.join_path(test_root, src) for src in sources]
+ exp_dests = [fs.join_path(test_root, e) for e in expect]
poss_dests = set(src_dests) | set(exp_dests)
msg = "Expected {0} to{1} exist"
@@ -192,3 +196,146 @@ def test_cache_extra_sources(install_mockery, spec, sources, extras, expect):
# Perform a little cleanup
shutil.rmtree(os.path.dirname(source_path))
+
+
+def test_cache_extra_sources_fails(install_mockery):
+ s = spack.spec.Spec("a").concretized()
+ s.package.spec.concretize()
+
+ with pytest.raises(InstallError) as exc_info:
+ spack.install_test.cache_extra_test_sources(s.package, ["/a/b", "no-such-file"])
+
+ errors = str(exc_info.value)
+ assert "'/a/b') must be relative" in errors
+ assert "'no-such-file') for the copy does not exist" in errors
+
+
+def test_package_exes_and_libs():
+ with pytest.raises(spack.error.SpackError, match="defines both"):
+
+ class BadDetectablePackage(spack.package.Package):
+ executables = ["findme"]
+ libraries = ["libFindMe.a"]
+
+
+def test_package_url_and_urls():
+ class URLsPackage(spack.package.Package):
+ url = "https://www.example.com/url-package-1.0.tgz"
+ urls = ["https://www.example.com/archive"]
+
+ s = spack.spec.Spec("a")
+ with pytest.raises(ValueError, match="defines both"):
+ URLsPackage(s)
+
+
+def test_package_license():
+ class LicensedPackage(spack.package.Package):
+ extendees = None # currently a required attribute for is_extension()
+ license_files = None
+
+ s = spack.spec.Spec("a")
+ pkg = LicensedPackage(s)
+ assert pkg.global_license_file is None
+
+ pkg.license_files = ["license.txt"]
+ assert os.path.basename(pkg.global_license_file) == pkg.license_files[0]
+
+
+class BaseTestPackage(Package):
+ extendees = None # currently a required attribute for is_extension()
+
+
+def test_package_version_fails():
+ s = spack.spec.Spec("a")
+ pkg = BaseTestPackage(s)
+ with pytest.raises(ValueError, match="does not have a concrete version"):
+ pkg.version()
+
+
+def test_package_tester_fails():
+ s = spack.spec.Spec("a")
+ pkg = BaseTestPackage(s)
+ with pytest.raises(ValueError, match="without concrete version"):
+ pkg.tester()
+
+
+def test_package_fetcher_fails():
+ s = spack.spec.Spec("a")
+ pkg = BaseTestPackage(s)
+ with pytest.raises(ValueError, match="without concrete version"):
+ pkg.fetcher
+
+
+def test_package_no_extendees():
+ s = spack.spec.Spec("a")
+ pkg = BaseTestPackage(s)
+ assert pkg.extendee_args is None
+
+
+def test_package_test_no_compilers(mock_packages, monkeypatch, capfd):
+ def compilers(compiler, arch_spec):
+ return None
+
+ monkeypatch.setattr(spack.compilers, "compilers_for_spec", compilers)
+
+ s = spack.spec.Spec("a")
+ pkg = BaseTestPackage(s)
+ pkg.test_requires_compiler = True
+ pkg.do_test()
+ error = capfd.readouterr()[1]
+ assert "Skipping tests for package" in error
+ assert "test requires missing compiler" in error
+
+
+# TODO (post-34236): Remove when remove deprecated run_test(), etc.
+@pytest.mark.parametrize(
+ "msg,installed,purpose,expected",
+ [
+ ("do-nothing", False, "test: echo", "do-nothing"),
+ ("not installed", True, "test: echo not installed", "expected in prefix"),
+ ],
+)
+def test_package_run_test_install(
+ install_mockery_mutable_config, mock_fetch, capfd, msg, installed, purpose, expected
+):
+ """Confirm expected outputs from run_test for installed/not installed exe."""
+ s = spack.spec.Spec("trivial-smoke-test").concretized()
+ pkg = s.package
+
+ pkg.run_test(
+ "echo", msg, expected=[expected], installed=installed, purpose=purpose, work_dir="."
+ )
+ output = capfd.readouterr()[0]
+ assert expected in output
+
+
+# TODO (post-34236): Remove when remove deprecated run_test(), etc.
+@pytest.mark.parametrize(
+ "skip,failures,status",
+ [
+ (True, 0, str(spack.install_test.TestStatus.SKIPPED)),
+ (False, 1, str(spack.install_test.TestStatus.FAILED)),
+ ],
+)
+def test_package_run_test_missing(
+ install_mockery_mutable_config, mock_fetch, capfd, skip, failures, status
+):
+ """Confirm expected results from run_test for missing exe when skip or not."""
+ s = spack.spec.Spec("trivial-smoke-test").concretized()
+ pkg = s.package
+
+ pkg.run_test("no-possible-program", skip_missing=skip)
+ output = capfd.readouterr()[0]
+ assert len(pkg.tester.test_failures) == failures
+ assert status in output
+
+
+# TODO (post-34236): Remove when remove deprecated run_test(), etc.
+def test_package_run_test_fail_fast(install_mockery_mutable_config, mock_fetch):
+ """Confirm expected exception when run_test with fail_fast enabled."""
+ s = spack.spec.Spec("trivial-smoke-test").concretized()
+ pkg = s.package
+
+ with spack.config.override("config:fail_fast", True):
+ with pytest.raises(spack.install_test.TestFailure, match="Failed to find executable"):
+ pkg.run_test("no-possible-program")
diff --git a/lib/spack/spack/test/packages.py b/lib/spack/spack/test/packages.py
index 45c8111c75..c3e5b83c71 100644
--- a/lib/spack/spack/test/packages.py
+++ b/lib/spack/spack/test/packages.py
@@ -312,14 +312,6 @@ def test_fetch_options(version_str, digest_end, extra_options):
assert fetcher.extra_options == extra_options
-def test_has_test_method_fails(capsys):
- with pytest.raises(SystemExit):
- spack.package_base.has_test_method("printing-package")
-
- captured = capsys.readouterr()[1]
- assert "is not a class" in captured
-
-
def test_package_deprecated_version(mock_packages, mock_fetch, mock_stage):
spec = Spec("deprecated-versions")
pkg_cls = spack.repo.path.get_pkg_class(spec.name)
diff --git a/lib/spack/spack/test/repo.py b/lib/spack/spack/test/repo.py
index a73056d810..e5c5b00693 100644
--- a/lib/spack/spack/test/repo.py
+++ b/lib/spack/spack/test/repo.py
@@ -152,3 +152,18 @@ def test_repo_path_handles_package_removal(tmpdir, mock_packages):
with spack.repo.use_repositories(builder.root, override=False) as repos:
r = repos.repo_for_pkg("c")
assert r.namespace == "builtin.mock"
+
+
+def test_repo_dump_virtuals(tmpdir, mutable_mock_repo, mock_packages, ensure_debug, capsys):
+ # Start with a package-less virtual
+ vspec = spack.spec.Spec("something")
+ mutable_mock_repo.dump_provenance(vspec, tmpdir)
+ captured = capsys.readouterr()[1]
+ assert "does not have a package" in captured
+
+ # Now with a virtual with a package
+ vspec = spack.spec.Spec("externalvirtual")
+ mutable_mock_repo.dump_provenance(vspec, tmpdir)
+ captured = capsys.readouterr()[1]
+ assert "Installing" in captured
+ assert "package.py" in os.listdir(tmpdir), "Expected the virtual's package to be copied"
diff --git a/lib/spack/spack/test/reporters.py b/lib/spack/spack/test/reporters.py
index 3a4174209c..4d3103f3e3 100644
--- a/lib/spack/spack/test/reporters.py
+++ b/lib/spack/spack/test/reporters.py
@@ -2,6 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+import os
+
import pytest
import llnl.util.filesystem as fs
@@ -9,10 +11,12 @@ import llnl.util.tty as tty
import spack.reporters.extract
import spack.spec
+from spack.install_test import TestStatus
from spack.reporters import CDash, CDashConfiguration
# Use a path variable to appease Spack style line length checks
fake_install_prefix = fs.join_path(
+ os.sep,
"usr",
"spack",
"spack",
@@ -28,17 +32,41 @@ fake_test_cache = fs.join_path(
)
+def test_reporters_extract_basics():
+ # This test has a description, command, and status
+ fake_bin = fs.join_path(fake_install_prefix, "bin", "fake")
+ name = "test_no_status"
+ desc = "basic description"
+ status = TestStatus.PASSED
+ outputs = """
+==> Testing package fake-1.0-abcdefg
+==> [2022-02-15-18:44:21.250165] test: {0}: {1}
+==> [2022-02-15-18:44:21.250200] '{2}'
+{3}: {0}
+""".format(
+ name, desc, fake_bin, status
+ ).splitlines()
+
+ parts = spack.reporters.extract.extract_test_parts("fake", outputs)
+ assert len(parts) == 1
+ assert parts[0]["command"] == "{0}".format(fake_bin)
+ assert parts[0]["desc"] == desc
+ assert parts[0]["loglines"] == ["{0}: {1}".format(status, name)]
+ assert parts[0]["status"] == status.lower()
+
+
def test_reporters_extract_no_parts(capfd):
# This test ticks three boxes:
# 1) has Installing, which is skipped;
# 2) does not define any test parts;
# 3) has a status value without a part so generates a warning
+ status = TestStatus.NO_TESTS
outputs = """
==> Testing package fake-1.0-abcdefg
==> [2022-02-11-17:14:38.875259] Installing {0} to {1}
-NO-TESTS
+{2}
""".format(
- fake_install_test_root, fake_test_cache
+ fake_install_test_root, fake_test_cache, status
).splitlines()
parts = spack.reporters.extract.extract_test_parts("fake", outputs)
@@ -49,61 +77,67 @@ NO-TESTS
assert "No part to add status" in err
-def test_reporters_extract_no_command():
- # This test ticks 2 boxes:
- # 1) has a test description with no command or status
- # 2) has a test description, command, and status
- fake_bin = fs.join_path(fake_install_prefix, "bin", "fake")
- outputs = """
-==> Testing package fake-1.0-abcdefg
-==> [2022-02-15-18:44:21.250165] command with no status
-==> [2022-02-15-18:44:21.250175] running test program
-==> [2022-02-15-18:44:21.250200] '{0}'
-PASSED
-""".format(
- fake_bin
- ).splitlines()
-
- parts = spack.reporters.extract.extract_test_parts("fake", outputs)
- assert len(parts) == 2
- assert parts[0]["command"] == "unknown"
- assert parts[1]["loglines"] == ["PASSED"]
- assert parts[1]["elapsed"] == 0.0
-
-
def test_reporters_extract_missing_desc():
+ # This test covers parts with and without descriptions *and* a test part
+ # that has multiple commands
fake_bin = fs.join_path(fake_install_prefix, "bin", "importer")
+ names = ["test_fake_bin", "test_fake_util", "test_multiple_commands"]
+ descs = ["", "import fake util module", ""]
+ failed = TestStatus.FAILED
+ passed = TestStatus.PASSED
+ results = [passed, failed, passed]
outputs = """
==> Testing package fake-1.0-abcdefg
-==> [2022-02-15-18:44:21.250165] '{0}' '-c' 'import fake.bin'
-PASSED
-==> [2022-02-15-18:44:21.250200] '{0}' '-c' 'import fake.util'
-PASSED
+==> [2022-02-15-18:44:21.250165] test: {0}: {1}
+==> [2022-02-15-18:44:21.250170] '{5}' '-c' 'import fake.bin'
+{2}: {0}
+==> [2022-02-15-18:44:21.250185] test: {3}: {4}
+==> [2022-02-15-18:44:21.250200] '{5}' '-c' 'import fake.util'
+{6}: {3}
+==> [2022-02-15-18:44:21.250205] test: {7}: {8}
+==> [2022-02-15-18:44:21.250210] 'exe1 1'
+==> [2022-02-15-18:44:21.250250] 'exe2 2'
+{9}: {7}
""".format(
- fake_bin
+ names[0],
+ descs[0],
+ results[0],
+ names[1],
+ descs[1],
+ fake_bin,
+ results[1],
+ names[2],
+ descs[2],
+ results[2],
).splitlines()
parts = spack.reporters.extract.extract_test_parts("fake", outputs)
- assert len(parts) == 2
- assert parts[0]["desc"] is None
- assert parts[1]["desc"] is None
+ assert len(parts) == 3
+ for i, (name, desc, status) in enumerate(zip(names, descs, results)):
+ assert parts[i]["name"] == name
+ assert parts[i]["desc"] == desc
+ assert parts[i]["status"] == status.lower()
+ assert parts[2]["command"] == "exe1 1; exe2 2"
+# TODO (post-34236): Remove this test when removing deprecated run_test(), etc.
def test_reporters_extract_xfail():
fake_bin = fs.join_path(fake_install_prefix, "bin", "fake-app")
outputs = """
==> Testing package fake-1.0-abcdefg
-==> [2022-02-15-18:44:21.250165] Expecting return code in [3]
+==> [2022-02-15-18:44:21.250165] test: test_fake: Checking fake imports
+==> [2022-02-15-18:44:21.250175] Expecting return code in [3]
==> [2022-02-15-18:44:21.250200] '{0}'
-PASSED
+{1}
""".format(
- fake_bin
+ fake_bin, str(TestStatus.PASSED)
).splitlines()
parts = spack.reporters.extract.extract_test_parts("fake", outputs)
assert len(parts) == 1
+ parts[0]["command"] == fake_bin
parts[0]["completed"] == "Expected to fail"
@@ -123,6 +157,7 @@ def test_reporters_extract_skipped(state):
parts[0]["completed"] == expected
+# TODO (post-34236): Remove this test when removing deprecated run_test(), etc.
def test_reporters_skip():
# This test ticks 3 boxes:
# 1) covers an as yet uncovered skip messages
@@ -134,7 +169,7 @@ def test_reporters_skip():
==> Testing package fake-1.0-abcdefg
==> [2022-02-15-18:44:21.250165, 123456] Detected the following modules: fake1
==> {0}
-==> [2022-02-15-18:44:21.250175, 123456] running fake program
+==> [2022-02-15-18:44:21.250175, 123456] test: test_fake: running fake program
==> [2022-02-15-18:44:21.250200, 123456] '{1}'
INVALID
Results for test suite abcdefghijklmn
@@ -150,6 +185,27 @@ Results for test suite abcdefghijklmn
assert parts[0]["elapsed"] == 0.0
+def test_reporters_skip_new():
+ outputs = """
+==> [2023-04-06-15:55:13.094025] test: test_skip:
+SKIPPED: test_skip: Package must be built with +python
+==> [2023-04-06-15:55:13.540029] Completed testing
+==> [2023-04-06-15:55:13.540275]
+======================= SUMMARY: fake-1.0-abcdefg ========================
+fake::test_skip .. SKIPPED
+=========================== 1 skipped of 1 part ==========================
+""".splitlines()
+
+ parts = spack.reporters.extract.extract_test_parts("fake", outputs)
+
+ assert len(parts) == 1
+ part = parts[0]
+ assert part["name"] == "test_skip"
+ assert part["status"] == "skipped"
+ assert part["completed"] == "Completed"
+ assert part["loglines"][0].startswith("SKIPPED:")
+
+
def test_reporters_report_for_package_no_stdout(tmpdir, monkeypatch, capfd):
class MockCDash(CDash):
def upload(*args, **kwargs):
diff --git a/lib/spack/spack/test/test_suite.py b/lib/spack/spack/test/test_suite.py
index d2ed898728..06b492deff 100644
--- a/lib/spack/spack/test/test_suite.py
+++ b/lib/spack/spack/test/test_suite.py
@@ -2,13 +2,17 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+import collections
import os
import sys
import pytest
+from llnl.util.filesystem import join_path, mkdirp, touch
+
import spack.install_test
import spack.spec
+from spack.util.executable import which
def _true(*args, **kwargs):
@@ -28,7 +32,7 @@ def ensure_results(filename, expected):
assert have
-def test_test_log_pathname(mock_packages, config):
+def test_test_log_name(mock_packages, config):
"""Ensure test log path is reasonable."""
spec = spack.spec.Spec("libdwarf").concretized()
@@ -87,7 +91,10 @@ def test_test_uninstalled(mock_packages, install_mockery, mock_test_stage):
@pytest.mark.parametrize(
"arguments,status,msg",
- [({}, "SKIPPED", "Skipped"), ({"externals": True}, "NO-TESTS", "No tests")],
+ [
+ ({}, spack.install_test.TestStatus.SKIPPED, "Skipped"),
+ ({"externals": True}, spack.install_test.TestStatus.NO_TESTS, "No tests"),
+ ],
)
def test_test_external(
mock_packages, install_mockery, mock_test_stage, monkeypatch, arguments, status, msg
@@ -101,7 +108,7 @@ def test_test_external(
test_suite = spack.install_test.TestSuite([spec])
test_suite(**arguments)
- ensure_results(test_suite.results_file, status)
+ ensure_results(test_suite.results_file, str(status))
if arguments:
ensure_results(test_suite.log_file_for_spec(spec), msg)
@@ -181,3 +188,252 @@ def test_get_test_suite_too_many(mock_packages, mock_test_stage):
with pytest.raises(spack.install_test.TestSuiteNameError) as exc_info:
spack.install_test.get_test_suite(name)
assert "many suites named" in str(exc_info)
+
+
+@pytest.mark.parametrize(
+ "virtuals,expected",
+ [(False, ["Mpich.test_mpich"]), (True, ["Mpi.test_hello", "Mpich.test_mpich"])],
+)
+def test_test_function_names(mock_packages, install_mockery, virtuals, expected):
+ """Confirm test_function_names works as expected with/without virtuals."""
+ spec = spack.spec.Spec("mpich").concretized()
+ tests = spack.install_test.test_function_names(spec.package, add_virtuals=virtuals)
+ assert sorted(tests) == sorted(expected)
+
+
+def test_test_functions_fails():
+ """Confirm test_functions raises error if no package."""
+ with pytest.raises(ValueError, match="Expected a package"):
+ spack.install_test.test_functions(str)
+
+
+def test_test_functions_pkgless(mock_packages, install_mockery, ensure_debug, capsys):
+    """Confirm test_functions works for a package providing a package-less virtual."""
+ spec = spack.spec.Spec("simple-standalone-test").concretized()
+ fns = spack.install_test.test_functions(spec.package, add_virtuals=True)
+ out = capsys.readouterr()
+ assert len(fns) == 1, "Expected only one test function"
+ assert "does not appear to have a package file" in out[1]
+
+
+# TODO: This test should go away when compilers as dependencies is supported
+def test_test_virtuals():
+    """Confirm virtuals() picks up non-unique provided compilers."""
+
+ # This is an unrealistic case but it is set up to retrieve all possible
+ # virtual names in a single call.
+ def satisfies(spec):
+ return True
+
+ # Ensure spec will pick up the llvm+clang virtual compiler package names.
+ VirtualSpec = collections.namedtuple("VirtualSpec", ["name", "satisfies"])
+ vspec = VirtualSpec("llvm", satisfies)
+
+ # Ensure the package name is in the list that provides c, cxx, and fortran
+ # to pick up the three associated compilers and that virtuals provided will
+ # be deduped.
+ MyPackage = collections.namedtuple("MyPackage", ["name", "spec", "virtuals_provided"])
+ pkg = MyPackage("gcc", vspec, [vspec, vspec])
+
+ # This check assumes the method will not provide a unique set of compilers
+ v_names = spack.install_test.virtuals(pkg)
+ for name, number in [("c", 2), ("cxx", 2), ("fortran", 1), ("llvm", 1)]:
+ assert v_names.count(name) == number, "Expected {0} of '{1}'".format(number, name)
+
+
+def test_package_copy_test_files_fails(mock_packages):
+ """Confirm copy_test_files fails as expected without package or test_suite."""
+ vspec = spack.spec.Spec("something")
+
+ # Try without a package
+ with pytest.raises(spack.install_test.TestSuiteError) as exc_info:
+ spack.install_test.copy_test_files(None, vspec)
+ assert "without a package" in str(exc_info)
+
+ # Try with a package without a test suite
+ MyPackage = collections.namedtuple("MyPackage", ["name", "spec", "test_suite"])
+ pkg = MyPackage("SomePackage", vspec, None)
+
+ with pytest.raises(spack.install_test.TestSuiteError) as exc_info:
+ spack.install_test.copy_test_files(pkg, vspec)
+ assert "test suite is missing" in str(exc_info)
+
+
+def test_package_copy_test_files_skips(mock_packages, ensure_debug, capsys):
+    """Confirm copy_test_files skips copying as expected if no package class is found."""
+ # Try with a non-concrete spec and package with a test suite
+ MockSuite = collections.namedtuple("TestSuite", ["specs"])
+ MyPackage = collections.namedtuple("MyPackage", ["name", "spec", "test_suite"])
+ vspec = spack.spec.Spec("something")
+ pkg = MyPackage("SomePackage", vspec, MockSuite([]))
+ spack.install_test.copy_test_files(pkg, vspec)
+ out = capsys.readouterr()[1]
+ assert "skipping test data copy" in out
+ assert "no package class found" in out
+
+
+def test_process_test_parts(mock_packages):
+ """Confirm process_test_parts fails as expected without package or test_suite."""
+ # Try without a package
+ with pytest.raises(spack.install_test.TestSuiteError) as exc_info:
+ spack.install_test.process_test_parts(None, [])
+ assert "without a package" in str(exc_info)
+
+ # Try with a package without a test suite
+ MyPackage = collections.namedtuple("MyPackage", ["name", "test_suite"])
+ pkg = MyPackage("SomePackage", None)
+
+ with pytest.raises(spack.install_test.TestSuiteError) as exc_info:
+ spack.install_test.process_test_parts(pkg, [])
+ assert "test suite is missing" in str(exc_info)
+
+
+def test_test_part_fail(tmpdir, install_mockery_mutable_config, mock_fetch, mock_test_stage):
+ """Confirm test_part with a ProcessError results in FAILED status."""
+ s = spack.spec.Spec("trivial-smoke-test").concretized()
+ pkg = s.package
+ pkg.tester.test_log_file = str(tmpdir.join("test-log.txt"))
+ touch(pkg.tester.test_log_file)
+
+ name = "test_fail"
+ with spack.install_test.test_part(pkg, name, "fake ProcessError"):
+ raise spack.util.executable.ProcessError("Mock failure")
+
+ for part_name, status in pkg.tester.test_parts.items():
+ assert part_name.endswith(name)
+ assert status == spack.install_test.TestStatus.FAILED
+
+
+def test_test_part_pass(install_mockery_mutable_config, mock_fetch, mock_test_stage):
+ """Confirm test_part that succeeds results in PASSED status."""
+ s = spack.spec.Spec("trivial-smoke-test").concretized()
+ pkg = s.package
+
+ name = "test_echo"
+ msg = "nothing"
+ with spack.install_test.test_part(pkg, name, "echo"):
+ echo = which("echo")
+ echo(msg)
+
+ for part_name, status in pkg.tester.test_parts.items():
+ assert part_name.endswith(name)
+ assert status == spack.install_test.TestStatus.PASSED
+
+
+def test_test_part_skip(install_mockery_mutable_config, mock_fetch, mock_test_stage):
+ """Confirm test_part that raises SkipTest results in test status SKIPPED."""
+ s = spack.spec.Spec("trivial-smoke-test").concretized()
+ pkg = s.package
+
+ name = "test_skip"
+ with spack.install_test.test_part(pkg, name, "raise SkipTest"):
+ raise spack.install_test.SkipTest("Skipping the test")
+
+ for part_name, status in pkg.tester.test_parts.items():
+ assert part_name.endswith(name)
+ assert status == spack.install_test.TestStatus.SKIPPED
+
+
+def test_test_part_missing_exe_fail_fast(
+ tmpdir, install_mockery_mutable_config, mock_fetch, mock_test_stage
+):
+ """Confirm test_part with fail fast enabled raises exception."""
+ s = spack.spec.Spec("trivial-smoke-test").concretized()
+ pkg = s.package
+ pkg.tester.test_log_file = str(tmpdir.join("test-log.txt"))
+ touch(pkg.tester.test_log_file)
+
+ name = "test_fail_fast"
+ with spack.config.override("config:fail_fast", True):
+ with pytest.raises(spack.install_test.TestFailure, match="object is not callable"):
+ with spack.install_test.test_part(pkg, name, "fail fast"):
+ missing = which("no-possible-program")
+ missing()
+
+ test_parts = pkg.tester.test_parts
+ assert len(test_parts) == 1
+ for part_name, status in test_parts.items():
+ assert part_name.endswith(name)
+ assert status == spack.install_test.TestStatus.FAILED
+
+
+def test_test_part_missing_exe(
+ tmpdir, install_mockery_mutable_config, mock_fetch, mock_test_stage
+):
+ """Confirm test_part with missing executable fails."""
+ s = spack.spec.Spec("trivial-smoke-test").concretized()
+ pkg = s.package
+ pkg.tester.test_log_file = str(tmpdir.join("test-log.txt"))
+ touch(pkg.tester.test_log_file)
+
+ name = "test_missing_exe"
+ with spack.install_test.test_part(pkg, name, "missing exe"):
+ missing = which("no-possible-program")
+ missing()
+
+ test_parts = pkg.tester.test_parts
+ assert len(test_parts) == 1
+ for part_name, status in test_parts.items():
+ assert part_name.endswith(name)
+ assert status == spack.install_test.TestStatus.FAILED
+
+
+def test_check_special_outputs(tmpdir):
+    """Cover the related get_escaped_text_output and check_outputs helpers."""
+ contents = """CREATE TABLE packages (
+name varchar(80) primary key,
+has_code integer,
+url varchar(160));
+INSERT INTO packages VALUES('sqlite',1,'https://www.sqlite.org');
+INSERT INTO packages VALUES('readline',1,'https://tiswww.case.edu/php/chet/readline/rltop.html');
+INSERT INTO packages VALUES('xsdk',0,'http://xsdk.info');
+COMMIT;
+"""
+ filename = tmpdir.join("special.txt")
+ with open(filename, "w") as f:
+ f.write(contents)
+
+ expected = spack.install_test.get_escaped_text_output(filename)
+ spack.install_test.check_outputs(expected, contents)
+
+    # Let's also cover the case where something expected is NOT in the output
+ expected.append("should not find me")
+ with pytest.raises(RuntimeError, match="Expected"):
+ spack.install_test.check_outputs(expected, contents)
+
+
+def test_find_required_file(tmpdir):
+ filename = "myexe"
+ dirs = ["a", "b"]
+ for d in dirs:
+ path = tmpdir.join(d)
+ mkdirp(path)
+ touch(join_path(path, filename))
+ path = join_path(tmpdir.join("c"), "d")
+ mkdirp(path)
+ touch(join_path(path, filename))
+
+ # First just find a single path
+ results = spack.install_test.find_required_file(
+ tmpdir.join("c"), filename, expected=1, recursive=True
+ )
+ assert isinstance(results, str)
+
+    # Ensure the file is not found if we do not search that directory recursively
+ with pytest.raises(spack.install_test.SkipTest, match="Expected 1"):
+ spack.install_test.find_required_file(
+ tmpdir.join("c"), filename, expected=1, recursive=False
+ )
+
+ # Now make sure we get all of the files
+ results = spack.install_test.find_required_file(tmpdir, filename, expected=3, recursive=True)
+ assert isinstance(results, list) and len(results) == 3
+
+
+def test_packagetest_fails(mock_packages):
+ MyPackage = collections.namedtuple("MyPackage", ["spec"])
+
+ s = spack.spec.Spec("a")
+ pkg = MyPackage(s)
+ with pytest.raises(ValueError, match="require a concrete package"):
+ spack.install_test.PackageTest(pkg)
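Aside (not part of this diff): a minimal sketch of how the test_part context manager and SkipTest exercised by the unit tests above could appear inside a package's stand-alone test method. The package name, part name, and variant check are illustrative only; the helper signatures follow the tests in this file.

    from spack.install_test import SkipTest, test_part
    from spack.package import *


    class ExamplePackage(Package):
        def test_feature(self):
            """check an optional feature (illustrative)"""
            # Skip the whole test part when the prerequisite variant is absent.
            if "+python" not in self.spec:
                raise SkipTest("Package must be built with +python")

            # Embed a named test part; a failure inside the block marks it FAILED.
            with test_part(self, "test_feature_echo", "run echo"):
                echo = which("echo")
                echo("testing echo")
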
diff --git a/lib/spack/spack/test/util/package_hash.py b/lib/spack/spack/test/util/package_hash.py
index 9ab0ab0c47..6dbd3f04a6 100644
--- a/lib/spack/spack/test/util/package_hash.py
+++ b/lib/spack/spack/test/util/package_hash.py
@@ -337,15 +337,15 @@ def test_remove_complex_package_logic_filtered():
("grads", "rrlmwml3f2frdnqavmro3ias66h5b2ce"),
("llvm", "nufffum5dabmaf4l5tpfcblnbfjknvd3"),
# has @when("@4.1.0") and raw unicode literals
- ("mfem", "tiiv7uq7v2xtv24vdij5ptcv76dpazrw"),
- ("mfem@4.0.0", "tiiv7uq7v2xtv24vdij5ptcv76dpazrw"),
- ("mfem@4.1.0", "gxastq64to74qt4he4knpyjfdhh5auel"),
+ ("mfem", "qtneutm6khd6epd2rhyuv2y6zavsxbed"),
+ ("mfem@4.0.0", "qtneutm6khd6epd2rhyuv2y6zavsxbed"),
+ ("mfem@4.1.0", "uit2ydzhra3b2mlvnq262qlrqqmuwq3d"),
# has @when("@1.5.0:")
("py-torch", "qs7djgqn7dy7r3ps4g7hv2pjvjk4qkhd"),
("py-torch@1.0", "qs7djgqn7dy7r3ps4g7hv2pjvjk4qkhd"),
("py-torch@1.6", "p4ine4hc6f2ik2f2wyuwieslqbozll5w"),
# has a print with multiple arguments
- ("legion", "zdpawm4avw3fllxcutvmqb5c3bj5twqt"),
+ ("legion", "sffy6vz3dusxnxeetofoomlaieukygoj"),
# has nested `with when()` blocks and loops
("trilinos", "vqrgscjrla4hi7bllink7v6v6dwxgc2p"),
],
diff --git a/lib/spack/spack/verify.py b/lib/spack/spack/verify.py
index ead7df597d..664ff8dd54 100644
--- a/lib/spack/spack/verify.py
+++ b/lib/spack/spack/verify.py
@@ -14,6 +14,7 @@ import spack.filesystem_view
import spack.store
import spack.util.file_permissions as fp
import spack.util.spack_json as sjson
+from spack.package_base import spack_times_log
def compute_hash(path: str, block_size: int = 1048576) -> str:
@@ -161,6 +162,10 @@ def check_spec_manifest(spec):
if path == manifest_file:
continue
+ # Do not check the install times log file.
+ if entry == spack_times_log:
+ continue
+
data = manifest.pop(path, {})
results += check_entry(path, data)
diff --git a/var/spack/repos/builtin.mock/packages/fail-test-audit/package.py b/var/spack/repos/builtin.mock/packages/fail-test-audit/package.py
index 33c393bfee..3869d4ac68 100644
--- a/var/spack/repos/builtin.mock/packages/fail-test-audit/package.py
+++ b/var/spack/repos/builtin.mock/packages/fail-test-audit/package.py
@@ -14,8 +14,8 @@ class FailTestAudit(MakefilePackage):
version("1.0", md5="0123456789abcdef0123456789abcdef")
version("2.0", md5="abcdef0123456789abcdef0123456789")
- build_time_test_callbacks = ["test"]
+ build_time_test_callbacks = ["test_build_callbacks"]
- def test(self):
- print("test: test-install-callbacks")
- print("PASSED")
+ def test_build_callbacks(self):
+ """test build time test callbacks"""
+ print("test-build-callbacks")
diff --git a/var/spack/repos/builtin.mock/packages/mpi/package.py b/var/spack/repos/builtin.mock/packages/mpi/package.py
new file mode 100644
index 0000000000..2568bfef2d
--- /dev/null
+++ b/var/spack/repos/builtin.mock/packages/mpi/package.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+from spack.package import *
+
+
+class Mpi(Package):
+ """Virtual package for the Message Passing Interface."""
+
+ homepage = "https://www.mpi-forum.org/"
+ virtual = True
+
+ def test_hello(self):
+ print("Hello there!")
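Aside (illustrative only, not part of this diff): with this virtual package in place, the parametrized test_test_function_names case above expects the provider to inherit the virtual's test under the mock repository, along the lines of:

    import spack.install_test
    import spack.spec

    spec = spack.spec.Spec("mpich").concretized()
    names = spack.install_test.test_function_names(spec.package, add_virtuals=True)
    # Both the provider's and the inherited virtual's tests are reported:
    # sorted(names) == ["Mpi.test_hello", "Mpich.test_mpich"]
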
diff --git a/var/spack/repos/builtin.mock/packages/mpich/package.py b/var/spack/repos/builtin.mock/packages/mpich/package.py
index 1271419229..3bfe41ab6a 100644
--- a/var/spack/repos/builtin.mock/packages/mpich/package.py
+++ b/var/spack/repos/builtin.mock/packages/mpich/package.py
@@ -28,3 +28,6 @@ class Mpich(Package):
def install(self, spec, prefix):
touch(prefix.mpich)
+
+ def test_mpich(self):
+ print("Testing mpich")
diff --git a/var/spack/repos/builtin.mock/packages/printing-package/package.py b/var/spack/repos/builtin.mock/packages/printing-package/package.py
index 6357c72bd6..9d41e0a3a6 100644
--- a/var/spack/repos/builtin.mock/packages/printing-package/package.py
+++ b/var/spack/repos/builtin.mock/packages/printing-package/package.py
@@ -26,7 +26,8 @@ class PrintingPackage(Package):
print("AFTER INSTALL")
- def test(self):
- print("BEFORE TEST")
- self.run_test("true") # run /bin/true
- print("AFTER TEST")
+ def test_print(self):
+ """Test print example."""
+
+ print("Running test_print")
+ print("And a second command")
diff --git a/var/spack/repos/builtin.mock/packages/py-test-callback/package.py b/var/spack/repos/builtin.mock/packages/py-test-callback/package.py
new file mode 100644
index 0000000000..26e0e09997
--- /dev/null
+++ b/var/spack/repos/builtin.mock/packages/py-test-callback/package.py
@@ -0,0 +1,29 @@
+# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+import spack.pkg.builtin.mock.python as mp
+from spack.package import *
+
+
+class PyTestCallback(mp.Python):
+ """A package for testing stand-alone test methods as a callback."""
+
+ homepage = "http://www.example.com"
+ url = "http://www.example.com/test-callback-1.0.tar.gz"
+
+ # TODO (post-34236): "test" -> "test_callback" once remove "test" support
+ install_time_test_callbacks = ["test"]
+
+ version("1.0", "00000000000000000000000000000110")
+ version("2.0", "00000000000000000000000000000120")
+
+ def install(self, spec, prefix):
+ mkdirp(prefix.bin)
+
+ # TODO (post-34236): "test" -> "test_callback" once remove "test" support
+ def test(self):
+ super(PyTestCallback, self).test()
+
+ print("PyTestCallback test")
diff --git a/var/spack/repos/builtin.mock/packages/simple-standalone-test/package.py b/var/spack/repos/builtin.mock/packages/simple-standalone-test/package.py
index 3cb78cd25e..9c65773aa5 100644
--- a/var/spack/repos/builtin.mock/packages/simple-standalone-test/package.py
+++ b/var/spack/repos/builtin.mock/packages/simple-standalone-test/package.py
@@ -14,6 +14,9 @@ class SimpleStandaloneTest(Package):
version("1.0", md5="0123456789abcdef0123456789abcdef")
- def test(self):
- msg = "simple stand-alone test"
- self.run_test("echo", [msg], expected=[msg], purpose="test: running {0}".format(msg))
+ provides("standalone-test")
+
+ def test_echo(self):
+ """simple stand-alone test"""
+ echo = which("echo")
+ echo("testing echo", output=str.split, error=str.split)
diff --git a/var/spack/repos/builtin.mock/packages/test-error/package.py b/var/spack/repos/builtin.mock/packages/test-error/package.py
index 5128a265a4..b8f37b4719 100644
--- a/var/spack/repos/builtin.mock/packages/test-error/package.py
+++ b/var/spack/repos/builtin.mock/packages/test-error/package.py
@@ -17,5 +17,7 @@ class TestError(Package):
def install(self, spec, prefix):
mkdirp(prefix.bin)
- def test(self):
- self.run_test("false")
+ def test_false(self):
+ """TestError test"""
+ false = which("false")
+ false()
diff --git a/var/spack/repos/builtin.mock/packages/test-fail/package.py b/var/spack/repos/builtin.mock/packages/test-fail/package.py
index bcaa038e73..6f0416498b 100644
--- a/var/spack/repos/builtin.mock/packages/test-fail/package.py
+++ b/var/spack/repos/builtin.mock/packages/test-fail/package.py
@@ -17,5 +17,7 @@ class TestFail(Package):
def install(self, spec, prefix):
mkdirp(prefix.bin)
- def test(self):
- self.run_test("true", expected=["not in the output"])
+ def test_fails(self):
+ """trigger test failure"""
+ unknown = which("unknown-program")
+ unknown()
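
Aside (an inference from the unit tests above, not stated in the diff): the two mock failures differ in mechanism. test-error runs an executable that exits non-zero, so the call raises ProcessError and the part is recorded as FAILED (see test_test_part_fail), while test-fail asks which() for a program that does not exist, so which() returns None and calling it raises the "object is not callable" error matched in test_test_part_missing_exe_fail_fast:

    from spack.util.executable import which

    false = which("false")              # found: running it exits non-zero -> ProcessError -> FAILED
    unknown = which("unknown-program")  # not found: which() returns None
    unknown()                           # TypeError: 'NoneType' object is not callable -> FAILED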