Diffstat (limited to 'lib')
-rw-r--r--  lib/spack/docs/basic_usage.rst | 4
-rw-r--r--  lib/spack/docs/build_settings.rst | 54
-rw-r--r--  lib/spack/docs/build_systems/intelpackage.rst | 32
-rw-r--r--  lib/spack/docs/containers.rst | 4
-rw-r--r--  lib/spack/docs/developer_guide.rst | 47
-rw-r--r--  lib/spack/docs/features.rst | 4
-rw-r--r--  lib/spack/docs/getting_started.rst | 59
-rw-r--r--  lib/spack/docs/packaging_guide.rst | 243
-rw-r--r--  lib/spack/docs/pipelines.rst | 4
-rw-r--r--  lib/spack/docs/workflows.rst | 12
-rw-r--r--  lib/spack/llnl/util/filesystem.py | 35
-rw-r--r--  lib/spack/llnl/util/lang.py | 61
-rw-r--r--  lib/spack/llnl/util/tty/pty.py | 150
-rw-r--r--  lib/spack/spack/abi.py | 19
-rw-r--r--  lib/spack/spack/architecture.py | 2
-rw-r--r--  lib/spack/spack/build_environment.py | 9
-rw-r--r--  lib/spack/spack/build_systems/cuda.py | 30
-rw-r--r--  lib/spack/spack/cmd/checksum.py | 2
-rw-r--r--  lib/spack/spack/cmd/config.py | 187
-rw-r--r--  lib/spack/spack/cmd/create.py | 8
-rw-r--r--  lib/spack/spack/cmd/env.py | 78
-rw-r--r--  lib/spack/spack/cmd/external.py | 84
-rw-r--r--  lib/spack/spack/cmd/help.py | 6
-rw-r--r--  lib/spack/spack/cmd/spec.py | 2
-rw-r--r--  lib/spack/spack/compiler.py | 2
-rw-r--r--  lib/spack/spack/compilers/__init__.py | 4
-rw-r--r--  lib/spack/spack/compilers/fj.py | 2
-rw-r--r--  lib/spack/spack/config.py | 69
-rw-r--r--  lib/spack/spack/dependency.py | 25
-rw-r--r--  lib/spack/spack/environment.py | 100
-rw-r--r--  lib/spack/spack/installer.py | 4
-rw-r--r--  lib/spack/spack/main.py | 8
-rw-r--r--  lib/spack/spack/operating_systems/cnk.py | 22
-rw-r--r--  lib/spack/spack/package.py | 153
-rw-r--r--  lib/spack/spack/package_prefs.py | 33
-rw-r--r--  lib/spack/spack/pkgkit.py | 2
-rw-r--r--  lib/spack/spack/platforms/bgq.py | 38
-rw-r--r--  lib/spack/spack/platforms/cray.py | 12
-rw-r--r--  lib/spack/spack/schema/__init__.py | 8
-rw-r--r--  lib/spack/spack/schema/env.py | 22
-rw-r--r--  lib/spack/spack/schema/packages.py | 65
-rw-r--r--  lib/spack/spack/spec.py | 106
-rw-r--r--  lib/spack/spack/test/abi.py | 66
-rw-r--r--  lib/spack/spack/test/architecture.py | 3
-rw-r--r--  lib/spack/spack/test/bindist.py | 26
-rw-r--r--  lib/spack/spack/test/cmd/ci.py | 12
-rw-r--r--  lib/spack/spack/test/cmd/compiler.py | 32
-rw-r--r--  lib/spack/spack/test/cmd/config.py | 140
-rw-r--r--  lib/spack/spack/test/cmd/env.py | 75
-rw-r--r--  lib/spack/spack/test/cmd/external.py | 173
-rw-r--r--  lib/spack/spack/test/compilers/detection.py | 12
-rw-r--r--  lib/spack/spack/test/concretize.py | 2
-rw-r--r--  lib/spack/spack/test/concretize_preferences.py | 10
-rw-r--r--  lib/spack/spack/test/conftest.py | 4
-rw-r--r--  lib/spack/spack/test/data/config/packages.yaml | 22
-rw-r--r--  lib/spack/spack/test/data/targets/bgq-rhel6-power7 | 4
-rw-r--r--  lib/spack/spack/test/database.py | 6
-rw-r--r--  lib/spack/spack/test/installer.py | 10
-rw-r--r--  lib/spack/spack/test/llnl/util/cpu.py | 3
-rw-r--r--  lib/spack/spack/test/llnl/util/tty/log.py | 42
-rw-r--r--  lib/spack/spack/test/module_parsing.py | 12
-rw-r--r--  lib/spack/spack/test/util/executable.py | 30
-rw-r--r--  lib/spack/spack/util/compression.py | 4
-rw-r--r--  lib/spack/spack/util/crypto.py | 2
-rw-r--r--  lib/spack/spack/util/executable.py | 9
-rw-r--r--  lib/spack/spack/util/module_cmd.py | 38
-rw-r--r--  lib/spack/spack/util/spack_yaml.py | 18
-rw-r--r--  lib/spack/spack/version.py | 2
68 files changed, 1981 insertions, 587 deletions
diff --git a/lib/spack/docs/basic_usage.rst b/lib/spack/docs/basic_usage.rst
index 93c5858d93..6642b2adbd 100644
--- a/lib/spack/docs/basic_usage.rst
+++ b/lib/spack/docs/basic_usage.rst
@@ -695,11 +695,11 @@ Here is an example of a much longer spec than we've seen thus far:
.. code-block:: none
- mpileaks @1.2:1.4 %gcc@4.7.5 +debug -qt arch=bgq_os ^callpath @1.1 %gcc@4.7.2
+ mpileaks @1.2:1.4 %gcc@4.7.5 +debug -qt target=x86_64 ^callpath @1.1 %gcc@4.7.2
If provided to ``spack install``, this will install the ``mpileaks``
library at some version between ``1.2`` and ``1.4`` (inclusive),
-built using ``gcc`` at version 4.7.5 for the Blue Gene/Q architecture,
+built using ``gcc`` at version 4.7.5 for a generic ``x86_64`` architecture,
with debug options enabled, and without Qt support. Additionally, it
says to link it with the ``callpath`` library (which it depends on),
and to build callpath with ``gcc`` 4.7.2. Most specs will not be as
diff --git a/lib/spack/docs/build_settings.rst b/lib/spack/docs/build_settings.rst
index 9f67d8c14f..3e7a21c3e8 100644
--- a/lib/spack/docs/build_settings.rst
+++ b/lib/spack/docs/build_settings.rst
@@ -57,10 +57,13 @@ directory. Here's an example of an external configuration:
packages:
openmpi:
- paths:
- openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
- openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
- openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
+ externals:
+ - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
+ prefix: /opt/openmpi-1.4.3
+ - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
+ prefix: /opt/openmpi-1.4.3-debug
+ - spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
+ prefix: /opt/openmpi-1.6.5-intel
This example lists three installations of OpenMPI, one built with GCC,
one built with GCC and debug information, and another built with Intel.
@@ -76,13 +79,15 @@ of the installation prefixes. The following example says that module
.. code-block:: yaml
cmake:
- modules:
- cmake@3.7.2: CMake/3.7.2
-
-Each ``packages.yaml`` begins with a ``packages:`` token, followed
-by a list of package names. To specify externals, add a ``paths`` or ``modules``
-token under the package name, which lists externals in a
-``spec: /path`` or ``spec: module-name`` format. Each spec should be as
+ externals:
+ - spec: cmake@3.7.2
+ modules:
+ - CMake/3.7.2
+
+Each ``packages.yaml`` begins with a ``packages:`` attribute, followed
+by a list of package names. To specify externals, add an ``externals:``
+attribute under the package name, which lists externals.
+Each external should specify a ``spec:`` string that is as
well-defined as reasonably possible. If a
package lacks a spec component, such as missing a compiler or
package version, then Spack will guess the missing component based
@@ -106,10 +111,13 @@ be:
packages:
openmpi:
- paths:
- openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
- openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
- openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
+ externals:
+ - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
+ prefix: /opt/openmpi-1.4.3
+ - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
+ prefix: /opt/openmpi-1.4.3-debug
+ - spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
+ prefix: /opt/openmpi-1.6.5-intel
buildable: False
The addition of the ``buildable`` flag tells Spack that it should never build
@@ -137,10 +145,13 @@ but more conveniently:
mpi:
buildable: False
openmpi:
- paths:
- openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
- openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
- openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
+ externals:
+ - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
+ prefix: /opt/openmpi-1.4.3
+ - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
+ prefix: /opt/openmpi-1.4.3-debug
+ - spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
+ prefix: /opt/openmpi-1.6.5-intel
Implementations can also be listed immediately under the virtual they provide:
@@ -172,8 +183,9 @@ After running this command your ``packages.yaml`` may include new entries:
packages:
cmake:
- paths:
- cmake@3.17.2: /usr
+ externals:
+ - spec: cmake@3.17.2
+ prefix: /usr
Generally this is useful for detecting a small set of commonly-used packages;
for now this is generally limited to finding build-only dependencies.
diff --git a/lib/spack/docs/build_systems/intelpackage.rst b/lib/spack/docs/build_systems/intelpackage.rst
index 66f473cbf8..8594c8d425 100644
--- a/lib/spack/docs/build_systems/intelpackage.rst
+++ b/lib/spack/docs/build_systems/intelpackage.rst
@@ -418,9 +418,13 @@ Adapt the following example. Be sure to maintain the indentation:
# other content ...
intel-mkl:
- modules:
- intel-mkl@2018.2.199 arch=linux-centos6-x86_64: intel-mkl/18/18.0.2
- intel-mkl@2018.3.222 arch=linux-centos6-x86_64: intel-mkl/18/18.0.3
+ externals:
+ - spec: "intel-mkl@2018.2.199 arch=linux-centos6-x86_64"
+ modules:
+ - intel-mkl/18/18.0.2
+ - spec: "intel-mkl@2018.3.222 arch=linux-centos6-x86_64"
+ modules:
+ - intel-mkl/18/18.0.3
The version numbers for the ``intel-mkl`` specs defined here correspond to file
and directory names that Intel uses for its products because they were adopted
@@ -451,12 +455,16 @@ mechanism.
packages:
intel-parallel-studio:
- modules:
- intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64: intel/18/18.0.2
- intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64: intel/18/18.0.3
+ externals:
+ - spec: "intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64"
+ modules:
+ - intel/18/18.0.2
+ - spec: "intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64"
+ modules:
+ - intel/18/18.0.3
buildable: False
-One additional example illustrates the use of ``paths:`` instead of
+One additional example illustrates the use of ``prefix:`` instead of
``modules:``, useful when external modulefiles are not available or not
suitable:
@@ -464,13 +472,15 @@ suitable:
packages:
intel-parallel-studio:
- paths:
- intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal: /opt/intel
- intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal: /opt/intel
+ externals:
+ - spec: "intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal"
+ prefix: /opt/intel
+ - spec: "intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal"
+ prefix: /opt/intel
buildable: False
Note that for the Intel packages discussed here, the directory values in the
-``paths:`` entries must be the high-level and typically version-less
+``prefix:`` entries must be the high-level and typically version-less
"installation directory" that has been used by Intel's product installer.
Such a directory will typically accumulate various product versions. Amongst
them, Spack will select the correct version-specific product directory based on
diff --git a/lib/spack/docs/containers.rst b/lib/spack/docs/containers.rst
index fe678fd76d..b215507701 100644
--- a/lib/spack/docs/containers.rst
+++ b/lib/spack/docs/containers.rst
@@ -45,7 +45,7 @@ Environments:
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
# Install the software, remove unnecessary deps
- RUN cd /opt/spack-environment && spack install && spack gc -y
+ RUN cd /opt/spack-environment && spack env activate . && spack install && spack gc -y
# Strip all the binaries
RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
@@ -267,7 +267,7 @@ following ``Dockerfile``:
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
# Install the software, remove unnecessary deps
- RUN cd /opt/spack-environment && spack install && spack gc -y
+ RUN cd /opt/spack-environment && spack env activate . && spack install && spack gc -y
# Strip all the binaries
RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
diff --git a/lib/spack/docs/developer_guide.rst b/lib/spack/docs/developer_guide.rst
index 5c57f28226..284690bd6f 100644
--- a/lib/spack/docs/developer_guide.rst
+++ b/lib/spack/docs/developer_guide.rst
@@ -557,8 +557,10 @@ packages. They should generally only contain fixes to the Spack core.
Both major and minor releases are tagged. After each release, we merge
the release branch back into ``develop`` so that the version bump and any
-other release-specific changes are visible in the mainline (see
-:ref:`merging-releases-to-develop`).
+other release-specific changes are visible in the mainline. As a
+convenience, we also tag the latest release as ``releases/latest``,
+so that users can easily check it out to get the latest
+stable version. See :ref:`merging-releases` for more details.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -641,7 +643,7 @@ for a major release, the steps to make the release are as follows:
#. Follow the steps in :ref:`publishing-releases`.
-#. Follow the steps in :ref:`merging-releases-to-develop`.
+#. Follow the steps in :ref:`merging-releases`.
#. Follow the steps in :ref:`announcing-releases`.
@@ -744,7 +746,7 @@ release:
#. Follow the steps in :ref:`publishing-releases`.
-#. Follow the steps in :ref:`merging-releases-to-develop`.
+#. Follow the steps in :ref:`merging-releases`.
#. Follow the steps in :ref:`announcing-releases`.
@@ -794,19 +796,41 @@ Publishing a release on GitHub
for ``download_count`` to see this.
-.. _merging-releases-to-develop:
+.. _merging-releases:
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Merging back into ``develop``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Updating `releases/latest` and `develop`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Once each release is complete, make sure that it is merged back into
-``develop`` with a merge commit:
+If the new release is the **highest** Spack release yet, you should
+also tag it as ``releases/latest``. For example, suppose the highest
+release is currently ``0.15.3``:
+
+ * If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
+ it with ``releases/latest``, as these are higher than ``0.15.3``.
+
+ * If you are making a new release of an **older** major version of
+ Spack, e.g. ``0.14.4``, then you should not tag it as
+ ``releases/latest`` (as there are newer major versions).
+
+ To tag ``releases/latest``, do this:
+
+ .. code-block:: console
+
+ $ git checkout releases/vX.Y # vX.Y is the new release's branch
+ $ git tag --force releases/latest
+ $ git push --tags
+
+ The ``--force`` argument makes ``git`` overwrite the existing
+ ``releases/latest`` tag with the new one.
+
+We also merge each release that we tag as ``releases/latest`` into ``develop``.
+Make sure to do this with a merge commit:
.. code-block:: console
$ git checkout develop
- $ git merge --no-ff releases/v0.15
+ $ git merge --no-ff vX.Y.Z # vX.Y.Z is the new release's tag
$ git push
We merge back to ``develop`` because it:
@@ -834,7 +858,6 @@ the release is complete and tagged. If you do it before you've tagged the
release and later decide you want to tag some later commit, you'll need
to merge again.
-
.. _announcing-releases:
^^^^^^^^^^^^^^^^^^^^
diff --git a/lib/spack/docs/features.rst b/lib/spack/docs/features.rst
index cd7d3a083f..df212c11fc 100644
--- a/lib/spack/docs/features.rst
+++ b/lib/spack/docs/features.rst
@@ -48,8 +48,8 @@ platform, all on the command line.
# Add compiler flags using the conventional names
$ spack install mpileaks@1.1.2 %gcc@4.7.3 cppflags="-O3 -floop-block"
- # Cross-compile for a different architecture with arch=
- $ spack install mpileaks@1.1.2 arch=bgqos_0
+ # Cross-compile for a different micro-architecture with target=
+ $ spack install mpileaks@1.1.2 target=icelake
Users can specify as many or few options as they care about. Spack
will fill in the unspecified values with sensible defaults. The two listed
diff --git a/lib/spack/docs/getting_started.rst b/lib/spack/docs/getting_started.rst
index 7b908465f5..26deb1cef5 100644
--- a/lib/spack/docs/getting_started.rst
+++ b/lib/spack/docs/getting_started.rst
@@ -712,8 +712,9 @@ an OpenMPI installed in /opt/local, one would use:
packages:
openmpi:
- paths:
- openmpi@1.10.1: /opt/local
+ externals:
+ - spec: openmpi@1.10.1
+ prefix: /opt/local
buildable: False
In general, Spack is easier to use and more reliable if it builds all of
@@ -775,8 +776,9 @@ Then add the following to ``~/.spack/packages.yaml``:
packages:
openssl:
- paths:
- openssl@1.0.2g: /usr
+ externals:
+ - spec: openssl@1.0.2g
+ prefix: /usr
buildable: False
@@ -791,8 +793,9 @@ to add the following to ``packages.yaml``:
packages:
netlib-lapack:
- paths:
- netlib-lapack@3.6.1: /usr
+ externals:
+ - spec: netlib-lapack@3.6.1
+ prefix: /usr
buildable: False
all:
providers:
@@ -1181,9 +1184,13 @@ Here's an example of an external configuration for cray modules:
packages:
mpich:
- modules:
- mpich@7.3.1%gcc@5.2.0 arch=cray_xc-haswell-CNL10: cray-mpich
- mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-haswell-CNL10: cray-mpich
+ externals:
+ - spec: "mpich@7.3.1%gcc@5.2.0 arch=cray_xc-haswell-CNL10"
+ modules:
+ - cray-mpich
+ - spec: "mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-haswell-CNL10"
+ modules:
+ - cray-mpich
all:
providers:
mpi: [mpich]
@@ -1195,7 +1202,7 @@ via module load.
.. note::
- For Cray-provided packages, it is best to use ``modules:`` instead of ``paths:``
+ For Cray-provided packages, it is best to use ``modules:`` instead of ``prefix:``
in ``packages.yaml``, because the Cray Programming Environment heavily relies on
modules (e.g., loading the ``cray-mpich`` module adds MPI libraries to the
compiler wrapper link line).
@@ -1211,19 +1218,31 @@ Here is an example of a full packages.yaml used at NERSC
packages:
mpich:
- modules:
- mpich@7.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge: cray-mpich
- mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-SuSE11-ivybridge: cray-mpich
+ externals:
+ - spec: "mpich@7.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
+ modules:
+ - cray-mpich
+ - spec: "mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-SuSE11-ivybridge"
+ modules:
+ - cray-mpich
buildable: False
netcdf:
- modules:
- netcdf@4.3.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge: cray-netcdf
- netcdf@4.3.3.1%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge: cray-netcdf
+ externals:
+ - spec: "netcdf@4.3.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
+ modules:
+ - cray-netcdf
+ - spec: "netcdf@4.3.3.1%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge"
+ modules:
+ - cray-netcdf
buildable: False
hdf5:
- modules:
- hdf5@1.8.14%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge: cray-hdf5
- hdf5@1.8.14%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge: cray-hdf5
+ externals:
+ - spec: "hdf5@1.8.14%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
+ modules:
+ - cray-hdf5
+ - spec: "hdf5@1.8.14%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge"
+ modules:
+ - cray-hdf5
buildable: False
all:
compiler: [gcc@5.2.0, intel@16.0.0.109]
@@ -1247,6 +1266,6 @@ environment variables may be propagated into containers that are not
using the Cray programming environment.
To ensure that Spack does not autodetect the Cray programming
-environment, unset the environment variable ``CRAYPE_VERSION``. This
+environment, unset the environment variable ``MODULEPATH``. This
will cause Spack to treat a linux container on a Cray system as a base
linux distro.
diff --git a/lib/spack/docs/packaging_guide.rst b/lib/spack/docs/packaging_guide.rst
index d3a888b1fc..b1c55de22c 100644
--- a/lib/spack/docs/packaging_guide.rst
+++ b/lib/spack/docs/packaging_guide.rst
@@ -4054,21 +4054,223 @@ File functions
Making a package discoverable with ``spack external find``
----------------------------------------------------------
-To make a package discoverable with
-:ref:`spack external find <cmd-spack-external-find>` you must
-define one or more executables associated with the package and must
-implement a method to generate a Spec when given an executable.
+The simplest way to make a package discoverable with
+:ref:`spack external find <cmd-spack-external-find>` is to:
-The executables are specified as a package level ``executables``
-attribute which is a list of strings (see example below); each string
-is treated as a regular expression (e.g. 'gcc' would match 'gcc', 'gcc-8.3',
-'my-weird-gcc', etc.).
+1. Define the executables associated with the package
+2. Implement a method to determine the versions of these executables
-The method ``determine_spec_details`` has the following signature:
+^^^^^^^^^^^^^^^^^
+Minimal detection
+^^^^^^^^^^^^^^^^^
+
+The first step is fairly simple, as it only requires specifying a
+package-level ``executables`` attribute:
+
+.. code-block:: python
+
+ class Foo(Package):
+ # Each string provided here is treated as a regular expression, and
+ # would match for example 'foo', 'foobar', and 'bazfoo'.
+ executables = ['foo']
+
+This attribute must be a list of strings. Each string is a regular
+expression (e.g. 'gcc' would match 'gcc', 'gcc-8.3', 'my-weird-gcc', etc.) to
+determine a set of system executables that might be part of this package. Note
+that to match only executables named 'gcc' the regular expression ``'^gcc$'``
+must be used.
+
+Finally, to determine the version of each executable, the ``determine_version``
+method must be implemented:
+
+.. code-block:: python
+
+ @classmethod
+ def determine_version(cls, exe):
+ """Return either the version of the executable passed as argument
+ or ``None`` if the version cannot be determined.
+
+ Args:
+ exe (str): absolute path to the executable being examined
+ """
+
+This method receives as input the path to a single executable and must return
+its version as a string; if the version cannot be determined, or the
+executable turns out not to be an instance of the package, the method can
+return ``None`` and the executable will be discarded as a candidate.
+Implementing the two steps above is mandatory, and gives the package the
+basic ability to detect if a spec is present on the system at a given version.
+
+.. note::
+ Any executable for which the ``determine_version`` method returns ``None``
+ will be discarded and won't appear in later stages of the workflow described below.
+
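+As a minimal sketch, assuming a hypothetical tool named ``foo`` that
+prints ``foo (Foo) X.Y.Z`` in response to ``--version``, the method
+could be implemented as:
+
+.. code-block:: python
+
+   @classmethod
+   def determine_version(cls, exe):
+       # 'Executable' is available in package modules; 're' must be
+       # imported at the top of the package file. The version-line
+       # format below is an assumption made for this made-up tool.
+       output = Executable(exe)('--version', output=str, error=str)
+       match = re.search(r'foo \(Foo\) (\S+)', output)
+       return match.group(1) if match else None
+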
+^^^^^^^^^^^^^^^^^^^^^^^^
+Additional functionality
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Besides the two mandatory steps described above, there are also optional
+methods that can be implemented to either increase the amount of details
+being detected or improve the robustness of the detection logic in a package.
+
+""""""""""""""""""""""""""""""
+Variants and custom attributes
+""""""""""""""""""""""""""""""
+
+The ``determine_variants`` method can be optionally implemented in a package
+to detect additional details of the spec:
+
+.. code-block:: python
+
+ @classmethod
+ def determine_variants(cls, exes, version_str):
+ """Return either a variant string, a tuple of a variant string
+ and a dictionary of extra attributes that will be recorded in
+ packages.yaml, or a list of those items.
+
+ Args:
+ exes (list of str): list of executables (absolute paths) that
+ live in the same prefix and share the same version
+ version_str (str): version associated with the list of
+ executables, as detected by ``determine_version``
+ """
+
+This method takes as input a list of executables that live in the same prefix and
+share the same version string, and returns either:
+
+1. A variant string
+2. A tuple of a variant string and a dictionary of extra attributes
+3. A list of items matching either 1 or 2 (if multiple specs are detected
+ from the set of executables)
+
+If extra attributes are returned, they will be recorded in ``packages.yaml``
+and be available for later reuse. As an example, the ``gcc`` package will record
+by default the different compilers found and an entry in ``packages.yaml``
+would look like:
+
+.. code-block:: yaml
+
+ packages:
+ gcc:
+ externals:
+ - spec: 'gcc@9.0.1 languages=c,c++,fortran'
+ prefix: /usr
+ extra_attributes:
+ compilers:
+ c: /usr/bin/x86_64-linux-gnu-gcc-9
+ c++: /usr/bin/x86_64-linux-gnu-g++-9
+ fortran: /usr/bin/x86_64-linux-gnu-gfortran-9
+
+This allows us, for instance, to keep track of executables that would be named
+differently if built by Spack (e.g. ``x86_64-linux-gnu-gcc-9``
+instead of just ``gcc``).
+
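+As a hedged sketch, a package might set an ``mpi`` variant whenever an
+``mpifoo`` executable (a made-up name, used here only for illustration)
+is detected alongside the others:
+
+.. code-block:: python
+
+   @classmethod
+   def determine_variants(cls, exes, version_str):
+       # 'os' must be imported in the package module; the return value
+       # is a variant string plus a dict of extra attributes
+       mpi_exes = [x for x in exes if os.path.basename(x) == 'mpifoo']
+       if mpi_exes:
+           return '+mpi', {'mpifoo': mpi_exes[0]}
+       return '~mpi', {}
+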
+.. TODO: we need to gather some more experience on overriding 'prefix'
+ and other special keywords in extra attributes, but as soon as we are
+ confident that this is the way to go we should document the process.
+ See https://github.com/spack/spack/pull/16526#issuecomment-653783204
+
+"""""""""""""""""""""""""""
+Filter matching executables
+"""""""""""""""""""""""""""
+
+Sometimes defining the appropriate regex for the ``executables``
+attribute might prove to be difficult, especially if one has to
+deal with corner cases or exclude "red herrings". To keep
+the regular expressions as simple as possible, each package can
+optionally implement a ``filter_detected_exes`` method:
+
+.. code-block:: python
+
+ @classmethod
+ def filter_detected_exes(cls, prefix, exes_in_prefix):
+ """Return a filtered list of the executables in prefix"""
+
+which takes as input a prefix and a list of matching executables and
+returns a filtered list of said executables.
+
+Using this method has the advantage of allowing custom logic for
+filtering, and does not restrict the user to regular expressions
+only. Consider the case of detecting the GNU C++ compiler. If we
+search for executables that match ``g++``, we would also select
+``clang++`` - a C++ compiler provided by another package - if it is
+present on the system. Selecting executables that contain ``g++`` but
+not ``clang`` would be quite complicated with regular expressions alone.
+With the ``filter_detected_exes`` method it becomes:
+
+.. code-block:: python
+
+ class Gcc(Package):
+ executables = ['g++']
+
+ @classmethod
+ def filter_detected_exes(cls, prefix, exes_in_prefix):
+ return [x for x in exes_in_prefix if 'clang' not in x]
+
+This method also makes it possible to apply filtering logic only when
+specific conditions are met (e.g. on one OS but not on another).
+
+^^^^^^^^^^^^^^^^^^
+Validate detection
+^^^^^^^^^^^^^^^^^^
+
+To increase detection robustness, packagers may also implement a method
+to validate the detected Spec objects:
+
+.. code-block:: python
+
+ @classmethod
+ def validate_detected_spec(cls, spec, extra_attributes):
+ """Validate a detected spec. Raise an exception if validation fails."""
+
+This method receives a detected spec along with its extra attributes and can be
+used to check that certain conditions are met by the spec. Packagers can either
+use assertions or raise an ``InvalidSpecDetected`` exception when the check fails.
+If the conditions are not met, the spec will be discarded, and any message
+associated with the assertion or the exception will be logged as the reason
+for discarding it.
+
+As an example, a package that wants to check that the ``compilers`` attribute is
+in the extra attributes can implement this method like this:
+
+.. code-block:: python
+
+ @classmethod
+ def validate_detected_spec(cls, spec, extra_attributes):
+ """Check that 'compilers' is in the extra attributes."""
+ msg = ('the extra attribute "compilers" must be set for '
+ 'the detected spec "{0}"'.format(spec))
+ assert 'compilers' in extra_attributes, msg
+
+or like this:
+
+.. code-block:: python
+
+ @classmethod
+ def validate_detected_spec(cls, spec, extra_attributes):
+ """Check that 'compilers' is in the extra attributes."""
+ if 'compilers' not in extra_attributes:
+ msg = ('the extra attribute "compilers" must be set for '
+ 'the detected spec "{0}"'.format(spec))
+ raise InvalidSpecDetected(msg)
+
+.. _determine_spec_details:
+
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Custom detection workflow
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the rare case that the mechanisms described so far don't fit the
+detection of a package, all of the methods above can be disregarded
+and a custom ``determine_spec_details`` method implemented directly
+in the package class instead (note that the ``executables`` attribute
+must still be defined):
.. code-block:: python
- def determine_spec_details(prefix, exes_in_prefix):
+ @classmethod
+ def determine_spec_details(cls, prefix, exes_in_prefix):
# exes_in_prefix = a set of paths, each path is an executable
# prefix = a prefix that is common to each path in exes_in_prefix
@@ -4076,14 +4278,13 @@ The method ``determine_spec_details`` has the following signature:
# the package. Return one or more Specs for each instance of the
# package which is thought to be installed in the provided prefix
-``determine_spec_details`` takes as parameters a set of discovered
-executables (which match those specified by the user) as well as a
-common prefix shared by all of those executables. The function must
-return one or more Specs associated with the executables (it can also
-return ``None`` to indicate that no provided executables are associated
-with the package).
+This method takes as input a set of discovered executables (which match
+those specified by the user) as well as a common prefix shared by all
+of those executables. The function must return one or more :py:class:`spack.spec.Spec` objects associated
+with the executables (it can also return ``None`` to indicate that no
+provided executables are associated with the package).
-Say for example we have a package called ``foo-package`` which
+As an example, consider a made-up package called ``foo-package`` which
builds an executable called ``foo``. ``FooPackage`` would appear as
follows:
@@ -4107,10 +4308,12 @@ follows:
return
# This implementation is lazy and only checks the first candidate
exe_path = candidates[0]
- exe = spack.util.executable.Executable(exe_path)
- output = exe('--version')
+ exe = Executable(exe_path)
+ output = exe('--version', output=str, error=str)
version_str = ... # parse output for version string
- return Spec('foo-package@{0}'.format(version_str))
+ return Spec.from_detection(
+ 'foo-package@{0}'.format(version_str)
+ )
.. _package-lifecycle:
diff --git a/lib/spack/docs/pipelines.rst b/lib/spack/docs/pipelines.rst
index 34ff9cfc6d..342024d94c 100644
--- a/lib/spack/docs/pipelines.rst
+++ b/lib/spack/docs/pipelines.rst
@@ -82,9 +82,9 @@ or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), thou
topics are outside the scope of this document.
Spack's pipelines are now making use of the
-`trigger <https://docs.gitlab.com/12.9/ee/ci/yaml/README.html#trigger>` syntax to run
+`trigger <https://docs.gitlab.com/12.9/ee/ci/yaml/README.html#trigger>`_ syntax to run
dynamically generated
-`child pipelines <https://docs.gitlab.com/12.9/ee/ci/parent_child_pipelines.html>`.
+`child pipelines <https://docs.gitlab.com/12.9/ee/ci/parent_child_pipelines.html>`_.
Note that the use of dynamic child pipelines requires running Gitlab version
``>= 12.9``.
diff --git a/lib/spack/docs/workflows.rst b/lib/spack/docs/workflows.rst
index 17ca695082..7deb5eff54 100644
--- a/lib/spack/docs/workflows.rst
+++ b/lib/spack/docs/workflows.rst
@@ -1405,11 +1405,12 @@ The main points that are implemented below:
- export CXXFLAGS="-std=c++11"
install:
- - if ! which spack >/dev/null; then
+ - |
+ if ! which spack >/dev/null; then
mkdir -p $SPACK_ROOT &&
git clone --depth 50 https://github.com/spack/spack.git $SPACK_ROOT &&
- echo -e "config:""\n build_jobs:"" 2" > $SPACK_ROOT/etc/spack/config.yaml **
- echo -e "packages:""\n all:""\n target:"" ['x86_64']"
+ printf "config:\n build_jobs: 2\n" > $SPACK_ROOT/etc/spack/config.yaml &&
+ printf "packages:\n all:\n target: ['x86_64']\n" \
> $SPACK_ROOT/etc/spack/packages.yaml;
fi
- travis_wait spack install cmake@3.7.2~openssl~ncurses
@@ -1544,8 +1545,9 @@ Avoid double-installing CUDA by adding, e.g.
packages:
cuda:
- paths:
- cuda@9.0.176%gcc@5.4.0 arch=linux-ubuntu16-x86_64: /usr/local/cuda
+ externals:
+ - spec: "cuda@9.0.176%gcc@5.4.0 arch=linux-ubuntu16-x86_64"
+ prefix: /usr/local/cuda
buildable: False
to your ``packages.yaml``.
diff --git a/lib/spack/llnl/util/filesystem.py b/lib/spack/llnl/util/filesystem.py
index bee6e1e43c..5e08273677 100644
--- a/lib/spack/llnl/util/filesystem.py
+++ b/lib/spack/llnl/util/filesystem.py
@@ -2,7 +2,6 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
import collections
import errno
import hashlib
@@ -377,17 +376,17 @@ def install(src, dest):
copy(src, dest, _permissions=True)
-def resolve_link_target_relative_to_the_link(l):
+def resolve_link_target_relative_to_the_link(link):
"""
os.path.isdir uses os.path.exists, which for links will check
the existence of the link target. If the link target is relative to
the link, we need to construct a pathname that is valid from
our cwd (which may not be the same as the link's directory)
"""
- target = os.readlink(l)
+ target = os.readlink(link)
if os.path.isabs(target):
return target
- link_dir = os.path.dirname(os.path.abspath(l))
+ link_dir = os.path.dirname(os.path.abspath(link))
return os.path.join(link_dir, target)
@@ -1571,6 +1570,19 @@ def can_access_dir(path):
@memoized
+def can_write_to_dir(path):
+ """Return True if the argument is a directory in which we can write.
+
+ Args:
+ path: path to be tested
+
+ Returns:
+ True if ``path`` is a writable directory, else False
+ """
+ return os.path.isdir(path) and os.access(path, os.R_OK | os.X_OK | os.W_OK)
+
+
+@memoized
def files_in(*search_paths):
"""Returns all the files in paths passed as arguments.
@@ -1683,3 +1695,18 @@ def prefixes(path):
pass
return paths
+
+
+def md5sum(file):
+ """Compute the MD5 sum of a file.
+
+ Args:
+ file (str): file to be checksummed
+
+ Returns:
+ MD5 sum of the file's content
+ """
+ md5 = hashlib.md5()
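+ # hash the whole file contents in a single read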
+ with open(file, "rb") as f:
+ md5.update(f.read())
+ return md5.digest()
diff --git a/lib/spack/llnl/util/lang.py b/lib/spack/llnl/util/lang.py
index aacef6d3db..e746ce096c 100644
--- a/lib/spack/llnl/util/lang.py
+++ b/lib/spack/llnl/util/lang.py
@@ -21,47 +21,48 @@ ignore_modules = [r'^\.#', '~$']
def index_by(objects, *funcs):
"""Create a hierarchy of dictionaries by splitting the supplied
- set of objects on unique values of the supplied functions.
- Values are used as keys. For example, suppose you have four
- objects with attributes that look like this::
+ set of objects on unique values of the supplied functions.
- a = Spec(name="boost", compiler="gcc", arch="bgqos_0")
- b = Spec(name="mrnet", compiler="intel", arch="chaos_5_x86_64_ib")
- c = Spec(name="libelf", compiler="xlc", arch="bgqos_0")
- d = Spec(name="libdwarf", compiler="intel", arch="chaos_5_x86_64_ib")
+ Values are used as keys. For example, suppose you have four
+ objects with attributes that look like this::
- list_of_specs = [a,b,c,d]
- index1 = index_by(list_of_specs, lambda s: s.arch,
- lambda s: s.compiler)
- index2 = index_by(list_of_specs, lambda s: s.compiler)
+ a = Spec("boost %gcc target=skylake")
+ b = Spec("mrnet %intel target=zen2")
+ c = Spec("libelf %xlc target=skylake")
+ d = Spec("libdwarf %intel target=zen2")
- ``index1`` now has two levels of dicts, with lists at the
- leaves, like this::
+ list_of_specs = [a,b,c,d]
+ index1 = index_by(list_of_specs, lambda s: str(s.target),
+ lambda s: s.compiler)
+ index2 = index_by(list_of_specs, lambda s: s.compiler)
- { 'bgqos_0' : { 'gcc' : [a], 'xlc' : [c] },
- 'chaos_5_x86_64_ib' : { 'intel' : [b, d] }
- }
+ ``index1`` now has two levels of dicts, with lists at the
+ leaves, like this::
- And ``index2`` is a single level dictionary of lists that looks
- like this::
+ { 'zen2' : { 'gcc' : [a], 'xlc' : [c] },
+ 'skylake' : { 'intel' : [b, d] }
+ }
- { 'gcc' : [a],
- 'intel' : [b,d],
- 'xlc' : [c]
- }
+ And ``index2`` is a single level dictionary of lists that looks
+ like this::
- If any elemnts in funcs is a string, it is treated as the name
- of an attribute, and acts like getattr(object, name). So
- shorthand for the above two indexes would be::
+ { 'gcc' : [a],
+ 'intel' : [b,d],
+ 'xlc' : [c]
+ }
- index1 = index_by(list_of_specs, 'arch', 'compiler')
- index2 = index_by(list_of_specs, 'compiler')
+ If any element in funcs is a string, it is treated as the name
+ of an attribute, and acts like getattr(object, name). So
+ shorthand for the above two indexes would be::
- You can also index by tuples by passing tuples::
+ index1 = index_by(list_of_specs, 'arch', 'compiler')
+ index2 = index_by(list_of_specs, 'compiler')
- index1 = index_by(list_of_specs, ('arch', 'compiler'))
+ You can also index by tuples by passing tuples::
- Keys in the resulting dict will look like ('gcc', 'bgqos_0').
+ index1 = index_by(list_of_specs, ('target', 'compiler'))
+
+ Keys in the resulting dict will look like ('gcc', 'skylake').
"""
if not funcs:
return objects
diff --git a/lib/spack/llnl/util/tty/pty.py b/lib/spack/llnl/util/tty/pty.py
index ef5d40ea57..84c272a6e2 100644
--- a/lib/spack/llnl/util/tty/pty.py
+++ b/lib/spack/llnl/util/tty/pty.py
@@ -31,17 +31,17 @@ from spack.util.executable import which
class ProcessController(object):
"""Wrapper around some fundamental process control operations.
- This allows one process to drive another similar to the way a shell
- would, by sending signals and I/O.
+ This allows one process (the controller) to drive another (the
+ minion) similar to the way a shell would, by sending signals and I/O.
"""
- def __init__(self, pid, master_fd,
+ def __init__(self, pid, controller_fd,
timeout=1, sleep_time=1e-1, debug=False):
"""Create a controller to manipulate the process with id ``pid``
Args:
pid (int): id of process to control
- master_fd (int): master file descriptor attached to pid's stdin
+ controller_fd (int): controller fd attached to pid's stdin
timeout (int): time in seconds for wait operations to time out
(default 1 second)
sleep_time (int): time to sleep after signals, to control the
@@ -58,7 +58,7 @@ class ProcessController(object):
"""
self.pid = pid
self.pgid = os.getpgid(pid)
- self.master_fd = master_fd
+ self.controller_fd = controller_fd
self.timeout = timeout
self.sleep_time = sleep_time
self.debug = debug
@@ -67,8 +67,8 @@ class ProcessController(object):
self.ps = which("ps", required=True)
def get_canon_echo_attrs(self):
- """Get echo and canon attributes of the terminal of master_fd."""
- cfg = termios.tcgetattr(self.master_fd)
+ """Get echo and canon attributes of the terminal of controller_fd."""
+ cfg = termios.tcgetattr(self.controller_fd)
return (
bool(cfg[3] & termios.ICANON),
bool(cfg[3] & termios.ECHO),
@@ -82,7 +82,7 @@ class ProcessController(object):
)
def status(self):
- """Print debug message with status info for the child."""
+ """Print debug message with status info for the minion."""
if self.debug:
canon, echo = self.get_canon_echo_attrs()
sys.stderr.write("canon: %s, echo: %s\n" % (
@@ -94,12 +94,12 @@ class ProcessController(object):
sys.stderr.write("\n")
def input_on(self):
- """True if keyboard input is enabled on the master_fd pty."""
+ """True if keyboard input is enabled on the controller_fd pty."""
return self.get_canon_echo_attrs() == (False, False)
def background(self):
- """True if pgid is in a background pgroup of master_fd's terminal."""
- return self.pgid != os.tcgetpgrp(self.master_fd)
+ """True if pgid is in a background pgroup of controller_fd's tty."""
+ return self.pgid != os.tcgetpgrp(self.controller_fd)
def tstp(self):
"""Send SIGTSTP to the controlled process."""
@@ -115,18 +115,18 @@ class ProcessController(object):
def fg(self):
self.horizontal_line("fg")
with log.ignore_signal(signal.SIGTTOU):
- os.tcsetpgrp(self.master_fd, os.getpgid(self.pid))
+ os.tcsetpgrp(self.controller_fd, os.getpgid(self.pid))
time.sleep(self.sleep_time)
def bg(self):
self.horizontal_line("bg")
with log.ignore_signal(signal.SIGTTOU):
- os.tcsetpgrp(self.master_fd, os.getpgrp())
+ os.tcsetpgrp(self.controller_fd, os.getpgrp())
time.sleep(self.sleep_time)
def write(self, byte_string):
self.horizontal_line("write '%s'" % byte_string.decode("utf-8"))
- os.write(self.master_fd, byte_string)
+ os.write(self.controller_fd, byte_string)
def wait(self, condition):
start = time.time()
@@ -156,50 +156,51 @@ class ProcessController(object):
class PseudoShell(object):
- """Sets up master and child processes with a PTY.
+ """Sets up controller and minion processes with a PTY.
You can create a ``PseudoShell`` if you want to test how some
function responds to terminal input. This is a pseudo-shell from a
- job control perspective; ``master_function`` and ``child_function``
- are set up with a pseudoterminal (pty) so that the master can drive
- the child through process control signals and I/O.
+ job control perspective; ``controller_function`` and ``minion_function``
+ are set up with a pseudoterminal (pty) so that the controller can drive
+ the minion through process control signals and I/O.
The two functions should have signatures like this::
- def master_function(proc, ctl, **kwargs)
- def child_function(**kwargs)
+ def controller_function(proc, ctl, **kwargs)
+ def minion_function(**kwargs)
- ``master_function`` is spawned in its own process and passed three
+ ``controller_function`` is spawned in its own process and passed three
arguments:
proc
- the ``multiprocessing.Process`` object representing the child
+ the ``multiprocessing.Process`` object representing the minion
ctl
- a ``ProcessController`` object tied to the child
+ a ``ProcessController`` object tied to the minion
kwargs
keyword arguments passed from ``PseudoShell.start()``.
- ``child_function`` is only passed ``kwargs`` delegated from
+ ``minion_function`` is only passed ``kwargs`` delegated from
``PseudoShell.start()``.
- The ``ctl.master_fd`` will have its ``master_fd`` connected to
- ``sys.stdin`` in the child process. Both processes will share the
+ The ``ctl.controller_fd`` will have its ``controller_fd`` connected to
+ ``sys.stdin`` in the minion process. Both processes will share the
same ``sys.stdout`` and ``sys.stderr`` as the process instantiating
``PseudoShell``.
Here are the relationships between processes created::
._________________________________________________________.
- | Child Process | pid 2
- | - runs child_function | pgroup 2
+ | Minion Process | pid 2
+ | - runs minion_function | pgroup 2
|_________________________________________________________| session 1
^
- | create process with master_fd connected to stdin
+ | create process with controller_fd connected to stdin
| stdout, stderr are the same as caller
._________________________________________________________.
- | Master Process | pid 1
- | - runs master_function | pgroup 1
- | - uses ProcessController and master_fd to control child | session 1
+ | Controller Process | pid 1
+ | - runs controller_function | pgroup 1
+ | - uses ProcessController and controller_fd to | session 1
+ | control minion |
|_________________________________________________________|
^
| create process
@@ -207,51 +208,51 @@ class PseudoShell(object):
._________________________________________________________.
| Caller | pid 0
| - Constructs, starts, joins PseudoShell | pgroup 0
- | - provides master_function, child_function | session 0
+ | - provides controller_function, minion_function | session 0
|_________________________________________________________|
"""
- def __init__(self, master_function, child_function):
+ def __init__(self, controller_function, minion_function):
self.proc = None
- self.master_function = master_function
- self.child_function = child_function
+ self.controller_function = controller_function
+ self.minion_function = minion_function
# these can be optionally set to change defaults
self.controller_timeout = 1
self.sleep_time = 0
def start(self, **kwargs):
- """Start the master and child processes.
+ """Start the controller and minion processes.
Arguments:
kwargs (dict): arbitrary keyword arguments that will be
- passed to master and child functions
+ passed to controller and minion functions
- The master process will create the child, then call
- ``master_function``. The child process will call
- ``child_function``.
+ The controller process will create the minion, then call
+ ``controller_function``. The minion process will call
+ ``minion_function``.
"""
self.proc = multiprocessing.Process(
- target=PseudoShell._set_up_and_run_master_function,
- args=(self.master_function, self.child_function,
+ target=PseudoShell._set_up_and_run_controller_function,
+ args=(self.controller_function, self.minion_function,
self.controller_timeout, self.sleep_time),
kwargs=kwargs,
)
self.proc.start()
def join(self):
- """Wait for the child process to finish, and return its exit code."""
+ """Wait for the minion process to finish, and return its exit code."""
self.proc.join()
return self.proc.exitcode
@staticmethod
- def _set_up_and_run_child_function(
- tty_name, stdout_fd, stderr_fd, ready, child_function, **kwargs):
- """Child process wrapper for PseudoShell.
+ def _set_up_and_run_minion_function(
+ tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs):
+ """Minion process wrapper for PseudoShell.
Handles the mechanics of setting up a PTY, then calls
- ``child_function``.
+ ``minion_function``.
"""
# new process group, like a command or pipeline launched by a shell
@@ -266,45 +267,45 @@ class PseudoShell(object):
if kwargs.get("debug"):
sys.stderr.write(
- "child: stdin.isatty(): %s\n" % sys.stdin.isatty())
+ "minion: stdin.isatty(): %s\n" % sys.stdin.isatty())
# tell the parent that we're really running
if kwargs.get("debug"):
- sys.stderr.write("child: ready!\n")
+ sys.stderr.write("minion: ready!\n")
ready.value = True
try:
- child_function(**kwargs)
+ minion_function(**kwargs)
except BaseException:
traceback.print_exc()
@staticmethod
- def _set_up_and_run_master_function(
- master_function, child_function, controller_timeout, sleep_time,
- **kwargs):
- """Set up a pty, spawn a child process, and execute master_function.
+ def _set_up_and_run_controller_function(
+ controller_function, minion_function, controller_timeout,
+ sleep_time, **kwargs):
+ """Set up a pty, spawn a minion process, execute controller_function.
Handles the mechanics of setting up a PTY, then calls
- ``master_function``.
+ ``controller_function``.
"""
os.setsid() # new session; this process is the controller
- master_fd, child_fd = os.openpty()
- pty_name = os.ttyname(child_fd)
+ controller_fd, minion_fd = os.openpty()
+ pty_name = os.ttyname(minion_fd)
# take controlling terminal
pty_fd = os.open(pty_name, os.O_RDWR)
os.close(pty_fd)
ready = multiprocessing.Value('i', False)
- child_process = multiprocessing.Process(
- target=PseudoShell._set_up_and_run_child_function,
+ minion_process = multiprocessing.Process(
+ target=PseudoShell._set_up_and_run_minion_function,
args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
- ready, child_function),
+ ready, minion_function),
kwargs=kwargs,
)
- child_process.start()
+ minion_process.start()
# wait for subprocess to be running and connected.
while not ready.value:
@@ -315,30 +316,31 @@ class PseudoShell(object):
sys.stderr.write("pid: %d\n" % os.getpid())
sys.stderr.write("pgid: %d\n" % os.getpgrp())
sys.stderr.write("sid: %d\n" % os.getsid(0))
- sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(master_fd))
+ sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(controller_fd))
sys.stderr.write("\n")
- child_pgid = os.getpgid(child_process.pid)
- sys.stderr.write("child pid: %d\n" % child_process.pid)
- sys.stderr.write("child pgid: %d\n" % child_pgid)
- sys.stderr.write("child sid: %d\n" % os.getsid(child_process.pid))
+ minion_pgid = os.getpgid(minion_process.pid)
+ sys.stderr.write("minion pid: %d\n" % minion_process.pid)
+ sys.stderr.write("minion pgid: %d\n" % minion_pgid)
+ sys.stderr.write(
+ "minion sid: %d\n" % os.getsid(minion_process.pid))
sys.stderr.write("\n")
sys.stderr.flush()
- # set up master to ignore SIGTSTP, like a shell
+ # set up controller to ignore SIGTSTP, like a shell
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
- # call the master function once the child is ready
+ # call the controller function once the minion is ready
try:
controller = ProcessController(
- child_process.pid, master_fd, debug=kwargs.get("debug"))
+ minion_process.pid, controller_fd, debug=kwargs.get("debug"))
controller.timeout = controller_timeout
controller.sleep_time = sleep_time
- error = master_function(child_process, controller, **kwargs)
+ error = controller_function(minion_process, controller, **kwargs)
except BaseException:
error = 1
traceback.print_exc()
- child_process.join()
+ minion_process.join()
- # return whether either the parent or child failed
- return error or child_process.exitcode
+ # return whether either the parent or minion failed
+ return error or minion_process.exitcode
diff --git a/lib/spack/spack/abi.py b/lib/spack/spack/abi.py
index 9e1ef14551..a29a9eef3b 100644
--- a/lib/spack/spack/abi.py
+++ b/lib/spack/spack/abi.py
@@ -18,10 +18,13 @@ class ABI(object):
"""This class provides methods to test ABI compatibility between specs.
The current implementation is rather rough and could be improved."""
- def architecture_compatible(self, parent, child):
- """Return true if parent and child have ABI compatible targets."""
- return not parent.architecture or not child.architecture or \
- parent.architecture == child.architecture
+ def architecture_compatible(self, target, constraint):
+ """Return true if architecture of target spec is ABI compatible
+ to the architecture of constraint spec. If either the target
+ or constraint specs have no architecture, target is also defined
+ as architecture ABI compatible to constraint."""
+ return not target.architecture or not constraint.architecture or \
+ target.architecture.satisfies(constraint.architecture)
@memoized
def _gcc_get_libstdcxx_version(self, version):
@@ -107,8 +110,8 @@ class ABI(object):
return True
return False
- def compatible(self, parent, child, **kwargs):
- """Returns true iff a parent and child spec are ABI compatible"""
+ def compatible(self, target, constraint, **kwargs):
+ """Returns true if target spec is ABI compatible to constraint spec"""
loosematch = kwargs.get('loose', False)
- return self.architecture_compatible(parent, child) and \
- self.compiler_compatible(parent, child, loose=loosematch)
+ return self.architecture_compatible(target, constraint) and \
+ self.compiler_compatible(target, constraint, loose=loosematch)
diff --git a/lib/spack/spack/architecture.py b/lib/spack/spack/architecture.py
index a7c8062fad..975cb9e56d 100644
--- a/lib/spack/spack/architecture.py
+++ b/lib/spack/spack/architecture.py
@@ -6,7 +6,7 @@
"""
This module contains all the elements that are required to create an
architecture object. These include, the target processor, the operating system,
-and the architecture platform (i.e. cray, darwin, linux, bgq, etc) classes.
+and the architecture platform (i.e. cray, darwin, linux, etc) classes.
On a multiple architecture machine, the architecture spec field can be set to
build a package against any target and operating system that is present on the
diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py
index 7ef2126766..4a57dde77b 100644
--- a/lib/spack/spack/build_environment.py
+++ b/lib/spack/spack/build_environment.py
@@ -62,7 +62,7 @@ from spack.util.environment import (
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
-from spack.util.module_cmd import load_module, get_path_from_module, module
+from spack.util.module_cmd import load_module, path_from_modules, module
from spack.util.log_parse import parse_log_events, make_log_context
@@ -642,7 +642,7 @@ def get_rpaths(pkg):
# Second module is our compiler mod name. We use that to get rpaths from
# module show output.
if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
- rpaths.append(get_path_from_module(pkg.compiler.modules[1]))
+ rpaths.append(path_from_modules([pkg.compiler.modules[1]]))
return list(dedupe(filter_system_paths(rpaths)))
@@ -706,8 +706,9 @@ def load_external_modules(pkg):
pkg (PackageBase): package to load deps for
"""
for dep in list(pkg.spec.traverse()):
- if dep.external_module:
- load_module(dep.external_module)
+ external_modules = dep.external_modules or []
+ for external_module in external_modules:
+ load_module(external_module)
def setup_package(pkg, dirty):
diff --git a/lib/spack/spack/build_systems/cuda.py b/lib/spack/spack/build_systems/cuda.py
index cbee710049..ef1b0266f8 100644
--- a/lib/spack/spack/build_systems/cuda.py
+++ b/lib/spack/spack/build_systems/cuda.py
@@ -12,8 +12,9 @@ import spack.variant
class CudaPackage(PackageBase):
"""Auxiliary class which contains CUDA variant, dependencies and conflicts
and is meant to unify and facilitate its usage.
+
+ Maintainers: ax3l, svenevs
"""
- maintainers = ['ax3l', 'svenevs']
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list
# https://developer.nvidia.com/cuda-gpus
@@ -25,6 +26,7 @@ class CudaPackage(PackageBase):
'50', '52', '53',
'60', '61', '62',
'70', '72', '75',
+ '80',
]
# FIXME: keep cuda and cuda_arch separate to make usage easier until
@@ -48,6 +50,7 @@ class CudaPackage(PackageBase):
# CUDA version vs Architecture
# https://en.wikipedia.org/wiki/CUDA#GPUs_supported
+ # https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#deprecated-features
depends_on('cuda@:6.0', when='cuda_arch=10')
depends_on('cuda@:6.5', when='cuda_arch=11')
depends_on('cuda@2.1:6.5', when='cuda_arch=12')
@@ -58,8 +61,8 @@ class CudaPackage(PackageBase):
depends_on('cuda@5.0:10.2', when='cuda_arch=30')
depends_on('cuda@5.0:10.2', when='cuda_arch=32')
- depends_on('cuda@5.0:10.2', when='cuda_arch=35')
- depends_on('cuda@6.5:10.2', when='cuda_arch=37')
+ depends_on('cuda@5.0:', when='cuda_arch=35')
+ depends_on('cuda@6.5:', when='cuda_arch=37')
depends_on('cuda@6.0:', when='cuda_arch=50')
depends_on('cuda@6.5:', when='cuda_arch=52')
@@ -73,6 +76,8 @@ class CudaPackage(PackageBase):
depends_on('cuda@9.0:', when='cuda_arch=72')
depends_on('cuda@10.0:', when='cuda_arch=75')
+ depends_on('cuda@11.0:', when='cuda_arch=80')
+
# There are at least three cases to be aware of for compiler conflicts
# 1. Linux x86_64
# 2. Linux ppc64le
@@ -88,12 +93,15 @@ class CudaPackage(PackageBase):
conflicts('%gcc@7:', when='+cuda ^cuda@:9.1' + arch_platform)
conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130' + arch_platform)
conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89' + arch_platform)
+ conflicts('%gcc@:4,10:', when='+cuda ^cuda@:11.0.2' + arch_platform)
conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27' + arch_platform)
conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5' + arch_platform)
conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8' + arch_platform)
conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1' + arch_platform)
- conflicts('%pgi@:16', when='+cuda ^cuda@9.2.88:10' + arch_platform)
- conflicts('%pgi@:17', when='+cuda ^cuda@10.2.89' + arch_platform)
+ conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10' + arch_platform)
+ conflicts('%pgi@:17,20:',
+ when='+cuda ^cuda@10.1.105:10.2.89' + arch_platform)
+ conflicts('%pgi@:17,20.2:', when='+cuda ^cuda@11.0.2' + arch_platform)
conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5' + arch_platform)
conflicts('%clang@:3.7,4:',
when='+cuda ^cuda@8.0:9.0' + arch_platform)
@@ -104,7 +112,8 @@ class CudaPackage(PackageBase):
conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105' + arch_platform)
conflicts('%clang@:3.7,8.1:',
when='+cuda ^cuda@10.1.105:10.1.243' + arch_platform)
- conflicts('%clang@:3.2,9.0:', when='+cuda ^cuda@10.2.89' + arch_platform)
+ conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89' + arch_platform)
+ conflicts('%clang@:5,10:', when='+cuda ^cuda@11.0.2' + arch_platform)
# x86_64 vs. ppc64le differ according to NVidia docs
# Linux ppc64le compiler conflicts from Table from the docs below:
@@ -119,6 +128,8 @@ class CudaPackage(PackageBase):
conflicts('%gcc@6:', when='+cuda ^cuda@:9' + arch_platform)
conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130' + arch_platform)
conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243' + arch_platform)
+ # officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le
+ conflicts('%gcc@:4,10:', when='+cuda ^cuda@:11.0.2' + arch_platform)
conflicts('%pgi', when='+cuda ^cuda@:8' + arch_platform)
conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185' + arch_platform)
conflicts('%pgi@:17', when='+cuda ^cuda@:10' + arch_platform)
@@ -128,6 +139,7 @@ class CudaPackage(PackageBase):
conflicts('%clang@7:', when='+cuda ^cuda@10.0.130' + arch_platform)
conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105' + arch_platform)
conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89' + arch_platform)
+ conflicts('%clang@:5,10.0:', when='+cuda ^cuda@11.0.2' + arch_platform)
# Intel is mostly relevant for x86_64 Linux, even though it also
# exists for Mac OS X. No information prior to CUDA 3.2 or Intel 11.1
@@ -141,11 +153,13 @@ class CudaPackage(PackageBase):
conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60')
conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9')
conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0')
+ conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1')
+ conflicts('%intel@19.2:', when='+cuda ^cuda@:11.0.2')
# XL is mostly relevant for ppc64le Linux
conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1')
conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2')
- conflicts('%xl@17:', when='+cuda ^cuda@:10.2.89')
+ conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.0.2')
# Mac OS X
# platform = ' platform=darwin'
@@ -156,7 +170,7 @@ class CudaPackage(PackageBase):
    # `clang-apple@x.y.z` as a possible fix.
    # Compiler conflicts will eventually be taken from here:
# https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#abstract
- conflicts('platform=darwin', when='+cuda ^cuda@11.0:')
+ conflicts('platform=darwin', when='+cuda ^cuda@11.0.2:')
# Make sure cuda_arch can not be used without +cuda
for value in cuda_arch_values:
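
Any package that mixes in CudaPackage inherits the depends_on/conflicts
constraints above. As a rough, hypothetical sketch (class name, URL, and
checksum are invented for illustration and not taken from the Spack
repositories):

    from spack import *


    class Mylib(CMakePackage, CudaPackage):
        """Hypothetical package illustrating the CudaPackage mix-in."""

        homepage = "https://example.com/mylib"
        url = "https://example.com/mylib-1.0.tar.gz"

        version('1.0', sha256='0' * 64)  # placeholder checksum

        def cmake_args(self):
            args = []
            if '+cuda' in self.spec:
                # cuda_arch values such as 70 or 80 trigger the matching
                # depends_on/conflicts constraints declared above
                archs = self.spec.variants['cuda_arch'].value
                args.append('-DCUDA_ARCH={0}'.format(';'.join(archs)))
            return args

With such a recipe, a spec like mylib+cuda cuda_arch=80 would pull in
cuda@11.0: and reject the compiler versions excluded above.
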
diff --git a/lib/spack/spack/cmd/checksum.py b/lib/spack/spack/cmd/checksum.py
index 97e7833af0..188deb1149 100644
--- a/lib/spack/spack/cmd/checksum.py
+++ b/lib/spack/spack/cmd/checksum.py
@@ -65,7 +65,7 @@ def checksum(parser, args):
version_lines = spack.stage.get_checksums_for_versions(
url_dict, pkg.name, keep_stage=args.keep_stage,
- batch=(args.batch or len(args.versions) > 0),
+ batch=(args.batch or len(args.versions) > 0 or len(url_dict) == 1),
fetch_options=pkg.fetch_options)
print()
diff --git a/lib/spack/spack/cmd/config.py b/lib/spack/spack/cmd/config.py
index 954d4e4585..e684364d8a 100644
--- a/lib/spack/spack/cmd/config.py
+++ b/lib/spack/spack/cmd/config.py
@@ -2,16 +2,20 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
from __future__ import print_function
+
+import collections
import os
import re
+import shutil
+import llnl.util.filesystem as fs
import llnl.util.tty as tty
-
import spack.config
+import spack.cmd.common.arguments
import spack.schema.env
import spack.environment as ev
+import spack.schema.packages
import spack.util.spack_yaml as syaml
from spack.util.editor import editor
@@ -80,6 +84,19 @@ def setup_parser(subparser):
# Make the add parser available later
setup_parser.add_parser = add_parser
+ update = sp.add_parser(
+ 'update', help='update configuration files to the latest format'
+ )
+ spack.cmd.common.arguments.add_common_arguments(update, ['yes_to_all'])
+ update.add_argument('section', help='section to update')
+
+ revert = sp.add_parser(
+ 'revert',
+ help='revert configuration files to their state before update'
+ )
+ spack.cmd.common.arguments.add_common_arguments(revert, ['yes_to_all'])
+    revert.add_argument('section', help='section to revert')
+
def _get_scope_and_section(args):
"""Extract config scope and section from arguments."""
@@ -275,12 +292,164 @@ def config_remove(args):
set_config(args, path, existing, scope)
+def _can_update_config_file(scope_dir, cfg_file):
+ dir_ok = fs.can_write_to_dir(scope_dir)
+ cfg_ok = fs.can_access(cfg_file)
+ return dir_ok and cfg_ok
+
+
+def config_update(args):
+ # Read the configuration files
+ spack.config.config.get_config(args.section, scope=args.scope)
+ updates = spack.config.config.format_updates[args.section]
+
+ cannot_overwrite, skip_system_scope = [], False
+ for scope in updates:
+ cfg_file = spack.config.config.get_config_filename(
+ scope.name, args.section
+ )
+ scope_dir = scope.path
+ can_be_updated = _can_update_config_file(scope_dir, cfg_file)
+ if not can_be_updated:
+ if scope.name == 'system':
+ skip_system_scope = True
+ msg = ('Not enough permissions to write to "system" scope. '
+ 'Skipping update at that location [cfg={0}]')
+ tty.warn(msg.format(cfg_file))
+ continue
+ cannot_overwrite.append((scope, cfg_file))
+
+ if cannot_overwrite:
+ msg = 'Detected permission issues with the following scopes:\n\n'
+ for scope, cfg_file in cannot_overwrite:
+ msg += '\t[scope={0}, cfg={1}]\n'.format(scope.name, cfg_file)
+ msg += ('\nEither ensure that you have sufficient permissions to '
+ 'modify these files or do not include these scopes in the '
+ 'update.')
+ tty.die(msg)
+
+ if skip_system_scope:
+ updates = [x for x in updates if x.name != 'system']
+
+ # Report if there are no updates to be done
+ if not updates:
+ msg = 'No updates needed for "{0}" section.'
+ tty.msg(msg.format(args.section))
+ return
+
+ proceed = True
+ if not args.yes_to_all:
+ msg = ('The following configuration files are going to be updated to'
+ ' the latest schema format:\n\n')
+ for scope in updates:
+ cfg_file = spack.config.config.get_config_filename(
+ scope.name, args.section
+ )
+ msg += '\t[scope={0}, file={1}]\n'.format(scope.name, cfg_file)
+ msg += ('\nIf the configuration files are updated, versions of Spack '
+ 'that are older than this version may not be able to read '
+ 'them. Spack stores backups of the updated files which can '
+ 'be retrieved with "spack config revert"')
+ tty.msg(msg)
+ proceed = tty.get_yes_or_no('Do you want to proceed?', default=False)
+
+ if not proceed:
+ tty.die('Operation aborted.')
+
+ # Get a function to update the format
+ update_fn = spack.config.ensure_latest_format_fn(args.section)
+ for scope in updates:
+ cfg_file = spack.config.config.get_config_filename(
+ scope.name, args.section
+ )
+ with open(cfg_file) as f:
+ data = syaml.load(f) or {}
+ data = data.pop(args.section, {})
+ update_fn(data)
+
+ # Make a backup copy and rewrite the file
+ bkp_file = cfg_file + '.bkp'
+ shutil.copy(cfg_file, bkp_file)
+ spack.config.config.update_config(
+ args.section, data, scope=scope.name, force=True
+ )
+ msg = 'File "{0}" updated [backup={1}]'
+ tty.msg(msg.format(cfg_file, bkp_file))
+
+
+def _can_revert_update(scope_dir, cfg_file, bkp_file):
+ dir_ok = fs.can_write_to_dir(scope_dir)
+ cfg_ok = not os.path.exists(cfg_file) or fs.can_access(cfg_file)
+ bkp_ok = fs.can_access(bkp_file)
+ return dir_ok and cfg_ok and bkp_ok
+
+
+def config_revert(args):
+ scopes = [args.scope] if args.scope else [
+ x.name for x in spack.config.config.file_scopes
+ ]
+
+ # Search for backup files in the configuration scopes
+ Entry = collections.namedtuple('Entry', ['scope', 'cfg', 'bkp'])
+ to_be_restored, cannot_overwrite = [], []
+ for scope in scopes:
+ cfg_file = spack.config.config.get_config_filename(scope, args.section)
+ bkp_file = cfg_file + '.bkp'
+
+        # If the backup file doesn't exist, move to the next scope
+ if not os.path.exists(bkp_file):
+ continue
+
+ # If it exists and we don't have write access in this scope
+ # keep track of it and report a comprehensive error later
+ entry = Entry(scope, cfg_file, bkp_file)
+ scope_dir = os.path.dirname(bkp_file)
+ can_be_reverted = _can_revert_update(scope_dir, cfg_file, bkp_file)
+ if not can_be_reverted:
+ cannot_overwrite.append(entry)
+ continue
+
+ to_be_restored.append(entry)
+
+ # Report errors if we can't revert a configuration
+ if cannot_overwrite:
+ msg = 'Detected permission issues with the following scopes:\n\n'
+ for e in cannot_overwrite:
+ msg += '\t[scope={0.scope}, cfg={0.cfg}, bkp={0.bkp}]\n'.format(e)
+    msg += ('\nEither ensure that you have the right permissions before'
+            ' retrying, or be more specific about the scope to revert.')
+ tty.die(msg)
+
+ proceed = True
+ if not args.yes_to_all:
+ msg = ('The following scopes will be restored from the corresponding'
+ ' backup files:\n')
+ for entry in to_be_restored:
+ msg += '\t[scope={0.scope}, bkp={0.bkp}]\n'.format(entry)
+ msg += 'This operation cannot be undone.'
+ tty.msg(msg)
+ proceed = tty.get_yes_or_no('Do you want to proceed?', default=False)
+
+ if not proceed:
+ tty.die('Operation aborted.')
+
+ for _, cfg_file, bkp_file in to_be_restored:
+ shutil.copy(bkp_file, cfg_file)
+ os.unlink(bkp_file)
+ msg = 'File "{0}" reverted to old state'
+ tty.msg(msg.format(cfg_file))
+
+
def config(parser, args):
- action = {'get': config_get,
- 'blame': config_blame,
- 'edit': config_edit,
- 'list': config_list,
- 'add': config_add,
- 'rm': config_remove,
- 'remove': config_remove}
+ action = {
+ 'get': config_get,
+ 'blame': config_blame,
+ 'edit': config_edit,
+ 'list': config_list,
+ 'add': config_add,
+ 'rm': config_remove,
+ 'remove': config_remove,
+ 'update': config_update,
+ 'revert': config_revert
+ }
action[args.config_command](args)
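
Both new subcommands go through the same dispatch table as the existing
ones. A minimal sketch of driving them programmatically (not a supported
API; the Namespace fields simply mirror the parser arguments defined
above):

    import argparse

    import spack.cmd.config

    args = argparse.Namespace(config_command='update', section='packages',
                              scope=None, yes_to_all=True)
    spack.cmd.config.config(None, args)  # dispatches to config_update

    args.config_command = 'revert'
    spack.cmd.config.config(None, args)  # restores packages.yaml from .bkp
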
diff --git a/lib/spack/spack/cmd/create.py b/lib/spack/spack/cmd/create.py
index 7d12dc98a7..48326868ae 100644
--- a/lib/spack/spack/cmd/create.py
+++ b/lib/spack/spack/cmd/create.py
@@ -445,6 +445,9 @@ def setup_parser(subparser):
subparser.add_argument(
'--skip-editor', action='store_true',
help="skip the edit session for the package (e.g., automation)")
+ subparser.add_argument(
+ '-b', '--batch', action='store_true',
+ help="don't ask which versions to checksum")
class BuildSystemGuesser:
@@ -511,7 +514,7 @@ class BuildSystemGuesser:
# Determine the build system based on the files contained
# in the archive.
for pattern, bs in clues:
- if any(re.search(pattern, l) for l in lines):
+ if any(re.search(pattern, line) for line in lines):
self.build_system = bs
break
@@ -629,7 +632,8 @@ def get_versions(args, name):
versions = spack.stage.get_checksums_for_versions(
url_dict, name, first_stage_function=guesser,
- keep_stage=args.keep_stage, batch=True)
+ keep_stage=args.keep_stage,
+ batch=(args.batch or len(url_dict) == 1))
else:
versions = unhashed_versions
diff --git a/lib/spack/spack/cmd/env.py b/lib/spack/spack/cmd/env.py
index e3c45cc27b..7bd8052528 100644
--- a/lib/spack/spack/cmd/env.py
+++ b/lib/spack/spack/cmd/env.py
@@ -4,6 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
+import shutil
import sys
from collections import namedtuple
@@ -14,6 +15,7 @@ from llnl.util.tty.color import colorize
import spack.config
import spack.schema.env
+import spack.cmd.common.arguments
import spack.cmd.install
import spack.cmd.uninstall
import spack.cmd.modules
@@ -37,6 +39,8 @@ subcommands = [
['status', 'st'],
'loads',
'view',
+ 'update',
+ 'revert'
]
@@ -394,6 +398,80 @@ def env_loads(args):
print(' source %s' % loads_file)
+def env_update_setup_parser(subparser):
+ """update environments to the latest format"""
+ subparser.add_argument(
+ metavar='env', dest='env',
+        help='name or directory of the environment to update'
+ )
+ spack.cmd.common.arguments.add_common_arguments(subparser, ['yes_to_all'])
+
+
+def env_update(args):
+ manifest_file = ev.manifest_file(args.env)
+ backup_file = manifest_file + ".bkp"
+ needs_update = not ev.is_latest_format(manifest_file)
+
+ if not needs_update:
+ tty.msg('No update needed for the environment "{0}"'.format(args.env))
+ return
+
+ proceed = True
+ if not args.yes_to_all:
+ msg = ('The environment "{0}" is going to be updated to the latest '
+ 'schema format.\nIf the environment is updated, versions of '
+ 'Spack that are older than this version may not be able to '
+ 'read it. Spack stores backups of the updated environment '
+ 'which can be retrieved with "spack env revert"')
+ tty.msg(msg.format(args.env))
+ proceed = tty.get_yes_or_no('Do you want to proceed?', default=False)
+
+ if not proceed:
+ tty.die('Operation aborted.')
+
+ ev.update_yaml(manifest_file, backup_file=backup_file)
+ msg = 'Environment "{0}" has been updated [backup={1}]'
+ tty.msg(msg.format(args.env, backup_file))
+
+
+def env_revert_setup_parser(subparser):
+ """restore environments to their state before update"""
+ subparser.add_argument(
+ metavar='env', dest='env',
+        help='name or directory of the environment to revert'
+ )
+ spack.cmd.common.arguments.add_common_arguments(subparser, ['yes_to_all'])
+
+
+def env_revert(args):
+ manifest_file = ev.manifest_file(args.env)
+ backup_file = manifest_file + ".bkp"
+
+    # Check that both the spack.yaml and the backup exist, then inform
+    # the user about what is going to happen and ask for confirmation
+ if not os.path.exists(manifest_file):
+        msg = 'cannot find the manifest file of the environment [file={0}]'
+ tty.die(msg.format(manifest_file))
+ if not os.path.exists(backup_file):
+ msg = 'cannot find the old manifest file to be restored [file={0}]'
+ tty.die(msg.format(backup_file))
+
+ proceed = True
+ if not args.yes_to_all:
+ msg = ('Spack is going to overwrite the current manifest file'
+ ' with a backup copy [manifest={0}, backup={1}]')
+ tty.msg(msg.format(manifest_file, backup_file))
+ proceed = tty.get_yes_or_no('Do you want to proceed?', default=False)
+
+ if not proceed:
+ tty.die('Operation aborted.')
+
+ shutil.copy(backup_file, manifest_file)
+ os.remove(backup_file)
+ msg = 'Environment "{0}" reverted to old state'
+ tty.msg(msg.format(manifest_file))
+
+
#: Dictionary mapping subcommand names and aliases to functions
subcommand_functions = {}
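
The two subcommands are thin wrappers around helpers added to
spack.environment later in this diff. A sketch of the equivalent
programmatic flow, assuming an environment named 'myenv' already exists:

    import spack.environment as ev

    manifest = ev.manifest_file('myenv')  # absolute path to spack.yaml
    if not ev.is_latest_format(manifest):
        # rewrite the manifest in the new format, keeping the old copy
        ev.update_yaml(manifest, backup_file=manifest + '.bkp')
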
diff --git a/lib/spack/spack/cmd/external.py b/lib/spack/spack/cmd/external.py
index afdd40e2a0..5dcd6ebbd1 100644
--- a/lib/spack/spack/cmd/external.py
+++ b/lib/spack/spack/cmd/external.py
@@ -2,22 +2,24 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
from __future__ import print_function
-from collections import defaultdict, namedtuple
+
import argparse
import os
import re
-import six
+import sys
+from collections import defaultdict, namedtuple
+import llnl.util.filesystem
+import llnl.util.tty as tty
+import llnl.util.tty.colify as colify
+import six
import spack
import spack.error
-import llnl.util.tty as tty
-import spack.util.spack_yaml as syaml
import spack.util.environment
-import llnl.util.filesystem
+import spack.util.spack_yaml as syaml
-description = "add external packages to Spack configuration"
+description = "manage external packages in Spack configuration"
section = "config"
level = "short"
@@ -26,12 +28,18 @@ def setup_parser(subparser):
sp = subparser.add_subparsers(
metavar='SUBCOMMAND', dest='external_command')
- find_parser = sp.add_parser('find', help=external_find.__doc__)
+ find_parser = sp.add_parser(
+ 'find', help='add external packages to packages.yaml'
+ )
find_parser.add_argument(
'--not-buildable', action='store_true', default=False,
help="packages with detected externals won't be built with Spack")
find_parser.add_argument('packages', nargs=argparse.REMAINDER)
+ sp.add_parser(
+ 'list', help='list detectable packages, by repository and name'
+ )
+
def is_executable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
@@ -74,19 +82,37 @@ def _generate_pkg_config(external_pkg_entries):
This does not generate the entire packages.yaml. For example, given some
external entries for the CMake package, this could return::
- { 'paths': {
- 'cmake@3.17.1': '/opt/cmake-3.17.1/',
- 'cmake@3.16.5': '/opt/cmake-3.16.5/'
- }
+ {
+ 'externals': [{
+ 'spec': 'cmake@3.17.1',
+ 'prefix': '/opt/cmake-3.17.1/'
+ }, {
+ 'spec': 'cmake@3.16.5',
+ 'prefix': '/opt/cmake-3.16.5/'
+ }]
}
"""
- paths_dict = syaml.syaml_dict()
+
+ pkg_dict = syaml.syaml_dict()
+ pkg_dict['externals'] = []
for e in external_pkg_entries:
if not _spec_is_valid(e.spec):
continue
- paths_dict[str(e.spec)] = e.base_dir
- pkg_dict = syaml.syaml_dict()
- pkg_dict['paths'] = paths_dict
+
+ external_items = [('spec', str(e.spec)), ('prefix', e.base_dir)]
+ if e.spec.external_modules:
+ external_items.append(('modules', e.spec.external_modules))
+
+ if e.spec.extra_attributes:
+ external_items.append(
+ ('extra_attributes',
+ syaml.syaml_dict(e.spec.extra_attributes.items()))
+ )
+
+ pkg_dict['externals'].append(
+ syaml.syaml_dict(external_items)
+ )
return pkg_dict
@@ -234,7 +260,7 @@ def _get_external_packages(packages_to_check, system_path_to_exe=None):
if not specs:
tty.debug(
- 'The following executables in {0} were decidedly not'
+ 'The following executables in {0} were decidedly not '
'part of the package {1}: {2}'
.format(prefix, pkg.name, ', '.join(exes_in_prefix))
)
@@ -259,13 +285,33 @@ def _get_external_packages(packages_to_check, system_path_to_exe=None):
else:
resolved_specs[spec] = prefix
+ try:
+ spec.validate_detection()
+ except Exception as e:
+ msg = ('"{0}" has been detected on the system but will '
+ 'not be added to packages.yaml [reason={1}]')
+ tty.warn(msg.format(spec, str(e)))
+ continue
+
+ if spec.external_path:
+ pkg_prefix = spec.external_path
+
pkg_to_entries[pkg.name].append(
ExternalPackageEntry(spec=spec, base_dir=pkg_prefix))
return pkg_to_entries
-def external(parser, args):
- action = {'find': external_find}
+def external_list(args):
+    # Trigger a read of all packages; this might take a long time.
+ list(spack.repo.path.all_packages())
+ # Print all the detectable packages
+ tty.msg("Detectable packages per repository")
+ for namespace, pkgs in sorted(spack.package.detectable_packages.items()):
+ print("Repository:", namespace)
+ colify.colify(pkgs, indent=4, output=sys.stdout)
+
+def external(parser, args):
+ action = {'find': external_find, 'list': external_list}
action[args.external_command](args)
diff --git a/lib/spack/spack/cmd/help.py b/lib/spack/spack/cmd/help.py
index 3af82312df..d730d4f0fe 100644
--- a/lib/spack/spack/cmd/help.py
+++ b/lib/spack/spack/cmd/help.py
@@ -35,6 +35,10 @@ spec expression syntax:
@g{%compiler@version} build with specific compiler version
@g{%compiler@min:max} specific version range (see above)
+ compiler flags:
+ @g{cflags="flags"} cppflags, cflags, cxxflags,
+ fflags, ldflags, ldlibs
+
variants:
@B{+variant} enable <variant>
@r{-variant} or @r{~variant} disable <variant>
@@ -42,7 +46,7 @@ spec expression syntax:
@B{variant=value1,value2,value3} set multi-value <variant> values
architecture variants:
- @m{platform=platform} linux, darwin, cray, bgq, etc.
+ @m{platform=platform} linux, darwin, cray, etc.
@m{os=operating_system} specific <operating_system>
@m{target=target} specific <target> processor
@m{arch=platform-os-target} shortcut for all three above
diff --git a/lib/spack/spack/cmd/spec.py b/lib/spack/spack/cmd/spec.py
index fd03f09e57..316ec1c35a 100644
--- a/lib/spack/spack/cmd/spec.py
+++ b/lib/spack/spack/cmd/spec.py
@@ -34,7 +34,7 @@ for further documentation regarding the spec syntax, see:
const='yaml', help='print concrete spec as YAML')
subparser.add_argument(
'-j', '--json', action='store_const', dest='format', default=None,
- const='json', help='print concrete spec as YAML')
+ const='json', help='print concrete spec as JSON')
subparser.add_argument(
'-c', '--cover', action='store',
default='nodes', choices=['nodes', 'edges', 'paths'],
diff --git a/lib/spack/spack/compiler.py b/lib/spack/spack/compiler.py
index c59c654803..f1a9263c76 100644
--- a/lib/spack/spack/compiler.py
+++ b/lib/spack/spack/compiler.py
@@ -201,7 +201,7 @@ class Compiler(object):
fc_names = []
# Optional prefix regexes for searching for this type of compiler.
- # Prefixes are sometimes used for toolchains, e.g. 'powerpc-bgq-linux-'
+ # Prefixes are sometimes used for toolchains
prefixes = []
# Optional suffix regexes for searching for this type of compiler.
diff --git a/lib/spack/spack/compilers/__init__.py b/lib/spack/spack/compilers/__init__.py
index 36291a8f63..c11bafe47d 100644
--- a/lib/spack/spack/compilers/__init__.py
+++ b/lib/spack/spack/compilers/__init__.py
@@ -576,9 +576,7 @@ def arguments_to_detect_version_fn(operating_system, paths):
)
command_arguments.append(detect_version_args)
- # Reverse it here so that the dict creation (last insert wins)
- # does not spoil the intended precedence.
- return reversed(command_arguments)
+ return command_arguments
fn = getattr(
operating_system, 'arguments_to_detect_version_fn', _default
diff --git a/lib/spack/spack/compilers/fj.py b/lib/spack/spack/compilers/fj.py
index 3747d49d9b..c2cc66e110 100644
--- a/lib/spack/spack/compilers/fj.py
+++ b/lib/spack/spack/compilers/fj.py
@@ -26,7 +26,7 @@ class Fj(spack.compiler.Compiler):
'fc': 'fj/frt'}
version_argument = '--version'
- version_regex = r'\((?:FCC|FRT)\) ([\d.]+)'
+ version_regex = r'\((?:FCC|FRT)\) ([a-z\d.]+)'
required_libs = ['libfj90i', 'libfj90f', 'libfjsrcinfo']
diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py
index a3d8101cad..425fcec8ee 100644
--- a/lib/spack/spack/config.py
+++ b/lib/spack/spack/config.py
@@ -30,6 +30,7 @@ schemas are in submodules of :py:mod:`spack.schema`.
"""
+import collections
import copy
import os
import re
@@ -352,6 +353,7 @@ class Configuration(object):
self.scopes = OrderedDict()
for scope in scopes:
self.push_scope(scope)
+ self.format_updates = collections.defaultdict(list)
def push_scope(self, scope):
"""Add a higher precedence scope to the Configuration."""
@@ -440,7 +442,7 @@ class Configuration(object):
for scope in self.scopes.values():
scope.clear()
- def update_config(self, section, update_data, scope=None):
+ def update_config(self, section, update_data, scope=None, force=False):
"""Update the configuration file for a particular scope.
Overwrites contents of a section in a scope with update_data,
@@ -449,7 +451,26 @@ class Configuration(object):
update_data should have the top-level section name stripped off
(it will be re-added). Data itself can be a list, dict, or any
other yaml-ish structure.
+
+ Configuration scopes that are still written in an old schema
+ format will fail to update unless ``force`` is True.
+
+ Args:
+ section (str): section of the configuration to be updated
+ update_data (dict): data to be used for the update
+ scope (str): scope to be updated
+            force (bool): force the update
"""
+ if self.format_updates.get(section) and not force:
+ msg = ('The "{0}" section of the configuration needs to be written'
+ ' to disk, but is currently using a deprecated format. '
+ 'Please update it using:\n\n'
+                   '\tspack config [--scope=<scope>] update {0}\n\n'
+ 'Note that previous versions of Spack will not be able to '
+ 'use the updated configuration.')
+ msg = msg.format(section)
+ raise RuntimeError(msg)
+
_validate_section_name(section) # validate section name
scope = self._validate_scope(scope) # get ConfigScope object
@@ -514,6 +535,15 @@ class Configuration(object):
if section not in data:
continue
+            # We might be reading configuration files in an old format,
+            # so update the data in memory if need be.
+ changed = _update_in_memory(data, section)
+ if changed:
+ self.format_updates[section].append(scope)
+ msg = ('OUTDATED CONFIGURATION FILE '
+ '[section={0}, scope={1}, dir={2}]')
+ tty.debug(msg.format(section, scope.name, scope.path))
+
merged_section = merge_yaml(merged_section, data)
# no config files -- empty config.
@@ -723,7 +753,7 @@ def get(path, default=None, scope=None):
def set(path, value, scope=None):
- """Convenience function for getting single values in config files.
+ """Convenience function for setting single values in config files.
Accepts the path syntax described in ``get()``.
"""
@@ -999,6 +1029,41 @@ def default_list_scope():
return None
+def _update_in_memory(data, section):
+ """Update the format of the configuration data in memory.
+
+    This function assumes the section is valid (i.e. validation
+    is the responsibility of the caller).
+
+ Args:
+ data (dict): configuration data
+ section (str): section of the configuration to update
+
+ Returns:
+ True if the data was changed, False otherwise
+ """
+ update_fn = ensure_latest_format_fn(section)
+ changed = update_fn(data[section])
+ return changed
+
+
+def ensure_latest_format_fn(section):
+ """Return a function that takes as input a dictionary read from
+ a configuration file and update it to the latest format.
+
+ The function returns True if there was any update, False otherwise.
+
+ Args:
+ section (str): section of the configuration e.g. "packages",
+ "config", etc.
+ """
+ # The line below is based on the fact that every module we need
+ # is already imported at the top level
+ section_module = getattr(spack.schema, section)
+ update_fn = getattr(section_module, 'update', lambda x: False)
+ return update_fn
+
+
class ConfigError(SpackError):
"""Superclass for all Spack config related errors."""
diff --git a/lib/spack/spack/dependency.py b/lib/spack/spack/dependency.py
index e6b6c9cedc..fe7d6b5983 100644
--- a/lib/spack/spack/dependency.py
+++ b/lib/spack/spack/dependency.py
@@ -17,6 +17,26 @@ all_deptypes = ('build', 'link', 'run', 'test')
default_deptype = ('build', 'link')
+def deptype_chars(*type_tuples):
+ """Create a string representing deptypes for many dependencies.
+
+ The string will be some subset of 'blrt', like 'bl ', 'b t', or
+ ' lr ' where each letter in 'blrt' stands for 'build', 'link',
+ 'run', and 'test' (the dependency types).
+
+ For a single dependency, this just indicates that the dependency has
+ the indicated deptypes. For a list of dependnecies, this shows
+ whether ANY dpeendency in the list has the deptypes (so the deptypes
+ are merged).
+ """
+ types = set()
+ for t in type_tuples:
+ if t:
+ types.update(t)
+
+ return ''.join(t[0] if t in types else ' ' for t in all_deptypes)
+
+
def canonical_deptype(deptype):
"""Convert deptype to a canonical sorted tuple, or raise ValueError.
@@ -108,3 +128,8 @@ class Dependency(object):
self.patches[cond].extend(other.patches[cond])
else:
self.patches[cond] = other.patches[cond]
+
+ def __repr__(self):
+ types = deptype_chars(self.type)
+ return '<Dependency: %s -> %s [%s]>' % (
+ self.pkg.name, self.spec, types)
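
A few worked examples of deptype_chars, following the merge semantics
described in its docstring:

    from spack.dependency import deptype_chars

    print(deptype_chars(('build', 'link')))            # 'bl  '
    print(deptype_chars(('build',), ('run', 'test')))  # 'b rt'
    print(deptype_chars(None, ('link',)))              # ' l  '
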
diff --git a/lib/spack/spack/environment.py b/lib/spack/spack/environment.py
index 99aa3963d5..1423acbddb 100644
--- a/lib/spack/spack/environment.py
+++ b/lib/spack/spack/environment.py
@@ -946,6 +946,7 @@ class Environment(object):
"Not found: {0}".format(query_spec))
old_specs = set(self.user_specs)
+ new_specs = set()
for spec in matches:
if spec in list_to_change:
try:
@@ -1472,6 +1473,18 @@ class Environment(object):
writing if True.
"""
+        # Intercept environments not using the latest schema format
+        # and prevent them from being modified
+ manifest_exists = os.path.exists(self.manifest_path)
+ if manifest_exists and not is_latest_format(self.manifest_path):
+ msg = ('The environment "{0}" needs to be written to disk, but '
+ 'is currently using a deprecated format. Please update it '
+ 'using:\n\n'
+ '\tspack env update {0}\n\n'
+ 'Note that previous versions of Spack will not be able to '
+ 'use the updated configuration.')
+ raise RuntimeError(msg.format(self.name))
+
# ensure path in var/spack/environments
fs.mkdirp(self.path)
@@ -1722,5 +1735,92 @@ def deactivate_config_scope(env):
spack.config.config.remove_scope(scope.name)
+def manifest_file(env_name_or_dir):
+ """Return the absolute path to a manifest file given the environment
+ name or directory.
+
+ Args:
+ env_name_or_dir (str): either the name of a valid environment
+ or a directory where a manifest file resides
+
+ Raises:
+ AssertionError: if the environment is not found
+ """
+ env_dir = None
+ if is_env_dir(env_name_or_dir):
+ env_dir = os.path.abspath(env_name_or_dir)
+ elif exists(env_name_or_dir):
+ env_dir = os.path.abspath(root(env_name_or_dir))
+
+ assert env_dir, "environment not found [env={0}]".format(env_name_or_dir)
+ return os.path.join(env_dir, manifest_name)
+
+
+def update_yaml(manifest, backup_file):
+ """Update a manifest file from an old format to the current one.
+
+ Args:
+ manifest (str): path to a manifest file
+        backup_file (str): file where the original manifest is backed up
+
+ Returns:
+ True if the manifest was updated, False otherwise.
+
+ Raises:
+ AssertionError: in case anything goes wrong during the update
+ """
+ # Check if the environment needs update
+ with open(manifest) as f:
+ data = syaml.load(f)
+
+ top_level_key = _top_level_key(data)
+ needs_update = spack.schema.env.update(data[top_level_key])
+ if not needs_update:
+ msg = "No update needed [manifest={0}]".format(manifest)
+ tty.debug(msg)
+ return False
+
+ # Copy environment to a backup file and update it
+ msg = ('backup file "{0}" already exists on disk. Check its content '
+ 'and remove it before trying to update again.')
+ assert not os.path.exists(backup_file), msg.format(backup_file)
+
+ shutil.copy(manifest, backup_file)
+ with open(manifest, 'w') as f:
+ _write_yaml(data, f)
+ return True
+
+
+def _top_level_key(data):
+ """Return the top level key used in this environment
+
+ Args:
+ data (dict): raw yaml data of the environment
+
+ Returns:
+ Either 'spack' or 'env'
+ """
+    msg = ('cannot find top level attribute "spack" or "env" '
+           'in the environment')
+ assert any(x in data for x in ('spack', 'env')), msg
+ if 'spack' in data:
+ return 'spack'
+ return 'env'
+
+
+def is_latest_format(manifest):
+ """Return True if the manifest file is at the latest schema format,
+ False otherwise.
+
+ Args:
+ manifest (str): manifest file to be analyzed
+ """
+ with open(manifest) as f:
+ data = syaml.load(f)
+ top_level_key = _top_level_key(data)
+ changed = spack.schema.env.update(data[top_level_key])
+ return not changed
+
+
class SpackEnvironmentError(spack.error.SpackError):
"""Superclass for all errors to do with Spack environments."""
diff --git a/lib/spack/spack/installer.py b/lib/spack/spack/installer.py
index 2d4b488ac3..b8bbe7ce3f 100644
--- a/lib/spack/spack/installer.py
+++ b/lib/spack/spack/installer.py
@@ -272,9 +272,9 @@ def _process_external_package(pkg, explicit):
pre = '{s.name}@{s.version} :'.format(s=pkg.spec)
spec = pkg.spec
- if spec.external_module:
+ if spec.external_modules:
tty.msg('{0} has external module in {1}'
- .format(pre, spec.external_module))
+ .format(pre, spec.external_modules))
tty.debug('{0} is actually installed in {1}'
.format(pre, spec.external_path))
else:
diff --git a/lib/spack/spack/main.py b/lib/spack/spack/main.py
index 33c8d1f4c6..d19578007b 100644
--- a/lib/spack/spack/main.py
+++ b/lib/spack/spack/main.py
@@ -702,16 +702,16 @@ def main(argv=None):
if stored_var_name in os.environ:
os.environ[var] = os.environ[stored_var_name]
+ # make spack.config aware of any command line configuration scopes
+ if args.config_scopes:
+ spack.config.command_line_scopes = args.config_scopes
+
# activate an environment if one was specified on the command line
if not args.no_env:
env = ev.find_environment(args)
if env:
ev.activate(env, args.use_env_repo, add_view=False)
- # make spack.config aware of any command line configuration scopes
- if args.config_scopes:
- spack.config.command_line_scopes = args.config_scopes
-
if args.print_shell_vars:
print_setup_info(*args.print_shell_vars.split(','))
return 0
diff --git a/lib/spack/spack/operating_systems/cnk.py b/lib/spack/spack/operating_systems/cnk.py
deleted file mode 100644
index 53a12785da..0000000000
--- a/lib/spack/spack/operating_systems/cnk.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
-# Spack Project Developers. See the top-level COPYRIGHT file for details.
-#
-# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
-from spack.architecture import OperatingSystem
-
-
-class Cnk(OperatingSystem):
- """ Compute Node Kernel (CNK) is the node level operating system for
- the IBM Blue Gene series of supercomputers. The compute nodes of the
- Blue Gene family of supercomputers run CNK, a lightweight kernel that
- runs on each node and supports one application running for one user
- on that node."""
-
- def __init__(self):
- name = 'cnk'
- version = '1'
- super(Cnk, self).__init__(name, version)
-
- def __str__(self):
- return self.name
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 4febfb1b47..d5cb3065a8 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -11,6 +11,7 @@ packages.
"""
import base64
+import collections
import contextlib
import copy
import functools
@@ -22,19 +23,15 @@ import shutil
import sys
import textwrap
import time
-from six import StringIO
-from six import string_types
-from six import with_metaclass
-from ordereddict_backport import OrderedDict
+import traceback
-import llnl.util.tty as tty
+import six
-import spack.config
-import spack.paths
-import spack.store
+import llnl.util.tty as tty
import spack.compilers
-import spack.directives
+import spack.config
import spack.dependency
+import spack.directives
import spack.directory_layout
import spack.error
import spack.fetch_strategy as fs
@@ -42,15 +39,19 @@ import spack.hooks
import spack.mirror
import spack.mixins
import spack.multimethod
+import spack.paths
import spack.repo
+import spack.store
import spack.url
import spack.util.environment
import spack.util.web
-import spack.multimethod
-
from llnl.util.filesystem import mkdirp, touch, working_dir
from llnl.util.lang import memoized
from llnl.util.link_tree import LinkTree
+from ordereddict_backport import OrderedDict
+from six import StringIO
+from six import string_types
+from six import with_metaclass
from spack.filesystem_view import YamlFilesystemView
from spack.installer import \
install_args_docstring, PackageInstaller, InstallError
@@ -140,7 +141,104 @@ class InstallPhase(object):
return other
+#: Registers which are the detectable packages, by repo and package name
+#: Need a pass of package repositories to be filled.
+detectable_packages = collections.defaultdict(list)
+
+
+class DetectablePackageMeta(object):
+ """Check if a package is detectable and add default implementations
+ for the detection function.
+ """
+ def __init__(cls, name, bases, attr_dict):
+ # If a package has the executables attribute then it's
+ # assumed to be detectable
+ if hasattr(cls, 'executables'):
+ @classmethod
+ def determine_spec_details(cls, prefix, exes_in_prefix):
+ """Allow ``spack external find ...`` to locate installations.
+
+ Args:
+ prefix (str): the directory containing the executables
+ exes_in_prefix (set): the executables that match the regex
+
+ Returns:
+ The list of detected specs for this package
+ """
+ exes_by_version = collections.defaultdict(list)
+ # The default filter function is the identity function for the
+ # list of executables
+ filter_fn = getattr(cls, 'filter_detected_exes',
+ lambda x, exes: exes)
+ exes_in_prefix = filter_fn(prefix, exes_in_prefix)
+ for exe in exes_in_prefix:
+ try:
+ version_str = cls.determine_version(exe)
+ if version_str:
+ exes_by_version[version_str].append(exe)
+ except Exception as e:
+ msg = ('An error occurred when trying to detect '
+ 'the version of "{0}" [{1}]')
+ tty.debug(msg.format(exe, str(e)))
+
+ specs = []
+ for version_str, exes in exes_by_version.items():
+ variants = cls.determine_variants(exes, version_str)
+ # Normalize output to list
+ if not isinstance(variants, list):
+ variants = [variants]
+
+ for variant in variants:
+ if isinstance(variant, six.string_types):
+ variant = (variant, {})
+ variant_str, extra_attributes = variant
+ spec_str = '{0}@{1} {2}'.format(
+ cls.name, version_str, variant_str
+ )
+
+ # Pop a few reserved keys from extra attributes, since
+ # they have a different semantics
+ external_path = extra_attributes.pop('prefix', None)
+ external_modules = extra_attributes.pop(
+ 'modules', None
+ )
+ spec = spack.spec.Spec(
+ spec_str,
+ external_path=external_path,
+ external_modules=external_modules
+ )
+ specs.append(spack.spec.Spec.from_detection(
+ spec, extra_attributes=extra_attributes
+ ))
+
+ return sorted(specs)
+
+ @classmethod
+ def determine_variants(cls, exes, version_str):
+ return ''
+
+ # Register the class as a detectable package
+ detectable_packages[cls.namespace].append(cls.name)
+
+ # Attach function implementations to the detectable class
+ default = False
+ if not hasattr(cls, 'determine_spec_details'):
+ default = True
+ cls.determine_spec_details = determine_spec_details
+
+ if default and not hasattr(cls, 'determine_version'):
+ msg = ('the package "{0}" in the "{1}" repo needs to define'
+ ' the "determine_version" method to be detectable')
+            raise NotImplementedError(msg.format(cls.name, cls.namespace))
+
+ if default and not hasattr(cls, 'determine_variants'):
+ cls.determine_variants = determine_variants
+
+ super(DetectablePackageMeta, cls).__init__(name, bases, attr_dict)
+
+
class PackageMeta(
+ DetectablePackageMeta,
spack.directives.DirectiveMeta,
spack.mixins.PackageMixinsMeta,
spack.multimethod.MultiMethodMeta
@@ -1747,7 +1845,23 @@ class PackageBase(with_metaclass(PackageMeta, PackageViewMixin, object)):
with spack.store.db.prefix_write_lock(spec):
if pkg is not None:
- spack.hooks.pre_uninstall(spec)
+ try:
+ spack.hooks.pre_uninstall(spec)
+ except Exception as error:
+ if force:
+ error_msg = (
+ "One or more pre_uninstall hooks have failed"
+ " for {0}, but Spack is continuing with the"
+ " uninstall".format(str(spec)))
+ if isinstance(error, spack.error.SpackError):
+ error_msg += (
+ "\n\nError message: {0}".format(str(error)))
+ tty.warn(error_msg)
+ # Note that if the uninstall succeeds then we won't be
+ # seeing this error again and won't have another chance
+ # to run the hook.
+ else:
+ raise
# Uninstalling in Spack only requires removing the prefix.
if not spec.external:
@@ -1768,7 +1882,20 @@ class PackageBase(with_metaclass(PackageMeta, PackageViewMixin, object)):
spack.store.db.remove(spec)
if pkg is not None:
- spack.hooks.post_uninstall(spec)
+ try:
+ spack.hooks.post_uninstall(spec)
+ except Exception:
+ # If there is a failure here, this is our only chance to do
+ # something about it: at this point the Spec has been removed
+ # from the DB and prefix, so the post-uninstallation hooks
+ # will not have another chance to run.
+ error_msg = (
+ "One or more post-uninstallation hooks failed for"
+ " {0}, but the prefix has been removed (if it is not"
+ " external).".format(str(spec)))
+ tb_msg = traceback.format_exc()
+ error_msg += "\n\nThe error:\n\n{0}".format(tb_msg)
+ tty.warn(error_msg)
tty.msg('Successfully uninstalled {0}'.format(spec.short_spec))
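
For package authors, DetectablePackageMeta means that detection support
reduces to declaring executables and implementing determine_version
(determine_variants is optional). A hypothetical sketch, with an invented
tool name, regex, and output format:

    import re

    from spack import *


    class Mytool(Package):
        """Hypothetical recipe showing the detection hooks."""

        # Regexes matched by 'spack external find' against files in PATH
        executables = ['^mytool$']

        @classmethod
        def determine_version(cls, exe):
            # Returning a falsy value (or raising) excludes the executable
            output = Executable(exe)('--version', output=str, error=str)
            match = re.search(r'mytool version (\S+)', output)
            return match.group(1) if match else None

        @classmethod
        def determine_variants(cls, exes, version_str):
            # May return a variant string or a (variants, extra attributes)
            # tuple, as handled by determine_spec_details above
            return ''
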
diff --git a/lib/spack/spack/package_prefs.py b/lib/spack/spack/package_prefs.py
index 67325fc7ae..bdedfcfb3d 100644
--- a/lib/spack/spack/package_prefs.py
+++ b/lib/spack/spack/package_prefs.py
@@ -2,7 +2,6 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
import stat
from six import string_types
@@ -158,7 +157,7 @@ def spec_externals(spec):
"""Return a list of external specs (w/external directory path filled in),
one for each known external installation."""
# break circular import.
- from spack.util.module_cmd import get_path_from_module # NOQA: ignore=F401
+ from spack.util.module_cmd import path_from_modules # NOQA: ignore=F401
allpkgs = spack.config.get('packages')
names = set([spec.name])
@@ -167,24 +166,24 @@ def spec_externals(spec):
external_specs = []
for name in names:
pkg_config = allpkgs.get(name, {})
- pkg_paths = pkg_config.get('paths', {})
- pkg_modules = pkg_config.get('modules', {})
- if (not pkg_paths) and (not pkg_modules):
- continue
-
- for external_spec, path in pkg_paths.items():
- external_spec = spack.spec.Spec(
- external_spec, external_path=canonicalize_path(path))
- if external_spec.satisfies(spec):
- external_specs.append(external_spec)
-
- for external_spec, module in pkg_modules.items():
- external_spec = spack.spec.Spec(
- external_spec, external_module=module)
+ pkg_externals = pkg_config.get('externals', [])
+ for entry in pkg_externals:
+ spec_str = entry['spec']
+ external_path = entry.get('prefix', None)
+ if external_path:
+ external_path = canonicalize_path(external_path)
+ external_modules = entry.get('modules', None)
+ external_spec = spack.spec.Spec.from_detection(
+ spack.spec.Spec(
+ spec_str,
+ external_path=external_path,
+ external_modules=external_modules
+ ), extra_attributes=entry.get('extra_attributes', {})
+ )
if external_spec.satisfies(spec):
external_specs.append(external_spec)
- # defensively copy returned specs
+ # Defensively copy returned specs
return [s.copy() for s in external_specs]
diff --git a/lib/spack/spack/pkgkit.py b/lib/spack/spack/pkgkit.py
index e657144bb4..e2a29894f7 100644
--- a/lib/spack/spack/pkgkit.py
+++ b/lib/spack/spack/pkgkit.py
@@ -39,7 +39,7 @@ from spack.mixins import filter_compiler_wrappers
from spack.version import Version, ver
-from spack.spec import Spec
+from spack.spec import Spec, InvalidSpecDetected
from spack.dependency import all_deptypes
diff --git a/lib/spack/spack/platforms/bgq.py b/lib/spack/spack/platforms/bgq.py
deleted file mode 100644
index 64d71743c4..0000000000
--- a/lib/spack/spack/platforms/bgq.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
-# Spack Project Developers. See the top-level COPYRIGHT file for details.
-#
-# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
-import os
-from spack.architecture import Platform, Target
-from spack.operating_systems.linux_distro import LinuxDistro
-from spack.operating_systems.cnk import Cnk
-
-
-class Bgq(Platform):
- priority = 30
- front_end = 'power7'
- back_end = 'ppc64'
- default = 'ppc64'
-
- def __init__(self):
- ''' IBM Blue Gene/Q system platform.'''
-
- super(Bgq, self).__init__('bgq')
-
- self.add_target(self.front_end, Target(self.front_end))
- self.add_target(self.back_end, Target(self.back_end))
-
- front_distro = LinuxDistro()
- back_distro = Cnk()
-
- self.front_os = str(front_distro)
- self.back_os = str(back_distro)
- self.default_os = self.back_os
-
- self.add_operating_system(str(front_distro), front_distro)
- self.add_operating_system(str(back_distro), back_distro)
-
- @classmethod
- def detect(cls):
- return os.path.exists('/bgsys')
diff --git a/lib/spack/spack/platforms/cray.py b/lib/spack/spack/platforms/cray.py
index c6d367e9a6..684109f3e0 100644
--- a/lib/spack/spack/platforms/cray.py
+++ b/lib/spack/spack/platforms/cray.py
@@ -111,14 +111,14 @@ class Cray(Platform):
@classmethod
def detect(cls):
"""
- Detect whether this system is a cray machine.
+ Detect whether this system is a Cray machine.
- We detect the cray platform based on the availability through `module`
- of the cray programming environment. If this environment is available,
- we can use it to find compilers, target modules, etc. If the cray
+ We detect the Cray platform based on the availability through `module`
+ of the Cray programming environment. If this environment is available,
+ we can use it to find compilers, target modules, etc. If the Cray
programming environment is not available via modules, then we will
- treat it as a standard linux system, as the cray compiler wrappers
- and other componenets of the cray programming environment are
+ treat it as a standard linux system, as the Cray compiler wrappers
+ and other components of the Cray programming environment are
irrelevant without module support.
"""
return 'opt/cray' in os.environ.get('MODULEPATH', '')
diff --git a/lib/spack/spack/schema/__init__.py b/lib/spack/spack/schema/__init__.py
index 755e3d9086..38cc36caef 100644
--- a/lib/spack/spack/schema/__init__.py
+++ b/lib/spack/spack/schema/__init__.py
@@ -90,11 +90,15 @@ def _make_validator():
is_error = deprecated['error']
if not is_error:
for entry in deprecated_properties:
- llnl.util.tty.warn(msg.format(property=entry))
+ llnl.util.tty.warn(
+ msg.format(property=entry, entry=instance[entry])
+ )
else:
import jsonschema
for entry in deprecated_properties:
- yield jsonschema.ValidationError(msg.format(property=entry))
+ yield jsonschema.ValidationError(
+ msg.format(property=entry, entry=instance[entry])
+ )
return jsonschema.validators.extend(
jsonschema.Draft4Validator, {
diff --git a/lib/spack/spack/schema/env.py b/lib/spack/spack/schema/env.py
index 6ead76416b..18a2048557 100644
--- a/lib/spack/spack/schema/env.py
+++ b/lib/spack/spack/schema/env.py
@@ -8,9 +8,12 @@
.. literalinclude:: _spack_root/lib/spack/spack/schema/env.py
:lines: 36-
"""
+import warnings
+
from llnl.util.lang import union_dicts
import spack.schema.merged
+import spack.schema.packages
import spack.schema.projections
#: legal first keys in the schema
@@ -133,3 +136,22 @@ schema = {
}
}
}
+
+
+def update(data):
+ """Update the data in place to remove deprecated properties.
+
+ Args:
+ data (dict): dictionary to be updated
+
+ Returns:
+ True if data was changed, False otherwise
+ """
+ if 'include' in data:
+ msg = ("included configuration files should be updated manually"
+ " [files={0}]")
+ warnings.warn(msg.format(', '.join(data['include'])))
+
+ if 'packages' in data:
+ return spack.schema.packages.update(data['packages'])
+ return False
diff --git a/lib/spack/spack/schema/packages.py b/lib/spack/spack/schema/packages.py
index 4984471c73..16a8a223ef 100644
--- a/lib/spack/spack/schema/packages.py
+++ b/lib/spack/spack/schema/packages.py
@@ -2,7 +2,6 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
"""Schema for packages.yaml configuration files.
.. literalinclude:: _spack_root/lib/spack/spack/schema/packages.py
@@ -59,10 +58,6 @@ properties = {
},
},
},
- 'modules': {
- 'type': 'object',
- 'default': {},
- },
'providers': {
'type': 'object',
'default': {},
@@ -72,17 +67,39 @@ properties = {
'type': 'array',
'default': [],
'items': {'type': 'string'}, }, }, },
- 'paths': {
- 'type': 'object',
- 'default': {},
- },
'variants': {
'oneOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}],
},
+ 'externals': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'spec': {'type': 'string'},
+ 'prefix': {'type': 'string'},
+ 'modules': {'type': 'array',
+ 'items': {'type': 'string'}},
+ 'extra_attributes': {'type': 'object'}
+ },
+ 'additionalProperties': True,
+ 'required': ['spec']
+ }
+ },
+ # Deprecated properties, will trigger an error with a
+ # message telling how to update.
+ 'paths': {'type': 'object'},
+ 'modules': {'type': 'object'},
},
+ 'deprecatedProperties': {
+ 'properties': ['modules', 'paths'],
+ 'message': 'the attribute "{property}" in the "packages" '
+ 'section of the configuration has been '
+ 'deprecated [entry={entry}]',
+ 'error': False
+ }
},
},
},
@@ -97,3 +114,33 @@ schema = {
'additionalProperties': False,
'properties': properties,
}
+
+
+def update(data):
+ """Update the data in place to remove deprecated properties.
+
+ Args:
+ data (dict): dictionary to be updated
+
+ Returns:
+ True if data was changed, False otherwise
+ """
+ changed = False
+ for cfg_object in data.values():
+ externals = []
+ paths = cfg_object.pop('paths', {})
+ for spec, prefix in paths.items():
+ externals.append({
+ 'spec': str(spec),
+ 'prefix': str(prefix)
+ })
+ modules = cfg_object.pop('modules', {})
+ for spec, module in modules.items():
+ externals.append({
+ 'spec': str(spec),
+ 'modules': [str(module)]
+ })
+ if externals:
+ changed = True
+ cfg_object['externals'] = externals
+ return changed
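
Concretely, update rewrites the two deprecated layouts into the new
externals list. A worked example with hypothetical entries:

    data = {
        'cmake': {'paths': {'cmake@3.17.1': '/opt/cmake-3.17.1'}},
        'openmpi': {'modules': {'openmpi@4.0.3': 'openmpi/4.0.3'}},
    }
    assert update(data)  # True: both entries were rewritten
    assert data['cmake']['externals'] == [
        {'spec': 'cmake@3.17.1', 'prefix': '/opt/cmake-3.17.1'}]
    assert data['openmpi']['externals'] == [
        {'spec': 'openmpi@4.0.3', 'modules': ['openmpi/4.0.3']}]
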
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 175d160855..047764c8ae 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -13,7 +13,7 @@ The syntax looks like this:
.. code-block:: sh
- $ spack install mpileaks ^openmpi @1.2:1.4 +debug %intel @12.1 =bgqos_0
+ $ spack install mpileaks ^openmpi @1.2:1.4 +debug %intel @12.1 target=zen
0 1 2 3 4 5 6
The first part of this is the command, 'spack install'. The rest of the
@@ -959,7 +959,7 @@ class Spec(object):
def __init__(self, spec_like=None,
normal=False, concrete=False, external_path=None,
- external_module=None, full_hash=None):
+ external_modules=None, full_hash=None):
"""Create a new Spec.
Arguments:
@@ -988,8 +988,6 @@ class Spec(object):
self.variants = vt.VariantMap(self)
self.architecture = None
self.compiler = None
- self.external_path = None
- self.external_module = None
self.compiler_flags = FlagMap(self)
self._dependents = DependencyMap()
self._dependencies = DependencyMap()
@@ -1010,9 +1008,13 @@ class Spec(object):
self._normal = normal
self._concrete = concrete
self.external_path = external_path
- self.external_module = external_module
+ self.external_modules = external_modules
self._full_hash = full_hash
+ # This attribute is used to store custom information for
+        # external specs. None signals that it has not been set yet.
+ self.extra_attributes = None
+
if isinstance(spec_like, six.string_types):
spec_list = SpecParser(self).parse(spec_like)
if len(spec_list) > 1:
@@ -1025,7 +1027,7 @@ class Spec(object):
@property
def external(self):
- return bool(self.external_path) or bool(self.external_module)
+ return bool(self.external_path) or bool(self.external_modules)
def get_dependency(self, name):
dep = self._dependencies.get(name)
@@ -1526,7 +1528,8 @@ class Spec(object):
if self.external:
d['external'] = syaml.syaml_dict([
('path', self.external_path),
- ('module', self.external_module),
+ ('module', self.external_modules),
+ ('extra_attributes', self.extra_attributes)
])
if not self._concrete:
@@ -1695,21 +1698,21 @@ class Spec(object):
for name in FlagMap.valid_compiler_flags():
spec.compiler_flags[name] = []
+ spec.external_path = None
+ spec.external_modules = None
if 'external' in node:
- spec.external_path = None
- spec.external_module = None
# This conditional is needed because sometimes this function is
# called with a node already constructed that contains a 'versions'
# and 'external' field. Related to virtual packages provider
# indexes.
if node['external']:
spec.external_path = node['external']['path']
- spec.external_module = node['external']['module']
- if spec.external_module is False:
- spec.external_module = None
- else:
- spec.external_path = None
- spec.external_module = None
+ spec.external_modules = node['external']['module']
+ if spec.external_modules is False:
+ spec.external_modules = None
+ spec.extra_attributes = node['external'].get(
+ 'extra_attributes', syaml.syaml_dict()
+ )
# specs read in are concrete unless marked abstract
spec._concrete = node.get('concrete', True)
@@ -1970,6 +1973,44 @@ class Spec(object):
tty.debug(e)
raise sjson.SpackJSONError("error parsing JSON spec:", str(e))
+ @staticmethod
+ def from_detection(spec_str, extra_attributes=None):
+ """Construct a spec from a spec string determined during external
+ detection and attach extra attributes to it.
+
+ Args:
+ spec_str (str): spec string
+ extra_attributes (dict): dictionary containing extra attributes
+
+ Returns:
+ spack.spec.Spec: external spec
+ """
+ s = Spec(spec_str)
+ extra_attributes = syaml.sorted_dict(extra_attributes or {})
+ # This is needed to be able to validate multi-valued variants,
+ # otherwise they'll still be abstract in the context of detection.
+ vt.substitute_abstract_variants(s)
+ s.extra_attributes = extra_attributes
+ return s
+
+ def validate_detection(self):
+ """Validate the detection of an external spec.
+
+ This method is used as part of Spack's detection protocol, and is
+ not meant for client code use.
+ """
+        # Assert that extra_attributes is a Mapping and not None,
+ # which likely means the spec was created with Spec.from_detection
+ msg = ('cannot validate "{0}" since it was not created '
+ 'using Spec.from_detection'.format(self))
+ assert isinstance(self.extra_attributes, collections.Mapping), msg
+
+ # Validate the spec calling a package specific method
+ validate_fn = getattr(
+ self.package, 'validate_detected_spec', lambda x, y: None
+ )
+ validate_fn(self, self.extra_attributes)
+
def _concretize_helper(self, concretizer, presets=None, visited=None):
"""Recursive helper function for concretize().
This concretizes everything bottom-up. As things are
@@ -2115,8 +2156,8 @@ class Spec(object):
feq(replacement.variants, spec.variants) and
feq(replacement.external_path,
spec.external_path) and
- feq(replacement.external_module,
- spec.external_module)):
+ feq(replacement.external_modules,
+ spec.external_modules)):
continue
# Refine this spec to the candidate. This uses
# replace_with AND dup so that it can work in
@@ -2250,7 +2291,7 @@ class Spec(object):
t[-1] for t in ordered_hashes)
for s in self.traverse():
- if s.external_module and not s.external_path:
+ if s.external_modules and not s.external_path:
compiler = spack.compilers.compiler_for_spec(
s.compiler, s.architecture)
for mod in compiler.modules:
@@ -2259,8 +2300,8 @@ class Spec(object):
# get the path from the module
# the package can override the default
s.external_path = getattr(s.package, 'external_prefix',
- md.get_path_from_module(
- s.external_module))
+ md.path_from_modules(
+ s.external_modules))
# Mark everything in the spec as concrete, as well.
self._mark_concrete()
@@ -3046,7 +3087,7 @@ class Spec(object):
self._normal != other._normal and
self.concrete != other.concrete and
self.external_path != other.external_path and
- self.external_module != other.external_module and
+ self.external_modules != other.external_modules and
self.compiler_flags != other.compiler_flags)
self._package = None
@@ -3074,7 +3115,8 @@ class Spec(object):
self.variants.spec = self
self.external_path = other.external_path
- self.external_module = other.external_module
+ self.external_modules = other.external_modules
+ self.extra_attributes = other.extra_attributes
self.namespace = other.namespace
# Cached fields are results of expensive operations.
@@ -3877,22 +3919,18 @@ class Spec(object):
'@K{%s} ', color=color) % node.dag_hash(hlen)
if show_types:
- types = set()
if cover == 'nodes':
# when only covering nodes, we merge dependency types
# from all dependents before showing them.
- for name, ds in node.dependents_dict().items():
- if ds.deptypes:
- types.update(set(ds.deptypes))
- elif dep_spec.deptypes:
+ types = [
+ ds.deptypes for ds in node.dependents_dict().values()]
+ else:
# when covering edges or paths, we show dependency
# types only for the edge through which we visited
- types = set(dep_spec.deptypes)
+ types = [dep_spec.deptypes]
- out += '['
- for t in dp.all_deptypes:
- out += ''.join(t[0] if t in types else ' ')
- out += '] '
+ type_chars = dp.deptype_chars(*types)
+ out += '[%s] ' % type_chars
out += (" " * d)
if d > 0:
@@ -4533,3 +4571,7 @@ class SpecDependencyNotFoundError(spack.error.SpecError):
class SpecDeprecatedError(spack.error.SpecError):
"""Raised when a spec concretizes to a deprecated spec or dependency."""
+
+
+class InvalidSpecDetected(spack.error.SpecError):
+ """Raised when a detected spec doesn't pass validation checks."""
diff --git a/lib/spack/spack/test/abi.py b/lib/spack/spack/test/abi.py
new file mode 100644
index 0000000000..dd41228941
--- /dev/null
+++ b/lib/spack/spack/test/abi.py
@@ -0,0 +1,66 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+""" Test ABI compatibility helpers"""
+
+import pytest
+
+from spack.abi import ABI
+from spack.spec import Spec
+
+
+@pytest.mark.parametrize(
+ 'target,constraint,expected',
+ [
+ ('foo', 'bar', True),
+ ('platform=linux', 'foo', True),
+ ('foo', 'arch=linux-fedora31-x86_64', True),
+ ('arch=linux-fedora31-skylake', 'arch=linux-fedora31-skylake', True),
+ ('arch=linux-fedora31-skylake', 'arch=linux-fedora31-x86_64', False),
+ ('platform=linux os=fedora31', 'arch=linux-fedora31-x86_64', True),
+ ('platform=linux', 'arch=linux-fedora31-x86_64', True),
+ ('platform=linux os=fedora31', 'platform=linux', True),
+ ('platform=darwin', 'arch=linux-fedora31-x86_64', False),
+ ('os=fedora31', 'platform=linux', False), # TODO should be true ?
+ ])
+def test_architecture_compatibility(target, constraint, expected):
+ assert ABI().architecture_compatible(Spec(target),
+ Spec(constraint)) == expected
+
+
+@pytest.mark.parametrize(
+ 'target,constraint,loose,expected',
+ [
+ ('foo', 'bar', False, True),
+ ('%gcc', 'foo', False, True),
+ ('foo', '%gcc', False, True),
+ ('%gcc', '%gcc', False, True),
+ ('%gcc', '%intel', False, False),
+ ('%gcc', '%clang', False, False),
+ ('%gcc@9.1', '%gcc@9.2', False, False), # TODO should be true ?
+ ('%gcc@9.2.1', '%gcc@9.2.2', False, False), # TODO should be true ?
+ ('%gcc@4.9', '%gcc@9.2', False, False),
+ ('%clang@5', '%clang@6', False, False),
+ ('%gcc@9.1', '%gcc@9.2', True, True),
+ ('%gcc@9.2.1', '%gcc@9.2.2', True, True),
+ ('%gcc@4.9', '%gcc@9.2', True, True),
+ ('%clang@5', '%clang@6', True, True),
+ ])
+def test_compiler_compatibility(target, constraint, loose, expected):
+ assert ABI().compiler_compatible(Spec(target),
+ Spec(constraint),
+ loose=loose) == expected
+
+
+@pytest.mark.parametrize('target,constraint,loose,expected', [
+ ('foo', 'bar', False, True),
+ ('%gcc', 'platform=linux', False, True),
+ ('%gcc@9.2.1', '%gcc@8.3.1 platform=linux', False, False),
+ ('%gcc@9.2.1', '%gcc@8.3.1 platform=linux', True, True),
+ ('%gcc@9.2.1 arch=linux-fedora31-skylake', '%gcc@9.2.1 platform=linux',
+ False, True),
+])
+def test_compatibility(target, constraint, loose, expected):
+ assert ABI().compatible(Spec(target), Spec(constraint),
+ loose=loose) == expected
diff --git a/lib/spack/spack/test/architecture.py b/lib/spack/spack/test/architecture.py
index 80d3fc72c0..7af5a8d150 100644
--- a/lib/spack/spack/test/architecture.py
+++ b/lib/spack/spack/test/architecture.py
@@ -15,7 +15,6 @@ import spack.architecture
from spack.spec import Spec
from spack.platforms.cray import Cray
from spack.platforms.linux import Linux
-from spack.platforms.bgq import Bgq
from spack.platforms.darwin import Darwin
@@ -42,8 +41,6 @@ def test_platform():
output_platform_class = spack.architecture.real_platform()
if os.path.exists('/opt/cray/pe'):
my_platform_class = Cray()
- elif os.path.exists('/bgsys'):
- my_platform_class = Bgq()
elif 'Linux' in py_platform.system():
my_platform_class = Linux()
elif 'Darwin' in py_platform.system():
diff --git a/lib/spack/spack/test/bindist.py b/lib/spack/spack/test/bindist.py
index f561077edd..a7995ca43b 100644
--- a/lib/spack/spack/test/bindist.py
+++ b/lib/spack/spack/test/bindist.py
@@ -7,6 +7,7 @@
This test checks creating and install buildcaches
"""
import os
+import sys
import py
import pytest
import argparse
@@ -158,8 +159,14 @@ def install_dir_non_default_layout(tmpdir):
spack.store.layout = real_layout
-@pytest.mark.requires_executables(
- '/usr/bin/gcc', 'patchelf', 'strings', 'file')
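+# Executables required to run these tests; the compiler and the binary
+# relocation tool differ between macOS and Linux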
+args = ['strings', 'file']
+if sys.platform == 'darwin':
+ args.extend(['/usr/bin/clang++', 'install_name_tool'])
+else:
+ args.extend(['/usr/bin/g++', 'patchelf'])
+
+
+@pytest.mark.requires_executables(*args)
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.usefixtures('default_config', 'cache_directory',
@@ -177,7 +184,6 @@ def test_default_rpaths_create_install_default_layout(tmpdir,
cspec = Spec('corge')
cspec.concretize()
- # Install patchelf needed for relocate in linux test environment
iparser = argparse.ArgumentParser()
install.setup_parser(iparser)
# Install some packages with dependent packages
@@ -253,8 +259,7 @@ def test_default_rpaths_create_install_default_layout(tmpdir,
mirror.mirror(mparser, margs)
-@pytest.mark.requires_executables(
- '/usr/bin/gcc', 'patchelf', 'strings', 'file')
+@pytest.mark.requires_executables(*args)
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@@ -302,8 +307,7 @@ def test_default_rpaths_install_nondefault_layout(tmpdir,
mirror.mirror(mparser, margs)
-@pytest.mark.requires_executables(
- '/usr/bin/gcc', 'patchelf', 'strings', 'file')
+@pytest.mark.requires_executables(*args)
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@@ -356,8 +360,7 @@ def test_relative_rpaths_create_default_layout(tmpdir,
spack.stage.purge()
-@pytest.mark.requires_executables(
- '/usr/bin/gcc', 'patchelf', 'strings', 'file')
+@pytest.mark.requires_executables(*args)
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@@ -382,7 +385,6 @@ def test_relative_rpaths_install_default_layout(tmpdir,
['add', '--scope', 'site', 'test-mirror-rel', 'file://%s' % mirror_path_rel])
mirror.mirror(mparser, margs)
- # Install patchelf needed for relocate in linux test environment
iparser = argparse.ArgumentParser()
install.setup_parser(iparser)
@@ -422,8 +424,7 @@ def test_relative_rpaths_install_default_layout(tmpdir,
mirror.mirror(mparser, margs)
-@pytest.mark.requires_executables(
- '/usr/bin/gcc', 'patchelf', 'strings', 'file')
+@pytest.mark.requires_executables(*args)
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@@ -449,7 +450,6 @@ def test_relative_rpaths_install_nondefault(tmpdir,
['add', '--scope', 'site', 'test-mirror-rel', 'file://%s' % mirror_path_rel])
mirror.mirror(mparser, margs)
- # Install patchelf needed for relocate in linux test environment
iparser = argparse.ArgumentParser()
install.setup_parser(iparser)
diff --git a/lib/spack/spack/test/cmd/ci.py b/lib/spack/spack/test/cmd/ci.py
index afa7c7fc07..7e61a885ff 100644
--- a/lib/spack/spack/test/cmd/ci.py
+++ b/lib/spack/spack/test/cmd/ci.py
@@ -527,14 +527,10 @@ spack:
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
- contents = f.read()
- print('generated contents: ')
- print(contents)
- yaml_contents = syaml.load(contents)
- for ci_key in yaml_contents.keys():
- if 'externaltool' in ci_key:
- print('Erroneously staged "externaltool" pkg')
- assert(False)
+ yaml_contents = syaml.load(f)
+
+ # Check that the "externaltool" package was not erroneously staged
+ assert not any('externaltool' in key for key in yaml_contents)
def test_ci_generate_debug_with_custom_spack(tmpdir, mutable_mock_env_path,
diff --git a/lib/spack/spack/test/cmd/compiler.py b/lib/spack/spack/test/cmd/compiler.py
index 61c67ccecd..c5c354221e 100644
--- a/lib/spack/spack/test/cmd/compiler.py
+++ b/lib/spack/spack/test/cmd/compiler.py
@@ -250,3 +250,35 @@ def test_compiler_find_prefer_no_suffix(
assert clang['paths']['cc'] == str(clangdir.join('clang'))
assert clang['paths']['cxx'] == str(clangdir.join('clang++'))
+
+
+def test_compiler_find_path_order(
+ no_compilers_yaml, working_env, clangdir):
+ """Ensure that we find compilers that come first in the PATH first
+ """
+
+ with clangdir.as_cwd():
+ os.mkdir('first_in_path')
+ shutil.copy('gcc-8', 'first_in_path/gcc-8')
+ shutil.copy('g++-8', 'first_in_path/g++-8')
+ shutil.copy('gfortran-8', 'first_in_path/gfortran-8')
+
+ # the first_in_path folder should be searched first
+ os.environ['PATH'] = '{0}:{1}'.format(
+ str(clangdir.join("first_in_path")),
+ str(clangdir),
+ )
+
+ compiler('find', '--scope=site')
+
+ config = spack.compilers.get_compiler_config('site', False)
+
+ gcc = next(c['compiler'] for c in config
+ if c['compiler']['spec'] == 'gcc@8.4.0')
+
+ assert gcc['paths'] == {
+ 'cc': str(clangdir.join('first_in_path', 'gcc-8')),
+ 'cxx': str(clangdir.join('first_in_path', 'g++-8')),
+ 'f77': str(clangdir.join('first_in_path', 'gfortran-8')),
+ 'fc': str(clangdir.join('first_in_path', 'gfortran-8')),
+ }
diff --git a/lib/spack/spack/test/cmd/config.py b/lib/spack/spack/test/cmd/config.py
index 6dbf50676d..524636fed6 100644
--- a/lib/spack/spack/test/cmd/config.py
+++ b/lib/spack/spack/test/cmd/config.py
@@ -2,17 +2,40 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-import pytest
import os
-from llnl.util.filesystem import mkdirp
+import pytest
+import llnl.util.filesystem as fs
import spack.config
import spack.environment as ev
-from spack.main import SpackCommand
-
-config = SpackCommand('config')
-env = SpackCommand('env')
+import spack.main
+import spack.util.spack_yaml as syaml
+
+config = spack.main.SpackCommand('config')
+env = spack.main.SpackCommand('env')
+
+
+@pytest.fixture()
+def packages_yaml_v015(mutable_config):
+ """Create a packages.yaml in the old format"""
+ def _create(scope=None):
+ old_data = {
+ 'packages': {
+ 'cmake': {
+ 'paths': {'cmake@3.14.0': '/usr'}
+ },
+ 'gcc': {
+ 'modules': {'gcc@8.3.0': 'gcc-8'}
+ }
+ }
+ }
+ scope = scope or spack.config.default_modify_scope()
+ cfg_file = spack.config.config.get_config_filename(scope, 'packages')
+ with open(cfg_file, 'w') as f:
+ syaml.dump(old_data, stream=f)
+ return cfg_file
+ return _create
def test_get_config_scope(mock_low_high_config):
@@ -23,8 +46,8 @@ def test_get_config_scope_merged(mock_low_high_config):
low_path = mock_low_high_config.scopes['low'].path
high_path = mock_low_high_config.scopes['high'].path
- mkdirp(low_path)
- mkdirp(high_path)
+ fs.mkdirp(low_path)
+ fs.mkdirp(high_path)
with open(os.path.join(low_path, 'repos.yaml'), 'w') as f:
f.write('''\
@@ -403,3 +426,104 @@ def test_config_remove_from_env(mutable_empty_config, mutable_mock_env_path):
"""
assert output == expected
+
+
+def test_config_update_packages(packages_yaml_v015):
+ """Test Spack updating old packages.yaml format for externals
+ to new format. Ensure that data is preserved and converted
+ properly.
+ """
+ packages_yaml_v015()
+ config('update', '-y', 'packages')
+
+ # Check the entries have been transformed
+ data = spack.config.get('packages')
+ check_update(data)
+
+
+def test_config_update_not_needed(mutable_config):
+ data_before = spack.config.get('repos')
+ config('update', '-y', 'repos')
+ data_after = spack.config.get('repos')
+ assert data_before == data_after
+
+
+def test_config_update_fail_on_permission_issue(
+ packages_yaml_v015, monkeypatch
+):
+ # The first time it will update and create the backup file
+ packages_yaml_v015()
+ # Mock a global scope where we cannot write
+ monkeypatch.setattr(
+ spack.cmd.config, '_can_update_config_file', lambda x, y: False
+ )
+ with pytest.raises(spack.main.SpackCommandError):
+ config('update', '-y', 'packages')
+
+
+def test_config_revert(packages_yaml_v015):
+ cfg_file = packages_yaml_v015()
+ bkp_file = cfg_file + '.bkp'
+
+ config('update', '-y', 'packages')
+
+ # Check that the backup file exists, compute its md5 sum
+ assert os.path.exists(bkp_file)
+ md5bkp = fs.md5sum(bkp_file)
+
+ config('revert', '-y', 'packages')
+
+ # Check that the backup file does not exist anymore and
+ # that the md5 sum of the configuration file is the same
+ # as that of the old backup file
+ assert not os.path.exists(bkp_file)
+ assert md5bkp == fs.md5sum(cfg_file)
+
+
+def test_config_revert_raise_if_cant_write(packages_yaml_v015, monkeypatch):
+ packages_yaml_v015()
+ config('update', '-y', 'packages')
+
+ # Mock a global scope where we cannot write
+ monkeypatch.setattr(
+ spack.cmd.config, '_can_revert_update', lambda x, y, z: False
+ )
+    # The command raises a helpful error if a configuration
+    # file is to be deleted and we don't have sufficient permissions
+ with pytest.raises(spack.main.SpackCommandError):
+ config('revert', '-y', 'packages')
+
+
+def test_updating_config_implicitly_raises(packages_yaml_v015):
+ # Trying to write implicitly to a scope with a configuration file
+ # in the old format raises an exception
+ packages_yaml_v015()
+ with pytest.raises(RuntimeError):
+ config('add', 'packages:cmake:buildable:false')
+
+
+def test_updating_multiple_scopes_at_once(packages_yaml_v015):
+ # Create 2 config files in the old format
+ packages_yaml_v015(scope='user')
+ packages_yaml_v015(scope='site')
+
+ # Update both of them at once
+ config('update', '-y', 'packages')
+
+ for scope in ('user', 'site'):
+ data = spack.config.get('packages', scope=scope)
+ check_update(data)
+
+
+def check_update(data):
+ """Check that the data from the packages_yaml_v015
+ has been updated.
+ """
+ assert 'externals' in data['cmake']
+ externals = data['cmake']['externals']
+ assert {'spec': 'cmake@3.14.0', 'prefix': '/usr'} in externals
+ assert 'paths' not in data['cmake']
+ assert 'externals' in data['gcc']
+ externals = data['gcc']['externals']
+ assert {'spec': 'gcc@8.3.0', 'modules': ['gcc-8']} in externals
+ assert 'modules' not in data['gcc']
diff --git a/lib/spack/spack/test/cmd/env.py b/lib/spack/spack/test/cmd/env.py
index 87f7a58667..955693ca0f 100644
--- a/lib/spack/spack/test/cmd/env.py
+++ b/lib/spack/spack/test/cmd/env.py
@@ -448,8 +448,9 @@ env:
external_config = StringIO("""\
packages:
a:
- paths:
- a: {a_prefix}
+ externals:
+ - spec: a
+ prefix: {a_prefix}
buildable: false
""".format(a_prefix=str(fake_prefix)))
external_config_dict = spack.util.spack_yaml.load_config(external_config)
@@ -2041,3 +2042,73 @@ def test_env_write_only_non_default():
yaml = f.read()
assert yaml == ev.default_manifest_yaml
+
+
+@pytest.fixture
+def packages_yaml_v015(tmpdir):
+ """Return the path to an existing manifest in the v0.15.x format
+    and the path to a backup file that does not exist yet.
+ """
+ raw_yaml = """
+spack:
+ specs:
+ - mpich
+ packages:
+ cmake:
+ paths:
+ cmake@3.17.3: /usr
+"""
+ manifest = tmpdir.ensure('spack.yaml')
+ backup_file = tmpdir.join('spack.yaml.bkp')
+ manifest.write(raw_yaml)
+ return manifest, backup_file
+
+
+def test_update_anonymous_env(packages_yaml_v015):
+ manifest, backup_file = packages_yaml_v015
+ env('update', '-y', str(manifest.dirname))
+
+ # The environment is now at the latest format
+ assert ev.is_latest_format(str(manifest))
+ # A backup file has been created and it's not at the latest format
+ assert os.path.exists(str(backup_file))
+ assert not ev.is_latest_format(str(backup_file))
+
+
+def test_double_update(packages_yaml_v015):
+ manifest, backup_file = packages_yaml_v015
+
+ # Update the environment
+ env('update', '-y', str(manifest.dirname))
+ # Try to read the environment (it should not error)
+ ev.create('test', str(manifest))
+ # Updating again does nothing since the manifest is up-to-date
+ env('update', '-y', str(manifest.dirname))
+
+ # The environment is at the latest format
+ assert ev.is_latest_format(str(manifest))
+ # A backup file has been created and it's not at the latest format
+ assert os.path.exists(str(backup_file))
+ assert not ev.is_latest_format(str(backup_file))
+
+
+def test_update_and_revert(packages_yaml_v015):
+ manifest, backup_file = packages_yaml_v015
+
+ # Update the environment
+ env('update', '-y', str(manifest.dirname))
+ assert os.path.exists(str(backup_file))
+ assert not ev.is_latest_format(str(backup_file))
+ assert ev.is_latest_format(str(manifest))
+
+ # Revert to previous state
+ env('revert', '-y', str(manifest.dirname))
+ assert not os.path.exists(str(backup_file))
+ assert not ev.is_latest_format(str(manifest))
+
+
+def test_old_format_cant_be_updated_implicitly(packages_yaml_v015):
+ manifest, backup_file = packages_yaml_v015
+ env('activate', str(manifest.dirname))
+ with pytest.raises(spack.main.SpackCommandError):
+ add('hdf5')
diff --git a/lib/spack/spack/test/cmd/external.py b/lib/spack/spack/test/cmd/external.py
index 0bdf67fe3e..547d20de24 100644
--- a/lib/spack/spack/test/cmd/external.py
+++ b/lib/spack/spack/test/cmd/external.py
@@ -2,10 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
-import pytest
import os
-import stat
+import os.path
import spack
from spack.spec import Spec
@@ -13,30 +11,10 @@ from spack.cmd.external import ExternalPackageEntry
from spack.main import SpackCommand
-@pytest.fixture()
-def create_exe(tmpdir_factory):
- def _create_exe(exe_name, content):
- base_prefix = tmpdir_factory.mktemp('base-prefix')
- base_prefix.ensure('bin', dir=True)
- exe_path = str(base_prefix.join('bin', exe_name))
- with open(exe_path, 'w') as f:
- f.write("""\
-#!/bin/bash
-
-echo "{0}"
-""".format(content))
-
- st = os.stat(exe_path)
- os.chmod(exe_path, st.st_mode | stat.S_IEXEC)
- return exe_path
-
- yield _create_exe
-
-
-def test_find_external_single_package(create_exe):
+def test_find_external_single_package(mock_executable):
pkgs_to_check = [spack.repo.get('cmake')]
- cmake_path = create_exe("cmake", "cmake version 1.foo")
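+    # mock_executable creates a fake script that runs the given shell
+    # snippet when invoked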
+ cmake_path = mock_executable("cmake", output='echo "cmake version 1.foo"')
system_path_to_exe = {cmake_path: 'cmake'}
pkg_to_entries = spack.cmd.external._get_external_packages(
@@ -48,12 +26,16 @@ def test_find_external_single_package(create_exe):
assert single_entry.spec == Spec('cmake@1.foo')
-def test_find_external_two_instances_same_package(create_exe):
+def test_find_external_two_instances_same_package(mock_executable):
pkgs_to_check = [spack.repo.get('cmake')]
# Each of these cmake instances is created in a different prefix
- cmake_path1 = create_exe("cmake", "cmake version 1.foo")
- cmake_path2 = create_exe("cmake", "cmake version 3.17.2")
+ cmake_path1 = mock_executable(
+ "cmake", output='echo "cmake version 1.foo"', subdir=('base1', 'bin')
+ )
+ cmake_path2 = mock_executable(
+ "cmake", output='echo "cmake version 3.17.2"', subdir=('base2', 'bin')
+ )
system_path_to_exe = {
cmake_path1: 'cmake',
cmake_path2: 'cmake'}
@@ -70,25 +52,24 @@ def test_find_external_two_instances_same_package(create_exe):
def test_find_external_update_config(mutable_config):
- pkg_to_entries = {
- 'cmake': [
- ExternalPackageEntry(Spec('cmake@1.foo'), '/x/y1/'),
- ExternalPackageEntry(Spec('cmake@3.17.2'), '/x/y2/'),
- ]
- }
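+    # Build the entries with Spec.from_detection, as the detection
+    # machinery in 'spack external find' does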
+ entries = [
+ ExternalPackageEntry(Spec.from_detection('cmake@1.foo'), '/x/y1/'),
+ ExternalPackageEntry(Spec.from_detection('cmake@3.17.2'), '/x/y2/'),
+ ]
+ pkg_to_entries = {'cmake': entries}
spack.cmd.external._update_pkg_config(pkg_to_entries, False)
pkgs_cfg = spack.config.get('packages')
cmake_cfg = pkgs_cfg['cmake']
- cmake_paths_cfg = cmake_cfg['paths']
+ cmake_externals = cmake_cfg['externals']
- assert cmake_paths_cfg['cmake@1.foo'] == '/x/y1/'
- assert cmake_paths_cfg['cmake@3.17.2'] == '/x/y2/'
+ assert {'spec': 'cmake@1.foo', 'prefix': '/x/y1/'} in cmake_externals
+ assert {'spec': 'cmake@3.17.2', 'prefix': '/x/y2/'} in cmake_externals
-def test_get_executables(working_env, create_exe):
- cmake_path1 = create_exe("cmake", "cmake version 1.foo")
+def test_get_executables(working_env, mock_executable):
+ cmake_path1 = mock_executable("cmake", output="echo cmake version 1.foo")
os.environ['PATH'] = ':'.join([os.path.dirname(cmake_path1)])
path_to_exe = spack.cmd.external._get_system_executables()
@@ -98,29 +79,30 @@ def test_get_executables(working_env, create_exe):
external = SpackCommand('external')
-def test_find_external_cmd(mutable_config, working_env, create_exe):
+def test_find_external_cmd(mutable_config, working_env, mock_executable):
"""Test invoking 'spack external find' with additional package arguments,
which restricts the set of packages that Spack looks for.
"""
- cmake_path1 = create_exe("cmake", "cmake version 1.foo")
+ cmake_path1 = mock_executable("cmake", output="echo cmake version 1.foo")
+ prefix = os.path.dirname(os.path.dirname(cmake_path1))
os.environ['PATH'] = ':'.join([os.path.dirname(cmake_path1)])
external('find', 'cmake')
pkgs_cfg = spack.config.get('packages')
cmake_cfg = pkgs_cfg['cmake']
- cmake_paths_cfg = cmake_cfg['paths']
+ cmake_externals = cmake_cfg['externals']
- assert 'cmake@1.foo' in cmake_paths_cfg
+ assert {'spec': 'cmake@1.foo', 'prefix': prefix} in cmake_externals
def test_find_external_cmd_not_buildable(
- mutable_config, working_env, create_exe):
+ mutable_config, working_env, mock_executable):
"""When the user invokes 'spack external find --not-buildable', the config
for any package where Spack finds an external version should be marked as
not buildable.
"""
- cmake_path1 = create_exe("cmake", "cmake version 1.foo")
+ cmake_path1 = mock_executable("cmake", output="echo cmake version 1.foo")
os.environ['PATH'] = ':'.join([os.path.dirname(cmake_path1)])
external('find', '--not-buildable', 'cmake')
pkgs_cfg = spack.config.get('packages')
@@ -128,22 +110,24 @@ def test_find_external_cmd_not_buildable(
def test_find_external_cmd_full_repo(
- mutable_config, working_env, create_exe, mutable_mock_repo):
+ mutable_config, working_env, mock_executable, mutable_mock_repo):
"""Test invoking 'spack external find' with no additional arguments, which
iterates through each package in the repository.
"""
- exe_path1 = create_exe(
- "find-externals1-exe", "find-externals1 version 1.foo")
+ exe_path1 = mock_executable(
+ "find-externals1-exe", output="echo find-externals1 version 1.foo"
+ )
+ prefix = os.path.dirname(os.path.dirname(exe_path1))
os.environ['PATH'] = ':'.join([os.path.dirname(exe_path1)])
external('find')
pkgs_cfg = spack.config.get('packages')
pkg_cfg = pkgs_cfg['find-externals1']
- pkg_paths_cfg = pkg_cfg['paths']
+ pkg_externals = pkg_cfg['externals']
- assert 'find-externals1@1.foo' in pkg_paths_cfg
+ assert {'spec': 'find-externals1@1.foo', 'prefix': prefix} in pkg_externals
def test_find_external_merge(mutable_config, mutable_mock_repo):
@@ -152,26 +136,89 @@ def test_find_external_merge(mutable_config, mutable_mock_repo):
"""
pkgs_cfg_init = {
'find-externals1': {
- 'paths': {
- 'find-externals1@1.1': '/preexisting-prefix/'
- },
+ 'externals': [{
+ 'spec': 'find-externals1@1.1',
+ 'prefix': '/preexisting-prefix/'
+ }],
'buildable': False
}
}
mutable_config.update_config('packages', pkgs_cfg_init)
-
- pkg_to_entries = {
- 'find-externals1': [
- ExternalPackageEntry(Spec('find-externals1@1.1'), '/x/y1/'),
- ExternalPackageEntry(Spec('find-externals1@1.2'), '/x/y2/'),
- ]
- }
+ entries = [
+ ExternalPackageEntry(
+ Spec.from_detection('find-externals1@1.1'), '/x/y1/'
+ ),
+ ExternalPackageEntry(
+ Spec.from_detection('find-externals1@1.2'), '/x/y2/'
+ )
+ ]
+ pkg_to_entries = {'find-externals1': entries}
spack.cmd.external._update_pkg_config(pkg_to_entries, False)
pkgs_cfg = spack.config.get('packages')
pkg_cfg = pkgs_cfg['find-externals1']
- pkg_paths_cfg = pkg_cfg['paths']
+ pkg_externals = pkg_cfg['externals']
+
+ assert {'spec': 'find-externals1@1.1',
+ 'prefix': '/preexisting-prefix/'} in pkg_externals
+ assert {'spec': 'find-externals1@1.2',
+ 'prefix': '/x/y2/'} in pkg_externals
+
+
+def test_list_detectable_packages(mutable_config, mutable_mock_repo):
+ external("list")
+ assert external.returncode == 0
+
+
+def test_packages_yaml_format(mock_executable, mutable_config, monkeypatch):
+ # Prepare an environment to detect a fake gcc
+ gcc_exe = mock_executable('gcc', output="echo 4.2.1")
+ prefix = os.path.dirname(gcc_exe)
+ monkeypatch.setenv('PATH', prefix)
+
+ # Find the external spec
+ external('find', 'gcc')
+
+ # Check entries in 'packages.yaml'
+ packages_yaml = spack.config.get('packages')
+ assert 'gcc' in packages_yaml
+ assert 'externals' in packages_yaml['gcc']
+ externals = packages_yaml['gcc']['externals']
+ assert len(externals) == 1
+ external_gcc = externals[0]
+ assert external_gcc['spec'] == 'gcc@4.2.1 languages=c'
+ assert external_gcc['prefix'] == os.path.dirname(prefix)
+ assert 'extra_attributes' in external_gcc
+ extra_attributes = external_gcc['extra_attributes']
+ assert 'prefix' not in extra_attributes
+ assert extra_attributes['compilers']['c'] == gcc_exe
+
+
+def test_overriding_prefix(mock_executable, mutable_config, monkeypatch):
+ # Prepare an environment to detect a fake gcc that
+    # overrides its external prefix
+ gcc_exe = mock_executable('gcc', output="echo 4.2.1")
+ prefix = os.path.dirname(gcc_exe)
+ monkeypatch.setenv('PATH', prefix)
+
+ @classmethod
+ def _determine_variants(cls, exes, version_str):
+ return 'languages=c', {
+ 'prefix': '/opt/gcc/bin',
+ 'compilers': {'c': exes[0]}
+ }
+
+ gcc_cls = spack.repo.path.get_pkg_class('gcc')
+ monkeypatch.setattr(gcc_cls, 'determine_variants', _determine_variants)
+
+ # Find the external spec
+ external('find', 'gcc')
- assert pkg_paths_cfg['find-externals1@1.1'] == '/preexisting-prefix/'
- assert pkg_paths_cfg['find-externals1@1.2'] == '/x/y2/'
+ # Check entries in 'packages.yaml'
+ packages_yaml = spack.config.get('packages')
+ assert 'gcc' in packages_yaml
+ assert 'externals' in packages_yaml['gcc']
+ externals = packages_yaml['gcc']['externals']
+ assert len(externals) == 1
+ assert externals[0]['prefix'] == '/opt/gcc/bin'
diff --git a/lib/spack/spack/test/compilers/detection.py b/lib/spack/spack/test/compilers/detection.py
index f36e15804d..634189d02a 100644
--- a/lib/spack/spack/test/compilers/detection.py
+++ b/lib/spack/spack/test/compilers/detection.py
@@ -104,19 +104,19 @@ def test_clang_version_detection(version_str, expected_version):
@pytest.mark.parametrize('version_str,expected_version', [
# C compiler
- ('fcc (FCC) 4.0.0 20190314\n'
+ ('fcc (FCC) 4.0.0a 20190314\n'
'simulating gcc version 6.1\n'
'Copyright FUJITSU LIMITED 2019',
- '4.0.0'),
+ '4.0.0a'),
# C++ compiler
- ('FCC (FCC) 4.0.0 20190314\n'
+ ('FCC (FCC) 4.0.0a 20190314\n'
'simulating gcc version 6.1\n'
'Copyright FUJITSU LIMITED 2019',
- '4.0.0'),
+ '4.0.0a'),
# Fortran compiler
- ('frt (FRT) 4.0.0 20190314\n'
+ ('frt (FRT) 4.0.0a 20190314\n'
'Copyright FUJITSU LIMITED 2019',
- '4.0.0')
+ '4.0.0a')
])
def test_fj_version_detection(version_str, expected_version):
version = spack.compilers.fj.Fj.extract_version_from_output(version_str)
diff --git a/lib/spack/spack/test/concretize.py b/lib/spack/spack/test/concretize.py
index cfe0748c4b..a7a89cc78d 100644
--- a/lib/spack/spack/test/concretize.py
+++ b/lib/spack/spack/test/concretize.py
@@ -373,7 +373,7 @@ class TestConcretize(object):
spec = Spec('externalmodule')
spec.concretize()
- assert spec['externalmodule'].external_module == 'external-module'
+ assert spec['externalmodule'].external_modules == ['external-module']
assert 'externalprereq' not in spec
assert spec['externalmodule'].compiler.satisfies('gcc')
diff --git a/lib/spack/spack/test/concretize_preferences.py b/lib/spack/spack/test/concretize_preferences.py
index df46ed9fe8..c941ccad94 100644
--- a/lib/spack/spack/test/concretize_preferences.py
+++ b/lib/spack/spack/test/concretize_preferences.py
@@ -198,8 +198,9 @@ all:
mpi: [mpich]
mpich:
buildable: false
- paths:
- mpich@3.0.4: /dummy/path
+ externals:
+ - spec: mpich@3.0.4
+ prefix: /dummy/path
""")
spack.config.set('packages', conf, scope='concretize')
@@ -229,8 +230,9 @@ all:
mpi: [mpich]
mpi:
buildable: false
- modules:
- mpich@3.0.4: dummy
+ externals:
+ - spec: mpich@3.0.4
+ modules: [dummy]
""")
spack.config.set('packages', conf, scope='concretize')
diff --git a/lib/spack/spack/test/conftest.py b/lib/spack/spack/test/conftest.py
index 0c5689ee53..0e97b51cde 100644
--- a/lib/spack/spack/test/conftest.py
+++ b/lib/spack/spack/test/conftest.py
@@ -287,6 +287,10 @@ def _skip_if_missing_executables(request):
# FIXME: there's some weird interaction with compilers during concretization.
spack.architecture.real_platform = spack.architecture.platform
spack.architecture.platform = lambda: spack.platforms.test.Test()
+# FIXME: Since we change the architecture above, we have to (re)initialize
+# FIXME: the config singleton. If it gets initialized too early with the
+# FIXME: actual architecture, tests will fail.
+spack.config.config = spack.config._config()
#
diff --git a/lib/spack/spack/test/data/config/packages.yaml b/lib/spack/spack/test/data/config/packages.yaml
index 63e63e525d..c2e8d558b3 100644
--- a/lib/spack/spack/test/data/config/packages.yaml
+++ b/lib/spack/spack/test/data/config/packages.yaml
@@ -4,15 +4,21 @@ packages:
mpi: [openmpi, mpich]
externaltool:
buildable: False
- paths:
- externaltool@1.0%gcc@4.5.0: /path/to/external_tool
- externaltool@0.9%gcc@4.5.0: /usr
+ externals:
+ - spec: externaltool@1.0%gcc@4.5.0
+ prefix: /path/to/external_tool
+ - spec: externaltool@0.9%gcc@4.5.0
+ prefix: /usr
externalvirtual:
buildable: False
- paths:
- externalvirtual@2.0%clang@3.3: /path/to/external_virtual_clang
- externalvirtual@1.0%gcc@4.5.0: /path/to/external_virtual_gcc
+ externals:
+ - spec: externalvirtual@2.0%clang@3.3
+ prefix: /path/to/external_virtual_clang
+ - spec: externalvirtual@1.0%gcc@4.5.0
+ prefix: /path/to/external_virtual_gcc
externalmodule:
buildable: False
- modules:
- externalmodule@1.0%gcc@4.5.0: external-module
+ externals:
+ - spec: externalmodule@1.0%gcc@4.5.0
+ modules:
+ - external-module
diff --git a/lib/spack/spack/test/data/targets/bgq-rhel6-power7 b/lib/spack/spack/test/data/targets/bgq-rhel6-power7
deleted file mode 100644
index 39da8a49fe..0000000000
--- a/lib/spack/spack/test/data/targets/bgq-rhel6-power7
+++ /dev/null
@@ -1,4 +0,0 @@
-processor : 0
-cpu : POWER7 (architected), altivec supported
-clock : 3720.000000MHz
-revision : 2.1 (pvr 003f 0201)
diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py
index 63276c0e7b..a161a22908 100644
--- a/lib/spack/spack/test/database.py
+++ b/lib/spack/spack/test/database.py
@@ -689,17 +689,17 @@ def test_115_reindex_with_packages_not_in_repo(mutable_database):
def test_external_entries_in_db(mutable_database):
rec = mutable_database.get_record('mpileaks ^zmpi')
assert rec.spec.external_path is None
- assert rec.spec.external_module is None
+ assert not rec.spec.external_modules
rec = mutable_database.get_record('externaltool')
assert rec.spec.external_path == '/path/to/external_tool'
- assert rec.spec.external_module is None
+ assert not rec.spec.external_modules
assert rec.explicit is False
rec.spec.package.do_install(fake=True, explicit=True)
rec = mutable_database.get_record('externaltool')
assert rec.spec.external_path == '/path/to/external_tool'
- assert rec.spec.external_module is None
+ assert not rec.spec.external_modules
assert rec.explicit is True
diff --git a/lib/spack/spack/test/installer.py b/lib/spack/spack/test/installer.py
index 68b70e0840..0b3f409641 100644
--- a/lib/spack/spack/test/installer.py
+++ b/lib/spack/spack/test/installer.py
@@ -157,11 +157,11 @@ def test_process_external_package_module(install_mockery, monkeypatch, capfd):
monkeypatch.setattr(spack.database.Database, 'get_record', _none)
spec.external_path = '/actual/external/path/not/checked'
- spec.external_module = 'unchecked_module'
+ spec.external_modules = ['unchecked_module']
inst._process_external_package(spec.package, False)
out = capfd.readouterr()[0]
- assert 'has external module in {0}'.format(spec.external_module) in out
+ assert 'has external module in {0}'.format(spec.external_modules) in out
def test_process_binary_cache_tarball_none(install_mockery, monkeypatch,
@@ -257,15 +257,15 @@ def test_installer_ensure_ready_errors(install_mockery):
fmt = r'cannot be installed locally.*{0}'
# Force an external package error
- path, module = spec.external_path, spec.external_module
+ path, modules = spec.external_path, spec.external_modules
spec.external_path = '/actual/external/path/not/checked'
- spec.external_module = 'unchecked_module'
+ spec.external_modules = ['unchecked_module']
msg = fmt.format('is external')
with pytest.raises(inst.ExternalPackageError, match=msg):
installer._ensure_install_ready(spec.package)
# Force an upstream package error
- spec.external_path, spec.external_module = path, module
+ spec.external_path, spec.external_modules = path, modules
spec.package._installed_upstream = True
msg = fmt.format('is upstream')
with pytest.raises(inst.UpstreamPackageError, match=msg):
diff --git a/lib/spack/spack/test/llnl/util/cpu.py b/lib/spack/spack/test/llnl/util/cpu.py
index 319d9e684e..db2beadaf1 100644
--- a/lib/spack/spack/test/llnl/util/cpu.py
+++ b/lib/spack/spack/test/llnl/util/cpu.py
@@ -37,7 +37,6 @@ from llnl.util.cpu import Microarchitecture # noqa
'darwin-mojave-ivybridge',
'darwin-mojave-haswell',
'darwin-mojave-skylake',
- 'bgq-rhel6-power7'
])
def expected_target(request, monkeypatch):
cpu = llnl.util.cpu
@@ -49,7 +48,7 @@ def expected_target(request, monkeypatch):
)
# Monkeypatch for linux
- if platform in ('linux', 'bgq'):
+ if platform == 'linux':
monkeypatch.setattr(cpu.detect.platform, 'system', lambda: 'Linux')
@contextlib.contextmanager
diff --git a/lib/spack/spack/test/llnl/util/tty/log.py b/lib/spack/spack/test/llnl/util/tty/log.py
index fdf77e6987..39e65f0388 100644
--- a/lib/spack/spack/test/llnl/util/tty/log.py
+++ b/lib/spack/spack/test/llnl/util/tty/log.py
@@ -111,7 +111,7 @@ def test_log_subproc_and_echo_output_capfd(capfd, tmpdir):
# Tests below use a pseudoterminal to test llnl.util.tty.log
#
def simple_logger(**kwargs):
- """Mock logger (child) process for testing log.keyboard_input."""
+ """Mock logger (minion) process for testing log.keyboard_input."""
def handler(signum, frame):
running[0] = False
signal.signal(signal.SIGUSR1, handler)
@@ -125,7 +125,7 @@ def simple_logger(**kwargs):
def mock_shell_fg(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.fg()
ctl.status()
ctl.wait_enabled()
@@ -134,7 +134,7 @@ def mock_shell_fg(proc, ctl, **kwargs):
def mock_shell_fg_no_termios(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.fg()
ctl.status()
ctl.wait_disabled_fg()
@@ -143,7 +143,7 @@ def mock_shell_fg_no_termios(proc, ctl, **kwargs):
def mock_shell_bg(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.bg()
ctl.status()
ctl.wait_disabled()
@@ -152,7 +152,7 @@ def mock_shell_bg(proc, ctl, **kwargs):
def mock_shell_tstp_cont(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.tstp()
ctl.wait_stopped()
@@ -163,7 +163,7 @@ def mock_shell_tstp_cont(proc, ctl, **kwargs):
def mock_shell_tstp_tstp_cont(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.tstp()
ctl.wait_stopped()
@@ -177,7 +177,7 @@ def mock_shell_tstp_tstp_cont(proc, ctl, **kwargs):
def mock_shell_tstp_tstp_cont_cont(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.tstp()
ctl.wait_stopped()
@@ -194,7 +194,7 @@ def mock_shell_tstp_tstp_cont_cont(proc, ctl, **kwargs):
def mock_shell_bg_fg(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.bg()
ctl.status()
ctl.wait_disabled()
@@ -207,7 +207,7 @@ def mock_shell_bg_fg(proc, ctl, **kwargs):
def mock_shell_bg_fg_no_termios(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.bg()
ctl.status()
ctl.wait_disabled()
@@ -220,7 +220,7 @@ def mock_shell_bg_fg_no_termios(proc, ctl, **kwargs):
def mock_shell_fg_bg(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.fg()
ctl.status()
ctl.wait_enabled()
@@ -233,7 +233,7 @@ def mock_shell_fg_bg(proc, ctl, **kwargs):
def mock_shell_fg_bg_no_termios(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.fg()
ctl.status()
ctl.wait_disabled_fg()
@@ -299,7 +299,7 @@ def test_foreground_background(test_fn, termios_on_or_off, tmpdir):
def synchronized_logger(**kwargs):
- """Mock logger (child) process for testing log.keyboard_input.
+ """Mock logger (minion) process for testing log.keyboard_input.
This logger synchronizes with the parent process to test that 'v' can
toggle output. It is used in ``test_foreground_background_output`` below.
@@ -330,7 +330,7 @@ def synchronized_logger(**kwargs):
def mock_shell_v_v(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background_output."""
+ """Controller function for test_foreground_background_output."""
write_lock = kwargs["write_lock"]
v_lock = kwargs["v_lock"]
@@ -357,7 +357,7 @@ def mock_shell_v_v(proc, ctl, **kwargs):
def mock_shell_v_v_no_termios(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background_output."""
+ """Controller function for test_foreground_background_output."""
write_lock = kwargs["write_lock"]
v_lock = kwargs["v_lock"]
@@ -399,9 +399,9 @@ def test_foreground_background_output(
shell = PseudoShell(test_fn, synchronized_logger)
log_path = str(tmpdir.join("log.txt"))
- # Locks for synchronizing with child
- write_lock = multiprocessing.Lock() # must be held by child to write
- v_lock = multiprocessing.Lock() # held while master is in v mode
+ # Locks for synchronizing with minion
+ write_lock = multiprocessing.Lock() # must be held by minion to write
+ v_lock = multiprocessing.Lock() # held while controller is in v mode
with termios_on_or_off():
shell.start(
@@ -427,16 +427,16 @@ def test_foreground_background_output(
with open(log_path) as log:
log = log.read().strip().split("\n")
- # Master and child process coordinate with locks such that the child
+ # Controller and minion process coordinate with locks such that the minion
# writes "off" when echo is off, and "on" when echo is on. The
# output should contain mostly "on" lines, but may contain an "off"
- # or two. This is because the master toggles echo by sending "v" on
- # stdin to the child, but this is not synchronized with our locks.
+ # or two. This is because the controller toggles echo by sending "v" on
+ # stdin to the minion, but this is not synchronized with our locks.
# It's good enough for a test, though. We allow at most 2 "off"'s in
# the output to account for the race.
assert (
['forced output', 'on'] == uniq(output) or
- output.count("off") <= 2 # if master_fd is a bit slow
+ output.count("off") <= 2 # if controller_fd is a bit slow
)
# log should be off for a while, then on, then off
diff --git a/lib/spack/spack/test/module_parsing.py b/lib/spack/spack/test/module_parsing.py
index 0bf485913f..8dc06b058b 100644
--- a/lib/spack/spack/test/module_parsing.py
+++ b/lib/spack/spack/test/module_parsing.py
@@ -9,7 +9,7 @@ import spack
from spack.util.module_cmd import (
module,
- get_path_from_module,
+ path_from_modules,
get_path_args_from_module_line,
get_path_from_module_contents
)
@@ -55,7 +55,7 @@ def test_get_path_from_module_faked(monkeypatch):
return line
monkeypatch.setattr(spack.util.module_cmd, 'module', fake_module)
- path = get_path_from_module('mod')
+ path = path_from_modules(['mod'])
assert path == '/path/to'
@@ -116,10 +116,10 @@ def test_get_argument_from_module_line():
bad_lines = ['prepend_path(PATH,/lib/path)',
'prepend-path (LD_LIBRARY_PATH) /lib/path']
- assert all(get_path_args_from_module_line(l) == ['/lib/path']
- for l in simple_lines)
- assert all(get_path_args_from_module_line(l) == ['/lib/path', '/pkg/path']
- for l in complex_lines)
+ assert all(get_path_args_from_module_line(x) == ['/lib/path']
+ for x in simple_lines)
+ assert all(get_path_args_from_module_line(x) == ['/lib/path', '/pkg/path']
+ for x in complex_lines)
for bl in bad_lines:
with pytest.raises(ValueError):
get_path_args_from_module_line(bl)
diff --git a/lib/spack/spack/test/util/executable.py b/lib/spack/spack/test/util/executable.py
index 5e8795f4bf..ae2859ea4b 100644
--- a/lib/spack/spack/test/util/executable.py
+++ b/lib/spack/spack/test/util/executable.py
@@ -40,6 +40,36 @@ print(u'\\xc3')
assert u'\xc3' == script(output=str).strip()
+def test_which_relative_path_with_slash(tmpdir, working_env):
+ tmpdir.ensure('exe')
+ path = str(tmpdir.join('exe'))
+ os.environ['PATH'] = ''
+
+ with tmpdir.as_cwd():
+ no_exe = ex.which('./exe')
+ assert no_exe is None
+
+ fs.set_executable(path)
+ exe = ex.which('./exe')
+ assert exe.path == path
+
+
+def test_which_with_slash_ignores_path(tmpdir, working_env):
+ tmpdir.ensure('exe')
+ tmpdir.ensure('bin{0}exe'.format(os.path.sep))
+
+ path = str(tmpdir.join('exe'))
+ wrong_path = str(tmpdir.join('bin', 'exe'))
+ os.environ['PATH'] = os.path.dirname(wrong_path)
+
+ fs.set_executable(path)
+ fs.set_executable(wrong_path)
+
+ with tmpdir.as_cwd():
+ exe = ex.which('./exe')
+ assert exe.path == path
+
+
def test_which(tmpdir):
os.environ["PATH"] = str(tmpdir)
assert ex.which("spack-test-exe") is None
diff --git a/lib/spack/spack/util/compression.py b/lib/spack/spack/util/compression.py
index 1688b49f1b..ebbe0519d0 100644
--- a/lib/spack/spack/util/compression.py
+++ b/lib/spack/spack/util/compression.py
@@ -14,7 +14,7 @@ EXTS = ["gz", "bz2", "xz", "Z"]
NOTAR_EXTS = ["zip", "tgz", "tbz2", "txz"]
# Add PRE_EXTS and EXTS last so that .tar.gz is matched *before* .tar or .gz
-ALLOWED_ARCHIVE_TYPES = [".".join(l) for l in product(
+ALLOWED_ARCHIVE_TYPES = [".".join(ext) for ext in product(
PRE_EXTS, EXTS)] + PRE_EXTS + EXTS + NOTAR_EXTS
@@ -36,7 +36,7 @@ def decompressor_for(path, extension=None):
bunzip2 = which('bunzip2', required=True)
return bunzip2
tar = which('tar', required=True)
- tar.add_default_arg('-xf')
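+    # -o tells tar not to restore the file ownership recorded in the
+    # archive (GNU tar's --no-same-owner)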
+ tar.add_default_arg('-oxf')
return tar
diff --git a/lib/spack/spack/util/crypto.py b/lib/spack/spack/util/crypto.py
index 566e99da21..74a6ee06bd 100644
--- a/lib/spack/spack/util/crypto.py
+++ b/lib/spack/spack/util/crypto.py
@@ -133,7 +133,7 @@ class Checker(object):
@property
def hash_name(self):
"""Get the name of the hash function this Checker is using."""
- return self.hash_fun().name
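+        # Normalize the name: hashlib may report algorithm names in
+        # upper case, depending on the underlying implementation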
+ return self.hash_fun().name.lower()
def check(self, filename):
"""Read the file with the specified name and check its checksum
diff --git a/lib/spack/spack/util/executable.py b/lib/spack/spack/util/executable.py
index 28656b0a32..097da3337e 100644
--- a/lib/spack/spack/util/executable.py
+++ b/lib/spack/spack/util/executable.py
@@ -233,10 +233,15 @@ def which_string(*args, **kwargs):
path = path.split(os.pathsep)
for name in args:
- for directory in path:
- exe = os.path.join(directory, name)
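+        # Names that contain a path separator are interpreted as paths
+        # (absolute or relative to the working directory) and are not
+        # searched for in PATH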
+ if os.path.sep in name:
+ exe = os.path.abspath(name)
if os.path.isfile(exe) and os.access(exe, os.X_OK):
return exe
+ else:
+ for directory in path:
+ exe = os.path.join(directory, name)
+ if os.path.isfile(exe) and os.access(exe, os.X_OK):
+ return exe
if required:
raise CommandNotFoundError(
diff --git a/lib/spack/spack/util/module_cmd.py b/lib/spack/spack/util/module_cmd.py
index 7017b2ecb6..bc994fd4b4 100644
--- a/lib/spack/spack/util/module_cmd.py
+++ b/lib/spack/spack/util/module_cmd.py
@@ -135,18 +135,34 @@ def get_path_args_from_module_line(line):
return paths
-def get_path_from_module(mod):
- """Inspects a TCL module for entries that indicate the absolute path
- at which the library supported by said module can be found.
+def path_from_modules(modules):
+ """Inspect a list of TCL modules for entries that indicate the absolute
+ path at which the library supported by said module can be found.
+
+ Args:
+ modules (list): module files to be loaded to get an external package
+
+ Returns:
+        Best guess of the prefix path where the package is installed
"""
- # Read the module
- text = module('show', mod).split('\n')
-
- p = get_path_from_module_contents(text, mod)
- if p and not os.path.exists(p):
- tty.warn("Extracted path from module does not exist:"
- "\n\tExtracted path: " + p)
- return p
+ assert isinstance(modules, list), 'the "modules" argument must be a list'
+
+ best_choice = None
+ for module_name in modules:
+ # Read the current module and return a candidate path
+ text = module('show', module_name).split('\n')
+ candidate_path = get_path_from_module_contents(text, module_name)
+
+ if candidate_path and not os.path.exists(candidate_path):
+ msg = ("Extracted path from module does not exist "
+ "[module={0}, path={0}]")
+ tty.warn(msg.format(module_name, candidate_path))
+
+ # If anything is found, then it's the best choice. This means
+ # that we give preference to the last module to be loaded
+ # for packages requiring to load multiple modules in sequence
+ best_choice = candidate_path or best_choice
+ return best_choice
def get_path_from_module_contents(text, module_name):
diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py
index 46e8e35543..565a9be4ea 100644
--- a/lib/spack/spack/util/spack_yaml.py
+++ b/lib/spack/spack/util/spack_yaml.py
@@ -13,7 +13,7 @@
"""
import ctypes
-
+import collections
from ordereddict_backport import OrderedDict
from six import string_types, StringIO
@@ -332,6 +332,22 @@ def dump_annotated(data, stream=None, *args, **kwargs):
return getvalue()
+def sorted_dict(dict_like):
+ """Return an ordered dict with all the fields sorted recursively.
+
+ Args:
+ dict_like (dict): dictionary to be sorted
+
+ Returns:
+ dictionary sorted recursively
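+
+    For example, ``sorted_dict({'b': 1, 'a': {'d': 2, 'c': 3}})`` yields
+    a ``syaml_dict`` ordered as ``a`` (with ``c`` before ``d``), then ``b``.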
+ """
+ result = syaml_dict(sorted(dict_like.items()))
+ for key, value in result.items():
+ if isinstance(value, collections.Mapping):
+ result[key] = sorted_dict(value)
+ return result
+
+
class SpackYAMLError(spack.error.SpackError):
"""Raised when there are issues with YAML parsing."""
def __init__(self, msg, yaml_error):
diff --git a/lib/spack/spack/version.py b/lib/spack/spack/version.py
index 0c7399c3fb..44e03edb5c 100644
--- a/lib/spack/spack/version.py
+++ b/lib/spack/spack/version.py
@@ -40,7 +40,7 @@ __all__ = ['Version', 'VersionRange', 'VersionList', 'ver']
VALID_VERSION = r'[A-Za-z0-9_.-]'
# Infinity-like versions. The order in the list implies the comparison rules
-infinity_versions = ['develop', 'master', 'head', 'trunk']
+infinity_versions = ['develop', 'main', 'master', 'head', 'trunk']
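+# (earlier entries compare as newer, e.g. 'develop' > 'main' > 'master')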
def int_if_int(string):