summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--etc/spack/defaults/darwin/packages.yaml18
-rw-r--r--etc/spack/defaults/packages.yaml1
-rw-r--r--lib/spack/docs/getting_started.rst67
-rw-r--r--lib/spack/docs/index.rst1
-rw-r--r--lib/spack/docs/module_file_support.rst60
-rw-r--r--lib/spack/docs/module_file_tutorial.rst982
-rw-r--r--lib/spack/docs/packaging_guide.rst4
l---------lib/spack/env/clang/gfortran1
-rw-r--r--lib/spack/external/distro.py1049
-rw-r--r--lib/spack/llnl/util/tty/log.py56
-rw-r--r--lib/spack/spack/build_environment.py21
-rw-r--r--lib/spack/spack/build_systems/autotools.py77
-rw-r--r--lib/spack/spack/cmd/common/arguments.py28
-rw-r--r--lib/spack/spack/cmd/debug.py6
-rw-r--r--lib/spack/spack/cmd/find.py69
-rw-r--r--[-rwxr-xr-x]lib/spack/spack/cmd/flake8.py76
-rw-r--r--lib/spack/spack/cmd/module.py8
-rw-r--r--lib/spack/spack/cmd/spec.py39
-rw-r--r--lib/spack/spack/compilers/__init__.py19
-rw-r--r--lib/spack/spack/compilers/clang.py31
-rw-r--r--lib/spack/spack/concretize.py17
-rw-r--r--lib/spack/spack/fetch_strategy.py112
-rw-r--r--lib/spack/spack/mirror.py20
-rw-r--r--lib/spack/spack/operating_systems/linux_distro.py11
-rw-r--r--lib/spack/spack/package.py25
-rw-r--r--lib/spack/spack/preferred_packages.py6
-rw-r--r--lib/spack/spack/spec.py42
-rw-r--r--lib/spack/spack/stage.py37
-rw-r--r--lib/spack/spack/test/cmd/find.py2
-rw-r--r--lib/spack/spack/test/cmd/module.py2
-rw-r--r--lib/spack/spack/test/spack_yaml.py16
-rw-r--r--lib/spack/spack/util/spack_yaml.py5
-rwxr-xr-xshare/spack/setup-env.sh6
-rw-r--r--var/spack/repos/builtin/packages/cairo/package.py12
-rw-r--r--var/spack/repos/builtin/packages/dealii/package.py28
-rw-r--r--var/spack/repos/builtin/packages/dyninst/package.py35
-rw-r--r--var/spack/repos/builtin/packages/everytrace-example/package.py3
-rw-r--r--var/spack/repos/builtin/packages/everytrace/package.py2
-rw-r--r--var/spack/repos/builtin/packages/fftw/package.py12
-rw-r--r--var/spack/repos/builtin/packages/fftw/pfft-3.3.4.patch865
-rw-r--r--var/spack/repos/builtin/packages/fftw/pfft-3.3.5.patch858
-rw-r--r--var/spack/repos/builtin/packages/fontconfig/package.py12
-rw-r--r--var/spack/repos/builtin/packages/gcc/package.py2
-rw-r--r--var/spack/repos/builtin/packages/graphviz/package.py19
-rw-r--r--var/spack/repos/builtin/packages/libelf/package.py18
-rw-r--r--var/spack/repos/builtin/packages/libiconv/package.py15
-rw-r--r--var/spack/repos/builtin/packages/libsplash/package.py1
-rw-r--r--var/spack/repos/builtin/packages/libtiff/package.py8
-rw-r--r--var/spack/repos/builtin/packages/lzma/package.py10
-rw-r--r--var/spack/repos/builtin/packages/matio/package.py23
-rw-r--r--var/spack/repos/builtin/packages/mpich/package.py54
-rw-r--r--var/spack/repos/builtin/packages/ncl/package.py233
-rw-r--r--var/spack/repos/builtin/packages/ncl/spack_ncl.patch30
-rw-r--r--var/spack/repos/builtin/packages/nmap/package.py37
-rw-r--r--var/spack/repos/builtin/packages/opencoarrays/package.py16
-rw-r--r--var/spack/repos/builtin/packages/pfft/package.py64
-rw-r--r--var/spack/repos/builtin/packages/py-netcdf/package.py1
-rw-r--r--var/spack/repos/builtin/packages/py-pygobject/package.py6
-rw-r--r--var/spack/repos/builtin/packages/py-pygtk/package.py6
-rw-r--r--var/spack/repos/builtin/packages/star-ccm-plus/package.py78
-rw-r--r--var/spack/repos/builtin/packages/tau/package.py7
-rw-r--r--var/spack/repos/builtin/packages/texlive/package.py6
-rw-r--r--var/spack/repos/builtin/packages/trilinos/package.py53
63 files changed, 4914 insertions, 514 deletions
diff --git a/etc/spack/defaults/darwin/packages.yaml b/etc/spack/defaults/darwin/packages.yaml
new file mode 100644
index 0000000000..24a08809db
--- /dev/null
+++ b/etc/spack/defaults/darwin/packages.yaml
@@ -0,0 +1,18 @@
+# -------------------------------------------------------------------------
+# This file controls default concretization preferences for Spack.
+#
+# Settings here are versioned with Spack and are intended to provide
+# sensible defaults out of the box. Spack maintainers should edit this
+# file to keep it current.
+#
+# Users can override these settings by editing the following files.
+#
+# Per-spack-instance settings (overrides defaults):
+# $SPACK_ROOT/etc/spack/packages.yaml
+#
+# Per-user settings (overrides default and site settings):
+# ~/.spack/packages.yaml
+# -------------------------------------------------------------------------
+packages:
+ all:
+ compiler: [clang, gcc, intel]
diff --git a/etc/spack/defaults/packages.yaml b/etc/spack/defaults/packages.yaml
index a6b361d908..eae7752eee 100644
--- a/etc/spack/defaults/packages.yaml
+++ b/etc/spack/defaults/packages.yaml
@@ -15,6 +15,7 @@
# -------------------------------------------------------------------------
packages:
all:
+ compiler: [gcc, intel, pgi, clang, xl, nag]
providers:
mpi: [openmpi, mpich]
blas: [openblas]
diff --git a/lib/spack/docs/getting_started.rst b/lib/spack/docs/getting_started.rst
index 4bc7629a3a..34cbf453e1 100644
--- a/lib/spack/docs/getting_started.rst
+++ b/lib/spack/docs/getting_started.rst
@@ -414,7 +414,17 @@ provides no Fortran compilers. The user is therefore forced to use a
mixed toolchain: XCode-provided Clang for C/C++ and GNU ``gfortran`` for
Fortran.
-In the simplest case, you can just edit ``compilers.yaml``:
+#. You need to make sure that command-line tools are installed. To that
+ end run ``$ xcode-select --install``.
+
+#. Run ``$ spack compiler find`` to locate Clang.
+
+#. There are different ways to get ``gfortran`` on macOS. For example, you can
+ install GCC with Spack (``$ spack install gcc``) or with Homebrew
+ (``$ brew install gcc``).
+
+#. The only thing left to do is to edit ``~/.spack/compilers.yaml`` to provide
+ the path to ``gfortran``:
.. code-block:: yaml
@@ -426,57 +436,10 @@ In the simplest case, you can just edit ``compilers.yaml``:
f77: /path/to/bin/gfortran
fc: /path/to/bin/gfortran
-.. note::
-
- If you are building packages that are sensitive to the compiler's
- name, you may also need to slightly modify a few more files so that
- Spack uses compiler names the build system will recognize.
-
- Following are instructions on how to hack together
- ``clang`` and ``gfortran`` on Macintosh OS X. A similar approach
- should work for other mixed toolchain needs.
-
- Better support for mixed compiler toolchains is planned in forthcoming
- Spack versions.
-
- #. Create a symlink inside ``clang`` environment:
-
- .. code-block:: console
-
- $ cd $SPACK_ROOT/lib/spack/env/clang
- $ ln -s ../cc gfortran
-
-
- #. Patch ``clang`` compiler file:
-
- .. code-block:: diff
-
- $ diff --git a/lib/spack/spack/compilers/clang.py b/lib/spack/spack/compilers/clang.py
- index e406d86..cf8fd01 100644
- --- a/lib/spack/spack/compilers/clang.py
- +++ b/lib/spack/spack/compilers/clang.py
- @@ -35,17 +35,17 @@ class Clang(Compiler):
- cxx_names = ['clang++']
-
- # Subclasses use possible names of Fortran 77 compiler
- - f77_names = []
- + f77_names = ['gfortran']
-
- # Subclasses use possible names of Fortran 90 compiler
- - fc_names = []
- + fc_names = ['gfortran']
-
- # Named wrapper links within spack.build_env_path
- link_paths = { 'cc' : 'clang/clang',
- 'cxx' : 'clang/clang++',
- # Use default wrappers for fortran, in case provided in compilers.yaml
- - 'f77' : 'f77',
- - 'fc' : 'f90' }
- + 'f77' : 'clang/gfortran',
- + 'fc' : 'clang/gfortran' }
-
- @classmethod
- def default_version(self, comp):
+ If you used Spack to install GCC, you can get the installation prefix by
+ running ``$ spack location -i gcc`` (this will only work if you have a single
+ version of GCC installed). With Homebrew, GCC is installed in
+ ``/usr/local/Cellar/gcc/x.y.z``.
^^^^^^^^^^^^^^^^^^^^^
Compiler Verification
diff --git a/lib/spack/docs/index.rst b/lib/spack/docs/index.rst
index 3d1bd86c3e..daaeef1af6 100644
--- a/lib/spack/docs/index.rst
+++ b/lib/spack/docs/index.rst
@@ -46,6 +46,7 @@ or refer to the full manual below.
getting_started
basic_usage
workflows
+ module_file_tutorial
.. toctree::
:maxdepth: 2
diff --git a/lib/spack/docs/module_file_support.rst b/lib/spack/docs/module_file_support.rst
index 19a8161477..f07bd31c2e 100644
--- a/lib/spack/docs/module_file_support.rst
+++ b/lib/spack/docs/module_file_support.rst
@@ -1,8 +1,8 @@
.. _modules:
-=====================================
+=======
Modules
-=====================================
+=======
The use of module systems to manage user environment in a controlled way
is a common practice at HPC centers that is often embraced also by individual
@@ -24,6 +24,8 @@ directly with automatically generated module files.
we advise you to use either Environment Modules or LMod. See :ref:`InstallEnvironmentModules`
for more details.
+.. _shell_support:
+
-------------
Shell support
-------------
@@ -110,9 +112,9 @@ you could type either of these commands to load the callpath module:
.. _cmd-spack-load:
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^
``spack load / unload``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^
Neither of these is particularly pretty, easy to remember, or
easy to type. Luckily, Spack has its own interface for using modules
@@ -161,9 +163,9 @@ want to use a package, you can type unload or unuse similarly:
only available if you have enabled Spack's shell support *and* you
have dotkit or modules installed on your machine.
-^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^
Ambiguous module names
-^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^
If a spec used with load/unload or use/unuse is ambiguous (i.e. more
than one installed package matches it), then Spack will warn you:
@@ -294,23 +296,24 @@ For example, consider the following on one system:
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
module load linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y
------------------------------
+----------------------------
Auto-generating Module Files
------------------------------
+----------------------------
Module files are generated by post-install hooks after the successful
-installation of a package. They are placed in the following directories
-under the Spack root:
+installation of a package. The following table summarizes the essential
+information associated with the different file formats
+that can be generated by Spack:
- +----------------------------------------+------------------------------------+------------------------+
- | | **Module Files Root directory** | **Compatible systems** |
- +========================================+====================================+========================+
- | **Dotkit Module Files** | share/spack/dotkit | DotKit |
- +----------------------------------------+------------------------------------+------------------------+
- | **Non-Hierarchical TCL Module Files** | share/spack/modules | Env. Modules/LMod |
- +----------------------------------------+------------------------------------+------------------------+
- | **Lua Hierarchical Module Files** | share/spack/lmod | LMod |
- +----------------------------------------+------------------------------------+------------------------+
+ +-----------------------------+--------------------+-------------------------------+----------------------+
+ | | **Hook name** | **Default root directory** | **Compatible tools** |
+ +=============================+====================+===============================+======================+
+ | **Dotkit** | ``dotkit`` | share/spack/dotkit | DotKit |
+ +-----------------------------+--------------------+-------------------------------+----------------------+
+ | **TCL - Non-Hierarchical** | ``tcl`` | share/spack/modules | Env. Modules/LMod |
+ +-----------------------------+--------------------+-------------------------------+----------------------+
+ | **Lua - Hierarchical** | ``lmod`` | share/spack/lmod | LMod |
+ +-----------------------------+--------------------+-------------------------------+----------------------+
Though Spack ships with sensible defaults for the generation of module files,
@@ -324,9 +327,9 @@ The former method fits best cases that are site independent, e.g. injecting vari
from language interpreters into their extensions. The latter instead permits to
fine tune the content, naming and creation of module files to meet site specific conventions.
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^
``Package`` file API
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^
There are two methods that can be overridden in any ``package.py`` to affect the
content of generated module files. The first one is:
@@ -376,9 +379,9 @@ and has similar effects on module file of dependees. Even in this case
.. _modules-yaml:
-----------------------------------
+---------------------------------
Configuration in ``modules.yaml``
-----------------------------------
+---------------------------------
The name of the configuration file that controls module generation behavior
is ``modules.yaml``. The default configuration:
@@ -426,6 +429,8 @@ All these module sections allow for both:
For the latter point in particular it is possible to use anonymous specs
to select an appropriate set of packages on which the modifications should be applied.
+.. _anonymous_specs:
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Selection by anonymous specs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -544,7 +549,7 @@ most likely via the ``+blas`` variant specification.
modules:
tcl:
- naming_scheme: '${PACKAGE}/${VERSION}-${COMPILERNAME}-${COMPILERVERSION}'
+ naming_scheme: '${PACKAGE}/${VERSION}-${COMPILERNAME}-${COMPILERVER}'
all:
conflict: ['${PACKAGE}', 'intel/14.0.1']
@@ -660,9 +665,9 @@ before regeneration if the change in layout is radical.
.. _cmd-spack-module-rm:
-^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^
``spack module rm``
-^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^
If instead what you need is just to delete a few module files, then the right
command is ``module rm``:
@@ -670,7 +675,8 @@ command is ``module rm``:
.. command-output:: spack module rm --help
.. note::
- We care about your module files! Every modification done on modules
+ We care about your module files!
+ Every modification done on modules
that are already existing will ask for a confirmation by default. If
the command is used in a script it is possible though to pass the
``-y`` argument, that will skip this safety measure.
diff --git a/lib/spack/docs/module_file_tutorial.rst b/lib/spack/docs/module_file_tutorial.rst
new file mode 100644
index 0000000000..3ffca487a0
--- /dev/null
+++ b/lib/spack/docs/module_file_tutorial.rst
@@ -0,0 +1,982 @@
+.. _module_tutorial:
+
+=======
+Modules
+=======
+
+This tutorial will guide you through the customization of both
+content and naming of module files generated by Spack.
+
+Starting from the default Spack settings you will add an increasing
+number of directives to the ``modules.yaml`` configuration file to
+satisfy a number of constraints that mimic those that you may encounter
+in a typical production environment at HPC sites.
+
+Even though the focus will be for the most part on customizing
+TCL non-hierarchical module files, everything
+you'll see applies also to other kinds of module files generated by Spack.
+
+The generation of Lua hierarchical
+module files will be addressed at the end of the tutorial,
+and you'll see that with minor modifications
+to an existing ``modules.yaml`` written for TCL
+non-hierarchical modules you'll get almost
+for free the possibility to try a hierarchical layout.
+
+Let's start!
+
+.. _module_file_tutorial_prerequisites:
+
+-------------
+Prerequisites
+-------------
+
+Before proceeding further ensure:
+
+- you have LMod or Environment Modules available
+- have :ref:`shell support <shell_support>` activated in Spack
+
+If you need to install LMod or Environment Modules you can refer
+to the documentation :ref:`here <InstallEnvironmentModules>`.
+
+
+^^^^^^^^^^^^^^^^^^
+Add a new compiler
+^^^^^^^^^^^^^^^^^^
+
+Spack automatically scans the environment to search for available
+compilers on first use. On a Ubuntu 14.04 a fresh clone will show
+something like this:
+
+.. code-block:: console
+
+ $ uname -a
+ Linux nuvolari 4.4.0-45-generic #66~14.04.1-Ubuntu SMP Wed Oct 19 15:05:38 UTC 2016 x86_64 x86_64 x86_64 GNU/Linux
+
+ $ spack compilers
+ ==> Available compilers
+ -- gcc ----------------------------------------------------------
+ gcc@4.8
+
+For the purpose of building a limited set of packages with some features
+that will help showcase the capabilities of
+module customization, the first thing we need is to build a new compiler:
+
+.. code-block:: console
+
+ $ spack install gcc@6.2.0
+ # ...
+ # Wait a long time
+ # ...
+
+Then we can use shell support for modules to add it to the list of known compilers:
+
+.. code-block:: console
+
+ # The name of the generated module may vary
+ $ module load gcc-6.2.0-gcc-4.8-twd5nqg
+
+ $ spack compiler add
+ ==> Added 1 new compiler to /home/mculpo/.spack/linux/compilers.yaml
+ gcc@6.2.0
+
+ $ spack compilers
+ ==> Available compilers
+ -- gcc ----------------------------------------------------------
+ gcc@6.2.0 gcc@4.8
+
+Note that the final 7-digit hash at the end of the generated module name may vary
+depending on the architecture or package version.
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Build software that will be used in the tutorial
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Next you should install a few modules that will be used in the tutorial:
+
+.. code-block:: console
+
+ $ spack install netlib-scalapack ^openmpi ^openblas
+ # ...
+
+The packages you need to install are:
+
+- ``netlib-scalapack ^openmpi ^openblas``
+- ``netlib-scalapack ^mpich ^openblas``
+- ``netlib-scalapack ^openmpi ^netlib-lapack``
+- ``netlib-scalapack ^mpich ^netlib-lapack``
+- ``py-scipy ^openblas``
+
+In the end your environment should look something like:
+
+.. code-block:: console
+
+ $ module avail
+
+ ------------------------------------------------------------------------ /home/mculpo/wdir/spack/share/spack/modules/linux-Ubuntu14-x86_64 ------------------------------------------------------------------------
+ binutils-2.27-gcc-4.8-dz3xevw libpciaccess-0.13.4-gcc-6.2.0-eo2siet lzo-2.09-gcc-6.2.0-jcngz72 netlib-scalapack-2.0.2-gcc-6.2.0-wnimqhw python-2.7.12-gcc-6.2.0-qu7rc5p
+ bzip2-1.0.6-gcc-6.2.0-csoc2mq libsigsegv-2.10-gcc-4.8-avb6azw m4-1.4.17-gcc-4.8-iggewke netlib-scalapack-2.0.2-gcc-6.2.0-wojunhq sqlite-3.8.5-gcc-6.2.0-td3zfe7
+ cmake-3.5.2-gcc-6.2.0-6poypqg libsigsegv-2.10-gcc-6.2.0-g3qpmbi m4-1.4.17-gcc-6.2.0-lhgqa6s nettle-3.2-gcc-6.2.0-djdthlh tcl-8.6.5-gcc-4.8-atddxu7
+ curl-7.50.3-gcc-6.2.0-2ffacqm libtool-2.4.6-gcc-6.2.0-kiepac6 mpc-1.0.3-gcc-4.8-lylv7lk openblas-0.2.19-gcc-6.2.0-js33umc util-macros-1.19.0-gcc-6.2.0-uoukuqk
+ expat-2.2.0-gcc-6.2.0-bxqnjar libxml2-2.9.4-gcc-6.2.0-3k4ykbe mpfr-3.1.4-gcc-4.8-bldfx3w openmpi-2.0.1-gcc-6.2.0-s3qbtby xz-5.2.2-gcc-6.2.0-t5lk6in
+ gcc-6.2.0-gcc-4.8-twd5nqg lmod-6.4.5-gcc-4.8-7v7bh7b mpich-3.2-gcc-6.2.0-5n5xoep openssl-1.0.2j-gcc-6.2.0-hibnfda zlib-1.2.8-gcc-4.8-bds4ies
+ gmp-6.1.1-gcc-4.8-uq52e2n lua-5.3.2-gcc-4.8-xozf2hx ncurses-6.0-gcc-4.8-u62fit4 pkg-config-0.29.1-gcc-6.2.0-rslsgcs zlib-1.2.8-gcc-6.2.0-asydrba
+ gmp-6.1.1-gcc-6.2.0-3cfh3hi lua-luafilesystem-1_6_3-gcc-4.8-sbzejlz ncurses-6.0-gcc-6.2.0-7tb426s py-nose-1.3.7-gcc-6.2.0-4gl5c42
+ hwloc-1.11.4-gcc-6.2.0-3ostwel lua-luaposix-33.4.0-gcc-4.8-xf7y2p5 netlib-lapack-3.6.1-gcc-6.2.0-mirer2l py-numpy-1.11.1-gcc-6.2.0-i3rpk4e
+ isl-0.14-gcc-4.8-cq73t5m lz4-131-gcc-6.2.0-cagoem4 netlib-scalapack-2.0.2-gcc-6.2.0-6bqlxqy py-scipy-0.18.1-gcc-6.2.0-e6uljfi
+ libarchive-3.2.1-gcc-6.2.0-2b54aos lzma-4.32.7-gcc-6.2.0-sfmeynw netlib-scalapack-2.0.2-gcc-6.2.0-hpqb3dp py-setuptools-25.2.0-gcc-6.2.0-hkqauaa
+
+------------------------------------------------
+Filter unwanted modifications to the environment
+------------------------------------------------
+
+The non-hierarchical TCL module files that have been generated so far
+follow the default rules for module generation, which are given
+:ref:`here <modules-yaml>` in the reference part of the manual. Taking a
+look at the ``gcc`` module you'll see something like:
+
+.. code-block:: console
+
+ $ module show gcc-6.2.0-gcc-4.8-twd5nqg
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ /home/mculpo/wdir/spack/share/spack/modules/linux-Ubuntu14-x86_64/gcc-6.2.0-gcc-4.8-twd5nqg:
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ whatis("gcc @6.2.0 ")
+ prepend_path("PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/bin")
+ prepend_path("CMAKE_PREFIX_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/")
+ prepend_path("MANPATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/share/man")
+ prepend_path("PKG_CONFIG_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/lib64/pkgconfig")
+ prepend_path("LIBRARY_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/lib64")
+ prepend_path("LD_LIBRARY_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/lib64")
+ prepend_path("CPATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/include")
+ help([[The GNU Compiler Collection includes front ends for C, C++, Objective-C,
+ Fortran, and Java.
+ ]])
+
+As expected, a few environment variables representing paths will be modified
+by the modules according to the default prefix inspection rules.
+
+Consider now the case that your site has decided that e.g. ``CPATH`` and
+``LIBRARY_PATH`` modifications should not be present in module files. What you can
+do to abide by the rules is to create a configuration file ``~/.spack/modules.yaml``
+with the following content:
+
+.. code-block:: yaml
+
+ modules:
+ tcl:
+ all:
+ filter:
+ environment_blacklist: ['CPATH', 'LIBRARY_PATH']
+
+Next you should regenerate all the module files:
+
+.. code-block:: console
+
+ $ spack module refresh --module-type tcl
+ ==> You are about to regenerate tcl module files for:
+
+ -- linux-Ubuntu14-x86_64 / gcc@4.8 ------------------------------
+ dz3xevw binutils@2.27 uq52e2n gmp@6.1.1 avb6azw libsigsegv@2.10 xozf2hx lua@5.3.2 xf7y2p5 lua-luaposix@33.4.0 lylv7lk mpc@1.0.3 u62fit4 ncurses@6.0 bds4ies zlib@1.2.8
+ twd5nqg gcc@6.2.0 cq73t5m isl@0.14 7v7bh7b lmod@6.4.5 sbzejlz lua-luafilesystem@1_6_3 iggewke m4@1.4.17 bldfx3w mpfr@3.1.4 atddxu7 tcl@8.6.5
+
+ ...
+
+ ==> Do you want to proceed ? [y/n]
+ y
+ ==> Regenerating tcl module files
+
+If you take a look now at the module for ``gcc`` you'll see that the unwanted
+paths have disappeared:
+
+.. code-block:: console
+
+ $ module show gcc-6.2.0-gcc-4.8-twd5nqg
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ /home/mculpo/wdir/spack/share/spack/modules/linux-Ubuntu14-x86_64/gcc-6.2.0-gcc-4.8-twd5nqg:
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ whatis("gcc @6.2.0 ")
+ prepend_path("PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/bin")
+ prepend_path("CMAKE_PREFIX_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/")
+ prepend_path("MANPATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/share/man")
+ prepend_path("PKG_CONFIG_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/lib64/pkgconfig")
+ prepend_path("LD_LIBRARY_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/lib64")
+ help([[The GNU Compiler Collection includes front ends for C, C++, Objective-C,
+ Fortran, and Java.
+ ]])
+
+----------------------------------------------
+Prevent some module files from being generated
+----------------------------------------------
+
+Another common request at many sites is to avoid exposing software that
+is only needed as an intermediate step when building a newer stack.
+Let's try to prevent the generation of
+module files for anything that is compiled with ``gcc@4.8`` (the OS provided compiler).
+
+To do this you should add a ``blacklist`` keyword to the configuration file:
+
+.. code-block:: yaml
+ :emphasize-lines: 3,4
+
+ modules:
+ tcl:
+ blacklist:
+ - '%gcc@4.8'
+ all:
+ filter:
+ environment_blacklist: ['CPATH', 'LIBRARY_PATH']
+
+and regenerate the module files:
+
+.. code-block:: console
+
+ $ spack module refresh --module-type tcl --delete-tree
+ ==> You are about to regenerate tcl module files for:
+
+ -- linux-Ubuntu14-x86_64 / gcc@4.8 ------------------------------
+ dz3xevw binutils@2.27 uq52e2n gmp@6.1.1 avb6azw libsigsegv@2.10 xozf2hx lua@5.3.2 xf7y2p5 lua-luaposix@33.4.0 lylv7lk mpc@1.0.3 u62fit4 ncurses@6.0 bds4ies zlib@1.2.8
+ twd5nqg gcc@6.2.0 cq73t5m isl@0.14 7v7bh7b lmod@6.4.5 sbzejlz lua-luafilesystem@1_6_3 iggewke m4@1.4.17 bldfx3w mpfr@3.1.4 atddxu7 tcl@8.6.5
+
+ -- linux-Ubuntu14-x86_64 / gcc@6.2.0 ----------------------------
+ csoc2mq bzip2@1.0.6 2b54aos libarchive@3.2.1 sfmeynw lzma@4.32.7 wnimqhw netlib-scalapack@2.0.2 s3qbtby openmpi@2.0.1 hkqauaa py-setuptools@25.2.0
+ 6poypqg cmake@3.5.2 eo2siet libpciaccess@0.13.4 jcngz72 lzo@2.09 6bqlxqy netlib-scalapack@2.0.2 hibnfda openssl@1.0.2j qu7rc5p python@2.7.12
+ 2ffacqm curl@7.50.3 g3qpmbi libsigsegv@2.10 lhgqa6s m4@1.4.17 wojunhq netlib-scalapack@2.0.2 rslsgcs pkg-config@0.29.1 td3zfe7 sqlite@3.8.5
+ bxqnjar expat@2.2.0 kiepac6 libtool@2.4.6 5n5xoep mpich@3.2 hpqb3dp netlib-scalapack@2.0.2 4gl5c42 py-nose@1.3.7 uoukuqk util-macros@1.19.0
+ 3cfh3hi gmp@6.1.1 3k4ykbe libxml2@2.9.4 7tb426s ncurses@6.0 djdthlh nettle@3.2 i3rpk4e py-numpy@1.11.1 t5lk6in xz@5.2.2
+ 3ostwel hwloc@1.11.4 cagoem4 lz4@131 mirer2l netlib-lapack@3.6.1 js33umc openblas@0.2.19 e6uljfi py-scipy@0.18.1 asydrba zlib@1.2.8
+
+ ==> Do you want to proceed ? [y/n]
+ y
+
+ $ module avail
+
+ ------------------------------------------------------------------------ /home/mculpo/wdir/spack/share/spack/modules/linux-Ubuntu14-x86_64 ------------------------------------------------------------------------
+ bzip2-1.0.6-gcc-6.2.0-csoc2mq libsigsegv-2.10-gcc-6.2.0-g3qpmbi ncurses-6.0-gcc-6.2.0-7tb426s openmpi-2.0.1-gcc-6.2.0-s3qbtby sqlite-3.8.5-gcc-6.2.0-td3zfe7
+ cmake-3.5.2-gcc-6.2.0-6poypqg libtool-2.4.6-gcc-6.2.0-kiepac6 netlib-lapack-3.6.1-gcc-6.2.0-mirer2l openssl-1.0.2j-gcc-6.2.0-hibnfda util-macros-1.19.0-gcc-6.2.0-uoukuqk
+ curl-7.50.3-gcc-6.2.0-2ffacqm libxml2-2.9.4-gcc-6.2.0-3k4ykbe netlib-scalapack-2.0.2-gcc-6.2.0-6bqlxqy pkg-config-0.29.1-gcc-6.2.0-rslsgcs xz-5.2.2-gcc-6.2.0-t5lk6in
+ expat-2.2.0-gcc-6.2.0-bxqnjar lz4-131-gcc-6.2.0-cagoem4 netlib-scalapack-2.0.2-gcc-6.2.0-hpqb3dp py-nose-1.3.7-gcc-6.2.0-4gl5c42 zlib-1.2.8-gcc-6.2.0-asydrba
+ gmp-6.1.1-gcc-6.2.0-3cfh3hi lzma-4.32.7-gcc-6.2.0-sfmeynw netlib-scalapack-2.0.2-gcc-6.2.0-wnimqhw py-numpy-1.11.1-gcc-6.2.0-i3rpk4e
+ hwloc-1.11.4-gcc-6.2.0-3ostwel lzo-2.09-gcc-6.2.0-jcngz72 netlib-scalapack-2.0.2-gcc-6.2.0-wojunhq py-scipy-0.18.1-gcc-6.2.0-e6uljfi
+ libarchive-3.2.1-gcc-6.2.0-2b54aos m4-1.4.17-gcc-6.2.0-lhgqa6s nettle-3.2-gcc-6.2.0-djdthlh py-setuptools-25.2.0-gcc-6.2.0-hkqauaa
+ libpciaccess-0.13.4-gcc-6.2.0-eo2siet mpich-3.2-gcc-6.2.0-5n5xoep openblas-0.2.19-gcc-6.2.0-js33umc python-2.7.12-gcc-6.2.0-qu7rc5p
+
+This time it is convenient to pass the option ``--delete-tree`` to the command that
+regenerates the module files to instruct it to delete the existing tree and regenerate
+a new one instead of overwriting the files in the existing directory.
+
+If you pay careful attention, though, you'll see that we went too far in blacklisting modules:
+the module for ``gcc@6.2.0`` disappeared as it was bootstrapped with ``gcc@4.8``. To specify
+exceptions to the blacklist rules you can use ``whitelist``:
+
+.. code-block:: yaml
+ :emphasize-lines: 3,4
+
+ modules:
+ tcl:
+ whitelist:
+ - gcc
+ blacklist:
+ - '%gcc@4.8'
+ all:
+ filter:
+ environment_blacklist: ['CPATH', 'LIBRARY_PATH']
+
+``whitelist`` rules always have precedence over ``blacklist`` rules. If you regenerate the modules again:
+
+.. code-block:: console
+
+ $ spack module refresh --module-type tcl -y
+
+you'll see that now the module for ``gcc@6.2.0`` has reappeared:
+
+.. code-block:: console
+
+ $ module avail gcc-6.2.0-gcc-4.8-twd5nqg
+
+ ------------------------------------------------------------------------ /home/mculpo/wdir/spack/share/spack/modules/linux-Ubuntu14-x86_64 ------------------------------------------------------------------------
+ gcc-6.2.0-gcc-4.8-twd5nqg
+
+-------------------------
+Change module file naming
+-------------------------
+
+The next step in making module files more user-friendly is to
+improve their naming scheme.
+To reduce the length of the hash or remove it altogether you can
+use the ``hash_length`` keyword in the configuration file:
+
+.. TODO: give reasons to remove hashes if they are not evident enough?
+
+.. code-block:: yaml
+ :emphasize-lines: 3
+
+ modules:
+ tcl:
+ hash_length: 0
+ whitelist:
+ - gcc
+ blacklist:
+ - '%gcc@4.8'
+ all:
+ filter:
+ environment_blacklist: ['CPATH', 'LIBRARY_PATH']
+
+If you try to regenerate the module files now you will get an error:
+
+.. code-block:: console
+
+ $ spack module refresh --module-type tcl --delete-tree -y
+ ==> Error: Name clashes detected in module files:
+
+ file : /home/mculpo/wdir/spack/share/spack/modules/linux-Ubuntu14-x86_64/netlib-scalapack-2.0.2-gcc-6.2.0
+ spec : netlib-scalapack@2.0.2%gcc@6.2.0~fpic+shared arch=linux-Ubuntu14-x86_64
+ spec : netlib-scalapack@2.0.2%gcc@6.2.0~fpic+shared arch=linux-Ubuntu14-x86_64
+ spec : netlib-scalapack@2.0.2%gcc@6.2.0~fpic+shared arch=linux-Ubuntu14-x86_64
+ spec : netlib-scalapack@2.0.2%gcc@6.2.0~fpic+shared arch=linux-Ubuntu14-x86_64
+
+ ==> Error: Operation aborted
+
+.. note::
+ We try to check for errors upfront!
+ In Spack we check for errors upfront whenever possible, so don't worry about your module files:
+ as a name clash was detected nothing has been changed on disk.
+
+The problem here is that without
+the hashes the four different flavors of ``netlib-scalapack`` map to the same module file
+name. We have the possibility to add suffixes to differentiate them:
+
+.. code-block:: yaml
+ :emphasize-lines: 9-11,14-17
+
+ modules:
+ tcl:
+ hash_length: 0
+ whitelist:
+ - gcc
+ blacklist:
+ - '%gcc@4.8'
+ all:
+ suffixes:
+ '^openblas': openblas
+ '^netlib-lapack': netlib
+ filter:
+ environment_blacklist: ['CPATH', 'LIBRARY_PATH']
+ netlib-scalapack:
+ suffixes:
+ '^openmpi': openmpi
+ '^mpich': mpich
+
+As you can see, it is possible to specify rules that apply only to a
+restricted set of packages using :ref:`anonymous specs <anonymous_specs>`.
+Regenerating module files now we obtain:
+
+.. code-block:: console
+
+ $ spack module refresh --module-type tcl --delete-tree -y
+ ==> Regenerating tcl module files
+ $ module avail
+
+ ------------------------------------------------------------------------ /home/mculpo/wdir/spack/share/spack/modules/linux-Ubuntu14-x86_64 ------------------------------------------------------------------------
+ bzip2-1.0.6-gcc-6.2.0 libpciaccess-0.13.4-gcc-6.2.0 mpich-3.2-gcc-6.2.0 openblas-0.2.19-gcc-6.2.0 python-2.7.12-gcc-6.2.0
+ cmake-3.5.2-gcc-6.2.0 libsigsegv-2.10-gcc-6.2.0 ncurses-6.0-gcc-6.2.0 openmpi-2.0.1-gcc-6.2.0 sqlite-3.8.5-gcc-6.2.0
+ curl-7.50.3-gcc-6.2.0 libtool-2.4.6-gcc-6.2.0 netlib-lapack-3.6.1-gcc-6.2.0 openssl-1.0.2j-gcc-6.2.0 util-macros-1.19.0-gcc-6.2.0
+ expat-2.2.0-gcc-6.2.0 libxml2-2.9.4-gcc-6.2.0 netlib-scalapack-2.0.2-gcc-6.2.0-netlib-mpich pkg-config-0.29.1-gcc-6.2.0 xz-5.2.2-gcc-6.2.0
+ gcc-6.2.0-gcc-4.8 lz4-131-gcc-6.2.0 netlib-scalapack-2.0.2-gcc-6.2.0-netlib-openmpi py-nose-1.3.7-gcc-6.2.0 zlib-1.2.8-gcc-6.2.0
+ gmp-6.1.1-gcc-6.2.0 lzma-4.32.7-gcc-6.2.0 netlib-scalapack-2.0.2-gcc-6.2.0-openblas-mpich py-numpy-1.11.1-gcc-6.2.0-openblas
+ hwloc-1.11.4-gcc-6.2.0 lzo-2.09-gcc-6.2.0 netlib-scalapack-2.0.2-gcc-6.2.0-openblas-openmpi py-scipy-0.18.1-gcc-6.2.0-openblas
+ libarchive-3.2.1-gcc-6.2.0 m4-1.4.17-gcc-6.2.0 nettle-3.2-gcc-6.2.0 py-setuptools-25.2.0-gcc-6.2.0
+
+Finally, we can set a ``naming_scheme`` to prevent users from loading
+modules that refer to different flavors of the same library/application:
+
+.. code-block:: yaml
+ :emphasize-lines: 4,10,11
+
+ modules:
+ tcl:
+ hash_length: 0
+ naming_scheme: '${PACKAGE}/${VERSION}-${COMPILERNAME}-${COMPILERVER}'
+ whitelist:
+ - gcc
+ blacklist:
+ - '%gcc@4.8'
+ all:
+ conflict:
+ - '${PACKAGE}'
+ suffixes:
+ '^openblas': openblas
+ '^netlib-lapack': netlib
+ filter:
+ environment_blacklist: ['CPATH', 'LIBRARY_PATH']
+ netlib-scalapack:
+ suffixes:
+ '^openmpi': openmpi
+ '^mpich': mpich
+
+The final result should look like:
+
+.. code-block:: console
+
+ $ module avail
+
+ ------------------------------------------------------------------------ /home/mculpo/wdir/spack/share/spack/modules/linux-Ubuntu14-x86_64 ------------------------------------------------------------------------
+ bzip2/1.0.6-gcc-6.2.0 libpciaccess/0.13.4-gcc-6.2.0 mpich/3.2-gcc-6.2.0 openblas/0.2.19-gcc-6.2.0 python/2.7.12-gcc-6.2.0
+ cmake/3.5.2-gcc-6.2.0 libsigsegv/2.10-gcc-6.2.0 ncurses/6.0-gcc-6.2.0 openmpi/2.0.1-gcc-6.2.0 sqlite/3.8.5-gcc-6.2.0
+ curl/7.50.3-gcc-6.2.0 libtool/2.4.6-gcc-6.2.0 netlib-lapack/3.6.1-gcc-6.2.0 openssl/1.0.2j-gcc-6.2.0 util-macros/1.19.0-gcc-6.2.0
+ expat/2.2.0-gcc-6.2.0 libxml2/2.9.4-gcc-6.2.0 netlib-scalapack/2.0.2-gcc-6.2.0-netlib-mpich pkg-config/0.29.1-gcc-6.2.0 xz/5.2.2-gcc-6.2.0
+ gcc/6.2.0-gcc-4.8 lz4/131-gcc-6.2.0 netlib-scalapack/2.0.2-gcc-6.2.0-netlib-openmpi py-nose/1.3.7-gcc-6.2.0 zlib/1.2.8-gcc-6.2.0
+ gmp/6.1.1-gcc-6.2.0 lzma/4.32.7-gcc-6.2.0 netlib-scalapack/2.0.2-gcc-6.2.0-openblas-mpich py-numpy/1.11.1-gcc-6.2.0-openblas
+ hwloc/1.11.4-gcc-6.2.0 lzo/2.09-gcc-6.2.0 netlib-scalapack/2.0.2-gcc-6.2.0-openblas-openmpi (D) py-scipy/0.18.1-gcc-6.2.0-openblas
+ libarchive/3.2.1-gcc-6.2.0 m4/1.4.17-gcc-6.2.0 nettle/3.2-gcc-6.2.0 py-setuptools/25.2.0-gcc-6.2.0
+
+.. note::
+ TCL specific directive
+ The directives ``naming_scheme`` and ``conflict`` are TCL specific and do not apply
+ to the ``dotkit`` or ``lmod`` sections in the configuration file.
+
+------------------------------------
+Add custom environment modifications
+------------------------------------
+
+At many sites it is customary to set an environment variable in a
+package's module file that points to the folder in which the package
+is installed. You can achieve this with Spack by adding an
+``environment`` directive to the configuration file:
+
+.. code-block:: yaml
+ :emphasize-lines: 17-19
+
+ modules:
+ tcl:
+ hash_length: 0
+ naming_scheme: '${PACKAGE}/${VERSION}-${COMPILERNAME}-${COMPILERVER}'
+ whitelist:
+ - gcc
+ blacklist:
+ - '%gcc@4.8'
+ all:
+ conflict:
+ - '${PACKAGE}'
+ suffixes:
+ '^openblas': openblas
+ '^netlib-lapack': netlib
+ filter:
+ environment_blacklist: ['CPATH', 'LIBRARY_PATH']
+ environment:
+ set:
+ '${PACKAGE}_ROOT': '${PREFIX}'
+ netlib-scalapack:
+ suffixes:
+ '^openmpi': openmpi
+ '^mpich': mpich
+
+There are many variable tokens available to use in the ``environment``
+and ``naming_scheme`` directives, such as ``${PACKAGE}``,
+``${VERSION}``, etc. (see the :meth:`~spack.spec.Spec.format` API
+documentation for the complete list).
+
+Regenerating the module files should result in something like:
+
+.. code-block:: console
+ :emphasize-lines: 14
+
+ $ spack module refresh -y --module-type tcl
+ ==> Regenerating tcl module files
+
+ $ module show gcc
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ /home/mculpo/wdir/spack/share/spack/modules/linux-Ubuntu14-x86_64/gcc/6.2.0-gcc-4.8:
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ whatis("gcc @6.2.0 ")
+ prepend_path("PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/bin")
+ prepend_path("CMAKE_PREFIX_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/")
+ prepend_path("MANPATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/share/man")
+ prepend_path("PKG_CONFIG_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/lib64/pkgconfig")
+ prepend_path("LD_LIBRARY_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/lib64")
+ setenv("GCC_ROOT","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u")
+ conflict("gcc")
+ help([[The GNU Compiler Collection includes front ends for C, C++, Objective-C,
+ Fortran, and Java.
+ ]])
+
+As you see the ``gcc`` module has the environment variable ``GCC_ROOT`` set.
+
+Sometimes it's also useful to apply environment modifications selectively and target
+only certain packages. You can, for instance, set the common variables ``CC``, ``CXX``,
+etc. in the ``gcc`` module file and apply other custom modifications to the
+``openmpi`` modules as follows:
+
+.. code-block:: yaml
+ :emphasize-lines: 20-32
+
+ modules:
+ tcl:
+ hash_length: 0
+ naming_scheme: '${PACKAGE}/${VERSION}-${COMPILERNAME}-${COMPILERVER}'
+ whitelist:
+ - gcc
+ blacklist:
+ - '%gcc@4.8'
+ all:
+ conflict:
+ - '${PACKAGE}'
+ suffixes:
+ '^openblas': openblas
+ '^netlib-lapack': netlib
+ filter:
+ environment_blacklist: ['CPATH', 'LIBRARY_PATH']
+ environment:
+ set:
+ '${PACKAGE}_ROOT': '${PREFIX}'
+ gcc:
+ environment:
+ set:
+ CC: gcc
+ CXX: g++
+ FC: gfortran
+ F90: gfortran
+ F77: gfortran
+ openmpi:
+ environment:
+ set:
+ SLURM_MPI_TYPE: pmi2
+ OMPI_MCA_btl_openib_warn_default_gid_prefix: '0'
+ netlib-scalapack:
+ suffixes:
+ '^openmpi': openmpi
+ '^mpich': mpich
+
+This time we will be more selective and regenerate only the ``gcc`` and
+``openmpi`` module files:
+
+.. code-block:: console
+
+ $ spack module refresh -y --module-type tcl gcc
+ ==> Regenerating tcl module files
+
+ $ spack module refresh -y --module-type tcl openmpi
+ ==> Regenerating tcl module files
+
+ $ module show gcc
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ /home/mculpo/wdir/spack/share/spack/modules/linux-Ubuntu14-x86_64/gcc/6.2.0-gcc-4.8:
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ whatis("gcc @6.2.0 ")
+ prepend_path("PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/bin")
+ prepend_path("CMAKE_PREFIX_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/")
+ prepend_path("MANPATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/share/man")
+ prepend_path("PKG_CONFIG_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/lib64/pkgconfig")
+ prepend_path("LD_LIBRARY_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u/lib64")
+ setenv("GCC_ROOT","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-4.8/gcc-6.2.0-twd5nqg33hrrssqclcfi5k42eccwxz5u")
+ setenv("CC","gcc")
+ setenv("CXX","g++")
+ setenv("F90","gfortran")
+ setenv("FC","gfortran")
+ setenv("F77","gfortran")
+ conflict("gcc")
+ help([[The GNU Compiler Collection includes front ends for C, C++, Objective-C,
+ Fortran, and Java.
+ ]])
+
+ $ module show openmpi
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ /home/mculpo/wdir/spack/share/spack/modules/linux-Ubuntu14-x86_64/openmpi/2.0.1-gcc-6.2.0:
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ whatis("openmpi @2.0.1 ")
+ prepend_path("PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.2.0/openmpi-2.0.1-s3qbtbyh3y5y4gkchmhcuak7th44l53w/bin")
+ prepend_path("CMAKE_PREFIX_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.2.0/openmpi-2.0.1-s3qbtbyh3y5y4gkchmhcuak7th44l53w/")
+ prepend_path("LD_LIBRARY_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.2.0/openmpi-2.0.1-s3qbtbyh3y5y4gkchmhcuak7th44l53w/lib")
+ prepend_path("PKG_CONFIG_PATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.2.0/openmpi-2.0.1-s3qbtbyh3y5y4gkchmhcuak7th44l53w/lib/pkgconfig")
+ prepend_path("MANPATH","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.2.0/openmpi-2.0.1-s3qbtbyh3y5y4gkchmhcuak7th44l53w/share/man")
+ setenv("SLURM_MPI_TYPE","pmi2")
+ setenv("OMPI_MCA_BTL_OPENIB_WARN_DEFAULT_GID_PREFIX","0")
+ setenv("OPENMPI_ROOT","/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.2.0/openmpi-2.0.1-s3qbtbyh3y5y4gkchmhcuak7th44l53w")
+ conflict("openmpi")
+ help([[The Open MPI Project is an open source Message Passing Interface
+ implementation that is developed and maintained by a consortium of
+ academic, research, and industry partners. Open MPI is therefore able to
+ combine the expertise, technologies, and resources from all across the
+ High Performance Computing community in order to build the best MPI
+ library available. Open MPI offers advantages for system and software
+ vendors, application developers and computer science researchers.
+ ]])
+
+
+---------------------
+Autoload dependencies
+---------------------
+
+Spack can also generate module files that contain code to load the
+dependencies automatically. You can, for instance, generate python
+modules that load their dependencies by adding the ``autoload``
+directive and assigning it the value ``direct``:
+
+.. code-block:: yaml
+ :emphasize-lines: 37,38
+
+ modules:
+ tcl:
+ hash_length: 0
+ naming_scheme: '${PACKAGE}/${VERSION}-${COMPILERNAME}-${COMPILERVER}'
+ whitelist:
+ - gcc
+ blacklist:
+ - '%gcc@4.8'
+ all:
+ conflict:
+ - '${PACKAGE}'
+ suffixes:
+ '^openblas': openblas
+ '^netlib-lapack': netlib
+ filter:
+ environment_blacklist: ['CPATH', 'LIBRARY_PATH']
+ environment:
+ set:
+ '${PACKAGE}_ROOT': '${PREFIX}'
+ gcc:
+ environment:
+ set:
+ CC: gcc
+ CXX: g++
+ FC: gfortran
+ F90: gfortran
+ F77: gfortran
+ openmpi:
+ environment:
+ set:
+ SLURM_MPI_TYPE: pmi2
+ OMPI_MCA_btl_openib_warn_default_gid_prefix: '0'
+ netlib-scalapack:
+ suffixes:
+ '^openmpi': openmpi
+ '^mpich': mpich
+ ^python:
+ autoload: 'direct'
+
+and regenerating the module files for every package that depends on ``python``:
+
+.. code-block:: console
+
+ $ spack module refresh -y --module-type tcl ^python
+ ==> Regenerating tcl module files
+
+Now the ``py-scipy`` module will be:
+
+.. code-block:: tcl
+
+ #%Module1.0
+ ## Module file created by spack (https://github.com/LLNL/spack) on 2016-11-02 20:53:21.283547
+ ##
+ ## py-scipy@0.18.1%gcc@6.2.0 arch=linux-Ubuntu14-x86_64-e6uljfi
+ ##
+ module-whatis "py-scipy @0.18.1"
+
+ proc ModulesHelp { } {
+ puts stderr "SciPy (pronounced "Sigh Pie") is a Scientific Library for Python. It"
+ puts stderr "provides many user-friendly and efficient numerical routines such as"
+ puts stderr "routines for numerical integration and optimization."
+ }
+
+ if ![ is-loaded python/2.7.12-gcc-6.2.0 ] {
+ puts stderr "Autoloading python/2.7.12-gcc-6.2.0"
+ module load python/2.7.12-gcc-6.2.0
+ }
+
+ if ![ is-loaded openblas/0.2.19-gcc-6.2.0 ] {
+ puts stderr "Autoloading openblas/0.2.19-gcc-6.2.0"
+ module load openblas/0.2.19-gcc-6.2.0
+ }
+
+ if ![ is-loaded py-numpy/1.11.1-gcc-6.2.0-openblas ] {
+ puts stderr "Autoloading py-numpy/1.11.1-gcc-6.2.0-openblas"
+ module load py-numpy/1.11.1-gcc-6.2.0-openblas
+ }
+
+ prepend-path CMAKE_PREFIX_PATH "/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.2.0/py-scipy-0.18.1-e6uljfiffgym4xvj6wveevqxfqnfb3gh/"
+ prepend-path LD_LIBRARY_PATH "/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.2.0/py-scipy-0.18.1-e6uljfiffgym4xvj6wveevqxfqnfb3gh/lib"
+ prepend-path PYTHONPATH "/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.2.0/py-scipy-0.18.1-e6uljfiffgym4xvj6wveevqxfqnfb3gh/lib/python2.7/site-packages"
+ setenv PY_SCIPY_ROOT "/home/mculpo/wdir/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.2.0/py-scipy-0.18.1-e6uljfiffgym4xvj6wveevqxfqnfb3gh"
+ conflict py-scipy
+
+and will contain code to autoload all the dependencies:
+
+.. code-block:: console
+
+ $ module load py-scipy
+ Autoloading python/2.7.12-gcc-6.2.0
+ Autoloading openblas/0.2.19-gcc-6.2.0
+ Autoloading py-numpy/1.11.1-gcc-6.2.0-openblas
+
+-----------------------------
+Lua hierarchical module files
+-----------------------------
+
+In the final part of this tutorial you will modify ``modules.yaml`` to generate
+Lua hierarchical module files. You will see that most of the directives used before
+are also valid in the ``lmod`` context.
+
+^^^^^^^^^^^^^^^^^
+Core/Compiler/MPI
+^^^^^^^^^^^^^^^^^
+
+.. warning::
+ Only LMod supports Lua hierarchical module files
+ For this part of the tutorial you need to be using LMod to
+ manage your environment.
+
+The most common hierarchy is the so called ``Core/Compiler/MPI``. To have an idea
+how a hierarchy is organized you may refer to the
+`Lmod guide <https://www.tacc.utexas.edu/research-development/tacc-projects/lmod/user-guide/module-hierarchy>`_.
+Since ``lmod`` is not enabled by default, you need to add it to the list of
+enabled module file generators. The other things you need to do are:
+
+- change the ``tcl`` tag to ``lmod``
+- remove ``tcl`` specific directives (``naming_scheme`` and ``conflict``)
+- set which compilers are considered ``core``
+- remove the ``mpi`` related suffixes (as they will be substituted by hierarchies)
+
+After modifications the configuration file will be:
+
+.. code-block:: yaml
+ :emphasize-lines: 2-6
+
+ modules:
+ enable::
+ - lmod
+ lmod:
+ core_compilers:
+ - 'gcc@4.8'
+ hash_length: 0
+ whitelist:
+ - gcc
+ blacklist:
+ - '%gcc@4.8'
+ all:
+ suffixes:
+ '^openblas': openblas
+ '^netlib-lapack': netlib
+ filter:
+ environment_blacklist: ['CPATH', 'LIBRARY_PATH']
+ environment:
+ set:
+ '${PACKAGE}_ROOT': '${PREFIX}'
+ gcc:
+ environment:
+ set:
+ CC: gcc
+ CXX: g++
+ FC: gfortran
+ F90: gfortran
+ F77: gfortran
+ openmpi:
+ environment:
+ set:
+ SLURM_MPI_TYPE: pmi2
+ OMPI_MCA_btl_openib_warn_default_gid_prefix: '0'
+
+
+.. note::
+ The double colon
+ The double colon after ``enable`` is intentional and it serves the
+ purpose of overriding the default list of enabled generators so
+ that only ``lmod`` will be active (see :ref:`the reference
+ manual <config-overrides>` for a more detailed explanation of
+ config scopes).
+
+The directive ``core_compilers`` accepts a list of compilers: everything built
+using these compilers will create a module in the ``Core`` part of the hierarchy. It is
+common practice to put the OS provided compilers in the list and only build common utilities
+and other compilers in ``Core``.
+
+If you regenerate the module files
+
+.. code-block:: console
+
+ $ spack module refresh --module-type lmod --delete-tree -y
+
+and update ``MODULEPATH`` to point to the ``Core`` folder, and
+list the available modules, you'll see:
+
+.. code-block:: console
+
+ $ module unuse /home/mculpo/wdir/spack/share/spack/modules/linux-Ubuntu14-x86_64
+ $ module use /home/mculpo/wdir/spack/share/spack/lmod/linux-Ubuntu14-x86_64/Core
+ $ module avail
+
+ ----------------------------------------------------------------------- /home/mculpo/wdir/spack/share/spack/lmod/linux-Ubuntu14-x86_64/Core -----------------------------------------------------------------------
+ gcc/6.2.0
+
+The only module visible now is ``gcc``. Loading it will make the
+``Compiler`` part of the software stack built with ``gcc/6.2.0`` visible:
+
+.. code-block:: console
+
+ $ module load gcc
+ $ module avail
+
+ -------------------------------------------------------------------- /home/mculpo/wdir/spack/share/spack/lmod/linux-Ubuntu14-x86_64/gcc/6.2.0 ---------------------------------------------------------------------
+ binutils/2.27 curl/7.50.3 hwloc/1.11.4 libtool/2.4.6 lzo/2.09 netlib-lapack/3.6.1 openssl/1.0.2j py-scipy/0.18.1-openblas util-macros/1.19.0
+ bison/3.0.4 expat/2.2.0 libarchive/3.2.1 libxml2/2.9.4 m4/1.4.17 nettle/3.2 pkg-config/0.29.1 py-setuptools/25.2.0 xz/5.2.2
+ bzip2/1.0.6 flex/2.6.0 libpciaccess/0.13.4 lz4/131 mpich/3.2 openblas/0.2.19 py-nose/1.3.7 python/2.7.12 zlib/1.2.8
+ cmake/3.6.1 gmp/6.1.1 libsigsegv/2.10 lzma/4.32.7 ncurses/6.0 openmpi/2.0.1 py-numpy/1.11.1-openblas sqlite/3.8.5
+
+ ----------------------------------------------------------------------- /home/mculpo/wdir/spack/share/spack/lmod/linux-Ubuntu14-x86_64/Core -----------------------------------------------------------------------
+ gcc/6.2.0 (L)
+
+The same holds true for the ``MPI`` part of the stack, which you can enable
+by loading either ``mpich`` or ``openmpi``. The nice features of LMod will
+become evident once you try switching among different stacks:
+
+.. code-block:: console
+
+ $ module load mpich
+ $ module avail
+
+ ----------------------------------------------------------- /home/mculpo/wdir/spack/share/spack/lmod/linux-Ubuntu14-x86_64/mpich/3.2-5n5xoep/gcc/6.2.0 ------------------------------------------------------------
+ netlib-scalapack/2.0.2-netlib netlib-scalapack/2.0.2-openblas (D)
+
+ -------------------------------------------------------------------- /home/mculpo/wdir/spack/share/spack/lmod/linux-Ubuntu14-x86_64/gcc/6.2.0 ---------------------------------------------------------------------
+ binutils/2.27 curl/7.50.3 hwloc/1.11.4 libtool/2.4.6 lzo/2.09 netlib-lapack/3.6.1 openssl/1.0.2j py-scipy/0.18.1-openblas util-macros/1.19.0
+ bison/3.0.4 expat/2.2.0 libarchive/3.2.1 libxml2/2.9.4 m4/1.4.17 nettle/3.2 pkg-config/0.29.1 py-setuptools/25.2.0 xz/5.2.2
+ bzip2/1.0.6 flex/2.6.0 libpciaccess/0.13.4 lz4/131 mpich/3.2 (L) openblas/0.2.19 py-nose/1.3.7 python/2.7.12 zlib/1.2.8
+ cmake/3.6.1 gmp/6.1.1 libsigsegv/2.10 lzma/4.32.7 ncurses/6.0 openmpi/2.0.1 py-numpy/1.11.1-openblas sqlite/3.8.5
+
+ ----------------------------------------------------------------------- /home/mculpo/wdir/spack/share/spack/lmod/linux-Ubuntu14-x86_64/Core -----------------------------------------------------------------------
+ gcc/6.2.0 (L)
+
+ $ module load openblas netlib-scalapack/2.0.2-openblas
+ $ module list
+
+ Currently Loaded Modules:
+ 1) gcc/6.2.0 2) mpich/3.2 3) openblas/0.2.19 4) netlib-scalapack/2.0.2-openblas
+
+ $ module load openmpi
+
+ Lmod is automatically replacing "mpich/3.2" with "openmpi/2.0.1"
+
+
+ Due to MODULEPATH changes the following have been reloaded:
+ 1) netlib-scalapack/2.0.2-openblas
+
+This layout is already a great improvement over the usual non-hierarchical layout,
+but it still has an asymmetry: ``LAPACK`` providers are semantically the same as ``MPI``
+providers, but they are still not part of the hierarchy. We'll see a possible solution
+next.
+
+.. Activate lmod and turn the previous modifications into lmod:
+ Add core compilers
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Extend the hierarchy to other virtual providers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. warning::
+ This is an experimental feature
+ Having a hierarchy deeper than ``Core``/``Compiler``/``MPI`` is an experimental
+ feature, still not fully supported by ``module spider``,
+ see `here <https://github.com/TACC/Lmod/issues/114>`_. Furthermore its use
+ with hierarchies more complex than ``Core``/``Compiler``/``MPI``/``LAPACK``
+ has not been thoroughly tested in production environments.
+
+Spack permits you to generate Lua hierarchical module files where users
+can add an arbitrary list of virtual providers to the triplet
+``Core``/``Compiler``/``MPI``. A configuration file like:
+
+.. code-block:: yaml
+ :emphasize-lines: 7,8
+
+ modules:
+ enable::
+ - lmod
+ lmod:
+ core_compilers:
+ - 'gcc@4.8'
+ hierarchical_scheme:
+ - lapack
+ hash_length: 0
+ whitelist:
+ - gcc
+ blacklist:
+ - '%gcc@4.8'
+ - readline
+ all:
+ filter:
+ environment_blacklist: ['CPATH', 'LIBRARY_PATH']
+ environment:
+ set:
+ '${PACKAGE}_ROOT': '${PREFIX}'
+ gcc:
+ environment:
+ set:
+ CC: gcc
+ CXX: g++
+ FC: gfortran
+ F90: gfortran
+ F77: gfortran
+ openmpi:
+ environment:
+ set:
+ SLURM_MPI_TYPE: pmi2
+ OMPI_MCA_btl_openib_warn_default_gid_prefix: '0'
+
+will add ``lapack`` providers to the mix. After the usual regeneration of module files:
+
+.. code-block:: console
+
+ $ module purge
+ $ spack module refresh --module-type lmod --delete-tree -y
+ ==> Regenerating lmod module files
+
+you will have something like:
+
+.. code-block:: console
+
+ $ module load gcc
+ $ module load openblas
+ $ module load openmpi
+ $ module avail
+
+ --------------------------------------------- /home/mculpo/wdir/spack/share/spack/lmod/linux-Ubuntu14-x86_64/openblas/0.2.19-js33umc/openmpi/2.0.1-s3qbtby/gcc/6.2.0 ----------------------------------------------
+ netlib-scalapack/2.0.2
+
+ -------------------------------------------------------- /home/mculpo/wdir/spack/share/spack/lmod/linux-Ubuntu14-x86_64/openblas/0.2.19-js33umc/gcc/6.2.0 ---------------------------------------------------------
+ py-numpy/1.11.1 py-scipy/0.18.1
+
+ -------------------------------------------------------------------- /home/mculpo/wdir/spack/share/spack/lmod/linux-Ubuntu14-x86_64/gcc/6.2.0 ---------------------------------------------------------------------
+ binutils/2.27 curl/7.50.3 hwloc/1.11.4 libtool/2.4.6 lzo/2.09 netlib-lapack/3.6.1 openssl/1.0.2j python/2.7.12 zlib/1.2.8
+ bison/3.0.4 expat/2.2.0 libarchive/3.2.1 libxml2/2.9.4 m4/1.4.17 nettle/3.2 pkg-config/0.29.1 sqlite/3.8.5
+ bzip2/1.0.6 flex/2.6.0 libpciaccess/0.13.4 lz4/131 mpich/3.2 openblas/0.2.19 (L) py-nose/1.3.7 util-macros/1.19.0
+ cmake/3.6.1 gmp/6.1.1 libsigsegv/2.10 lzma/4.32.7 ncurses/6.0 openmpi/2.0.1 (L) py-setuptools/25.2.0 xz/5.2.2
+
+ ----------------------------------------------------------------------- /home/mculpo/wdir/spack/share/spack/lmod/linux-Ubuntu14-x86_64/Core -----------------------------------------------------------------------
+ gcc/6.2.0 (L)
+
+Now both the ``MPI`` and the ``LAPACK`` providers are handled by LMod as hierarchies:
+
+.. code-block:: console
+
+ $ module load py-numpy netlib-scalapack
+ $ module load mpich
+
+ Lmod is automatically replacing "openmpi/2.0.1" with "mpich/3.2"
+
+
+ Due to MODULEPATH changes the following have been reloaded:
+ 1) netlib-scalapack/2.0.2
+
+ $ module load netlib-lapack
+
+ Lmod is automatically replacing "openblas/0.2.19" with "netlib-lapack/3.6.1"
+
+
+ Inactive Modules:
+ 1) py-numpy
+
+ Due to MODULEPATH changes the following have been reloaded:
+ 1) netlib-scalapack/2.0.2
+
+making the use of tags to differentiate them unnecessary.
+Note that, because we only compiled ``py-numpy`` with ``openblas``, the
+module is made inactive when we switch the ``LAPACK`` provider. The user
+environment will now be consistent by design!
diff --git a/lib/spack/docs/packaging_guide.rst b/lib/spack/docs/packaging_guide.rst
index bf5763f4f8..a22fcd71ba 100644
--- a/lib/spack/docs/packaging_guide.rst
+++ b/lib/spack/docs/packaging_guide.rst
@@ -2026,8 +2026,8 @@ The last element of a package is its ``install()`` method. This is
where the real work of installation happens, and it's the main part of
the package you'll need to customize for each piece of software.
-.. literalinclude:: ../../../var/spack/repos/builtin/packages/libelf/package.py
- :pyobject: Libelf.install
+.. literalinclude:: ../../../var/spack/repos/builtin/packages/libpng/package.py
+ :pyobject: Libpng.install
:linenos:
``install`` takes a ``spec``: a description of how the package should
diff --git a/lib/spack/env/clang/gfortran b/lib/spack/env/clang/gfortran
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/clang/gfortran
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/external/distro.py b/lib/spack/external/distro.py
new file mode 100644
index 0000000000..091bba3db8
--- /dev/null
+++ b/lib/spack/external/distro.py
@@ -0,0 +1,1049 @@
+# Copyright 2015,2016 Nir Cohen
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+The ``distro`` package (``distro`` stands for Linux Distribution) provides
+information about the Linux distribution it runs on, such as a reliable
+machine-readable distro ID, or version information.
+
+It is a renewed alternative implementation for Python's original
+:py:func:`platform.linux_distribution` function, but it provides much more
+functionality. An alternative implementation became necessary because Python
+3.5 deprecated this function, and Python 3.7 is expected to remove it
+altogether. Its predecessor function :py:func:`platform.dist` was already
+deprecated since Python 2.6 and is also expected to be removed in Python 3.7.
+Still, there are many cases in which access to Linux distribution information
+is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
+more information.
+"""
+
+import os
+import re
+import sys
+import shlex
+import subprocess
+
+
# This module only supports Linux; fail fast at import time elsewhere.
if not sys.platform.startswith('linux'):
    raise ImportError('Unsupported platform: {0}'.format(sys.platform))
+
+
# Directory where os-release, lsb-release and distro release files live.
_UNIXCONFDIR = '/etc'
_OS_RELEASE_BASENAME = 'os-release'

#: Translation table for normalizing the "ID" attribute defined in os-release
#: files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as defined in the os-release file, translated to lower case,
#:   with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_OS_ID = {}

#: Translation table for normalizing the "Distributor ID" attribute returned
#: by the lsb_release command, for use by the :func:`distro.id` method.
#:
#: * Key: Value as returned by the lsb_release command, translated to lower
#:   case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_LSB_ID = {
    'enterpriseenterprise': 'oracle',  # Oracle Enterprise Linux
    'redhatenterpriseworkstation': 'rhel',  # RHEL 6, 7 Workstation
    'redhatenterpriseserver': 'rhel',  # RHEL 6, 7 Server
}

#: Translation table for normalizing the distro ID derived from the file name
#: of distro release files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as derived from the file name of a distro release file,
#:   translated to lower case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_DISTRO_ID = {
    'redhat': 'rhel',  # RHEL 6.x, 7.x
}


# Pattern for content of distro release file (reversed).
# The input line is reversed before matching, which is why the literal
# words appear reversed here as well ('STL' == 'LTS', 'esaeler' == 'release').
_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
    r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')

# Pattern for base file name of distro release file
_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
    r'(\w+)[-_](release|version)$')

# Base file names to be ignored when searching for distro release file
_DISTRO_RELEASE_IGNORE_BASENAMES = (
    'debian_version',
    'lsb-release',
    'oem-release',
    _OS_RELEASE_BASENAME,
    'system-release'
)
+
+
def linux_distribution(full_distribution_name=True):
    """Return ``(id_name, version, codename)`` for the current distro.

    ``id_name`` is :func:`distro.name` when *full_distribution_name* is
    true, otherwise :func:`distro.id`; ``version`` and ``codename`` come
    from :func:`distro.version` and :func:`distro.codename`.

    The interface is compatible with the original
    :py:func:`platform.linux_distribution` function, supporting a subset
    of its parameters.  The data may differ because more sources are
    consulted (which can disagree on some distributions) and because the
    distro ID is normalized by :func:`distro.id`.
    """
    return _distroi.linux_distribution(full_distribution_name)
+
+
def id():
    """Return the machine-readable distro ID of the current distribution.

    For many popular distributions the returned ID is *reliable*:
    documented and stable across releases.  The maintained reliable IDs
    are: "ubuntu", "debian", "rhel", "centos", "fedora", "sles",
    "opensuse", "amazon", "arch", "cloudlinux", "exherbo", "gentoo",
    "ibm_powerkvm", "kvmibm", "linuxmint", "mageia", "mandriva",
    "parallels", "pidora", "raspbian", "oracle", "scientific",
    "slackware", "xenserver".  If a listed distro yields a different ID,
    please file an issue in the `distro issue tracker`_.

    **Lookup hierarchy and transformations:**

    The first available and non-empty value is used, in this order:

    * the "ID" attribute of the os-release file,
    * the "Distributor ID" attribute returned by the lsb_release command,
    * the first part of the file name of the distro release file.

    The value is then lower-cased, blanks are replaced with underscores,
    and it is normalized via the `normalization tables`_ so the ID stays
    reliable even across incompatible changes in a distribution's
    metadata (e.g. when an os-release file or the lsb_release command is
    added with IDs that differ from the distro release file name).
    """
    return _distroi.id()
+
+
def name(pretty=False):
    """Return the distro name as a human-readable string.

    If *pretty* is false, the name alone is returned
    (e.g. "CentOS Linux"); if true, version and codename are appended
    (e.g. "CentOS Linux 7.1.1503 (Core)").

    **Lookup hierarchy** (first non-empty value wins):

    * *pretty* false: os-release "NAME", lsb_release "Distributor ID",
      then the distro release file's name field.
    * *pretty* true: os-release "PRETTY_NAME", lsb_release "Description",
      then the distro release file's name field with its pretty version
      (version id and codename) appended, if available.
    """
    return _distroi.name(pretty)
+
+
def version(pretty=False, best=False):
    """Return the distro version as a human-readable string.

    If *pretty* is false, the version alone is returned (e.g. "7.0");
    if true, a non-empty codename is appended in parentheses
    (e.g. "7.0 (Maipo)").

    Distributions provide version numbers with different precisions in
    different sources.  With *best* false, the first non-empty value in
    priority order is returned; with *best* true, the most precise value
    among all sources is returned (relevant e.g. for Debian 8.2 or
    CentOS 7.1).

    **Priority order of sources:**

    * os-release "VERSION_ID",
    * lsb_release "Release",
    * the version field of the distro release file's first line,
    * a version parsed from os-release "PRETTY_NAME", if it follows the
      distro release file format,
    * a version parsed from lsb_release "Description", likewise.
    """
    return _distroi.version(pretty, best)
+
+
def version_parts(best=False):
    """Return the distro version as ``(major, minor, build_number)``,
    i.e. the results of :func:`distro.major_version`,
    :func:`distro.minor_version` and :func:`distro.build_number`.

    For a description of *best*, see :func:`distro.version`.
    """
    return _distroi.version_parts(best)
+
+
def major_version(best=False):
    """Return the major version (first dot-separated part) of the
    current distro's version, or '' if unavailable.

    For a description of *best*, see :func:`distro.version`.
    """
    return _distroi.major_version(best)
+
+
def minor_version(best=False):
    """Return the minor version (second dot-separated part) of the
    current distro's version, or '' if unavailable.

    For a description of *best*, see :func:`distro.version`.
    """
    return _distroi.minor_version(best)
+
+
def build_number(best=False):
    """Return the build number (third dot-separated part) of the
    current distro's version, or '' if unavailable.

    For a description of *best*, see :func:`distro.version`.
    """
    return _distroi.build_number(best)
+
+
def like():
    """Return a space-separated list of IDs of distros closely related
    to the current one (e.g. distros it derives from).

    This item comes only from the os-release file's "ID_LIKE" attribute;
    see the `os-release man page
    <http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
    """
    return _distroi.like()
+
+
def codename():
    """Return the release codename of the current distro, or '' if it
    has none.

    Note the value is not always a real codename (e.g. openSUSE returns
    "x86_64"); it is passed through unmodified.  Sources, in order: the
    codename within os-release "VERSION", lsb_release "Codename", then
    the codename field of the distro release file.
    """
    return _distroi.codename()
+
+
def info(pretty=False, best=False):
    """Return a dict of machine-readable distro information.

    The structure and keys are always the same, regardless of which data
    sources are available::

        {
            'id': ...,          # distro.id()
            'version': ...,     # distro.version()
            'version_parts': {
                'major': ...,         # distro.major_version()
                'minor': ...,         # distro.minor_version()
                'build_number': ...,  # distro.build_number()
            },
            'like': ...,        # distro.like()
            'codename': ...,    # distro.codename()
        }

    For a description of *pretty* and *best*, see :func:`distro.version`.
    """
    return _distroi.info(pretty, best)
+
+
def os_release_info():
    """Return the key-value pairs parsed from the os-release file.

    See `os-release file`_ for details about these information items.
    """
    return _distroi.os_release_info()
+
+
def lsb_release_info():
    """Return the key-value pairs parsed from the lsb_release command
    output.

    See `lsb_release command output`_ for details about these items.
    """
    return _distroi.lsb_release_info()
+
+
def distro_release_info():
    """Return the key-value pairs parsed from the distro release file.

    See `distro release file`_ for details about these items.
    """
    return _distroi.distro_release_info()
+
+
def os_release_attr(attribute):
    """Return one named item from the os-release file data, or '' if the
    item does not exist.

    See `os-release file`_ for details about these information items.
    """
    return _distroi.os_release_attr(attribute)
+
+
def lsb_release_attr(attribute):
    """Return one named item from the lsb_release command data, or '' if
    the item does not exist.

    See `lsb_release command output`_ for details about these items.
    """
    return _distroi.lsb_release_attr(attribute)
+
+
def distro_release_attr(attribute):
    """Return one named item from the distro release file data, or '' if
    the item does not exist.

    See `distro release file`_ for details about these items.
    """
    return _distroi.distro_release_attr(attribute)
+
+
+class LinuxDistribution(object):
+ """
+ Provides information about a Linux distribution.
+
+ This package creates a private module-global instance of this class with
+ default initialization arguments, that is used by the
+ `consolidated accessor functions`_ and `single source accessor functions`_.
+ By using default initialization arguments, that module-global instance
+ returns data about the current Linux distribution (i.e. the distro this
+ package runs on).
+
+ Normally, it is not necessary to create additional instances of this class.
+ However, in situations where control is needed over the exact data sources
+ that are used, instances of this class can be created with a specific
+ distro release file, or a specific os-release file, or without invoking the
+ lsb_release command.
+ """
+
    def __init__(self,
                 include_lsb=True,
                 os_release_file='',
                 distro_release_file=''):
        """Gather data from all sources once and cache it in private
        instance attributes; later accessors only read the caches.

        Parameters:

        * ``include_lsb`` (bool): Whether to run the lsb_release command
          as a data source.  If the command is not on the execution
          path, that data source is empty.

        * ``os_release_file`` (string): Path of the os-release file to
          use; '' (the default) selects the standard path.  A missing
          file yields empty data.

        * ``distro_release_file`` (string): Path of the distro release
          file to use; '' (the default) triggers the default search.
          A missing file yields empty data.

        Public instance attributes ``os_release_file`` and
        ``distro_release_file`` record the paths actually used ('' when
        that source is unused).

        Raises:

        * :py:exc:`IOError`: I/O trouble with an os-release or distro
          release file.

        * :py:exc:`subprocess.CalledProcessError`: lsb_release failed
          (other than not being available).

        * :py:exc:`UnicodeError`: A data source has unexpected
          characters or encoding.
        """
        self.os_release_file = os_release_file or \
            os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
        self.distro_release_file = distro_release_file or ''  # updated later
        # NOTE: these assignments deliberately shadow the identically
        # named methods with their cached dict results; all later code
        # reads the dicts.
        self._os_release_info = self._os_release_info()
        self._lsb_release_info = self._lsb_release_info() \
            if include_lsb else {}
        self._distro_release_info = self._distro_release_info()
+
+ def __repr__(self):
+ return \
+ "LinuxDistribution(" \
+ "os_release_file={0!r}, " \
+ "distro_release_file={1!r}, " \
+ "_os_release_info={2!r}, " \
+ "_lsb_release_info={3!r}, " \
+ "_distro_release_info={4!r})".format(
+ self.os_release_file,
+ self.distro_release_file,
+ self._os_release_info,
+ self._lsb_release_info,
+ self._distro_release_info)
+
+ def linux_distribution(self, full_distribution_name=True):
+ """
+ Return information about the Linux distribution that is compatible
+ with Python's :func:`platform.linux_distribution`, supporting a subset
+ of its parameters.
+
+ For details, see :func:`distro.linux_distribution`.
+ """
+ return (
+ self.name() if full_distribution_name else self.id(),
+ self.version(),
+ self.codename()
+ )
+
+ def id(self):
+ """
+ Return the distro ID of the Linux distribution, as a string.
+
+ For details, see :func:`distro.id`.
+ """
+ distro_id = self.os_release_attr('id')
+ if distro_id:
+ distro_id = distro_id.lower().replace(' ', '_')
+ return NORMALIZED_OS_ID.get(distro_id, distro_id)
+
+ distro_id = self.lsb_release_attr('distributor_id')
+ if distro_id:
+ distro_id = distro_id.lower().replace(' ', '_')
+ return NORMALIZED_LSB_ID.get(distro_id, distro_id)
+
+ distro_id = self.distro_release_attr('id')
+ if distro_id:
+ distro_id = distro_id.lower().replace(' ', '_')
+ return NORMALIZED_DISTRO_ID.get(distro_id, distro_id)
+
+ return ''
+
+ def name(self, pretty=False):
+ """
+ Return the name of the Linux distribution, as a string.
+
+ For details, see :func:`distro.name`.
+ """
+ name = self.os_release_attr('name') \
+ or self.lsb_release_attr('distributor_id') \
+ or self.distro_release_attr('name')
+ if pretty:
+ name = self.os_release_attr('pretty_name') \
+ or self.lsb_release_attr('description')
+ if not name:
+ name = self.distro_release_attr('name')
+ version = self.version(pretty=True)
+ if version:
+ name = name + ' ' + version
+ return name or ''
+
+ def version(self, pretty=False, best=False):
+ """
+ Return the version of the Linux distribution, as a string.
+
+ For details, see :func:`distro.version`.
+ """
+ versions = [
+ self.os_release_attr('version_id'),
+ self.lsb_release_attr('release'),
+ self.distro_release_attr('version_id'),
+ self._parse_distro_release_content(
+ self.os_release_attr('pretty_name')).get('version_id', ''),
+ self._parse_distro_release_content(
+ self.lsb_release_attr('description')).get('version_id', '')
+ ]
+ version = ''
+ if best:
+ # This algorithm uses the last version in priority order that has
+ # the best precision. If the versions are not in conflict, that
+ # does not matter; otherwise, using the last one instead of the
+ # first one might be considered a surprise.
+ for v in versions:
+ if v.count(".") > version.count(".") or version == '':
+ version = v
+ else:
+ for v in versions:
+ if v != '':
+ version = v
+ break
+ if pretty and version and self.codename():
+ version = u'{0} ({1})'.format(version, self.codename())
+ return version
+
+ def version_parts(self, best=False):
+ """
+ Return the version of the Linux distribution, as a tuple of version
+ numbers.
+
+ For details, see :func:`distro.version_parts`.
+ """
+ version_str = self.version(best=best)
+ if version_str:
+ g = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
+ m = g.match(version_str)
+ if m:
+ major, minor, build_number = m.groups()
+ return major, minor or '', build_number or ''
+ return '', '', ''
+
    def major_version(self, best=False):
        """Return the first dot-separated part of the version, or ''."""
        return self.version_parts(best)[0]
+
    def minor_version(self, best=False):
        """Return the second dot-separated part of the version, or ''."""
        return self.version_parts(best)[1]
+
    def build_number(self, best=False):
        """Return the third dot-separated part of the version, or ''."""
        return self.version_parts(best)[2]
+
    def like(self):
        """Return the os-release "ID_LIKE" value (space-separated IDs of
        related distros), or ''.
        """
        return self.os_release_attr('id_like') or ''
+
+ def codename(self):
+ """
+ Return the codename of the Linux distribution.
+
+ For details, see :func:`distro.codename`.
+ """
+ return self.os_release_attr('codename') \
+ or self.lsb_release_attr('codename') \
+ or self.distro_release_attr('codename') \
+ or ''
+
+ def info(self, pretty=False, best=False):
+ """
+ Return certain machine-readable information about the Linux
+ distribution.
+
+ For details, see :func:`distro.info`.
+ """
+ return dict(
+ id=self.id(),
+ version=self.version(pretty, best),
+ version_parts=dict(
+ major=self.major_version(best),
+ minor=self.minor_version(best),
+ build_number=self.build_number(best)
+ ),
+ like=self.like(),
+ codename=self.codename(),
+ )
+
    def os_release_info(self):
        """Return the cached dict of os-release file items."""
        return self._os_release_info
+
    def lsb_release_info(self):
        """Return the cached dict of lsb_release command items."""
        return self._lsb_release_info
+
    def distro_release_info(self):
        """Return the cached dict of distro release file items."""
        return self._distro_release_info
+
    def os_release_attr(self, attribute):
        """Return one os-release item, or '' if it is absent."""
        return self._os_release_info.get(attribute, '')
+
    def lsb_release_attr(self, attribute):
        """Return one lsb_release item, or '' if it is absent."""
        return self._lsb_release_info.get(attribute, '')
+
    def distro_release_attr(self, attribute):
        """Return one distro release file item, or '' if it is absent."""
        return self._distro_release_info.get(attribute, '')
+
    def _os_release_info(self):
        """Parse the configured os-release file into a dict.

        Returns {} when ``self.os_release_file`` does not exist.  Called
        once from ``__init__``, whose assignment then shadows this
        method with the resulting dict.
        """
        if os.path.isfile(self.os_release_file):
            with open(self.os_release_file, 'r') as f:
                return self._parse_os_release_content(f)
        return {}
+
+ @staticmethod
+ def _parse_os_release_content(lines):
+ """
+ Parse the lines of an os-release file.
+
+ Parameters:
+
+ * lines: Iterable through the lines in the os-release file.
+ Each line must be a unicode string or a UTF-8 encoded byte
+ string.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ props = {}
+ lexer = shlex.shlex(lines, posix=True)
+ lexer.whitespace_split = True
+
+ # The shlex module defines its `wordchars` variable using literals,
+ # making it dependent on the encoding of the Python source file.
+ # In Python 2.6 and 2.7, the shlex source file is encoded in
+ # 'iso-8859-1', and the `wordchars` variable is defined as a byte
+ # string. This causes a UnicodeDecodeError to be raised when the
+ # parsed content is a unicode object. The following fix resolves that
+ # (... but it should be fixed in shlex...):
+ if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
+ lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
+
+ tokens = list(lexer)
+ for token in tokens:
+ # At this point, all shell-like parsing has been done (i.e.
+ # comments processed, quotes and backslash escape sequences
+ # processed, multi-line values assembled, trailing newlines
+ # stripped, etc.), so the tokens are now either:
+ # * variable assignments: var=value
+ # * commands or their arguments (not allowed in os-release)
+ if '=' in token:
+ k, v = token.split('=', 1)
+ if isinstance(v, bytes):
+ v = v.decode('utf-8')
+ props[k.lower()] = v
+ if k == 'VERSION':
+ # this handles cases in which the codename is in
+ # the `(CODENAME)` (rhel, centos, fedora) format
+ # or in the `, CODENAME` format (Ubuntu).
+ codename = re.search(r'(\(\D+\))|,(\s+)?\D+', v)
+ if codename:
+ codename = codename.group()
+ codename = codename.strip('()')
+ codename = codename.strip(',')
+ codename = codename.strip()
+ # codename appears within paranthese.
+ props['codename'] = codename
+ else:
+ props['codename'] = ''
+ else:
+ # Ignore any tokens that are not variable assignments
+ pass
+ return props
+
    def _lsb_release_info(self):
        """Run ``lsb_release -a`` and parse its output into a dict.

        Returns {} when the command is not installed (shell exit code
        127); any other failure raises
        :py:exc:`subprocess.CalledProcessError`, built with whichever
        constructor signature the running Python supports.
        """
        cmd = 'lsb_release -a'
        p = subprocess.Popen(
            cmd,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        out, err = p.communicate()
        # NOTE(review): assumes lsb_release output is pure ASCII;
        # non-ASCII output would raise UnicodeDecodeError here - confirm.
        out, err = out.decode('ascii'), err.decode('ascii')
        rc = p.returncode
        if rc == 0:
            content = out.splitlines()
            return self._parse_lsb_release_content(content)
        elif rc == 127:  # Command not found
            return {}
        else:
            if sys.version_info[:2] >= (3, 5):
                raise subprocess.CalledProcessError(rc, cmd, out, err)
            elif sys.version_info[:2] >= (2, 7):
                raise subprocess.CalledProcessError(rc, cmd, out)
            elif sys.version_info[:2] == (2, 6):
                raise subprocess.CalledProcessError(rc, cmd)
+
+ @staticmethod
+ def _parse_lsb_release_content(lines):
+ """
+ Parse the output of the lsb_release command.
+
+ Parameters:
+
+ * lines: Iterable through the lines of the lsb_release output.
+ Each line must be a unicode string or a UTF-8 encoded byte
+ string.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ props = {}
+ for line in lines:
+ if isinstance(line, bytes):
+ line = line.decode('utf-8')
+ kv = line.strip('\n').split(':', 1)
+ if len(kv) != 2:
+ # Ignore lines without colon.
+ continue
+ k, v = kv
+ props.update({k.replace(' ', '_').lower(): v.strip()})
+ return props
+
    def _distro_release_info(self):
        """Locate and parse a distro release file into a dict.

        A user-specified ``self.distro_release_file`` is parsed even if
        its name or content does not follow the expected pattern;
        otherwise /etc is scanned (sorted, for repeatability) for the
        first matching ``*-release``/``*_version`` file, skipping
        well-known non-distro files.  Returns {} when nothing usable is
        found.
        """
        if self.distro_release_file:
            # If it was specified, we use it and parse what we can, even if
            # its file name or content does not match the expected pattern.
            distro_info = self._parse_distro_release_file(
                self.distro_release_file)
            basename = os.path.basename(self.distro_release_file)
            # The file name pattern for user-specified distro release files
            # is somewhat more tolerant (compared to when searching for the
            # file), because we want to use what was specified as best as
            # possible.
            match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
            if match:
                distro_info['id'] = match.group(1)
            return distro_info
        else:
            basenames = os.listdir(_UNIXCONFDIR)
            # We sort for repeatability in cases where there are multiple
            # distro specific files; e.g. CentOS, Oracle, Enterprise all
            # containing `redhat-release` on top of their own.
            basenames.sort()
            for basename in basenames:
                if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
                    continue
                match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
                if match:
                    filepath = os.path.join(_UNIXCONFDIR, basename)
                    distro_info = self._parse_distro_release_file(filepath)
                    if 'name' in distro_info:
                        # The name is always present if the pattern matches
                        self.distro_release_file = filepath
                        distro_info['id'] = match.group(1)
                        return distro_info
            return {}
+
    def _parse_distro_release_file(self, filepath):
        """Parse the first line of a distro release file into a dict.

        Returns {} when *filepath* does not exist.
        """
        if os.path.isfile(filepath):
            with open(filepath, 'r') as fp:
                # Only parse the first line. For instance, on SLES there
                # are multiple lines. We don't want them...
                return self._parse_distro_release_content(fp.readline())
        return {}
+
+ @staticmethod
+ def _parse_distro_release_content(line):
+ """
+ Parse a line from a distro release file.
+
+ Parameters:
+ * line: Line from the distro release file. Must be a unicode string
+ or a UTF-8 encoded byte string.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if isinstance(line, bytes):
+ line = line.decode('utf-8')
+ m = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
+ line.strip()[::-1])
+ distro_info = {}
+ if m:
+ distro_info['name'] = m.group(3)[::-1] # regexp ensures non-None
+ if m.group(2):
+ distro_info['version_id'] = m.group(2)[::-1]
+ if m.group(1):
+ distro_info['codename'] = m.group(1)[::-1]
+ elif line:
+ distro_info['name'] = line.strip()
+ return distro_info
+
+
# Module-global instance backing the module-level convenience functions.
_distroi = LinuxDistribution()
diff --git a/lib/spack/llnl/util/tty/log.py b/lib/spack/llnl/util/tty/log.py
index a4ba2a9bdf..3d4972b3ae 100644
--- a/lib/spack/llnl/util/tty/log.py
+++ b/lib/spack/llnl/util/tty/log.py
@@ -120,7 +120,14 @@ class log_output(object):
daemon joining. If echo is True, also prints the output to stdout.
"""
- def __init__(self, filename, echo=False, force_color=False, debug=False):
+ def __init__(
+ self,
+ filename,
+ echo=False,
+ force_color=False,
+ debug=False,
+ input_stream=sys.stdin
+ ):
self.filename = filename
# Various output options
self.echo = echo
@@ -132,46 +139,57 @@ class log_output(object):
self.directAssignment = False
self.read, self.write = os.pipe()
- # Sets a daemon that writes to file what it reads from a pipe
- self.p = multiprocessing.Process(
- target=self._spawn_writing_daemon,
- args=(self.read,),
- name='logger_daemon'
- )
- self.p.daemon = True
# Needed to un-summon the daemon
self.parent_pipe, self.child_pipe = multiprocessing.Pipe()
+ # Input stream that controls verbosity interactively
+ self.input_stream = input_stream
def __enter__(self):
- self.p.start()
+ # Sets a daemon that writes to file what it reads from a pipe
+ try:
+ fwd_input_stream = os.fdopen(
+ os.dup(self.input_stream.fileno())
+ )
+ self.p = multiprocessing.Process(
+ target=self._spawn_writing_daemon,
+ args=(self.read, fwd_input_stream),
+ name='logger_daemon'
+ )
+ self.p.daemon = True
+ self.p.start()
+ finally:
+ fwd_input_stream.close()
return log_output.OutputRedirection(self)
def __exit__(self, exc_type, exc_val, exc_tb):
self.parent_pipe.send(True)
self.p.join(60.0) # 1 minute to join the child
- def _spawn_writing_daemon(self, read):
+ def _spawn_writing_daemon(self, read, input_stream):
# Parent: read from child, skip the with block.
read_file = os.fdopen(read, 'r', 0)
with open(self.filename, 'w') as log_file:
- with keyboard_input(sys.stdin):
+ with keyboard_input(input_stream):
while True:
- rlist, _, _ = select.select([read_file, sys.stdin], [], [])
- if not rlist:
- break
+ # Without the last parameter (timeout) select will wait
+ # until at least one of the two streams are ready. This
+ # may cause the function to hang.
+ rlist, _, _ = select.select(
+ [read_file, input_stream], [], [], 0
+ )
# Allow user to toggle echo with 'v' key.
# Currently ignores other chars.
- if sys.stdin in rlist:
- if sys.stdin.read(1) == 'v':
+ if input_stream in rlist:
+ if input_stream.read(1) == 'v':
self.echo = not self.echo
# Handle output from the with block process.
if read_file in rlist:
+ # If we arrive here it means that read_file was ready
+ # for reading: it should never happen that line is
+ # false-ish
line = read_file.readline()
- if not line:
- # For some reason we never reach this point...
- break
# Echo to stdout if requested.
if self.echo:
diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py
index 81f893f736..1e6c473efd 100644
--- a/lib/spack/spack/build_environment.py
+++ b/lib/spack/spack/build_environment.py
@@ -528,10 +528,10 @@ def fork(pkg, function, dirty=False):
carries on.
"""
- def child_execution(child_connection):
+ def child_execution(child_connection, input_stream):
try:
setup_package(pkg, dirty=dirty)
- function()
+ function(input_stream)
child_connection.send(None)
except:
# catch ANYTHING that goes wrong in the child process
@@ -559,11 +559,18 @@ def fork(pkg, function, dirty=False):
child_connection.close()
parent_connection, child_connection = multiprocessing.Pipe()
- p = multiprocessing.Process(
- target=child_execution,
- args=(child_connection,)
- )
- p.start()
+ try:
+ # Forward sys.stdin to be able to activate / deactivate
+ # verbosity pressing a key at run-time
+ input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
+ p = multiprocessing.Process(
+ target=child_execution,
+ args=(child_connection, input_stream)
+ )
+ p.start()
+ finally:
+ # Close the input stream in the parent process
+ input_stream.close()
child_exc = parent_connection.recv()
p.join()
diff --git a/lib/spack/spack/build_systems/autotools.py b/lib/spack/spack/build_systems/autotools.py
index 0bb5576708..8535c9d3e3 100644
--- a/lib/spack/spack/build_systems/autotools.py
+++ b/lib/spack/spack/build_systems/autotools.py
@@ -24,7 +24,11 @@
##############################################################################
import inspect
+import os
import os.path
+import shutil
+from subprocess import PIPE
+from subprocess import check_call
import llnl.util.tty as tty
from spack.package import PackageBase
@@ -46,6 +50,79 @@ class AutotoolsPackage(PackageBase):
# To be used in UI queries that require to know which
# build-system class we are using
build_system_class = 'AutotoolsPackage'
+ patch_config_guess = True
+
+ def do_patch_config_guess(self):
+ """Some packages ship with an older config.guess and need to have
+ this updated when installed on a newer architecture."""
+
+ my_config_guess = None
+ config_guess = None
+ if os.path.exists('config.guess'):
+ # First search the top-level source directory
+ my_config_guess = 'config.guess'
+ else:
+ # Then search in all sub directories.
+ # We would like to use AC_CONFIG_AUX_DIR, but not all packages
+ # ship with their configure.in or configure.ac.
+ d = '.'
+ dirs = [os.path.join(d, o) for o in os.listdir(d)
+ if os.path.isdir(os.path.join(d, o))]
+ for dirname in dirs:
+ path = os.path.join(dirname, 'config.guess')
+ if os.path.exists(path):
+ my_config_guess = path
+
+ if my_config_guess is not None:
+ try:
+ check_call([my_config_guess], stdout=PIPE, stderr=PIPE)
+ # The package's config.guess already runs OK, so just use it
+ return True
+ except:
+ pass
+
+ # Look for a spack-installed automake package
+ if 'automake' in self.spec:
+ automake_path = os.path.join(self.spec['automake'].prefix, 'share',
+ 'automake-' +
+ str(self.spec['automake'].version))
+ path = os.path.join(automake_path, 'config.guess')
+ if os.path.exists(path):
+ config_guess = path
+ if config_guess is not None:
+ try:
+ check_call([config_guess], stdout=PIPE, stderr=PIPE)
+ shutil.copyfile(config_guess, my_config_guess)
+ return True
+ except:
+ pass
+
+ # Look for the system's config.guess
+ if os.path.exists('/usr/share'):
+ automake_dir = [s for s in os.listdir('/usr/share') if
+ "automake" in s]
+ if automake_dir:
+ automake_path = os.path.join('/usr/share', automake_dir[0])
+ path = os.path.join(automake_path, 'config.guess')
+ if os.path.exists(path):
+ config_guess = path
+ if config_guess is not None:
+ try:
+ check_call([config_guess], stdout=PIPE, stderr=PIPE)
+ shutil.copyfile(config_guess, my_config_guess)
+ return True
+ except:
+ pass
+
+ return False
+
+ def patch(self):
+ """Perform any required patches."""
+
+ if self.patch_config_guess and self.spec.satisfies(
+ 'arch=linux-redhat7-ppc64le'):
+ if not self.do_patch_config_guess():
+ raise RuntimeError('Failed to find suitable config.guess')
def autoreconf(self, spec, prefix):
"""Not needed usually, configure should be already there"""
diff --git a/lib/spack/spack/cmd/common/arguments.py b/lib/spack/spack/cmd/common/arguments.py
index ade6844813..1470ea035d 100644
--- a/lib/spack/spack/cmd/common/arguments.py
+++ b/lib/spack/spack/cmd/common/arguments.py
@@ -46,19 +46,25 @@ class ConstraintAction(argparse.Action):
"""Constructs a list of specs based on a constraint given on the command line
An instance of this class is supposed to be used as an argument action
- in a parser. It will read a constraint and will attach a list of matching
- specs to the namespace
+ in a parser. It will read a constraint and will attach a function to the
+ arguments that accepts optional keyword arguments.
+
+ To obtain the specs from a command the function must be called.
"""
- qualifiers = {}
def __call__(self, parser, namespace, values, option_string=None):
# Query specs from command line
- d = self.qualifiers.get(namespace.subparser_name, {})
- specs = [s for s in spack.store.db.query(**d)]
- values = ' '.join(values)
+ self.values = values
+ namespace.contraint = values
+ namespace.specs = self._specs
+
+ def _specs(self, **kwargs):
+ specs = [s for s in spack.store.db.query(**kwargs)]
+ values = ' '.join(self.values)
if values:
specs = [x for x in specs if x.satisfies(values, strict=True)]
- namespace.specs = specs
+ return specs
+
_arguments['constraint'] = Args(
'constraint', nargs='*', action=ConstraintAction,
@@ -83,3 +89,11 @@ _arguments['clean'] = Args(
_arguments['dirty'] = Args(
'--dirty', action='store_true', dest='dirty',
help='Do NOT clean environment before installing.')
+
+_arguments['long'] = Args(
+ '-l', '--long', action='store_true',
+ help='Show dependency hashes as well as versions.')
+
+_arguments['very_long'] = Args(
+ '-L', '--very-long', action='store_true',
+ help='Show full dependency hashes as well as versions.')
diff --git a/lib/spack/spack/cmd/debug.py b/lib/spack/spack/cmd/debug.py
index 757c5bca80..c7e90cb210 100644
--- a/lib/spack/spack/cmd/debug.py
+++ b/lib/spack/spack/cmd/debug.py
@@ -74,16 +74,16 @@ def create_db_tarball(args):
tarball_name = "spack-db.%s.tar.gz" % _debug_tarball_suffix()
tarball_path = os.path.abspath(tarball_name)
- base = os.path.basename(spack.install_path)
+ base = os.path.basename(spack.store.root)
transform_args = []
if 'GNU' in tar('--version', output=str):
transform_args = ['--transform', 's/^%s/%s/' % (base, tarball_name)]
else:
transform_args = ['-s', '/^%s/%s/' % (base, tarball_name)]
- wd = os.path.dirname(spack.install_path)
+ wd = os.path.dirname(spack.store.root)
with working_dir(wd):
- files = [spack.installed_db._index_path]
+ files = [spack.store.db._index_path]
files += glob('%s/*/*/*/.spack/spec.yaml' % base)
files = [os.path.relpath(f) for f in files]
diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py
index 50e6112486..27949ef5db 100644
--- a/lib/spack/spack/cmd/find.py
+++ b/lib/spack/spack/cmd/find.py
@@ -22,16 +22,11 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import argparse
import sys
import llnl.util.tty as tty
-import spack
-import spack.spec
-import spack.store
-from llnl.util.lang import *
-from llnl.util.tty.colify import *
-from llnl.util.tty.color import *
+import spack.cmd.common.arguments as arguments
+
from spack.cmd import display_specs
description = "Find installed spack packages"
@@ -43,6 +38,7 @@ def setup_parser(subparser):
action='store_const',
dest='mode',
const='short',
+ default='short',
help='Show only specs (default)')
format_group.add_argument('-p', '--paths',
action='store_const',
@@ -56,24 +52,18 @@ def setup_parser(subparser):
const='deps',
help='Show full dependency DAG of installed packages')
- subparser.add_argument('-l', '--long',
- action='store_true',
- dest='long',
- help='Show dependency hashes as well as versions.')
- subparser.add_argument('-L', '--very-long',
- action='store_true',
- dest='very_long',
- help='Show dependency hashes as well as versions.')
+ arguments.add_common_arguments(subparser, ['long', 'very_long'])
+
subparser.add_argument('-f', '--show-flags',
action='store_true',
dest='show_flags',
help='Show spec compiler flags.')
-
- subparser.add_argument(
+ implicit_explicit = subparser.add_mutually_exclusive_group()
+ implicit_explicit.add_argument(
'-e', '--explicit',
action='store_true',
help='Show only specs that were installed explicitly')
- subparser.add_argument(
+ implicit_explicit.add_argument(
'-E', '--implicit',
action='store_true',
help='Show only specs that were installed as dependencies')
@@ -100,17 +90,10 @@ def setup_parser(subparser):
action='store_true',
help='Show fully qualified package names.')
- subparser.add_argument('query_specs',
- nargs=argparse.REMAINDER,
- help='optional specs to filter results')
+ arguments.add_common_arguments(subparser, ['constraint'])
def query_arguments(args):
- # Check arguments
- if args.explicit and args.implicit:
- tty.error('You can\'t pass -E and -e options simultaneously.')
- raise SystemExit(1)
-
# Set up query arguments.
installed, known = True, any
if args.only_missing:
@@ -129,35 +112,17 @@ def query_arguments(args):
def find(parser, args):
- # Filter out specs that don't exist.
- query_specs = spack.cmd.parse_specs(args.query_specs)
- query_specs, nonexisting = partition_list(
- query_specs, lambda s: spack.repo.exists(s.name) or not s.name)
-
- if nonexisting:
- msg = "No such package%s: " % ('s' if len(nonexisting) > 1 else '')
- msg += ", ".join(s.name for s in nonexisting)
- tty.msg(msg)
-
- if not query_specs:
- return
-
q_args = query_arguments(args)
-
- # Get all the specs the user asked for
+ query_specs = args.specs(**q_args)
+ # Exit early if no package matches the constraint
if not query_specs:
- specs = set(spack.store.db.query(**q_args))
- else:
- results = [set(spack.store.db.query(qs, **q_args))
- for qs in query_specs]
- specs = set.union(*results)
-
- if not args.mode:
- args.mode = 'short'
-
+ msg = "No package matches the query: {0}".format(args.contraint)
+ tty.msg(msg)
+ return
+ # Display the result
if sys.stdout.isatty():
- tty.msg("%d installed packages." % len(specs))
- display_specs(specs,
+ tty.msg("%d installed packages." % len(query_specs))
+ display_specs(query_specs,
mode=args.mode,
long=args.long,
very_long=args.very_long,
diff --git a/lib/spack/spack/cmd/flake8.py b/lib/spack/spack/cmd/flake8.py
index 8648bc88d6..a4c607a640 100755..100644
--- a/lib/spack/spack/cmd/flake8.py
+++ b/lib/spack/spack/cmd/flake8.py
@@ -27,6 +27,7 @@ import os
import sys
import shutil
import tempfile
+import argparse
from llnl.util.filesystem import *
@@ -51,6 +52,9 @@ exemptions = {
# Exempt lines with urls and descriptions from overlong line errors.
501: [r'^\s*homepage\s*=',
r'^\s*url\s*=',
+ r'^\s*git\s*=',
+ r'^\s*svn\s*=',
+ r'^\s*hg\s*=',
r'^\s*version\(.*\)',
r'^\s*variant\(.*\)',
r'^\s*depends_on\(.*\)',
@@ -62,7 +66,7 @@ exemptions = {
# exemptions applied to all files.
r'.py$': {
# Exempt lines with URLs from overlong line errors.
- 501: [r'^(https?|file)\:']
+ 501: [r'(https?|file)\:']
},
}
@@ -73,25 +77,30 @@ exemptions = dict((re.compile(file_pattern),
for file_pattern, error_dict in exemptions.items())
-def filter_file(source, dest):
+def filter_file(source, dest, output=False):
"""Filter a single file through all the patterns in exemptions."""
- for file_pattern, errors in exemptions.items():
- if not file_pattern.search(source):
- continue
+ with open(source) as infile:
+ parent = os.path.dirname(dest)
+ mkdirp(parent)
- with open(source) as infile:
- parent = os.path.dirname(dest)
- mkdirp(parent)
+ with open(dest, 'w') as outfile:
+ for line in infile:
+ line = line.rstrip()
+
+ for file_pattern, errors in exemptions.items():
+ if not file_pattern.search(source):
+ continue
- with open(dest, 'w') as outfile:
- for line in infile:
- line = line.rstrip()
for code, patterns in errors.items():
for pattern in patterns:
if pattern.search(line):
line += (" # NOQA: ignore=%d" % code)
break
- outfile.write(line + '\n')
+
+ oline = line + '\n'
+ outfile.write(oline)
+ if output:
+ sys.stdout.write(oline)
def setup_parser(subparser):
@@ -99,6 +108,14 @@ def setup_parser(subparser):
'-k', '--keep-temp', action='store_true',
help="Do not delete temporary directory where flake8 runs. "
"Use for debugging, to see filtered files.")
+ subparser.add_argument(
+ '-o', '--output', action='store_true',
+ help="Send filtered files to stdout as well as temp files.")
+ subparser.add_argument(
+ '-r', '--root-relative', action='store_true', default=False,
+ help="print root-relative paths (default is cwd-relative)")
+ subparser.add_argument(
+ 'files', nargs=argparse.REMAINDER, help="specific files to check")
def flake8(parser, args):
@@ -108,28 +125,51 @@ def flake8(parser, args):
temp = tempfile.mkdtemp()
try:
+ file_list = args.files
+ if file_list:
+ def prefix_relative(path):
+ return os.path.relpath(
+ os.path.abspath(os.path.realpath(path)), spack.prefix)
+
+ file_list = [prefix_relative(p) for p in file_list]
+
with working_dir(spack.prefix):
- changed = changed_files('*.py', output=str)
- changed = [x for x in changed.split('\n') if x]
+ if not file_list:
+ file_list = changed_files('*.py', output=str)
+ file_list = [x for x in file_list.split('\n') if x]
+
shutil.copy('.flake8', os.path.join(temp, '.flake8'))
print '======================================================='
print 'flake8: running flake8 code checks on spack.'
print
print 'Modified files:'
- for filename in changed:
+ for filename in file_list:
print " %s" % filename.strip()
print('=======================================================')
# filter files into a temporary directory with exemptions added.
- for filename in changed:
+ for filename in file_list:
src_path = os.path.join(spack.prefix, filename)
dest_path = os.path.join(temp, filename)
- filter_file(src_path, dest_path)
+ filter_file(src_path, dest_path, args.output)
# run flake8 on the temporary tree.
with working_dir(temp):
- flake8('--format', 'pylint', *changed, fail_on_error=False)
+ output = flake8('--format', 'pylint', *file_list,
+ fail_on_error=False, output=str)
+
+ if args.root_relative:
+ # print results relative to repo root.
+ print output
+ else:
+ # print results relative to current working directory
+ def cwd_relative(path):
+ return '%s: [' % os.path.relpath(
+ os.path.join(spack.prefix, path.group(1)), os.getcwd())
+
+ for line in output.split('\n'):
+ print re.sub(r'^(.*): \[', cwd_relative, line)
if flake8.returncode != 0:
print "Flake8 found errors."
diff --git a/lib/spack/spack/cmd/module.py b/lib/spack/spack/cmd/module.py
index d7abe0fa87..31460b3124 100644
--- a/lib/spack/spack/cmd/module.py
+++ b/lib/spack/spack/cmd/module.py
@@ -244,17 +244,17 @@ def module(parser, args):
'known': True
},
}
- arguments.ConstraintAction.qualifiers.update(constraint_qualifiers)
-
+ query_args = constraint_qualifiers.get(args.subparser_name, {})
+ specs = args.specs(**query_args)
module_type = args.module_type
constraint = args.constraint
try:
- callbacks[args.subparser_name](module_type, args.specs, args)
+ callbacks[args.subparser_name](module_type, specs, args)
except MultipleMatches:
message = ('the constraint \'{query}\' matches multiple packages, '
'and this is not allowed in this context')
tty.error(message.format(query=constraint))
- for s in args.specs:
+ for s in specs:
sys.stderr.write(s.format(color=True) + '\n')
raise SystemExit(1)
except NoMatch:
diff --git a/lib/spack/spack/cmd/spec.py b/lib/spack/spack/cmd/spec.py
index 6e6d1c1277..0a6fb330ac 100644
--- a/lib/spack/spack/cmd/spec.py
+++ b/lib/spack/spack/cmd/spec.py
@@ -23,36 +23,57 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
-import spack.cmd
import spack
+import spack.cmd
+import spack.cmd.common.arguments as arguments
description = "print out abstract and concrete versions of a spec."
def setup_parser(subparser):
- subparser.add_argument('-i', '--ids', action='store_true',
- help="show numerical ids for dependencies.")
+ arguments.add_common_arguments(subparser, ['long', 'very_long'])
+ subparser.add_argument(
+ '-y', '--yaml', action='store_true', default=False,
+ help='Print concrete spec as YAML.')
+ subparser.add_argument(
+ '-c', '--cover', action='store',
+ default='nodes', choices=['nodes', 'edges', 'paths'],
+ help='How extensively to traverse the DAG. (default: nodes).')
+ subparser.add_argument(
+ '-I', '--install-status', action='store_true', default=False,
+ help='Show install status of packages. Packages can be: '
+ 'installed [+], missing and needed by an installed package [-], '
+ 'or not installed (no annotation).')
subparser.add_argument(
'specs', nargs=argparse.REMAINDER, help="specs of packages")
def spec(parser, args):
- kwargs = {'ids': args.ids,
- 'indent': 2,
- 'color': True}
+ kwargs = {'color': True,
+ 'cover': args.cover,
+ 'install_status': args.install_status,
+ 'hashes': args.long or args.very_long,
+ 'hashlen': None if args.very_long else 7}
for spec in spack.cmd.parse_specs(args.specs):
+ # With -y, just print YAML to output.
+ if args.yaml:
+ spec.concretize()
+ print spec.to_yaml()
+ continue
+
+ # Print some diagnostic info by default.
print "Input spec"
- print "------------------------------"
+ print "--------------------------------"
print spec.tree(**kwargs)
print "Normalized"
- print "------------------------------"
+ print "--------------------------------"
spec.normalize()
print spec.tree(**kwargs)
print "Concretized"
- print "------------------------------"
+ print "--------------------------------"
spec.concretize()
print spec.tree(**kwargs)
diff --git a/lib/spack/spack/compilers/__init__.py b/lib/spack/spack/compilers/__init__.py
index 0db632a880..72f8532d3e 100644
--- a/lib/spack/spack/compilers/__init__.py
+++ b/lib/spack/spack/compilers/__init__.py
@@ -26,7 +26,6 @@
system and configuring Spack to use multiple compilers.
"""
import imp
-import platform
from llnl.util.lang import list_modules
from llnl.util.filesystem import join_path
@@ -44,12 +43,6 @@ _path_instance_vars = ['cc', 'cxx', 'f77', 'fc']
_other_instance_vars = ['modules', 'operating_system']
_cache_config_file = []
-# TODO: customize order in config file
-if platform.system() == 'Darwin':
- _default_order = ['clang', 'gcc', 'intel']
-else:
- _default_order = ['gcc', 'intel', 'pgi', 'clang', 'xlc', 'nag']
-
def _auto_compiler_spec(function):
def converter(cspec_like, *args, **kwargs):
@@ -169,18 +162,6 @@ def all_compilers(scope=None, init_config=True):
for s in all_compilers_config(scope, init_config)]
-def default_compiler():
- versions = []
- for name in _default_order:
- versions = find(name)
- if versions:
- break
- else:
- raise NoCompilersError()
-
- return sorted(versions)[-1]
-
-
def find_compilers(*paths):
"""Return a list of compilers found in the suppied paths.
This invokes the find_compilers() method for each operating
diff --git a/lib/spack/spack/compilers/clang.py b/lib/spack/spack/compilers/clang.py
index 34eec4ea7b..da18adcecd 100644
--- a/lib/spack/spack/compilers/clang.py
+++ b/lib/spack/spack/compilers/clang.py
@@ -24,6 +24,7 @@
##############################################################################
import re
import os
+import sys
import spack
import spack.compiler as cpr
from spack.compiler import *
@@ -41,18 +42,18 @@ class Clang(Compiler):
cxx_names = ['clang++']
# Subclasses use possible names of Fortran 77 compiler
- f77_names = []
+ f77_names = ['gfortran']
# Subclasses use possible names of Fortran 90 compiler
- fc_names = []
+ fc_names = ['gfortran']
# Named wrapper links within spack.build_env_path
link_paths = {'cc': 'clang/clang',
'cxx': 'clang/clang++',
# Use default wrappers for fortran, in case provided in
# compilers.yaml
- 'f77': 'f77',
- 'fc': 'f90'}
+ 'f77': 'clang/gfortran',
+ 'fc': 'clang/gfortran'}
@property
def is_apple(self):
@@ -121,6 +122,28 @@ class Clang(Compiler):
full_path = xcrun('-f', basename, output=str)
return full_path.strip()
+ @classmethod
+ def fc_version(cls, fc):
+ version = get_compiler_version(
+ fc, '-dumpversion',
+ # older gfortran versions don't have simple dumpversion output.
+ r'(?:GNU Fortran \(GCC\))?(\d+\.\d+(?:\.\d+)?)')
+ # This is horribly ad hoc, we need to map from gcc/gfortran version
+ # to clang version, but there could be multiple clang
+ # versions that work for a single gcc/gfortran version
+ if sys.platform == 'darwin':
+ clangversionfromgcc = {'6.2.0': '8.0.0-apple'}
+ else:
+ clangversionfromgcc = {}
+ if version in clangversionfromgcc:
+ return clangversionfromgcc[version]
+ else:
+ return 'unknown'
+
+ @classmethod
+ def f77_version(cls, f77):
+ return cls.fc_version(f77)
+
def setup_custom_environment(self, env):
"""Set the DEVELOPER_DIR environment for the Xcode toolchain.
diff --git a/lib/spack/spack/concretize.py b/lib/spack/spack/concretize.py
index 2351e2bfc9..dcea147814 100644
--- a/lib/spack/spack/concretize.py
+++ b/lib/spack/spack/concretize.py
@@ -385,14 +385,15 @@ class DefaultConcretizer(object):
arch.platform_os)
# copy concrete version into other_compiler
- index = 0
- while not _proper_compiler_style(matches[index], spec.architecture):
- index += 1
- if index == len(matches) - 1:
- arch = spec.architecture
- raise UnavailableCompilerVersionError(spec.compiler,
- arch.platform_os)
- spec.compiler = matches[index].copy()
+ try:
+ spec.compiler = next(
+ c for c in matches
+ if _proper_compiler_style(c, spec.architecture)).copy()
+ except StopIteration:
+ raise UnavailableCompilerVersionError(
+ spec.compiler, spec.architecture.platform_os
+ )
+
assert(spec.compiler.concrete)
return True # things changed.
diff --git a/lib/spack/spack/fetch_strategy.py b/lib/spack/spack/fetch_strategy.py
index 4374587250..5084a68e08 100644
--- a/lib/spack/spack/fetch_strategy.py
+++ b/lib/spack/spack/fetch_strategy.py
@@ -116,6 +116,14 @@ class FetchStrategy(object):
def archive(self, destination):
pass # Used to create tarball for mirror.
+ @property
+ def cachable(self):
+ """Return whether the fetcher is capable of caching the
+ resource it retrieves. This generally is determined by
+ whether the resource is identifiably associated with a
+ specific package version."""
+ pass
+
def __str__(self): # Should be human readable URL.
return "FetchStrategy.__str___"
@@ -186,7 +194,7 @@ class URLFetchStrategy(FetchStrategy):
save_file = self.stage.save_filename
partial_file = self.stage.save_filename + '.part'
- tty.msg("Trying to fetch from %s" % self.url)
+ tty.msg("Fetching %s" % self.url)
if partial_file:
save_args = ['-C',
@@ -272,6 +280,10 @@ class URLFetchStrategy(FetchStrategy):
"""Path to the source archive within this stage directory."""
return self.stage.archive_file
+ @property
+ def cachable(self):
+ return bool(self.digest)
+
@_needs_stage
def expand(self):
if not self.expand_archive:
@@ -283,7 +295,7 @@ class URLFetchStrategy(FetchStrategy):
self.stage.chdir()
if not self.archive_file:
raise NoArchiveFileError(
- "URLFetchStrategy couldn't find archive file",
+ "Couldn't find archive file",
"Failed on expand() for URL %s" % self.url)
if not self.extension:
@@ -380,16 +392,34 @@ class CacheURLFetchStrategy(URLFetchStrategy):
@_needs_stage
def fetch(self):
- super(CacheURLFetchStrategy, self).fetch()
+ path = re.sub('^file://', '', self.url)
+
+ # check whether the cache file exists.
+ if not os.path.isfile(path):
+ raise NoCacheError('No cache of %s' % path)
+
+ self.stage.chdir()
+
+ # remove old symlink if one is there.
+ filename = self.stage.save_filename
+ if os.path.exists(filename):
+ os.remove(filename)
+
+ # Symlink to local cached archive.
+ os.symlink(path, filename)
+
+ # Remove link if checksum fails, or subsequent fetchers
+ # will assume they don't need to download.
if self.digest:
try:
self.check()
except ChecksumError:
- # Future fetchers will assume they don't need to
- # download if the file remains
os.remove(self.archive_file)
raise
+ # Notify the user how we fetched.
+ tty.msg('Using cached archive: %s' % path)
+
class VCSFetchStrategy(FetchStrategy):
@@ -555,6 +585,10 @@ class GitFetchStrategy(VCSFetchStrategy):
return self._git
+ @property
+ def cachable(self):
+ return bool(self.commit or self.tag)
+
@_needs_stage
def fetch(self):
self.stage.chdir()
@@ -671,6 +705,10 @@ class SvnFetchStrategy(VCSFetchStrategy):
self._svn = which('svn', required=True)
return self._svn
+ @property
+ def cachable(self):
+ return bool(self.revision)
+
@_needs_stage
def fetch(self):
self.stage.chdir()
@@ -754,6 +792,10 @@ class HgFetchStrategy(VCSFetchStrategy):
self._hg = which('hg', required=True)
return self._hg
+ @property
+ def cachable(self):
+ return bool(self.revision)
+
@_needs_stage
def fetch(self):
self.stage.chdir()
@@ -860,22 +902,35 @@ def for_package_version(pkg, version):
raise InvalidArgsError(pkg, version)
+def from_list_url(pkg):
+ """If a package provides a URL which lists URLs for resources by
+ version, this can can create a fetcher for a URL discovered for
+ the specified package's version."""
+ if pkg.list_url:
+ try:
+ versions = pkg.fetch_remote_versions()
+ try:
+ url_from_list = versions[pkg.version]
+ return URLFetchStrategy(url=url_from_list, digest=None)
+ except KeyError:
+ tty.msg("Can not find version %s in url_list" %
+ self.version)
+ except:
+ tty.msg("Could not determine url from list_url.")
+
+
class FsCache(object):
def __init__(self, root):
self.root = os.path.abspath(root)
def store(self, fetcher, relativeDst):
- unique = False
- uidGroups = [['tag', 'commit'], ['digest'], ['revision']]
- for grp in uidGroups:
- try:
- unique |= any(getattr(fetcher, x) for x in grp)
- except AttributeError:
- pass
- if unique:
- break
- if not unique:
+ # skip fetchers that aren't cachable
+ if not fetcher.cachable:
+ return
+
+ # Don't store things that are already cached.
+ if isinstance(fetcher, CacheURLFetchStrategy):
return
dst = join_path(self.root, relativeDst)
@@ -883,23 +938,23 @@ class FsCache(object):
fetcher.archive(dst)
def fetcher(self, targetPath, digest, **kwargs):
- url = "file://" + join_path(self.root, targetPath)
- return CacheURLFetchStrategy(url, digest, **kwargs)
+ path = join_path(self.root, targetPath)
+ return CacheURLFetchStrategy(path, digest, **kwargs)
def destroy(self):
shutil.rmtree(self.root, ignore_errors=True)
class FetchError(spack.error.SpackError):
+ """Superclass of fetcher errors."""
- def __init__(self, msg, long_msg=None):
- super(FetchError, self).__init__(msg, long_msg)
+class NoCacheError(FetchError):
+ """Raised when there is no cached archive for a package."""
-class FailedDownloadError(FetchError):
+class FailedDownloadError(FetchError):
"""Raised when a download fails."""
-
def __init__(self, url, msg=""):
super(FailedDownloadError, self).__init__(
"Failed to fetch file from URL: %s" % url, msg)
@@ -907,19 +962,14 @@ class FailedDownloadError(FetchError):
class NoArchiveFileError(FetchError):
-
- def __init__(self, msg, long_msg):
- super(NoArchiveFileError, self).__init__(msg, long_msg)
+ """Raised when an archive file is expected but none exists."""
class NoDigestError(FetchError):
-
- def __init__(self, msg, long_msg=None):
- super(NoDigestError, self).__init__(msg, long_msg)
+ """Raised after attempt to checksum when URL has no digest."""
class InvalidArgsError(FetchError):
-
def __init__(self, pkg, version):
msg = ("Could not construct a fetch strategy for package %s at "
"version %s")
@@ -928,17 +978,11 @@ class InvalidArgsError(FetchError):
class ChecksumError(FetchError):
-
"""Raised when archive fails to checksum."""
- def __init__(self, message, long_msg=None):
- super(ChecksumError, self).__init__(message, long_msg)
-
class NoStageError(FetchError):
-
"""Raised when fetch operations are called before set_stage()."""
-
def __init__(self, method):
super(NoStageError, self).__init__(
"Must call FetchStrategy.set_stage() before calling %s" %
diff --git a/lib/spack/spack/mirror.py b/lib/spack/spack/mirror.py
index 0f72e4e25c..97aeef5434 100644
--- a/lib/spack/spack/mirror.py
+++ b/lib/spack/spack/mirror.py
@@ -44,7 +44,7 @@ from spack.version import *
from spack.util.compression import allowed_archive
-def mirror_archive_filename(spec, fetcher):
+def mirror_archive_filename(spec, fetcher, resourceId=None):
"""Get the name of the spec's archive in the mirror."""
if not spec.version.concrete:
raise ValueError("mirror.path requires spec with concrete version.")
@@ -67,15 +67,18 @@ def mirror_archive_filename(spec, fetcher):
# Otherwise we'll make a .tar.gz ourselves
ext = 'tar.gz'
- filename = "%s-%s" % (spec.package.name, spec.version)
- if ext:
- filename += ".%s" % ext
+ if resourceId:
+ filename = "%s-%s" % (resourceId, spec.version) + ".%s" % ext
+ else:
+ filename = "%s-%s" % (spec.package.name, spec.version) + ".%s" % ext
+
return filename
-def mirror_archive_path(spec, fetcher):
+def mirror_archive_path(spec, fetcher, resourceId=None):
"""Get the relative path to the spec's archive within a mirror."""
- return join_path(spec.name, mirror_archive_filename(spec, fetcher))
+ return join_path(
+ spec.name, mirror_archive_filename(spec, fetcher, resourceId))
def get_matching_versions(specs, **kwargs):
@@ -204,8 +207,9 @@ def add_single_spec(spec, mirror_root, categories, **kwargs):
name = spec.format("$_$@")
else:
resource = stage.resource
- archive_path = join_path(
- subdir, suggest_archive_basename(resource))
+ archive_path = os.path.abspath(join_path(
+ mirror_root,
+ mirror_archive_path(spec, fetcher, resource.name)))
name = "{resource} ({pkg}).".format(
resource=resource.name, pkg=spec.format("$_$@"))
subdir = os.path.dirname(archive_path)
diff --git a/lib/spack/spack/operating_systems/linux_distro.py b/lib/spack/spack/operating_systems/linux_distro.py
index 6d70ae80b6..b9a39361da 100644
--- a/lib/spack/spack/operating_systems/linux_distro.py
+++ b/lib/spack/spack/operating_systems/linux_distro.py
@@ -1,5 +1,4 @@
import re
-import platform as py_platform
from spack.architecture import OperatingSystem
@@ -12,8 +11,14 @@ class LinuxDistro(OperatingSystem):
"""
def __init__(self):
- distname, version, _ = py_platform.linux_distribution(
- full_distribution_name=False)
+ try:
+ # This will throw an error if imported on a non-Linux platform.
+ from external.distro import linux_distribution
+ distname, version, _ = linux_distribution(
+ full_distribution_name=False)
+ distname, version = str(distname), str(version)
+ except ImportError as e:
+ distname, version = 'unknown', ''
# Grabs major version from tuple on redhat; on other platforms
# grab the first legal identifier in the version field. On
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 8ce8da1ff2..b238e39be5 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -95,8 +95,6 @@ class InstallPhase(object):
# install phase, thus return a properly set wrapper
phase = getattr(instance, self.name)
- print phase
-
@functools.wraps(phase)
def phase_wrapper(spec, prefix):
# Check instance attributes at the beginning of a phase
@@ -394,7 +392,8 @@ class PackageBase(object):
The install function is designed so that someone not too terribly familiar
with Python could write a package installer. For example, we put a number
of commands in install scope that you can use almost like shell commands.
- These include make, configure, cmake, rm, rmtree, mkdir, mkdirp, and others.
+ These include make, configure, cmake, rm, rmtree, mkdir, mkdirp, and
+ others.
You can see above in the cmake script that these commands are used to run
configure and make almost like they're used on the command line. The
@@ -409,9 +408,9 @@ class PackageBase(object):
pollute other namespaces, and it allows you to more easily implement an
install function.
- For a full list of commands and variables available in module scope, see the
- add_commands_to_module() function in this class. This is where most of
- them are created and set on the module.
+ For a full list of commands and variables available in module scope, see
+ the add_commands_to_module() function in this class. This is where most
+ of them are created and set on the module.
**Parallel Builds**
@@ -688,7 +687,8 @@ class PackageBase(object):
def _make_resource_stage(self, root_stage, fetcher, resource):
resource_stage_folder = self._resource_stage(resource)
- resource_mirror = join_path(self.name, os.path.basename(fetcher.url))
+ resource_mirror = spack.mirror.mirror_archive_path(
+ self.spec, fetcher, resource.name)
stage = ResourceStage(resource.fetcher,
root=root_stage,
resource=resource,
@@ -703,7 +703,6 @@ class PackageBase(object):
# Construct a path where the stage should build..
s = self.spec
stage_name = "%s-%s-%s" % (s.name, s.version, s.dag_hash())
- # Build the composite stage
stage = Stage(fetcher, mirror_path=mp, name=stage_name, path=self.path)
return stage
@@ -1197,7 +1196,7 @@ class PackageBase(object):
self.make_jobs = make_jobs
# Then install the package itself.
- def build_process():
+ def build_process(input_stream):
"""Forked for each build. Has its own process and python
module space set up by build_environment.fork()."""
@@ -1239,9 +1238,11 @@ class PackageBase(object):
# Spawn a daemon that reads from a pipe and redirects
# everything to log_path
redirection_context = log_output(
- log_path, verbose,
- sys.stdout.isatty(),
- True
+ log_path,
+ echo=verbose,
+ force_color=sys.stdout.isatty(),
+ debug=True,
+ input_stream=input_stream
)
with redirection_context as log_redirection:
for phase_name, phase in zip(self.phases, self._InstallPhase_phases): # NOQA: ignore=E501
diff --git a/lib/spack/spack/preferred_packages.py b/lib/spack/spack/preferred_packages.py
index 45a41c8e2b..08f9c3cfa8 100644
--- a/lib/spack/spack/preferred_packages.py
+++ b/lib/spack/spack/preferred_packages.py
@@ -28,9 +28,6 @@ from spack.version import *
class PreferredPackages(object):
- # Arbitrary, but consistent
- _default_order = {'compiler': ['gcc', 'intel', 'clang', 'pgi', 'xlc']}
-
def __init__(self):
self.preferred = spack.config.get_config('packages')
self._spec_for_pkgname_cache = {}
@@ -128,9 +125,6 @@ class PreferredPackages(object):
key = (pkgname, component, second_key)
if key not in self._spec_for_pkgname_cache:
pkglist = self._order_for_package(pkgname, component, second_key)
- if not pkglist:
- if component in self._default_order:
- pkglist = self._default_order[component]
if component == 'compiler':
self._spec_for_pkgname_cache[key] = \
[spack.spec.CompilerSpec(s) for s in pkglist]
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 17f206d81b..951e9ae652 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -475,10 +475,10 @@ class FlagMap(HashableMap):
def satisfies(self, other, strict=False):
if strict or (self.spec and self.spec._concrete):
- return all(f in self and set(self[f]) <= set(other[f])
+ return all(f in self and set(self[f]) == set(other[f])
for f in other)
else:
- return all(set(self[f]) <= set(other[f])
+ return all(set(self[f]) == set(other[f])
for f in other if (other[f] != [] and f in self))
def constrain(self, other):
@@ -2209,6 +2209,9 @@ class Spec(object):
${SPACK_PREFIX}/opt
${PREFIX} The package prefix
+ Note these are case-insensitive: for example you can specify either
+ ``${PACKAGE}`` or ``${package}``.
+
Optionally you can provide a width, e.g. ``$20_`` for a 20-wide name.
Like printf, you can provide '-' for left justification, e.g.
``$-20_`` for a left-justified name.
@@ -2299,6 +2302,7 @@ class Spec(object):
"'%s'" % format_string)
named_str += c
continue
+ named_str = named_str.upper()
if named_str == 'PACKAGE':
name = self.name if self.name else ''
write(fmt % self.name, '@')
@@ -2311,7 +2315,7 @@ class Spec(object):
elif named_str == 'COMPILERNAME':
if self.compiler:
write(fmt % self.compiler.name, '%')
- elif named_str == 'COMPILERVER':
+ elif named_str in ['COMPILERVER', 'COMPILERVERSION']:
if self.compiler:
write(fmt % self.compiler.versions, '%')
elif named_str == 'COMPILERFLAGS':
@@ -2392,12 +2396,24 @@ class Spec(object):
def __str__(self):
return self.format() + self.dep_string()
+ def _install_status(self):
+ """Helper for tree to print DB install status."""
+ if not self.concrete:
+ return None
+ try:
+ record = spack.store.db.get_record(self)
+ return record.installed
+ except KeyError:
+ return None
+
def tree(self, **kwargs):
"""Prints out this spec and its dependencies, tree-formatted
with indentation."""
color = kwargs.pop('color', False)
depth = kwargs.pop('depth', False)
- showid = kwargs.pop('ids', False)
+ hashes = kwargs.pop('hashes', True)
+ hlen = kwargs.pop('hashlen', None)
+ install_status = kwargs.pop('install_status', True)
cover = kwargs.pop('cover', 'nodes')
indent = kwargs.pop('indent', 0)
fmt = kwargs.pop('format', '$_$@$%@+$+$=')
@@ -2406,8 +2422,6 @@ class Spec(object):
check_kwargs(kwargs, self.tree)
out = ""
- cur_id = 0
- ids = {}
for d, node in self.traverse(
order='pre', cover=cover, depth=True, deptypes=deptypes):
if prefix is not None:
@@ -2415,11 +2429,17 @@ class Spec(object):
out += " " * indent
if depth:
out += "%-4d" % d
- if not id(node) in ids:
- cur_id += 1
- ids[id(node)] = cur_id
- if showid:
- out += "%-4d" % ids[id(node)]
+ if install_status:
+ status = node._install_status()
+ if status is None:
+ out += " " # Package isn't installed
+ elif status:
+ out += colorize("@g{[+]} ", color=color) # installed
+ else:
+ out += colorize("@r{[-]} ", color=color) # missing
+
+ if hashes:
+ out += colorize('@K{%s} ', color=color) % node.dag_hash(hlen)
out += (" " * d)
if d > 0:
out += "^"
diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py
index ff10a38ca8..4157511ce0 100644
--- a/lib/spack/spack/stage.py
+++ b/lib/spack/spack/stage.py
@@ -161,7 +161,8 @@ class Stage(object):
def __init__(
self, url_or_fetch_strategy,
- name=None, mirror_path=None, keep=False, path=None, lock=True):
+ name=None, mirror_path=None, keep=False, path=None, lock=True,
+ alternate_fetchers=None):
"""Create a stage object.
Parameters:
url_or_fetch_strategy
@@ -197,6 +198,7 @@ class Stage(object):
self.fetcher.set_stage(self)
# self.fetcher can change with mirrors.
self.default_fetcher = self.fetcher
+ self.alternate_fetchers = alternate_fetchers
# used for mirrored archives of repositories.
self.skip_checksum_for_mirror = True
@@ -408,28 +410,14 @@ class Stage(object):
fetchers.insert(
0, fs.URLFetchStrategy(
url, digest, expand=expand, extension=extension))
- fetchers.insert(
- 0, spack.fetch_cache.fetcher(
- self.mirror_path, digest, expand=expand,
- extension=extension))
-
- # Look for the archive in list_url
- package_name = os.path.dirname(self.mirror_path)
- pkg = spack.repo.get(package_name)
- if pkg.list_url is not None and pkg.url is not None:
- try:
- archive_version = spack.url.parse_version(
- self.default_fetcher.url)
- versions = pkg.fetch_remote_versions()
- try:
- url_from_list = versions[Version(archive_version)]
- fetchers.append(fs.URLFetchStrategy(
- url_from_list, digest))
- except KeyError:
- tty.msg("Can not find version %s in url_list" %
- archive_version)
- except:
- tty.msg("Could not determine url from list_url.")
+ if self.default_fetcher.cachable:
+ fetchers.insert(
+ 0, spack.fetch_cache.fetcher(
+ self.mirror_path, digest, expand=expand,
+ extension=extension))
+
+ if self.alternate_fetchers:
+ fetchers.extend(self.alternate_fetchers)
for fetcher in fetchers:
try:
@@ -437,6 +425,9 @@ class Stage(object):
self.fetcher = fetcher
self.fetcher.fetch()
break
+ except spack.fetch_strategy.NoCacheError as e:
+ # Don't bother reporting when something is not cached.
+ continue
except spack.error.SpackError as e:
tty.msg("Fetching from %s failed." % fetcher)
tty.debug(e)
diff --git a/lib/spack/spack/test/cmd/find.py b/lib/spack/spack/test/cmd/find.py
index fa82db7733..4788da8ec6 100644
--- a/lib/spack/spack/test/cmd/find.py
+++ b/lib/spack/spack/test/cmd/find.py
@@ -52,5 +52,3 @@ class FindTest(unittest.TestCase):
args.implicit = True
q_args = query_arguments(args)
self.assertEqual(q_args['explicit'], False)
- args.explicit = True
- self.assertRaises(SystemExit, query_arguments, args)
diff --git a/lib/spack/spack/test/cmd/module.py b/lib/spack/spack/test/cmd/module.py
index 3a0ce32e6c..39f9c5649f 100644
--- a/lib/spack/spack/test/cmd/module.py
+++ b/lib/spack/spack/test/cmd/module.py
@@ -34,7 +34,7 @@ class TestModule(spack.test.mock_database.MockDatabase):
def _get_module_files(self, args):
return [modules.module_types[args.module_type](spec).file_name
- for spec in args.specs]
+ for spec in args.specs()]
def test_module_common_operations(self):
parser = argparse.ArgumentParser()
diff --git a/lib/spack/spack/test/spack_yaml.py b/lib/spack/spack/test/spack_yaml.py
index 30ed1672e2..fbbb7b8e60 100644
--- a/lib/spack/spack/test/spack_yaml.py
+++ b/lib/spack/spack/test/spack_yaml.py
@@ -90,3 +90,19 @@ class SpackYamlTest(unittest.TestCase):
check(self.data['config_file']['some_list'][2], 8, 8)
check(self.data['config_file']['another_list'], 10, 10)
check(self.data['config_file']['some_key'], 11, 11)
+
+ def test_yaml_aliases(self):
+ aliased_list_1 = ['foo']
+ aliased_list_2 = []
+ dict_with_aliases = {
+ 'a': aliased_list_1,
+ 'b': aliased_list_1,
+ 'c': aliased_list_1,
+ 'd': aliased_list_2,
+ 'e': aliased_list_2,
+ 'f': aliased_list_2,
+ }
+ string = syaml.dump(dict_with_aliases)
+
+ # ensure no YAML aliases appear in syaml dumps.
+ self.assertFalse('*id' in string)
diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py
index 674c79bca1..c27db52066 100644
--- a/lib/spack/spack/util/spack_yaml.py
+++ b/lib/spack/spack/util/spack_yaml.py
@@ -202,6 +202,11 @@ class OrderedLineDumper(Dumper):
node.flow_style = best_style
return node
+ def ignore_aliases(self, _data):
+ """Make the dumper NEVER print YAML aliases."""
+ return True
+
+
# Make our special objects look like normal YAML ones.
OrderedLineDumper.add_representer(syaml_dict, OrderedLineDumper.represent_dict)
OrderedLineDumper.add_representer(syaml_list, OrderedLineDumper.represent_list)
diff --git a/share/spack/setup-env.sh b/share/spack/setup-env.sh
index 2eb1dfecb3..f602ea627e 100755
--- a/share/spack/setup-env.sh
+++ b/share/spack/setup-env.sh
@@ -189,5 +189,7 @@ _sp_prefix=$(cd "$(dirname $(dirname $_sp_share_dir))" && pwd)
_spack_pathadd PATH "${_sp_prefix%/}/bin"
_sp_sys_type=$(spack-python -c 'print(spack.architecture.sys_type())')
-_spack_pathadd DK_NODE "${_sp_share_dir%/}/dotkit/$_sp_sys_type"
-_spack_pathadd MODULEPATH "${_sp_share_dir%/}/modules/$_sp_sys_type"
+_sp_dotkit_root=$(spack-python -c "print(spack.util.path.canonicalize_path(spack.config.get_config('config').get('module_roots', {}).get('dotkit')))")
+_sp_tcl_root=$(spack-python -c "print(spack.util.path.canonicalize_path(spack.config.get_config('config').get('module_roots', {}).get('tcl')))")
+_spack_pathadd DK_NODE "${_sp_dotkit_root%/}/$_sp_sys_type"
+_spack_pathadd MODULEPATH "${_sp_tcl_root%/}/$_sp_sys_type"
diff --git a/var/spack/repos/builtin/packages/cairo/package.py b/var/spack/repos/builtin/packages/cairo/package.py
index b2911e126a..12c7838f63 100644
--- a/var/spack/repos/builtin/packages/cairo/package.py
+++ b/var/spack/repos/builtin/packages/cairo/package.py
@@ -25,7 +25,7 @@
from spack import *
-class Cairo(Package):
+class Cairo(AutotoolsPackage):
"""Cairo is a 2D graphics library with support for multiple output
devices."""
homepage = "http://cairographics.org"
@@ -40,9 +40,7 @@ class Cairo(Package):
depends_on("pkg-config", type="build")
depends_on("fontconfig@2.10.91:") # Require newer version of fontconfig.
- def install(self, spec, prefix):
- configure("--prefix=%s" % prefix,
- "--disable-trace", # can cause problems with libiberty
- "--enable-tee")
- make()
- make("install")
+ def configure_args(self):
+ args = ["--disable-trace", # can cause problems with libiberty
+ "--enable-tee"]
+ return args
diff --git a/var/spack/repos/builtin/packages/dealii/package.py b/var/spack/repos/builtin/packages/dealii/package.py
index dbccd01b99..563f751e0b 100644
--- a/var/spack/repos/builtin/packages/dealii/package.py
+++ b/var/spack/repos/builtin/packages/dealii/package.py
@@ -23,10 +23,9 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
-import sys
-class Dealii(Package):
+class Dealii(CMakePackage):
"""C++ software library providing well-documented tools to build finite
element codes for a broad variety of PDEs."""
homepage = "https://www.dealii.org"
@@ -118,19 +117,16 @@ class Dealii(Package):
depends_on("numdiff", when='@develop')
depends_on("astyle@2.04", when='@develop')
- def install(self, spec, prefix):
- options = []
- options.extend(std_cmake_args)
-
+ def build_type(self):
# CMAKE_BUILD_TYPE should be DebugRelease | Debug | Release
- for word in options[:]:
- if word.startswith('-DCMAKE_BUILD_TYPE'):
- options.remove(word)
+ return 'DebugRelease'
+
+ def cmake_args(self):
+ spec = self.spec
+ options = []
- dsuf = 'dylib' if sys.platform == 'darwin' else 'so'
lapack_blas = spec['lapack'].lapack_libs + spec['blas'].blas_libs
options.extend([
- '-DCMAKE_BUILD_TYPE=DebugRelease',
'-DDEAL_II_COMPONENT_EXAMPLES=ON',
'-DDEAL_II_WITH_THREADS:BOOL=ON',
'-DBOOST_DIR=%s' % spec['boost'].prefix,
@@ -215,9 +211,9 @@ class Dealii(Package):
'-DNETCDF_FOUND=true',
'-DNETCDF_LIBRARIES=%s;%s' % (
join_path(spec['netcdf-cxx'].prefix.lib,
- 'libnetcdf_c++.%s' % dsuf),
+ 'libnetcdf_c++.%s' % dso_suffix),
join_path(spec['netcdf'].prefix.lib,
- 'libnetcdf.%s' % dsuf)),
+ 'libnetcdf.%s' % dso_suffix)),
'-DNETCDF_INCLUDE_DIRS=%s;%s' % (
spec['netcdf-cxx'].prefix.include,
spec['netcdf'].prefix.include),
@@ -238,11 +234,7 @@ class Dealii(Package):
'-DDEAL_II_WITH_OPENCASCADE=OFF'
])
- cmake('.', *options)
- make()
- if self.run_tests:
- make("test")
- make("install")
+ return options
def setup_environment(self, spack_env, env):
env.set('DEAL_II_DIR', self.prefix)
diff --git a/var/spack/repos/builtin/packages/dyninst/package.py b/var/spack/repos/builtin/packages/dyninst/package.py
index 3df7ca551d..420ab0fc68 100644
--- a/var/spack/repos/builtin/packages/dyninst/package.py
+++ b/var/spack/repos/builtin/packages/dyninst/package.py
@@ -33,6 +33,13 @@ class Dyninst(Package):
url = "https://github.com/dyninst/dyninst/archive/v9.2.0.tar.gz"
list_url = "http://www.dyninst.org/downloads/dyninst-8.x"
+ # version 9.2.1b was the latest git commit when trying to port to a
+ # ppc64le system to get fixes in computeAddrWidth independent of
+ # endianness. This version can be removed if the next release includes
+ # this change. The actual commit was
+ # b8596ad4023ec40ac07e669ff8ea3ec06e262703
+ version('9.2.1b', git='https://github.com/dyninst/dyninst.git',
+ commit='859cb778e20b619443c943c96dd1851da763142b')
version('9.2.0', 'ad023f85e8e57837ed9de073b59d6bab',
url="https://github.com/dyninst/dyninst/archive/v9.2.0.tar.gz")
version('9.1.0', '5c64b77521457199db44bec82e4988ac',
@@ -67,19 +74,21 @@ class Dyninst(Package):
libdwarf = spec['libdwarf'].prefix
with working_dir('spack-build', create=True):
- cmake('..',
- '-DBoost_INCLUDE_DIR=%s' % spec['boost'].prefix.include,
- '-DBoost_LIBRARY_DIR=%s' % spec['boost'].prefix.lib,
- '-DBoost_NO_SYSTEM_PATHS=TRUE',
- '-DLIBELF_INCLUDE_DIR=%s' % join_path(
- libelf.include, 'libelf'),
- '-DLIBELF_LIBRARIES=%s' % join_path(
- libelf.lib, 'libelf.so'),
- '-DLIBDWARF_INCLUDE_DIR=%s' % libdwarf.include,
- '-DLIBDWARF_LIBRARIES=%s' % join_path(
- libdwarf.lib, 'libdwarf.so'),
- *std_cmake_args)
-
+ args = ['..',
+ '-DBoost_INCLUDE_DIR=%s' % spec['boost'].prefix.include,
+ '-DBoost_LIBRARY_DIR=%s' % spec['boost'].prefix.lib,
+ '-DBoost_NO_SYSTEM_PATHS=TRUE',
+ '-DLIBELF_INCLUDE_DIR=%s' % join_path(
+ libelf.include, 'libelf'),
+ '-DLIBELF_LIBRARIES=%s' % join_path(
+ libelf.lib, 'libelf.so'),
+ '-DLIBDWARF_INCLUDE_DIR=%s' % libdwarf.include,
+ '-DLIBDWARF_LIBRARIES=%s' % join_path(
+ libdwarf.lib, 'libdwarf.so')]
+ if spec.satisfies('arch=linux-redhat7-ppc64le'):
+ args.append('-Darch_ppc64_little_endian=1')
+ args += std_cmake_args
+ cmake(*args)
make()
make("install")
diff --git a/var/spack/repos/builtin/packages/everytrace-example/package.py b/var/spack/repos/builtin/packages/everytrace-example/package.py
index 8a85423192..8c49e04634 100644
--- a/var/spack/repos/builtin/packages/everytrace-example/package.py
+++ b/var/spack/repos/builtin/packages/everytrace-example/package.py
@@ -39,8 +39,5 @@ class EverytraceExample(CMakePackage):
# Currently the only MPI this everytrace works with.
depends_on('openmpi')
- def configure_args(self):
- return []
-
def setup_environment(self, spack_env, env):
env.prepend_path('PATH', join_path(self.prefix, 'bin'))
diff --git a/var/spack/repos/builtin/packages/everytrace/package.py b/var/spack/repos/builtin/packages/everytrace/package.py
index ee1a058009..a3f3e2cfce 100644
--- a/var/spack/repos/builtin/packages/everytrace/package.py
+++ b/var/spack/repos/builtin/packages/everytrace/package.py
@@ -42,7 +42,7 @@ class Everytrace(CMakePackage):
depends_on('cmake', type='build')
depends_on('mpi', when='+mpi')
- def configure_args(self):
+ def cmake_args(self):
spec = self.spec
return [
'-DUSE_MPI=%s' % ('YES' if '+mpi' in spec else 'NO'),
diff --git a/var/spack/repos/builtin/packages/fftw/package.py b/var/spack/repos/builtin/packages/fftw/package.py
index 3069e39226..53b635ba7c 100644
--- a/var/spack/repos/builtin/packages/fftw/package.py
+++ b/var/spack/repos/builtin/packages/fftw/package.py
@@ -39,6 +39,9 @@ class Fftw(Package):
version('3.3.5', '6cc08a3b9c7ee06fdd5b9eb02e06f569')
version('3.3.4', '2edab8c06b24feeb3b82bbb3ebf3e7b3')
+ patch('pfft-3.3.5.patch', when="@3.3.5+pfft_patches", level=0)
+ patch('pfft-3.3.4.patch', when="@3.3.4+pfft_patches", level=0)
+
variant(
'float', default=True,
description='Produces a single precision version of the library')
@@ -51,8 +54,13 @@ class Fftw(Package):
'(works only with GCC and libquadmath)')
variant('openmp', default=False, description="Enable OpenMP support.")
variant('mpi', default=False, description='Activate MPI support')
+ variant(
+ 'pfft_patches', default=False,
+ description='Add extra transpose functions for PFFT compatibility')
depends_on('mpi', when='+mpi')
+ depends_on('automake', type='build', when='+pfft_patches')
+ depends_on('autoconf', type='build', when='+pfft_patches')
# TODO : add support for architecture specific optimizations as soon as
# targets are supported
@@ -77,6 +85,10 @@ class Fftw(Package):
if '+mpi' in spec:
options.append('--enable-mpi')
+ if '+pfft_patches' in spec:
+ autoreconf = which('autoreconf')
+ autoreconf('-ifv')
+
configure(*options)
make()
if self.run_tests:
diff --git a/var/spack/repos/builtin/packages/fftw/pfft-3.3.4.patch b/var/spack/repos/builtin/packages/fftw/pfft-3.3.4.patch
new file mode 100644
index 0000000000..4740a60ae4
--- /dev/null
+++ b/var/spack/repos/builtin/packages/fftw/pfft-3.3.4.patch
@@ -0,0 +1,865 @@
+--- mpi/conf.c 2014-03-04 19:41:03.000000000 +0100
++++ mpi/conf.c 2015-09-05 05:53:19.085516467 +0200
+@@ -29,6 +29,8 @@ static const solvtab s =
+ SOLVTAB(XM(transpose_pairwise_register)),
+ SOLVTAB(XM(transpose_alltoall_register)),
+ SOLVTAB(XM(transpose_recurse_register)),
++ SOLVTAB(XM(transpose_pairwise_transposed_register)),
++ SOLVTAB(XM(transpose_alltoall_transposed_register)),
+ SOLVTAB(XM(dft_rank_geq2_register)),
+ SOLVTAB(XM(dft_rank_geq2_transposed_register)),
+ SOLVTAB(XM(dft_serial_register)),
+
+--- mpi/Makefile.am 2013-03-18 13:10:45.000000000 +0100
++++ mpi/Makefile.am 2015-09-05 05:53:19.084516437 +0200
+@@ -16,6 +16,7 @@ BUILT_SOURCES = fftw3-mpi.f03.in fftw3-m
+ CLEANFILES = fftw3-mpi.f03 fftw3l-mpi.f03
+
+ TRANSPOSE_SRC = transpose-alltoall.c transpose-pairwise.c transpose-recurse.c transpose-problem.c transpose-solve.c mpi-transpose.h
++TRANSPOSE_SRC += transpose-alltoall-transposed.c transpose-pairwise-transposed.c
+ DFT_SRC = dft-serial.c dft-rank-geq2.c dft-rank-geq2-transposed.c dft-rank1.c dft-rank1-bigvec.c dft-problem.c dft-solve.c mpi-dft.h
+ RDFT_SRC = rdft-serial.c rdft-rank-geq2.c rdft-rank-geq2-transposed.c rdft-rank1-bigvec.c rdft-problem.c rdft-solve.c mpi-rdft.h
+ RDFT2_SRC = rdft2-serial.c rdft2-rank-geq2.c rdft2-rank-geq2-transposed.c rdft2-problem.c rdft2-solve.c mpi-rdft2.h
+
+--- mpi/mpi-transpose.h 2014-03-04 19:41:03.000000000 +0100
++++ mpi/mpi-transpose.h 2015-09-05 05:53:19.085516467 +0200
+@@ -59,3 +59,5 @@ int XM(mkplans_posttranspose)(const prob
+ void XM(transpose_pairwise_register)(planner *p);
+ void XM(transpose_alltoall_register)(planner *p);
+ void XM(transpose_recurse_register)(planner *p);
++void XM(transpose_pairwise_transposed_register)(planner *p);
++void XM(transpose_alltoall_transposed_register)(planner *p);
+
+--- mpi/transpose-alltoall-transposed.c 1970-01-01 01:00:00.000000000 +0100
++++ mpi/transpose-alltoall-transposed.c 2015-09-05 05:53:19.085516467 +0200
+@@ -0,0 +1,280 @@
++/*
++ * Copyright (c) 2003, 2007-11 Matteo Frigo
++ * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
++ * Copyright (c) 2012 Michael Pippig
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ */
++
++/* plans for distributed out-of-place transpose using MPI_Alltoall,
++ and which destroy the input array (also if TRANSPOSED_IN is used) */
++
++#include "mpi-transpose.h"
++#include <string.h>
++
++typedef struct {
++ solver super;
++ int copy_transposed_out; /* whether to copy the output for TRANSPOSED_OUT,
++ which makes the first transpose out-of-place
++ but costs an extra copy and requires us
++ to destroy the input */
++} S;
++
++typedef struct {
++ plan_mpi_transpose super;
++
++ plan *cld1, *cld2, *cld2rest, *cld3;
++
++ MPI_Comm comm;
++ int *send_block_sizes, *send_block_offsets;
++ int *recv_block_sizes, *recv_block_offsets;
++
++ INT rest_Ioff, rest_Ooff;
++
++ int equal_blocks;
++} P;
++
++/* transpose locally to get contiguous chunks
++ this may take two transposes if the block sizes are unequal
++ (3 subplans, two of which operate on disjoint data) */
++static void apply_pretranspose(
++ const P *ego, R *I, R *O
++ )
++{
++ plan_rdft *cld2, *cld2rest, *cld3;
++
++ cld3 = (plan_rdft *) ego->cld3;
++ if (cld3)
++ cld3->apply(ego->cld3, O, O);
++ /* else TRANSPOSED_IN is true and user wants I transposed */
++
++ cld2 = (plan_rdft *) ego->cld2;
++ cld2->apply(ego->cld2, I, O);
++ cld2rest = (plan_rdft *) ego->cld2rest;
++ if (cld2rest) {
++ cld2rest->apply(ego->cld2rest,
++ I + ego->rest_Ioff, O + ego->rest_Ooff);
++ }
++}
++
++static void apply(const plan *ego_, R *I, R *O)
++{
++ const P *ego = (const P *) ego_;
++ plan_rdft *cld1 = (plan_rdft *) ego->cld1;
++
++ if (cld1) {
++ /* transpose locally to get contiguous chunks */
++ apply_pretranspose(ego, I, O);
++
++ /* transpose chunks globally */
++ if (ego->equal_blocks)
++ MPI_Alltoall(O, ego->send_block_sizes[0], FFTW_MPI_TYPE,
++ I, ego->recv_block_sizes[0], FFTW_MPI_TYPE,
++ ego->comm);
++ else
++ MPI_Alltoallv(O, ego->send_block_sizes, ego->send_block_offsets,
++ FFTW_MPI_TYPE,
++ I, ego->recv_block_sizes, ego->recv_block_offsets,
++ FFTW_MPI_TYPE,
++ ego->comm);
++
++ /* transpose locally to get non-transposed output */
++ cld1->apply(ego->cld1, I, O);
++ } /* else TRANSPOSED_OUT is true and user wants O transposed */
++ else {
++ /* transpose locally to get contiguous chunks */
++ apply_pretranspose(ego, I, I);
++
++ /* transpose chunks globally */
++ if (ego->equal_blocks)
++ MPI_Alltoall(I, ego->send_block_sizes[0], FFTW_MPI_TYPE,
++ O, ego->recv_block_sizes[0], FFTW_MPI_TYPE,
++ ego->comm);
++ else
++ MPI_Alltoallv(I, ego->send_block_sizes, ego->send_block_offsets,
++ FFTW_MPI_TYPE,
++ O, ego->recv_block_sizes, ego->recv_block_offsets,
++ FFTW_MPI_TYPE,
++ ego->comm);
++ }
++}
++
++static int applicable(const S *ego, const problem *p_,
++ const planner *plnr)
++{
++ /* in contrast to transpose-alltoall this algorithm can not preserve the input,
++ * since we need at least one transpose before the (out-of-place) Alltoall */
++ const problem_mpi_transpose *p = (const problem_mpi_transpose *) p_;
++ return (1
++ && p->I != p->O
++ && (!NO_DESTROY_INPUTP(plnr))
++ && ((p->flags & TRANSPOSED_OUT) || !ego->copy_transposed_out)
++ && ONLY_TRANSPOSEDP(p->flags)
++ );
++}
++
++static void awake(plan *ego_, enum wakefulness wakefulness)
++{
++ P *ego = (P *) ego_;
++ X(plan_awake)(ego->cld1, wakefulness);
++ X(plan_awake)(ego->cld2, wakefulness);
++ X(plan_awake)(ego->cld2rest, wakefulness);
++ X(plan_awake)(ego->cld3, wakefulness);
++}
++
++static void destroy(plan *ego_)
++{
++ P *ego = (P *) ego_;
++ X(ifree0)(ego->send_block_sizes);
++ MPI_Comm_free(&ego->comm);
++ X(plan_destroy_internal)(ego->cld3);
++ X(plan_destroy_internal)(ego->cld2rest);
++ X(plan_destroy_internal)(ego->cld2);
++ X(plan_destroy_internal)(ego->cld1);
++}
++
++static void print(const plan *ego_, printer *p)
++{
++ const P *ego = (const P *) ego_;
++ p->print(p, "(mpi-transpose-alltoall-transposed%s%(%p%)%(%p%)%(%p%)%(%p%))",
++ ego->equal_blocks ? "/e" : "",
++ ego->cld1, ego->cld2, ego->cld2rest, ego->cld3);
++}
++
++static plan *mkplan(const solver *ego_, const problem *p_, planner *plnr)
++{
++ const S *ego = (const S *) ego_;
++ const problem_mpi_transpose *p;
++ P *pln;
++ plan *cld1 = 0, *cld2 = 0, *cld2rest = 0, *cld3 = 0;
++ INT b, bt, vn, rest_Ioff, rest_Ooff;
++ R *O;
++ int *sbs, *sbo, *rbs, *rbo;
++ int pe, my_pe, n_pes;
++ int equal_blocks = 1;
++ static const plan_adt padt = {
++ XM(transpose_solve), awake, print, destroy
++ };
++
++ if (!applicable(ego, p_, plnr))
++ return (plan *) 0;
++
++ p = (const problem_mpi_transpose *) p_;
++ vn = p->vn;
++
++ MPI_Comm_rank(p->comm, &my_pe);
++ MPI_Comm_size(p->comm, &n_pes);
++
++ bt = XM(block)(p->ny, p->tblock, my_pe);
++
++ if (p->flags & TRANSPOSED_OUT) { /* O stays transposed */
++ if (ego->copy_transposed_out) {
++ cld1 = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(X(mktensor_1d)
++ (bt * p->nx * vn, 1, 1),
++ p->I, O = p->O),
++ 0, 0, NO_SLOW);
++ if (XM(any_true)(!cld1, p->comm)) goto nada;
++ }
++ else /* first transpose is in-place */
++ O = p->I;
++ }
++ else { /* transpose nx x bt x vn -> bt x nx x vn */
++ cld1 = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(X(mktensor_3d)
++ (bt, vn, p->nx * vn,
++ p->nx, bt * vn, vn,
++ vn, 1, 1),
++ p->I, O = p->O),
++ 0, 0, NO_SLOW);
++ if (XM(any_true)(!cld1, p->comm)) goto nada;
++ }
++
++ if (XM(any_true)(!XM(mkplans_pretranspose)(p, plnr, p->I, O, my_pe,
++ &cld2, &cld2rest, &cld3,
++ &rest_Ioff, &rest_Ooff),
++ p->comm)) goto nada;
++
++
++ pln = MKPLAN_MPI_TRANSPOSE(P, &padt, apply);
++
++ pln->cld1 = cld1;
++ pln->cld2 = cld2;
++ pln->cld2rest = cld2rest;
++ pln->rest_Ioff = rest_Ioff;
++ pln->rest_Ooff = rest_Ooff;
++ pln->cld3 = cld3;
++
++ MPI_Comm_dup(p->comm, &pln->comm);
++
++ /* Compute sizes/offsets of blocks to send for all-to-all command. */
++ sbs = (int *) MALLOC(4 * n_pes * sizeof(int), PLANS);
++ sbo = sbs + n_pes;
++ rbs = sbo + n_pes;
++ rbo = rbs + n_pes;
++ b = XM(block)(p->nx, p->block, my_pe);
++ bt = XM(block)(p->ny, p->tblock, my_pe);
++ for (pe = 0; pe < n_pes; ++pe) {
++ INT db, dbt; /* destination block sizes */
++ db = XM(block)(p->nx, p->block, pe);
++ dbt = XM(block)(p->ny, p->tblock, pe);
++ if (db != p->block || dbt != p->tblock)
++ equal_blocks = 0;
++
++ /* MPI requires type "int" here; apparently it
++ has no 64-bit API? Grrr. */
++ sbs[pe] = (int) (b * dbt * vn);
++ sbo[pe] = (int) (pe * (b * p->tblock) * vn);
++ rbs[pe] = (int) (db * bt * vn);
++ rbo[pe] = (int) (pe * (p->block * bt) * vn);
++ }
++ pln->send_block_sizes = sbs;
++ pln->send_block_offsets = sbo;
++ pln->recv_block_sizes = rbs;
++ pln->recv_block_offsets = rbo;
++ pln->equal_blocks = equal_blocks;
++
++ X(ops_zero)(&pln->super.super.ops);
++ if (cld1) X(ops_add2)(&cld1->ops, &pln->super.super.ops);
++ if (cld2) X(ops_add2)(&cld2->ops, &pln->super.super.ops);
++ if (cld2rest) X(ops_add2)(&cld2rest->ops, &pln->super.super.ops);
++ if (cld3) X(ops_add2)(&cld3->ops, &pln->super.super.ops);
++ /* FIXME: should MPI operations be counted in "other" somehow? */
++
++ return &(pln->super.super);
++
++ nada:
++ X(plan_destroy_internal)(cld3);
++ X(plan_destroy_internal)(cld2rest);
++ X(plan_destroy_internal)(cld2);
++ X(plan_destroy_internal)(cld1);
++ return (plan *) 0;
++}
++
++static solver *mksolver(int copy_transposed_out)
++{
++ static const solver_adt sadt = { PROBLEM_MPI_TRANSPOSE, mkplan, 0 };
++ S *slv = MKSOLVER(S, &sadt);
++ slv->copy_transposed_out = copy_transposed_out;
++ return &(slv->super);
++}
++
++void XM(transpose_alltoall_transposed_register)(planner *p)
++{
++ int cto;
++ for (cto = 0; cto <= 1; ++cto)
++ REGISTER_SOLVER(p, mksolver(cto));
++}
+
+--- mpi/transpose-pairwise.c 2014-03-04 19:41:03.000000000 +0100
++++ mpi/transpose-pairwise.c 2015-09-05 06:00:05.715433709 +0200
+@@ -53,7 +53,6 @@ static void transpose_chunks(int *sched,
+ {
+ if (sched) {
+ int i;
+- MPI_Status status;
+
+ /* TODO: explore non-synchronous send/recv? */
+
+@@ -74,7 +73,7 @@ static void transpose_chunks(int *sched,
+ O + rbo[pe], (int) (rbs[pe]),
+ FFTW_MPI_TYPE,
+ pe, (pe * n_pes + my_pe) & 0xffff,
+- comm, &status);
++ comm, MPI_STATUS_IGNORE);
+ }
+ }
+
+@@ -92,7 +91,7 @@ static void transpose_chunks(int *sched,
+ O + rbo[pe], (int) (rbs[pe]),
+ FFTW_MPI_TYPE,
+ pe, (pe * n_pes + my_pe) & 0xffff,
+- comm, &status);
++ comm, MPI_STATUS_IGNORE);
+ }
+ }
+ }
+@@ -350,6 +349,7 @@ nada:
+ X(plan_destroy_internal)(*cld3);
+ X(plan_destroy_internal)(*cld2rest);
+ X(plan_destroy_internal)(*cld2);
++ *cld2 = *cld2rest = *cld3 = NULL;
+ return 0;
+ }
+
+--- mpi/transpose-pairwise-transposed.c 1970-01-01 01:00:00.000000000 +0100
++++ mpi/transpose-pairwise-transposed.c 2015-09-05 06:00:07.280481042 +0200
+@@ -0,0 +1,510 @@
++/*
++ * Copyright (c) 2003, 2007-11 Matteo Frigo
++ * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
++ * Copyright (c) 2012 Michael Pippig
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ */
++
++/* Distributed transposes using a sequence of carefully scheduled
++ pairwise exchanges. This has the advantage that it can be done
++ in-place, or out-of-place while preserving the input, using buffer
++ space proportional to the local size divided by the number of
++ processes (i.e. to the total array size divided by the number of
++ processes squared). */
++
++#include "mpi-transpose.h"
++#include <string.h>
++
++typedef struct {
++ solver super;
++ int preserve_input; /* preserve input even if DESTROY_INPUT was passed */
++} S;
++
++typedef struct {
++ plan_mpi_transpose super;
++
++ plan *cld1, *cld2, *cld2rest, *cld3;
++ INT rest_Ioff, rest_Ooff;
++
++ int n_pes, my_pe, *sched;
++ INT *send_block_sizes, *send_block_offsets;
++ INT *recv_block_sizes, *recv_block_offsets;
++ MPI_Comm comm;
++ int preserve_input;
++} P;
++
++static void transpose_chunks(int *sched, int n_pes, int my_pe,
++ INT *sbs, INT *sbo, INT *rbs, INT *rbo,
++ MPI_Comm comm,
++ R *I, R *O)
++{
++ if (sched) {
++ int i;
++
++ /* TODO: explore non-synchronous send/recv? */
++
++ if (I == O) {
++ R *buf = (R*) MALLOC(sizeof(R) * sbs[0], BUFFERS);
++
++ for (i = 0; i < n_pes; ++i) {
++ int pe = sched[i];
++ if (my_pe == pe) {
++ if (rbo[pe] != sbo[pe])
++ memmove(O + rbo[pe], O + sbo[pe],
++ sbs[pe] * sizeof(R));
++ }
++ else {
++ memcpy(buf, O + sbo[pe], sbs[pe] * sizeof(R));
++ MPI_Sendrecv(buf, (int) (sbs[pe]), FFTW_MPI_TYPE,
++ pe, (my_pe * n_pes + pe) & 0xffff,
++ O + rbo[pe], (int) (rbs[pe]),
++ FFTW_MPI_TYPE,
++ pe, (pe * n_pes + my_pe) & 0xffff,
++ comm, MPI_STATUS_IGNORE);
++ }
++ }
++
++ X(ifree)(buf);
++ }
++ else { /* I != O */
++ for (i = 0; i < n_pes; ++i) {
++ int pe = sched[i];
++ if (my_pe == pe)
++ memcpy(O + rbo[pe], I + sbo[pe], sbs[pe] * sizeof(R));
++ else
++ MPI_Sendrecv(I + sbo[pe], (int) (sbs[pe]),
++ FFTW_MPI_TYPE,
++ pe, (my_pe * n_pes + pe) & 0xffff,
++ O + rbo[pe], (int) (rbs[pe]),
++ FFTW_MPI_TYPE,
++ pe, (pe * n_pes + my_pe) & 0xffff,
++ comm, MPI_STATUS_IGNORE);
++ }
++ }
++ }
++}
++
++/* transpose locally to get contiguous chunks
++ this may take two transposes if the block sizes are unequal
++ (3 subplans, two of which operate on disjoint data) */
++static void apply_pretranspose(
++ const P *ego, R *I, R *O
++ )
++{
++ plan_rdft *cld2, *cld2rest, *cld3;
++
++ cld3 = (plan_rdft *) ego->cld3;
++ if (cld3)
++ cld3->apply(ego->cld3, O, O);
++ /* else TRANSPOSED_IN is true and user wants I transposed */
++
++ cld2 = (plan_rdft *) ego->cld2;
++ cld2->apply(ego->cld2, I, O);
++ cld2rest = (plan_rdft *) ego->cld2rest;
++ if (cld2rest) {
++ cld2rest->apply(ego->cld2rest,
++ I + ego->rest_Ioff, O + ego->rest_Ooff);
++ }
++}
++
++static void apply(const plan *ego_, R *I, R *O)
++{
++ const P *ego = (const P *) ego_;
++ plan_rdft *cld1 = (plan_rdft *) ego->cld1;
++
++ if (cld1) {
++ /* transpose locally to get contiguous chunks */
++ apply_pretranspose(ego, I, O);
++
++ if(ego->preserve_input) I = O;
++
++ /* transpose chunks globally */
++ transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
++ ego->send_block_sizes, ego->send_block_offsets,
++ ego->recv_block_sizes, ego->recv_block_offsets,
++ ego->comm, O, I);
++
++ /* transpose locally to get non-transposed output */
++ cld1->apply(ego->cld1, I, O);
++ } /* else TRANSPOSED_OUT is true and user wants O transposed */
++ else if (ego->preserve_input) {
++ /* transpose locally to get contiguous chunks */
++ apply_pretranspose(ego, I, O);
++
++ /* transpose chunks globally */
++ transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
++ ego->send_block_sizes, ego->send_block_offsets,
++ ego->recv_block_sizes, ego->recv_block_offsets,
++ ego->comm, O, O);
++ }
++ else {
++ /* transpose locally to get contiguous chunks */
++ apply_pretranspose(ego, I, I);
++
++ /* transpose chunks globally */
++ transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
++ ego->send_block_sizes, ego->send_block_offsets,
++ ego->recv_block_sizes, ego->recv_block_offsets,
++ ego->comm, I, O);
++ }
++}
++
++static int applicable(const S *ego, const problem *p_,
++ const planner *plnr)
++{
++ const problem_mpi_transpose *p = (const problem_mpi_transpose *) p_;
++ /* Note: this is *not* UGLY for out-of-place, destroy-input plans;
++ the planner often prefers transpose-pairwise to transpose-alltoall,
++ at least with LAM MPI on my machine. */
++ return (1
++ && (!ego->preserve_input || (!NO_DESTROY_INPUTP(plnr)
++ && p->I != p->O))
++ && ONLY_TRANSPOSEDP(p->flags));
++}
++
++static void awake(plan *ego_, enum wakefulness wakefulness)
++{
++ P *ego = (P *) ego_;
++ X(plan_awake)(ego->cld1, wakefulness);
++ X(plan_awake)(ego->cld2, wakefulness);
++ X(plan_awake)(ego->cld2rest, wakefulness);
++ X(plan_awake)(ego->cld3, wakefulness);
++}
++
++static void destroy(plan *ego_)
++{
++ P *ego = (P *) ego_;
++ X(ifree0)(ego->sched);
++ X(ifree0)(ego->send_block_sizes);
++ MPI_Comm_free(&ego->comm);
++ X(plan_destroy_internal)(ego->cld3);
++ X(plan_destroy_internal)(ego->cld2rest);
++ X(plan_destroy_internal)(ego->cld2);
++ X(plan_destroy_internal)(ego->cld1);
++}
++
++static void print(const plan *ego_, printer *p)
++{
++ const P *ego = (const P *) ego_;
++ p->print(p, "(mpi-transpose-pairwise-transposed%s%(%p%)%(%p%)%(%p%)%(%p%))",
++ ego->preserve_input==2 ?"/p":"",
++ ego->cld1, ego->cld2, ego->cld2rest, ego->cld3);
++}
++
++/* Given a process which_pe and a number of processes npes, fills
++ the array sched[npes] with a sequence of processes to communicate
++ with for a deadlock-free, optimum-overlap all-to-all communication.
++ (All processes must call this routine to get their own schedules.)
++ The schedule can be re-ordered arbitrarily as long as all processes
++ apply the same permutation to their schedules.
++
++ The algorithm here is based upon the one described in:
++ J. A. M. Schreuder, "Constructing timetables for sport
++ competitions," Mathematical Programming Study 13, pp. 58-67 (1980).
++ In a sport competition, you have N teams and want every team to
++ play every other team in as short a time as possible (maximum overlap
++ between games). This timetabling problem is therefore identical
++ to that of an all-to-all communications problem. In our case, there
++ is one wrinkle: as part of the schedule, the process must do
++ some data transfer with itself (local data movement), analogous
++ to a requirement that each team "play itself" in addition to other
++ teams. With this wrinkle, it turns out that an optimal timetable
++ (N parallel games) can be constructed for any N, not just for even
++ N as in the original problem described by Schreuder.
++*/
++static void fill1_comm_sched(int *sched, int which_pe, int npes)
++{
++ int pe, i, n, s = 0;
++ A(which_pe >= 0 && which_pe < npes);
++ if (npes % 2 == 0) {
++ n = npes;
++ sched[s++] = which_pe;
++ }
++ else
++ n = npes + 1;
++ for (pe = 0; pe < n - 1; ++pe) {
++ if (npes % 2 == 0) {
++ if (pe == which_pe) sched[s++] = npes - 1;
++ else if (npes - 1 == which_pe) sched[s++] = pe;
++ }
++ else if (pe == which_pe) sched[s++] = pe;
++
++ if (pe != which_pe && which_pe < n - 1) {
++ i = (pe - which_pe + (n - 1)) % (n - 1);
++ if (i < n/2)
++ sched[s++] = (pe + i) % (n - 1);
++
++ i = (which_pe - pe + (n - 1)) % (n - 1);
++ if (i < n/2)
++ sched[s++] = (pe - i + (n - 1)) % (n - 1);
++ }
++ }
++ A(s == npes);
++}
++
++/* Sort the communication schedule sched for npes so that the schedule
++ on process sortpe is ascending or descending (!ascending). This is
++ necessary to allow in-place transposes when the problem does not
++ divide equally among the processes. In this case there is one
++ process where the incoming blocks are bigger/smaller than the
++ outgoing blocks and thus have to be received in
++ descending/ascending order, respectively, to avoid overwriting data
++ before it is sent. */
++static void sort1_comm_sched(int *sched, int npes, int sortpe, int ascending)
++{
++ int *sortsched, i;
++ sortsched = (int *) MALLOC(npes * sizeof(int) * 2, OTHER);
++ fill1_comm_sched(sortsched, sortpe, npes);
++ if (ascending)
++ for (i = 0; i < npes; ++i)
++ sortsched[npes + sortsched[i]] = sched[i];
++ else
++ for (i = 0; i < npes; ++i)
++ sortsched[2*npes - 1 - sortsched[i]] = sched[i];
++ for (i = 0; i < npes; ++i)
++ sched[i] = sortsched[npes + i];
++ X(ifree)(sortsched);
++}
++
++/* make the plans to do the pre-MPI transpositions (shared with
++ transpose-alltoall-transposed) */
++int XM(mkplans_pretranspose)(const problem_mpi_transpose *p, planner *plnr,
++ R *I, R *O, int my_pe,
++ plan **cld2, plan **cld2rest, plan **cld3,
++ INT *rest_Ioff, INT *rest_Ooff)
++{
++ INT vn = p->vn;
++ INT b = XM(block)(p->nx, p->block, my_pe);
++ INT bt = p->tblock;
++ INT nyb = p->ny / bt; /* number of equal-sized blocks */
++ INT nyr = p->ny - nyb * bt; /* leftover rows after equal blocks */
++
++ *cld2 = *cld2rest = *cld3 = NULL;
++ *rest_Ioff = *rest_Ooff = 0;
++
++ if (!(p->flags & TRANSPOSED_IN) && (nyr == 0 || I != O)) {
++ INT ny = p->ny * vn;
++ bt *= vn;
++ *cld2 = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(X(mktensor_3d)
++ (nyb, bt, b * bt,
++ b, ny, bt,
++ bt, 1, 1),
++ I, O),
++ 0, 0, NO_SLOW);
++ if (!*cld2) goto nada;
++
++ if (nyr > 0) {
++ *rest_Ioff = nyb * bt;
++ *rest_Ooff = nyb * b * bt;
++ bt = nyr * vn;
++ *cld2rest = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(X(mktensor_2d)
++ (b, ny, bt,
++ bt, 1, 1),
++ I + *rest_Ioff,
++ O + *rest_Ooff),
++ 0, 0, NO_SLOW);
++ if (!*cld2rest) goto nada;
++ }
++ }
++ else {
++ *cld2 = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(
++ X(mktensor_4d)
++ (nyb, b * bt * vn, b * bt * vn,
++ b, vn, bt * vn,
++ bt, b * vn, vn,
++ vn, 1, 1),
++ I, O),
++ 0, 0, NO_SLOW);
++ if (!*cld2) goto nada;
++
++ *rest_Ioff = *rest_Ooff = nyb * bt * b * vn;
++ *cld2rest = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(
++ X(mktensor_3d)
++ (b, vn, nyr * vn,
++ nyr, b * vn, vn,
++ vn, 1, 1),
++ I + *rest_Ioff, O + *rest_Ooff),
++ 0, 0, NO_SLOW);
++ if (!*cld2rest) goto nada;
++
++ if (!(p->flags & TRANSPOSED_IN)) {
++ *cld3 = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(
++ X(mktensor_3d)
++ (p->ny, vn, b * vn,
++ b, p->ny * vn, vn,
++ vn, 1, 1),
++ I, I),
++ 0, 0, NO_SLOW);
++ if (!*cld3) goto nada;
++ }
++ }
++
++ return 1;
++
++nada:
++ X(plan_destroy_internal)(*cld3);
++ X(plan_destroy_internal)(*cld2rest);
++ X(plan_destroy_internal)(*cld2);
++ *cld2 = *cld2rest = *cld3 = NULL;
++ return 0;
++}
++
++static plan *mkplan(const solver *ego_, const problem *p_, planner *plnr)
++{
++ const S *ego = (const S *) ego_;
++ const problem_mpi_transpose *p;
++ P *pln;
++ plan *cld1 = 0, *cld2 = 0, *cld2rest = 0, *cld3 = 0;
++ INT b, bt, vn, rest_Ioff, rest_Ooff;
++ INT *sbs, *sbo, *rbs, *rbo;
++ int pe, my_pe, n_pes, sort_pe = -1, ascending = 1;
++ R *I, *O;
++ static const plan_adt padt = {
++ XM(transpose_solve), awake, print, destroy
++ };
++
++ UNUSED(ego);
++
++ if (!applicable(ego, p_, plnr))
++ return (plan *) 0;
++
++ p = (const problem_mpi_transpose *) p_;
++ vn = p->vn;
++ I = p->I; O = p->O;
++
++ MPI_Comm_rank(p->comm, &my_pe);
++ MPI_Comm_size(p->comm, &n_pes);
++
++ bt = XM(block)(p->ny, p->tblock, my_pe);
++
++
++ if (ego->preserve_input || NO_DESTROY_INPUTP(plnr)) I = p->O;
++
++ if (!(p->flags & TRANSPOSED_OUT)) { /* nx x bt x vn -> bt x nx x vn */
++ cld1 = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(X(mktensor_3d)
++ (bt, vn, p->nx * vn,
++ p->nx, bt * vn, vn,
++ vn, 1, 1),
++ I, O = p->O),
++ 0, 0, NO_SLOW);
++ if (XM(any_true)(!cld1, p->comm)) goto nada;
++
++ }
++ else {
++ if (ego->preserve_input || NO_DESTROY_INPUTP(plnr))
++ O = p->O;
++ else
++ O = p->I;
++ }
++
++ if (XM(any_true)(!XM(mkplans_pretranspose)(p, plnr, p->I, O, my_pe,
++ &cld2, &cld2rest, &cld3,
++ &rest_Ioff, &rest_Ooff),
++ p->comm)) goto nada;
++
++ pln = MKPLAN_MPI_TRANSPOSE(P, &padt, apply);
++
++ pln->cld1 = cld1;
++ pln->cld2 = cld2;
++ pln->cld2rest = cld2rest;
++ pln->rest_Ioff = rest_Ioff;
++ pln->rest_Ooff = rest_Ooff;
++ pln->cld3 = cld3;
++ pln->preserve_input = ego->preserve_input ? 2 : NO_DESTROY_INPUTP(plnr);
++
++ MPI_Comm_dup(p->comm, &pln->comm);
++
++ n_pes = (int) X(imax)(XM(num_blocks)(p->nx, p->block),
++ XM(num_blocks)(p->ny, p->tblock));
++
++ /* Compute sizes/offsets of blocks to exchange between processors */
++ sbs = (INT *) MALLOC(4 * n_pes * sizeof(INT), PLANS);
++ sbo = sbs + n_pes;
++ rbs = sbo + n_pes;
++ rbo = rbs + n_pes;
++ b = XM(block)(p->nx, p->block, my_pe);
++ bt = XM(block)(p->ny, p->tblock, my_pe);
++ for (pe = 0; pe < n_pes; ++pe) {
++ INT db, dbt; /* destination block sizes */
++ db = XM(block)(p->nx, p->block, pe);
++ dbt = XM(block)(p->ny, p->tblock, pe);
++
++ sbs[pe] = b * dbt * vn;
++ sbo[pe] = pe * (b * p->tblock) * vn;
++ rbs[pe] = db * bt * vn;
++ rbo[pe] = pe * (p->block * bt) * vn;
++
++ if (db * dbt > 0 && db * p->tblock != p->block * dbt) {
++ A(sort_pe == -1); /* only one process should need sorting */
++ sort_pe = pe;
++ ascending = db * p->tblock > p->block * dbt;
++ }
++ }
++ pln->n_pes = n_pes;
++ pln->my_pe = my_pe;
++ pln->send_block_sizes = sbs;
++ pln->send_block_offsets = sbo;
++ pln->recv_block_sizes = rbs;
++ pln->recv_block_offsets = rbo;
++
++ if (my_pe >= n_pes) {
++ pln->sched = 0; /* this process is not doing anything */
++ }
++ else {
++ pln->sched = (int *) MALLOC(n_pes * sizeof(int), PLANS);
++ fill1_comm_sched(pln->sched, my_pe, n_pes);
++ if (sort_pe >= 0)
++ sort1_comm_sched(pln->sched, n_pes, sort_pe, ascending);
++ }
++
++ X(ops_zero)(&pln->super.super.ops);
++ if (cld1) X(ops_add2)(&cld1->ops, &pln->super.super.ops);
++ if (cld2) X(ops_add2)(&cld2->ops, &pln->super.super.ops);
++ if (cld2rest) X(ops_add2)(&cld2rest->ops, &pln->super.super.ops);
++ if (cld3) X(ops_add2)(&cld3->ops, &pln->super.super.ops);
++ /* FIXME: should MPI operations be counted in "other" somehow? */
++
++ return &(pln->super.super);
++
++ nada:
++ X(plan_destroy_internal)(cld3);
++ X(plan_destroy_internal)(cld2rest);
++ X(plan_destroy_internal)(cld2);
++ X(plan_destroy_internal)(cld1);
++ return (plan *) 0;
++}
++
++static solver *mksolver(int preserve_input)
++{
++ static const solver_adt sadt = { PROBLEM_MPI_TRANSPOSE, mkplan, 0 };
++ S *slv = MKSOLVER(S, &sadt);
++ slv->preserve_input = preserve_input;
++ return &(slv->super);
++}
++
++void XM(transpose_pairwise_transposed_register)(planner *p)
++{
++ int preserve_input;
++ for (preserve_input = 0; preserve_input <= 1; ++preserve_input)
++ REGISTER_SOLVER(p, mksolver(preserve_input));
++}
diff --git a/var/spack/repos/builtin/packages/fftw/pfft-3.3.5.patch b/var/spack/repos/builtin/packages/fftw/pfft-3.3.5.patch
new file mode 100644
index 0000000000..360a3757f9
--- /dev/null
+++ b/var/spack/repos/builtin/packages/fftw/pfft-3.3.5.patch
@@ -0,0 +1,858 @@
+--- mpi/conf.c 2014-03-04 19:41:03.000000000 +0100
++++ mpi/conf.c 2015-09-05 05:53:19.085516467 +0200
+@@ -29,6 +29,8 @@ static const solvtab s =
+ SOLVTAB(XM(transpose_pairwise_register)),
+ SOLVTAB(XM(transpose_alltoall_register)),
+ SOLVTAB(XM(transpose_recurse_register)),
++ SOLVTAB(XM(transpose_pairwise_transposed_register)),
++ SOLVTAB(XM(transpose_alltoall_transposed_register)),
+ SOLVTAB(XM(dft_rank_geq2_register)),
+ SOLVTAB(XM(dft_rank_geq2_transposed_register)),
+ SOLVTAB(XM(dft_serial_register)),
+
+--- mpi/Makefile.am 2013-03-18 13:10:45.000000000 +0100
++++ mpi/Makefile.am 2015-09-05 05:53:19.084516437 +0200
+@@ -16,6 +16,7 @@ BUILT_SOURCES = fftw3-mpi.f03.in fftw3-m
+ CLEANFILES = fftw3-mpi.f03 fftw3l-mpi.f03
+
+ TRANSPOSE_SRC = transpose-alltoall.c transpose-pairwise.c transpose-recurse.c transpose-problem.c transpose-solve.c mpi-transpose.h
++TRANSPOSE_SRC += transpose-alltoall-transposed.c transpose-pairwise-transposed.c
+ DFT_SRC = dft-serial.c dft-rank-geq2.c dft-rank-geq2-transposed.c dft-rank1.c dft-rank1-bigvec.c dft-problem.c dft-solve.c mpi-dft.h
+ RDFT_SRC = rdft-serial.c rdft-rank-geq2.c rdft-rank-geq2-transposed.c rdft-rank1-bigvec.c rdft-problem.c rdft-solve.c mpi-rdft.h
+ RDFT2_SRC = rdft2-serial.c rdft2-rank-geq2.c rdft2-rank-geq2-transposed.c rdft2-problem.c rdft2-solve.c mpi-rdft2.h
+
+--- mpi/mpi-transpose.h 2014-03-04 19:41:03.000000000 +0100
++++ mpi/mpi-transpose.h 2015-09-05 05:53:19.085516467 +0200
+@@ -59,3 +59,5 @@ int XM(mkplans_posttranspose)(const prob
+ void XM(transpose_pairwise_register)(planner *p);
+ void XM(transpose_alltoall_register)(planner *p);
+ void XM(transpose_recurse_register)(planner *p);
++void XM(transpose_pairwise_transposed_register)(planner *p);
++void XM(transpose_alltoall_transposed_register)(planner *p);
+
+--- mpi/transpose-alltoall-transposed.c 1970-01-01 01:00:00.000000000 +0100
++++ mpi/transpose-alltoall-transposed.c 2015-09-05 05:53:19.085516467 +0200
+@@ -0,0 +1,280 @@
++/*
++ * Copyright (c) 2003, 2007-11 Matteo Frigo
++ * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
++ * Copyright (c) 2012 Michael Pippig
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ */
++
++/* plans for distributed out-of-place transpose using MPI_Alltoall,
++ and which destroy the input array (also if TRANSPOSED_IN is used) */
++
++#include "mpi-transpose.h"
++#include <string.h>
++
++typedef struct {
++ solver super;
++ int copy_transposed_out; /* whether to copy the output for TRANSPOSED_OUT,
++ which makes the first transpose out-of-place
++ but costs an extra copy and requires us
++ to destroy the input */
++} S;
++
++typedef struct {
++ plan_mpi_transpose super;
++
++ plan *cld1, *cld2, *cld2rest, *cld3;
++
++ MPI_Comm comm;
++ int *send_block_sizes, *send_block_offsets;
++ int *recv_block_sizes, *recv_block_offsets;
++
++ INT rest_Ioff, rest_Ooff;
++
++ int equal_blocks;
++} P;
++
++/* transpose locally to get contiguous chunks
++ this may take two transposes if the block sizes are unequal
++ (3 subplans, two of which operate on disjoint data) */
++static void apply_pretranspose(
++ const P *ego, R *I, R *O
++ )
++{
++ plan_rdft *cld2, *cld2rest, *cld3;
++
++ cld3 = (plan_rdft *) ego->cld3;
++ if (cld3)
++ cld3->apply(ego->cld3, O, O);
++ /* else TRANSPOSED_IN is true and user wants I transposed */
++
++ cld2 = (plan_rdft *) ego->cld2;
++ cld2->apply(ego->cld2, I, O);
++ cld2rest = (plan_rdft *) ego->cld2rest;
++ if (cld2rest) {
++ cld2rest->apply(ego->cld2rest,
++ I + ego->rest_Ioff, O + ego->rest_Ooff);
++ }
++}
++
++static void apply(const plan *ego_, R *I, R *O)
++{
++ const P *ego = (const P *) ego_;
++ plan_rdft *cld1 = (plan_rdft *) ego->cld1;
++
++ if (cld1) {
++ /* transpose locally to get contiguous chunks */
++ apply_pretranspose(ego, I, O);
++
++ /* transpose chunks globally */
++ if (ego->equal_blocks)
++ MPI_Alltoall(O, ego->send_block_sizes[0], FFTW_MPI_TYPE,
++ I, ego->recv_block_sizes[0], FFTW_MPI_TYPE,
++ ego->comm);
++ else
++ MPI_Alltoallv(O, ego->send_block_sizes, ego->send_block_offsets,
++ FFTW_MPI_TYPE,
++ I, ego->recv_block_sizes, ego->recv_block_offsets,
++ FFTW_MPI_TYPE,
++ ego->comm);
++
++ /* transpose locally to get non-transposed output */
++ cld1->apply(ego->cld1, I, O);
++ } /* else TRANSPOSED_OUT is true and user wants O transposed */
++ else {
++ /* transpose locally to get contiguous chunks */
++ apply_pretranspose(ego, I, I);
++
++ /* transpose chunks globally */
++ if (ego->equal_blocks)
++ MPI_Alltoall(I, ego->send_block_sizes[0], FFTW_MPI_TYPE,
++ O, ego->recv_block_sizes[0], FFTW_MPI_TYPE,
++ ego->comm);
++ else
++ MPI_Alltoallv(I, ego->send_block_sizes, ego->send_block_offsets,
++ FFTW_MPI_TYPE,
++ O, ego->recv_block_sizes, ego->recv_block_offsets,
++ FFTW_MPI_TYPE,
++ ego->comm);
++ }
++}
++
++static int applicable(const S *ego, const problem *p_,
++ const planner *plnr)
++{
++ /* in contrast to transpose-alltoall this algorithm can not preserve the input,
++ * since we need at least one transpose before the (out-of-place) Alltoall */
++ const problem_mpi_transpose *p = (const problem_mpi_transpose *) p_;
++ return (1
++ && p->I != p->O
++ && (!NO_DESTROY_INPUTP(plnr))
++ && ((p->flags & TRANSPOSED_OUT) || !ego->copy_transposed_out)
++ && ONLY_TRANSPOSEDP(p->flags)
++ );
++}
++
++static void awake(plan *ego_, enum wakefulness wakefulness)
++{
++ P *ego = (P *) ego_;
++ X(plan_awake)(ego->cld1, wakefulness);
++ X(plan_awake)(ego->cld2, wakefulness);
++ X(plan_awake)(ego->cld2rest, wakefulness);
++ X(plan_awake)(ego->cld3, wakefulness);
++}
++
++static void destroy(plan *ego_)
++{
++ P *ego = (P *) ego_;
++ X(ifree0)(ego->send_block_sizes);
++ MPI_Comm_free(&ego->comm);
++ X(plan_destroy_internal)(ego->cld3);
++ X(plan_destroy_internal)(ego->cld2rest);
++ X(plan_destroy_internal)(ego->cld2);
++ X(plan_destroy_internal)(ego->cld1);
++}
++
++static void print(const plan *ego_, printer *p)
++{
++ const P *ego = (const P *) ego_;
++ p->print(p, "(mpi-transpose-alltoall-transposed%s%(%p%)%(%p%)%(%p%)%(%p%))",
++ ego->equal_blocks ? "/e" : "",
++ ego->cld1, ego->cld2, ego->cld2rest, ego->cld3);
++}
++
++static plan *mkplan(const solver *ego_, const problem *p_, planner *plnr)
++{
++ const S *ego = (const S *) ego_;
++ const problem_mpi_transpose *p;
++ P *pln;
++ plan *cld1 = 0, *cld2 = 0, *cld2rest = 0, *cld3 = 0;
++ INT b, bt, vn, rest_Ioff, rest_Ooff;
++ R *O;
++ int *sbs, *sbo, *rbs, *rbo;
++ int pe, my_pe, n_pes;
++ int equal_blocks = 1;
++ static const plan_adt padt = {
++ XM(transpose_solve), awake, print, destroy
++ };
++
++ if (!applicable(ego, p_, plnr))
++ return (plan *) 0;
++
++ p = (const problem_mpi_transpose *) p_;
++ vn = p->vn;
++
++ MPI_Comm_rank(p->comm, &my_pe);
++ MPI_Comm_size(p->comm, &n_pes);
++
++ bt = XM(block)(p->ny, p->tblock, my_pe);
++
++ if (p->flags & TRANSPOSED_OUT) { /* O stays transposed */
++ if (ego->copy_transposed_out) {
++ cld1 = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(X(mktensor_1d)
++ (bt * p->nx * vn, 1, 1),
++ p->I, O = p->O),
++ 0, 0, NO_SLOW);
++ if (XM(any_true)(!cld1, p->comm)) goto nada;
++ }
++ else /* first transpose is in-place */
++ O = p->I;
++ }
++ else { /* transpose nx x bt x vn -> bt x nx x vn */
++ cld1 = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(X(mktensor_3d)
++ (bt, vn, p->nx * vn,
++ p->nx, bt * vn, vn,
++ vn, 1, 1),
++ p->I, O = p->O),
++ 0, 0, NO_SLOW);
++ if (XM(any_true)(!cld1, p->comm)) goto nada;
++ }
++
++ if (XM(any_true)(!XM(mkplans_pretranspose)(p, plnr, p->I, O, my_pe,
++ &cld2, &cld2rest, &cld3,
++ &rest_Ioff, &rest_Ooff),
++ p->comm)) goto nada;
++
++
++ pln = MKPLAN_MPI_TRANSPOSE(P, &padt, apply);
++
++ pln->cld1 = cld1;
++ pln->cld2 = cld2;
++ pln->cld2rest = cld2rest;
++ pln->rest_Ioff = rest_Ioff;
++ pln->rest_Ooff = rest_Ooff;
++ pln->cld3 = cld3;
++
++ MPI_Comm_dup(p->comm, &pln->comm);
++
++ /* Compute sizes/offsets of blocks to send for all-to-all command. */
++ sbs = (int *) MALLOC(4 * n_pes * sizeof(int), PLANS);
++ sbo = sbs + n_pes;
++ rbs = sbo + n_pes;
++ rbo = rbs + n_pes;
++ b = XM(block)(p->nx, p->block, my_pe);
++ bt = XM(block)(p->ny, p->tblock, my_pe);
++ for (pe = 0; pe < n_pes; ++pe) {
++ INT db, dbt; /* destination block sizes */
++ db = XM(block)(p->nx, p->block, pe);
++ dbt = XM(block)(p->ny, p->tblock, pe);
++ if (db != p->block || dbt != p->tblock)
++ equal_blocks = 0;
++
++ /* MPI requires type "int" here; apparently it
++ has no 64-bit API? Grrr. */
++ sbs[pe] = (int) (b * dbt * vn);
++ sbo[pe] = (int) (pe * (b * p->tblock) * vn);
++ rbs[pe] = (int) (db * bt * vn);
++ rbo[pe] = (int) (pe * (p->block * bt) * vn);
++ }
++ pln->send_block_sizes = sbs;
++ pln->send_block_offsets = sbo;
++ pln->recv_block_sizes = rbs;
++ pln->recv_block_offsets = rbo;
++ pln->equal_blocks = equal_blocks;
++
++ X(ops_zero)(&pln->super.super.ops);
++ if (cld1) X(ops_add2)(&cld1->ops, &pln->super.super.ops);
++ if (cld2) X(ops_add2)(&cld2->ops, &pln->super.super.ops);
++ if (cld2rest) X(ops_add2)(&cld2rest->ops, &pln->super.super.ops);
++ if (cld3) X(ops_add2)(&cld3->ops, &pln->super.super.ops);
++ /* FIXME: should MPI operations be counted in "other" somehow? */
++
++ return &(pln->super.super);
++
++ nada:
++ X(plan_destroy_internal)(cld3);
++ X(plan_destroy_internal)(cld2rest);
++ X(plan_destroy_internal)(cld2);
++ X(plan_destroy_internal)(cld1);
++ return (plan *) 0;
++}
++
++static solver *mksolver(int copy_transposed_out)
++{
++ static const solver_adt sadt = { PROBLEM_MPI_TRANSPOSE, mkplan, 0 };
++ S *slv = MKSOLVER(S, &sadt);
++ slv->copy_transposed_out = copy_transposed_out;
++ return &(slv->super);
++}
++
++void XM(transpose_alltoall_transposed_register)(planner *p)
++{
++ int cto;
++ for (cto = 0; cto <= 1; ++cto)
++ REGISTER_SOLVER(p, mksolver(cto));
++}
+
+--- mpi/transpose-pairwise.c 2014-03-04 19:41:03.000000000 +0100
++++ mpi/transpose-pairwise.c 2015-09-05 06:00:05.715433709 +0200
+@@ -53,7 +53,6 @@ static void transpose_chunks(int *sched,
+ {
+ if (sched) {
+ int i;
+- MPI_Status status;
+
+ /* TODO: explore non-synchronous send/recv? */
+
+@@ -74,7 +73,7 @@ static void transpose_chunks(int *sched,
+ O + rbo[pe], (int) (rbs[pe]),
+ FFTW_MPI_TYPE,
+ pe, (pe * n_pes + my_pe) & 0xffff,
+- comm, &status);
++ comm, MPI_STATUS_IGNORE);
+ }
+ }
+
+@@ -92,7 +91,7 @@ static void transpose_chunks(int *sched,
+ O + rbo[pe], (int) (rbs[pe]),
+ FFTW_MPI_TYPE,
+ pe, (pe * n_pes + my_pe) & 0xffff,
+- comm, &status);
++ comm, MPI_STATUS_IGNORE);
+ }
+ }
+ }
+
+--- mpi/transpose-pairwise-transposed.c 1970-01-01 01:00:00.000000000 +0100
++++ mpi/transpose-pairwise-transposed.c 2015-09-05 06:00:07.280481042 +0200
+@@ -0,0 +1,510 @@
++/*
++ * Copyright (c) 2003, 2007-11 Matteo Frigo
++ * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
++ * Copyright (c) 2012 Michael Pippig
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ */
++
++/* Distributed transposes using a sequence of carefully scheduled
++ pairwise exchanges. This has the advantage that it can be done
++ in-place, or out-of-place while preserving the input, using buffer
++ space proportional to the local size divided by the number of
++ processes (i.e. to the total array size divided by the number of
++ processes squared). */
++
++#include "mpi-transpose.h"
++#include <string.h>
++
++typedef struct {
++ solver super;
++ int preserve_input; /* preserve input even if DESTROY_INPUT was passed */
++} S;
++
++typedef struct {
++ plan_mpi_transpose super;
++
++ plan *cld1, *cld2, *cld2rest, *cld3;
++ INT rest_Ioff, rest_Ooff;
++
++ int n_pes, my_pe, *sched;
++ INT *send_block_sizes, *send_block_offsets;
++ INT *recv_block_sizes, *recv_block_offsets;
++ MPI_Comm comm;
++ int preserve_input;
++} P;
++
++static void transpose_chunks(int *sched, int n_pes, int my_pe,
++ INT *sbs, INT *sbo, INT *rbs, INT *rbo,
++ MPI_Comm comm,
++ R *I, R *O)
++{
++ if (sched) {
++ int i;
++
++ /* TODO: explore non-synchronous send/recv? */
++
++ if (I == O) {
++ R *buf = (R*) MALLOC(sizeof(R) * sbs[0], BUFFERS);
++
++ for (i = 0; i < n_pes; ++i) {
++ int pe = sched[i];
++ if (my_pe == pe) {
++ if (rbo[pe] != sbo[pe])
++ memmove(O + rbo[pe], O + sbo[pe],
++ sbs[pe] * sizeof(R));
++ }
++ else {
++ memcpy(buf, O + sbo[pe], sbs[pe] * sizeof(R));
++ MPI_Sendrecv(buf, (int) (sbs[pe]), FFTW_MPI_TYPE,
++ pe, (my_pe * n_pes + pe) & 0xffff,
++ O + rbo[pe], (int) (rbs[pe]),
++ FFTW_MPI_TYPE,
++ pe, (pe * n_pes + my_pe) & 0xffff,
++ comm, MPI_STATUS_IGNORE);
++ }
++ }
++
++ X(ifree)(buf);
++ }
++ else { /* I != O */
++ for (i = 0; i < n_pes; ++i) {
++ int pe = sched[i];
++ if (my_pe == pe)
++ memcpy(O + rbo[pe], I + sbo[pe], sbs[pe] * sizeof(R));
++ else
++ MPI_Sendrecv(I + sbo[pe], (int) (sbs[pe]),
++ FFTW_MPI_TYPE,
++ pe, (my_pe * n_pes + pe) & 0xffff,
++ O + rbo[pe], (int) (rbs[pe]),
++ FFTW_MPI_TYPE,
++ pe, (pe * n_pes + my_pe) & 0xffff,
++ comm, MPI_STATUS_IGNORE);
++ }
++ }
++ }
++}
++
++/* transpose locally to get contiguous chunks
++ this may take two transposes if the block sizes are unequal
++ (3 subplans, two of which operate on disjoint data) */
++static void apply_pretranspose(
++ const P *ego, R *I, R *O
++ )
++{
++ plan_rdft *cld2, *cld2rest, *cld3;
++
++ cld3 = (plan_rdft *) ego->cld3;
++ if (cld3)
++ cld3->apply(ego->cld3, O, O);
++ /* else TRANSPOSED_IN is true and user wants I transposed */
++
++ cld2 = (plan_rdft *) ego->cld2;
++ cld2->apply(ego->cld2, I, O);
++ cld2rest = (plan_rdft *) ego->cld2rest;
++ if (cld2rest) {
++ cld2rest->apply(ego->cld2rest,
++ I + ego->rest_Ioff, O + ego->rest_Ooff);
++ }
++}
++
++static void apply(const plan *ego_, R *I, R *O)
++{
++ const P *ego = (const P *) ego_;
++ plan_rdft *cld1 = (plan_rdft *) ego->cld1;
++
++ if (cld1) {
++ /* transpose locally to get contiguous chunks */
++ apply_pretranspose(ego, I, O);
++
++ if(ego->preserve_input) I = O;
++
++ /* transpose chunks globally */
++ transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
++ ego->send_block_sizes, ego->send_block_offsets,
++ ego->recv_block_sizes, ego->recv_block_offsets,
++ ego->comm, O, I);
++
++ /* transpose locally to get non-transposed output */
++ cld1->apply(ego->cld1, I, O);
++ } /* else TRANSPOSED_OUT is true and user wants O transposed */
++ else if (ego->preserve_input) {
++ /* transpose locally to get contiguous chunks */
++ apply_pretranspose(ego, I, O);
++
++ /* transpose chunks globally */
++ transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
++ ego->send_block_sizes, ego->send_block_offsets,
++ ego->recv_block_sizes, ego->recv_block_offsets,
++ ego->comm, O, O);
++ }
++ else {
++ /* transpose locally to get contiguous chunks */
++ apply_pretranspose(ego, I, I);
++
++ /* transpose chunks globally */
++ transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
++ ego->send_block_sizes, ego->send_block_offsets,
++ ego->recv_block_sizes, ego->recv_block_offsets,
++ ego->comm, I, O);
++ }
++}
++
++static int applicable(const S *ego, const problem *p_,
++ const planner *plnr)
++{
++ const problem_mpi_transpose *p = (const problem_mpi_transpose *) p_;
++ /* Note: this is *not* UGLY for out-of-place, destroy-input plans;
++ the planner often prefers transpose-pairwise to transpose-alltoall,
++ at least with LAM MPI on my machine. */
++ return (1
++ && (!ego->preserve_input || (!NO_DESTROY_INPUTP(plnr)
++ && p->I != p->O))
++ && ONLY_TRANSPOSEDP(p->flags));
++}
++
++static void awake(plan *ego_, enum wakefulness wakefulness)
++{
++ P *ego = (P *) ego_;
++ X(plan_awake)(ego->cld1, wakefulness);
++ X(plan_awake)(ego->cld2, wakefulness);
++ X(plan_awake)(ego->cld2rest, wakefulness);
++ X(plan_awake)(ego->cld3, wakefulness);
++}
++
++static void destroy(plan *ego_)
++{
++ P *ego = (P *) ego_;
++ X(ifree0)(ego->sched);
++ X(ifree0)(ego->send_block_sizes);
++ MPI_Comm_free(&ego->comm);
++ X(plan_destroy_internal)(ego->cld3);
++ X(plan_destroy_internal)(ego->cld2rest);
++ X(plan_destroy_internal)(ego->cld2);
++ X(plan_destroy_internal)(ego->cld1);
++}
++
++static void print(const plan *ego_, printer *p)
++{
++ const P *ego = (const P *) ego_;
++ p->print(p, "(mpi-transpose-pairwise-transposed%s%(%p%)%(%p%)%(%p%)%(%p%))",
++ ego->preserve_input==2 ?"/p":"",
++ ego->cld1, ego->cld2, ego->cld2rest, ego->cld3);
++}
++
++/* Given a process which_pe and a number of processes npes, fills
++ the array sched[npes] with a sequence of processes to communicate
++ with for a deadlock-free, optimum-overlap all-to-all communication.
++ (All processes must call this routine to get their own schedules.)
++ The schedule can be re-ordered arbitrarily as long as all processes
++ apply the same permutation to their schedules.
++
++ The algorithm here is based upon the one described in:
++ J. A. M. Schreuder, "Constructing timetables for sport
++ competitions," Mathematical Programming Study 13, pp. 58-67 (1980).
++ In a sport competition, you have N teams and want every team to
++ play every other team in as short a time as possible (maximum overlap
++ between games). This timetabling problem is therefore identical
++ to that of an all-to-all communications problem. In our case, there
++ is one wrinkle: as part of the schedule, the process must do
++ some data transfer with itself (local data movement), analogous
++ to a requirement that each team "play itself" in addition to other
++ teams. With this wrinkle, it turns out that an optimal timetable
++ (N parallel games) can be constructed for any N, not just for even
++ N as in the original problem described by Schreuder.
++*/
++static void fill1_comm_sched(int *sched, int which_pe, int npes)
++{
++ int pe, i, n, s = 0;
++ A(which_pe >= 0 && which_pe < npes);
++ if (npes % 2 == 0) {
++ n = npes;
++ sched[s++] = which_pe;
++ }
++ else
++ n = npes + 1;
++ for (pe = 0; pe < n - 1; ++pe) {
++ if (npes % 2 == 0) {
++ if (pe == which_pe) sched[s++] = npes - 1;
++ else if (npes - 1 == which_pe) sched[s++] = pe;
++ }
++ else if (pe == which_pe) sched[s++] = pe;
++
++ if (pe != which_pe && which_pe < n - 1) {
++ i = (pe - which_pe + (n - 1)) % (n - 1);
++ if (i < n/2)
++ sched[s++] = (pe + i) % (n - 1);
++
++ i = (which_pe - pe + (n - 1)) % (n - 1);
++ if (i < n/2)
++ sched[s++] = (pe - i + (n - 1)) % (n - 1);
++ }
++ }
++ A(s == npes);
++}
++
++/* Sort the communication schedule sched for npes so that the schedule
++ on process sortpe is ascending or descending (!ascending). This is
++ necessary to allow in-place transposes when the problem does not
++ divide equally among the processes. In this case there is one
++ process where the incoming blocks are bigger/smaller than the
++ outgoing blocks and thus have to be received in
++ descending/ascending order, respectively, to avoid overwriting data
++ before it is sent. */
++static void sort1_comm_sched(int *sched, int npes, int sortpe, int ascending)
++{
++ int *sortsched, i;
++ sortsched = (int *) MALLOC(npes * sizeof(int) * 2, OTHER);
++ fill1_comm_sched(sortsched, sortpe, npes);
++ if (ascending)
++ for (i = 0; i < npes; ++i)
++ sortsched[npes + sortsched[i]] = sched[i];
++ else
++ for (i = 0; i < npes; ++i)
++ sortsched[2*npes - 1 - sortsched[i]] = sched[i];
++ for (i = 0; i < npes; ++i)
++ sched[i] = sortsched[npes + i];
++ X(ifree)(sortsched);
++}
++
++/* make the plans to do the pre-MPI transpositions (shared with
++ transpose-alltoall-transposed) */
++int XM(mkplans_pretranspose)(const problem_mpi_transpose *p, planner *plnr,
++ R *I, R *O, int my_pe,
++ plan **cld2, plan **cld2rest, plan **cld3,
++ INT *rest_Ioff, INT *rest_Ooff)
++{
++ INT vn = p->vn;
++ INT b = XM(block)(p->nx, p->block, my_pe);
++ INT bt = p->tblock;
++ INT nyb = p->ny / bt; /* number of equal-sized blocks */
++ INT nyr = p->ny - nyb * bt; /* leftover rows after equal blocks */
++
++ *cld2 = *cld2rest = *cld3 = NULL;
++ *rest_Ioff = *rest_Ooff = 0;
++
++ if (!(p->flags & TRANSPOSED_IN) && (nyr == 0 || I != O)) {
++ INT ny = p->ny * vn;
++ bt *= vn;
++ *cld2 = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(X(mktensor_3d)
++ (nyb, bt, b * bt,
++ b, ny, bt,
++ bt, 1, 1),
++ I, O),
++ 0, 0, NO_SLOW);
++ if (!*cld2) goto nada;
++
++ if (nyr > 0) {
++ *rest_Ioff = nyb * bt;
++ *rest_Ooff = nyb * b * bt;
++ bt = nyr * vn;
++ *cld2rest = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(X(mktensor_2d)
++ (b, ny, bt,
++ bt, 1, 1),
++ I + *rest_Ioff,
++ O + *rest_Ooff),
++ 0, 0, NO_SLOW);
++ if (!*cld2rest) goto nada;
++ }
++ }
++ else {
++ *cld2 = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(
++ X(mktensor_4d)
++ (nyb, b * bt * vn, b * bt * vn,
++ b, vn, bt * vn,
++ bt, b * vn, vn,
++ vn, 1, 1),
++ I, O),
++ 0, 0, NO_SLOW);
++ if (!*cld2) goto nada;
++
++ *rest_Ioff = *rest_Ooff = nyb * bt * b * vn;
++ *cld2rest = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(
++ X(mktensor_3d)
++ (b, vn, nyr * vn,
++ nyr, b * vn, vn,
++ vn, 1, 1),
++ I + *rest_Ioff, O + *rest_Ooff),
++ 0, 0, NO_SLOW);
++ if (!*cld2rest) goto nada;
++
++ if (!(p->flags & TRANSPOSED_IN)) {
++ *cld3 = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(
++ X(mktensor_3d)
++ (p->ny, vn, b * vn,
++ b, p->ny * vn, vn,
++ vn, 1, 1),
++ I, I),
++ 0, 0, NO_SLOW);
++ if (!*cld3) goto nada;
++ }
++ }
++
++ return 1;
++
++nada:
++ X(plan_destroy_internal)(*cld3);
++ X(plan_destroy_internal)(*cld2rest);
++ X(plan_destroy_internal)(*cld2);
++ *cld2 = *cld2rest = *cld3 = NULL;
++ return 0;
++}
++
++static plan *mkplan(const solver *ego_, const problem *p_, planner *plnr)
++{
++ const S *ego = (const S *) ego_;
++ const problem_mpi_transpose *p;
++ P *pln;
++ plan *cld1 = 0, *cld2 = 0, *cld2rest = 0, *cld3 = 0;
++ INT b, bt, vn, rest_Ioff, rest_Ooff;
++ INT *sbs, *sbo, *rbs, *rbo;
++ int pe, my_pe, n_pes, sort_pe = -1, ascending = 1;
++ R *I, *O;
++ static const plan_adt padt = {
++ XM(transpose_solve), awake, print, destroy
++ };
++
++ UNUSED(ego);
++
++ if (!applicable(ego, p_, plnr))
++ return (plan *) 0;
++
++ p = (const problem_mpi_transpose *) p_;
++ vn = p->vn;
++ I = p->I; O = p->O;
++
++ MPI_Comm_rank(p->comm, &my_pe);
++ MPI_Comm_size(p->comm, &n_pes);
++
++ bt = XM(block)(p->ny, p->tblock, my_pe);
++
++
++ if (ego->preserve_input || NO_DESTROY_INPUTP(plnr)) I = p->O;
++
++ if (!(p->flags & TRANSPOSED_OUT)) { /* nx x bt x vn -> bt x nx x vn */
++ cld1 = X(mkplan_f_d)(plnr,
++ X(mkproblem_rdft_0_d)(X(mktensor_3d)
++ (bt, vn, p->nx * vn,
++ p->nx, bt * vn, vn,
++ vn, 1, 1),
++ I, O = p->O),
++ 0, 0, NO_SLOW);
++ if (XM(any_true)(!cld1, p->comm)) goto nada;
++
++ }
++ else {
++ if (ego->preserve_input || NO_DESTROY_INPUTP(plnr))
++ O = p->O;
++ else
++ O = p->I;
++ }
++
++ if (XM(any_true)(!XM(mkplans_pretranspose)(p, plnr, p->I, O, my_pe,
++ &cld2, &cld2rest, &cld3,
++ &rest_Ioff, &rest_Ooff),
++ p->comm)) goto nada;
++
++ pln = MKPLAN_MPI_TRANSPOSE(P, &padt, apply);
++
++ pln->cld1 = cld1;
++ pln->cld2 = cld2;
++ pln->cld2rest = cld2rest;
++ pln->rest_Ioff = rest_Ioff;
++ pln->rest_Ooff = rest_Ooff;
++ pln->cld3 = cld3;
++ pln->preserve_input = ego->preserve_input ? 2 : NO_DESTROY_INPUTP(plnr);
++
++ MPI_Comm_dup(p->comm, &pln->comm);
++
++ n_pes = (int) X(imax)(XM(num_blocks)(p->nx, p->block),
++ XM(num_blocks)(p->ny, p->tblock));
++
++ /* Compute sizes/offsets of blocks to exchange between processors */
++ sbs = (INT *) MALLOC(4 * n_pes * sizeof(INT), PLANS);
++ sbo = sbs + n_pes;
++ rbs = sbo + n_pes;
++ rbo = rbs + n_pes;
++ b = XM(block)(p->nx, p->block, my_pe);
++ bt = XM(block)(p->ny, p->tblock, my_pe);
++ for (pe = 0; pe < n_pes; ++pe) {
++ INT db, dbt; /* destination block sizes */
++ db = XM(block)(p->nx, p->block, pe);
++ dbt = XM(block)(p->ny, p->tblock, pe);
++
++ sbs[pe] = b * dbt * vn;
++ sbo[pe] = pe * (b * p->tblock) * vn;
++ rbs[pe] = db * bt * vn;
++ rbo[pe] = pe * (p->block * bt) * vn;
++
++ if (db * dbt > 0 && db * p->tblock != p->block * dbt) {
++ A(sort_pe == -1); /* only one process should need sorting */
++ sort_pe = pe;
++ ascending = db * p->tblock > p->block * dbt;
++ }
++ }
++ pln->n_pes = n_pes;
++ pln->my_pe = my_pe;
++ pln->send_block_sizes = sbs;
++ pln->send_block_offsets = sbo;
++ pln->recv_block_sizes = rbs;
++ pln->recv_block_offsets = rbo;
++
++ if (my_pe >= n_pes) {
++ pln->sched = 0; /* this process is not doing anything */
++ }
++ else {
++ pln->sched = (int *) MALLOC(n_pes * sizeof(int), PLANS);
++ fill1_comm_sched(pln->sched, my_pe, n_pes);
++ if (sort_pe >= 0)
++ sort1_comm_sched(pln->sched, n_pes, sort_pe, ascending);
++ }
++
++ X(ops_zero)(&pln->super.super.ops);
++ if (cld1) X(ops_add2)(&cld1->ops, &pln->super.super.ops);
++ if (cld2) X(ops_add2)(&cld2->ops, &pln->super.super.ops);
++ if (cld2rest) X(ops_add2)(&cld2rest->ops, &pln->super.super.ops);
++ if (cld3) X(ops_add2)(&cld3->ops, &pln->super.super.ops);
++ /* FIXME: should MPI operations be counted in "other" somehow? */
++
++ return &(pln->super.super);
++
++ nada:
++ X(plan_destroy_internal)(cld3);
++ X(plan_destroy_internal)(cld2rest);
++ X(plan_destroy_internal)(cld2);
++ X(plan_destroy_internal)(cld1);
++ return (plan *) 0;
++}
++
++static solver *mksolver(int preserve_input)
++{
++ static const solver_adt sadt = { PROBLEM_MPI_TRANSPOSE, mkplan, 0 };
++ S *slv = MKSOLVER(S, &sadt);
++ slv->preserve_input = preserve_input;
++ return &(slv->super);
++}
++
++void XM(transpose_pairwise_transposed_register)(planner *p)
++{
++ int preserve_input;
++ for (preserve_input = 0; preserve_input <= 1; ++preserve_input)
++ REGISTER_SOLVER(p, mksolver(preserve_input));
++}
diff --git a/var/spack/repos/builtin/packages/fontconfig/package.py b/var/spack/repos/builtin/packages/fontconfig/package.py
index 311156378a..99c9b1f15d 100644
--- a/var/spack/repos/builtin/packages/fontconfig/package.py
+++ b/var/spack/repos/builtin/packages/fontconfig/package.py
@@ -25,7 +25,7 @@
from spack import *
-class Fontconfig(Package):
+class Fontconfig(AutotoolsPackage):
"""Fontconfig customizing font access"""
homepage = "http://www.freedesktop.org/wiki/Software/fontconfig/"
url = "http://www.freedesktop.org/software/fontconfig/release/fontconfig-2.11.1.tar.gz"
@@ -36,10 +36,6 @@ class Fontconfig(Package):
depends_on('libxml2')
depends_on('pkg-config', type='build')
- def install(self, spec, prefix):
- configure("--prefix=%s" % prefix,
- "--enable-libxml2",
- "--disable-docs")
-
- make()
- make("install")
+ def configure_args(self):
+ args = ["--enable-libxml2", "--disable-docs"]
+ return args
diff --git a/var/spack/repos/builtin/packages/gcc/package.py b/var/spack/repos/builtin/packages/gcc/package.py
index be3b6cca51..18fe0d88f8 100644
--- a/var/spack/repos/builtin/packages/gcc/package.py
+++ b/var/spack/repos/builtin/packages/gcc/package.py
@@ -58,7 +58,7 @@ class Gcc(Package):
provides('golang', when='@4.7.1:')
patch('piclibs.patch', when='+piclibs')
- patch('gcc-backport.patch', when='@4.7:5.3')
+ patch('gcc-backport.patch', when='@4.7:4.9.2,5:5.3')
def install(self, spec, prefix):
# libjava/configure needs a minor fix to install into spack paths.
diff --git a/var/spack/repos/builtin/packages/graphviz/package.py b/var/spack/repos/builtin/packages/graphviz/package.py
index b37121248c..bb23513d2f 100644
--- a/var/spack/repos/builtin/packages/graphviz/package.py
+++ b/var/spack/repos/builtin/packages/graphviz/package.py
@@ -24,9 +24,10 @@
##############################################################################
from spack import *
import sys
+import shutil
-class Graphviz(Package):
+class Graphviz(AutotoolsPackage):
"""Graph Visualization Software"""
homepage = "http://www.graphviz.org"
url = "http://www.graphviz.org/pub/graphviz/stable/SOURCES/graphviz-2.38.0.tar.gz"
@@ -46,11 +47,13 @@ class Graphviz(Package):
depends_on("swig")
depends_on("python")
depends_on("ghostscript")
+ depends_on("freetype")
+ depends_on("libtool", type='build')
depends_on("pkg-config", type='build')
- def install(self, spec, prefix):
- options = ['--prefix=%s' % prefix]
- if '+perl' not in spec:
+ def configure_args(self):
+ options = []
+ if '+perl' not in self.spec:
options.append('--disable-perl')
# On OSX fix the compiler error:
@@ -59,7 +62,9 @@ class Graphviz(Package):
# include <X11/Xlib.h>
if sys.platform == 'darwin':
options.append('CFLAGS=-I/opt/X11/include')
+ options.append('--with-ltdl-lib=%s/lib' % self.spec['libtool'].prefix)
- configure(*options)
- make()
- make("install")
+ # A hack to patch config.guess in the libltdl sub directory
+ shutil.copyfile('./config/config.guess', 'libltdl/config/config.guess')
+
+ return options
diff --git a/var/spack/repos/builtin/packages/libelf/package.py b/var/spack/repos/builtin/packages/libelf/package.py
index 3304d27bdb..000b4e0957 100644
--- a/var/spack/repos/builtin/packages/libelf/package.py
+++ b/var/spack/repos/builtin/packages/libelf/package.py
@@ -25,7 +25,7 @@
from spack import *
-class Libelf(Package):
+class Libelf(AutotoolsPackage):
"""libelf lets you read, modify or create ELF object files in an
architecture-independent way. The library takes care of size
and endian issues, e.g. you can process a file for SPARC
@@ -38,13 +38,13 @@ class Libelf(Package):
version('0.8.12', 'e21f8273d9f5f6d43a59878dc274fec7')
provides('elf')
+ depends_on('automake', type='build')
- def install(self, spec, prefix):
- configure("--prefix=" + prefix,
- "--enable-shared",
- "--disable-dependency-tracking",
- "--disable-debug")
- make()
+ def configure_args(self):
+ args = ["--enable-shared",
+ "--disable-dependency-tracking",
+ "--disable-debug"]
+ return args
- # The mkdir commands in libelf's install can fail in parallel
- make("install", parallel=False)
+ def install(self, spec, prefix):
+ make('install', parallel=False)
diff --git a/var/spack/repos/builtin/packages/libiconv/package.py b/var/spack/repos/builtin/packages/libiconv/package.py
index 982929b80a..72f67ec80d 100644
--- a/var/spack/repos/builtin/packages/libiconv/package.py
+++ b/var/spack/repos/builtin/packages/libiconv/package.py
@@ -23,9 +23,10 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
+import shutil
-class Libiconv(Package):
+class Libiconv(AutotoolsPackage):
"""GNU libiconv provides an implementation of the iconv() function
and the iconv program for character set conversion."""
@@ -38,10 +39,10 @@ class Libiconv(Package):
# of C11 any more and thus might not exist.
patch("gets.patch")
- def install(self, spec, prefix):
- configure('--prefix={0}'.format(prefix),
- '--enable-extra-encodings')
+ def configure_args(self):
+ args = ['--enable-extra-encodings']
- make()
- make('check')
- make('install')
+ # A hack to patch config.guess in the libcharset sub directory
+ shutil.copyfile('./build-aux/config.guess',
+ 'libcharset/build-aux/config.guess')
+ return args
diff --git a/var/spack/repos/builtin/packages/libsplash/package.py b/var/spack/repos/builtin/packages/libsplash/package.py
index b58d37e6ae..c87dae19be 100644
--- a/var/spack/repos/builtin/packages/libsplash/package.py
+++ b/var/spack/repos/builtin/packages/libsplash/package.py
@@ -41,6 +41,7 @@ class Libsplash(Package):
git='https://github.com/ComputationalRadiationPhysics/libSplash.git')
version('master', branch='master',
git='https://github.com/ComputationalRadiationPhysics/libSplash.git')
+ version('1.6.0', 'c05bce95abfe1ae4cd9d9817acf58d94')
version('1.5.0', 'c1efec4c20334242c8a3b6bfdc0207e3')
version('1.4.0', '2de37bcef6fafa1960391bf44b1b50e0')
version('1.3.1', '524580ba088d97253d03b4611772f37c')
diff --git a/var/spack/repos/builtin/packages/libtiff/package.py b/var/spack/repos/builtin/packages/libtiff/package.py
index 6c282dee7c..70c371b3b8 100644
--- a/var/spack/repos/builtin/packages/libtiff/package.py
+++ b/var/spack/repos/builtin/packages/libtiff/package.py
@@ -25,7 +25,7 @@
from spack import *
-class Libtiff(Package):
+class Libtiff(AutotoolsPackage):
"""libtiff graphics format library"""
homepage = "http://www.simplesystems.org/libtiff/"
url = "ftp://download.osgeo.org/libtiff/tiff-4.0.3.tar.gz"
@@ -36,9 +36,3 @@ class Libtiff(Package):
depends_on('jpeg')
depends_on('zlib')
depends_on('xz')
-
- def install(self, spec, prefix):
- configure("--prefix=%s" % prefix)
-
- make()
- make("install")
diff --git a/var/spack/repos/builtin/packages/lzma/package.py b/var/spack/repos/builtin/packages/lzma/package.py
index 23d697ffe8..3eb97a2d9f 100644
--- a/var/spack/repos/builtin/packages/lzma/package.py
+++ b/var/spack/repos/builtin/packages/lzma/package.py
@@ -25,7 +25,7 @@
from spack import *
-class Lzma(Package):
+class Lzma(AutotoolsPackage):
"""LZMA Utils are legacy data compression software with high compression
ratio. LZMA Utils are no longer developed, although critical bugs may be
fixed as long as fixing them doesn't require huge changes to the code.
@@ -39,11 +39,3 @@ class Lzma(Package):
url = "http://tukaani.org/lzma/lzma-4.32.7.tar.gz"
version('4.32.7', '2a748b77a2f8c3cbc322dbd0b4c9d06a')
-
- def install(self, spec, prefix):
- configure('--prefix={0}'.format(prefix))
-
- make()
- if self.run_tests:
- make('check') # one of the tests fails for me
- make('install')
diff --git a/var/spack/repos/builtin/packages/matio/package.py b/var/spack/repos/builtin/packages/matio/package.py
index c141f7e8af..a33b23a4e9 100644
--- a/var/spack/repos/builtin/packages/matio/package.py
+++ b/var/spack/repos/builtin/packages/matio/package.py
@@ -25,15 +25,26 @@
from spack import *
-class Matio(Package):
+class Matio(AutotoolsPackage):
"""matio is an C library for reading and writing Matlab MAT files"""
homepage = "http://sourceforge.net/projects/matio/"
- url = "http://downloads.sourceforge.net/project/matio/matio/1.5.2/matio-1.5.2.tar.gz"
+ url = "http://downloads.sourceforge.net/project/matio/matio/1.5.9/matio-1.5.9.tar.gz"
+ version('1.5.9', 'aab5b4219a3c0262afe7eeb7bdd2f463')
version('1.5.2', '85b007b99916c63791f28398f6a4c6f1')
- def install(self, spec, prefix):
- configure('--prefix=%s' % prefix)
+ variant("zlib", default=True,
+ description='support for compressed mat files')
+ variant("hdf5", default=True,
+ description='support for version 7.3 mat files via hdf5')
- make()
- make("install")
+ depends_on("zlib", when="+zlib")
+ depends_on("hdf5", when="+hdf5")
+
+ def configure_args(self):
+ args = []
+ if '+zlib' in self.spec:
+ args.append("--with-zlib=%s" % self.spec['zlib'].prefix)
+ if '+hdf5' in self.spec:
+ args.append("--with-hdf5=%s" % self.spec['hdf5'].prefix)
+ return args
diff --git a/var/spack/repos/builtin/packages/mpich/package.py b/var/spack/repos/builtin/packages/mpich/package.py
index e4ff29c00a..958fbe762c 100644
--- a/var/spack/repos/builtin/packages/mpich/package.py
+++ b/var/spack/repos/builtin/packages/mpich/package.py
@@ -26,12 +26,12 @@ from spack import *
import os
-class Mpich(Package):
+class Mpich(AutotoolsPackage):
"""MPICH is a high performance and widely portable implementation of
the Message Passing Interface (MPI) standard."""
homepage = "http://www.mpich.org"
- url = "http://www.mpich.org/static/downloads/3.0.4/mpich-3.0.4.tar.gz"
+ url = "http://www.mpich.org/static/downloads/3.0.4/mpich-3.0.4.tar.gz"
list_url = "http://www.mpich.org/static/downloads/"
list_depth = 2
@@ -81,16 +81,19 @@ class Mpich(Package):
join_path(self.prefix.lib, 'libmpi.{0}'.format(dso_suffix))
]
- def install(self, spec, prefix):
+ @AutotoolsPackage.precondition('autoreconf')
+ def die_without_fortran(self):
# Until we can pass variants such as +fortran through virtual
# dependencies depends_on('mpi'), require Fortran compiler to
# avoid delayed build errors in dependents.
if (self.compiler.f77 is None) or (self.compiler.fc is None):
- raise InstallError('Mpich requires both C and Fortran ',
- 'compilers!')
+ raise InstallError(
+ 'Mpich requires both C and Fortran compilers!'
+ )
- config_args = [
- '--prefix={0}'.format(prefix),
+ def configure_args(self):
+ spec = self.spec
+ return [
'--enable-shared',
'--with-pm={0}'.format('hydra' if '+hydra' in spec else 'no'),
'--with-pmi={0}'.format('yes' if '+pmi' in spec else 'no'),
@@ -98,27 +101,8 @@ class Mpich(Package):
'--{0}-ibverbs'.format('with' if '+verbs' in spec else 'without')
]
- # TODO: Spack should make it so that you can't actually find
- # these compilers if they're "disabled" for the current
- # compiler configuration.
- if not self.compiler.f77:
- config_args.append("--disable-f77")
-
- if not self.compiler.fc:
- config_args.append("--disable-fc")
-
- if not self.compiler.fc and not self.compiler.f77:
- config_args.append("--disable-fortran")
-
- configure(*config_args)
-
- make()
- make('check')
- make('install')
-
- self.filter_compilers(prefix)
-
- def filter_compilers(self, prefix):
+ @AutotoolsPackage.sanity_check('install')
+ def filter_compilers(self):
"""Run after install to make the MPI compilers use the
compilers that Spack built the package with.
@@ -126,14 +110,18 @@ class Mpich(Package):
to Spack's generic cc, c++, f77, and f90. We want them to
be bound to whatever compiler they were built with."""
- mpicc = join_path(prefix.bin, 'mpicc')
- mpicxx = join_path(prefix.bin, 'mpicxx')
- mpif77 = join_path(prefix.bin, 'mpif77')
- mpif90 = join_path(prefix.bin, 'mpif90')
+ mpicc = join_path(self.prefix.bin, 'mpicc')
+ mpicxx = join_path(self.prefix.bin, 'mpicxx')
+ mpif77 = join_path(self.prefix.bin, 'mpif77')
+ mpif90 = join_path(self.prefix.bin, 'mpif90')
# Substitute Spack compile wrappers for the real
# underlying compiler
- kwargs = {'ignore_absent': True, 'backup': False, 'string': True}
+ kwargs = {
+ 'ignore_absent': True,
+ 'backup': False,
+ 'string': True
+ }
filter_file(env['CC'], self.compiler.cc, mpicc, **kwargs)
filter_file(env['CXX'], self.compiler.cxx, mpicxx, **kwargs)
filter_file(env['F77'], self.compiler.f77, mpif77, **kwargs)
diff --git a/var/spack/repos/builtin/packages/ncl/package.py b/var/spack/repos/builtin/packages/ncl/package.py
new file mode 100644
index 0000000000..7d31c7a8f7
--- /dev/null
+++ b/var/spack/repos/builtin/packages/ncl/package.py
@@ -0,0 +1,233 @@
+##############################################################################
+# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License (as
+# published by the Free Software Foundation) version 2.1, February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+from spack import *
+import os
+import shutil
+import tempfile
+
+
+class Ncl(Package):
+ """NCL is an interpreted language designed specifically for
+ scientific data analysis and visualization. Supports NetCDF 3/4,
+ GRIB 1/2, HDF 4/5, HDF-EOD 2/5, shapefile, ASCII, binary.
+ Numerous analysis functions are built-in."""
+
+ homepage = "https://www.ncl.ucar.edu"
+
+ version('6.3.0', '4834df63d3b56778441246303ab921c4',
+ url='https://www.earthsystemgrid.org/download/fileDownload.html?'
+ 'logicalFileId=bec58cb3-cd9b-11e4-bb80-00c0f03d5b7c',
+ extension='tar.gz')
+ patch('spack_ncl.patch')
+
+ # This installation script is implemented according to this manual:
+ # http://www.ncl.ucar.edu/Download/build_from_src.shtml
+
+ variant('hdf4', default=False, description='Enable HDF4 support.')
+ variant('gdal', default=False, description='Enable GDAL support.')
+ variant('triangle', default=True, description='Enable Triangle support.')
+ variant('udunits2', default=True, description='Enable UDUNITS-2 support.')
+ variant('openmp', default=True, description='Enable OpenMP support.')
+
+ # Non-optional dependencies according to the manual:
+ depends_on('jpeg')
+ depends_on('netcdf')
+ depends_on('cairo')
+
+ # Also, the manual says that ncl requires zlib, but that comes as a
+ # mandatory dependency of libpng, which is a mandatory dependency of cairo.
+
+ # In Spack, we do not have an option to compile netcdf without netcdf-4
+ # support, so we will tell the ncl configuration script that we want
+ # support for netcdf-4, but the script assumes that hdf5 is compiled with
+ # szip support. We introduce this restriction with the following dependency
+ # statement.
+ depends_on('hdf5@:1.8+szip')
+
+ # In Spack, we also do not have an option to compile netcdf without DAP
+ # support, so we will tell the ncl configuration script that we have it.
+
+ # Some of the optional dependencies according to the manual:
+ depends_on('hdf', when='+hdf4')
+ depends_on('gdal', when='+gdal')
+ depends_on('udunits2', when='+udunits2')
+
+ # We need src files of triangle to appear in ncl's src tree if we want
+ # triangle's features.
+ resource(
+ name='triangle',
+ url='http://www.netlib.org/voronoi/triangle.zip',
+ md5='10aff8d7950f5e0e2fb6dd2e340be2c9',
+ placement='triangle_src',
+ when='+triangle')
+
+ def install(self, spec, prefix):
+
+ if (self.compiler.fc is None) or (self.compiler.cc is None):
+ raise InstallError('NCL package requires both '
+ 'C and Fortran compilers.')
+
+ self.prepare_site_config()
+ self.prepare_install_config()
+ self.prepare_src_tree()
+ make('Everything', parallel=False)
+
+ def setup_environment(self, spack_env, run_env):
+ run_env.set('NCARG_ROOT', self.spec.prefix)
+
+ def prepare_site_config(self):
+ fc_flags = []
+ cc_flags = []
+ c2f_flags = []
+
+ if '+openmp' in self.spec:
+ fc_flags.append(self.compiler.openmp_flag)
+ cc_flags.append(self.compiler.openmp_flag)
+
+ if self.compiler.name == 'gcc':
+ fc_flags.append('-fno-range-check')
+ c2f_flags.extend(['-lgfortran'])
+ elif self.compiler.name == 'intel':
+ fc_flags.append('-fp-model precise')
+ cc_flags.append('-fp-model precise')
+ c2f_flags.extend(['-lifcore', '-lifport'])
+
+ with open('./config/Spack', 'w') as f:
+ f.writelines([
+ '#define HdfDefines\n',
+ '#define CppCommand \'/usr/bin/env cpp -traditional\'\n',
+ '#define CCompiler cc\n',
+ '#define FCompiler fc\n',
+ ('#define CtoFLibraries ' + ' '.join(c2f_flags) + '\n'
+ if len(c2f_flags) > 0
+ else ''),
+ ('#define CtoFLibrariesUser ' + ' '.join(c2f_flags) + '\n'
+ if len(c2f_flags) > 0
+ else ''),
+ ('#define CcOptions ' + ' '.join(cc_flags) + '\n'
+ if len(cc_flags) > 0
+ else ''),
+ ('#define FcOptions ' + ' '.join(fc_flags) + '\n'
+ if len(fc_flags) > 0
+ else ''),
+ '#define BuildShared NO'
+ ])
+
+ def prepare_install_config(self):
+ # Remove the results of the previous configuration attempts.
+ self.delete_files('./Makefile', './config/Site.local')
+
+ # Generate an array of answers that will be passed to the interactive
+ # configuration script.
+ config_answers = [
+ # Enter Return to continue
+ '\n',
+ # Build NCL?
+ 'y\n',
+ # Parent installation directory :
+ '\'' + self.spec.prefix + '\'\n',
+ # System temp space directory :
+ '\'' + tempfile.mkdtemp(prefix='ncl_ncar_') + '\'\n',
+ # Build NetCDF4 feature support (optional)?
+ 'y\n'
+ ]
+
+ if '+hdf4' in self.spec:
+ config_answers.extend([
+ # Build HDF4 support (optional) into NCL?
+ 'y\n',
+ # Also build HDF4 support (optional) into raster library?
+ 'y\n',
+ # Did you build HDF4 with szip support?
+ 'y\n' if self.spec.satisfies('^hdf+szip') else 'n\n'
+ ])
+ else:
+ config_answers.extend([
+ # Build HDF4 support (optional) into NCL?
+ 'n\n',
+ # Also build HDF4 support (optional) into raster library?
+ 'n\n'
+ ])
+
+ config_answers.extend([
+ # Build Triangle support (optional) into NCL
+ 'y\n' if '+triangle' in self.spec else 'n\n',
+ # If you are using NetCDF V4.x, did you enable NetCDF-4 support?
+ 'y\n',
+ # Did you build NetCDF with OPeNDAP support?
+ 'y\n',
+ # Build GDAL support (optional) into NCL?
+ 'y\n' if '+gdal' in self.spec else 'n\n',
+ # Build Udunits-2 support (optional) into NCL?
+ 'y\n' if '+uduints2' in self.spec else 'n\n',
+ # Build Vis5d+ support (optional) into NCL?
+ 'n\n',
+ # Build HDF-EOS2 support (optional) into NCL?
+ 'n\n',
+ # Build HDF5 support (optional) into NCL?
+ 'y\n',
+ # Build HDF-EOS5 support (optional) into NCL?
+ 'n\n',
+ # Build GRIB2 support (optional) into NCL?
+ 'n\n',
+ # Enter local library search path(s) :
+ # The paths will be passed by the Spack wrapper.
+ ' \n',
+ # Enter local include search path(s) :
+ # All other paths will be passed by the Spack wrapper.
+ '\'' + join_path(self.spec['freetype'].prefix.include,
+ 'freetype2') + '\'\n',
+ # Go back and make more changes or review?
+ 'n\n',
+ # Save current configuration?
+ 'y\n'
+ ])
+
+ config_answers_filename = 'spack-config.in'
+ config_script = Executable('./Configure')
+
+ with open(config_answers_filename, 'w') as f:
+ f.writelines(config_answers)
+
+ with open(config_answers_filename, 'r') as f:
+ config_script(input=f)
+
+ def prepare_src_tree(self):
+ if '+triangle' in self.spec:
+ triangle_src = join_path(self.stage.source_path, 'triangle_src')
+ triangle_dst = join_path(self.stage.source_path, 'ni', 'src',
+ 'lib', 'hlu')
+ shutil.copy(join_path(triangle_src, 'triangle.h'), triangle_dst)
+ shutil.copy(join_path(triangle_src, 'triangle.c'), triangle_dst)
+
+ @staticmethod
+ def delete_files(*filenames):
+ for filename in filenames:
+ if os.path.exists(filename):
+ try:
+ os.remove(filename)
+ except OSError, e:
+ raise InstallError('Failed to delete file %s: %s' % (
+ e.filename, e.strerror))
diff --git a/var/spack/repos/builtin/packages/ncl/spack_ncl.patch b/var/spack/repos/builtin/packages/ncl/spack_ncl.patch
new file mode 100644
index 0000000000..ebbecc43ba
--- /dev/null
+++ b/var/spack/repos/builtin/packages/ncl/spack_ncl.patch
@@ -0,0 +1,30 @@
+--- a/config/ymake 2015-03-16 22:21:42.000000000 +0100
++++ b/config/ymake 2016-10-14 13:44:49.530646098 +0200
+@@ -537,0 +538,3 @@
++# We want to have our own definitions for spack
++set sysincs = Spack
++
+--- a/Configure 2015-03-16 22:22:17.000000000 +0100
++++ b/Configure 2016-10-14 13:49:42.157631106 +0200
+@@ -1137,5 +1137,13 @@
+- if (! -d $incs[1]) then
+- echo " *** Warning: <$incs[1]> does not exist"
+- echo ""
+- goto proc_locincdir
+- else
++
++ # We don't want our path(s) to be preprocessed by cpp
++ # inside ymake script. That is why we pass them in quotes (')
++ # to this script. But if we do so, the following condition
++ # is always false. That is why we comment it out and promise
++ # to pass only correct path(s). You might want to do the same
++ # thing for the libraries search path(s).
++
++ # if (! -d $incs[1]) then
++ # echo " *** Warning: <$incs[1]> does not exist"
++ # echo ""
++ # goto proc_locincdir
++ # else
+@@ -1143 +1151 @@
+- endif
++ # endif
diff --git a/var/spack/repos/builtin/packages/nmap/package.py b/var/spack/repos/builtin/packages/nmap/package.py
new file mode 100644
index 0000000000..f4576cde53
--- /dev/null
+++ b/var/spack/repos/builtin/packages/nmap/package.py
@@ -0,0 +1,37 @@
+##############################################################################
+# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License (as
+# published by the Free Software Foundation) version 2.1, February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+from spack import *
+
+
+class Nmap(AutotoolsPackage):
+ """Nmap ("Network Mapper") is a free and open source (license)
+ utility for network discovery and security auditing.
+ It also provides ncat an updated nc"""
+
+ homepage = "https://nmap.org"
+ url = "https://nmap.org/dist/nmap-7.31.tar.bz2"
+
+ version('7.31', 'f2f6660142a777862342a58cc54258ea')
+ version('7.30', '8d86797d5c9e56de571f9630c0e6b5f8')
diff --git a/var/spack/repos/builtin/packages/opencoarrays/package.py b/var/spack/repos/builtin/packages/opencoarrays/package.py
index d9760e2afc..eb76960024 100644
--- a/var/spack/repos/builtin/packages/opencoarrays/package.py
+++ b/var/spack/repos/builtin/packages/opencoarrays/package.py
@@ -34,8 +34,9 @@ class Opencoarrays(CMakePackage):
"""
homepage = "http://www.opencoarrays.org/"
- url = "https://github.com/sourceryinstitute/opencoarrays/releases/download/1.6.2/OpenCoarrays-1.6.2.tar.gz"
+ url = "https://github.com/sourceryinstitute/opencoarrays/releases/download/1.7.4/OpenCoarrays-1.7.4.tar.gz"
+ version('1.7.4', '85ba87def461e3ff5a164de2e6482930')
version('1.6.2', '5a4da993794f3e04ea7855a6678981ba')
depends_on('cmake', type='build')
@@ -43,11 +44,8 @@ class Opencoarrays(CMakePackage):
provides('coarrays')
- def install(self, spec, prefix):
- with working_dir('spack-build', create=True):
- args = std_cmake_args
- args.append("-DCMAKE_C_COMPILER=%s" % self.spec['mpi'].mpicc)
- args.append("-DCMAKE_Fortran_COMPILER=%s" % self.spec['mpi'].mpifc)
- cmake('..', *args)
- make()
- make("install")
+ def cmake_args(self):
+ args = []
+ args.append("-DCMAKE_C_COMPILER=%s" % self.spec['mpi'].mpicc)
+ args.append("-DCMAKE_Fortran_COMPILER=%s" % self.spec['mpi'].mpifc)
+ return args
diff --git a/var/spack/repos/builtin/packages/pfft/package.py b/var/spack/repos/builtin/packages/pfft/package.py
new file mode 100644
index 0000000000..575f0af3c5
--- /dev/null
+++ b/var/spack/repos/builtin/packages/pfft/package.py
@@ -0,0 +1,64 @@
+##############################################################################
+# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License (as
+# published by the Free Software Foundation) version 2.1, February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+from spack import *
+
+
+class Pfft(AutotoolsPackage):
+ """PFFT is a software library for computing massively parallel,
+ fast Fourier transformations on distributed memory architectures.
+ PFFT can be understood as a generalization of FFTW-MPI to
+ multidimensional data decomposition."""
+
+ homepage = "https://www-user.tu-chemnitz.de/~potts/workgroup/pippig/software.php.en"
+ url = "https://www-user.tu-chemnitz.de/~potts/workgroup/pippig/software/pfft-1.0.8-alpha.tar.gz"
+
+ version('1.0.8-alpha', '46457fbe8e38d02ff87d439b63dc0709')
+
+ depends_on('fftw+mpi+pfft_patches')
+ depends_on('mpi')
+
+ def install(self, spec, prefix):
+ options = ['--prefix={0}'.format(prefix)]
+ if not self.compiler.f77 or not self.compiler.fc:
+ options.append("--disable-fortran")
+
+ configure(*options)
+ make()
+ if self.run_tests:
+ make("check")
+ make("install")
+
+ if '+float' in spec['fftw']:
+ configure('--enable-float', *options)
+ make()
+ if self.run_tests:
+ make("check")
+ make("install")
+ if '+long_double' in spec['fftw']:
+ configure('--enable-long-double', *options)
+ make()
+ if self.run_tests:
+ make("check")
+ make("install")
diff --git a/var/spack/repos/builtin/packages/py-netcdf/package.py b/var/spack/repos/builtin/packages/py-netcdf/package.py
index 497f81f86d..d238855d1e 100644
--- a/var/spack/repos/builtin/packages/py-netcdf/package.py
+++ b/var/spack/repos/builtin/packages/py-netcdf/package.py
@@ -35,6 +35,7 @@ class PyNetcdf(Package):
extends('python')
depends_on('py-numpy', type=nolink)
depends_on('py-cython', type=nolink)
+ depends_on('py-setuptools', type=nolink)
depends_on('netcdf')
def install(self, spec, prefix):
diff --git a/var/spack/repos/builtin/packages/py-pygobject/package.py b/var/spack/repos/builtin/packages/py-pygobject/package.py
index 3af849e758..675eb8f004 100644
--- a/var/spack/repos/builtin/packages/py-pygobject/package.py
+++ b/var/spack/repos/builtin/packages/py-pygobject/package.py
@@ -25,7 +25,7 @@
from spack import *
-class PyPygobject(Package):
+class PyPygobject(AutotoolsPackage):
"""bindings for the GLib, and GObject,
to be used in Python."""
@@ -43,6 +43,4 @@ class PyPygobject(Package):
patch('pygobject-2.28.6-introspection-1.patch')
def install(self, spec, prefix):
- configure("--prefix=%s" % prefix)
- make()
- make("install", parallel=False)
+ make('install', parallel=False)
diff --git a/var/spack/repos/builtin/packages/py-pygtk/package.py b/var/spack/repos/builtin/packages/py-pygtk/package.py
index ab0a139f02..56e0b39fd5 100644
--- a/var/spack/repos/builtin/packages/py-pygtk/package.py
+++ b/var/spack/repos/builtin/packages/py-pygtk/package.py
@@ -25,7 +25,7 @@
from spack import *
-class PyPygtk(Package):
+class PyPygtk(AutotoolsPackage):
"""bindings for the Gtk in Python"""
homepage = "http://www.pygtk.org/"
url = "http://ftp.gnome.org/pub/GNOME/sources/pygtk/2.24/pygtk-2.24.0.tar.gz"
@@ -41,6 +41,4 @@ class PyPygtk(Package):
depends_on('py-py2cairo')
def install(self, spec, prefix):
- configure("--prefix=%s" % prefix)
- make()
- make("install", parallel=False)
+ make('install', parallel=False)
diff --git a/var/spack/repos/builtin/packages/star-ccm-plus/package.py b/var/spack/repos/builtin/packages/star-ccm-plus/package.py
new file mode 100644
index 0000000000..ba1516b62a
--- /dev/null
+++ b/var/spack/repos/builtin/packages/star-ccm-plus/package.py
@@ -0,0 +1,78 @@
+##############################################################################
+# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License (as
+# published by the Free Software Foundation) version 2.1, February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+from spack import *
+import glob
+import os
+
+
+class StarCcmPlus(Package):
+ """STAR-CCM+ (Computational Continuum Mechanics) CFD solver."""
+
+ homepage = "http://mdx.plm.automation.siemens.com/star-ccm-plus"
+
+ version('11.06.010_02', 'd349c6ac8293d8e6e7a53533d695588f')
+
+ variant('docs', default=False, description='Install the documentation')
+
+ # Licensing
+ license_required = True
+ license_vars = ['CDLMD_LICENSE_FILE', 'LM_LICENSE_FILE']
+
+ def url_for_version(self, version):
+ return "file://{0}/STAR-CCM+{1}_linux-x86_64.tar.gz".format(
+ os.getcwd(), version)
+
+ def install(self, spec, prefix):
+ # There is a known issue with the LaunchAnywhere application.
+ # Specifically, it cannot handle long prompts or prompts
+ # containing special characters and backslashes. It results in
+ # the following error message:
+ #
+ # An internal LaunchAnywhere application error has occured and this
+ # application cannot proceed. (LAX)
+ #
+ # Stack Trace:
+ # java.lang.IllegalArgumentException: Malformed \uxxxx encoding.
+ # at java.util.Properties.loadConvert(Unknown Source)
+ # at java.util.Properties.load0(Unknown Source)
+ # at java.util.Properties.load(Unknown Source)
+ # at com.zerog.common.java.util.PropertiesUtil.loadProperties(
+ # Unknown Source)
+ # at com.zerog.lax.LAX.<init>(Unknown Source)
+ # at com.zerog.lax.LAX.main(Unknown Source)
+ #
+ # https://www.maplesoft.com/support/faqs/detail.aspx?sid=35272
+ env['PS1'] = '>'
+ env['PROMPT_COMMAND'] = ''
+
+ installer = Executable(glob.glob('*.bin')[0])
+
+ installer(
+ '-i', 'silent',
+ '-DINSTALLDIR={0}'.format(prefix),
+ '-DINSTALLFLEX=false',
+ '-DADDSYSTEMPATH=false',
+ '-DNODOC={0}'.format('false' if '+docs' in spec else 'true')
+ )
diff --git a/var/spack/repos/builtin/packages/tau/package.py b/var/spack/repos/builtin/packages/tau/package.py
index 1801b41c37..d6b0a98d67 100644
--- a/var/spack/repos/builtin/packages/tau/package.py
+++ b/var/spack/repos/builtin/packages/tau/package.py
@@ -24,7 +24,7 @@
##############################################################################
from spack import *
import os
-import os.path
+import glob
from llnl.util.filesystem import join_path
@@ -146,3 +146,8 @@ class Tau(Package):
dest = join_path(self.prefix, d)
if os.path.isdir(src) and not os.path.exists(dest):
os.symlink(join_path(subdir, d), dest)
+
+ def setup_environment(self, spack_env, run_env):
+ pattern = join_path(self.prefix.lib, 'Makefile.*')
+ files = glob.glob(pattern)
+ run_env.set('TAU_MAKEFILE', files[0])
diff --git a/var/spack/repos/builtin/packages/texlive/package.py b/var/spack/repos/builtin/packages/texlive/package.py
index c9c677e2b1..36b3fad2f6 100644
--- a/var/spack/repos/builtin/packages/texlive/package.py
+++ b/var/spack/repos/builtin/packages/texlive/package.py
@@ -32,8 +32,10 @@ class Texlive(Package):
homepage = "http://www.tug.org/texlive"
- version('live', '8402774984c67fed4a18b7f6491243a6',
- url="http://mirror.ctan.org/systems/texlive/tlnet/install-tl-unx.tar.gz")
+ # pull from specific site because the texlive mirrors do not all
+ # update in synchrony.
+ version('live', '6d171d370f3a2f2b936b9b0c87e8d0fe',
+ url="http://ctan.math.utah.edu/ctan/tex-archive/systems/texlive/tlnet/install-tl-unx.tar.gz")
# There does not seem to be a complete list of schemes.
# Examples include:
diff --git a/var/spack/repos/builtin/packages/trilinos/package.py b/var/spack/repos/builtin/packages/trilinos/package.py
index 4eb50ba64d..07393f9e9b 100644
--- a/var/spack/repos/builtin/packages/trilinos/package.py
+++ b/var/spack/repos/builtin/packages/trilinos/package.py
@@ -36,7 +36,7 @@ import sys
# https://github.com/trilinos/Trilinos/issues/175
-class Trilinos(Package):
+class Trilinos(CMakePackage):
"""The Trilinos Project is an effort to develop algorithms and enabling
technologies within an object-oriented software framework for the solution
of large-scale, complex multi-physics engineering and scientific problems.
@@ -124,12 +124,12 @@ class Trilinos(Package):
raise RuntimeError('The superlu-dist variant can only be used' +
' with Trilinos @12.0.1:')
- def install(self, spec, prefix):
+ def cmake_args(self):
+ spec = self.spec
self.variants_check()
cxx_flags = []
options = []
- options.extend(std_cmake_args)
mpi_bin = spec['mpi'].prefix.bin
# Note: -DXYZ_LIBRARY_NAMES= needs semicolon separated list of names
@@ -157,7 +157,8 @@ class Trilinos(Package):
'-DTrilinos_ENABLE_CXX11:BOOL=ON',
'-DTPL_ENABLE_Netcdf:BOOL=ON',
'-DTPL_ENABLE_HYPRE:BOOL=%s' % (
- 'ON' if '+hypre' in spec else 'OFF')
+ 'ON' if '+hypre' in spec else 'OFF'),
+ '-DCMAKE_INSTALL_NAME_DIR:PATH=%s/lib' % self.prefix
])
if spec.satisfies('%intel') and spec.satisfies('@12.6.2'):
@@ -206,11 +207,6 @@ class Trilinos(Package):
'-DTrilinos_ENABLE_Fortran=ON'
])
- # for build-debug only:
- # options.extend([
- # '-DCMAKE_VERBOSE_MAKEFILE:BOOL=TRUE'
- # ])
-
# suite-sparse related
if '+suite-sparse' in spec:
options.extend([
@@ -330,27 +326,20 @@ class Trilinos(Package):
options.extend([
'-DTrilinos_ENABLE_FEI=OFF'
])
+ return options
- with working_dir('spack-build', create=True):
- cmake('..', *options)
- make()
- make('install')
-
- # When trilinos is built with Python, libpytrilinos is included
- # through cmake configure files. Namely, Trilinos_LIBRARIES in
- # TrilinosConfig.cmake contains pytrilinos. This leads to a
- # run-time error: Symbol not found: _PyBool_Type and prevents
- # Trilinos to be used in any C++ code, which links executable
- # against the libraries listed in Trilinos_LIBRARIES. See
- # https://github.com/Homebrew/homebrew-science/issues/2148#issuecomment-103614509
- # A workaround it to remove PyTrilinos from the COMPONENTS_LIST :
- if '+python' in self.spec:
- filter_file(r'(SET\(COMPONENTS_LIST.*)(PyTrilinos;)(.*)',
- (r'\1\3'),
- '%s/cmake/Trilinos/TrilinosConfig.cmake' %
- prefix.lib)
-
- # The shared libraries are not installed correctly on Darwin;
- # correct this
- if (sys.platform == 'darwin') and ('+shared' in spec):
- fix_darwin_install_name(prefix.lib)
+ @CMakePackage.sanity_check('install')
+ def filter_python(self):
+ # When trilinos is built with Python, libpytrilinos is included
+ # through cmake configure files. Namely, Trilinos_LIBRARIES in
+ # TrilinosConfig.cmake contains pytrilinos. This leads to a
+ # run-time error: Symbol not found: _PyBool_Type and prevents
+ # Trilinos to be used in any C++ code, which links executable
+ # against the libraries listed in Trilinos_LIBRARIES. See
+ # https://github.com/Homebrew/homebrew-science/issues/2148#issuecomment-103614509
+ # A workaround is to remove PyTrilinos from the COMPONENTS_LIST :
+ if '+python' in self.spec:
+ filter_file(r'(SET\(COMPONENTS_LIST.*)(PyTrilinos;)(.*)',
+ (r'\1\3'),
+ '%s/cmake/Trilinos/TrilinosConfig.cmake' %
+ self.prefix.lib)