author     Gregory Becker <becker33@llnl.gov>    2016-04-27 19:38:51 -0700
committer  Gregory Becker <becker33@llnl.gov>    2016-04-27 19:38:51 -0700
commit     ae5198e5e765c64fea666753905f5162f2300181 (patch)
tree       7d58b63d19128edeaa6cc2f6fe915f11a5690471 /lib
parent     8e54babf104985a8db0df3fbb2f3d0f134342571 (diff)
parent     dd84a575807636159d80809f59962cf799b83fd7 (diff)
Merged in current develop to cflags 042716
Diffstat (limited to 'lib')
-rw-r--r--lib/spack/docs/basic_usage.rst109
-rw-r--r--lib/spack/docs/conf.py3
-rw-r--r--lib/spack/docs/developer_guide.rst15
-rw-r--r--lib/spack/docs/exts/sphinxcontrib/__init__.py2
-rw-r--r--lib/spack/docs/exts/sphinxcontrib/programoutput.py2
-rw-r--r--lib/spack/docs/features.rst2
-rw-r--r--lib/spack/docs/getting_started.rst6
-rw-r--r--lib/spack/docs/index.rst6
-rw-r--r--lib/spack/docs/mirrors.rst15
-rw-r--r--lib/spack/docs/packaging_guide.rst199
-rw-r--r--lib/spack/docs/site_configuration.rst148
-rwxr-xr-xlib/spack/env/cc347
l---------lib/spack/env/clang/clang1
l---------lib/spack/env/clang/clang++1
l---------lib/spack/env/gcc/g++1
l---------lib/spack/env/gcc/gcc1
l---------lib/spack/env/gcc/gfortran1
l---------lib/spack/env/intel/icc1
l---------lib/spack/env/intel/icpc1
l---------lib/spack/env/intel/ifort1
l---------lib/spack/env/nag/nagfor1
l---------lib/spack/env/pgi/pgc++1
l---------lib/spack/env/pgi/pgcc1
l---------lib/spack/env/pgi/pgfortran1
l---------lib/spack/env/xl/xlc1
l---------lib/spack/env/xl/xlc++1
l---------lib/spack/env/xl/xlf1
l---------lib/spack/env/xl/xlf901
-rw-r--r--lib/spack/external/__init__.py2
-rw-r--r--lib/spack/external/argparse.py22
-rw-r--r--lib/spack/external/functools_backport.py (renamed from lib/spack/external/functools.py)0
-rw-r--r--lib/spack/external/jsonschema/COPYING19
-rw-r--r--lib/spack/external/jsonschema/README.rst104
-rw-r--r--lib/spack/external/jsonschema/__init__.py26
-rw-r--r--lib/spack/external/jsonschema/__main__.py2
-rw-r--r--lib/spack/external/jsonschema/_format.py240
-rw-r--r--lib/spack/external/jsonschema/_reflect.py155
-rw-r--r--lib/spack/external/jsonschema/_utils.py213
-rw-r--r--lib/spack/external/jsonschema/_validators.py358
-rw-r--r--lib/spack/external/jsonschema/cli.py72
-rw-r--r--lib/spack/external/jsonschema/compat.py53
-rw-r--r--lib/spack/external/jsonschema/exceptions.py264
-rw-r--r--lib/spack/external/jsonschema/schemas/draft3.json201
-rw-r--r--lib/spack/external/jsonschema/schemas/draft4.json221
-rw-r--r--lib/spack/external/jsonschema/tests/__init__.py0
-rw-r--r--lib/spack/external/jsonschema/tests/compat.py15
-rw-r--r--lib/spack/external/jsonschema/tests/test_cli.py110
-rw-r--r--lib/spack/external/jsonschema/tests/test_exceptions.py382
-rw-r--r--lib/spack/external/jsonschema/tests/test_format.py63
-rw-r--r--lib/spack/external/jsonschema/tests/test_jsonschema_test_suite.py290
-rw-r--r--lib/spack/external/jsonschema/tests/test_validators.py786
-rw-r--r--lib/spack/external/jsonschema/validators.py428
-rw-r--r--lib/spack/external/nose/LICENSE504
-rw-r--r--lib/spack/external/nose/__init__.py15
-rw-r--r--lib/spack/external/nose/__main__.py8
-rw-r--r--lib/spack/external/nose/case.py397
-rw-r--r--lib/spack/external/nose/commands.py172
-rw-r--r--lib/spack/external/nose/config.py661
-rw-r--r--lib/spack/external/nose/core.py341
-rw-r--r--lib/spack/external/nose/exc.py9
-rw-r--r--lib/spack/external/nose/ext/__init__.py3
-rw-r--r--lib/spack/external/nose/ext/dtcompat.py2272
-rw-r--r--lib/spack/external/nose/failure.py42
-rw-r--r--lib/spack/external/nose/importer.py167
-rw-r--r--lib/spack/external/nose/inspector.py207
-rw-r--r--lib/spack/external/nose/loader.py623
-rw-r--r--lib/spack/external/nose/plugins/__init__.py190
-rw-r--r--lib/spack/external/nose/plugins/allmodules.py45
-rw-r--r--lib/spack/external/nose/plugins/attrib.py286
-rw-r--r--lib/spack/external/nose/plugins/base.py725
-rw-r--r--lib/spack/external/nose/plugins/builtin.py34
-rw-r--r--lib/spack/external/nose/plugins/capture.py115
-rw-r--r--lib/spack/external/nose/plugins/collect.py94
-rw-r--r--lib/spack/external/nose/plugins/cover.py271
-rw-r--r--lib/spack/external/nose/plugins/debug.py67
-rw-r--r--lib/spack/external/nose/plugins/deprecated.py45
-rw-r--r--lib/spack/external/nose/plugins/doctests.py455
-rw-r--r--lib/spack/external/nose/plugins/errorclass.py210
-rw-r--r--lib/spack/external/nose/plugins/failuredetail.py49
-rw-r--r--lib/spack/external/nose/plugins/isolate.py103
-rw-r--r--lib/spack/external/nose/plugins/logcapture.py245
-rw-r--r--lib/spack/external/nose/plugins/manager.py460
-rw-r--r--lib/spack/external/nose/plugins/multiprocess.py835
-rw-r--r--lib/spack/external/nose/plugins/plugintest.py416
-rw-r--r--lib/spack/external/nose/plugins/prof.py154
-rw-r--r--lib/spack/external/nose/plugins/skip.py63
-rw-r--r--lib/spack/external/nose/plugins/testid.py311
-rw-r--r--lib/spack/external/nose/plugins/xunit.py341
-rw-r--r--lib/spack/external/nose/proxy.py188
-rw-r--r--lib/spack/external/nose/pyversion.py215
-rw-r--r--lib/spack/external/nose/result.py200
-rw-r--r--lib/spack/external/nose/selector.py251
-rw-r--r--lib/spack/external/nose/sphinx/__init__.py1
-rw-r--r--lib/spack/external/nose/sphinx/pluginopts.py189
-rw-r--r--lib/spack/external/nose/suite.py609
-rw-r--r--lib/spack/external/nose/tools/__init__.py15
-rw-r--r--lib/spack/external/nose/tools/nontrivial.py151
-rw-r--r--lib/spack/external/nose/tools/trivial.py54
-rw-r--r--lib/spack/external/nose/twistedtools.py173
-rw-r--r--lib/spack/external/nose/usage.txt115
-rw-r--r--lib/spack/external/nose/util.py668
-rw-r--r--lib/spack/external/ordereddict_backport.py (renamed from lib/spack/external/ordereddict.py)0
-rwxr-xr-xlib/spack/external/pyqver2.py3
-rw-r--r--lib/spack/llnl/util/filesystem.py109
-rw-r--r--lib/spack/llnl/util/lang.py47
-rw-r--r--lib/spack/llnl/util/link_tree.py2
-rw-r--r--lib/spack/llnl/util/lock.py2
-rw-r--r--lib/spack/llnl/util/tty/__init__.py41
-rw-r--r--lib/spack/llnl/util/tty/colify.py9
-rw-r--r--lib/spack/llnl/util/tty/color.py2
-rw-r--r--lib/spack/llnl/util/tty/log.py36
-rw-r--r--lib/spack/spack/__init__.py61
-rw-r--r--lib/spack/spack/abi.py128
-rw-r--r--lib/spack/spack/architecture.py2
-rw-r--r--lib/spack/spack/build_environment.py218
-rw-r--r--lib/spack/spack/cmd/__init__.py11
-rw-r--r--lib/spack/spack/cmd/activate.py4
-rw-r--r--lib/spack/spack/cmd/arch.py2
-rw-r--r--lib/spack/spack/cmd/bootstrap.py4
-rw-r--r--lib/spack/spack/cmd/cd.py2
-rw-r--r--lib/spack/spack/cmd/checksum.py53
-rw-r--r--lib/spack/spack/cmd/clean.py6
-rw-r--r--lib/spack/spack/cmd/compiler.py71
-rw-r--r--lib/spack/spack/cmd/compilers.py7
-rw-r--r--lib/spack/spack/cmd/config.py16
-rw-r--r--lib/spack/spack/cmd/create.py127
-rw-r--r--lib/spack/spack/cmd/deactivate.py6
-rw-r--r--lib/spack/spack/cmd/dependents.py4
-rw-r--r--lib/spack/spack/cmd/diy.py18
-rw-r--r--lib/spack/spack/cmd/doc.py2
-rw-r--r--lib/spack/spack/cmd/edit.py62
-rw-r--r--lib/spack/spack/cmd/env.py4
-rw-r--r--lib/spack/spack/cmd/extensions.py17
-rw-r--r--lib/spack/spack/cmd/fetch.py19
-rw-r--r--lib/spack/spack/cmd/find.py20
-rw-r--r--lib/spack/spack/cmd/graph.py4
-rw-r--r--lib/spack/spack/cmd/help.py2
-rw-r--r--lib/spack/spack/cmd/info.py8
-rw-r--r--lib/spack/spack/cmd/install.py6
-rw-r--r--lib/spack/spack/cmd/list.py6
-rw-r--r--lib/spack/spack/cmd/load.py4
-rw-r--r--lib/spack/spack/cmd/location.py12
-rw-r--r--lib/spack/spack/cmd/md5.py45
-rw-r--r--lib/spack/spack/cmd/mirror.py89
-rw-r--r--lib/spack/spack/cmd/module.py20
-rw-r--r--lib/spack/spack/cmd/package-list.py6
-rw-r--r--lib/spack/spack/cmd/patch.py7
-rw-r--r--lib/spack/spack/cmd/pkg.py11
-rw-r--r--lib/spack/spack/cmd/providers.py6
-rw-r--r--lib/spack/spack/cmd/purge.py2
-rw-r--r--lib/spack/spack/cmd/python.py14
-rw-r--r--lib/spack/spack/cmd/reindex.py4
-rw-r--r--lib/spack/spack/cmd/repo.py174
-rw-r--r--lib/spack/spack/cmd/restage.py6
-rw-r--r--lib/spack/spack/cmd/spec.py4
-rw-r--r--lib/spack/spack/cmd/stage.py11
-rw-r--r--lib/spack/spack/cmd/test-install.py52
-rw-r--r--lib/spack/spack/cmd/test.py22
-rw-r--r--lib/spack/spack/cmd/uninstall.py206
-rw-r--r--lib/spack/spack/cmd/unload.py4
-rw-r--r--lib/spack/spack/cmd/unuse.py4
-rw-r--r--lib/spack/spack/cmd/url-parse.py75
-rw-r--r--lib/spack/spack/cmd/urls.py4
-rw-r--r--lib/spack/spack/cmd/use.py4
-rw-r--r--lib/spack/spack/cmd/versions.py4
-rw-r--r--lib/spack/spack/compiler.py8
-rw-r--r--lib/spack/spack/compilers/__init__.py180
-rw-r--r--lib/spack/spack/compilers/clang.py40
-rw-r--r--lib/spack/spack/compilers/gcc.py11
-rw-r--r--lib/spack/spack/compilers/intel.py8
-rw-r--r--lib/spack/spack/compilers/nag.py33
-rw-r--r--lib/spack/spack/compilers/pgi.py20
-rw-r--r--lib/spack/spack/compilers/xl.py8
-rw-r--r--lib/spack/spack/concretize.py244
-rw-r--r--lib/spack/spack/config.py816
-rw-r--r--lib/spack/spack/database.py16
-rw-r--r--lib/spack/spack/directives.py68
-rw-r--r--lib/spack/spack/directory_layout.py87
-rw-r--r--lib/spack/spack/environment.py252
-rw-r--r--lib/spack/spack/error.py2
-rw-r--r--lib/spack/spack/fetch_strategy.py182
-rw-r--r--lib/spack/spack/graph.py4
-rw-r--r--lib/spack/spack/hooks/__init__.py2
-rw-r--r--lib/spack/spack/hooks/dotkit.py2
-rw-r--r--lib/spack/spack/hooks/extensions.py6
-rw-r--r--lib/spack/spack/hooks/sbang.py98
-rw-r--r--lib/spack/spack/hooks/tclmodule.py2
-rw-r--r--lib/spack/spack/mirror.py160
-rw-r--r--lib/spack/spack/modules.py265
-rw-r--r--lib/spack/spack/multimethod.py47
-rw-r--r--lib/spack/spack/package.py662
-rw-r--r--lib/spack/spack/packages.py211
-rw-r--r--lib/spack/spack/parse.py2
-rw-r--r--lib/spack/spack/patch.py8
-rw-r--r--lib/spack/spack/preferred_packages.py175
-rw-r--r--lib/spack/spack/repository.py855
-rw-r--r--lib/spack/spack/resource.py41
-rw-r--r--lib/spack/spack/spec.py422
-rw-r--r--lib/spack/spack/stage.py447
-rw-r--r--lib/spack/spack/test/__init__.py44
-rw-r--r--lib/spack/spack/test/cc.py145
-rw-r--r--lib/spack/spack/test/cmd/__init__.py0
-rw-r--r--lib/spack/spack/test/cmd/uninstall.py37
-rw-r--r--lib/spack/spack/test/concretize.py151
-rw-r--r--lib/spack/spack/test/config.py104
-rw-r--r--lib/spack/spack/test/configure_guess.py23
-rw-r--r--lib/spack/spack/test/database.py91
-rw-r--r--lib/spack/spack/test/directory_layout.py40
-rw-r--r--lib/spack/spack/test/environment.py73
-rw-r--r--lib/spack/spack/test/git_fetch.py55
-rw-r--r--lib/spack/spack/test/hg_fetch.py53
-rw-r--r--lib/spack/spack/test/install.py43
-rw-r--r--lib/spack/spack/test/link_tree.py9
-rw-r--r--lib/spack/spack/test/lock.py10
-rw-r--r--lib/spack/spack/test/make_executable.py56
-rw-r--r--lib/spack/spack/test/mirror.py96
-rw-r--r--lib/spack/spack/test/mock_database.py80
-rw-r--r--lib/spack/spack/test/mock_packages_test.py97
-rw-r--r--lib/spack/spack/test/mock_repo.py16
-rw-r--r--lib/spack/spack/test/multimethod.py51
-rw-r--r--lib/spack/spack/test/namespace_trie.py115
-rw-r--r--lib/spack/spack/test/optional_deps.py6
-rw-r--r--lib/spack/spack/test/package_sanity.py17
-rw-r--r--lib/spack/spack/test/packages.py64
-rw-r--r--lib/spack/spack/test/pattern.py104
-rw-r--r--lib/spack/spack/test/python_version.py15
-rw-r--r--lib/spack/spack/test/sbang.py93
-rw-r--r--lib/spack/spack/test/spec_dag.py26
-rw-r--r--lib/spack/spack/test/spec_semantics.py46
-rw-r--r--lib/spack/spack/test/spec_syntax.py5
-rw-r--r--lib/spack/spack/test/spec_yaml.py2
-rw-r--r--lib/spack/spack/test/stage.py159
-rw-r--r--lib/spack/spack/test/svn_fetch.py58
-rw-r--r--lib/spack/spack/test/tally_plugin.py59
-rw-r--r--lib/spack/spack/test/unit_install.py53
-rw-r--r--lib/spack/spack/test/url_extrapolate.py5
-rw-r--r--lib/spack/spack/test/url_parse.py4
-rw-r--r--lib/spack/spack/test/url_substitution.py4
-rw-r--r--lib/spack/spack/test/versions.py3
-rw-r--r--lib/spack/spack/test/yaml.py94
-rw-r--r--lib/spack/spack/url.py8
-rw-r--r--lib/spack/spack/util/__init__.py2
-rw-r--r--lib/spack/spack/util/compression.py9
-rw-r--r--lib/spack/spack/util/crypto.py2
-rw-r--r--lib/spack/spack/util/debug.py2
-rw-r--r--lib/spack/spack/util/environment.py11
-rw-r--r--lib/spack/spack/util/executable.py106
-rw-r--r--lib/spack/spack/util/multiproc.py2
-rw-r--r--lib/spack/spack/util/naming.py151
-rw-r--r--lib/spack/spack/util/pattern.py116
-rw-r--r--lib/spack/spack/util/prefix.py2
-rw-r--r--lib/spack/spack/util/spack_yaml.py201
-rw-r--r--lib/spack/spack/util/string.py2
-rw-r--r--lib/spack/spack/util/web.py146
-rw-r--r--lib/spack/spack/variant.py2
-rw-r--r--lib/spack/spack/version.py4
-rw-r--r--lib/spack/spack/virtual.py11
257 files changed, 26047 insertions, 2911 deletions
diff --git a/lib/spack/docs/basic_usage.rst b/lib/spack/docs/basic_usage.rst
index 0578f0c8db..68f3d07b29 100644
--- a/lib/spack/docs/basic_usage.rst
+++ b/lib/spack/docs/basic_usage.rst
@@ -149,26 +149,46 @@ customize an installation in :ref:`sec-specs`.
``spack uninstall``
~~~~~~~~~~~~~~~~~~~~~
-To uninstall a package, type ``spack uninstall <package>``. This will
-completely remove the directory in which the package was installed.
+To uninstall a package, type ``spack uninstall <package>``. This will ask the
+user for confirmation and, if confirmed, completely remove the directory in
+which the package was installed.
.. code-block:: sh
spack uninstall mpich
If there are still installed packages that depend on the package to be
-uninstalled, spack will refuse to uninstall it. You can override this
-behavior with ``spack uninstall -f <package>``, but you risk breaking
-other installed packages. In general, it is safer to remove dependent
-packages *before* removing their dependencies.
+uninstalled, spack will refuse to uninstall it.
-A line like ``spack uninstall mpich`` may be ambiguous, if multiple
-``mpich`` configurations are installed. For example, if both
+To uninstall a package and every package that depends on it, you may give the
+``--dependents`` option.
+
+.. code-block:: sh
+
+ spack uninstall --dependents mpich
+
+will display a list of all the packages that depend on ``mpich`` and, upon
+confirmation, will uninstall them in the right order.
+
+A line like
+
+.. code-block:: sh
+
+ spack uninstall mpich
+
+may be ambiguous, if multiple ``mpich`` configurations are installed. For example, if both
``mpich@3.0.2`` and ``mpich@3.1`` are installed, ``mpich`` could refer
to either one. Because it cannot determine which one to uninstall,
-Spack will ask you to provide a version number to remove the
-ambiguity. As an example, ``spack uninstall mpich@3.1`` is
-unambiguous in this scenario.
+Spack will ask you either to provide a version number to remove the
+ambiguity, or to use the ``--all`` option to uninstall all of the matching
+packages.
+
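+Given the two installations above, for example, either of the following
+commands would be unambiguous:
+
+.. code-block:: sh
+
+   spack uninstall mpich@3.1     # remove only mpich@3.1
+   spack uninstall --all mpich   # remove every installed mpich
+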
+You may force-uninstall a package with the ``--force`` option
+
+.. code-block:: sh
+
+ spack uninstall --force mpich
+
+but you risk breaking other installed packages. In general, it is safer to
+remove dependent packages *before* removing their dependencies, or to use the
+``--dependents`` option.
Seeing installed packages
@@ -357,7 +377,7 @@ Spack, you can simply run ``spack compiler add`` with the path to
where the compiler is installed. For example::
$ spack compiler add /usr/local/tools/ic-13.0.079
- ==> Added 1 new compiler to /Users/gamblin2/.spackconfig
+ ==> Added 1 new compiler to /Users/gamblin2/.spack/compilers.yaml
intel@13.0.079
Or you can run ``spack compiler add`` with no arguments to force
@@ -367,7 +387,7 @@ installed, but you know that new compilers have been added to your
$ module load gcc-4.9.0
$ spack compiler add
- ==> Added 1 new compiler to /Users/gamblin2/.spackconfig
+ ==> Added 1 new compiler to /Users/gamblin2/.spack/compilers.yaml
gcc@4.9.0
This loads the environment module for gcc-4.9.0 to get it into the
@@ -398,27 +418,34 @@ Manual compiler configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If auto-detection fails, you can manually configure a compiler by
-editing your ``~/.spackconfig`` file. You can do this by running
-``spack config edit``, which will open the file in your ``$EDITOR``.
+editing your ``~/.spack/compilers.yaml`` file. You can do this by running
+``spack config edit compilers``, which will open the file in your ``$EDITOR``.
Each compiler configuration in the file looks like this::
...
- [compiler "intel@15.0.0"]
- cc = /usr/local/bin/icc-15.0.024-beta
- cxx = /usr/local/bin/icpc-15.0.024-beta
- f77 = /usr/local/bin/ifort-15.0.024-beta
- fc = /usr/local/bin/ifort-15.0.024-beta
- ...
+    chaos_5_x86_64_ib:
+      ...
+      intel@15.0.0:
+        cc: /usr/local/bin/icc-15.0.024-beta
+        cxx: /usr/local/bin/icpc-15.0.024-beta
+        f77: /usr/local/bin/ifort-15.0.024-beta
+        fc: /usr/local/bin/ifort-15.0.024-beta
+      ...
+
+The ``chaos_5_x86_64_ib`` string is an architecture string, and multiple
+compilers can be listed underneath an architecture. The architecture string
+may be replaced with ``all`` to signify compilers that work on all
+architectures.
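+
+For example, a hypothetical entry that applies on every architecture could
+look like this (the paths are illustrative only)::
+
+    all:
+      gcc@4.9.0:
+        cc: /usr/bin/gcc
+        cxx: /usr/bin/g++
+        f77: /usr/bin/gfortran
+        fc: /usr/bin/gfortran
+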
For compilers, like ``clang``, that do not support Fortran, put
``None`` for ``f77`` and ``fc``::
- [compiler "clang@3.3svn"]
- cc = /usr/bin/clang
- cxx = /usr/bin/clang++
- f77 = None
- fc = None
+    clang@3.3svn:
+      cc: /usr/bin/clang
+      cxx: /usr/bin/clang++
+      f77: None
+      fc: None
Once you save the file, the configured compilers will show up in the
list displayed by ``spack compilers``.
@@ -767,6 +794,34 @@ Environment modules
Spack provides some limited integration with environment module
systems to make it easier to use the packages it provides.
+
+Installing Environment Modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to use Spack's generated environment modules, you must have
+installed the *Environment Modules* package. On many Linux
+distributions, this can be installed from the vendor's repository.
+For example: ``yum install environment-modules``
+(Fedora/RHEL/CentOS). If your Linux distribution does not have
+Environment Modules, you can get it with Spack:
+
+1. Install with::
+
+ spack install environment-modules
+
+2. Activate with::
+
+ MODULES_HOME=`spack location -i environment-modules`
+ MODULES_VERSION=`ls -1 $MODULES_HOME/Modules | head -1`
+ ${MODULES_HOME}/Modules/${MODULES_VERSION}/bin/add.modules
+
+This adds commands to your ``.bashrc`` (or similar) file, enabling
+Environment Modules when you log in. The script will ask your permission
+before changing any files.
+
+Spack and Environment Modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
You can enable shell support by sourcing some files in the
``/share/spack`` directory.
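A minimal sketch for ``bash``-family shells, assuming the conventional
location of the setup script, is:

.. code-block:: sh

   # Enable Spack shell integration; csh/tcsh users would source the
   # corresponding setup-env.csh file instead.
   . $SPACK_ROOT/share/spack/setup-env.sh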
@@ -896,7 +951,7 @@ Or, similarly with modules, you could type:
$ spack load mpich %gcc@4.4.7
These commands will add appropriate directories to your ``PATH``,
-``MANPATH``, and ``LD_LIBRARY_PATH``. When you no longer want to use
+``MANPATH``, ``CPATH``, and ``LD_LIBRARY_PATH``. When you no longer want to use
a package, you can type unload or unuse similarly:
.. code-block:: sh
diff --git a/lib/spack/docs/conf.py b/lib/spack/docs/conf.py
index bce9ef0e94..3d2a8251aa 100644
--- a/lib/spack/docs/conf.py
+++ b/lib/spack/docs/conf.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -43,6 +43,7 @@ import subprocess
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('exts'))
+sys.path.insert(0, os.path.abspath('../external'))
# Add the Spack bin directory to the path so that we can use its output in docs.
spack_root = '../../..'
diff --git a/lib/spack/docs/developer_guide.rst b/lib/spack/docs/developer_guide.rst
index db47de80f5..0b618aa683 100644
--- a/lib/spack/docs/developer_guide.rst
+++ b/lib/spack/docs/developer_guide.rst
@@ -73,19 +73,32 @@ with a high level view of Spack's directory structure::
spack/ <- installation root
bin/
spack <- main spack executable
+
+ etc/
+ spack/ <- Spack config files.
+ Can be overridden by files in ~/.spack.
+
var/
spack/ <- build & stage directories
+ repos/ <- contains package repositories
+ builtin/ <- pkg repository that comes with Spack
+ repo.yaml <- descriptor for the builtin repository
+ packages/ <- directories under here contain packages
+
opt/
spack/ <- packages are installed here
+
lib/
spack/
docs/ <- source for this documentation
env/ <- compiler wrappers for build environment
+ external/ <- external libs included in Spack distro
+ llnl/ <- some general-use libraries
+
spack/ <- spack module; contains Python code
cmd/ <- each file in here is a spack subcommand
compilers/ <- compiler description files
- packages/ <- each file in here is a spack package
test/ <- unit test modules
util/ <- common code
diff --git a/lib/spack/docs/exts/sphinxcontrib/__init__.py b/lib/spack/docs/exts/sphinxcontrib/__init__.py
index 838d616eb4..298856746c 100644
--- a/lib/spack/docs/exts/sphinxcontrib/__init__.py
+++ b/lib/spack/docs/exts/sphinxcontrib/__init__.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/docs/exts/sphinxcontrib/programoutput.py b/lib/spack/docs/exts/sphinxcontrib/programoutput.py
index ff006acf72..f0fa045c86 100644
--- a/lib/spack/docs/exts/sphinxcontrib/programoutput.py
+++ b/lib/spack/docs/exts/sphinxcontrib/programoutput.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/docs/features.rst b/lib/spack/docs/features.rst
index fcb810086d..0998ba8da4 100644
--- a/lib/spack/docs/features.rst
+++ b/lib/spack/docs/features.rst
@@ -103,7 +103,7 @@ creates a simple python file:
It doesn't take much python coding to get from there to a working
package:
-.. literalinclude:: ../../../var/spack/packages/libelf/package.py
+.. literalinclude:: ../../../var/spack/repos/builtin/packages/libelf/package.py
:lines: 25-
Spack also provides wrapper functions around common commands like
diff --git a/lib/spack/docs/getting_started.rst b/lib/spack/docs/getting_started.rst
index d958d9e74a..2c5b68ea65 100644
--- a/lib/spack/docs/getting_started.rst
+++ b/lib/spack/docs/getting_started.rst
@@ -5,11 +5,11 @@ Download
--------------------
Getting spack is easy. You can clone it from the `github repository
-<https://github.com/scalability-llnl/spack>`_ using this command:
+<https://github.com/llnl/spack>`_ using this command:
.. code-block:: sh
- $ git clone https://github.com/scalability-llnl/spack.git
+ $ git clone https://github.com/llnl/spack.git
This will create a directory called ``spack``. We'll assume that the
full path to this directory is in the ``SPACK_ROOT`` environment
@@ -22,7 +22,7 @@ go:
$ spack install libelf
For a richer experience, use Spack's `shell support
-<http://scalability-llnl.github.io/spack/basic_usage.html#environment-modules>`_:
+<http://software.llnl.gov/spack/basic_usage.html#environment-modules>`_:
.. code-block:: sh
diff --git a/lib/spack/docs/index.rst b/lib/spack/docs/index.rst
index 97c8361421..d6ce52b747 100644
--- a/lib/spack/docs/index.rst
+++ b/lib/spack/docs/index.rst
@@ -18,18 +18,18 @@ configurations can coexist on the same system.
Most importantly, Spack is *simple*. It offers a simple *spec* syntax
so that users can specify versions and configuration options
concisely. Spack is also simple for package authors: package files
-are writtin in pure Python, and specs allow package authors to
+are written in pure Python, and specs allow package authors to
maintain a single file for many different builds of the same package.
See the :doc:`features` for examples and highlights.
Get spack from the `github repository
-<https://github.com/scalability-llnl/spack>`_ and install your first
+<https://github.com/llnl/spack>`_ and install your first
package:
.. code-block:: sh
- $ git clone https://github.com/scalability-llnl/spack.git
+ $ git clone https://github.com/llnl/spack.git
$ cd spack/bin
$ ./spack install libelf
diff --git a/lib/spack/docs/mirrors.rst b/lib/spack/docs/mirrors.rst
index d732a3dd54..dad04d053b 100644
--- a/lib/spack/docs/mirrors.rst
+++ b/lib/spack/docs/mirrors.rst
@@ -38,7 +38,7 @@ contains tarballs for each package, named after each package.
.. note::
- Archives are **not** named exactly they were in the package's fetch
+ Archives are **not** named exactly the way they were in the package's fetch
URL. They have the form ``<name>-<version>.<extension>``, where
``<name>`` is Spack's name for the package, ``<version>`` is the
version of the tarball, and ``<extension>`` is whatever format the
@@ -186,7 +186,7 @@ Each mirror has a name so that you can refer to it again later.
``spack mirror list``
----------------------------
-If you want to see all the mirrors Spack knows about you can run ``spack mirror list``::
+To see all the mirrors Spack knows about, run ``spack mirror list``::
$ spack mirror list
local_filesystem file:///Users/gamblin2/spack-mirror-2014-06-24
@@ -196,7 +196,7 @@ If you want to see all the mirrors Spack knows about you can run ``spack mirror
``spack mirror remove``
----------------------------
-And, if you want to remove a mirror, just remove it by name::
+To remove a mirror by name::
$ spack mirror remove local_filesystem
$ spack mirror list
@@ -205,12 +205,11 @@ And, if you want to remove a mirror, just remove it by name::
Mirror precedence
----------------------------
-Adding a mirror really just adds a section in ``~/.spackconfig``::
+Adding a mirror really adds a line in ``~/.spack/mirrors.yaml``::
- [mirror "local_filesystem"]
- url = file:///Users/gamblin2/spack-mirror-2014-06-24
- [mirror "remote_server"]
- url = https://example.com/some/web-hosted/directory/spack-mirror-2014-06-24
+ mirrors:
+ local_filesystem: file:///Users/gamblin2/spack-mirror-2014-06-24
+ remote_server: https://example.com/some/web-hosted/directory/spack-mirror-2014-06-24
If you want to change the order in which mirrors are searched for
packages, you can edit this file and reorder the sections. Spack will
diff --git a/lib/spack/docs/packaging_guide.rst b/lib/spack/docs/packaging_guide.rst
index 59ba63fa35..519c0da232 100644
--- a/lib/spack/docs/packaging_guide.rst
+++ b/lib/spack/docs/packaging_guide.rst
@@ -84,7 +84,7 @@ always choose to download just one tarball initially, and run
If it fails entirely, you can get minimal boilerplate by using
:ref:`spack-edit-f`, or you can manually create a directory and
- ``package.py`` file for the package in ``var/spack/packages``.
+ ``package.py`` file for the package in ``var/spack/repos/builtin/packages``.
.. note::
@@ -203,7 +203,7 @@ edit`` command:
So, if you used ``spack create`` to create a package, then saved and
closed the resulting file, you can get back to it with ``spack edit``.
The ``cmake`` package actually lives in
-``$SPACK_ROOT/var/spack/packages/cmake/package.py``, but this provides
+``$SPACK_ROOT/var/spack/repos/builtin/packages/cmake/package.py``, but this provides
a much simpler shortcut and saves you the trouble of typing the full
path.
@@ -269,18 +269,18 @@ live in Spack's directory structure. In general, `spack-create`_ and
`spack-edit`_ handle creating package files for you, so you can skip
most of the details here.
-``var/spack/packages``
+``var/spack/repos/builtin/packages``
~~~~~~~~~~~~~~~~~~~~~~~
A Spack installation directory is structured like a standard UNIX
install prefix (``bin``, ``lib``, ``include``, ``var``, ``opt``,
etc.). Most of the code for Spack lives in ``$SPACK_ROOT/lib/spack``.
-Packages themselves live in ``$SPACK_ROOT/var/spack/packages``.
+Packages themselves live in ``$SPACK_ROOT/var/spack/repos/builtin/packages``.
If you ``cd`` to that directory, you will see directories for each
package:
-.. command-output:: cd $SPACK_ROOT/var/spack/packages; ls -CF
+.. command-output:: cd $SPACK_ROOT/var/spack/repos/builtin/packages; ls -CF
:shell:
:ellipsis: 10
@@ -288,7 +288,7 @@ Each directory contains a file called ``package.py``, which is where
all the python code for the package goes. For example, the ``libelf``
package lives in::
- $SPACK_ROOT/var/spack/packages/libelf/package.py
+ $SPACK_ROOT/var/spack/repos/builtin/packages/libelf/package.py
Alongside the ``package.py`` file, a package may contain extra
directories or files (like patches) that it needs to build.
@@ -301,7 +301,7 @@ Packages are named after the directory containing ``package.py``. So,
``libelf``'s ``package.py`` lives in a directory called ``libelf``.
The ``package.py`` file defines a class called ``Libelf``, which
extends Spack's ``Package`` class. for example, here is
-``$SPACK_ROOT/var/spack/packages/libelf/package.py``:
+``$SPACK_ROOT/var/spack/repos/builtin/packages/libelf/package.py``:
.. code-block:: python
:linenos:
@@ -328,7 +328,7 @@ these:
$ spack install libelf@0.8.13
Spack sees the package name in the spec and looks for
-``libelf/package.py`` in ``var/spack/packages``. Likewise, if you say
+``libelf/package.py`` in ``var/spack/repos/builtin/packages``. Likewise, if you say
``spack install py-numpy``, then Spack looks for
``py-numpy/package.py``.
@@ -401,6 +401,35 @@ construct the new one for ``8.2.1``.
When you supply a custom URL for a version, Spack uses that URL
*verbatim* and does not perform extrapolation.
+Skipping the expand step
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Spack normally expands archives automatically after downloading
+them. If you want to skip this step (e.g., for self-extracting
+executables and other custom archive types), you can add
+``expand=False`` to a ``version`` directive.
+
+.. code-block:: python
+
+    version('8.2.1', '4136d7b4c04df68b686570afa26988ac',
+            url='http://example.com/foo-8.2.1-special-version.tar.gz',
+            expand=False)
+
+When ``expand`` is set to ``False``, Spack sets the current working
+directory to the directory containing the downloaded archive before it
+calls your ``install`` method. Within ``install``, the path to the
+downloaded archive is available as ``self.stage.archive_file``.
+
+Here is an example snippet for packages distributed as self-extracting
+archives. The example sets permissions on the downloaded file to make
+it executable, then runs it with some arguments.
+
+.. code-block:: python
+
+    def install(self, spec, prefix):
+        set_executable(self.stage.archive_file)
+        installer = Executable(self.stage.archive_file)
+        installer('--prefix=%s' % prefix, 'arg1', 'arg2', 'etc.')
+
Checksums
~~~~~~~~~~~~~~~~~
@@ -632,7 +661,7 @@ Default
revision instead.
Revisions
- Add ``hg`` and ``revision``parameters:
+ Add ``hg`` and ``revision`` parameters:
.. code-block:: python
@@ -703,7 +732,7 @@ supply is a filename, then the patch needs to live within the spack
source tree. For example, the patch above lives in a directory
structure like this::
- $SPACK_ROOT/var/spack/packages/
+ $SPACK_ROOT/var/spack/repos/builtin/packages/
mvapich2/
package.py
ad_lustre_rwcontig_open_source.patch
@@ -1524,6 +1553,69 @@ This is useful when you want to know exactly what Spack will do when
you ask for a particular spec.
+``Concretization Policies``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A user may have certain preferences for how packages should
+be concretized on their system. For example, one user may prefer packages
+built with OpenMPI and the Intel compiler. Another user may prefer
+packages be built with MVAPICH and GCC.
+
+Spack can be configured to prefer certain compilers, package
+versions, dependencies, and variants during concretization.
+The preferred configuration can be controlled via the
+``~/.spack/packages.yaml`` file for user configurations, or the
+``etc/spack/packages.yaml`` site configuration.
+
+
+Here's an example packages.yaml file that sets preferred packages:
+
+.. code-block:: yaml
+
+    packages:
+      dyninst:
+        compiler: [gcc@4.9]
+        variants: +debug
+      gperftools:
+        version: [2.2, 2.4, 2.3]
+      all:
+        compiler: [gcc@4.4.7, gcc@4.6:, intel, clang, pgi]
+        providers:
+          mpi: [mvapich, mpich, openmpi]
+
+
+At a high level, this example is specifying how packages should be
+concretized. The dyninst package should prefer using gcc 4.9 and
+be built with debug options. The gperftools package should prefer version
+2.2 over 2.4. Every package on the system should prefer mvapich for
+its MPI and gcc 4.4.7 (except for Dyninst, which overrides this by preferring gcc 4.9).
+These options are used to fill in implicit defaults. Any of them can be overwritten
+on the command line if explicitly requested.
+
+Each packages.yaml file begins with the string ``packages:`` and
+package names are specified on the next level. The special string ``all``
+applies settings to each package. Underneath each package name is
+one or more components: ``compiler``, ``variants``, ``version``,
+or ``providers``. Each component has an ordered list of spec
+``constraints``, with earlier entries in the list being preferred over
+later entries.
+
+Sometimes a package installation may have constraints that forbid
+the first concretization rule, in which case Spack will use the first
+legal concretization rule. Going back to the example, if a user
+requests gperftools 2.3 or later, then Spack will install version 2.4,
+since version 2.4 is preferred over 2.3.
+
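+For instance, with the preferences above, a hypothetical version-range
+request would concretize to 2.4:
+
+.. code-block:: sh
+
+   # 2.2 is excluded by the range 2.3:, so the next-preferred
+   # version, 2.4, is selected over 2.3.
+   spack spec gperftools@2.3:
+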
+An explicit concretization rule in the preferred section will always
+take preference over unlisted concretizations. In the above example,
+xlc isn't listed in the compiler list. Every listed compiler from
+gcc to pgi will thus be preferred over the xlc compiler.
+
+The syntax for the ``providers`` section differs slightly from other
+concretization rules. A provider lists a virtual package that others may
+``depend_on`` (e.g., ``mpi``) and a list of rules for fulfilling that
+dependency.
+
.. _install-method:
Implementing the ``install`` method
@@ -1533,7 +1625,7 @@ The last element of a package is its ``install()`` method. This is
where the real work of installation happens, and it's the main part of
the package you'll need to customize for each piece of software.
-.. literalinclude:: ../../../var/spack/packages/libelf/package.py
+.. literalinclude:: ../../../var/spack/repos/builtin/packages/libelf/package.py
:start-after: 0.8.12
:linenos:
@@ -1711,15 +1803,15 @@ Compile-time library search paths
* ``-L$dep_prefix/lib``
* ``-L$dep_prefix/lib64``
Runtime library search paths (RPATHs)
- * ``-Wl,-rpath=$dep_prefix/lib``
- * ``-Wl,-rpath=$dep_prefix/lib64``
+ * ``-Wl,-rpath,$dep_prefix/lib``
+ * ``-Wl,-rpath,$dep_prefix/lib64``
Include search paths
* ``-I$dep_prefix/include``
An example of this would be the ``libdwarf`` build, which has one
dependency: ``libelf``. Every call to ``cc`` in the ``libdwarf``
build will have ``-I$LIBELF_PREFIX/include``,
-``-L$LIBELF_PREFIX/lib``, and ``-Wl,-rpath=$LIBELF_PREFIX/lib``
+``-L$LIBELF_PREFIX/lib``, and ``-Wl,-rpath,$LIBELF_PREFIX/lib``
inserted on the command line. This is done transparently to the
project's build system, which will just think it's using a system
where ``libelf`` is readily available. Because of this, you **do
@@ -1752,6 +1844,20 @@ dedicated process.
.. _prefix-objects:
+
+Failing the build
+----------------------
+
+Sometimes you don't want a package to successfully install unless some
+condition is true. You can explicitly cause the build to fail from
+``install()`` by raising an ``InstallError``, for example:
+
+.. code-block:: python
+
+    if spec.architecture.startswith('darwin'):
+        raise InstallError('This package does not build on Mac OS X!')
+
+
Prefix objects
----------------------
@@ -2068,6 +2174,62 @@ package, this allows us to avoid race conditions in the library's
build system.
+.. _sanity-checks:
+
+Sanity checking an installation
+--------------------------------
+
+By default, Spack assumes that a build has failed if nothing is
+written to the install prefix, and that it has succeeded if anything
+(a file, a directory, etc.) is written to the install prefix after
+``install()`` completes.
+
+Consider a simple autotools build like this:
+
+.. code-block:: python
+
+    def install(self, spec, prefix):
+        configure("--prefix=" + prefix)
+        make()
+        make("install")
+
+If you are using standard autotools or CMake, ``configure`` and
+``make`` will not write anything to the install prefix. Only ``make
+install`` writes the files, and only once the build is already
+complete. Not all builds are like this. Many builds of scientific
+software modify the install prefix *before* ``make install``. Builds
+like this can falsely report that they were successfully installed if
+an error occurs before the install is complete but after files have
+been written to the ``prefix``.
+
+
+``sanity_check_is_file`` and ``sanity_check_is_dir``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can optionally specify *sanity checks* to deal with this problem.
+Add properties like this to your package:
+
+.. code-block:: python
+
+    class MyPackage(Package):
+        ...
+
+        sanity_check_is_file = ['include/libelf.h']
+        sanity_check_is_dir = ['lib']
+
+        def install(self, spec, prefix):
+            configure("--prefix=" + prefix)
+            make()
+            make("install")
+
+Now, after ``install()`` runs, Spack will check whether
+``$prefix/include/libelf.h`` exists and is a file, and whether
+``$prefix/lib`` exists and is a directory. If the checks fail, then
+the build will fail and the install prefix will be removed. If they
+succeed, Spack considers the build successful and keeps the prefix in
+place.
+
+
.. _file-manipulation:
File manipulation functions
@@ -2108,6 +2270,15 @@ Filtering functions
Examples:
+ #. Filtering a Makefile to force it to use Spack's compiler wrappers:
+
+ .. code-block:: python
+
+      filter_file(r'^CC\s*=.*',  'CC = '  + spack_cc,  'Makefile')
+      filter_file(r'^CXX\s*=.*', 'CXX = ' + spack_cxx, 'Makefile')
+      filter_file(r'^F77\s*=.*', 'F77 = ' + spack_f77, 'Makefile')
+      filter_file(r'^FC\s*=.*',  'FC = '  + spack_fc,  'Makefile')
+
#. Replacing ``#!/usr/bin/perl`` with ``#!/usr/bin/env perl`` in ``bib2xhtml``:
.. code-block:: python
diff --git a/lib/spack/docs/site_configuration.rst b/lib/spack/docs/site_configuration.rst
index b03df29573..3abfa21a9d 100644
--- a/lib/spack/docs/site_configuration.rst
+++ b/lib/spack/docs/site_configuration.rst
@@ -54,87 +54,73 @@ more elements to the list to indicate where your own site's temporary
directory is.
-.. _concretization-policies:
-
-Concretization policies
-----------------------------
-
-When a user asks for a package like ``mpileaks`` to be installed,
-Spack has to make decisions like what version should be installed,
-what compiler to use, and how its dependencies should be configured.
-This process is called *concretization*, and it's covered in detail in
-:ref:`its own section <abstract-and-concrete>`.
-
-The default concretization policies are in the
-:py:mod:`spack.concretize` module, specifically in the
-:py:class:`spack.concretize.DefaultConcretizer` class. These are the
-important methods used in the concretization process:
-
-* :py:meth:`concretize_version(self, spec) <spack.concretize.DefaultConcretizer.concretize_version>`
-* :py:meth:`concretize_architecture(self, spec) <spack.concretize.DefaultConcretizer.concretize_architecture>`
-* :py:meth:`concretize_compiler(self, spec) <spack.concretize.DefaultConcretizer.concretize_compiler>`
-* :py:meth:`choose_provider(self, spec, providers) <spack.concretize.DefaultConcretizer.choose_provider>`
-
-The first three take a :py:class:`Spec <spack.spec.Spec>` object and
-modify it by adding constraints for the version. For example, if the
-input spec had a version range like `1.0:5.0.3`, then the
-``concretize_version`` method should set the spec's version to a
-*single* version in that range. Likewise, ``concretize_architecture``
-selects an architecture when the input spec does not have one, and
-``concretize_compiler`` needs to set both a concrete compiler and a
-concrete compiler version.
-
-``choose_provider()`` affects how concrete implementations are chosen
-based on a virtual dependency spec. The input spec is some virtual
-dependency and the ``providers`` index is a :py:class:`ProviderIndex
-<spack.packages.ProviderIndex>` object. The ``ProviderIndex`` maps
-the virtual spec to specs for possible implementations, and
-``choose_provider()`` should simply choose one of these. The
-``concretize_*`` methods will be called on the chosen implementation
-later, so there is no need to fully concretize the spec when returning
-it.
-
-The ``DefaultConcretizer`` is intended to provide sensible defaults
-for each policy, but there are certain choices that it can't know
-about. For example, one site might prefer ``OpenMPI`` over ``MPICH``,
-or another might prefer an old version of some packages. These types
-of special cases can be integrated with custom concretizers.
-
-Writing a custom concretizer
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To write your own concretizer, you need only subclass
-``DefaultConcretizer`` and override the methods you want to change.
-For example, you might write a class like this to change *only* the
-``concretize_version()`` behavior:
-
-.. code-block:: python
-
- from spack.concretize import DefaultConcretizer
-
- class MyConcretizer(DefaultConcretizer):
- def concretize_version(self, spec):
- # implement custom logic here.
-
-Once you have written your custom concretizer, you can make Spack use
-it by editing ``globals.py``. Find this part of the file:
-
-.. code-block:: python
-
- #
- # This controls how things are concretized in spack.
- # Replace it with a subclass if you want different
- # policies.
- #
- concretizer = DefaultConcretizer()
-
-Set concretizer to *your own* class instead of the default:
-
-.. code-block:: python
-
- concretizer = MyConcretizer()
-
-The next time you run Spack, your changes should take effect.
+External Packages
+~~~~~~~~~~~~~~~~~~~~~
+Spack can be configured to use externally-installed
+packages rather than building its own packages. This may be desirable
+if machines ship with system packages, such as a customized MPI
+that should be used instead of Spack building its own MPI.
+
+External packages are configured through the ``packages.yaml`` file found
+in a Spack installation's ``etc/spack/`` or a user's ``~/.spack/``
+directory. Here's an example of an external configuration:
+
+.. code-block:: yaml
+
+    packages:
+      openmpi:
+        paths:
+          openmpi@1.4.3%gcc@4.4.7=chaos_5_x86_64_ib: /opt/openmpi-1.4.3
+          openmpi@1.4.3%gcc@4.4.7=chaos_5_x86_64_ib+debug: /opt/openmpi-1.4.3-debug
+          openmpi@1.6.5%intel@10.1=chaos_5_x86_64_ib: /opt/openmpi-1.6.5-intel
+
+This example lists three installations of OpenMPI, one built with gcc,
+one built with gcc and debug information, and another built with Intel.
+If Spack is asked to build a package that uses one of these MPIs as a
+dependency, it will use the pre-installed OpenMPI in
+the given directory.
+
+Each ``packages.yaml`` begins with a ``packages:`` token, followed
+by a list of package names. To specify externals, add a ``paths``
+token under the package name, which lists externals in a
+``spec : /path`` format. Each spec should be as
+well-defined as reasonably possible. If a
+package lacks a spec component, such as missing a compiler or
+package version, then Spack will guess the missing component based
+on its most-favored packages, and it may guess incorrectly.
+
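+As an illustration, a hypothetical underspecified entry such as::
+
+    openmpi@1.6.5: /opt/openmpi-1.6.5-intel
+
+omits the compiler and architecture, leaving Spack to guess both.
+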
+Each package version and compiler listed in an external should
+have entries in Spack's packages and compiler configuration, even
+though the package and compiler may never be built.
+
+The packages configuration can tell Spack to use an external location
+for certain package versions, but it does not restrict Spack to using
+external packages. In the above example, if OpenMPI 1.8.4 became
+available, Spack may choose to start building and linking with that version
+rather than continuing to use the pre-installed OpenMPI versions.
+
+To prevent this, the ``packages.yaml`` configuration also allows packages
+to be flagged as non-buildable. The previous example could be modified to
+be:
+
+.. code-block:: yaml
+
+    packages:
+      openmpi:
+        paths:
+          openmpi@1.4.3%gcc@4.4.7=chaos_5_x86_64_ib: /opt/openmpi-1.4.3
+          openmpi@1.4.3%gcc@4.4.7=chaos_5_x86_64_ib+debug: /opt/openmpi-1.4.3-debug
+          openmpi@1.6.5%intel@10.1=chaos_5_x86_64_ib: /opt/openmpi-1.6.5-intel
+        buildable: False
+
+The addition of the ``buildable`` flag tells Spack that it should never build
+its own version of OpenMPI, and it will instead always rely on a pre-built
+OpenMPI. Similar to ``paths``, ``buildable`` is specified as a property under
+a package name.
+
+The ``buildable`` flag does not need to be paired with external packages.
+It can also be used alone to forbid packages that may be
+buggy or otherwise undesirable.
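+
+For example, a hypothetical entry that forbids building a package outright,
+without listing any external installations:
+
+.. code-block:: yaml
+
+    packages:
+      mpich:
+        buildable: False
+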
Profiling
diff --git a/lib/spack/env/cc b/lib/spack/env/cc
index 846bfd0ff9..99d506558e 100755
--- a/lib/spack/env/cc
+++ b/lib/spack/env/cc
@@ -7,7 +7,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -39,7 +39,7 @@
#
# This is the list of environment variables that need to be set before
-# the script runs. They are set by routines in spack.build_environment
+# the script runs. They are set by routines in spack.build_environment
# as part of spack.package.Package.do_install().
parameters="
SPACK_PREFIX
@@ -51,7 +51,7 @@ SPACK_SHORT_SPEC"
# The compiler input variables are checked for sanity later:
# SPACK_CC, SPACK_CXX, SPACK_F77, SPACK_FC
# The default compiler flags are passed from the config files:
-# SPACK_CFLAGS, SPACK_CXXFLAGS, SPACK_FFLAGS, SPACK_LDFLAGS
+# SPACK_CFLAGS, SPACK_CXXFLAGS, SPACK_FFLAGS, SPACK_LDFLAGS, SPACK_LDLIBS
# Debug flag is optional; set to true for debug logging:
# SPACK_DEBUG
# Test command is used to unit test the compiler script.
@@ -67,12 +67,11 @@ function die {
}
for param in $parameters; do
- if [ -z "${!param}" ]; then
- die "Spack compiler must be run from spack! Input $param was missing!"
+ if [[ -z ${!param} ]]; then
+ die "Spack compiler must be run from Spack! Input '$param' is missing."
fi
done
-#
# Figure out the type of compiler, the language, and the mode so that
# the compiler script knows what to do.
#
@@ -80,249 +79,185 @@ done
# 'command' is set based on the input command to $SPACK_[CC|CXX|F77|F90]
#
# 'mode' is set to one of:
+# vcheck version check
+# cpp preprocess
# cc compile
+# as assemble
# ld link
# ccld compile & link
-# cpp preprocessor
-# vcheck version check
-#
+
command=$(basename "$0")
case "$command" in
- cc|gcc|c89|c99|clang|xlc)
+ cpp)
+ mode=cpp
+ ;;
+ cc|gcc|c89|c99|clang|xlc|icc|pgcc)
command=("$SPACK_CC")
- if [ "$SPACK_CFLAGS" ]; then
- for flag in ${SPACK_CFLAGS[@]}; do
- command+=("$flag");
- done
- fi
language="C"
+ lang_flags=C
;;
c++|CC|g++|clang++|xlC)
command=("$SPACK_CXX")
- if [ "$SPACK_CXXFLAGS" ]; then
- for flag in ${SPACK_CXXFLAGS[@]}; do
- command+=("$flag");
- done
- fi
language="C++"
+ lang_flags=CXX
;;
f77|xlf)
command=("$SPACK_F77")
- if [ "$SPACK_FFLAGS" ]; then
- for flag in ${SPACK_FFLAGS[@]}; do
- command+=("$flag");
- done
- fi
language="Fortran 77"
+ lang_flags=F
;;
- fc|f90|f95|xlf90)
+    fc|f90|f95|xlf90|gfortran|ifort|pgfortran|nagfor)
command="$SPACK_FC"
- if [ "$SPACK_FFLAGS" ]; then
- for flag in ${SPACK_FFLAGS[@]}; do
- command+=("$flag");
- done
- fi
language="Fortran 90"
- ;;
- cpp)
- mode=cpp
- if [ "$SPACK_CPPFLAGS" ]; then
- for flag in ${SPACK_CPPFLAGS[@]}; do
- command+=("$flag");
- done
- fi
+ lang_flags=F
;;
ld)
mode=ld
- if [ "$SPACK_LDFLAGS" ]; then
- for flag in ${SPACK_LDFLAGS[@]}; do
- command+=("$flag");
- done
- fi
;;
*)
die "Unkown compiler: $command"
;;
esac
-# Finish setting up the mode.
+# Check for vcheck mode
if [ -z "$mode" ]; then
- mode=ccld
- if [ "$SPACK_LDFLAGS" ]; then
- for flag in ${SPACK_LDFLAGS[@]}; do
- command+=("$flag");
- done
- fi
for arg in "$@"; do
- if [ "$arg" = -v -o "$arg" = -V -o "$arg" = --version -o "$arg" = -dumpversion ]; then
+ if [[ $arg == -v || $arg == -V || $arg == --version || $arg == -dumpversion ]]; then
mode=vcheck
break
- elif [ "$arg" = -E ]; then
+ fi
+ done
+fi
+
+# Finish setting the mode
+if [[ -z $mode ]]; then
+ mode=ccld
+ for arg in "$@"; do
+ if [[ $arg == -E ]]; then
mode=cpp
break
- elif [ "$arg" = -c ]; then
+ elif [[ $arg == -S ]]; then
+ mode=as
+ break
+ elif [[ $arg == -c ]]; then
mode=cc
break
fi
done
fi
-# Dump the version and exist if we're in testing mode.
-if [ "$SPACK_TEST_COMMAND" = "dump-mode" ]; then
+# Dump the version and exit if we're in testing mode.
+if [[ $SPACK_TEST_COMMAND == dump-mode ]]; then
echo "$mode"
exit
fi
# Check that at least one of the real commands was actually selected,
# otherwise we don't know what to execute.
-if [ -z "$command" ]; then
+if [[ -z $command ]]; then
die "ERROR: Compiler '$SPACK_COMPILER_SPEC' does not support compiling $language programs."
fi
-# Save original command for debug logging
-input_command="$@"
+if [[ $mode == vcheck ]]; then
+ exec ${command} "$@"
+fi
-#
-# Now do real parsing of the command line args, trying hard to keep
-# non-rpath linker arguments in the proper order w.r.t. other command
-# line arguments. This is important for things like groups.
-#
-includes=()
-libraries=()
-libs=()
-rpaths=()
-other_args=()
+# Darwin's linker has a -r argument that merges object files together.
+# It doesn't work with -rpath.
+# This variable controls whether they are added.
+add_rpaths=true
+if [[ $mode == ld && $OSTYPE == darwin* ]]; then
+ for arg in "$@"; do
+ if [[ $arg == -r ]]; then
+ add_rpaths=false
+ break
+ fi
+ done
+fi
-while [ -n "$1" ]; do
- case "$1" in
- -I*)
- arg="${1#-I}"
- if [ -z "$arg" ]; then shift; arg="$1"; fi
- includes+=("$arg")
- ;;
- -L*)
- arg="${1#-L}"
- if [ -z "$arg" ]; then shift; arg="$1"; fi
- libraries+=("$arg")
- ;;
- -l*)
- arg="${1#-l}"
- if [ -z "$arg" ]; then shift; arg="$1"; fi
- libs+=("$arg")
- ;;
- -Wl,*)
- arg="${1#-Wl,}"
- if [ -z "$arg" ]; then shift; arg="$1"; fi
- if [[ "$arg" = -rpath=* ]]; then
- rpaths+=("${arg#-rpath=}")
- elif [[ "$arg" = -rpath ]]; then
- shift; arg="$1"
- if [[ "$arg" != -Wl,* ]]; then
- die "-Wl,-rpath was not followed by -Wl,*"
- fi
- rpaths+=("${arg#-Wl,}")
- else
- other_args+=("-Wl,$arg")
- fi
- ;;
- -Xlinker,*)
- arg="${1#-Xlinker,}"
- if [ -z "$arg" ]; then shift; arg="$1"; fi
- if [[ "$arg" = -rpath=* ]]; then
- rpaths+=("${arg#-rpath=}")
- elif [[ "$arg" = -rpath ]]; then
- shift; arg="$1"
- if [[ "$arg" != -Xlinker,* ]]; then
- die "-Xlinker,-rpath was not followed by -Xlinker,*"
- fi
- rpaths+=("${arg#-Xlinker,}")
- else
- other_args+=("-Xlinker,$arg")
- fi
- ;;
- *)
- other_args+=("$1")
- ;;
- esac
- shift
-done
+# Save original command for debug logging
+input_command="$@"
+args=("$@")
-# Dump parsed values for unit testing if asked for
-if [ -n "$SPACK_TEST_COMMAND" ]; then
- IFS=$'\n'
- case "$SPACK_TEST_COMMAND" in
- dump-includes) echo "${includes[*]}";;
- dump-libraries) echo "${libraries[*]}";;
- dump-libs) echo "${libs[*]}";;
- dump-rpaths) echo "${rpaths[*]}";;
- dump-other-args) echo "${other_args[*]}";;
- dump-all)
- echo "INCLUDES:"
- echo "${includes[*]}"
- echo
- echo "LIBRARIES:"
- echo "${libraries[*]}"
- echo
- echo "LIBS:"
- echo "${libs[*]}"
- echo
- echo "RPATHS:"
- echo "${rpaths[*]}"
- echo
- echo "ARGS:"
- echo "${other_args[*]}"
- ;;
- *)
- echo "ERROR: Unknown test command"
- exit 1 ;;
- esac
- exit
-fi
+# Prepend cppflags, cflags, cxxflags, fflags, and ldflags
+case "$mode" in
+  cpp|as|cc|ccld)
+ # Add cppflags
+ args=(${SPACK_CPPFLAGS[@]} "${args[@]}") ;;
+ *)
+ ;;
+esac
+case "$mode" in
+ cc|ccld)
+ # Add c, cxx, and f flags
+ case $lang_flags in
+ C)
+ args=(${SPACK_CFLAGS[@]} "${args[@]}") ;;
+ CXX)
+ args=(${SPACK_CXXFLAGS[@]} "${args[@]}") ;;
+ F)
+ args=(${SPACK_FFLAGS[@]} "${args[@]}") ;;
+ esac
+ ;;
+ *)
+ ;;
+esac
+case "$mode" in
+ ld|ccld)
+ # Add ldflags
+    args=(${SPACK_LDFLAGS[@]} "${args[@]}") ;;
+ *)
+ ;;
+esac
# Read spack dependencies from the path environment variable
IFS=':' read -ra deps <<< "$SPACK_DEPENDENCIES"
for dep in "${deps[@]}"; do
- if [ -d "$dep/include" ]; then
- includes+=("$dep/include")
+ # Prepend include directories
+ if [[ -d $dep/include ]]; then
+ if [[ $mode == cpp || $mode == cc || $mode == as || $mode == ccld ]]; then
+ args=("-I$dep/include" "${args[@]}")
+ fi
fi
- if [ -d "$dep/lib" ]; then
- libraries+=("$dep/lib")
- rpaths+=("$dep/lib")
+ # Prepend lib and RPATH directories
+ if [[ -d $dep/lib ]]; then
+ if [[ $mode == ccld ]]; then
+ $add_rpaths && args=("-Wl,-rpath,$dep/lib" "${args[@]}")
+ args=("-L$dep/lib" "${args[@]}")
+ elif [[ $mode == ld ]]; then
+ $add_rpaths && args=("-rpath" "$dep/lib" "${args[@]}")
+ args=("-L$dep/lib" "${args[@]}")
+ fi
fi
- if [ -d "$dep/lib64" ]; then
- libraries+=("$dep/lib64")
- rpaths+=("$dep/lib64")
+ # Prepend lib64 and RPATH directories
+ if [[ -d $dep/lib64 ]]; then
+ if [[ $mode == ccld ]]; then
+ $add_rpaths && args=("-Wl,-rpath,$dep/lib64" "${args[@]}")
+ args=("-L$dep/lib64" "${args[@]}")
+ elif [[ $mode == ld ]]; then
+ $add_rpaths && args=("-rpath" "$dep/lib64" "${args[@]}")
+ args=("-L$dep/lib64" "${args[@]}")
+ fi
fi
done
# Include all -L's and prefix/whatever dirs in rpath
-for dir in "${libraries[@]}"; do
- [[ dir = $SPACK_INSTALL* ]] && rpaths+=("$dir")
-done
-rpaths+=("$SPACK_PREFIX/lib")
-rpaths+=("$SPACK_PREFIX/lib64")
-
-# Put the arguments together
-args=()
-for dir in "${includes[@]}"; do args+=("-I$dir"); done
-args+=("${other_args[@]}")
-for dir in "${libraries[@]}"; do args+=("-L$dir"); done
-for lib in "${libs[@]}"; do args+=("-l$lib"); done
-
-if [ "$mode" = ccld ]; then
- for dir in "${rpaths[@]}"; do
- args+=("-Wl,-rpath")
- args+=("-Wl,$dir");
- done
-elif [ "$mode" = ld ]; then
- for dir in "${rpaths[@]}"; do
- args+=("-rpath")
- args+=("$dir");
- done
+if [[ $mode == ccld ]]; then
+ $add_rpaths && args=("-Wl,-rpath,$SPACK_PREFIX/lib" "-Wl,-rpath,$SPACK_PREFIX/lib64" "${args[@]}")
+elif [[ $mode == ld ]]; then
+ $add_rpaths && args=("-rpath" "$SPACK_PREFIX/lib" "-rpath" "$SPACK_PREFIX/lib64" "${args[@]}")
fi
+# Add SPACK_LDLIBS to args
+case "$mode" in
+ ld|ccld)
+ args=("${args[@]}" ${SPACK_LDLIBS[@]}) ;;
+ *)
+ ;;
+esac
+
#
# Unset pesky environment variables that could affect build sanity.
#
@@ -336,41 +271,41 @@ unset DYLD_LIBRARY_PATH
#
IFS=':' read -ra env_path <<< "$PATH"
IFS=':' read -ra spack_env_dirs <<< "$SPACK_ENV_PATH"
-spack_env_dirs+=(".")
+spack_env_dirs+=("" ".")
PATH=""
for dir in "${env_path[@]}"; do
- remove=""
- for rm_dir in "${spack_env_dirs[@]}"; do
- if [ "$dir" = "$rm_dir" ]; then remove=True; fi
- done
- if [ -z "$remove" ]; then
- if [ -z "$PATH" ]; then
- PATH="$dir"
- else
- PATH="$PATH:$dir"
+ addpath=true
+ for env_dir in "${spack_env_dirs[@]}"; do
+ if [[ $dir == $env_dir ]]; then
+ addpath=false
+ break
fi
+ done
+ if $addpath; then
+ PATH="${PATH:+$PATH:}$dir"
fi
done
export PATH
-full_command=("${command[@]}")
-full_command+=("${args[@]}")
+full_command=("$command" "${args[@]}")
+
+# In test command mode, write out full command for Spack tests.
+if [[ $SPACK_TEST_COMMAND == dump-args ]]; then
+ echo "${full_command[@]}"
+ exit
+elif [[ -n $SPACK_TEST_COMMAND ]]; then
+ die "ERROR: Unknown test command"
+fi
#
# Write the input and output commands to debug logs if it's asked for.
#
-if [ "$SPACK_DEBUG" = "TRUE" ]; then
+if [[ $SPACK_DEBUG == TRUE ]]; then
input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_SHORT_SPEC.in.log"
output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_SHORT_SPEC.out.log"
- echo "$input_command" >> $input_log
- echo "$mode ${full_command[@]}" >> $output_log
+ echo "[$mode] $command $input_command" >> $input_log
+ echo "[$mode] ${full_command[@]}" >> $output_log
fi
-#echo "---------------------------"
-#echo "---------------------------"
-#echo "---------------------------"
-#echo "${full_command[@]}"
-#echo "---------------------------"
-#echo "---------------------------"
-#echo "---------------------------"
exec "${full_command[@]}"
diff --git a/lib/spack/env/clang/clang b/lib/spack/env/clang/clang
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/clang/clang
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/clang/clang++ b/lib/spack/env/clang/clang++
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/clang/clang++
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/gcc/g++ b/lib/spack/env/gcc/g++
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/gcc/g++
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/gcc/gcc b/lib/spack/env/gcc/gcc
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/gcc/gcc
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/gcc/gfortran b/lib/spack/env/gcc/gfortran
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/gcc/gfortran
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/intel/icc b/lib/spack/env/intel/icc
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/intel/icc
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/intel/icpc b/lib/spack/env/intel/icpc
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/intel/icpc
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/intel/ifort b/lib/spack/env/intel/ifort
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/intel/ifort
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/nag/nagfor b/lib/spack/env/nag/nagfor
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/nag/nagfor
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/pgi/pgc++ b/lib/spack/env/pgi/pgc++
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/pgi/pgc++
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/pgi/pgcc b/lib/spack/env/pgi/pgcc
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/pgi/pgcc
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/pgi/pgfortran b/lib/spack/env/pgi/pgfortran
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/pgi/pgfortran
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/xl/xlc b/lib/spack/env/xl/xlc
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/xl/xlc
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/xl/xlc++ b/lib/spack/env/xl/xlc++
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/xl/xlc++
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/xl/xlf b/lib/spack/env/xl/xlf
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/xl/xlf
@@ -0,0 +1 @@
+../cc \ No newline at end of file
diff --git a/lib/spack/env/xl/xlf90 b/lib/spack/env/xl/xlf90
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/xl/xlf90
@@ -0,0 +1 @@
+../cc \ No newline at end of file
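Every symlink above resolves to the same `cc` wrapper, which picks the compiler to run from the name it was invoked under. A hedged Python sketch of that dispatch pattern (the mapping below is illustrative, not the wrapper's actual table):

    import os
    import sys

    # Illustrative only: map the invoked name to a language slot, the way
    # the wrapper maps $0 to the real compiler it should exec.
    LANGUAGE_OF = {
        "gcc": "cc", "clang": "cc", "icc": "cc",
        "g++": "c++", "clang++": "c++", "icpc": "c++",
        "gfortran": "f90", "ifort": "f90",
    }

    invoked_as = os.path.basename(sys.argv[0])
    print(invoked_as, "->", LANGUAGE_OF.get(invoked_as, "cc"))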
diff --git a/lib/spack/external/__init__.py b/lib/spack/external/__init__.py
index 0578022210..7a89a1ac67 100644
--- a/lib/spack/external/__init__.py
+++ b/lib/spack/external/__init__.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/external/argparse.py b/lib/spack/external/argparse.py
index 394e5da152..ec9a9ee738 100644
--- a/lib/spack/external/argparse.py
+++ b/lib/spack/external/argparse.py
@@ -1067,9 +1067,13 @@ class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
- def __init__(self, name, help):
+ def __init__(self, name, aliases, help):
+ metavar = dest = name
+ if aliases:
+ metavar += ' (%s)' % ', '.join(aliases)
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
- sup.__init__(option_strings=[], dest=name, help=help)
+ sup.__init__(option_strings=[], dest=dest, help=help,
+ metavar=metavar)
def __init__(self,
option_strings,
@@ -1097,15 +1101,22 @@ class _SubParsersAction(Action):
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
+ aliases = kwargs.pop('aliases', ())
+
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
- choice_action = self._ChoicesPseudoAction(name, help)
+ choice_action = self._ChoicesPseudoAction(name, aliases, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
+
+ # make parser available under aliases also
+ for alias in aliases:
+ self._name_parser_map[alias] = parser
+
return parser
def _get_subactions(self):
@@ -1123,8 +1134,9 @@ class _SubParsersAction(Action):
try:
parser = self._name_parser_map[parser_name]
except KeyError:
- tup = parser_name, ', '.join(self._name_parser_map)
- msg = _('unknown parser %r (choices: %s)' % tup)
+ args = {'parser_name': parser_name,
+ 'choices': ', '.join(self._name_parser_map)}
+ msg = _('unknown parser %(parser_name)r (choices: %(choices)s)') % args
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
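The alias support backported above matches Python 3's ``argparse`` behavior; a small usage sketch:

    import argparse

    parser = argparse.ArgumentParser(prog="demo")
    subparsers = parser.add_subparsers(dest="command")

    # "in" now routes to the same sub-parser as "install".
    install = subparsers.add_parser("install", aliases=["in"],
                                    help="build and install")
    install.add_argument("package")

    args = parser.parse_args(["in", "mylib"])
    print(args.package)  # -> mylib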
diff --git a/lib/spack/external/functools.py b/lib/spack/external/functools_backport.py
index 19f0903c82..19f0903c82 100644
--- a/lib/spack/external/functools.py
+++ b/lib/spack/external/functools_backport.py
diff --git a/lib/spack/external/jsonschema/COPYING b/lib/spack/external/jsonschema/COPYING
new file mode 100644
index 0000000000..af9cfbdb13
--- /dev/null
+++ b/lib/spack/external/jsonschema/COPYING
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Julian Berman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/lib/spack/external/jsonschema/README.rst b/lib/spack/external/jsonschema/README.rst
new file mode 100644
index 0000000000..20c2fe6266
--- /dev/null
+++ b/lib/spack/external/jsonschema/README.rst
@@ -0,0 +1,104 @@
+==========
+jsonschema
+==========
+
+``jsonschema`` is an implementation of `JSON Schema <http://json-schema.org>`_
+for Python (supporting 2.6+ including Python 3).
+
+.. code-block:: python
+
+ >>> from jsonschema import validate
+
+ >>> # A sample schema, like what we'd get from json.load()
+ >>> schema = {
+ ... "type" : "object",
+ ... "properties" : {
+ ... "price" : {"type" : "number"},
+ ... "name" : {"type" : "string"},
+ ... },
+ ... }
+
+ >>> # If no exception is raised by validate(), the instance is valid.
+ >>> validate({"name" : "Eggs", "price" : 34.99}, schema)
+
+ >>> validate(
+ ... {"name" : "Eggs", "price" : "Invalid"}, schema
+ ... ) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValidationError: 'Invalid' is not of type 'number'
+
+
+Features
+--------
+
+* Full support for
+ `Draft 3 <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.Draft3Validator>`_
+ **and** `Draft 4 <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.Draft4Validator>`_
+ of the schema.
+
+* `Lazy validation <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.IValidator.iter_errors>`_
+  that can iteratively report *all* validation errors (example below).
+
+* Small and extensible
+
+* `Programmatic querying <https://python-jsonschema.readthedocs.org/en/latest/errors/#module-jsonschema>`_
+ of which properties or items failed validation.
+
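+For example, ``iter_errors`` can report every failure in a single pass
+instead of stopping at the first one:
+
+.. code-block:: python
+
+    >>> from jsonschema import Draft4Validator
+
+    >>> validator = Draft4Validator(
+    ...     {"maxItems" : 2, "items" : {"type" : "number"}}
+    ... )
+    >>> for error in sorted(
+    ...     validator.iter_errors([1, "two", 3]), key=lambda e: e.message
+    ... ):
+    ...     print(error.message)
+    'two' is not of type 'number'
+    [1, 'two', 3] is too long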
+
+Release Notes
+-------------
+
+* A simple CLI was added for validation
+* Validation errors now keep full absolute paths and absolute schema paths in
+ their ``absolute_path`` and ``absolute_schema_path`` attributes. The ``path``
+ and ``schema_path`` attributes are deprecated in favor of ``relative_path``
+ and ``relative_schema_path``\ .
+
+*Note:* Support for Python 3.2 was dropped in this release, and installation
+now uses setuptools.
+
+
+Running the Test Suite
+----------------------
+
+``jsonschema`` uses the wonderful `Tox <http://tox.readthedocs.org>`_ for its
+test suite. (It really is wonderful; if for some reason you haven't heard of
+it, you really should use it for your projects.)
+
+Assuming you have ``tox`` installed (perhaps via ``pip install tox`` or your
+package manager), just run ``tox`` in the directory of your source checkout to
+run ``jsonschema``'s test suite on all of the versions of Python ``jsonschema``
+supports. Note that you'll need to have all of those versions installed in
+order to run the tests on each of them, otherwise ``tox`` will skip (and fail)
+the tests on that version.
+
+Of course you're also free to just run the tests on a single version with your
+favorite test runner. The tests live in the ``jsonschema.tests`` package.
+
+
+Community
+---------
+
+There's a `mailing list <https://groups.google.com/forum/#!forum/jsonschema>`_
+for this implementation on Google Groups.
+
+Please join, and feel free to send questions there.
+
+
+Contributing
+------------
+
+I'm Julian Berman.
+
+``jsonschema`` is on `GitHub <http://github.com/Julian/jsonschema>`_.
+
+Get in touch, via GitHub or otherwise, if you've got something to contribute;
+it'd be most welcome!
+
+You can also generally find me on Freenode (nick: ``tos9``) in various
+channels, including ``#python``.
+
+If you feel overwhelmingly grateful, you can woo me with beer money on
+`Gittip <https://www.gittip.com/Julian/>`_ or via Google Wallet with the email
+in my GitHub profile.
diff --git a/lib/spack/external/jsonschema/__init__.py b/lib/spack/external/jsonschema/__init__.py
new file mode 100644
index 0000000000..6c099f1d8b
--- /dev/null
+++ b/lib/spack/external/jsonschema/__init__.py
@@ -0,0 +1,26 @@
+"""
+An implementation of JSON Schema for Python
+
+The main functionality is provided by the validator classes for each of the
+supported JSON Schema versions.
+
+Most commonly, :func:`validate` is the quickest way to simply validate a given
+instance under a schema, and will create a validator for you.
+
+"""
+
+from jsonschema.exceptions import (
+ ErrorTree, FormatError, RefResolutionError, SchemaError, ValidationError
+)
+from jsonschema._format import (
+ FormatChecker, draft3_format_checker, draft4_format_checker,
+)
+from jsonschema.validators import (
+ Draft3Validator, Draft4Validator, RefResolver, validate
+)
+
+
+__version__ = "2.4.0"
+
+
+# flake8: noqa
diff --git a/lib/spack/external/jsonschema/__main__.py b/lib/spack/external/jsonschema/__main__.py
new file mode 100644
index 0000000000..82c29fd39e
--- /dev/null
+++ b/lib/spack/external/jsonschema/__main__.py
@@ -0,0 +1,2 @@
+from jsonschema.cli import main
+main()
diff --git a/lib/spack/external/jsonschema/_format.py b/lib/spack/external/jsonschema/_format.py
new file mode 100644
index 0000000000..bb52d183ad
--- /dev/null
+++ b/lib/spack/external/jsonschema/_format.py
@@ -0,0 +1,240 @@
+import datetime
+import re
+import socket
+
+from jsonschema.compat import str_types
+from jsonschema.exceptions import FormatError
+
+
+class FormatChecker(object):
+ """
+ A ``format`` property checker.
+
+ JSON Schema does not mandate that the ``format`` property actually do any
+ validation. If validation is desired however, instances of this class can
+ be hooked into validators to enable format validation.
+
+ :class:`FormatChecker` objects always return ``True`` when asked about
+ formats that they do not know how to validate.
+
+ To check a custom format using a function that takes an instance and
+ returns a ``bool``, use the :meth:`FormatChecker.checks` or
+ :meth:`FormatChecker.cls_checks` decorators.
+
+ :argument iterable formats: the known formats to validate. This argument
+ can be used to limit which formats will be used
+ during validation.
+
+ """
+
+ checkers = {}
+
+ def __init__(self, formats=None):
+ if formats is None:
+ self.checkers = self.checkers.copy()
+ else:
+ self.checkers = dict((k, self.checkers[k]) for k in formats)
+
+ def checks(self, format, raises=()):
+ """
+ Register a decorated function as validating a new format.
+
+ :argument str format: the format that the decorated function will check
+ :argument Exception raises: the exception(s) raised by the decorated
+ function when an invalid instance is found. The exception object
+ will be accessible as the :attr:`ValidationError.cause` attribute
+ of the resulting validation error.
+
+ """
+
+ def _checks(func):
+ self.checkers[format] = (func, raises)
+ return func
+ return _checks
+
+ cls_checks = classmethod(checks)
+
+ def check(self, instance, format):
+ """
+ Check whether the instance conforms to the given format.
+
+ :argument instance: the instance to check
+ :type: any primitive type (str, number, bool)
+ :argument str format: the format that instance should conform to
+ :raises: :exc:`FormatError` if instance does not conform to format
+
+ """
+
+ if format not in self.checkers:
+ return
+
+ func, raises = self.checkers[format]
+ result, cause = None, None
+ try:
+ result = func(instance)
+ except raises as e:
+ cause = e
+ if not result:
+ raise FormatError(
+ "%r is not a %r" % (instance, format), cause=cause,
+ )
+
+ def conforms(self, instance, format):
+ """
+ Check whether the instance conforms to the given format.
+
+ :argument instance: the instance to check
+ :type: any primitive type (str, number, bool)
+ :argument str format: the format that instance should conform to
+ :rtype: bool
+
+ """
+
+ try:
+ self.check(instance, format)
+ except FormatError:
+ return False
+ else:
+ return True
+
+
+_draft_checkers = {"draft3": [], "draft4": []}
+
+
+def _checks_drafts(both=None, draft3=None, draft4=None, raises=()):
+ draft3 = draft3 or both
+ draft4 = draft4 or both
+
+ def wrap(func):
+ if draft3:
+ _draft_checkers["draft3"].append(draft3)
+ func = FormatChecker.cls_checks(draft3, raises)(func)
+ if draft4:
+ _draft_checkers["draft4"].append(draft4)
+ func = FormatChecker.cls_checks(draft4, raises)(func)
+ return func
+ return wrap
+
+
+@_checks_drafts("email")
+def is_email(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return "@" in instance
+
+
+_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
+
+@_checks_drafts(draft3="ip-address", draft4="ipv4")
+def is_ipv4(instance):
+ if not isinstance(instance, str_types):
+ return True
+ if not _ipv4_re.match(instance):
+ return False
+ return all(0 <= int(component) <= 255 for component in instance.split("."))
+
+
+if hasattr(socket, "inet_pton"):
+ @_checks_drafts("ipv6", raises=socket.error)
+ def is_ipv6(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return socket.inet_pton(socket.AF_INET6, instance)
+
+
+_host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$")
+
+@_checks_drafts(draft3="host-name", draft4="hostname")
+def is_host_name(instance):
+ if not isinstance(instance, str_types):
+ return True
+ if not _host_name_re.match(instance):
+ return False
+ components = instance.split(".")
+ for component in components:
+ if len(component) > 63:
+ return False
+ return True
+
+
+try:
+ import rfc3987
+except ImportError:
+ pass
+else:
+ @_checks_drafts("uri", raises=ValueError)
+ def is_uri(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return rfc3987.parse(instance, rule="URI")
+
+
+try:
+ import strict_rfc3339
+except ImportError:
+ try:
+ import isodate
+ except ImportError:
+ pass
+ else:
+ @_checks_drafts("date-time", raises=(ValueError, isodate.ISO8601Error))
+        def is_datetime(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return isodate.parse_datetime(instance)
+else:
+ @_checks_drafts("date-time")
+    def is_datetime(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return strict_rfc3339.validate_rfc3339(instance)
+
+
+@_checks_drafts("regex", raises=re.error)
+def is_regex(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return re.compile(instance)
+
+
+@_checks_drafts(draft3="date", raises=ValueError)
+def is_date(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return datetime.datetime.strptime(instance, "%Y-%m-%d")
+
+
+@_checks_drafts(draft3="time", raises=ValueError)
+def is_time(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return datetime.datetime.strptime(instance, "%H:%M:%S")
+
+
+try:
+ import webcolors
+except ImportError:
+ pass
+else:
+ def is_css_color_code(instance):
+ return webcolors.normalize_hex(instance)
+
+
+ @_checks_drafts(draft3="color", raises=(ValueError, TypeError))
+ def is_css21_color(instance):
+ if (
+ not isinstance(instance, str_types) or
+ instance.lower() in webcolors.css21_names_to_hex
+ ):
+ return True
+ return is_css_color_code(instance)
+
+
+ def is_css3_color(instance):
+ if instance.lower() in webcolors.css3_names_to_hex:
+ return True
+ return is_css_color_code(instance)
+
+
+draft3_format_checker = FormatChecker(_draft_checkers["draft3"])
+draft4_format_checker = FormatChecker(_draft_checkers["draft4"])
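``FormatChecker.checks`` (which ``_checks_drafts`` uses above) also lets callers register custom formats on their own checker instance; a minimal sketch using the hypothetical format name "even":

    from jsonschema import Draft4Validator, FormatChecker

    checker = FormatChecker()

    @checker.checks("even", raises=ValueError)
    def is_even(instance):
        # Mirror the built-in checkers: non-applicable types pass.
        if not isinstance(instance, int):
            return True
        return instance % 2 == 0

    validator = Draft4Validator({"format": "even"}, format_checker=checker)
    print(validator.is_valid(4))  # True
    print(validator.is_valid(5))  # False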
diff --git a/lib/spack/external/jsonschema/_reflect.py b/lib/spack/external/jsonschema/_reflect.py
new file mode 100644
index 0000000000..d09e38fbdc
--- /dev/null
+++ b/lib/spack/external/jsonschema/_reflect.py
@@ -0,0 +1,155 @@
+# -*- test-case-name: twisted.test.test_reflect -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Standardized versions of various cool and/or strange things that you can do
+with Python's reflection capabilities.
+"""
+
+import sys
+
+from jsonschema.compat import PY3
+
+
+class _NoModuleFound(Exception):
+ """
+ No module was found because none exists.
+ """
+
+
+
+class InvalidName(ValueError):
+ """
+ The given name is not a dot-separated list of Python objects.
+ """
+
+
+
+class ModuleNotFound(InvalidName):
+ """
+ The module associated with the given name doesn't exist and it can't be
+ imported.
+ """
+
+
+
+class ObjectNotFound(InvalidName):
+ """
+ The object associated with the given name doesn't exist and it can't be
+ imported.
+ """
+
+
+
+if PY3:
+ def reraise(exception, traceback):
+ raise exception.with_traceback(traceback)
+else:
+ exec("""def reraise(exception, traceback):
+ raise exception.__class__, exception, traceback""")
+
+reraise.__doc__ = """
+Re-raise an exception, with an optional traceback, in a way that is compatible
+with both Python 2 and Python 3.
+
+Note that on Python 3, re-raised exceptions will be mutated, with their
+C{__traceback__} attribute being set.
+
+@param exception: The exception instance.
+@param traceback: The traceback to use, or C{None} indicating a new traceback.
+"""
+
+
+def _importAndCheckStack(importName):
+ """
+ Import the given name as a module, then walk the stack to determine whether
+ the failure was the module not existing, or some code in the module (for
+ example a dependent import) failing. This can be helpful to determine
+    whether any actual application code was run. For example, to distinguish
+    administrative error (entering the wrong module name) from programmer
+ error (writing buggy code in a module that fails to import).
+
+ @param importName: The name of the module to import.
+ @type importName: C{str}
+ @raise Exception: if something bad happens. This can be any type of
+ exception, since nobody knows what loading some arbitrary code might
+ do.
+ @raise _NoModuleFound: if no module was found.
+ """
+ try:
+ return __import__(importName)
+ except ImportError:
+ excType, excValue, excTraceback = sys.exc_info()
+ while excTraceback:
+ execName = excTraceback.tb_frame.f_globals["__name__"]
+ # in Python 2 execName is None when an ImportError is encountered,
+            # whereas in Python 3 execName is equal to the importName.
+ if execName is None or execName == importName:
+ reraise(excValue, excTraceback)
+ excTraceback = excTraceback.tb_next
+ raise _NoModuleFound()
+
+
+
+def namedAny(name):
+ """
+ Retrieve a Python object by its fully qualified name from the global Python
+ module namespace. The first part of the name, that describes a module,
+ will be discovered and imported. Each subsequent part of the name is
+ treated as the name of an attribute of the object specified by all of the
+ name which came before it. For example, the fully-qualified name of this
+ object is 'twisted.python.reflect.namedAny'.
+
+ @type name: L{str}
+ @param name: The name of the object to return.
+
+ @raise InvalidName: If the name is an empty string, starts or ends with
+ a '.', or is otherwise syntactically incorrect.
+
+ @raise ModuleNotFound: If the name is syntactically correct but the
+ module it specifies cannot be imported because it does not appear to
+ exist.
+
+ @raise ObjectNotFound: If the name is syntactically correct, includes at
+ least one '.', but the module it specifies cannot be imported because
+ it does not appear to exist.
+
+ @raise AttributeError: If an attribute of an object along the way cannot be
+ accessed, or a module along the way is not found.
+
+ @return: the Python object identified by 'name'.
+ """
+ if not name:
+ raise InvalidName('Empty module name')
+
+ names = name.split('.')
+
+ # if the name starts or ends with a '.' or contains '..', the __import__
+ # will raise an 'Empty module name' error. This will provide a better error
+ # message.
+ if '' in names:
+ raise InvalidName(
+ "name must be a string giving a '.'-separated list of Python "
+ "identifiers, not %r" % (name,))
+
+ topLevelPackage = None
+ moduleNames = names[:]
+ while not topLevelPackage:
+ if moduleNames:
+ trialname = '.'.join(moduleNames)
+ try:
+ topLevelPackage = _importAndCheckStack(trialname)
+ except _NoModuleFound:
+ moduleNames.pop()
+ else:
+ if len(names) == 1:
+ raise ModuleNotFound("No module named %r" % (name,))
+ else:
+ raise ObjectNotFound('%r does not name an object' % (name,))
+
+ obj = topLevelPackage
+ for n in names[1:]:
+ obj = getattr(obj, n)
+
+ return obj
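``namedAny`` is what lets the jsonschema CLI resolve ``--validator`` names given as dotted paths; for example:

    from jsonschema._reflect import namedAny

    cls = namedAny("jsonschema.Draft4Validator")
    print(cls.__name__)  # -> Draft4Validator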
diff --git a/lib/spack/external/jsonschema/_utils.py b/lib/spack/external/jsonschema/_utils.py
new file mode 100644
index 0000000000..2262f3305d
--- /dev/null
+++ b/lib/spack/external/jsonschema/_utils.py
@@ -0,0 +1,213 @@
+import itertools
+import json
+import pkgutil
+import re
+
+from jsonschema.compat import str_types, MutableMapping, urlsplit
+
+
+class URIDict(MutableMapping):
+ """
+ Dictionary which uses normalized URIs as keys.
+
+ """
+
+ def normalize(self, uri):
+ return urlsplit(uri).geturl()
+
+ def __init__(self, *args, **kwargs):
+ self.store = dict()
+ self.store.update(*args, **kwargs)
+
+ def __getitem__(self, uri):
+ return self.store[self.normalize(uri)]
+
+ def __setitem__(self, uri, value):
+ self.store[self.normalize(uri)] = value
+
+ def __delitem__(self, uri):
+ del self.store[self.normalize(uri)]
+
+ def __iter__(self):
+ return iter(self.store)
+
+ def __len__(self):
+ return len(self.store)
+
+ def __repr__(self):
+ return repr(self.store)
+
+
+class Unset(object):
+ """
+ An as-of-yet unset attribute or unprovided default parameter.
+
+ """
+
+ def __repr__(self):
+ return "<unset>"
+
+
+def load_schema(name):
+ """
+ Load a schema from ./schemas/``name``.json and return it.
+
+ """
+
+ data = pkgutil.get_data(__package__, "schemas/{0}.json".format(name))
+ return json.loads(data.decode("utf-8"))
+
+
+def indent(string, times=1):
+ """
+ A dumb version of :func:`textwrap.indent` from Python 3.3.
+
+ """
+
+ return "\n".join(" " * (4 * times) + line for line in string.splitlines())
+
+
+def format_as_index(indices):
+ """
+ Construct a single string containing indexing operations for the indices.
+
+ For example, [1, 2, "foo"] -> [1][2]["foo"]
+
+ :type indices: sequence
+
+ """
+
+ if not indices:
+ return ""
+ return "[%s]" % "][".join(repr(index) for index in indices)
+
+
+def find_additional_properties(instance, schema):
+ """
+ Return the set of additional properties for the given ``instance``.
+
+ Weeds out properties that should have been validated by ``properties`` and
+ / or ``patternProperties``.
+
+ Assumes ``instance`` is dict-like already.
+
+ """
+
+ properties = schema.get("properties", {})
+ patterns = "|".join(schema.get("patternProperties", {}))
+ for property in instance:
+ if property not in properties:
+ if patterns and re.search(patterns, property):
+ continue
+ yield property
+
+
+def extras_msg(extras):
+ """
+ Create an error message for extra items or properties.
+
+ """
+
+ if len(extras) == 1:
+ verb = "was"
+ else:
+ verb = "were"
+ return ", ".join(repr(extra) for extra in extras), verb
+
+
+def types_msg(instance, types):
+ """
+ Create an error message for a failure to match the given types.
+
+ If the ``instance`` is an object and contains a ``name`` property, it will
+ be considered to be a description of that object and used as its type.
+
+ Otherwise the message is simply the reprs of the given ``types``.
+
+ """
+
+ reprs = []
+ for type in types:
+ try:
+ reprs.append(repr(type["name"]))
+ except Exception:
+ reprs.append(repr(type))
+ return "%r is not of type %s" % (instance, ", ".join(reprs))
+
+
+def flatten(suitable_for_isinstance):
+ """
+ isinstance() can accept a bunch of really annoying different types:
+ * a single type
+ * a tuple of types
+ * an arbitrary nested tree of tuples
+
+ Return a flattened tuple of the given argument.
+
+ """
+
+ types = set()
+
+ if not isinstance(suitable_for_isinstance, tuple):
+ suitable_for_isinstance = (suitable_for_isinstance,)
+ for thing in suitable_for_isinstance:
+ if isinstance(thing, tuple):
+ types.update(flatten(thing))
+ else:
+ types.add(thing)
+ return tuple(types)
+
+
+def ensure_list(thing):
+ """
+ Wrap ``thing`` in a list if it's a single str.
+
+ Otherwise, return it unchanged.
+
+ """
+
+ if isinstance(thing, str_types):
+ return [thing]
+ return thing
+
+
+def unbool(element, true=object(), false=object()):
+ """
+ A hack to make True and 1 and False and 0 unique for ``uniq``.
+
+ """
+
+ if element is True:
+ return true
+ elif element is False:
+ return false
+ return element
+
+
+def uniq(container):
+ """
+ Check if all of a container's elements are unique.
+
+    First tries to rely on the elements being hashable, then falls back
+    on them being sortable, and finally falls back on brute force.
+
+ """
+
+ try:
+ return len(set(unbool(i) for i in container)) == len(container)
+ except TypeError:
+ try:
+ sort = sorted(unbool(i) for i in container)
+ sliced = itertools.islice(sort, 1, None)
+ for i, j in zip(sort, sliced):
+ if i == j:
+ return False
+ except (NotImplementedError, TypeError):
+ seen = []
+ for e in container:
+ e = unbool(e)
+ if e in seen:
+ return False
+ seen.append(e)
+ return True
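The ``unbool`` sentinel trick matters because ``1 == True`` in Python, which would otherwise make ``uniq`` report false duplicates; a quick check:

    from jsonschema._utils import uniq

    print(uniq([0, False]))  # True: distinct values under uniqueItems
    print(uniq([1, 1.0]))    # False: 1 and 1.0 really are duplicates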
diff --git a/lib/spack/external/jsonschema/_validators.py b/lib/spack/external/jsonschema/_validators.py
new file mode 100644
index 0000000000..c6e801ccb2
--- /dev/null
+++ b/lib/spack/external/jsonschema/_validators.py
@@ -0,0 +1,358 @@
+import re
+
+from jsonschema import _utils
+from jsonschema.exceptions import FormatError, ValidationError
+from jsonschema.compat import iteritems
+
+
+FLOAT_TOLERANCE = 10 ** -15
+
+
+def patternProperties(validator, patternProperties, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for pattern, subschema in iteritems(patternProperties):
+ for k, v in iteritems(instance):
+ if re.search(pattern, k):
+ for error in validator.descend(
+ v, subschema, path=k, schema_path=pattern,
+ ):
+ yield error
+
+
+def additionalProperties(validator, aP, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ extras = set(_utils.find_additional_properties(instance, schema))
+
+ if validator.is_type(aP, "object"):
+ for extra in extras:
+ for error in validator.descend(instance[extra], aP, path=extra):
+ yield error
+ elif not aP and extras:
+ error = "Additional properties are not allowed (%s %s unexpected)"
+ yield ValidationError(error % _utils.extras_msg(extras))
+
+
+def items(validator, items, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+
+ if validator.is_type(items, "object"):
+ for index, item in enumerate(instance):
+ for error in validator.descend(item, items, path=index):
+ yield error
+ else:
+ for (index, item), subschema in zip(enumerate(instance), items):
+ for error in validator.descend(
+ item, subschema, path=index, schema_path=index,
+ ):
+ yield error
+
+
+def additionalItems(validator, aI, instance, schema):
+ if (
+ not validator.is_type(instance, "array") or
+ validator.is_type(schema.get("items", {}), "object")
+ ):
+ return
+
+ len_items = len(schema.get("items", []))
+ if validator.is_type(aI, "object"):
+ for index, item in enumerate(instance[len_items:], start=len_items):
+ for error in validator.descend(item, aI, path=index):
+ yield error
+ elif not aI and len(instance) > len(schema.get("items", [])):
+ error = "Additional items are not allowed (%s %s unexpected)"
+ yield ValidationError(
+ error %
+ _utils.extras_msg(instance[len(schema.get("items", [])):])
+ )
+
+
+def minimum(validator, minimum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if schema.get("exclusiveMinimum", False):
+ failed = float(instance) <= minimum
+ cmp = "less than or equal to"
+ else:
+ failed = float(instance) < minimum
+ cmp = "less than"
+
+ if failed:
+ yield ValidationError(
+ "%r is %s the minimum of %r" % (instance, cmp, minimum)
+ )
+
+
+def maximum(validator, maximum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if schema.get("exclusiveMaximum", False):
+ failed = instance >= maximum
+ cmp = "greater than or equal to"
+ else:
+ failed = instance > maximum
+ cmp = "greater than"
+
+ if failed:
+ yield ValidationError(
+ "%r is %s the maximum of %r" % (instance, cmp, maximum)
+ )
+
+
+def multipleOf(validator, dB, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if isinstance(dB, float):
+ mod = instance % dB
+ failed = (mod > FLOAT_TOLERANCE) and (dB - mod) > FLOAT_TOLERANCE
+ else:
+ failed = instance % dB
+
+ if failed:
+ yield ValidationError("%r is not a multiple of %r" % (instance, dB))
+
+
+def minItems(validator, mI, instance, schema):
+ if validator.is_type(instance, "array") and len(instance) < mI:
+ yield ValidationError("%r is too short" % (instance,))
+
+
+def maxItems(validator, mI, instance, schema):
+ if validator.is_type(instance, "array") and len(instance) > mI:
+ yield ValidationError("%r is too long" % (instance,))
+
+
+def uniqueItems(validator, uI, instance, schema):
+ if (
+ uI and
+ validator.is_type(instance, "array") and
+ not _utils.uniq(instance)
+ ):
+ yield ValidationError("%r has non-unique elements" % instance)
+
+
+def pattern(validator, patrn, instance, schema):
+ if (
+ validator.is_type(instance, "string") and
+ not re.search(patrn, instance)
+ ):
+ yield ValidationError("%r does not match %r" % (instance, patrn))
+
+
+def format(validator, format, instance, schema):
+ if validator.format_checker is not None:
+ try:
+ validator.format_checker.check(instance, format)
+ except FormatError as error:
+ yield ValidationError(error.message, cause=error.cause)
+
+
+def minLength(validator, mL, instance, schema):
+ if validator.is_type(instance, "string") and len(instance) < mL:
+ yield ValidationError("%r is too short" % (instance,))
+
+
+def maxLength(validator, mL, instance, schema):
+ if validator.is_type(instance, "string") and len(instance) > mL:
+ yield ValidationError("%r is too long" % (instance,))
+
+
+def dependencies(validator, dependencies, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, dependency in iteritems(dependencies):
+ if property not in instance:
+ continue
+
+ if validator.is_type(dependency, "object"):
+ for error in validator.descend(
+ instance, dependency, schema_path=property,
+ ):
+ yield error
+ else:
+ dependencies = _utils.ensure_list(dependency)
+ for dependency in dependencies:
+ if dependency not in instance:
+ yield ValidationError(
+ "%r is a dependency of %r" % (dependency, property)
+ )
+
+
+def enum(validator, enums, instance, schema):
+ if instance not in enums:
+ yield ValidationError("%r is not one of %r" % (instance, enums))
+
+
+def ref(validator, ref, instance, schema):
+ with validator.resolver.resolving(ref) as resolved:
+ for error in validator.descend(instance, resolved):
+ yield error
+
+
+def type_draft3(validator, types, instance, schema):
+ types = _utils.ensure_list(types)
+
+ all_errors = []
+ for index, type in enumerate(types):
+ if type == "any":
+ return
+ if validator.is_type(type, "object"):
+ errors = list(validator.descend(instance, type, schema_path=index))
+ if not errors:
+ return
+ all_errors.extend(errors)
+ else:
+ if validator.is_type(instance, type):
+ return
+ else:
+ yield ValidationError(
+ _utils.types_msg(instance, types), context=all_errors,
+ )
+
+
+def properties_draft3(validator, properties, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, subschema in iteritems(properties):
+ if property in instance:
+ for error in validator.descend(
+ instance[property],
+ subschema,
+ path=property,
+ schema_path=property,
+ ):
+ yield error
+ elif subschema.get("required", False):
+ error = ValidationError("%r is a required property" % property)
+ error._set(
+ validator="required",
+ validator_value=subschema["required"],
+ instance=instance,
+ schema=schema,
+ )
+ error.path.appendleft(property)
+ error.schema_path.extend([property, "required"])
+ yield error
+
+
+def disallow_draft3(validator, disallow, instance, schema):
+ for disallowed in _utils.ensure_list(disallow):
+ if validator.is_valid(instance, {"type" : [disallowed]}):
+ yield ValidationError(
+ "%r is disallowed for %r" % (disallowed, instance)
+ )
+
+
+def extends_draft3(validator, extends, instance, schema):
+ if validator.is_type(extends, "object"):
+ for error in validator.descend(instance, extends):
+ yield error
+ return
+ for index, subschema in enumerate(extends):
+ for error in validator.descend(instance, subschema, schema_path=index):
+ yield error
+
+
+def type_draft4(validator, types, instance, schema):
+ types = _utils.ensure_list(types)
+
+ if not any(validator.is_type(instance, type) for type in types):
+ yield ValidationError(_utils.types_msg(instance, types))
+
+
+def properties_draft4(validator, properties, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, subschema in iteritems(properties):
+ if property in instance:
+ for error in validator.descend(
+ instance[property],
+ subschema,
+ path=property,
+ schema_path=property,
+ ):
+ yield error
+
+
+def required_draft4(validator, required, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+ for property in required:
+ if property not in instance:
+ yield ValidationError("%r is a required property" % property)
+
+
+def minProperties_draft4(validator, mP, instance, schema):
+ if validator.is_type(instance, "object") and len(instance) < mP:
+ yield ValidationError(
+ "%r does not have enough properties" % (instance,)
+ )
+
+
+def maxProperties_draft4(validator, mP, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+    if len(instance) > mP:
+ yield ValidationError("%r has too many properties" % (instance,))
+
+
+def allOf_draft4(validator, allOf, instance, schema):
+ for index, subschema in enumerate(allOf):
+ for error in validator.descend(instance, subschema, schema_path=index):
+ yield error
+
+
+def oneOf_draft4(validator, oneOf, instance, schema):
+ subschemas = enumerate(oneOf)
+ all_errors = []
+ for index, subschema in subschemas:
+ errs = list(validator.descend(instance, subschema, schema_path=index))
+ if not errs:
+ first_valid = subschema
+ break
+ all_errors.extend(errs)
+ else:
+ yield ValidationError(
+ "%r is not valid under any of the given schemas" % (instance,),
+ context=all_errors,
+ )
+
+ more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
+ if more_valid:
+ more_valid.append(first_valid)
+ reprs = ", ".join(repr(schema) for schema in more_valid)
+ yield ValidationError(
+ "%r is valid under each of %s" % (instance, reprs)
+ )
+
+
+def anyOf_draft4(validator, anyOf, instance, schema):
+ all_errors = []
+ for index, subschema in enumerate(anyOf):
+ errs = list(validator.descend(instance, subschema, schema_path=index))
+ if not errs:
+ break
+ all_errors.extend(errs)
+ else:
+ yield ValidationError(
+ "%r is not valid under any of the given schemas" % (instance,),
+ context=all_errors,
+ )
+
+
+def not_draft4(validator, not_schema, instance, schema):
+ if validator.is_valid(instance, not_schema):
+ yield ValidationError(
+ "%r is not allowed for %r" % (not_schema, instance)
+ )
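Each function above is a generator for a single schema keyword; the validator classes drive them through ``iter_errors``. For instance, the ``minimum``/``exclusiveMinimum`` pair behaves like this:

    from jsonschema import Draft4Validator

    schema = {"type": "number", "minimum": 3, "exclusiveMinimum": True}
    for error in Draft4Validator(schema).iter_errors(3):
        print(error.message)
    # -> 3 is less than or equal to the minimum of 3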
diff --git a/lib/spack/external/jsonschema/cli.py b/lib/spack/external/jsonschema/cli.py
new file mode 100644
index 0000000000..0126564f46
--- /dev/null
+++ b/lib/spack/external/jsonschema/cli.py
@@ -0,0 +1,72 @@
+from __future__ import absolute_import
+import argparse
+import json
+import sys
+
+from jsonschema._reflect import namedAny
+from jsonschema.validators import validator_for
+
+
+def _namedAnyWithDefault(name):
+ if "." not in name:
+ name = "jsonschema." + name
+ return namedAny(name)
+
+
+def _json_file(path):
+ with open(path) as file:
+ return json.load(file)
+
+
+parser = argparse.ArgumentParser(
+ description="JSON Schema Validation CLI",
+)
+parser.add_argument(
+ "-i", "--instance",
+ action="append",
+ dest="instances",
+ type=_json_file,
+ help="a path to a JSON instance to validate "
+ "(may be specified multiple times)",
+)
+parser.add_argument(
+ "-F", "--error-format",
+ default="{error.instance}: {error.message}\n",
+ help="the format to use for each error output message, specified in "
+ "a form suitable for passing to str.format, which will be called "
+ "with 'error' for each error",
+)
+parser.add_argument(
+ "-V", "--validator",
+ type=_namedAnyWithDefault,
+ help="the fully qualified object name of a validator to use, or, for "
+ "validators that are registered with jsonschema, simply the name "
+ "of the class.",
+)
+parser.add_argument(
+ "schema",
+ help="the JSON Schema to validate with",
+ type=_json_file,
+)
+
+
+def parse_args(args):
+ arguments = vars(parser.parse_args(args=args or ["--help"]))
+ if arguments["validator"] is None:
+ arguments["validator"] = validator_for(arguments["schema"])
+ return arguments
+
+
+def main(args=sys.argv[1:]):
+ sys.exit(run(arguments=parse_args(args=args)))
+
+
+def run(arguments, stdout=sys.stdout, stderr=sys.stderr):
+ error_format = arguments["error_format"]
+ validator = arguments["validator"](schema=arguments["schema"])
+ errored = False
+ for instance in arguments["instances"] or ():
+ for error in validator.iter_errors(instance):
+ stderr.write(error_format.format(error=error))
+ errored = True
+ return errored
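The CLI above can also be driven programmatically; a small sketch (the JSON file names are hypothetical and must exist on disk):

    from jsonschema import cli

    # Equivalent to: python -m jsonschema -i instance.json schema.json
    arguments = cli.parse_args(["-i", "instance.json", "schema.json"])
    exit_code = cli.run(arguments)  # falsey when everything validates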
diff --git a/lib/spack/external/jsonschema/compat.py b/lib/spack/external/jsonschema/compat.py
new file mode 100644
index 0000000000..6ca49ab6be
--- /dev/null
+++ b/lib/spack/external/jsonschema/compat.py
@@ -0,0 +1,53 @@
+from __future__ import unicode_literals
+import sys
+import operator
+
+try:
+ from collections import MutableMapping, Sequence # noqa
+except ImportError:
+ from collections.abc import MutableMapping, Sequence # noqa
+
+PY3 = sys.version_info[0] >= 3
+
+if PY3:
+ zip = zip
+ from io import StringIO
+ from urllib.parse import (
+ unquote, urljoin, urlunsplit, SplitResult, urlsplit as _urlsplit
+ )
+ from urllib.request import urlopen
+ str_types = str,
+ int_types = int,
+ iteritems = operator.methodcaller("items")
+else:
+ from itertools import izip as zip # noqa
+ from StringIO import StringIO
+ from urlparse import (
+ urljoin, urlunsplit, SplitResult, urlsplit as _urlsplit # noqa
+ )
+ from urllib import unquote # noqa
+ from urllib2 import urlopen # noqa
+ str_types = basestring
+ int_types = int, long
+ iteritems = operator.methodcaller("iteritems")
+
+
+# On python < 3.3 fragments are not handled properly with unknown schemes
+def urlsplit(url):
+ scheme, netloc, path, query, fragment = _urlsplit(url)
+ if "#" in path:
+ path, fragment = path.split("#", 1)
+ return SplitResult(scheme, netloc, path, query, fragment)
+
+
+def urldefrag(url):
+ if "#" in url:
+ s, n, p, q, frag = urlsplit(url)
+ defrag = urlunsplit((s, n, p, q, ''))
+ else:
+ defrag = url
+ frag = ''
+ return defrag, frag
+
+
+# flake8: noqa
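The custom ``urlsplit``/``urldefrag`` wrappers exist because older stdlib parsers left ``#`` fragments attached to the path for unrecognized schemes; either way, the wrappers give a uniform answer:

    from jsonschema.compat import urldefrag, urlsplit

    print(urlsplit("tag:example#frag").fragment)  # -> frag
    print(urldefrag("tag:example#frag"))          # -> ('tag:example', 'frag')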
diff --git a/lib/spack/external/jsonschema/exceptions.py b/lib/spack/external/jsonschema/exceptions.py
new file mode 100644
index 0000000000..478e59c531
--- /dev/null
+++ b/lib/spack/external/jsonschema/exceptions.py
@@ -0,0 +1,264 @@
+from collections import defaultdict, deque
+import itertools
+import pprint
+import textwrap
+
+from jsonschema import _utils
+from jsonschema.compat import PY3, iteritems
+
+
+WEAK_MATCHES = frozenset(["anyOf", "oneOf"])
+STRONG_MATCHES = frozenset()
+
+_unset = _utils.Unset()
+
+
+class _Error(Exception):
+ def __init__(
+ self,
+ message,
+ validator=_unset,
+ path=(),
+ cause=None,
+ context=(),
+ validator_value=_unset,
+ instance=_unset,
+ schema=_unset,
+ schema_path=(),
+ parent=None,
+ ):
+ self.message = message
+ self.path = self.relative_path = deque(path)
+ self.schema_path = self.relative_schema_path = deque(schema_path)
+ self.context = list(context)
+ self.cause = self.__cause__ = cause
+ self.validator = validator
+ self.validator_value = validator_value
+ self.instance = instance
+ self.schema = schema
+ self.parent = parent
+
+ for error in context:
+ error.parent = self
+
+ def __repr__(self):
+ return "<%s: %r>" % (self.__class__.__name__, self.message)
+
+ def __str__(self):
+ return unicode(self).encode("utf-8")
+
+ def __unicode__(self):
+ essential_for_verbose = (
+ self.validator, self.validator_value, self.instance, self.schema,
+ )
+ if any(m is _unset for m in essential_for_verbose):
+ return self.message
+
+ pschema = pprint.pformat(self.schema, width=72)
+ pinstance = pprint.pformat(self.instance, width=72)
+ return self.message + textwrap.dedent("""
+
+ Failed validating %r in schema%s:
+ %s
+
+ On instance%s:
+ %s
+ """.rstrip()
+ ) % (
+ self.validator,
+ _utils.format_as_index(list(self.relative_schema_path)[:-1]),
+ _utils.indent(pschema),
+ _utils.format_as_index(self.relative_path),
+ _utils.indent(pinstance),
+ )
+
+ if PY3:
+ __str__ = __unicode__
+
+ @classmethod
+ def create_from(cls, other):
+ return cls(**other._contents())
+
+ @property
+ def absolute_path(self):
+ parent = self.parent
+ if parent is None:
+ return self.relative_path
+
+ path = deque(self.relative_path)
+ path.extendleft(parent.absolute_path)
+ return path
+
+ @property
+ def absolute_schema_path(self):
+ parent = self.parent
+ if parent is None:
+ return self.relative_schema_path
+
+ path = deque(self.relative_schema_path)
+ path.extendleft(parent.absolute_schema_path)
+ return path
+
+ def _set(self, **kwargs):
+ for k, v in iteritems(kwargs):
+ if getattr(self, k) is _unset:
+ setattr(self, k, v)
+
+ def _contents(self):
+ attrs = (
+ "message", "cause", "context", "validator", "validator_value",
+ "path", "schema_path", "instance", "schema", "parent",
+ )
+ return dict((attr, getattr(self, attr)) for attr in attrs)
+
+
+class ValidationError(_Error):
+ pass
+
+
+class SchemaError(_Error):
+ pass
+
+
+class RefResolutionError(Exception):
+ pass
+
+
+class UnknownType(Exception):
+ def __init__(self, type, instance, schema):
+ self.type = type
+ self.instance = instance
+ self.schema = schema
+
+ def __str__(self):
+ return unicode(self).encode("utf-8")
+
+ def __unicode__(self):
+ pschema = pprint.pformat(self.schema, width=72)
+ pinstance = pprint.pformat(self.instance, width=72)
+ return textwrap.dedent("""
+ Unknown type %r for validator with schema:
+ %s
+
+ While checking instance:
+ %s
+ """.rstrip()
+ ) % (self.type, _utils.indent(pschema), _utils.indent(pinstance))
+
+ if PY3:
+ __str__ = __unicode__
+
+
+
+class FormatError(Exception):
+ def __init__(self, message, cause=None):
+ super(FormatError, self).__init__(message, cause)
+ self.message = message
+ self.cause = self.__cause__ = cause
+
+ def __str__(self):
+ return self.message.encode("utf-8")
+
+ def __unicode__(self):
+ return self.message
+
+ if PY3:
+ __str__ = __unicode__
+
+
+class ErrorTree(object):
+ """
+ ErrorTrees make it easier to check which validations failed.
+
+ """
+
+ _instance = _unset
+
+ def __init__(self, errors=()):
+ self.errors = {}
+ self._contents = defaultdict(self.__class__)
+
+ for error in errors:
+ container = self
+ for element in error.path:
+ container = container[element]
+ container.errors[error.validator] = error
+
+ self._instance = error.instance
+
+ def __contains__(self, index):
+ """
+ Check whether ``instance[index]`` has any errors.
+
+ """
+
+ return index in self._contents
+
+ def __getitem__(self, index):
+ """
+ Retrieve the child tree one level down at the given ``index``.
+
+ If the index is not in the instance that this tree corresponds to and
+ is not known by this tree, whatever error would be raised by
+ ``instance.__getitem__`` will be propagated (usually this is some
+        subclass of :class:`LookupError`).
+
+ """
+
+ if self._instance is not _unset and index not in self:
+ self._instance[index]
+ return self._contents[index]
+
+ def __setitem__(self, index, value):
+ self._contents[index] = value
+
+ def __iter__(self):
+ """
+ Iterate (non-recursively) over the indices in the instance with errors.
+
+ """
+
+ return iter(self._contents)
+
+ def __len__(self):
+ """
+ Same as :attr:`total_errors`.
+
+ """
+
+ return self.total_errors
+
+ def __repr__(self):
+ return "<%s (%s total errors)>" % (self.__class__.__name__, len(self))
+
+ @property
+ def total_errors(self):
+ """
+ The total number of errors in the entire tree, including children.
+
+ """
+
+ child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
+ return len(self.errors) + child_errors
+
+
+def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
+ def relevance(error):
+ validator = error.validator
+ return -len(error.path), validator not in weak, validator in strong
+ return relevance
+
+
+relevance = by_relevance()
+
+
+def best_match(errors, key=relevance):
+ errors = iter(errors)
+ best = next(errors, None)
+ if best is None:
+ return
+ best = max(itertools.chain([best], errors), key=key)
+
+ while best.context:
+ best = min(best.context, key=key)
+ return best
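``best_match`` picks the shallowest, most specific error using the ``by_relevance`` key defined above; for example:

    from jsonschema import Draft4Validator
    from jsonschema.exceptions import best_match

    schema = {"properties": {"foo": {"type": "number"}}}
    errors = Draft4Validator(schema).iter_errors({"foo": "bar"})
    print(best_match(errors).message)  # -> 'bar' is not of type 'number'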
diff --git a/lib/spack/external/jsonschema/schemas/draft3.json b/lib/spack/external/jsonschema/schemas/draft3.json
new file mode 100644
index 0000000000..5bcefe30d5
--- /dev/null
+++ b/lib/spack/external/jsonschema/schemas/draft3.json
@@ -0,0 +1,201 @@
+{
+ "$schema": "http://json-schema.org/draft-03/schema#",
+ "dependencies": {
+ "exclusiveMaximum": "maximum",
+ "exclusiveMinimum": "minimum"
+ },
+ "id": "http://json-schema.org/draft-03/schema#",
+ "properties": {
+ "$ref": {
+ "format": "uri",
+ "type": "string"
+ },
+ "$schema": {
+ "format": "uri",
+ "type": "string"
+ },
+ "additionalItems": {
+ "default": {},
+ "type": [
+ {
+ "$ref": "#"
+ },
+ "boolean"
+ ]
+ },
+ "additionalProperties": {
+ "default": {},
+ "type": [
+ {
+ "$ref": "#"
+ },
+ "boolean"
+ ]
+ },
+ "default": {
+ "type": "any"
+ },
+ "dependencies": {
+ "additionalProperties": {
+ "items": {
+ "type": "string"
+ },
+ "type": [
+ "string",
+ "array",
+ {
+ "$ref": "#"
+ }
+ ]
+ },
+ "default": {},
+ "type": [
+ "string",
+ "array",
+ "object"
+ ]
+ },
+ "description": {
+ "type": "string"
+ },
+ "disallow": {
+ "items": {
+ "type": [
+ "string",
+ {
+ "$ref": "#"
+ }
+ ]
+ },
+ "type": [
+ "string",
+ "array"
+ ],
+ "uniqueItems": true
+ },
+ "divisibleBy": {
+ "default": 1,
+ "exclusiveMinimum": true,
+ "minimum": 0,
+ "type": "number"
+ },
+ "enum": {
+ "minItems": 1,
+ "type": "array",
+ "uniqueItems": true
+ },
+ "exclusiveMaximum": {
+ "default": false,
+ "type": "boolean"
+ },
+ "exclusiveMinimum": {
+ "default": false,
+ "type": "boolean"
+ },
+ "extends": {
+ "default": {},
+ "items": {
+ "$ref": "#"
+ },
+ "type": [
+ {
+ "$ref": "#"
+ },
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "id": {
+ "format": "uri",
+ "type": "string"
+ },
+ "items": {
+ "default": {},
+ "items": {
+ "$ref": "#"
+ },
+ "type": [
+ {
+ "$ref": "#"
+ },
+ "array"
+ ]
+ },
+ "maxDecimal": {
+ "minimum": 0,
+ "type": "number"
+ },
+ "maxItems": {
+ "minimum": 0,
+ "type": "integer"
+ },
+ "maxLength": {
+ "type": "integer"
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "minItems": {
+ "default": 0,
+ "minimum": 0,
+ "type": "integer"
+ },
+ "minLength": {
+ "default": 0,
+ "minimum": 0,
+ "type": "integer"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "pattern": {
+ "format": "regex",
+ "type": "string"
+ },
+ "patternProperties": {
+ "additionalProperties": {
+ "$ref": "#"
+ },
+ "default": {},
+ "type": "object"
+ },
+ "properties": {
+ "additionalProperties": {
+ "$ref": "#",
+ "type": "object"
+ },
+ "default": {},
+ "type": "object"
+ },
+ "required": {
+ "default": false,
+ "type": "boolean"
+ },
+ "title": {
+ "type": "string"
+ },
+ "type": {
+ "default": "any",
+ "items": {
+ "type": [
+ "string",
+ {
+ "$ref": "#"
+ }
+ ]
+ },
+ "type": [
+ "string",
+ "array"
+ ],
+ "uniqueItems": true
+ },
+ "uniqueItems": {
+ "default": false,
+ "type": "boolean"
+ }
+ },
+ "type": "object"
+}
diff --git a/lib/spack/external/jsonschema/schemas/draft4.json b/lib/spack/external/jsonschema/schemas/draft4.json
new file mode 100644
index 0000000000..fead5cefab
--- /dev/null
+++ b/lib/spack/external/jsonschema/schemas/draft4.json
@@ -0,0 +1,221 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "default": {},
+ "definitions": {
+ "positiveInteger": {
+ "minimum": 0,
+ "type": "integer"
+ },
+ "positiveIntegerDefault0": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/positiveInteger"
+ },
+ {
+ "default": 0
+ }
+ ]
+ },
+ "schemaArray": {
+ "items": {
+ "$ref": "#"
+ },
+ "minItems": 1,
+ "type": "array"
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "items": {
+ "type": "string"
+ },
+ "minItems": 1,
+ "type": "array",
+ "uniqueItems": true
+ }
+ },
+ "dependencies": {
+ "exclusiveMaximum": [
+ "maximum"
+ ],
+ "exclusiveMinimum": [
+ "minimum"
+ ]
+ },
+ "description": "Core schema meta-schema",
+ "id": "http://json-schema.org/draft-04/schema#",
+ "properties": {
+ "$schema": {
+ "format": "uri",
+ "type": "string"
+ },
+ "additionalItems": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "$ref": "#"
+ }
+ ],
+ "default": {}
+ },
+ "additionalProperties": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "$ref": "#"
+ }
+ ],
+ "default": {}
+ },
+ "allOf": {
+ "$ref": "#/definitions/schemaArray"
+ },
+ "anyOf": {
+ "$ref": "#/definitions/schemaArray"
+ },
+ "default": {},
+ "definitions": {
+ "additionalProperties": {
+ "$ref": "#"
+ },
+ "default": {},
+ "type": "object"
+ },
+ "dependencies": {
+ "additionalProperties": {
+ "anyOf": [
+ {
+ "$ref": "#"
+ },
+ {
+ "$ref": "#/definitions/stringArray"
+ }
+ ]
+ },
+ "type": "object"
+ },
+ "description": {
+ "type": "string"
+ },
+ "enum": {
+ "minItems": 1,
+ "type": "array",
+ "uniqueItems": true
+ },
+ "exclusiveMaximum": {
+ "default": false,
+ "type": "boolean"
+ },
+ "exclusiveMinimum": {
+ "default": false,
+ "type": "boolean"
+ },
+ "id": {
+ "format": "uri",
+ "type": "string"
+ },
+ "items": {
+ "anyOf": [
+ {
+ "$ref": "#"
+ },
+ {
+ "$ref": "#/definitions/schemaArray"
+ }
+ ],
+ "default": {}
+ },
+ "maxItems": {
+ "$ref": "#/definitions/positiveInteger"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/positiveInteger"
+ },
+ "maxProperties": {
+ "$ref": "#/definitions/positiveInteger"
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "minItems": {
+ "$ref": "#/definitions/positiveIntegerDefault0"
+ },
+ "minLength": {
+ "$ref": "#/definitions/positiveIntegerDefault0"
+ },
+ "minProperties": {
+ "$ref": "#/definitions/positiveIntegerDefault0"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "multipleOf": {
+ "exclusiveMinimum": true,
+ "minimum": 0,
+ "type": "number"
+ },
+ "not": {
+ "$ref": "#"
+ },
+ "oneOf": {
+ "$ref": "#/definitions/schemaArray"
+ },
+ "pattern": {
+ "format": "regex",
+ "type": "string"
+ },
+ "patternProperties": {
+ "additionalProperties": {
+ "$ref": "#"
+ },
+ "default": {},
+ "type": "object"
+ },
+ "properties": {
+ "additionalProperties": {
+ "$ref": "#"
+ },
+ "default": {},
+ "type": "object"
+ },
+ "required": {
+ "$ref": "#/definitions/stringArray"
+ },
+ "title": {
+ "type": "string"
+ },
+ "type": {
+ "anyOf": [
+ {
+ "$ref": "#/definitions/simpleTypes"
+ },
+ {
+ "items": {
+ "$ref": "#/definitions/simpleTypes"
+ },
+ "minItems": 1,
+ "type": "array",
+ "uniqueItems": true
+ }
+ ]
+ },
+ "uniqueItems": {
+ "default": false,
+ "type": "boolean"
+ }
+ },
+ "type": "object"
+}
diff --git a/lib/spack/external/jsonschema/tests/__init__.py b/lib/spack/external/jsonschema/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/__init__.py
diff --git a/lib/spack/external/jsonschema/tests/compat.py b/lib/spack/external/jsonschema/tests/compat.py
new file mode 100644
index 0000000000..b37483f5dd
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/compat.py
@@ -0,0 +1,15 @@
+import sys
+
+
+if sys.version_info[:2] < (2, 7): # pragma: no cover
+ import unittest2 as unittest
+else:
+ import unittest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+# flake8: noqa
diff --git a/lib/spack/external/jsonschema/tests/test_cli.py b/lib/spack/external/jsonschema/tests/test_cli.py
new file mode 100644
index 0000000000..f625ca989d
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/test_cli.py
@@ -0,0 +1,110 @@
+from jsonschema import Draft4Validator, ValidationError, cli
+from jsonschema.compat import StringIO
+from jsonschema.tests.compat import mock, unittest
+
+
+def fake_validator(*errors):
+ errors = list(reversed(errors))
+
+ class FakeValidator(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def iter_errors(self, instance):
+ if errors:
+ return errors.pop()
+ return []
+ return FakeValidator
+
+
+class TestParser(unittest.TestCase):
+ FakeValidator = fake_validator()
+
+ def setUp(self):
+ mock_open = mock.mock_open()
+ patch_open = mock.patch.object(cli, "open", mock_open, create=True)
+ patch_open.start()
+ self.addCleanup(patch_open.stop)
+
+ mock_json_load = mock.Mock()
+ mock_json_load.return_value = {}
+ patch_json_load = mock.patch("json.load")
+ patch_json_load.start()
+ self.addCleanup(patch_json_load.stop)
+
+ def test_find_validator_by_fully_qualified_object_name(self):
+ arguments = cli.parse_args(
+ [
+ "--validator",
+ "jsonschema.tests.test_cli.TestParser.FakeValidator",
+ "--instance", "foo.json",
+ "schema.json",
+ ]
+ )
+ self.assertIs(arguments["validator"], self.FakeValidator)
+
+ def test_find_validator_in_jsonschema(self):
+ arguments = cli.parse_args(
+ [
+ "--validator", "Draft4Validator",
+ "--instance", "foo.json",
+ "schema.json",
+ ]
+ )
+ self.assertIs(arguments["validator"], Draft4Validator)
+
+
+class TestCLI(unittest.TestCase):
+ def test_successful_validation(self):
+ stdout, stderr = StringIO(), StringIO()
+ exit_code = cli.run(
+ {
+ "validator": fake_validator(),
+ "schema": {},
+ "instances": [1],
+ "error_format": "{error.message}",
+ },
+ stdout=stdout,
+ stderr=stderr,
+ )
+ self.assertFalse(stdout.getvalue())
+ self.assertFalse(stderr.getvalue())
+ self.assertEqual(exit_code, 0)
+
+ def test_unsuccessful_validation(self):
+ error = ValidationError("I am an error!", instance=1)
+ stdout, stderr = StringIO(), StringIO()
+ exit_code = cli.run(
+ {
+ "validator": fake_validator([error]),
+ "schema": {},
+ "instances": [1],
+ "error_format": "{error.instance} - {error.message}",
+ },
+ stdout=stdout,
+ stderr=stderr,
+ )
+ self.assertFalse(stdout.getvalue())
+ self.assertEqual(stderr.getvalue(), "1 - I am an error!")
+ self.assertEqual(exit_code, 1)
+
+ def test_unsuccessful_validation_multiple_instances(self):
+ first_errors = [
+ ValidationError("9", instance=1),
+ ValidationError("8", instance=1),
+ ]
+ second_errors = [ValidationError("7", instance=2)]
+ stdout, stderr = StringIO(), StringIO()
+ exit_code = cli.run(
+ {
+ "validator": fake_validator(first_errors, second_errors),
+ "schema": {},
+ "instances": [1, 2],
+ "error_format": "{error.instance} - {error.message}\t",
+ },
+ stdout=stdout,
+ stderr=stderr,
+ )
+ self.assertFalse(stdout.getvalue())
+ self.assertEqual(stderr.getvalue(), "1 - 9\t1 - 8\t2 - 7\t")
+ self.assertEqual(exit_code, 1)
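
Outside of the mocks above, ``cli.run`` accepts the same argument dict that ``cli.parse_args`` produces; a sketch with a real validator (the schema and instances are illustrative):

    from jsonschema import Draft4Validator, cli
    from jsonschema.compat import StringIO

    stdout, stderr = StringIO(), StringIO()
    exit_code = cli.run(
        {
            "validator": Draft4Validator,
            "schema": {"type": "integer"},
            "instances": [12, "not an integer"],
            "error_format": "{error.instance} - {error.message}\n",
        },
        stdout=stdout,
        stderr=stderr,
    )
    # exit_code is 1; stderr holds the formatted error for the second instance.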
diff --git a/lib/spack/external/jsonschema/tests/test_exceptions.py b/lib/spack/external/jsonschema/tests/test_exceptions.py
new file mode 100644
index 0000000000..9e5793c628
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/test_exceptions.py
@@ -0,0 +1,382 @@
+import textwrap
+
+from jsonschema import Draft4Validator, exceptions
+from jsonschema.compat import PY3
+from jsonschema.tests.compat import mock, unittest
+
+
+class TestBestMatch(unittest.TestCase):
+ def best_match(self, errors):
+ errors = list(errors)
+ best = exceptions.best_match(errors)
+ reversed_best = exceptions.best_match(reversed(errors))
+ self.assertEqual(
+ best,
+ reversed_best,
+ msg="Didn't return a consistent best match!\n"
+ "Got: {0}\n\nThen: {1}".format(best, reversed_best),
+ )
+ return best
+
+ def test_shallower_errors_are_better_matches(self):
+ validator = Draft4Validator(
+ {
+ "properties" : {
+ "foo" : {
+ "minProperties" : 2,
+ "properties" : {"bar" : {"type" : "object"}},
+ }
+ }
+ }
+ )
+ best = self.best_match(validator.iter_errors({"foo" : {"bar" : []}}))
+ self.assertEqual(best.validator, "minProperties")
+
+ def test_oneOf_and_anyOf_are_weak_matches(self):
+ """
+        A property you *must* match is probably a better match than one you
+        only have to match part of.
+
+ """
+
+ validator = Draft4Validator(
+ {
+ "minProperties" : 2,
+ "anyOf" : [{"type" : "string"}, {"type" : "number"}],
+ "oneOf" : [{"type" : "string"}, {"type" : "number"}],
+ }
+ )
+ best = self.best_match(validator.iter_errors({}))
+ self.assertEqual(best.validator, "minProperties")
+
+ def test_if_the_most_relevant_error_is_anyOf_it_is_traversed(self):
+ """
+ If the most relevant error is an anyOf, then we traverse its context
+ and select the otherwise *least* relevant error, since in this case
+        that means the most specific, deepest error inside the instance.
+
+ I.e. since only one of the schemas must match, we look for the most
+ relevant one.
+
+ """
+
+ validator = Draft4Validator(
+ {
+ "properties" : {
+ "foo" : {
+ "anyOf" : [
+ {"type" : "string"},
+ {"properties" : {"bar" : {"type" : "array"}}},
+ ],
+ },
+ },
+ },
+ )
+ best = self.best_match(validator.iter_errors({"foo" : {"bar" : 12}}))
+ self.assertEqual(best.validator_value, "array")
+
+ def test_if_the_most_relevant_error_is_oneOf_it_is_traversed(self):
+ """
+        If the most relevant error is a oneOf, then we traverse its context
+        and select the otherwise *least* relevant error, since in this case
+        that means the most specific, deepest error inside the instance.
+
+ I.e. since only one of the schemas must match, we look for the most
+ relevant one.
+
+ """
+
+ validator = Draft4Validator(
+ {
+ "properties" : {
+ "foo" : {
+ "oneOf" : [
+ {"type" : "string"},
+ {"properties" : {"bar" : {"type" : "array"}}},
+ ],
+ },
+ },
+ },
+ )
+ best = self.best_match(validator.iter_errors({"foo" : {"bar" : 12}}))
+ self.assertEqual(best.validator_value, "array")
+
+ def test_if_the_most_relevant_error_is_allOf_it_is_traversed(self):
+ """
+ Now, if the error is allOf, we traverse but select the *most* relevant
+        error from the context, because all schemas here must match anyway.
+
+ """
+
+ validator = Draft4Validator(
+ {
+ "properties" : {
+ "foo" : {
+ "allOf" : [
+ {"type" : "string"},
+ {"properties" : {"bar" : {"type" : "array"}}},
+ ],
+ },
+ },
+ },
+ )
+ best = self.best_match(validator.iter_errors({"foo" : {"bar" : 12}}))
+ self.assertEqual(best.validator_value, "string")
+
+ def test_nested_context_for_oneOf(self):
+ validator = Draft4Validator(
+ {
+ "properties" : {
+ "foo" : {
+ "oneOf" : [
+ {"type" : "string"},
+ {
+ "oneOf" : [
+ {"type" : "string"},
+ {
+ "properties" : {
+ "bar" : {"type" : "array"}
+ },
+ },
+ ],
+ },
+ ],
+ },
+ },
+ },
+ )
+ best = self.best_match(validator.iter_errors({"foo" : {"bar" : 12}}))
+ self.assertEqual(best.validator_value, "array")
+
+ def test_one_error(self):
+ validator = Draft4Validator({"minProperties" : 2})
+ error, = validator.iter_errors({})
+ self.assertEqual(
+ exceptions.best_match(validator.iter_errors({})).validator,
+ "minProperties",
+ )
+
+ def test_no_errors(self):
+ validator = Draft4Validator({})
+ self.assertIsNone(exceptions.best_match(validator.iter_errors({})))
+
+
+class TestByRelevance(unittest.TestCase):
+ def test_short_paths_are_better_matches(self):
+ shallow = exceptions.ValidationError("Oh no!", path=["baz"])
+ deep = exceptions.ValidationError("Oh yes!", path=["foo", "bar"])
+ match = max([shallow, deep], key=exceptions.relevance)
+ self.assertIs(match, shallow)
+
+ match = max([deep, shallow], key=exceptions.relevance)
+ self.assertIs(match, shallow)
+
+ def test_global_errors_are_even_better_matches(self):
+ shallow = exceptions.ValidationError("Oh no!", path=[])
+ deep = exceptions.ValidationError("Oh yes!", path=["foo"])
+
+ errors = sorted([shallow, deep], key=exceptions.relevance)
+ self.assertEqual(
+ [list(error.path) for error in errors],
+ [["foo"], []],
+ )
+
+ errors = sorted([deep, shallow], key=exceptions.relevance)
+ self.assertEqual(
+ [list(error.path) for error in errors],
+ [["foo"], []],
+ )
+
+ def test_weak_validators_are_lower_priority(self):
+ weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
+ normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
+
+ best_match = exceptions.by_relevance(weak="a")
+
+ match = max([weak, normal], key=best_match)
+ self.assertIs(match, normal)
+
+ match = max([normal, weak], key=best_match)
+ self.assertIs(match, normal)
+
+ def test_strong_validators_are_higher_priority(self):
+ weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
+ normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
+ strong = exceptions.ValidationError("Oh fine!", path=[], validator="c")
+
+ best_match = exceptions.by_relevance(weak="a", strong="c")
+
+ match = max([weak, normal, strong], key=best_match)
+ self.assertIs(match, strong)
+
+ match = max([strong, normal, weak], key=best_match)
+ self.assertIs(match, strong)
+
+
+class TestErrorTree(unittest.TestCase):
+ def test_it_knows_how_many_total_errors_it_contains(self):
+ errors = [mock.MagicMock() for _ in range(8)]
+ tree = exceptions.ErrorTree(errors)
+ self.assertEqual(tree.total_errors, 8)
+
+ def test_it_contains_an_item_if_the_item_had_an_error(self):
+ errors = [exceptions.ValidationError("a message", path=["bar"])]
+ tree = exceptions.ErrorTree(errors)
+ self.assertIn("bar", tree)
+
+ def test_it_does_not_contain_an_item_if_the_item_had_no_error(self):
+ errors = [exceptions.ValidationError("a message", path=["bar"])]
+ tree = exceptions.ErrorTree(errors)
+ self.assertNotIn("foo", tree)
+
+ def test_validators_that_failed_appear_in_errors_dict(self):
+ error = exceptions.ValidationError("a message", validator="foo")
+ tree = exceptions.ErrorTree([error])
+ self.assertEqual(tree.errors, {"foo" : error})
+
+ def test_it_creates_a_child_tree_for_each_nested_path(self):
+ errors = [
+ exceptions.ValidationError("a bar message", path=["bar"]),
+ exceptions.ValidationError("a bar -> 0 message", path=["bar", 0]),
+ ]
+ tree = exceptions.ErrorTree(errors)
+ self.assertIn(0, tree["bar"])
+ self.assertNotIn(1, tree["bar"])
+
+ def test_children_have_their_errors_dicts_built(self):
+ e1, e2 = (
+ exceptions.ValidationError("1", validator="foo", path=["bar", 0]),
+ exceptions.ValidationError("2", validator="quux", path=["bar", 0]),
+ )
+ tree = exceptions.ErrorTree([e1, e2])
+ self.assertEqual(tree["bar"][0].errors, {"foo" : e1, "quux" : e2})
+
+ def test_it_does_not_contain_subtrees_that_are_not_in_the_instance(self):
+ error = exceptions.ValidationError("123", validator="foo", instance=[])
+ tree = exceptions.ErrorTree([error])
+
+ with self.assertRaises(IndexError):
+ tree[0]
+
+ def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self):
+ """
+ If a validator is dumb (like :validator:`required` in draft 3) and
+ refers to a path that isn't in the instance, the tree still properly
+ returns a subtree for that path.
+
+ """
+
+ error = exceptions.ValidationError(
+ "a message", validator="foo", instance={}, path=["foo"],
+ )
+ tree = exceptions.ErrorTree([error])
+ self.assertIsInstance(tree["foo"], exceptions.ErrorTree)
+
+
+class TestErrorReprStr(unittest.TestCase):
+ def make_error(self, **kwargs):
+ defaults = dict(
+ message=u"hello",
+ validator=u"type",
+ validator_value=u"string",
+ instance=5,
+ schema={u"type": u"string"},
+ )
+ defaults.update(kwargs)
+ return exceptions.ValidationError(**defaults)
+
+ def assertShows(self, expected, **kwargs):
+ if PY3:
+ expected = expected.replace("u'", "'")
+ expected = textwrap.dedent(expected).rstrip("\n")
+
+ error = self.make_error(**kwargs)
+ message_line, _, rest = str(error).partition("\n")
+ self.assertEqual(message_line, error.message)
+ self.assertEqual(rest, expected)
+
+ def test_repr(self):
+ self.assertEqual(
+ repr(exceptions.ValidationError(message="Hello!")),
+ "<ValidationError: %r>" % "Hello!",
+ )
+
+ def test_unset_error(self):
+ error = exceptions.ValidationError("message")
+ self.assertEqual(str(error), "message")
+
+ kwargs = {
+ "validator": "type",
+ "validator_value": "string",
+ "instance": 5,
+ "schema": {"type": "string"}
+ }
+ # Just the message should show if any of the attributes are unset
+ for attr in kwargs:
+ k = dict(kwargs)
+ del k[attr]
+ error = exceptions.ValidationError("message", **k)
+ self.assertEqual(str(error), "message")
+
+ def test_empty_paths(self):
+ self.assertShows(
+ """
+ Failed validating u'type' in schema:
+ {u'type': u'string'}
+
+ On instance:
+ 5
+ """,
+ path=[],
+ schema_path=[],
+ )
+
+ def test_one_item_paths(self):
+ self.assertShows(
+ """
+ Failed validating u'type' in schema:
+ {u'type': u'string'}
+
+ On instance[0]:
+ 5
+ """,
+ path=[0],
+ schema_path=["items"],
+ )
+
+ def test_multiple_item_paths(self):
+ self.assertShows(
+ """
+ Failed validating u'type' in schema[u'items'][0]:
+ {u'type': u'string'}
+
+ On instance[0][u'a']:
+ 5
+ """,
+ path=[0, u"a"],
+ schema_path=[u"items", 0, 1],
+ )
+
+ def test_uses_pprint(self):
+ with mock.patch("pprint.pformat") as pformat:
+ str(self.make_error())
+ self.assertEqual(pformat.call_count, 2) # schema + instance
+
+    def test_str_works_with_instances_having_overridden_eq_operator(self):
+ """
+ Check for https://github.com/Julian/jsonschema/issues/164 which
+ rendered exceptions unusable when a `ValidationError` involved
+ instances with an `__eq__` method that returned truthy values.
+
+ """
+
+ instance = mock.MagicMock()
+ error = exceptions.ValidationError(
+ "a message",
+ validator="foo",
+ instance=instance,
+ validator_value="some",
+ schema="schema",
+ )
+ str(error)
+ self.assertFalse(instance.__eq__.called)
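
The traversal behavior tested above maps directly onto usage; a small sketch of ``best_match`` descending into an anyOf context and of ``ErrorTree`` indexing errors by instance path, reusing the schema from the anyOf test:

    from jsonschema import Draft4Validator, exceptions

    schema = {
        "properties": {
            "foo": {
                "anyOf": [
                    {"type": "string"},
                    {"properties": {"bar": {"type": "array"}}},
                ],
            },
        },
    }
    errors = list(Draft4Validator(schema).iter_errors({"foo": {"bar": 12}}))

    best = exceptions.best_match(errors)
    assert best.validator_value == "array"  # the deepest error inside the anyOf

    tree = exceptions.ErrorTree(errors)
    assert "foo" in tree                    # errors are indexed by instance path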
diff --git a/lib/spack/external/jsonschema/tests/test_format.py b/lib/spack/external/jsonschema/tests/test_format.py
new file mode 100644
index 0000000000..8392ca1de3
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/test_format.py
@@ -0,0 +1,63 @@
+"""
+Tests for the parts of jsonschema related to the :validator:`format` property.
+
+"""
+
+from jsonschema.tests.compat import mock, unittest
+
+from jsonschema import FormatError, ValidationError, FormatChecker
+from jsonschema.validators import Draft4Validator
+
+
+class TestFormatChecker(unittest.TestCase):
+ def setUp(self):
+ self.fn = mock.Mock()
+
+ def test_it_can_validate_no_formats(self):
+ checker = FormatChecker(formats=())
+ self.assertFalse(checker.checkers)
+
+ def test_it_raises_a_key_error_for_unknown_formats(self):
+ with self.assertRaises(KeyError):
+ FormatChecker(formats=["o noes"])
+
+ def test_it_can_register_cls_checkers(self):
+ with mock.patch.dict(FormatChecker.checkers, clear=True):
+ FormatChecker.cls_checks("new")(self.fn)
+ self.assertEqual(FormatChecker.checkers, {"new" : (self.fn, ())})
+
+ def test_it_can_register_checkers(self):
+ checker = FormatChecker()
+ checker.checks("new")(self.fn)
+ self.assertEqual(
+ checker.checkers,
+ dict(FormatChecker.checkers, new=(self.fn, ()))
+ )
+
+ def test_it_catches_registered_errors(self):
+ checker = FormatChecker()
+ cause = self.fn.side_effect = ValueError()
+
+ checker.checks("foo", raises=ValueError)(self.fn)
+
+ with self.assertRaises(FormatError) as cm:
+ checker.check("bar", "foo")
+
+ self.assertIs(cm.exception.cause, cause)
+ self.assertIs(cm.exception.__cause__, cause)
+
+ # Unregistered errors should not be caught
+ self.fn.side_effect = AttributeError
+ with self.assertRaises(AttributeError):
+ checker.check("bar", "foo")
+
+ def test_format_error_causes_become_validation_error_causes(self):
+ checker = FormatChecker()
+ checker.checks("foo", raises=ValueError)(self.fn)
+ cause = self.fn.side_effect = ValueError()
+ validator = Draft4Validator({"format" : "foo"}, format_checker=checker)
+
+ with self.assertRaises(ValidationError) as cm:
+ validator.validate("bar")
+
+ self.assertIs(cm.exception.__cause__, cause)
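
Registering a checker outside of the tests looks the same; a sketch using a hypothetical "even" format (the format name and check function are illustrative):

    from jsonschema import Draft4Validator, FormatChecker, ValidationError

    checker = FormatChecker()

    @checker.checks("even", raises=ValueError)  # registered errors become FormatErrors
    def is_even(value):
        return int(value) % 2 == 0

    validator = Draft4Validator({"format": "even"}, format_checker=checker)
    validator.validate("4")                     # passes the format check
    try:
        validator.validate("5")                 # fails it
    except ValidationError:
        pass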
diff --git a/lib/spack/external/jsonschema/tests/test_jsonschema_test_suite.py b/lib/spack/external/jsonschema/tests/test_jsonschema_test_suite.py
new file mode 100644
index 0000000000..75c6857bc0
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/test_jsonschema_test_suite.py
@@ -0,0 +1,290 @@
+"""
+Test runner for the JSON Schema official test suite
+
+Tests comprehensive correctness of each draft's validator.
+
+See https://github.com/json-schema/JSON-Schema-Test-Suite for details.
+
+"""
+
+from contextlib import closing
+from decimal import Decimal
+import glob
+import json
+import io
+import itertools
+import os
+import re
+import subprocess
+import sys
+
+try:
+ from sys import pypy_version_info
+except ImportError:
+ pypy_version_info = None
+
+from jsonschema import (
+ FormatError, SchemaError, ValidationError, Draft3Validator,
+ Draft4Validator, FormatChecker, draft3_format_checker,
+ draft4_format_checker, validate,
+)
+from jsonschema.compat import PY3
+from jsonschema.tests.compat import mock, unittest
+import jsonschema
+
+
+REPO_ROOT = os.path.join(os.path.dirname(jsonschema.__file__), os.path.pardir)
+SUITE = os.getenv("JSON_SCHEMA_TEST_SUITE", os.path.join(REPO_ROOT, "json"))
+
+if not os.path.isdir(SUITE):
+ raise ValueError(
+ "Can't find the JSON-Schema-Test-Suite directory. Set the "
+ "'JSON_SCHEMA_TEST_SUITE' environment variable or run the tests from "
+ "alongside a checkout of the suite."
+ )
+
+TESTS_DIR = os.path.join(SUITE, "tests")
+JSONSCHEMA_SUITE = os.path.join(SUITE, "bin", "jsonschema_suite")
+
+remotes_stdout = subprocess.Popen(
+ ["python", JSONSCHEMA_SUITE, "remotes"], stdout=subprocess.PIPE,
+).stdout
+
+with closing(remotes_stdout):
+ if PY3:
+ remotes_stdout = io.TextIOWrapper(remotes_stdout)
+ REMOTES = json.load(remotes_stdout)
+
+
+def make_case(schema, data, valid, name):
+ if valid:
+ def test_case(self):
+ kwargs = getattr(self, "validator_kwargs", {})
+ validate(data, schema, cls=self.validator_class, **kwargs)
+ else:
+ def test_case(self):
+ kwargs = getattr(self, "validator_kwargs", {})
+ with self.assertRaises(ValidationError):
+ validate(data, schema, cls=self.validator_class, **kwargs)
+
+ if not PY3:
+ name = name.encode("utf-8")
+ test_case.__name__ = name
+
+ return test_case
+
+
+def maybe_skip(skip, test_case, case, test):
+ if skip is not None:
+ reason = skip(case, test)
+ if reason is not None:
+ test_case = unittest.skip(reason)(test_case)
+ return test_case
+
+
+def load_json_cases(tests_glob, ignore_glob="", basedir=TESTS_DIR, skip=None):
+ if ignore_glob:
+ ignore_glob = os.path.join(basedir, ignore_glob)
+
+ def add_test_methods(test_class):
+ ignored = set(glob.iglob(ignore_glob))
+
+ for filename in glob.iglob(os.path.join(basedir, tests_glob)):
+ if filename in ignored:
+ continue
+
+ validating, _ = os.path.splitext(os.path.basename(filename))
+ id = itertools.count(1)
+
+ with open(filename) as test_file:
+ for case in json.load(test_file):
+ for test in case["tests"]:
+ name = "test_%s_%s_%s" % (
+ validating,
+ next(id),
+ re.sub(r"[\W ]+", "_", test["description"]),
+ )
+ assert not hasattr(test_class, name), name
+
+ test_case = make_case(
+ data=test["data"],
+ schema=case["schema"],
+ valid=test["valid"],
+ name=name,
+ )
+ test_case = maybe_skip(skip, test_case, case, test)
+ setattr(test_class, name, test_case)
+
+ return test_class
+ return add_test_methods
+
+
+class TypesMixin(object):
+ @unittest.skipIf(PY3, "In Python 3 json.load always produces unicode")
+ def test_string_a_bytestring_is_a_string(self):
+ self.validator_class({"type" : "string"}).validate(b"foo")
+
+
+class DecimalMixin(object):
+ def test_it_can_validate_with_decimals(self):
+ schema = {"type" : "number"}
+ validator = self.validator_class(
+ schema, types={"number" : (int, float, Decimal)}
+ )
+
+ for valid in [1, 1.1, Decimal(1) / Decimal(8)]:
+ validator.validate(valid)
+
+ for invalid in ["foo", {}, [], True, None]:
+ with self.assertRaises(ValidationError):
+ validator.validate(invalid)
+
+
+def missing_format(checker):
+ def missing_format(case, test):
+ format = case["schema"].get("format")
+ if format not in checker.checkers:
+ return "Format checker {0!r} not found.".format(format)
+ elif (
+ format == "date-time" and
+ pypy_version_info is not None and
+ pypy_version_info[:2] <= (1, 9)
+ ):
+ # datetime.datetime is overzealous about typechecking in <=1.9
+ return "datetime.datetime is broken on this version of PyPy."
+ return missing_format
+
+
+class FormatMixin(object):
+ def test_it_returns_true_for_formats_it_does_not_know_about(self):
+ validator = self.validator_class(
+ {"format" : "carrot"}, format_checker=FormatChecker(),
+ )
+ validator.validate("bugs")
+
+ def test_it_does_not_validate_formats_by_default(self):
+ validator = self.validator_class({})
+ self.assertIsNone(validator.format_checker)
+
+ def test_it_validates_formats_if_a_checker_is_provided(self):
+ checker = mock.Mock(spec=FormatChecker)
+ validator = self.validator_class(
+ {"format" : "foo"}, format_checker=checker,
+ )
+
+ validator.validate("bar")
+
+ checker.check.assert_called_once_with("bar", "foo")
+
+ cause = ValueError()
+ checker.check.side_effect = FormatError('aoeu', cause=cause)
+
+ with self.assertRaises(ValidationError) as cm:
+ validator.validate("bar")
+ # Make sure original cause is attached
+ self.assertIs(cm.exception.cause, cause)
+
+ def test_it_validates_formats_of_any_type(self):
+ checker = mock.Mock(spec=FormatChecker)
+ validator = self.validator_class(
+ {"format" : "foo"}, format_checker=checker,
+ )
+
+ validator.validate([1, 2, 3])
+
+ checker.check.assert_called_once_with([1, 2, 3], "foo")
+
+ cause = ValueError()
+ checker.check.side_effect = FormatError('aoeu', cause=cause)
+
+ with self.assertRaises(ValidationError) as cm:
+ validator.validate([1, 2, 3])
+ # Make sure original cause is attached
+ self.assertIs(cm.exception.cause, cause)
+
+
+if sys.maxunicode == 2 ** 16 - 1: # This is a narrow build.
+ def narrow_unicode_build(case, test):
+ if "supplementary Unicode" in test["description"]:
+ return "Not running surrogate Unicode case, this Python is narrow."
+else:
+ def narrow_unicode_build(case, test): # This isn't, skip nothing.
+ return
+
+
+@load_json_cases(
+ "draft3/*.json",
+ skip=narrow_unicode_build,
+ ignore_glob="draft3/refRemote.json",
+)
+@load_json_cases(
+ "draft3/optional/format.json", skip=missing_format(draft3_format_checker)
+)
+@load_json_cases("draft3/optional/bignum.json")
+@load_json_cases("draft3/optional/zeroTerminatedFloats.json")
+class TestDraft3(unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin):
+ validator_class = Draft3Validator
+ validator_kwargs = {"format_checker" : draft3_format_checker}
+
+ def test_any_type_is_valid_for_type_any(self):
+ validator = self.validator_class({"type" : "any"})
+ validator.validate(mock.Mock())
+
+ # TODO: we're in need of more meta schema tests
+ def test_invalid_properties(self):
+ with self.assertRaises(SchemaError):
+ validate({}, {"properties": {"test": True}},
+ cls=self.validator_class)
+
+ def test_minItems_invalid_string(self):
+ with self.assertRaises(SchemaError):
+ # needs to be an integer
+ validate([1], {"minItems" : "1"}, cls=self.validator_class)
+
+
+@load_json_cases(
+ "draft4/*.json",
+ skip=narrow_unicode_build,
+ ignore_glob="draft4/refRemote.json",
+)
+@load_json_cases(
+ "draft4/optional/format.json", skip=missing_format(draft4_format_checker)
+)
+@load_json_cases("draft4/optional/bignum.json")
+@load_json_cases("draft4/optional/zeroTerminatedFloats.json")
+class TestDraft4(unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin):
+ validator_class = Draft4Validator
+ validator_kwargs = {"format_checker" : draft4_format_checker}
+
+ # TODO: we're in need of more meta schema tests
+ def test_invalid_properties(self):
+ with self.assertRaises(SchemaError):
+ validate({}, {"properties": {"test": True}},
+ cls=self.validator_class)
+
+ def test_minItems_invalid_string(self):
+ with self.assertRaises(SchemaError):
+ # needs to be an integer
+ validate([1], {"minItems" : "1"}, cls=self.validator_class)
+
+
+class RemoteRefResolutionMixin(object):
+ def setUp(self):
+ patch = mock.patch("jsonschema.validators.requests")
+ requests = patch.start()
+ requests.get.side_effect = self.resolve
+ self.addCleanup(patch.stop)
+
+ def resolve(self, reference):
+ _, _, reference = reference.partition("http://localhost:1234/")
+ return mock.Mock(**{"json.return_value" : REMOTES.get(reference)})
+
+
+@load_json_cases("draft3/refRemote.json")
+class Draft3RemoteResolution(RemoteRefResolutionMixin, unittest.TestCase):
+ validator_class = Draft3Validator
+
+
+@load_json_cases("draft4/refRemote.json")
+class Draft4RemoteResolution(RemoteRefResolutionMixin, unittest.TestCase):
+ validator_class = Draft4Validator
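
For reference, ``load_json_cases`` consumes suite files shaped like the sketch below; the only keys read above are "schema", "tests", "description", "data", and "valid" (the values here are illustrative):

    # One case from a suite file, in the shape the loader expects:
    case = {
        "description": "maxItems validation",
        "schema": {"maxItems": 2},
        "tests": [
            {"description": "shorter is valid", "data": [1], "valid": True},
            {"description": "too long is invalid", "data": [1, 2, 3], "valid": False},
        ],
    }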
diff --git a/lib/spack/external/jsonschema/tests/test_validators.py b/lib/spack/external/jsonschema/tests/test_validators.py
new file mode 100644
index 0000000000..f8692388ea
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/test_validators.py
@@ -0,0 +1,786 @@
+from collections import deque
+from contextlib import contextmanager
+import json
+
+from jsonschema import FormatChecker, ValidationError
+from jsonschema.tests.compat import mock, unittest
+from jsonschema.validators import (
+ RefResolutionError, UnknownType, Draft3Validator,
+ Draft4Validator, RefResolver, create, extend, validator_for, validate,
+)
+
+
+class TestCreateAndExtend(unittest.TestCase):
+ def setUp(self):
+ self.meta_schema = {u"properties" : {u"smelly" : {}}}
+ self.smelly = mock.MagicMock()
+ self.validators = {u"smelly" : self.smelly}
+ self.types = {u"dict" : dict}
+ self.Validator = create(
+ meta_schema=self.meta_schema,
+ validators=self.validators,
+ default_types=self.types,
+ )
+
+ self.validator_value = 12
+ self.schema = {u"smelly" : self.validator_value}
+ self.validator = self.Validator(self.schema)
+
+ def test_attrs(self):
+ self.assertEqual(self.Validator.VALIDATORS, self.validators)
+ self.assertEqual(self.Validator.META_SCHEMA, self.meta_schema)
+ self.assertEqual(self.Validator.DEFAULT_TYPES, self.types)
+
+ def test_init(self):
+ self.assertEqual(self.validator.schema, self.schema)
+
+ def test_iter_errors(self):
+ instance = "hello"
+
+ self.smelly.return_value = []
+ self.assertEqual(list(self.validator.iter_errors(instance)), [])
+
+ error = mock.Mock()
+ self.smelly.return_value = [error]
+ self.assertEqual(list(self.validator.iter_errors(instance)), [error])
+
+ self.smelly.assert_called_with(
+ self.validator, self.validator_value, instance, self.schema,
+ )
+
+ def test_if_a_version_is_provided_it_is_registered(self):
+ with mock.patch("jsonschema.validators.validates") as validates:
+ validates.side_effect = lambda version : lambda cls : cls
+ Validator = create(meta_schema={u"id" : ""}, version="my version")
+ validates.assert_called_once_with("my version")
+ self.assertEqual(Validator.__name__, "MyVersionValidator")
+
+ def test_if_a_version_is_not_provided_it_is_not_registered(self):
+ with mock.patch("jsonschema.validators.validates") as validates:
+ create(meta_schema={u"id" : "id"})
+ self.assertFalse(validates.called)
+
+ def test_extend(self):
+ validators = dict(self.Validator.VALIDATORS)
+ new = mock.Mock()
+
+ Extended = extend(self.Validator, validators={u"a new one" : new})
+
+ validators.update([(u"a new one", new)])
+ self.assertEqual(Extended.VALIDATORS, validators)
+ self.assertNotIn(u"a new one", self.Validator.VALIDATORS)
+
+ self.assertEqual(Extended.META_SCHEMA, self.Validator.META_SCHEMA)
+ self.assertEqual(Extended.DEFAULT_TYPES, self.Validator.DEFAULT_TYPES)
+
+
+class TestIterErrors(unittest.TestCase):
+ def setUp(self):
+ self.validator = Draft3Validator({})
+
+ def test_iter_errors(self):
+ instance = [1, 2]
+ schema = {
+ u"disallow" : u"array",
+ u"enum" : [["a", "b", "c"], ["d", "e", "f"]],
+ u"minItems" : 3
+ }
+
+ got = (e.message for e in self.validator.iter_errors(instance, schema))
+ expected = [
+ "%r is disallowed for [1, 2]" % (schema["disallow"],),
+ "[1, 2] is too short",
+ "[1, 2] is not one of %r" % (schema["enum"],),
+ ]
+ self.assertEqual(sorted(got), sorted(expected))
+
+ def test_iter_errors_multiple_failures_one_validator(self):
+ instance = {"foo" : 2, "bar" : [1], "baz" : 15, "quux" : "spam"}
+ schema = {
+ u"properties" : {
+ "foo" : {u"type" : "string"},
+ "bar" : {u"minItems" : 2},
+ "baz" : {u"maximum" : 10, u"enum" : [2, 4, 6, 8]},
+ }
+ }
+
+ errors = list(self.validator.iter_errors(instance, schema))
+ self.assertEqual(len(errors), 4)
+
+
+class TestValidationErrorMessages(unittest.TestCase):
+ def message_for(self, instance, schema, *args, **kwargs):
+ kwargs.setdefault("cls", Draft3Validator)
+ with self.assertRaises(ValidationError) as e:
+ validate(instance, schema, *args, **kwargs)
+ return e.exception.message
+
+ def test_single_type_failure(self):
+ message = self.message_for(instance=1, schema={u"type" : u"string"})
+ self.assertEqual(message, "1 is not of type %r" % u"string")
+
+ def test_single_type_list_failure(self):
+ message = self.message_for(instance=1, schema={u"type" : [u"string"]})
+ self.assertEqual(message, "1 is not of type %r" % u"string")
+
+ def test_multiple_type_failure(self):
+ types = u"string", u"object"
+ message = self.message_for(instance=1, schema={u"type" : list(types)})
+ self.assertEqual(message, "1 is not of type %r, %r" % types)
+
+ def test_object_without_title_type_failure(self):
+ type = {u"type" : [{u"minimum" : 3}]}
+ message = self.message_for(instance=1, schema={u"type" : [type]})
+ self.assertEqual(message, "1 is not of type %r" % (type,))
+
+ def test_object_with_name_type_failure(self):
+ name = "Foo"
+ schema = {u"type" : [{u"name" : name, u"minimum" : 3}]}
+ message = self.message_for(instance=1, schema=schema)
+ self.assertEqual(message, "1 is not of type %r" % (name,))
+
+ def test_minimum(self):
+ message = self.message_for(instance=1, schema={"minimum" : 2})
+ self.assertEqual(message, "1 is less than the minimum of 2")
+
+ def test_maximum(self):
+ message = self.message_for(instance=1, schema={"maximum" : 0})
+ self.assertEqual(message, "1 is greater than the maximum of 0")
+
+ def test_dependencies_failure_has_single_element_not_list(self):
+ depend, on = "bar", "foo"
+ schema = {u"dependencies" : {depend : on}}
+ message = self.message_for({"bar" : 2}, schema)
+ self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
+
+ def test_additionalItems_single_failure(self):
+ message = self.message_for(
+ [2], {u"items" : [], u"additionalItems" : False},
+ )
+ self.assertIn("(2 was unexpected)", message)
+
+ def test_additionalItems_multiple_failures(self):
+ message = self.message_for(
+ [1, 2, 3], {u"items" : [], u"additionalItems" : False}
+ )
+ self.assertIn("(1, 2, 3 were unexpected)", message)
+
+ def test_additionalProperties_single_failure(self):
+ additional = "foo"
+ schema = {u"additionalProperties" : False}
+ message = self.message_for({additional : 2}, schema)
+ self.assertIn("(%r was unexpected)" % (additional,), message)
+
+ def test_additionalProperties_multiple_failures(self):
+ schema = {u"additionalProperties" : False}
+ message = self.message_for(dict.fromkeys(["foo", "bar"]), schema)
+
+ self.assertIn(repr("foo"), message)
+ self.assertIn(repr("bar"), message)
+ self.assertIn("were unexpected)", message)
+
+ def test_invalid_format_default_message(self):
+ checker = FormatChecker(formats=())
+ check_fn = mock.Mock(return_value=False)
+ checker.checks(u"thing")(check_fn)
+
+ schema = {u"format" : u"thing"}
+ message = self.message_for("bla", schema, format_checker=checker)
+
+ self.assertIn(repr("bla"), message)
+ self.assertIn(repr("thing"), message)
+ self.assertIn("is not a", message)
+
+
+class TestValidationErrorDetails(unittest.TestCase):
+ # TODO: These really need unit tests for each individual validator, rather
+ # than just these higher level tests.
+ def test_anyOf(self):
+ instance = 5
+ schema = {
+ "anyOf": [
+ {"minimum": 20},
+ {"type": "string"}
+ ]
+ }
+
+ validator = Draft4Validator(schema)
+ errors = list(validator.iter_errors(instance))
+ self.assertEqual(len(errors), 1)
+ e = errors[0]
+
+ self.assertEqual(e.validator, "anyOf")
+ self.assertEqual(e.validator_value, schema["anyOf"])
+ self.assertEqual(e.instance, instance)
+ self.assertEqual(e.schema, schema)
+ self.assertIsNone(e.parent)
+
+ self.assertEqual(e.path, deque([]))
+ self.assertEqual(e.relative_path, deque([]))
+ self.assertEqual(e.absolute_path, deque([]))
+
+ self.assertEqual(e.schema_path, deque(["anyOf"]))
+ self.assertEqual(e.relative_schema_path, deque(["anyOf"]))
+ self.assertEqual(e.absolute_schema_path, deque(["anyOf"]))
+
+ self.assertEqual(len(e.context), 2)
+
+ e1, e2 = sorted_errors(e.context)
+
+ self.assertEqual(e1.validator, "minimum")
+ self.assertEqual(e1.validator_value, schema["anyOf"][0]["minimum"])
+ self.assertEqual(e1.instance, instance)
+ self.assertEqual(e1.schema, schema["anyOf"][0])
+ self.assertIs(e1.parent, e)
+
+ self.assertEqual(e1.path, deque([]))
+ self.assertEqual(e1.absolute_path, deque([]))
+ self.assertEqual(e1.relative_path, deque([]))
+
+ self.assertEqual(e1.schema_path, deque([0, "minimum"]))
+ self.assertEqual(e1.relative_schema_path, deque([0, "minimum"]))
+ self.assertEqual(
+ e1.absolute_schema_path, deque(["anyOf", 0, "minimum"]),
+ )
+
+ self.assertFalse(e1.context)
+
+ self.assertEqual(e2.validator, "type")
+ self.assertEqual(e2.validator_value, schema["anyOf"][1]["type"])
+ self.assertEqual(e2.instance, instance)
+ self.assertEqual(e2.schema, schema["anyOf"][1])
+ self.assertIs(e2.parent, e)
+
+ self.assertEqual(e2.path, deque([]))
+ self.assertEqual(e2.relative_path, deque([]))
+ self.assertEqual(e2.absolute_path, deque([]))
+
+ self.assertEqual(e2.schema_path, deque([1, "type"]))
+ self.assertEqual(e2.relative_schema_path, deque([1, "type"]))
+ self.assertEqual(e2.absolute_schema_path, deque(["anyOf", 1, "type"]))
+
+ self.assertEqual(len(e2.context), 0)
+
+ def test_type(self):
+ instance = {"foo": 1}
+ schema = {
+ "type": [
+ {"type": "integer"},
+ {
+ "type": "object",
+ "properties": {
+ "foo": {"enum": [2]}
+ }
+ }
+ ]
+ }
+
+ validator = Draft3Validator(schema)
+ errors = list(validator.iter_errors(instance))
+ self.assertEqual(len(errors), 1)
+ e = errors[0]
+
+ self.assertEqual(e.validator, "type")
+ self.assertEqual(e.validator_value, schema["type"])
+ self.assertEqual(e.instance, instance)
+ self.assertEqual(e.schema, schema)
+ self.assertIsNone(e.parent)
+
+ self.assertEqual(e.path, deque([]))
+ self.assertEqual(e.relative_path, deque([]))
+ self.assertEqual(e.absolute_path, deque([]))
+
+ self.assertEqual(e.schema_path, deque(["type"]))
+ self.assertEqual(e.relative_schema_path, deque(["type"]))
+ self.assertEqual(e.absolute_schema_path, deque(["type"]))
+
+ self.assertEqual(len(e.context), 2)
+
+ e1, e2 = sorted_errors(e.context)
+
+ self.assertEqual(e1.validator, "type")
+ self.assertEqual(e1.validator_value, schema["type"][0]["type"])
+ self.assertEqual(e1.instance, instance)
+ self.assertEqual(e1.schema, schema["type"][0])
+ self.assertIs(e1.parent, e)
+
+ self.assertEqual(e1.path, deque([]))
+ self.assertEqual(e1.relative_path, deque([]))
+ self.assertEqual(e1.absolute_path, deque([]))
+
+ self.assertEqual(e1.schema_path, deque([0, "type"]))
+ self.assertEqual(e1.relative_schema_path, deque([0, "type"]))
+ self.assertEqual(e1.absolute_schema_path, deque(["type", 0, "type"]))
+
+ self.assertFalse(e1.context)
+
+ self.assertEqual(e2.validator, "enum")
+ self.assertEqual(e2.validator_value, [2])
+ self.assertEqual(e2.instance, 1)
+ self.assertEqual(e2.schema, {u"enum" : [2]})
+ self.assertIs(e2.parent, e)
+
+ self.assertEqual(e2.path, deque(["foo"]))
+ self.assertEqual(e2.relative_path, deque(["foo"]))
+ self.assertEqual(e2.absolute_path, deque(["foo"]))
+
+ self.assertEqual(
+ e2.schema_path, deque([1, "properties", "foo", "enum"]),
+ )
+ self.assertEqual(
+ e2.relative_schema_path, deque([1, "properties", "foo", "enum"]),
+ )
+ self.assertEqual(
+ e2.absolute_schema_path,
+ deque(["type", 1, "properties", "foo", "enum"]),
+ )
+
+ self.assertFalse(e2.context)
+
+ def test_single_nesting(self):
+ instance = {"foo" : 2, "bar" : [1], "baz" : 15, "quux" : "spam"}
+ schema = {
+ "properties" : {
+ "foo" : {"type" : "string"},
+ "bar" : {"minItems" : 2},
+ "baz" : {"maximum" : 10, "enum" : [2, 4, 6, 8]},
+ }
+ }
+
+ validator = Draft3Validator(schema)
+ errors = validator.iter_errors(instance)
+ e1, e2, e3, e4 = sorted_errors(errors)
+
+ self.assertEqual(e1.path, deque(["bar"]))
+ self.assertEqual(e2.path, deque(["baz"]))
+ self.assertEqual(e3.path, deque(["baz"]))
+ self.assertEqual(e4.path, deque(["foo"]))
+
+ self.assertEqual(e1.relative_path, deque(["bar"]))
+ self.assertEqual(e2.relative_path, deque(["baz"]))
+ self.assertEqual(e3.relative_path, deque(["baz"]))
+ self.assertEqual(e4.relative_path, deque(["foo"]))
+
+ self.assertEqual(e1.absolute_path, deque(["bar"]))
+ self.assertEqual(e2.absolute_path, deque(["baz"]))
+ self.assertEqual(e3.absolute_path, deque(["baz"]))
+ self.assertEqual(e4.absolute_path, deque(["foo"]))
+
+ self.assertEqual(e1.validator, "minItems")
+ self.assertEqual(e2.validator, "enum")
+ self.assertEqual(e3.validator, "maximum")
+ self.assertEqual(e4.validator, "type")
+
+ def test_multiple_nesting(self):
+ instance = [1, {"foo" : 2, "bar" : {"baz" : [1]}}, "quux"]
+ schema = {
+ "type" : "string",
+ "items" : {
+ "type" : ["string", "object"],
+ "properties" : {
+ "foo" : {"enum" : [1, 3]},
+ "bar" : {
+ "type" : "array",
+ "properties" : {
+ "bar" : {"required" : True},
+ "baz" : {"minItems" : 2},
+ }
+ }
+ }
+ }
+ }
+
+ validator = Draft3Validator(schema)
+ errors = validator.iter_errors(instance)
+ e1, e2, e3, e4, e5, e6 = sorted_errors(errors)
+
+ self.assertEqual(e1.path, deque([]))
+ self.assertEqual(e2.path, deque([0]))
+ self.assertEqual(e3.path, deque([1, "bar"]))
+ self.assertEqual(e4.path, deque([1, "bar", "bar"]))
+ self.assertEqual(e5.path, deque([1, "bar", "baz"]))
+ self.assertEqual(e6.path, deque([1, "foo"]))
+
+ self.assertEqual(e1.schema_path, deque(["type"]))
+ self.assertEqual(e2.schema_path, deque(["items", "type"]))
+ self.assertEqual(
+ list(e3.schema_path), ["items", "properties", "bar", "type"],
+ )
+ self.assertEqual(
+ list(e4.schema_path),
+ ["items", "properties", "bar", "properties", "bar", "required"],
+ )
+ self.assertEqual(
+ list(e5.schema_path),
+ ["items", "properties", "bar", "properties", "baz", "minItems"]
+ )
+ self.assertEqual(
+ list(e6.schema_path), ["items", "properties", "foo", "enum"],
+ )
+
+ self.assertEqual(e1.validator, "type")
+ self.assertEqual(e2.validator, "type")
+ self.assertEqual(e3.validator, "type")
+ self.assertEqual(e4.validator, "required")
+ self.assertEqual(e5.validator, "minItems")
+ self.assertEqual(e6.validator, "enum")
+
+ def test_additionalProperties(self):
+ instance = {"bar": "bar", "foo": 2}
+ schema = {
+ "additionalProperties" : {"type": "integer", "minimum": 5}
+ }
+
+ validator = Draft3Validator(schema)
+ errors = validator.iter_errors(instance)
+ e1, e2 = sorted_errors(errors)
+
+ self.assertEqual(e1.path, deque(["bar"]))
+ self.assertEqual(e2.path, deque(["foo"]))
+
+ self.assertEqual(e1.validator, "type")
+ self.assertEqual(e2.validator, "minimum")
+
+ def test_patternProperties(self):
+ instance = {"bar": 1, "foo": 2}
+ schema = {
+ "patternProperties" : {
+ "bar": {"type": "string"},
+ "foo": {"minimum": 5}
+ }
+ }
+
+ validator = Draft3Validator(schema)
+ errors = validator.iter_errors(instance)
+ e1, e2 = sorted_errors(errors)
+
+ self.assertEqual(e1.path, deque(["bar"]))
+ self.assertEqual(e2.path, deque(["foo"]))
+
+ self.assertEqual(e1.validator, "type")
+ self.assertEqual(e2.validator, "minimum")
+
+ def test_additionalItems(self):
+ instance = ["foo", 1]
+ schema = {
+ "items": [],
+ "additionalItems" : {"type": "integer", "minimum": 5}
+ }
+
+ validator = Draft3Validator(schema)
+ errors = validator.iter_errors(instance)
+ e1, e2 = sorted_errors(errors)
+
+ self.assertEqual(e1.path, deque([0]))
+ self.assertEqual(e2.path, deque([1]))
+
+ self.assertEqual(e1.validator, "type")
+ self.assertEqual(e2.validator, "minimum")
+
+ def test_additionalItems_with_items(self):
+ instance = ["foo", "bar", 1]
+ schema = {
+ "items": [{}],
+ "additionalItems" : {"type": "integer", "minimum": 5}
+ }
+
+ validator = Draft3Validator(schema)
+ errors = validator.iter_errors(instance)
+ e1, e2 = sorted_errors(errors)
+
+ self.assertEqual(e1.path, deque([1]))
+ self.assertEqual(e2.path, deque([2]))
+
+ self.assertEqual(e1.validator, "type")
+ self.assertEqual(e2.validator, "minimum")
+
+
+class ValidatorTestMixin(object):
+ def setUp(self):
+ self.instance = mock.Mock()
+ self.schema = {}
+ self.resolver = mock.Mock()
+ self.validator = self.validator_class(self.schema)
+
+ def test_valid_instances_are_valid(self):
+ errors = iter([])
+
+ with mock.patch.object(
+ self.validator, "iter_errors", return_value=errors,
+ ):
+ self.assertTrue(
+ self.validator.is_valid(self.instance, self.schema)
+ )
+
+ def test_invalid_instances_are_not_valid(self):
+ errors = iter([mock.Mock()])
+
+ with mock.patch.object(
+ self.validator, "iter_errors", return_value=errors,
+ ):
+ self.assertFalse(
+ self.validator.is_valid(self.instance, self.schema)
+ )
+
+ def test_non_existent_properties_are_ignored(self):
+ instance, my_property, my_value = mock.Mock(), mock.Mock(), mock.Mock()
+ validate(instance=instance, schema={my_property : my_value})
+
+ def test_it_creates_a_ref_resolver_if_not_provided(self):
+ self.assertIsInstance(self.validator.resolver, RefResolver)
+
+ def test_it_delegates_to_a_ref_resolver(self):
+ resolver = RefResolver("", {})
+ schema = {"$ref" : mock.Mock()}
+
+ @contextmanager
+ def resolving():
+ yield {"type": "integer"}
+
+ with mock.patch.object(resolver, "resolving") as resolve:
+ resolve.return_value = resolving()
+ with self.assertRaises(ValidationError):
+ self.validator_class(schema, resolver=resolver).validate(None)
+
+ resolve.assert_called_once_with(schema["$ref"])
+
+ def test_is_type_is_true_for_valid_type(self):
+ self.assertTrue(self.validator.is_type("foo", "string"))
+
+ def test_is_type_is_false_for_invalid_type(self):
+ self.assertFalse(self.validator.is_type("foo", "array"))
+
+ def test_is_type_evades_bool_inheriting_from_int(self):
+ self.assertFalse(self.validator.is_type(True, "integer"))
+ self.assertFalse(self.validator.is_type(True, "number"))
+
+ def test_is_type_raises_exception_for_unknown_type(self):
+ with self.assertRaises(UnknownType):
+ self.validator.is_type("foo", object())
+
+
+class TestDraft3Validator(ValidatorTestMixin, unittest.TestCase):
+ validator_class = Draft3Validator
+
+ def test_is_type_is_true_for_any_type(self):
+ self.assertTrue(self.validator.is_valid(mock.Mock(), {"type": "any"}))
+
+ def test_is_type_does_not_evade_bool_if_it_is_being_tested(self):
+ self.assertTrue(self.validator.is_type(True, "boolean"))
+ self.assertTrue(self.validator.is_valid(True, {"type": "any"}))
+
+ def test_non_string_custom_types(self):
+ schema = {'type': [None]}
+ cls = self.validator_class(schema, types={None: type(None)})
+ cls.validate(None, schema)
+
+
+class TestDraft4Validator(ValidatorTestMixin, unittest.TestCase):
+ validator_class = Draft4Validator
+
+
+class TestBuiltinFormats(unittest.TestCase):
+ """
+ The built-in (specification-defined) formats do not raise type errors.
+
+ If an instance or value is not a string, it should be ignored.
+
+ """
+
+
+for format in FormatChecker.checkers:
+ def test(self, format=format):
+ v = Draft4Validator({"format": format}, format_checker=FormatChecker())
+ v.validate(123)
+
+ name = "test_{0}_ignores_non_strings".format(format)
+ test.__name__ = name
+ setattr(TestBuiltinFormats, name, test)
+ del test # Ugh py.test. Stop discovering top level tests.
+
+
+class TestValidatorFor(unittest.TestCase):
+ def test_draft_3(self):
+ schema = {"$schema" : "http://json-schema.org/draft-03/schema"}
+ self.assertIs(validator_for(schema), Draft3Validator)
+
+ schema = {"$schema" : "http://json-schema.org/draft-03/schema#"}
+ self.assertIs(validator_for(schema), Draft3Validator)
+
+ def test_draft_4(self):
+ schema = {"$schema" : "http://json-schema.org/draft-04/schema"}
+ self.assertIs(validator_for(schema), Draft4Validator)
+
+ schema = {"$schema" : "http://json-schema.org/draft-04/schema#"}
+ self.assertIs(validator_for(schema), Draft4Validator)
+
+ def test_custom_validator(self):
+ Validator = create(meta_schema={"id" : "meta schema id"}, version="12")
+ schema = {"$schema" : "meta schema id"}
+ self.assertIs(validator_for(schema), Validator)
+
+ def test_validator_for_jsonschema_default(self):
+ self.assertIs(validator_for({}), Draft4Validator)
+
+ def test_validator_for_custom_default(self):
+ self.assertIs(validator_for({}, default=None), None)
+
+
+class TestValidate(unittest.TestCase):
+ def test_draft3_validator_is_chosen(self):
+ schema = {"$schema" : "http://json-schema.org/draft-03/schema#"}
+ with mock.patch.object(Draft3Validator, "check_schema") as chk_schema:
+ validate({}, schema)
+ chk_schema.assert_called_once_with(schema)
+ # Make sure it works without the empty fragment
+ schema = {"$schema" : "http://json-schema.org/draft-03/schema"}
+ with mock.patch.object(Draft3Validator, "check_schema") as chk_schema:
+ validate({}, schema)
+ chk_schema.assert_called_once_with(schema)
+
+ def test_draft4_validator_is_chosen(self):
+ schema = {"$schema" : "http://json-schema.org/draft-04/schema#"}
+ with mock.patch.object(Draft4Validator, "check_schema") as chk_schema:
+ validate({}, schema)
+ chk_schema.assert_called_once_with(schema)
+
+ def test_draft4_validator_is_the_default(self):
+ with mock.patch.object(Draft4Validator, "check_schema") as chk_schema:
+ validate({}, {})
+ chk_schema.assert_called_once_with({})
+
+
+class TestRefResolver(unittest.TestCase):
+
+ base_uri = ""
+ stored_uri = "foo://stored"
+ stored_schema = {"stored" : "schema"}
+
+ def setUp(self):
+ self.referrer = {}
+ self.store = {self.stored_uri : self.stored_schema}
+ self.resolver = RefResolver(self.base_uri, self.referrer, self.store)
+
+ def test_it_does_not_retrieve_schema_urls_from_the_network(self):
+ ref = Draft3Validator.META_SCHEMA["id"]
+ with mock.patch.object(self.resolver, "resolve_remote") as remote:
+ with self.resolver.resolving(ref) as resolved:
+ self.assertEqual(resolved, Draft3Validator.META_SCHEMA)
+ self.assertFalse(remote.called)
+
+ def test_it_resolves_local_refs(self):
+ ref = "#/properties/foo"
+ self.referrer["properties"] = {"foo" : object()}
+ with self.resolver.resolving(ref) as resolved:
+ self.assertEqual(resolved, self.referrer["properties"]["foo"])
+
+ def test_it_resolves_local_refs_with_id(self):
+ schema = {"id": "foo://bar/schema#", "a": {"foo": "bar"}}
+ resolver = RefResolver.from_schema(schema)
+ with resolver.resolving("#/a") as resolved:
+ self.assertEqual(resolved, schema["a"])
+ with resolver.resolving("foo://bar/schema#/a") as resolved:
+ self.assertEqual(resolved, schema["a"])
+
+ def test_it_retrieves_stored_refs(self):
+ with self.resolver.resolving(self.stored_uri) as resolved:
+ self.assertIs(resolved, self.stored_schema)
+
+ self.resolver.store["cached_ref"] = {"foo" : 12}
+ with self.resolver.resolving("cached_ref#/foo") as resolved:
+ self.assertEqual(resolved, 12)
+
+ def test_it_retrieves_unstored_refs_via_requests(self):
+ ref = "http://bar#baz"
+ schema = {"baz" : 12}
+
+ with mock.patch("jsonschema.validators.requests") as requests:
+ requests.get.return_value.json.return_value = schema
+ with self.resolver.resolving(ref) as resolved:
+ self.assertEqual(resolved, 12)
+ requests.get.assert_called_once_with("http://bar")
+
+ def test_it_retrieves_unstored_refs_via_urlopen(self):
+ ref = "http://bar#baz"
+ schema = {"baz" : 12}
+
+ with mock.patch("jsonschema.validators.requests", None):
+ with mock.patch("jsonschema.validators.urlopen") as urlopen:
+ urlopen.return_value.read.return_value = (
+ json.dumps(schema).encode("utf8"))
+ with self.resolver.resolving(ref) as resolved:
+ self.assertEqual(resolved, 12)
+ urlopen.assert_called_once_with("http://bar")
+
+ def test_it_can_construct_a_base_uri_from_a_schema(self):
+ schema = {"id" : "foo"}
+ resolver = RefResolver.from_schema(schema)
+ self.assertEqual(resolver.base_uri, "foo")
+ with resolver.resolving("") as resolved:
+ self.assertEqual(resolved, schema)
+ with resolver.resolving("#") as resolved:
+ self.assertEqual(resolved, schema)
+ with resolver.resolving("foo") as resolved:
+ self.assertEqual(resolved, schema)
+ with resolver.resolving("foo#") as resolved:
+ self.assertEqual(resolved, schema)
+
+ def test_it_can_construct_a_base_uri_from_a_schema_without_id(self):
+ schema = {}
+ resolver = RefResolver.from_schema(schema)
+ self.assertEqual(resolver.base_uri, "")
+ with resolver.resolving("") as resolved:
+ self.assertEqual(resolved, schema)
+ with resolver.resolving("#") as resolved:
+ self.assertEqual(resolved, schema)
+
+ def test_custom_uri_scheme_handlers(self):
+ schema = {"foo": "bar"}
+ ref = "foo://bar"
+ foo_handler = mock.Mock(return_value=schema)
+ resolver = RefResolver("", {}, handlers={"foo": foo_handler})
+ with resolver.resolving(ref) as resolved:
+ self.assertEqual(resolved, schema)
+ foo_handler.assert_called_once_with(ref)
+
+ def test_cache_remote_on(self):
+ ref = "foo://bar"
+ foo_handler = mock.Mock()
+ resolver = RefResolver(
+ "", {}, cache_remote=True, handlers={"foo" : foo_handler},
+ )
+ with resolver.resolving(ref):
+ pass
+ with resolver.resolving(ref):
+ pass
+ foo_handler.assert_called_once_with(ref)
+
+ def test_cache_remote_off(self):
+ ref = "foo://bar"
+ foo_handler = mock.Mock()
+ resolver = RefResolver(
+ "", {}, cache_remote=False, handlers={"foo" : foo_handler},
+ )
+ with resolver.resolving(ref):
+ pass
+ with resolver.resolving(ref):
+ pass
+ self.assertEqual(foo_handler.call_count, 2)
+
+ def test_if_you_give_it_junk_you_get_a_resolution_error(self):
+ ref = "foo://bar"
+ foo_handler = mock.Mock(side_effect=ValueError("Oh no! What's this?"))
+ resolver = RefResolver("", {}, handlers={"foo" : foo_handler})
+ with self.assertRaises(RefResolutionError) as err:
+ with resolver.resolving(ref):
+ pass
+ self.assertEqual(str(err.exception), "Oh no! What's this?")
+
+
+def sorted_errors(errors):
+ def key(error):
+ return (
+ [str(e) for e in error.path],
+ [str(e) for e in error.schema_path]
+ )
+ return sorted(errors, key=key)
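
The handler mechanism exercised in ``test_custom_uri_scheme_handlers`` also works with plain callables; a sketch resolving a made-up "local" scheme (the scheme and documents are illustrative):

    from jsonschema.validators import RefResolver

    documents = {"local://defs": {"positive": {"minimum": 0, "exclusiveMinimum": True}}}
    resolver = RefResolver("", {}, handlers={"local": documents.__getitem__})

    with resolver.resolving("local://defs#/positive") as resolved:
        assert resolved == {"minimum": 0, "exclusiveMinimum": True}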
diff --git a/lib/spack/external/jsonschema/validators.py b/lib/spack/external/jsonschema/validators.py
new file mode 100644
index 0000000000..3e326844f4
--- /dev/null
+++ b/lib/spack/external/jsonschema/validators.py
@@ -0,0 +1,428 @@
+from __future__ import division
+
+import contextlib
+import json
+import numbers
+
+try:
+ import requests
+except ImportError:
+ requests = None
+
+from jsonschema import _utils, _validators
+from jsonschema.compat import (
+ Sequence, urljoin, urlsplit, urldefrag, unquote, urlopen,
+ str_types, int_types, iteritems,
+)
+from jsonschema.exceptions import ErrorTree # Backwards compatibility # noqa
+from jsonschema.exceptions import RefResolutionError, SchemaError, UnknownType
+
+
+_unset = _utils.Unset()
+
+validators = {}
+meta_schemas = _utils.URIDict()
+
+
+def validates(version):
+ """
+ Register the decorated validator for a ``version`` of the specification.
+
+ Registered validators and their meta schemas will be considered when
+ parsing ``$schema`` properties' URIs.
+
+ :argument str version: an identifier to use as the version's name
+ :returns: a class decorator to decorate the validator with the version
+
+ """
+
+ def _validates(cls):
+ validators[version] = cls
+ if u"id" in cls.META_SCHEMA:
+ meta_schemas[cls.META_SCHEMA[u"id"]] = cls
+ return cls
+ return _validates
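
``create`` (below) applies this decorator when it is given a ``version``; a sketch of the effect, using a made-up version name (note that this registers the class globally):

    from jsonschema.validators import create, validator_for

    Validator = create(meta_schema={u"id": u"my-meta-schema-id"}, version="my version")
    assert Validator.__name__ == "MyVersionValidator"

    # Schemas declaring that meta-schema id now resolve to the new class.
    assert validator_for({u"$schema": u"my-meta-schema-id"}) is Validator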
+
+
+def create(meta_schema, validators=(), version=None, default_types=None): # noqa
+ if default_types is None:
+ default_types = {
+ u"array" : list, u"boolean" : bool, u"integer" : int_types,
+ u"null" : type(None), u"number" : numbers.Number, u"object" : dict,
+ u"string" : str_types,
+ }
+
+ class Validator(object):
+ VALIDATORS = dict(validators)
+ META_SCHEMA = dict(meta_schema)
+ DEFAULT_TYPES = dict(default_types)
+
+ def __init__(
+ self, schema, types=(), resolver=None, format_checker=None,
+ ):
+ self._types = dict(self.DEFAULT_TYPES)
+ self._types.update(types)
+
+ if resolver is None:
+ resolver = RefResolver.from_schema(schema)
+
+ self.resolver = resolver
+ self.format_checker = format_checker
+ self.schema = schema
+
+ @classmethod
+ def check_schema(cls, schema):
+ for error in cls(cls.META_SCHEMA).iter_errors(schema):
+ raise SchemaError.create_from(error)
+
+ def iter_errors(self, instance, _schema=None):
+ if _schema is None:
+ _schema = self.schema
+
+ with self.resolver.in_scope(_schema.get(u"id", u"")):
+ ref = _schema.get(u"$ref")
+ if ref is not None:
+ validators = [(u"$ref", ref)]
+ else:
+ validators = iteritems(_schema)
+
+ for k, v in validators:
+ validator = self.VALIDATORS.get(k)
+ if validator is None:
+ continue
+
+ errors = validator(self, v, instance, _schema) or ()
+ for error in errors:
+                    # set details if not already set by the called function
+ error._set(
+ validator=k,
+ validator_value=v,
+ instance=instance,
+ schema=_schema,
+ )
+ if k != u"$ref":
+ error.schema_path.appendleft(k)
+ yield error
+
+ def descend(self, instance, schema, path=None, schema_path=None):
+ for error in self.iter_errors(instance, schema):
+ if path is not None:
+ error.path.appendleft(path)
+ if schema_path is not None:
+ error.schema_path.appendleft(schema_path)
+ yield error
+
+ def validate(self, *args, **kwargs):
+ for error in self.iter_errors(*args, **kwargs):
+ raise error
+
+ def is_type(self, instance, type):
+ if type not in self._types:
+ raise UnknownType(type, instance, self.schema)
+ pytypes = self._types[type]
+
+ # bool inherits from int, so ensure bools aren't reported as ints
+ if isinstance(instance, bool):
+ pytypes = _utils.flatten(pytypes)
+ is_number = any(
+ issubclass(pytype, numbers.Number) for pytype in pytypes
+ )
+ if is_number and bool not in pytypes:
+ return False
+ return isinstance(instance, pytypes)
+
+ def is_valid(self, instance, _schema=None):
+ error = next(self.iter_errors(instance, _schema), None)
+ return error is None
+
+ if version is not None:
+ Validator = validates(version)(Validator)
+ Validator.__name__ = version.title().replace(" ", "") + "Validator"
+
+ return Validator
+
+
+def extend(validator, validators, version=None):
+ all_validators = dict(validator.VALIDATORS)
+ all_validators.update(validators)
+ return create(
+ meta_schema=validator.META_SCHEMA,
+ validators=all_validators,
+ version=version,
+ default_types=validator.DEFAULT_TYPES,
+ )
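
A sketch of ``extend`` in use: the extra keyword's function follows the ``(validator, validator_value, instance, schema)`` calling convention seen in ``iter_errors`` above (the "even" keyword is illustrative):

    from jsonschema import ValidationError
    from jsonschema.validators import Draft4Validator, extend

    def even(validator, value, instance, schema):
        if value and validator.is_type(instance, u"integer") and instance % 2:
            yield ValidationError("%r is not even" % (instance,))

    EvenValidator = extend(Draft4Validator, validators={u"even": even})
    EvenValidator({u"even": True}).validate(2)    # passes
    # EvenValidator({u"even": True}).validate(3) would raise ValidationError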
+
+
+Draft3Validator = create(
+ meta_schema=_utils.load_schema("draft3"),
+ validators={
+ u"$ref" : _validators.ref,
+ u"additionalItems" : _validators.additionalItems,
+ u"additionalProperties" : _validators.additionalProperties,
+ u"dependencies" : _validators.dependencies,
+ u"disallow" : _validators.disallow_draft3,
+ u"divisibleBy" : _validators.multipleOf,
+ u"enum" : _validators.enum,
+ u"extends" : _validators.extends_draft3,
+ u"format" : _validators.format,
+ u"items" : _validators.items,
+ u"maxItems" : _validators.maxItems,
+ u"maxLength" : _validators.maxLength,
+ u"maximum" : _validators.maximum,
+ u"minItems" : _validators.minItems,
+ u"minLength" : _validators.minLength,
+ u"minimum" : _validators.minimum,
+ u"multipleOf" : _validators.multipleOf,
+ u"pattern" : _validators.pattern,
+ u"patternProperties" : _validators.patternProperties,
+ u"properties" : _validators.properties_draft3,
+ u"type" : _validators.type_draft3,
+ u"uniqueItems" : _validators.uniqueItems,
+ },
+ version="draft3",
+)
+
+Draft4Validator = create(
+ meta_schema=_utils.load_schema("draft4"),
+ validators={
+ u"$ref" : _validators.ref,
+ u"additionalItems" : _validators.additionalItems,
+ u"additionalProperties" : _validators.additionalProperties,
+ u"allOf" : _validators.allOf_draft4,
+ u"anyOf" : _validators.anyOf_draft4,
+ u"dependencies" : _validators.dependencies,
+ u"enum" : _validators.enum,
+ u"format" : _validators.format,
+ u"items" : _validators.items,
+ u"maxItems" : _validators.maxItems,
+ u"maxLength" : _validators.maxLength,
+ u"maxProperties" : _validators.maxProperties_draft4,
+ u"maximum" : _validators.maximum,
+ u"minItems" : _validators.minItems,
+ u"minLength" : _validators.minLength,
+ u"minProperties" : _validators.minProperties_draft4,
+ u"minimum" : _validators.minimum,
+ u"multipleOf" : _validators.multipleOf,
+ u"not" : _validators.not_draft4,
+ u"oneOf" : _validators.oneOf_draft4,
+ u"pattern" : _validators.pattern,
+ u"patternProperties" : _validators.patternProperties,
+ u"properties" : _validators.properties_draft4,
+ u"required" : _validators.required_draft4,
+ u"type" : _validators.type_draft4,
+ u"uniqueItems" : _validators.uniqueItems,
+ },
+ version="draft4",
+)
+
+
+class RefResolver(object):
+ """
+ Resolve JSON References.
+
+ :argument str base_uri: URI of the referring document
+ :argument referrer: the actual referring document
+ :argument dict store: a mapping from URIs to documents to cache
+ :argument bool cache_remote: whether remote refs should be cached after
+ first resolution
+ :argument dict handlers: a mapping from URI schemes to functions that
+ should be used to retrieve them
+
+ """
+
+ def __init__(
+ self, base_uri, referrer, store=(), cache_remote=True, handlers=(),
+ ):
+ self.base_uri = base_uri
+ self.resolution_scope = base_uri
+        # This attribute is not used; it is kept for backwards compatibility
+ self.referrer = referrer
+ self.cache_remote = cache_remote
+ self.handlers = dict(handlers)
+
+ self.store = _utils.URIDict(
+ (id, validator.META_SCHEMA)
+ for id, validator in iteritems(meta_schemas)
+ )
+ self.store.update(store)
+ self.store[base_uri] = referrer
+
+ @classmethod
+ def from_schema(cls, schema, *args, **kwargs):
+ """
+ Construct a resolver from a JSON schema object.
+
+ :argument schema schema: the referring schema
+ :rtype: :class:`RefResolver`
+
+ """
+
+ return cls(schema.get(u"id", u""), schema, *args, **kwargs)
+
+ @contextlib.contextmanager
+ def in_scope(self, scope):
+ old_scope = self.resolution_scope
+ self.resolution_scope = urljoin(old_scope, scope)
+ try:
+ yield
+ finally:
+ self.resolution_scope = old_scope
+
+ @contextlib.contextmanager
+ def resolving(self, ref):
+ """
+ Context manager which resolves a JSON ``ref`` and enters the
+ resolution scope of this ref.
+
+ :argument str ref: reference to resolve
+
+ """
+
+ full_uri = urljoin(self.resolution_scope, ref)
+ uri, fragment = urldefrag(full_uri)
+ if not uri:
+ uri = self.base_uri
+
+ if uri in self.store:
+ document = self.store[uri]
+ else:
+ try:
+ document = self.resolve_remote(uri)
+ except Exception as exc:
+ raise RefResolutionError(exc)
+
+ old_base_uri, self.base_uri = self.base_uri, uri
+ try:
+ with self.in_scope(uri):
+ yield self.resolve_fragment(document, fragment)
+ finally:
+ self.base_uri = old_base_uri
+
+ def resolve_fragment(self, document, fragment):
+ """
+ Resolve a ``fragment`` within the referenced ``document``.
+
+ :argument document: the referent document
+ :argument str fragment: a URI fragment to resolve within it
+
+ """
+
+ fragment = fragment.lstrip(u"/")
+ parts = unquote(fragment).split(u"/") if fragment else []
+
+ for part in parts:
+ part = part.replace(u"~1", u"/").replace(u"~0", u"~")
+
+ if isinstance(document, Sequence):
+ # Array indexes should be turned into integers
+ try:
+ part = int(part)
+ except ValueError:
+ pass
+ try:
+ document = document[part]
+ except (TypeError, LookupError):
+ raise RefResolutionError(
+ "Unresolvable JSON pointer: %r" % fragment
+ )
+
+ return document
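+
+ # Illustrative example (not part of the library): resolving the fragment
+ # "/properties/foo~1bar" against {"properties": {"foo/bar": 1}} splits it
+ # into ["properties", "foo~1bar"], unescapes "~1" to "/" (and "~0" to "~"),
+ # and walks the document to return 1.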
+
+ def resolve_remote(self, uri):
+ """
+ Resolve a remote ``uri``.
+
+ Does not check the store first, but stores the retrieved document in
+ the store if :attr:`RefResolver.cache_remote` is True.
+
+ .. note::
+
+ If the requests_ library is present, ``jsonschema`` will use it to
+ request the remote ``uri``, so that the correct encoding is
+ detected and used.
+
+ If it isn't, or if the scheme of the ``uri`` is not ``http`` or
+ ``https``, UTF-8 is assumed.
+
+ :argument str uri: the URI to resolve
+ :returns: the retrieved document
+
+ .. _requests: http://pypi.python.org/pypi/requests/
+
+ """
+
+ scheme = urlsplit(uri).scheme
+
+ if scheme in self.handlers:
+ result = self.handlers[scheme](uri)
+ elif (
+ scheme in [u"http", u"https"] and
+ requests and
+ getattr(requests.Response, "json", None) is not None
+ ):
+ # Requests has support for detecting the correct encoding of
+ # json over http
+ if callable(requests.Response.json):
+ result = requests.get(uri).json()
+ else:
+ result = requests.get(uri).json
+ else:
+ # Otherwise, pass off to urllib and assume utf-8
+ result = json.loads(urlopen(uri).read().decode("utf-8"))
+
+ if self.cache_remote:
+ self.store[uri] = result
+ return result
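+
+ # Illustrative sketch (an assumption, not from the library docs): a custom
+ # scheme handler can be supplied at construction time, e.g.
+ #
+ #     resolver = RefResolver("", {}, handlers={"file": my_file_loader})
+ #
+ # where my_file_loader is a hypothetical callable that takes a URI and
+ # returns the parsed document.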
+
+
+def validator_for(schema, default=_unset):
+ if default is _unset:
+ default = Draft4Validator
+ return meta_schemas.get(schema.get(u"$schema", u""), default)
+
+
+def validate(instance, schema, cls=None, *args, **kwargs):
+ """
+ Validate an instance under the given schema.
+
+ >>> validate([2, 3, 4], {"maxItems" : 2})
+ Traceback (most recent call last):
+ ...
+ ValidationError: [2, 3, 4] is too long
+
+ :func:`validate` will first verify that the provided schema is itself
+ valid, since skipping that check can produce confusing error messages
+ and cause validation to fail in inconsistent ways. If you know you have a valid schema
+ already or don't care, you might prefer using the
+ :meth:`~IValidator.validate` method directly on a specific validator
+ (e.g. :meth:`Draft4Validator.validate`).
+
+
+ :argument instance: the instance to validate
+ :argument schema: the schema to validate with
+ :argument cls: an :class:`IValidator` class that will be used to validate
+ the instance.
+
+ If the ``cls`` argument is not provided, two things will happen in
+ accordance with the specification. First, if the schema has a
+ :validator:`$schema` property containing a known meta-schema [#]_ then the
+ proper validator will be used. The specification recommends that all
+ schemas contain :validator:`$schema` properties for this reason. If no
+ :validator:`$schema` property is found, the default validator class is
+ :class:`Draft4Validator`.
+
+ Any other provided positional and keyword arguments will be passed on when
+ instantiating the ``cls``.
+
+ :raises:
+ :exc:`ValidationError` if the instance is invalid
+
+ :exc:`SchemaError` if the schema itself is invalid
+
+ .. rubric:: Footnotes
+ .. [#] known by a validator registered with :func:`validates`
+ """
+ if cls is None:
+ cls = validator_for(schema)
+ cls.check_schema(schema)
+ cls(schema, *args, **kwargs).validate(instance)
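+
+# A minimal usage sketch of the module-level API above (illustrative only):
+#
+#     schema = {"type": "object", "properties": {"n": {"type": "integer"}}}
+#     cls = validator_for(schema)      # Draft4Validator unless $schema says otherwise
+#     cls.check_schema(schema)         # raises SchemaError if the schema is invalid
+#     cls(schema).validate({"n": 3})   # raises ValidationError on bad instances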
diff --git a/lib/spack/external/nose/LICENSE b/lib/spack/external/nose/LICENSE
new file mode 100644
index 0000000000..8add30ad59
--- /dev/null
+++ b/lib/spack/external/nose/LICENSE
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/lib/spack/external/nose/__init__.py b/lib/spack/external/nose/__init__.py
new file mode 100644
index 0000000000..1ae1362b7a
--- /dev/null
+++ b/lib/spack/external/nose/__init__.py
@@ -0,0 +1,15 @@
+from nose.core import collector, main, run, run_exit, runmodule
+# backwards compatibility
+from nose.exc import SkipTest, DeprecatedTest
+from nose.tools import with_setup
+
+__author__ = 'Jason Pellerin'
+__versioninfo__ = (1, 3, 7)
+__version__ = '.'.join(map(str, __versioninfo__))
+
+__all__ = [
+ 'main', 'run', 'run_exit', 'runmodule', 'with_setup',
+ 'SkipTest', 'DeprecatedTest', 'collector'
+ ]
+
+
diff --git a/lib/spack/external/nose/__main__.py b/lib/spack/external/nose/__main__.py
new file mode 100644
index 0000000000..b402d9df12
--- /dev/null
+++ b/lib/spack/external/nose/__main__.py
@@ -0,0 +1,8 @@
+import sys
+
+from nose.core import run_exit
+
+if sys.argv[0].endswith('__main__.py'):
+ sys.argv[0] = '%s -m nose' % sys.executable
+
+run_exit()
diff --git a/lib/spack/external/nose/case.py b/lib/spack/external/nose/case.py
new file mode 100644
index 0000000000..cffa4ab4c9
--- /dev/null
+++ b/lib/spack/external/nose/case.py
@@ -0,0 +1,397 @@
+"""nose unittest.TestCase subclasses. It is not necessary to subclass these
+classes when writing tests; they are used internally by nose.loader.TestLoader
+to create test cases from test functions and methods in test classes.
+"""
+import logging
+import sys
+import unittest
+from inspect import isfunction
+from nose.config import Config
+from nose.failure import Failure # for backwards compatibility
+from nose.util import resolve_name, test_address, try_run
+
+log = logging.getLogger(__name__)
+
+
+__all__ = ['Test']
+
+
+class Test(unittest.TestCase):
+ """The universal test case wrapper.
+
+ When a plugin sees a test, it will always see an instance of this
+ class. To access the actual test case that will be run, access the
+ test property of the nose.case.Test instance.
+ """
+ __test__ = False # do not collect
+ def __init__(self, test, config=None, resultProxy=None):
+ # sanity check
+ if not callable(test):
+ raise TypeError("nose.case.Test called with argument %r that "
+ "is not callable. A callable is required."
+ % test)
+ self.test = test
+ if config is None:
+ config = Config()
+ self.config = config
+ self.tbinfo = None
+ self.capturedOutput = None
+ self.resultProxy = resultProxy
+ self.plugins = config.plugins
+ self.passed = None
+ unittest.TestCase.__init__(self)
+
+ def __call__(self, *arg, **kwarg):
+ return self.run(*arg, **kwarg)
+
+ def __str__(self):
+ name = self.plugins.testName(self)
+ if name is not None:
+ return name
+ return str(self.test)
+
+ def __repr__(self):
+ return "Test(%r)" % self.test
+
+ def afterTest(self, result):
+ """Called after test is complete (after result.stopTest)
+ """
+ try:
+ afterTest = result.afterTest
+ except AttributeError:
+ pass
+ else:
+ afterTest(self.test)
+
+ def beforeTest(self, result):
+ """Called before test is run (before result.startTest)
+ """
+ try:
+ beforeTest = result.beforeTest
+ except AttributeError:
+ pass
+ else:
+ beforeTest(self.test)
+
+ def exc_info(self):
+ """Extract exception info.
+ """
+ exc, exv, tb = sys.exc_info()
+ return (exc, exv, tb)
+
+ def id(self):
+ """Get a short(er) description of the test
+ """
+ return self.test.id()
+
+ def address(self):
+ """Return a round-trip name for this test, a name that can be
+ fed back as input to loadTestByName and (assuming the same
+ plugin configuration) result in the loading of this test.
+ """
+ if hasattr(self.test, 'address'):
+ return self.test.address()
+ else:
+ # not a nose case
+ return test_address(self.test)
+
+ def _context(self):
+ try:
+ return self.test.context
+ except AttributeError:
+ pass
+ try:
+ return self.test.__class__
+ except AttributeError:
+ pass
+ try:
+ return resolve_name(self.test.__module__)
+ except AttributeError:
+ pass
+ return None
+ context = property(_context, None, None,
+ """Get the context object of this test (if any).""")
+
+ def run(self, result):
+ """Modified run for the test wrapper.
+
+ From here we don't call result.startTest or stopTest or
+ addSuccess. The wrapper calls addError/addFailure only if its
+ own setup or teardown fails, or running the wrapped test fails
+ (eg, if the wrapped "test" is not callable).
+
+ Two additional methods are called, beforeTest and
+ afterTest. These give plugins a chance to modify the wrapped
+ test before it is called and do cleanup after it is
+ called. They are called unconditionally.
+ """
+ if self.resultProxy:
+ result = self.resultProxy(result, self)
+ try:
+ try:
+ self.beforeTest(result)
+ self.runTest(result)
+ except KeyboardInterrupt:
+ raise
+ except:
+ err = sys.exc_info()
+ result.addError(self, err)
+ finally:
+ self.afterTest(result)
+
+ def runTest(self, result):
+ """Run the test. Plugins may alter the test by returning a
+ value from prepareTestCase. The value must be callable and
+ must accept one argument, the result instance.
+ """
+ test = self.test
+ plug_test = self.config.plugins.prepareTestCase(self)
+ if plug_test is not None:
+ test = plug_test
+ test(result)
+
+ def shortDescription(self):
+ desc = self.plugins.describeTest(self)
+ if desc is not None:
+ return desc
+ # work around bug in unittest.TestCase.shortDescription
+ # with multiline docstrings.
+ test = self.test
+ try:
+ test._testMethodDoc = test._testMethodDoc.strip()  # 2.5
+ except AttributeError:
+ try:
+ # 2.4 and earlier
+ test._TestCase__testMethodDoc = \
+ test._TestCase__testMethodDoc.strip()
+ except AttributeError:
+ pass
+ # 2.7 compat: shortDescription() always returns something
+ # which is a change from 2.6 and below, and breaks the
+ # testName plugin call.
+ try:
+ desc = self.test.shortDescription()
+ except Exception:
+ # this is probably caused by a problem in test.__str__() and is
+ # only triggered by python 3.1's unittest!
+ pass
+ try:
+ if desc == str(self.test):
+ return
+ except Exception:
+ # If str() triggers an exception then ignore it.
+ # see issue 422
+ pass
+ return desc
+
+
+class TestBase(unittest.TestCase):
+ """Common functionality for FunctionTestCase and MethodTestCase.
+ """
+ __test__ = False # do not collect
+
+ def id(self):
+ return str(self)
+
+ def runTest(self):
+ self.test(*self.arg)
+
+ def shortDescription(self):
+ if hasattr(self.test, 'description'):
+ return self.test.description
+ func, arg = self._descriptors()
+ doc = getattr(func, '__doc__', None)
+ if not doc:
+ doc = str(self)
+ return doc.strip().split("\n")[0].strip()
+
+
+class FunctionTestCase(TestBase):
+ """TestCase wrapper for test functions.
+
+ Don't use this class directly; it is used internally in nose to
+ create test cases for test functions.
+ """
+ __test__ = False # do not collect
+
+ def __init__(self, test, setUp=None, tearDown=None, arg=tuple(),
+ descriptor=None):
+ """Initialize the MethodTestCase.
+
+ Required argument:
+
+ * test -- the test function to call.
+
+ Optional arguments:
+
+ * setUp -- function to run at setup.
+
+ * tearDown -- function to run at teardown.
+
+ * arg -- arguments to pass to the test function. This is to support
+ generator functions that yield arguments.
+
+ * descriptor -- the function, other than the test, that should be used
+ to construct the test name. This is to support generator functions.
+ """
+
+ self.test = test
+ self.setUpFunc = setUp
+ self.tearDownFunc = tearDown
+ self.arg = arg
+ self.descriptor = descriptor
+ TestBase.__init__(self)
+
+ def address(self):
+ """Return a round-trip name for this test, a name that can be
+ fed back as input to loadTestByName and (assuming the same
+ plugin configuration) result in the loading of this test.
+ """
+ if self.descriptor is not None:
+ return test_address(self.descriptor)
+ else:
+ return test_address(self.test)
+
+ def _context(self):
+ return resolve_name(self.test.__module__)
+ context = property(_context, None, None,
+ """Get context (module) of this test""")
+
+ def setUp(self):
+ """Run any setup function attached to the test function
+ """
+ if self.setUpFunc:
+ self.setUpFunc()
+ else:
+ names = ('setup', 'setUp', 'setUpFunc')
+ try_run(self.test, names)
+
+ def tearDown(self):
+ """Run any teardown function attached to the test function
+ """
+ if self.tearDownFunc:
+ self.tearDownFunc()
+ else:
+ names = ('teardown', 'tearDown', 'tearDownFunc')
+ try_run(self.test, names)
+
+ def __str__(self):
+ func, arg = self._descriptors()
+ if hasattr(func, 'compat_func_name'):
+ name = func.compat_func_name
+ else:
+ name = func.__name__
+ name = "%s.%s" % (func.__module__, name)
+ if arg:
+ name = "%s%s" % (name, arg)
+ # FIXME need to include the full dir path to disambiguate
+ # in cases where test module of the same name was seen in
+ # another directory (old fromDirectory)
+ return name
+ __repr__ = __str__
+
+ def _descriptors(self):
+ """Get the descriptors of the test function: the function and
+ arguments that will be used to construct the test name. In
+ most cases, this is the function itself and no arguments. For
+ tests generated by generator functions, the original
+ (generator) function and args passed to the generated function
+ are returned.
+ """
+ if self.descriptor:
+ return self.descriptor, self.arg
+ else:
+ return self.test, self.arg
+
+
+class MethodTestCase(TestBase):
+ """Test case wrapper for test methods.
+
+ Don't use this class directly; it is used internally in nose to
+ create test cases for test methods.
+ """
+ __test__ = False # do not collect
+
+ def __init__(self, method, test=None, arg=tuple(), descriptor=None):
+ """Initialize the MethodTestCase.
+
+ Required argument:
+
+ * method -- the method to call, may be bound or unbound. In either
+ case, a new instance of the method's class will be instantiated to
+ make the call. Note: In Python 3.x, if using an unbound method, you
+ must wrap it using pyversion.unbound_method.
+
+ Optional arguments:
+
+ * test -- the test function to call. If this is passed, it will be
+ called instead of getting a new bound method of the same name as the
+ desired method from the test instance. This is to support generator
+ methods that yield inline functions.
+
+ * arg -- arguments to pass to the test function. This is to support
+ generator methods that yield arguments.
+
+ * descriptor -- the function, other than the test, that should be used
+ to construct the test name. This is to support generator methods.
+ """
+ self.method = method
+ self.test = test
+ self.arg = arg
+ self.descriptor = descriptor
+ if isfunction(method):
+ raise ValueError("Unbound methods must be wrapped using pyversion.unbound_method before passing to MethodTestCase")
+ self.cls = method.im_class
+ self.inst = self.cls()
+ if self.test is None:
+ method_name = self.method.__name__
+ self.test = getattr(self.inst, method_name)
+ TestBase.__init__(self)
+
+ def __str__(self):
+ func, arg = self._descriptors()
+ if hasattr(func, 'compat_func_name'):
+ name = func.compat_func_name
+ else:
+ name = func.__name__
+ name = "%s.%s.%s" % (self.cls.__module__,
+ self.cls.__name__,
+ name)
+ if arg:
+ name = "%s%s" % (name, arg)
+ return name
+ __repr__ = __str__
+
+ def address(self):
+ """Return a round-trip name for this test, a name that can be
+ fed back as input to loadTestByName and (assuming the same
+ plugin configuration) result in the loading of this test.
+ """
+ if self.descriptor is not None:
+ return test_address(self.descriptor)
+ else:
+ return test_address(self.method)
+
+ def _context(self):
+ return self.cls
+ context = property(_context, None, None,
+ """Get context (class) of this test""")
+
+ def setUp(self):
+ try_run(self.inst, ('setup', 'setUp'))
+
+ def tearDown(self):
+ try_run(self.inst, ('teardown', 'tearDown'))
+
+ def _descriptors(self):
+ """Get the descriptors of the test method: the method and
+ arguments that will be used to construct the test name. In
+ most cases, this is the method itself and no arguments. For
+ tests generated by generator methods, the original
+ (generator) method and args passed to the generated method
+ or function are returned.
+ """
+ if self.descriptor:
+ return self.descriptor, self.arg
+ else:
+ return self.method, self.arg
diff --git a/lib/spack/external/nose/commands.py b/lib/spack/external/nose/commands.py
new file mode 100644
index 0000000000..ef0e9caed4
--- /dev/null
+++ b/lib/spack/external/nose/commands.py
@@ -0,0 +1,172 @@
+"""
+nosetests setuptools command
+----------------------------
+
+The easiest way to run tests with nose is to use the `nosetests` setuptools
+command::
+
+ python setup.py nosetests
+
+This command has one *major* benefit over the standard `test` command: *all
+nose plugins are supported*.
+
+To configure the `nosetests` command, add a [nosetests] section to your
+setup.cfg. The [nosetests] section can contain any command line arguments that
+nosetests supports. The differences between issuing an option on the command
+line and adding it to setup.cfg are:
+
+* In setup.cfg, the -- prefix must be excluded
+* In setup.cfg, command line flags that take no arguments must be given an
+ argument flag (1, T or TRUE for active, 0, F or FALSE for inactive)
+
+Here's an example [nosetests] setup.cfg section::
+
+ [nosetests]
+ verbosity=1
+ detailed-errors=1
+ with-coverage=1
+ cover-package=nose
+ debug=nose.loader
+ pdb=1
+ pdb-failures=1
+
+If you commonly run nosetests with a large number of options, using
+the nosetests setuptools command and configuring with setup.cfg can
+make running your tests much less tedious. (Note that the same options
+and format supported in setup.cfg are supported in all other config
+files, and the nosetests script will also load config files.)
+
+Another reason to run tests with the command is that the command will
+install packages listed in your `tests_require`, as well as doing a
+complete build of your package before running tests. For packages with
+dependencies or that build C extensions, using the setuptools command
+can be more convenient than building by hand and running the nosetests
+script.
+
+Bootstrapping
+-------------
+
+If you are distributing your project and want users to be able to run tests
+without having to install nose themselves, add nose to the setup_requires
+section of your setup()::
+
+ setup(
+ # ...
+ setup_requires=['nose>=1.0']
+ )
+
+This will direct setuptools to download and activate nose during the setup
+process, making the ``nosetests`` command available.
+
+"""
+try:
+ from setuptools import Command
+except ImportError:
+ Command = nosetests = None
+else:
+ from nose.config import Config, option_blacklist, user_config_files, \
+ flag, _bool
+ from nose.core import TestProgram
+ from nose.plugins import DefaultPluginManager
+
+
+ def get_user_options(parser):
+ """convert a optparse option list into a distutils option tuple list"""
+ opt_list = []
+ for opt in parser.option_list:
+ if opt._long_opts[0][2:] in option_blacklist:
+ continue
+ long_name = opt._long_opts[0][2:]
+ if opt.action not in ('store_true', 'store_false'):
+ long_name = long_name + "="
+ short_name = None
+ if opt._short_opts:
+ short_name = opt._short_opts[0][1:]
+ opt_list.append((long_name, short_name, opt.help or ""))
+ return opt_list
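+
+ # Illustrative examples of the tuples produced (assuming nose's usual
+ # options): an option taking a value, such as -w/--where, yields
+ # ('where=', 'w', <help text>), while a boolean flag like -s/--stop yields
+ # ('stop', 's', <help text>) with no trailing '='.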
+
+
+ class nosetests(Command):
+ description = "Run unit tests using nosetests"
+ __config = Config(files=user_config_files(),
+ plugins=DefaultPluginManager())
+ __parser = __config.getParser()
+ user_options = get_user_options(__parser)
+
+ def initialize_options(self):
+ """create the member variables, but change hyphens to
+ underscores
+ """
+
+ self.option_to_cmds = {}
+ for opt in self.__parser.option_list:
+ cmd_name = opt._long_opts[0][2:]
+ option_name = cmd_name.replace('-', '_')
+ self.option_to_cmds[option_name] = cmd_name
+ setattr(self, option_name, None)
+ self.attr = None
+
+ def finalize_options(self):
+ """nothing to do here"""
+ pass
+
+ def run(self):
+ """ensure tests are capable of being run, then
+ run nose.main with a reconstructed argument list"""
+ if getattr(self.distribution, 'use_2to3', False):
+ # If we run 2to3 we cannot do this in place:
+
+ # Ensure metadata is up-to-date
+ build_py = self.get_finalized_command('build_py')
+ build_py.inplace = 0
+ build_py.run()
+ bpy_cmd = self.get_finalized_command("build_py")
+ build_path = bpy_cmd.build_lib
+
+ # Build extensions
+ egg_info = self.get_finalized_command('egg_info')
+ egg_info.egg_base = build_path
+ egg_info.run()
+
+ build_ext = self.get_finalized_command('build_ext')
+ build_ext.inplace = 0
+ build_ext.run()
+ else:
+ self.run_command('egg_info')
+
+ # Build extensions in-place
+ build_ext = self.get_finalized_command('build_ext')
+ build_ext.inplace = 1
+ build_ext.run()
+
+ if self.distribution.install_requires:
+ self.distribution.fetch_build_eggs(
+ self.distribution.install_requires)
+ if self.distribution.tests_require:
+ self.distribution.fetch_build_eggs(
+ self.distribution.tests_require)
+
+ ei_cmd = self.get_finalized_command("egg_info")
+ argv = ['nosetests', '--where', ei_cmd.egg_base]
+ for (option_name, cmd_name) in self.option_to_cmds.items():
+ if option_name in option_blacklist:
+ continue
+ value = getattr(self, option_name)
+ if value is not None:
+ argv.extend(
+ self.cfgToArg(option_name.replace('_', '-'), value))
+ TestProgram(argv=argv, config=self.__config)
+
+ def cfgToArg(self, optname, value):
+ argv = []
+ long_optname = '--' + optname
+ opt = self.__parser.get_option(long_optname)
+ if opt.action in ('store_true', 'store_false'):
+ if not flag(value):
+ raise ValueError("Invalid value '%s' for '%s'" % (
+ value, optname))
+ if _bool(value):
+ argv.append(long_optname)
+ else:
+ argv.extend([long_optname, value])
+ return argv
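+
+ # Illustrative examples (assuming nose's usual options): cfgToArg('verbosity',
+ # '2') returns ['--verbosity', '2']; for a store_true flag like --pdb,
+ # cfgToArg('pdb', '1') returns ['--pdb'] and cfgToArg('pdb', '0') returns [].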
diff --git a/lib/spack/external/nose/config.py b/lib/spack/external/nose/config.py
new file mode 100644
index 0000000000..125eb5579d
--- /dev/null
+++ b/lib/spack/external/nose/config.py
@@ -0,0 +1,661 @@
+import logging
+import optparse
+import os
+import re
+import sys
+import ConfigParser
+from optparse import OptionParser
+from nose.util import absdir, tolist
+from nose.plugins.manager import NoPlugins
+from warnings import warn, filterwarnings
+
+log = logging.getLogger(__name__)
+
+# not allowed in config files
+option_blacklist = ['help', 'verbose']
+
+config_files = [
+ # Linux users will prefer this
+ "~/.noserc",
+ # Windows users will prefer this
+ "~/nose.cfg"
+ ]
+
+ # platforms on which the exe check defaults to off
+# Windows and IronPython
+exe_allowed_platforms = ('win32', 'cli')
+
+filterwarnings("always", category=DeprecationWarning,
+ module=r'(.*\.)?nose\.config')
+
+class NoSuchOptionError(Exception):
+ def __init__(self, name):
+ Exception.__init__(self, name)
+ self.name = name
+
+
+class ConfigError(Exception):
+ pass
+
+
+class ConfiguredDefaultsOptionParser(object):
+ """
+ Handler for options from commandline and config files.
+ """
+ def __init__(self, parser, config_section, error=None, file_error=None):
+ self._parser = parser
+ self._config_section = config_section
+ if error is None:
+ error = self._parser.error
+ self._error = error
+ if file_error is None:
+ file_error = lambda msg, **kw: error(msg)
+ self._file_error = file_error
+
+ def _configTuples(self, cfg, filename):
+ config = []
+ if self._config_section in cfg.sections():
+ for name, value in cfg.items(self._config_section):
+ config.append((name, value, filename))
+ return config
+
+ def _readFromFilenames(self, filenames):
+ config = []
+ for filename in filenames:
+ cfg = ConfigParser.RawConfigParser()
+ try:
+ cfg.read(filename)
+ except ConfigParser.Error, exc:
+ raise ConfigError("Error reading config file %r: %s" %
+ (filename, str(exc)))
+ config.extend(self._configTuples(cfg, filename))
+ return config
+
+ def _readFromFileObject(self, fh):
+ cfg = ConfigParser.RawConfigParser()
+ try:
+ filename = fh.name
+ except AttributeError:
+ filename = '<???>'
+ try:
+ cfg.readfp(fh)
+ except ConfigParser.Error, exc:
+ raise ConfigError("Error reading config file %r: %s" %
+ (filename, str(exc)))
+ return self._configTuples(cfg, filename)
+
+ def _readConfiguration(self, config_files):
+ try:
+ config_files.readline
+ except AttributeError:
+ filename_or_filenames = config_files
+ if isinstance(filename_or_filenames, basestring):
+ filenames = [filename_or_filenames]
+ else:
+ filenames = filename_or_filenames
+ config = self._readFromFilenames(filenames)
+ else:
+ fh = config_files
+ config = self._readFromFileObject(fh)
+ return config
+
+ def _processConfigValue(self, name, value, values, parser):
+ opt_str = '--' + name
+ option = parser.get_option(opt_str)
+ if option is None:
+ raise NoSuchOptionError(name)
+ else:
+ option.process(opt_str, value, values, parser)
+
+ def _applyConfigurationToValues(self, parser, config, values):
+ for name, value, filename in config:
+ if name in option_blacklist:
+ continue
+ try:
+ self._processConfigValue(name, value, values, parser)
+ except NoSuchOptionError, exc:
+ self._file_error(
+ "Error reading config file %r: "
+ "no such option %r" % (filename, exc.name),
+ name=name, filename=filename)
+ except optparse.OptionValueError, exc:
+ msg = str(exc).replace('--' + name, repr(name), 1)
+ self._file_error("Error reading config file %r: "
+ "%s" % (filename, msg),
+ name=name, filename=filename)
+
+ def parseArgsAndConfigFiles(self, args, config_files):
+ values = self._parser.get_default_values()
+ try:
+ config = self._readConfiguration(config_files)
+ except ConfigError, exc:
+ self._error(str(exc))
+ else:
+ try:
+ self._applyConfigurationToValues(self._parser, config, values)
+ except ConfigError, exc:
+ self._error(str(exc))
+ return self._parser.parse_args(args, values)
+
+
+class Config(object):
+ """nose configuration.
+
+ Instances of Config are used throughout nose to configure
+ behavior, including plugin lists. Here are the default values for
+ all config keys::
+
+ self.env = env = kw.pop('env', {})
+ self.args = ()
+ self.testMatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+ self.addPaths = not env.get('NOSE_NOPATH', False)
+ self.configSection = 'nosetests'
+ self.debug = env.get('NOSE_DEBUG')
+ self.debugLog = env.get('NOSE_DEBUG_LOG')
+ self.exclude = None
+ self.getTestCaseNamesCompat = False
+ self.includeExe = env.get('NOSE_INCLUDE_EXE',
+ sys.platform in exe_allowed_platforms)
+ self.ignoreFiles = (re.compile(r'^\.'),
+ re.compile(r'^_'),
+ re.compile(r'^setup\.py$')
+ )
+ self.include = None
+ self.loggingConfig = None
+ self.logStream = sys.stderr
+ self.options = NoOptions()
+ self.parser = None
+ self.plugins = NoPlugins()
+ self.srcDirs = ('lib', 'src')
+ self.runOnInit = True
+ self.stopOnError = env.get('NOSE_STOP', False)
+ self.stream = sys.stderr
+ self.testNames = ()
+ self.verbosity = int(env.get('NOSE_VERBOSE', 1))
+ self.where = ()
+ self.py3where = ()
+ self.workingDir = None
+ """
+
+ def __init__(self, **kw):
+ self.env = env = kw.pop('env', {})
+ self.args = ()
+ self.testMatchPat = env.get('NOSE_TESTMATCH',
+ r'(?:^|[\b_\.%s-])[Tt]est' % os.sep)
+ self.testMatch = re.compile(self.testMatchPat)
+ self.addPaths = not env.get('NOSE_NOPATH', False)
+ self.configSection = 'nosetests'
+ self.debug = env.get('NOSE_DEBUG')
+ self.debugLog = env.get('NOSE_DEBUG_LOG')
+ self.exclude = None
+ self.getTestCaseNamesCompat = False
+ self.includeExe = env.get('NOSE_INCLUDE_EXE',
+ sys.platform in exe_allowed_platforms)
+ self.ignoreFilesDefaultStrings = [r'^\.',
+ r'^_',
+ r'^setup\.py$',
+ ]
+ self.ignoreFiles = map(re.compile, self.ignoreFilesDefaultStrings)
+ self.include = None
+ self.loggingConfig = None
+ self.logStream = sys.stderr
+ self.options = NoOptions()
+ self.parser = None
+ self.plugins = NoPlugins()
+ self.srcDirs = ('lib', 'src')
+ self.runOnInit = True
+ self.stopOnError = env.get('NOSE_STOP', False)
+ self.stream = sys.stderr
+ self.testNames = []
+ self.verbosity = int(env.get('NOSE_VERBOSE', 1))
+ self.where = ()
+ self.py3where = ()
+ self.workingDir = os.getcwd()
+ self.traverseNamespace = False
+ self.firstPackageWins = False
+ self.parserClass = OptionParser
+ self.worker = False
+
+ self._default = self.__dict__.copy()
+ self.update(kw)
+ self._orig = self.__dict__.copy()
+
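+ # Descriptive note: __getstate__/__setstate__ below make a Config picklable
+ # so it can be shipped to multiprocess test workers; unpicklable members
+ # (streams, env) are dropped and the plugin manager is re-instantiated on
+ # the receiving side.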
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ del state['stream']
+ del state['_orig']
+ del state['_default']
+ del state['env']
+ del state['logStream']
+ # FIXME remove plugins, have only plugin manager class
+ state['plugins'] = self.plugins.__class__
+ return state
+
+ def __setstate__(self, state):
+ plugincls = state.pop('plugins')
+ self.update(state)
+ self.worker = True
+ # FIXME won't work for static plugin lists
+ self.plugins = plugincls()
+ self.plugins.loadPlugins()
+ # needed so .can_configure gets set appropriately
+ dummy_parser = self.parserClass()
+ self.plugins.addOptions(dummy_parser, {})
+ self.plugins.configure(self.options, self)
+
+ def __repr__(self):
+ d = self.__dict__.copy()
+ # don't expose env, could include sensitive info
+ d['env'] = {}
+ keys = [ k for k in d.keys()
+ if not k.startswith('_') ]
+ keys.sort()
+ return "Config(%s)" % ', '.join([ '%s=%r' % (k, d[k])
+ for k in keys ])
+ __str__ = __repr__
+
+ def _parseArgs(self, argv, cfg_files):
+ def warn_sometimes(msg, name=None, filename=None):
+ if (hasattr(self.plugins, 'excludedOption') and
+ self.plugins.excludedOption(name)):
+ msg = ("Option %r in config file %r ignored: "
+ "excluded by runtime environment" %
+ (name, filename))
+ warn(msg, RuntimeWarning)
+ else:
+ raise ConfigError(msg)
+ parser = ConfiguredDefaultsOptionParser(
+ self.getParser(), self.configSection, file_error=warn_sometimes)
+ return parser.parseArgsAndConfigFiles(argv[1:], cfg_files)
+
+ def configure(self, argv=None, doc=None):
+ """Configure the nose running environment. Execute configure before
+ collecting tests with nose.TestCollector to enable output capture and
+ other features.
+ """
+ env = self.env
+ if argv is None:
+ argv = sys.argv
+
+ cfg_files = getattr(self, 'files', [])
+ options, args = self._parseArgs(argv, cfg_files)
+ # If -c --config has been specified on command line,
+ # load those config files and reparse
+ if getattr(options, 'files', []):
+ options, args = self._parseArgs(argv, options.files)
+
+ self.options = options
+ if args:
+ self.testNames = args
+ if options.testNames is not None:
+ self.testNames.extend(tolist(options.testNames))
+
+ if options.py3where is not None:
+ if sys.version_info >= (3,):
+ options.where = options.py3where
+
+ # `where` is an append action, so it can't have a default value
+ # in the parser, or that default will always be in the list
+ if not options.where:
+ options.where = env.get('NOSE_WHERE', None)
+
+ # include and exclude also
+ if not options.ignoreFiles:
+ options.ignoreFiles = env.get('NOSE_IGNORE_FILES', [])
+ if not options.include:
+ options.include = env.get('NOSE_INCLUDE', [])
+ if not options.exclude:
+ options.exclude = env.get('NOSE_EXCLUDE', [])
+
+ self.addPaths = options.addPaths
+ self.stopOnError = options.stopOnError
+ self.verbosity = options.verbosity
+ self.includeExe = options.includeExe
+ self.traverseNamespace = options.traverseNamespace
+ self.debug = options.debug
+ self.debugLog = options.debugLog
+ self.loggingConfig = options.loggingConfig
+ self.firstPackageWins = options.firstPackageWins
+ self.configureLogging()
+
+ if not options.byteCompile:
+ sys.dont_write_bytecode = True
+
+ if options.where is not None:
+ self.configureWhere(options.where)
+
+ if options.testMatch:
+ self.testMatch = re.compile(options.testMatch)
+
+ if options.ignoreFiles:
+ self.ignoreFiles = map(re.compile, tolist(options.ignoreFiles))
+ log.info("Ignoring files matching %s", options.ignoreFiles)
+ else:
+ log.info("Ignoring files matching %s", self.ignoreFilesDefaultStrings)
+
+ if options.include:
+ self.include = map(re.compile, tolist(options.include))
+ log.info("Including tests matching %s", options.include)
+
+ if options.exclude:
+ self.exclude = map(re.compile, tolist(options.exclude))
+ log.info("Excluding tests matching %s", options.exclude)
+
+ # When listing plugins we don't want to run them
+ if not options.showPlugins:
+ self.plugins.configure(options, self)
+ self.plugins.begin()
+
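+    # A sketch of driving configure() directly; the argv values below are
+    # illustrative:
+    #
+    #     conf = Config(env=os.environ, files=all_config_files(),
+    #                   plugins=NoPlugins())
+    #     conf.configure(['nosetests', '-v'])
+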
+ def configureLogging(self):
+ """Configure logging for nose, or optionally other packages. Any logger
+ name may be set with the debug option, and that logger will be set to
+ debug level and be assigned the same handler as the nose loggers, unless
+ it already has a handler.
+ """
+ if self.loggingConfig:
+ from logging.config import fileConfig
+ fileConfig(self.loggingConfig)
+ return
+
+        fmt = logging.Formatter('%(name)s: %(levelname)s: %(message)s')
+        if self.debugLog:
+            handler = logging.FileHandler(self.debugLog)
+        else:
+            handler = logging.StreamHandler(self.logStream)
+        handler.setFormatter(fmt)
+
+ logger = logging.getLogger('nose')
+ logger.propagate = 0
+
+ # only add our default handler if there isn't already one there
+ # this avoids annoying duplicate log messages.
+ found = False
+ if self.debugLog:
+ debugLogAbsPath = os.path.abspath(self.debugLog)
+ for h in logger.handlers:
+ if type(h) == logging.FileHandler and \
+ h.baseFilename == debugLogAbsPath:
+ found = True
+ else:
+ for h in logger.handlers:
+ if type(h) == logging.StreamHandler and \
+ h.stream == self.logStream:
+ found = True
+ if not found:
+ logger.addHandler(handler)
+
+ # default level
+ lvl = logging.WARNING
+ if self.verbosity >= 5:
+ lvl = 0
+ elif self.verbosity >= 4:
+ lvl = logging.DEBUG
+ elif self.verbosity >= 3:
+ lvl = logging.INFO
+ logger.setLevel(lvl)
+
+ # individual overrides
+ if self.debug:
+ # no blanks
+ debug_loggers = [ name for name in self.debug.split(',')
+ if name ]
+ for logger_name in debug_loggers:
+ l = logging.getLogger(logger_name)
+ l.setLevel(logging.DEBUG)
+ if not l.handlers and not logger_name.startswith('nose'):
+ l.addHandler(handler)
+
+ def configureWhere(self, where):
+ """Configure the working directory or directories for the test run.
+ """
+ from nose.importer import add_path
+ self.workingDir = None
+ where = tolist(where)
+ warned = False
+ for path in where:
+ if not self.workingDir:
+ abs_path = absdir(path)
+ if abs_path is None:
+ raise ValueError("Working directory '%s' not found, or "
+ "not a directory" % path)
+ log.info("Set working dir to %s", abs_path)
+ self.workingDir = abs_path
+ if self.addPaths and \
+ os.path.exists(os.path.join(abs_path, '__init__.py')):
+ log.info("Working directory %s is a package; "
+ "adding to sys.path" % abs_path)
+ add_path(abs_path)
+ continue
+ if not warned:
+ warn("Use of multiple -w arguments is deprecated and "
+ "support may be removed in a future release. You can "
+ "get the same behavior by passing directories without "
+ "the -w argument on the command line, or by using the "
+ "--tests argument in a configuration file.",
+ DeprecationWarning)
+ warned = True
+ self.testNames.append(path)
+
+ def default(self):
+ """Reset all config values to defaults.
+ """
+ self.__dict__.update(self._default)
+
+ def getParser(self, doc=None):
+ """Get the command line option parser.
+ """
+ if self.parser:
+ return self.parser
+ env = self.env
+ parser = self.parserClass(doc)
+ parser.add_option(
+ "-V","--version", action="store_true",
+ dest="version", default=False,
+ help="Output nose version and exit")
+ parser.add_option(
+ "-p", "--plugins", action="store_true",
+ dest="showPlugins", default=False,
+ help="Output list of available plugins and exit. Combine with "
+ "higher verbosity for greater detail")
+ parser.add_option(
+ "-v", "--verbose",
+ action="count", dest="verbosity",
+ default=self.verbosity,
+ help="Be more verbose. [NOSE_VERBOSE]")
+ parser.add_option(
+ "--verbosity", action="store", dest="verbosity",
+ metavar='VERBOSITY',
+ type="int", help="Set verbosity; --verbosity=2 is "
+ "the same as -v")
+ parser.add_option(
+ "-q", "--quiet", action="store_const", const=0, dest="verbosity",
+ help="Be less verbose")
+ parser.add_option(
+ "-c", "--config", action="append", dest="files",
+ metavar="FILES",
+ help="Load configuration from config file(s). May be specified "
+ "multiple times; in that case, all config files will be "
+ "loaded and combined")
+ parser.add_option(
+ "-w", "--where", action="append", dest="where",
+ metavar="WHERE",
+ help="Look for tests in this directory. "
+ "May be specified multiple times. The first directory passed "
+ "will be used as the working directory, in place of the current "
+ "working directory, which is the default. Others will be added "
+ "to the list of tests to execute. [NOSE_WHERE]"
+ )
+ parser.add_option(
+ "--py3where", action="append", dest="py3where",
+ metavar="PY3WHERE",
+ help="Look for tests in this directory under Python 3.x. "
+ "Functions the same as 'where', but only applies if running under "
+ "Python 3.x or above. Note that, if present under 3.x, this "
+ "option completely replaces any directories specified with "
+ "'where', so the 'where' option becomes ineffective. "
+ "[NOSE_PY3WHERE]"
+ )
+ parser.add_option(
+ "-m", "--match", "--testmatch", action="store",
+ dest="testMatch", metavar="REGEX",
+ help="Files, directories, function names, and class names "
+ "that match this regular expression are considered tests. "
+ "Default: %s [NOSE_TESTMATCH]" % self.testMatchPat,
+ default=self.testMatchPat)
+ parser.add_option(
+ "--tests", action="store", dest="testNames", default=None,
+ metavar='NAMES',
+ help="Run these tests (comma-separated list). This argument is "
+ "useful mainly from configuration files; on the command line, "
+ "just pass the tests to run as additional arguments with no "
+ "switch.")
+ parser.add_option(
+ "-l", "--debug", action="store",
+ dest="debug", default=self.debug,
+ help="Activate debug logging for one or more systems. "
+ "Available debug loggers: nose, nose.importer, "
+ "nose.inspector, nose.plugins, nose.result and "
+ "nose.selector. Separate multiple names with a comma.")
+ parser.add_option(
+ "--debug-log", dest="debugLog", action="store",
+ default=self.debugLog, metavar="FILE",
+ help="Log debug messages to this file "
+ "(default: sys.stderr)")
+ parser.add_option(
+ "--logging-config", "--log-config",
+ dest="loggingConfig", action="store",
+ default=self.loggingConfig, metavar="FILE",
+ help="Load logging config from this file -- bypasses all other"
+ " logging config settings.")
+ parser.add_option(
+ "-I", "--ignore-files", action="append", dest="ignoreFiles",
+ metavar="REGEX",
+ help="Completely ignore any file that matches this regular "
+ "expression. Takes precedence over any other settings or "
+ "plugins. "
+ "Specifying this option will replace the default setting. "
+ "Specify this option multiple times "
+ "to add more regular expressions [NOSE_IGNORE_FILES]")
+ parser.add_option(
+ "-e", "--exclude", action="append", dest="exclude",
+ metavar="REGEX",
+ help="Don't run tests that match regular "
+ "expression [NOSE_EXCLUDE]")
+ parser.add_option(
+ "-i", "--include", action="append", dest="include",
+ metavar="REGEX",
+ help="This regular expression will be applied to files, "
+ "directories, function names, and class names for a chance "
+ "to include additional tests that do not match TESTMATCH. "
+ "Specify this option multiple times "
+ "to add more regular expressions [NOSE_INCLUDE]")
+ parser.add_option(
+ "-x", "--stop", action="store_true", dest="stopOnError",
+ default=self.stopOnError,
+ help="Stop running tests after the first error or failure")
+ parser.add_option(
+ "-P", "--no-path-adjustment", action="store_false",
+ dest="addPaths",
+ default=self.addPaths,
+ help="Don't make any changes to sys.path when "
+ "loading tests [NOSE_NOPATH]")
+ parser.add_option(
+ "--exe", action="store_true", dest="includeExe",
+ default=self.includeExe,
+ help="Look for tests in python modules that are "
+ "executable. Normal behavior is to exclude executable "
+ "modules, since they may not be import-safe "
+ "[NOSE_INCLUDE_EXE]")
+ parser.add_option(
+ "--noexe", action="store_false", dest="includeExe",
+ help="DO NOT look for tests in python modules that are "
+ "executable. (The default on the windows platform is to "
+ "do so.)")
+ parser.add_option(
+ "--traverse-namespace", action="store_true",
+ default=self.traverseNamespace, dest="traverseNamespace",
+ help="Traverse through all path entries of a namespace package")
+ parser.add_option(
+ "--first-package-wins", "--first-pkg-wins", "--1st-pkg-wins",
+ action="store_true", default=False, dest="firstPackageWins",
+ help="nose's importer will normally evict a package from sys."
+ "modules if it sees a package with the same name in a different "
+ "location. Set this option to disable that behavior.")
+ parser.add_option(
+ "--no-byte-compile",
+ action="store_false", default=True, dest="byteCompile",
+ help="Prevent nose from byte-compiling the source into .pyc files "
+ "while nose is scanning for and running tests.")
+
+ self.plugins.loadPlugins()
+ self.pluginOpts(parser)
+
+ self.parser = parser
+ return parser
+
+ def help(self, doc=None):
+ """Return the generated help message
+ """
+ return self.getParser(doc).format_help()
+
+ def pluginOpts(self, parser):
+ self.plugins.addOptions(parser, self.env)
+
+ def reset(self):
+ self.__dict__.update(self._orig)
+
+ def todict(self):
+ return self.__dict__.copy()
+
+ def update(self, d):
+ self.__dict__.update(d)
+
+
+class NoOptions(object):
+ """Options container that returns None for all options.
+ """
+ def __getstate__(self):
+ return {}
+
+ def __setstate__(self, state):
+ pass
+
+ def __getnewargs__(self):
+ return ()
+
+    def __nonzero__(self):
+        return False
+
+    def __getattr__(self, name):
+        # Per the class docstring: any option attribute reads as None
+        # until parsed options replace this placeholder.
+        return None
+
+
+def user_config_files():
+ """Return path to any existing user config files
+ """
+ return filter(os.path.exists,
+ map(os.path.expanduser, config_files))
+
+
+def all_config_files():
+ """Return path to any existing user config files, plus any setup.cfg
+ in the current working directory.
+ """
+ user = user_config_files()
+ if os.path.exists('setup.cfg'):
+ return user + ['setup.cfg']
+ return user
+
+
+# used when parsing config files
+def flag(val):
+ """Does the value look like an on/off flag?"""
+ if val == 1:
+ return True
+ elif val == 0:
+ return False
+ val = str(val)
+ if len(val) > 5:
+ return False
+ return val.upper() in ('1', '0', 'F', 'T', 'TRUE', 'FALSE', 'ON', 'OFF')
+
+
+def _bool(val):
+ return str(val).upper() in ('1', 'T', 'TRUE', 'ON')
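+
+# For illustration, how the two helpers above treat raw config-file values:
+#
+#     flag('on'), flag('maybe')    # True, False (does it look like a flag?)
+#     _bool('on'), _bool('off')    # True, False (what does it mean?)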
diff --git a/lib/spack/external/nose/core.py b/lib/spack/external/nose/core.py
new file mode 100644
index 0000000000..49e7939b98
--- /dev/null
+++ b/lib/spack/external/nose/core.py
@@ -0,0 +1,341 @@
+"""Implements nose test program and collector.
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import time
+import unittest
+
+from nose.config import Config, all_config_files
+from nose.loader import defaultTestLoader
+from nose.plugins.manager import PluginManager, DefaultPluginManager, \
+ RestrictedPluginManager
+from nose.result import TextTestResult
+from nose.suite import FinalizingSuiteWrapper
+from nose.util import isclass, tolist
+
+
+log = logging.getLogger('nose.core')
+compat_24 = sys.version_info >= (2, 4)
+
+__all__ = ['TestProgram', 'main', 'run', 'run_exit', 'runmodule', 'collector',
+ 'TextTestRunner']
+
+
+class TextTestRunner(unittest.TextTestRunner):
+ """Test runner that uses nose's TextTestResult to enable errorClasses,
+ as well as providing hooks for plugins to override or replace the test
+ output stream, results, and the test case itself.
+ """
+ def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
+ config=None):
+ if config is None:
+ config = Config()
+ self.config = config
+ unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)
+
+
+ def _makeResult(self):
+ return TextTestResult(self.stream,
+ self.descriptions,
+ self.verbosity,
+ self.config)
+
+ def run(self, test):
+ """Overrides to provide plugin hooks and defer all output to
+ the test result class.
+ """
+ wrapper = self.config.plugins.prepareTest(test)
+ if wrapper is not None:
+ test = wrapper
+
+ # plugins can decorate or capture the output stream
+ wrapped = self.config.plugins.setOutputStream(self.stream)
+ if wrapped is not None:
+ self.stream = wrapped
+
+ result = self._makeResult()
+ start = time.time()
+ try:
+ test(result)
+ except KeyboardInterrupt:
+ pass
+ stop = time.time()
+ result.printErrors()
+ result.printSummary(start, stop)
+ self.config.plugins.finalize(result)
+ return result
+
+
+class TestProgram(unittest.TestProgram):
+ """Collect and run tests, returning success or failure.
+
+ The arguments to TestProgram() are the same as to
+ :func:`main()` and :func:`run()`:
+
+ * module: All tests are in this module (default: None)
+ * defaultTest: Tests to load (default: '.')
+ * argv: Command line arguments (default: None; sys.argv is read)
+ * testRunner: Test runner instance (default: None)
+ * testLoader: Test loader instance (default: None)
+ * env: Environment; ignored if config is provided (default: None;
+ os.environ is read)
+ * config: :class:`nose.config.Config` instance (default: None)
+ * suite: Suite or list of tests to run (default: None). Passing a
+ suite or lists of tests will bypass all test discovery and
+ loading. *ALSO NOTE* that if you pass a unittest.TestSuite
+ instance as the suite, context fixtures at the class, module and
+ package level will not be used, and many plugin hooks will not
+ be called. If you want normal nose behavior, either pass a list
+ of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
+ * exit: Exit after running tests and printing report (default: True)
+ * plugins: List of plugins to use; ignored if config is provided
+ (default: load plugins with DefaultPluginManager)
+ * addplugins: List of **extra** plugins to use. Pass a list of plugin
+ instances in this argument to make custom plugins available while
+ still using the DefaultPluginManager.
+ """
+ verbosity = 1
+
+ def __init__(self, module=None, defaultTest='.', argv=None,
+ testRunner=None, testLoader=None, env=None, config=None,
+ suite=None, exit=True, plugins=None, addplugins=None):
+ if env is None:
+ env = os.environ
+ if config is None:
+ config = self.makeConfig(env, plugins)
+ if addplugins:
+ config.plugins.addPlugins(extraplugins=addplugins)
+ self.config = config
+ self.suite = suite
+ self.exit = exit
+ extra_args = {}
+ version = sys.version_info[0:2]
+ if version >= (2,7) and version != (3,0):
+ extra_args['exit'] = exit
+ unittest.TestProgram.__init__(
+ self, module=module, defaultTest=defaultTest,
+ argv=argv, testRunner=testRunner, testLoader=testLoader,
+ **extra_args)
+
+ def getAllConfigFiles(self, env=None):
+ env = env or {}
+ if env.get('NOSE_IGNORE_CONFIG_FILES', False):
+ return []
+ else:
+ return all_config_files()
+
+ def makeConfig(self, env, plugins=None):
+ """Load a Config, pre-filled with user config files if any are
+ found.
+ """
+ cfg_files = self.getAllConfigFiles(env)
+ if plugins:
+ manager = PluginManager(plugins=plugins)
+ else:
+ manager = DefaultPluginManager()
+ return Config(
+ env=env, files=cfg_files, plugins=manager)
+
+ def parseArgs(self, argv):
+ """Parse argv and env and configure running environment.
+ """
+ self.config.configure(argv, doc=self.usage())
+ log.debug("configured %s", self.config)
+
+ # quick outs: version, plugins (optparse would have already
+ # caught and exited on help)
+ if self.config.options.version:
+ from nose import __version__
+ sys.stdout = sys.__stdout__
+ print "%s version %s" % (os.path.basename(sys.argv[0]), __version__)
+ sys.exit(0)
+
+ if self.config.options.showPlugins:
+ self.showPlugins()
+ sys.exit(0)
+
+ if self.testLoader is None:
+ self.testLoader = defaultTestLoader(config=self.config)
+ elif isclass(self.testLoader):
+ self.testLoader = self.testLoader(config=self.config)
+ plug_loader = self.config.plugins.prepareTestLoader(self.testLoader)
+ if plug_loader is not None:
+ self.testLoader = plug_loader
+ log.debug("test loader is %s", self.testLoader)
+
+ # FIXME if self.module is a string, add it to self.testNames? not sure
+
+ if self.config.testNames:
+ self.testNames = self.config.testNames
+ else:
+ self.testNames = tolist(self.defaultTest)
+ log.debug('defaultTest %s', self.defaultTest)
+ log.debug('Test names are %s', self.testNames)
+ if self.config.workingDir is not None:
+ os.chdir(self.config.workingDir)
+ self.createTests()
+
+ def createTests(self):
+ """Create the tests to run. If a self.suite
+ is set, then that suite will be used. Otherwise, tests will be
+ loaded from the given test names (self.testNames) using the
+ test loader.
+ """
+ log.debug("createTests called with %s", self.suite)
+ if self.suite is not None:
+ # We were given an explicit suite to run. Make sure it's
+ # loaded and wrapped correctly.
+ self.test = self.testLoader.suiteClass(self.suite)
+ else:
+ self.test = self.testLoader.loadTestsFromNames(self.testNames)
+
+ def runTests(self):
+ """Run Tests. Returns true on success, false on failure, and sets
+ self.success to the same value.
+ """
+ log.debug("runTests called")
+ if self.testRunner is None:
+ self.testRunner = TextTestRunner(stream=self.config.stream,
+ verbosity=self.config.verbosity,
+ config=self.config)
+ plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
+ if plug_runner is not None:
+ self.testRunner = plug_runner
+ result = self.testRunner.run(self.test)
+ self.success = result.wasSuccessful()
+ if self.exit:
+ sys.exit(not self.success)
+ return self.success
+
+ def showPlugins(self):
+ """Print list of available plugins.
+ """
+ import textwrap
+
+ class DummyParser:
+ def __init__(self):
+ self.options = []
+ def add_option(self, *arg, **kw):
+ self.options.append((arg, kw.pop('help', '')))
+
+ v = self.config.verbosity
+ self.config.plugins.sort()
+ for p in self.config.plugins:
+ print "Plugin %s" % p.name
+ if v >= 2:
+ print " score: %s" % p.score
+ print '\n'.join(textwrap.wrap(p.help().strip(),
+ initial_indent=' ',
+ subsequent_indent=' '))
+ if v >= 3:
+ parser = DummyParser()
+ p.addOptions(parser)
+ if len(parser.options):
+ print
+ print " Options:"
+ for opts, help in parser.options:
+ print ' %s' % (', '.join(opts))
+ if help:
+ print '\n'.join(
+ textwrap.wrap(help.strip(),
+ initial_indent=' ',
+ subsequent_indent=' '))
+ print
+
+ def usage(cls):
+ import nose
+ try:
+ ld = nose.__loader__
+ text = ld.get_data(os.path.join(
+ os.path.dirname(__file__), 'usage.txt'))
+ except AttributeError:
+ f = open(os.path.join(
+ os.path.dirname(__file__), 'usage.txt'), 'r')
+ try:
+ text = f.read()
+ finally:
+ f.close()
+ # Ensure that we return str, not bytes.
+ if not isinstance(text, str):
+ text = text.decode('utf-8')
+ return text
+ usage = classmethod(usage)
+
+# backwards compatibility
+run_exit = main = TestProgram
+
+
+def run(*arg, **kw):
+ """Collect and run tests, returning success or failure.
+
+ The arguments to `run()` are the same as to `main()`:
+
+ * module: All tests are in this module (default: None)
+ * defaultTest: Tests to load (default: '.')
+ * argv: Command line arguments (default: None; sys.argv is read)
+ * testRunner: Test runner instance (default: None)
+ * testLoader: Test loader instance (default: None)
+ * env: Environment; ignored if config is provided (default: None;
+ os.environ is read)
+ * config: :class:`nose.config.Config` instance (default: None)
+ * suite: Suite or list of tests to run (default: None). Passing a
+ suite or lists of tests will bypass all test discovery and
+ loading. *ALSO NOTE* that if you pass a unittest.TestSuite
+ instance as the suite, context fixtures at the class, module and
+ package level will not be used, and many plugin hooks will not
+ be called. If you want normal nose behavior, either pass a list
+ of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
+ * plugins: List of plugins to use; ignored if config is provided
+ (default: load plugins with DefaultPluginManager)
+ * addplugins: List of **extra** plugins to use. Pass a list of plugin
+ instances in this argument to make custom plugins available while
+ still using the DefaultPluginManager.
+
+ With the exception that the ``exit`` argument is always set
+ to False.
+ """
+ kw['exit'] = False
+ return TestProgram(*arg, **kw).success
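+
+# A short sketch of programmatic use (the test path is illustrative):
+#
+#     from nose.core import run
+#     success = run(argv=['nosetests', '-v', 'tests/unit'])
+#     # returns True on a passing run instead of calling sys.exit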
+
+
+def runmodule(name='__main__', **kw):
+ """Collect and run tests in a single module only. Defaults to running
+ tests in __main__. Additional arguments to TestProgram may be passed
+ as keyword arguments.
+ """
+ main(defaultTest=name, **kw)
+
+
+def collector():
+ """TestSuite replacement entry point. Use anywhere you might use a
+ unittest.TestSuite. The collector will, by default, load options from
+ all config files and execute loader.loadTestsFromNames() on the
+ configured testNames, or '.' if no testNames are configured.
+ """
+ # plugins that implement any of these methods are disabled, since
+ # we don't control the test runner and won't be able to run them
+ # finalize() is also not called, but plugins that use it aren't disabled,
+ # because capture needs it.
+ setuptools_incompat = ('report', 'prepareTest',
+ 'prepareTestLoader', 'prepareTestRunner',
+ 'setOutputStream')
+
+ plugins = RestrictedPluginManager(exclude=setuptools_incompat)
+ conf = Config(files=all_config_files(),
+ plugins=plugins)
+ conf.configure(argv=['collector'])
+ loader = defaultTestLoader(conf)
+
+ if conf.testNames:
+ suite = loader.loadTestsFromNames(conf.testNames)
+ else:
+ suite = loader.loadTestsFromNames(('.',))
+ return FinalizingSuiteWrapper(suite, plugins.finalize)
+
+
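+# collector() is typically wired up via setuptools rather than called
+# directly, e.g. in a project's setup.py (illustrative):
+#
+#     setup(..., test_suite='nose.collector')
+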
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/spack/external/nose/exc.py b/lib/spack/external/nose/exc.py
new file mode 100644
index 0000000000..8b780db0d4
--- /dev/null
+++ b/lib/spack/external/nose/exc.py
@@ -0,0 +1,9 @@
+"""Exceptions for marking tests as skipped or deprecated.
+
+This module exists to provide backwards compatibility with previous
+versions of nose where skipped and deprecated tests were core
+functionality, rather than being provided by plugins. It may be
+removed in a future release.
+"""
+from nose.plugins.skip import SkipTest
+from nose.plugins.deprecated import DeprecatedTest
diff --git a/lib/spack/external/nose/ext/__init__.py b/lib/spack/external/nose/ext/__init__.py
new file mode 100644
index 0000000000..5fd1516a09
--- /dev/null
+++ b/lib/spack/external/nose/ext/__init__.py
@@ -0,0 +1,3 @@
+"""
+External or vendor files
+"""
diff --git a/lib/spack/external/nose/ext/dtcompat.py b/lib/spack/external/nose/ext/dtcompat.py
new file mode 100644
index 0000000000..332cf08c12
--- /dev/null
+++ b/lib/spack/external/nose/ext/dtcompat.py
@@ -0,0 +1,2272 @@
+# Module doctest.
+# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
+# Major enhancements and refactoring by:
+# Jim Fulton
+# Edward Loper
+
+# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
+#
+# Modified for inclusion in nose to provide support for DocFileTest in
+# python 2.3:
+#
+# - all doctests removed from module (they fail under 2.3 and 2.5)
+# - now handles the $py.class extension when ran under Jython
+
+r"""Module doctest -- a framework for running examples in docstrings.
+
+In simplest use, end each module M to be tested with:
+
+def _test():
+ import doctest
+ doctest.testmod()
+
+if __name__ == "__main__":
+ _test()
+
+Then running the module as a script will cause the examples in the
+docstrings to get executed and verified:
+
+python M.py
+
+This won't display anything unless an example fails, in which case the
+failing example(s) and the cause(s) of the failure(s) are printed to stdout
+(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
+line of output is "Test failed.".
+
+Run it with the -v switch instead:
+
+python M.py -v
+
+and a detailed report of all examples tried is printed to stdout, along
+with assorted summaries at the end.
+
+You can force verbose mode by passing "verbose=True" to testmod, or prohibit
+it by passing "verbose=False". In either of those cases, sys.argv is not
+examined by testmod.
+
+There are a variety of other ways to run doctests, including integration
+with the unittest framework, and support for running non-Python text
+files containing doctests. There are also many ways to override parts
+of doctest's default behaviors. See the Library Reference Manual for
+details.
+"""
+
+__docformat__ = 'reStructuredText en'
+
+__all__ = [
+    # 0. Option Flags
+ 'register_optionflag',
+ 'DONT_ACCEPT_TRUE_FOR_1',
+ 'DONT_ACCEPT_BLANKLINE',
+ 'NORMALIZE_WHITESPACE',
+ 'ELLIPSIS',
+ 'IGNORE_EXCEPTION_DETAIL',
+ 'COMPARISON_FLAGS',
+ 'REPORT_UDIFF',
+ 'REPORT_CDIFF',
+ 'REPORT_NDIFF',
+ 'REPORT_ONLY_FIRST_FAILURE',
+ 'REPORTING_FLAGS',
+ # 1. Utility Functions
+ 'is_private',
+ # 2. Example & DocTest
+ 'Example',
+ 'DocTest',
+ # 3. Doctest Parser
+ 'DocTestParser',
+ # 4. Doctest Finder
+ 'DocTestFinder',
+ # 5. Doctest Runner
+ 'DocTestRunner',
+ 'OutputChecker',
+ 'DocTestFailure',
+ 'UnexpectedException',
+ 'DebugRunner',
+ # 6. Test Functions
+ 'testmod',
+ 'testfile',
+ 'run_docstring_examples',
+ # 7. Tester
+ 'Tester',
+ # 8. Unittest Support
+ 'DocTestSuite',
+ 'DocFileSuite',
+ 'set_unittest_reportflags',
+ # 9. Debugging Support
+ 'script_from_examples',
+ 'testsource',
+ 'debug_src',
+ 'debug',
+]
+
+import __future__
+
+import sys, traceback, inspect, linecache, os, re
+import unittest, difflib, pdb, tempfile
+import warnings
+from StringIO import StringIO
+
+# Don't whine about the deprecated is_private function in this
+# module's tests.
+warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
+ __name__, 0)
+
+# There are 4 basic classes:
+# - Example: a <source, want> pair, plus an intra-docstring line number.
+# - DocTest: a collection of examples, parsed from a docstring, plus
+# info about where the docstring came from (name, filename, lineno).
+# - DocTestFinder: extracts DocTests from a given object's docstring and
+# its contained objects' docstrings.
+# - DocTestRunner: runs DocTest cases, and accumulates statistics.
+#
+# So the basic picture is:
+#
+# list of:
+# +------+ +---------+ +-------+
+# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
+# +------+ +---------+ +-------+
+# | Example |
+# | ... |
+# | Example |
+# +---------+
+
+# Option constants.
+
+OPTIONFLAGS_BY_NAME = {}
+def register_optionflag(name):
+ # Create a new flag unless `name` is already known.
+ return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
+
+DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
+DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
+NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
+ELLIPSIS = register_optionflag('ELLIPSIS')
+IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
+
+COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
+ DONT_ACCEPT_BLANKLINE |
+ NORMALIZE_WHITESPACE |
+ ELLIPSIS |
+ IGNORE_EXCEPTION_DETAIL)
+
+REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
+REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
+REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
+REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
+
+REPORTING_FLAGS = (REPORT_UDIFF |
+ REPORT_CDIFF |
+ REPORT_NDIFF |
+ REPORT_ONLY_FIRST_FAILURE)
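+
+# Because each registered flag is the next power of two, flags combine
+# with bitwise OR, e.g.:
+#
+#     flags = ELLIPSIS | NORMALIZE_WHITESPACE
+#     bool(flags & ELLIPSIS)    # True: ELLIPSIS is enabled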
+
+# Special string markers for use in `want` strings:
+BLANKLINE_MARKER = '<BLANKLINE>'
+ELLIPSIS_MARKER = '...'
+
+######################################################################
+## Table of Contents
+######################################################################
+# 1. Utility Functions
+# 2. Example & DocTest -- store test cases
+# 3. DocTest Parser -- extracts examples from strings
+# 4. DocTest Finder -- extracts test cases from objects
+# 5. DocTest Runner -- runs test cases
+# 6. Test Functions -- convenient wrappers for testing
+# 7. Tester Class -- for backwards compatibility
+# 8. Unittest Support
+# 9. Debugging Support
+# 10. Example Usage
+
+######################################################################
+## 1. Utility Functions
+######################################################################
+
+def is_private(prefix, base):
+ """prefix, base -> true iff name prefix + "." + base is "private".
+
+ Prefix may be an empty string, and base does not contain a period.
+ Prefix is ignored (although functions you write conforming to this
+ protocol may make use of it).
+    Return true iff base begins with at least one underscore, but
+ does not both begin and end with (at least) two underscores.
+ """
+ warnings.warn("is_private is deprecated; it wasn't useful; "
+ "examine DocTestFinder.find() lists instead",
+ DeprecationWarning, stacklevel=2)
+ return base[:1] == "_" and not base[:2] == "__" == base[-2:]
+
+def _extract_future_flags(globs):
+ """
+ Return the compiler-flags associated with the future features that
+ have been imported into the given namespace (globs).
+ """
+ flags = 0
+ for fname in __future__.all_feature_names:
+ feature = globs.get(fname, None)
+ if feature is getattr(__future__, fname):
+ flags |= feature.compiler_flag
+ return flags
+
+def _normalize_module(module, depth=2):
+ """
+ Return the module specified by `module`. In particular:
+ - If `module` is a module, then return module.
+ - If `module` is a string, then import and return the
+ module with that name.
+ - If `module` is None, then return the calling module.
+ The calling module is assumed to be the module of
+ the stack frame at the given depth in the call stack.
+ """
+ if inspect.ismodule(module):
+ return module
+ elif isinstance(module, (str, unicode)):
+ return __import__(module, globals(), locals(), ["*"])
+ elif module is None:
+ return sys.modules[sys._getframe(depth).f_globals['__name__']]
+ else:
+ raise TypeError("Expected a module, string, or None")
+
+def _indent(s, indent=4):
+ """
+ Add the given number of space characters to the beginning every
+ non-blank line in `s`, and return the result.
+ """
+ # This regexp matches the start of non-blank lines:
+ return re.sub('(?m)^(?!$)', indent*' ', s)
+
+def _exception_traceback(exc_info):
+ """
+ Return a string containing a traceback message for the given
+ exc_info tuple (as returned by sys.exc_info()).
+ """
+ # Get a traceback message.
+ excout = StringIO()
+ exc_type, exc_val, exc_tb = exc_info
+ traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
+ return excout.getvalue()
+
+# Override some StringIO methods.
+class _SpoofOut(StringIO):
+ def getvalue(self):
+ result = StringIO.getvalue(self)
+ # If anything at all was written, make sure there's a trailing
+ # newline. There's no way for the expected output to indicate
+ # that a trailing newline is missing.
+ if result and not result.endswith("\n"):
+ result += "\n"
+ # Prevent softspace from screwing up the next test case, in
+ # case they used print with a trailing comma in an example.
+ if hasattr(self, "softspace"):
+ del self.softspace
+ return result
+
+ def truncate(self, size=None):
+ StringIO.truncate(self, size)
+ if hasattr(self, "softspace"):
+ del self.softspace
+
+# Worst-case linear-time ellipsis matching.
+def _ellipsis_match(want, got):
+ if ELLIPSIS_MARKER not in want:
+ return want == got
+
+ # Find "the real" strings.
+ ws = want.split(ELLIPSIS_MARKER)
+ assert len(ws) >= 2
+
+ # Deal with exact matches possibly needed at one or both ends.
+ startpos, endpos = 0, len(got)
+ w = ws[0]
+ if w: # starts with exact match
+ if got.startswith(w):
+ startpos = len(w)
+ del ws[0]
+ else:
+ return False
+ w = ws[-1]
+ if w: # ends with exact match
+ if got.endswith(w):
+ endpos -= len(w)
+ del ws[-1]
+ else:
+ return False
+
+ if startpos > endpos:
+ # Exact end matches required more characters than we have, as in
+ # _ellipsis_match('aa...aa', 'aaa')
+ return False
+
+ # For the rest, we only need to find the leftmost non-overlapping
+ # match for each piece. If there's no overall match that way alone,
+ # there's no overall match period.
+ for w in ws:
+ # w may be '' at times, if there are consecutive ellipses, or
+ # due to an ellipsis at the start or end of `want`. That's OK.
+ # Search for an empty string succeeds, and doesn't change startpos.
+ startpos = got.find(w, startpos, endpos)
+ if startpos < 0:
+ return False
+ startpos += len(w)
+
+ return True
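+
+# For example, _ellipsis_match('a...z', 'abcz') is True, while
+# _ellipsis_match('aa...aa', 'aaa') is False: the exact end matches
+# would need more characters than `got` contains.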
+
+def _comment_line(line):
+ "Return a commented form of the given line"
+ line = line.rstrip()
+ if line:
+ return '# '+line
+ else:
+ return '#'
+
+class _OutputRedirectingPdb(pdb.Pdb):
+ """
+ A specialized version of the python debugger that redirects stdout
+ to a given stream when interacting with the user. Stdout is *not*
+ redirected when traced code is executed.
+ """
+ def __init__(self, out):
+ self.__out = out
+ pdb.Pdb.__init__(self)
+
+ def trace_dispatch(self, *args):
+ # Redirect stdout to the given stream.
+ save_stdout = sys.stdout
+ sys.stdout = self.__out
+ # Call Pdb's trace dispatch method.
+ try:
+ return pdb.Pdb.trace_dispatch(self, *args)
+ finally:
+ sys.stdout = save_stdout
+
+# [XX] Normalize with respect to os.path.pardir?
+def _module_relative_path(module, path):
+ if not inspect.ismodule(module):
+ raise TypeError, 'Expected a module: %r' % module
+ if path.startswith('/'):
+ raise ValueError, 'Module-relative files may not have absolute paths'
+
+ # Find the base directory for the path.
+ if hasattr(module, '__file__'):
+ # A normal module/package
+ basedir = os.path.split(module.__file__)[0]
+ elif module.__name__ == '__main__':
+ # An interactive session.
+ if len(sys.argv)>0 and sys.argv[0] != '':
+ basedir = os.path.split(sys.argv[0])[0]
+ else:
+ basedir = os.curdir
+ else:
+ # A module w/o __file__ (this includes builtins)
+ raise ValueError("Can't resolve paths relative to the module " +
+ module + " (it has no __file__)")
+
+ # Combine the base directory and the path.
+ return os.path.join(basedir, *(path.split('/')))
+
+######################################################################
+## 2. Example & DocTest
+######################################################################
+## - An "example" is a <source, want> pair, where "source" is a
+## fragment of source code, and "want" is the expected output for
+## "source." The Example class also includes information about
+## where the example was extracted from.
+##
+## - A "doctest" is a collection of examples, typically extracted from
+## a string (such as an object's docstring). The DocTest class also
+## includes information about where the string was extracted from.
+
+class Example:
+ """
+ A single doctest example, consisting of source code and expected
+ output. `Example` defines the following attributes:
+
+ - source: A single Python statement, always ending with a newline.
+ The constructor adds a newline if needed.
+
+ - want: The expected output from running the source code (either
+ from stdout, or a traceback in case of exception). `want` ends
+ with a newline unless it's empty, in which case it's an empty
+ string. The constructor adds a newline if needed.
+
+ - exc_msg: The exception message generated by the example, if
+ the example is expected to generate an exception; or `None` if
+ it is not expected to generate an exception. This exception
+ message is compared against the return value of
+ `traceback.format_exception_only()`. `exc_msg` ends with a
+ newline unless it's `None`. The constructor adds a newline
+ if needed.
+
+ - lineno: The line number within the DocTest string containing
+ this Example where the Example begins. This line number is
+ zero-based, with respect to the beginning of the DocTest.
+
+ - indent: The example's indentation in the DocTest string.
+      I.e., the number of space characters that precede the
+ example's first prompt.
+
+ - options: A dictionary mapping from option flags to True or
+ False, which is used to override default options for this
+ example. Any option flags not contained in this dictionary
+ are left at their default value (as specified by the
+ DocTestRunner's optionflags). By default, no options are set.
+ """
+ def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
+ options=None):
+ # Normalize inputs.
+ if not source.endswith('\n'):
+ source += '\n'
+ if want and not want.endswith('\n'):
+ want += '\n'
+ if exc_msg is not None and not exc_msg.endswith('\n'):
+ exc_msg += '\n'
+ # Store properties.
+ self.source = source
+ self.want = want
+ self.lineno = lineno
+ self.indent = indent
+ if options is None: options = {}
+ self.options = options
+ self.exc_msg = exc_msg
+
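+# A single interactive pair maps onto Example like so (sketch):
+#
+#     e = Example(source='print 1+1', want='2\n')
+#     e.source    # 'print 1+1\n' -- the constructor added the newline
+#     e.want      # '2\n'
+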
+class DocTest:
+ """
+ A collection of doctest examples that should be run in a single
+ namespace. Each `DocTest` defines the following attributes:
+
+ - examples: the list of examples.
+
+ - globs: The namespace (aka globals) that the examples should
+ be run in.
+
+ - name: A name identifying the DocTest (typically, the name of
+ the object whose docstring this DocTest was extracted from).
+
+ - filename: The name of the file that this DocTest was extracted
+ from, or `None` if the filename is unknown.
+
+ - lineno: The line number within filename where this DocTest
+ begins, or `None` if the line number is unavailable. This
+ line number is zero-based, with respect to the beginning of
+ the file.
+
+ - docstring: The string that the examples were extracted from,
+ or `None` if the string is unavailable.
+ """
+ def __init__(self, examples, globs, name, filename, lineno, docstring):
+ """
+ Create a new DocTest containing the given examples. The
+ DocTest's globals are initialized with a copy of `globs`.
+ """
+ assert not isinstance(examples, basestring), \
+ "DocTest no longer accepts str; use DocTestParser instead"
+ self.examples = examples
+ self.docstring = docstring
+ self.globs = globs.copy()
+ self.name = name
+ self.filename = filename
+ self.lineno = lineno
+
+ def __repr__(self):
+ if len(self.examples) == 0:
+ examples = 'no examples'
+ elif len(self.examples) == 1:
+ examples = '1 example'
+ else:
+ examples = '%d examples' % len(self.examples)
+ return ('<DocTest %s from %s:%s (%s)>' %
+ (self.name, self.filename, self.lineno, examples))
+
+
+ # This lets us sort tests by name:
+ def __cmp__(self, other):
+ if not isinstance(other, DocTest):
+ return -1
+ return cmp((self.name, self.filename, self.lineno, id(self)),
+ (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+ """
+ A class used to parse strings containing doctest examples.
+ """
+ # This regular expression is used to find doctest examples in a
+ # string. It defines three groups: `source` is the source code
+ # (including leading indentation and prompts); `indent` is the
+ # indentation of the first (PS1) line of the source code; and
+ # `want` is the expected output (including leading indentation).
+ _EXAMPLE_RE = re.compile(r'''
+ # Source consists of a PS1 line followed by zero or more PS2 lines.
+ (?P<source>
+ (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
+ (?:\n [ ]* \.\.\. .*)*) # PS2 lines
+ \n?
+ # Want consists of any non-blank lines that do not start with PS1.
+ (?P<want> (?:(?![ ]*$) # Not a blank line
+ (?![ ]*>>>) # Not a line starting with PS1
+ .*$\n? # But any other line
+ )*)
+ ''', re.MULTILINE | re.VERBOSE)
+
+ # A regular expression for handling `want` strings that contain
+ # expected exceptions. It divides `want` into three pieces:
+ # - the traceback header line (`hdr`)
+ # - the traceback stack (`stack`)
+ # - the exception message (`msg`), as generated by
+ # traceback.format_exception_only()
+ # `msg` may have multiple lines. We assume/require that the
+ # exception message is the first non-indented line starting with a word
+ # character following the traceback header line.
+ _EXCEPTION_RE = re.compile(r"""
+ # Grab the traceback header. Different versions of Python have
+ # said different things on the first traceback line.
+ ^(?P<hdr> Traceback\ \(
+ (?: most\ recent\ call\ last
+ | innermost\ last
+ ) \) :
+ )
+ \s* $ # toss trailing whitespace on the header.
+ (?P<stack> .*?) # don't blink: absorb stuff until...
+ ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
+ """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+ # A callable returning a true value iff its argument is a blank line
+ # or contains a single comment.
+ _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+ def parse(self, string, name='<string>'):
+ """
+ Divide the given string into examples and intervening text,
+ and return them as a list of alternating Examples and strings.
+ Line numbers for the Examples are 0-based. The optional
+ argument `name` is a name identifying this string, and is only
+ used for error messages.
+ """
+ string = string.expandtabs()
+ # If all lines begin with the same indentation, then strip it.
+ min_indent = self._min_indent(string)
+ if min_indent > 0:
+ string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+ output = []
+ charno, lineno = 0, 0
+ # Find all doctest examples in the string:
+ for m in self._EXAMPLE_RE.finditer(string):
+ # Add the pre-example text to `output`.
+ output.append(string[charno:m.start()])
+ # Update lineno (lines before this example)
+ lineno += string.count('\n', charno, m.start())
+ # Extract info from the regexp match.
+ (source, options, want, exc_msg) = \
+ self._parse_example(m, name, lineno)
+ # Create an Example, and add it to the list.
+ if not self._IS_BLANK_OR_COMMENT(source):
+ output.append( Example(source, want, exc_msg,
+ lineno=lineno,
+ indent=min_indent+len(m.group('indent')),
+ options=options) )
+ # Update lineno (lines inside this example)
+ lineno += string.count('\n', m.start(), m.end())
+ # Update charno.
+ charno = m.end()
+ # Add any remaining post-example text to `output`.
+ output.append(string[charno:])
+ return output
+
+ def get_doctest(self, string, globs, name, filename, lineno):
+ """
+ Extract all doctest examples from the given string, and
+ collect them into a `DocTest` object.
+
+ `globs`, `name`, `filename`, and `lineno` are attributes for
+ the new `DocTest` object. See the documentation for `DocTest`
+ for more information.
+ """
+ return DocTest(self.get_examples(string, name), globs,
+ name, filename, lineno, string)
+
+ def get_examples(self, string, name='<string>'):
+ """
+ Extract all doctest examples from the given string, and return
+ them as a list of `Example` objects. Line numbers are
+ 0-based, because it's most common in doctests that nothing
+ interesting appears on the same line as opening triple-quote,
+        and so the first interesting line is called "line 1" then.
+
+ The optional argument `name` is a name identifying this
+ string, and is only used for error messages.
+ """
+ return [x for x in self.parse(string, name)
+ if isinstance(x, Example)]
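+
+    # Sketch: extracting one example from a doctest string:
+    #
+    #     parser = DocTestParser()
+    #     [ex] = parser.get_examples(">>> 1 + 1\n2\n")
+    #     (ex.source, ex.want)    # ('1 + 1\n', '2\n')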
+
+ def _parse_example(self, m, name, lineno):
+ """
+ Given a regular expression match from `_EXAMPLE_RE` (`m`),
+ return a pair `(source, want)`, where `source` is the matched
+ example's source code (with prompts and indentation stripped);
+ and `want` is the example's expected output (with indentation
+ stripped).
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ # Get the example's indentation level.
+ indent = len(m.group('indent'))
+
+ # Divide source into lines; check that they're properly
+ # indented; and then strip their indentation & prompts.
+ source_lines = m.group('source').split('\n')
+ self._check_prompt_blank(source_lines, indent, name, lineno)
+ self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
+ source = '\n'.join([sl[indent+4:] for sl in source_lines])
+
+ # Divide want into lines; check that it's properly indented; and
+ # then strip the indentation. Spaces before the last newline should
+ # be preserved, so plain rstrip() isn't good enough.
+ want = m.group('want')
+ want_lines = want.split('\n')
+ if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+ del want_lines[-1] # forget final newline & spaces after it
+ self._check_prefix(want_lines, ' '*indent, name,
+ lineno + len(source_lines))
+ want = '\n'.join([wl[indent:] for wl in want_lines])
+
+ # If `want` contains a traceback message, then extract it.
+ m = self._EXCEPTION_RE.match(want)
+ if m:
+ exc_msg = m.group('msg')
+ else:
+ exc_msg = None
+
+ # Extract options from the source.
+ options = self._find_options(source, name, lineno)
+
+ return source, options, want, exc_msg
+
+ # This regular expression looks for option directives in the
+ # source code of an example. Option directives are comments
+ # starting with "doctest:". Warning: this may give false
+ # positives for string-literals that contain the string
+ # "#doctest:". Eliminating these false positives would require
+ # actually parsing the string; but we limit them by ignoring any
+ # line containing "#doctest:" that is *followed* by a quote mark.
+ _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
+ re.MULTILINE)
+
+ def _find_options(self, source, name, lineno):
+ """
+ Return a dictionary containing option overrides extracted from
+ option directives in the given source string.
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ options = {}
+ # (note: with the current regexp, this will match at most once:)
+ for m in self._OPTION_DIRECTIVE_RE.finditer(source):
+ option_strings = m.group(1).replace(',', ' ').split()
+ for option in option_strings:
+ if (option[0] not in '+-' or
+ option[1:] not in OPTIONFLAGS_BY_NAME):
+ raise ValueError('line %r of the doctest for %s '
+ 'has an invalid option: %r' %
+ (lineno+1, name, option))
+ flag = OPTIONFLAGS_BY_NAME[option[1:]]
+ options[flag] = (option[0] == '+')
+ if options and self._IS_BLANK_OR_COMMENT(source):
+ raise ValueError('line %r of the doctest for %s has an option '
+ 'directive on a line with no example: %r' %
+ (lineno, name, source))
+ return options
+
+ # This regular expression finds the indentation of every non-blank
+ # line in a string.
+    _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)
+
+ def _min_indent(self, s):
+ "Return the minimum indentation of any non-blank line in `s`"
+ indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
+ if len(indents) > 0:
+ return min(indents)
+ else:
+ return 0
+
+ def _check_prompt_blank(self, lines, indent, name, lineno):
+ """
+ Given the lines of a source string (including prompts and
+ leading indentation), check to make sure that every prompt is
+ followed by a space character. If any line is not followed by
+ a space character, then raise ValueError.
+ """
+ for i, line in enumerate(lines):
+ if len(line) >= indent+4 and line[indent+3] != ' ':
+ raise ValueError('line %r of the docstring for %s '
+ 'lacks blank after %s: %r' %
+ (lineno+i+1, name,
+ line[indent:indent+3], line))
+
+ def _check_prefix(self, lines, prefix, name, lineno):
+ """
+ Check that every line in the given list starts with the given
+ prefix; if any line does not, then raise a ValueError.
+ """
+ for i, line in enumerate(lines):
+ if line and not line.startswith(prefix):
+ raise ValueError('line %r of the docstring for %s has '
+ 'inconsistent leading whitespace: %r' %
+ (lineno+i+1, name, line))
+
+
+######################################################################
+## 4. DocTest Finder
+######################################################################
+
+class DocTestFinder:
+ """
+ A class used to extract the DocTests that are relevant to a given
+ object, from its docstring and the docstrings of its contained
+ objects. Doctests can currently be extracted from the following
+ object types: modules, functions, classes, methods, staticmethods,
+ classmethods, and properties.
+ """
+
+ def __init__(self, verbose=False, parser=DocTestParser(),
+ recurse=True, _namefilter=None, exclude_empty=True):
+ """
+ Create a new doctest finder.
+
+ The optional argument `parser` specifies a class or
+ function that should be used to create new DocTest objects (or
+ objects that implement the same interface as DocTest). The
+ signature for this factory function should match the signature
+ of the DocTest constructor.
+
+ If the optional argument `recurse` is false, then `find` will
+ only examine the given object, and not any contained objects.
+
+ If the optional argument `exclude_empty` is false, then `find`
+ will include tests for objects with empty docstrings.
+ """
+ self._parser = parser
+ self._verbose = verbose
+ self._recurse = recurse
+ self._exclude_empty = exclude_empty
+ # _namefilter is undocumented, and exists only for temporary backward-
+ # compatibility support of testmod's deprecated isprivate mess.
+ self._namefilter = _namefilter
+
+ def find(self, obj, name=None, module=None, globs=None,
+ extraglobs=None):
+ """
+ Return a list of the DocTests that are defined by the given
+ object's docstring, or by any of its contained objects'
+ docstrings.
+
+ The optional parameter `module` is the module that contains
+ the given object. If the module is not specified or is None, then
+ the test finder will attempt to automatically determine the
+ correct module. The object's module is used:
+
+ - As a default namespace, if `globs` is not specified.
+ - To prevent the DocTestFinder from extracting DocTests
+ from objects that are imported from other modules.
+ - To find the name of the file containing the object.
+ - To help find the line number of the object within its
+ file.
+
+ Contained objects whose module does not match `module` are ignored.
+
+ If `module` is False, no attempt to find the module will be made.
+ This is obscure, of use mostly in tests: if `module` is False, or
+ is None but cannot be found automatically, then all objects are
+ considered to belong to the (non-existent) module, so all contained
+ objects will (recursively) be searched for doctests.
+
+ The globals for each DocTest is formed by combining `globs`
+ and `extraglobs` (bindings in `extraglobs` override bindings
+ in `globs`). A new copy of the globals dictionary is created
+ for each DocTest. If `globs` is not specified, then it
+ defaults to the module's `__dict__`, if specified, or {}
+ otherwise. If `extraglobs` is not specified, then it defaults
+ to {}.
+
+ """
+ # If name was not specified, then extract it from the object.
+ if name is None:
+ name = getattr(obj, '__name__', None)
+ if name is None:
+ raise ValueError("DocTestFinder.find: name must be given "
+ "when obj.__name__ doesn't exist: %r" %
+ (type(obj),))
+
+ # Find the module that contains the given object (if obj is
+ # a module, then module=obj.). Note: this may fail, in which
+ # case module will be None.
+ if module is False:
+ module = None
+ elif module is None:
+ module = inspect.getmodule(obj)
+
+ # Read the module's source code. This is used by
+ # DocTestFinder._find_lineno to find the line number for a
+ # given object's docstring.
+ try:
+ file = inspect.getsourcefile(obj) or inspect.getfile(obj)
+ source_lines = linecache.getlines(file)
+ if not source_lines:
+ source_lines = None
+ except TypeError:
+ source_lines = None
+
+ # Initialize globals, and merge in extraglobs.
+ if globs is None:
+ if module is None:
+ globs = {}
+ else:
+ globs = module.__dict__.copy()
+ else:
+ globs = globs.copy()
+ if extraglobs is not None:
+ globs.update(extraglobs)
+
+        # Recursively explore `obj`, extracting DocTests.
+ tests = []
+ self._find(tests, obj, name, module, source_lines, globs, {})
+ # Sort the tests by alpha order of names, for consistency in
+ # verbose-mode output. This was a feature of doctest in Pythons
+ # <= 2.3 that got lost by accident in 2.4. It was repaired in
+ # 2.4.4 and 2.5.
+ tests.sort()
+ return tests
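+
+    # Sketch of typical use (the module argument is illustrative):
+    #
+    #     finder = DocTestFinder()
+    #     tests = finder.find(some_module)    # sorted list of DocTest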
+
+ def _filter(self, obj, prefix, base):
+ """
+ Return true if the given object should not be examined.
+ """
+ return (self._namefilter is not None and
+ self._namefilter(prefix, base))
+
+ def _from_module(self, module, object):
+ """
+ Return true if the given object is defined in the given
+ module.
+ """
+ if module is None:
+ return True
+ elif inspect.isfunction(object):
+ return module.__dict__ is object.func_globals
+ elif inspect.isclass(object):
+ # Some jython classes don't set __module__
+ return module.__name__ == getattr(object, '__module__', None)
+ elif inspect.getmodule(object) is not None:
+ return module is inspect.getmodule(object)
+ elif hasattr(object, '__module__'):
+ return module.__name__ == object.__module__
+ elif isinstance(object, property):
+            return True  # [XX] no way to be sure.
+ else:
+ raise ValueError("object must be a class or function")
+
+ def _find(self, tests, obj, name, module, source_lines, globs, seen):
+ """
+ Find tests for the given object and any contained objects, and
+ add them to `tests`.
+ """
+ if self._verbose:
+ print 'Finding tests in %s' % name
+
+ # If we've already processed this object, then ignore it.
+ if id(obj) in seen:
+ return
+ seen[id(obj)] = 1
+
+ # Find a test for this object, and add it to the list of tests.
+ test = self._get_test(obj, name, module, globs, source_lines)
+ if test is not None:
+ tests.append(test)
+
+ # Look for tests in a module's contained objects.
+ if inspect.ismodule(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Check if this contained object should be ignored.
+ if self._filter(val, name, valname):
+ continue
+ valname = '%s.%s' % (name, valname)
+ # Recurse to functions & classes.
+ if ((inspect.isfunction(val) or inspect.isclass(val)) and
+ self._from_module(module, val)):
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a module's __test__ dictionary.
+ if inspect.ismodule(obj) and self._recurse:
+ for valname, val in getattr(obj, '__test__', {}).items():
+ if not isinstance(valname, basestring):
+ raise ValueError("DocTestFinder.find: __test__ keys "
+ "must be strings: %r" %
+ (type(valname),))
+ if not (inspect.isfunction(val) or inspect.isclass(val) or
+ inspect.ismethod(val) or inspect.ismodule(val) or
+ isinstance(val, basestring)):
+ raise ValueError("DocTestFinder.find: __test__ values "
+ "must be strings, functions, methods, "
+ "classes, or modules: %r" %
+ (type(val),))
+ valname = '%s.__test__.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a class's contained objects.
+ if inspect.isclass(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Check if this contained object should be ignored.
+ if self._filter(val, name, valname):
+ continue
+ # Special handling for staticmethod/classmethod.
+ if isinstance(val, staticmethod):
+ val = getattr(obj, valname)
+ if isinstance(val, classmethod):
+ val = getattr(obj, valname).im_func
+
+ # Recurse to methods, properties, and nested classes.
+ if ((inspect.isfunction(val) or inspect.isclass(val) or
+ isinstance(val, property)) and
+ self._from_module(module, val)):
+ valname = '%s.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ def _get_test(self, obj, name, module, globs, source_lines):
+ """
+ Return a DocTest for the given object, if it defines a docstring;
+ otherwise, return None.
+ """
+ # Extract the object's docstring. If it doesn't have one,
+ # then return None (no test for this object).
+ if isinstance(obj, basestring):
+ docstring = obj
+ else:
+ try:
+ if obj.__doc__ is None:
+ docstring = ''
+ else:
+ docstring = obj.__doc__
+ if not isinstance(docstring, basestring):
+ docstring = str(docstring)
+ except (TypeError, AttributeError):
+ docstring = ''
+
+ # Find the docstring's location in the file.
+ lineno = self._find_lineno(obj, source_lines)
+
+ # Don't bother if the docstring is empty.
+ if self._exclude_empty and not docstring:
+ return None
+
+ # Return a DocTest for this object.
+ if module is None:
+ filename = None
+ else:
+ filename = getattr(module, '__file__', module.__name__)
+ if filename[-4:] in (".pyc", ".pyo"):
+ filename = filename[:-1]
+ elif sys.platform.startswith('java') and \
+ filename.endswith('$py.class'):
+ filename = '%s.py' % filename[:-9]
+ return self._parser.get_doctest(docstring, globs, name,
+ filename, lineno)
+
+ def _find_lineno(self, obj, source_lines):
+ """
+ Return a line number of the given object's docstring. Note:
+ this method assumes that the object has a docstring.
+ """
+ lineno = None
+
+ # Find the line number for modules.
+ if inspect.ismodule(obj):
+ lineno = 0
+
+ # Find the line number for classes.
+ # Note: this could be fooled if a class is defined multiple
+ # times in a single file.
+ if inspect.isclass(obj):
+ if source_lines is None:
+ return None
+ pat = re.compile(r'^\s*class\s*%s\b' %
+ getattr(obj, '__name__', '-'))
+ for i, line in enumerate(source_lines):
+ if pat.match(line):
+ lineno = i
+ break
+
+ # Find the line number for functions & methods.
+ if inspect.ismethod(obj): obj = obj.im_func
+ if inspect.isfunction(obj): obj = obj.func_code
+ if inspect.istraceback(obj): obj = obj.tb_frame
+ if inspect.isframe(obj): obj = obj.f_code
+ if inspect.iscode(obj):
+ lineno = getattr(obj, 'co_firstlineno', None)-1
+
+ # Find the line number where the docstring starts. Assume
+ # that it's the first line that begins with a quote mark.
+ # Note: this could be fooled by a multiline function
+ # signature, where a continuation line begins with a quote
+ # mark.
+ if lineno is not None:
+ if source_lines is None:
+ return lineno+1
+ pat = re.compile('(^|.*:)\s*\w*("|\')')
+ for lineno in range(lineno, len(source_lines)):
+ if pat.match(source_lines[lineno]):
+ return lineno
+
+ # We couldn't find the line number.
+ return None
+
+######################################################################
+## 5. DocTest Runner
+######################################################################
+
+class DocTestRunner:
+ # This divider string is used to separate failure messages, and to
+ # separate sections of the summary.
+ DIVIDER = "*" * 70
+
+ def __init__(self, checker=None, verbose=None, optionflags=0):
+ """
+ Create a new test runner.
+
+ Optional keyword arg `checker` is the `OutputChecker` that
+ should be used to compare the expected outputs and actual
+ outputs of doctest examples.
+
+ Optional keyword arg 'verbose' prints lots of stuff if true,
+ only failures if false; by default, it's true iff '-v' is in
+ sys.argv.
+
+ Optional argument `optionflags` can be used to control how the
+ test runner compares expected output to actual output, and how
+ it displays failures. See the documentation for `testmod` for
+ more information.
+ """
+ self._checker = checker or OutputChecker()
+ if verbose is None:
+ verbose = '-v' in sys.argv
+ self._verbose = verbose
+ self.optionflags = optionflags
+ self.original_optionflags = optionflags
+
+ # Keep track of the examples we've run.
+ self.tries = 0
+ self.failures = 0
+ self._name2ft = {}
+
+ # Create a fake output target for capturing doctest output.
+ self._fakeout = _SpoofOut()
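+
+ # A minimal driving loop (sketch; `mymodule` is a placeholder):
+ #
+ #   runner = DocTestRunner(verbose=False, optionflags=ELLIPSIS)
+ #   for test in DocTestFinder().find(mymodule):
+ #       runner.run(test)
+ #   failed, tried = runner.summarize()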
+
+ #/////////////////////////////////////////////////////////////////
+ # Reporting methods
+ #/////////////////////////////////////////////////////////////////
+
+ def report_start(self, out, test, example):
+ """
+ Report that the test runner is about to process the given
+ example. (Only displays a message if verbose=True)
+ """
+ if self._verbose:
+ if example.want:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting:\n' + _indent(example.want))
+ else:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting nothing\n')
+
+ def report_success(self, out, test, example, got):
+ """
+ Report that the given example ran successfully. (Only
+ displays a message if verbose=True)
+ """
+ if self._verbose:
+ out("ok\n")
+
+ def report_failure(self, out, test, example, got):
+ """
+ Report that the given example failed.
+ """
+ out(self._failure_header(test, example) +
+ self._checker.output_difference(example, got, self.optionflags))
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ """
+ Report that the given example raised an unexpected exception.
+ """
+ out(self._failure_header(test, example) +
+ 'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
+
+ def _failure_header(self, test, example):
+ out = [self.DIVIDER]
+ if test.filename:
+ if test.lineno is not None and example.lineno is not None:
+ lineno = test.lineno + example.lineno + 1
+ else:
+ lineno = '?'
+ out.append('File "%s", line %s, in %s' %
+ (test.filename, lineno, test.name))
+ else:
+ out.append('Line %s, in %s' % (example.lineno+1, test.name))
+ out.append('Failed example:')
+ source = example.source
+ out.append(_indent(source))
+ return '\n'.join(out)
+
+ #/////////////////////////////////////////////////////////////////
+ # DocTest Running
+ #/////////////////////////////////////////////////////////////////
+
+ def __run(self, test, compileflags, out):
+ """
+ Run the examples in `test`. Write the outcome of each example
+ with one of the `DocTestRunner.report_*` methods, using the
+ writer function `out`. `compileflags` is the set of compiler
+ flags that should be used to execute examples. Return a tuple
+ `(f, t)`, where `t` is the number of examples tried, and `f`
+ is the number of examples that failed. The examples are run
+ in the namespace `test.globs`.
+ """
+ # Keep track of the number of failures and tries.
+ failures = tries = 0
+
+ # Save the option flags (since option directives can be used
+ # to modify them).
+ original_optionflags = self.optionflags
+
+ SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
+
+ check = self._checker.check_output
+
+ # Process each example.
+ for examplenum, example in enumerate(test.examples):
+
+ # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
+ # reporting after the first failure.
+ quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
+ failures > 0)
+
+ # Merge in the example's options.
+ self.optionflags = original_optionflags
+ if example.options:
+ for (optionflag, val) in example.options.items():
+ if val:
+ self.optionflags |= optionflag
+ else:
+ self.optionflags &= ~optionflag
+
+ # Record that we started this example.
+ tries += 1
+ if not quiet:
+ self.report_start(out, test, example)
+
+ # Use a special filename for compile(), so we can retrieve
+ # the source code during interactive debugging (see
+ # __patched_linecache_getlines).
+ filename = '<doctest %s[%d]>' % (test.name, examplenum)
+
+ # Run the example in the given context (globs), and record
+ # any exception that gets raised. (But don't intercept
+ # keyboard interrupts.)
+ try:
+ # Don't blink! This is where the user's code gets run.
+ exec compile(example.source, filename, "single",
+ compileflags, 1) in test.globs
+ self.debugger.set_continue() # ==== Example Finished ====
+ exception = None
+ except KeyboardInterrupt:
+ raise
+ except:
+ exception = sys.exc_info()
+ self.debugger.set_continue() # ==== Example Finished ====
+
+ got = self._fakeout.getvalue() # the actual output
+ self._fakeout.truncate(0)
+ outcome = FAILURE # guilty until proved innocent or insane
+
+ # If the example executed without raising any exceptions,
+ # verify its output.
+ if exception is None:
+ if check(example.want, got, self.optionflags):
+ outcome = SUCCESS
+
+ # The example raised an exception: check if it was expected.
+ else:
+ exc_info = sys.exc_info()
+ exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
+ if not quiet:
+ got += _exception_traceback(exc_info)
+
+ # If `example.exc_msg` is None, then we weren't expecting
+ # an exception.
+ if example.exc_msg is None:
+ outcome = BOOM
+
+ # We expected an exception: see whether it matches.
+ elif check(example.exc_msg, exc_msg, self.optionflags):
+ outcome = SUCCESS
+
+ # Another chance if they didn't care about the detail.
+ elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
+ m1 = re.match(r'[^:]*:', example.exc_msg)
+ m2 = re.match(r'[^:]*:', exc_msg)
+ if m1 and m2 and check(m1.group(0), m2.group(0),
+ self.optionflags):
+ outcome = SUCCESS
+
+ # Report the outcome.
+ if outcome is SUCCESS:
+ if not quiet:
+ self.report_success(out, test, example, got)
+ elif outcome is FAILURE:
+ if not quiet:
+ self.report_failure(out, test, example, got)
+ failures += 1
+ elif outcome is BOOM:
+ if not quiet:
+ self.report_unexpected_exception(out, test, example,
+ exc_info)
+ failures += 1
+ else:
+ assert False, ("unknown outcome", outcome)
+
+ # Restore the option flags (in case they were modified)
+ self.optionflags = original_optionflags
+
+ # Record and return the number of failures and tries.
+ self.__record_outcome(test, failures, tries)
+ return failures, tries
+
+ def __record_outcome(self, test, f, t):
+ """
+ Record the fact that the given DocTest (`test`) generated `f`
+ failures out of `t` tried examples.
+ """
+ f2, t2 = self._name2ft.get(test.name, (0,0))
+ self._name2ft[test.name] = (f+f2, t+t2)
+ self.failures += f
+ self.tries += t
+
+ __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
+ r'(?P<name>[\w\.]+)'
+ r'\[(?P<examplenum>\d+)\]>$')
+ def __patched_linecache_getlines(self, filename):
+ m = self.__LINECACHE_FILENAME_RE.match(filename)
+ if m and m.group('name') == self.test.name:
+ example = self.test.examples[int(m.group('examplenum'))]
+ return example.source.splitlines(True)
+ else:
+ return self.save_linecache_getlines(filename)
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+ """
+ Run the examples in `test`, and display the results using the
+ writer function `out`.
+
+ The examples are run in the namespace `test.globs`. If
+ `clear_globs` is true (the default), then this namespace will
+ be cleared after the test runs, to help with garbage
+ collection. If you would like to examine the namespace after
+ the test completes, then use `clear_globs=False`.
+
+ `compileflags` gives the set of flags that should be used by
+ the Python compiler when running the examples. If not
+ specified, then it will default to the set of future-import
+ flags that apply to `globs`.
+
+ The output of each example is checked using
+ `DocTestRunner.check_output`, and the results are formatted by
+ the `DocTestRunner.report_*` methods.
+ """
+ self.test = test
+
+ if compileflags is None:
+ compileflags = _extract_future_flags(test.globs)
+
+ save_stdout = sys.stdout
+ if out is None:
+ out = save_stdout.write
+ sys.stdout = self._fakeout
+
+ # Patch pdb.set_trace to restore sys.stdout during interactive
+ # debugging (so it's not still redirected to self._fakeout).
+ # Note that the interactive output will go to *our*
+ # save_stdout, even if that's not the real sys.stdout; this
+ # allows us to write test cases for the set_trace behavior.
+ save_set_trace = pdb.set_trace
+ self.debugger = _OutputRedirectingPdb(save_stdout)
+ self.debugger.reset()
+ pdb.set_trace = self.debugger.set_trace
+
+ # Patch linecache.getlines, so we can see the example's source
+ # when we're inside the debugger.
+ self.save_linecache_getlines = linecache.getlines
+ linecache.getlines = self.__patched_linecache_getlines
+
+ try:
+ return self.__run(test, compileflags, out)
+ finally:
+ sys.stdout = save_stdout
+ pdb.set_trace = save_set_trace
+ linecache.getlines = self.save_linecache_getlines
+ if clear_globs:
+ test.globs.clear()
+
+ #/////////////////////////////////////////////////////////////////
+ # Summarization
+ #/////////////////////////////////////////////////////////////////
+ def summarize(self, verbose=None):
+ """
+ Print a summary of all the test cases that have been run by
+ this DocTestRunner, and return a tuple `(f, t)`, where `f` is
+ the total number of failed examples, and `t` is the total
+ number of tried examples.
+
+ The optional `verbose` argument controls how detailed the
+ summary is. If the verbosity is not specified, then the
+ DocTestRunner's verbosity is used.
+ """
+ if verbose is None:
+ verbose = self._verbose
+ notests = []
+ passed = []
+ failed = []
+ totalt = totalf = 0
+ for x in self._name2ft.items():
+ name, (f, t) = x
+ assert f <= t
+ totalt += t
+ totalf += f
+ if t == 0:
+ notests.append(name)
+ elif f == 0:
+ passed.append( (name, t) )
+ else:
+ failed.append(x)
+ if verbose:
+ if notests:
+ print len(notests), "items had no tests:"
+ notests.sort()
+ for thing in notests:
+ print " ", thing
+ if passed:
+ print len(passed), "items passed all tests:"
+ passed.sort()
+ for thing, count in passed:
+ print " %3d tests in %s" % (count, thing)
+ if failed:
+ print self.DIVIDER
+ print len(failed), "items had failures:"
+ failed.sort()
+ for thing, (f, t) in failed:
+ print " %3d of %3d in %s" % (f, t, thing)
+ if verbose:
+ print totalt, "tests in", len(self._name2ft), "items."
+ print totalt - totalf, "passed and", totalf, "failed."
+ if totalf:
+ print "***Test Failed***", totalf, "failures."
+ elif verbose:
+ print "Test passed."
+ return totalf, totalt
+
+ #/////////////////////////////////////////////////////////////////
+ # Backward compatibility cruft to maintain doctest.master.
+ #/////////////////////////////////////////////////////////////////
+ def merge(self, other):
+ d = self._name2ft
+ for name, (f, t) in other._name2ft.items():
+ if name in d:
+ print "*** DocTestRunner.merge: '" + name + "' in both" \
+ " testers; summing outcomes."
+ f2, t2 = d[name]
+ f = f + f2
+ t = t + t2
+ d[name] = f, t
+
+class OutputChecker:
+ """
+ A class used to check whether the actual output from a doctest
+ example matches the expected output. `OutputChecker` defines two
+ methods: `check_output`, which compares a given pair of outputs,
+ and returns true if they match; and `output_difference`, which
+ returns a string describing the differences between two outputs.
+ """
+ def check_output(self, want, got, optionflags):
+ """
+ Return True iff the actual output from an example (`got`)
+ matches the expected output (`want`). These strings are
+ always considered to match if they are identical; but
+ depending on what option flags the test runner is using,
+ several non-exact match types are also possible. See the
+ documentation for `DocTestRunner` for more information about
+ option flags.
+ """
+ # Handle the common case first, for efficiency:
+ # if they're string-identical, always return true.
+ if got == want:
+ return True
+
+ # The values True and False replaced 1 and 0 as the return
+ # value for boolean comparisons in Python 2.3.
+ if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
+ if (got,want) == ("True\n", "1\n"):
+ return True
+ if (got,want) == ("False\n", "0\n"):
+ return True
+
+ # <BLANKLINE> can be used as a special sequence to signify a
+ # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
+ if not (optionflags & DONT_ACCEPT_BLANKLINE):
+ # Replace <BLANKLINE> in want with a blank line.
+ want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
+ '', want)
+ # If a line in got contains only spaces, then remove the
+ # spaces.
+ got = re.sub('(?m)^\s*?$', '', got)
+ if got == want:
+ return True
+
+ # This flag causes doctest to ignore any differences in the
+ # contents of whitespace strings. Note that this can be used
+ # in conjunction with the ELLIPSIS flag.
+ if optionflags & NORMALIZE_WHITESPACE:
+ got = ' '.join(got.split())
+ want = ' '.join(want.split())
+ if got == want:
+ return True
+
+ # The ELLIPSIS flag says to let the sequence "..." in `want`
+ # match any substring in `got`.
+ if optionflags & ELLIPSIS:
+ if _ellipsis_match(want, got):
+ return True
+
+ # We didn't find any match; return false.
+ return False
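+
+ # For instance (sketch): under NORMALIZE_WHITESPACE the strings
+ # '1  2\n' and '1 2\n' compare equal, while with optionflags=0 they
+ # do not:
+ #
+ #   checker = OutputChecker()
+ #   checker.check_output('1  2\n', '1 2\n', NORMALIZE_WHITESPACE) # True
+ #   checker.check_output('1  2\n', '1 2\n', 0)                    # False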
+
+ # Should we do a fancy diff?
+ def _do_a_fancy_diff(self, want, got, optionflags):
+ # Not unless they asked for a fancy diff.
+ if not optionflags & (REPORT_UDIFF |
+ REPORT_CDIFF |
+ REPORT_NDIFF):
+ return False
+
+ # If expected output uses ellipsis, a meaningful fancy diff is
+ # too hard ... or maybe not. In two real-life failures Tim saw,
+ # a diff was a major help anyway, so this is commented out.
+ # [todo] _ellipsis_match() knows which pieces do and don't match,
+ # and could be the basis for a kick-ass diff in this case.
+ ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
+ ## return False
+
+ # ndiff does intraline difference marking, so can be useful even
+ # for 1-line differences.
+ if optionflags & REPORT_NDIFF:
+ return True
+
+ # The other diff types need at least a few lines to be helpful.
+ return want.count('\n') > 2 and got.count('\n') > 2
+
+ def output_difference(self, example, got, optionflags):
+ """
+ Return a string describing the differences between the
+ expected output for a given example (`example`) and the actual
+ output (`got`). `optionflags` is the set of option flags used
+ to compare `want` and `got`.
+ """
+ want = example.want
+ # If <BLANKLINE>s are being used, then replace blank lines
+ # with <BLANKLINE> in the actual output string.
+ if not (optionflags & DONT_ACCEPT_BLANKLINE):
+ got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
+
+ # Check if we should use diff.
+ if self._do_a_fancy_diff(want, got, optionflags):
+ # Split want & got into lines.
+ want_lines = want.splitlines(True) # True == keep line ends
+ got_lines = got.splitlines(True)
+ # Use difflib to find their differences.
+ if optionflags & REPORT_UDIFF:
+ diff = difflib.unified_diff(want_lines, got_lines, n=2)
+ diff = list(diff)[2:] # strip the diff header
+ kind = 'unified diff with -expected +actual'
+ elif optionflags & REPORT_CDIFF:
+ diff = difflib.context_diff(want_lines, got_lines, n=2)
+ diff = list(diff)[2:] # strip the diff header
+ kind = 'context diff with expected followed by actual'
+ elif optionflags & REPORT_NDIFF:
+ engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
+ diff = list(engine.compare(want_lines, got_lines))
+ kind = 'ndiff with -expected +actual'
+ else:
+ assert 0, 'Bad diff option'
+ # Remove trailing whitespace on diff output.
+ diff = [line.rstrip() + '\n' for line in diff]
+ return 'Differences (%s):\n' % kind + _indent(''.join(diff))
+
+ # If we're not using diff, then simply list the expected
+ # output followed by the actual output.
+ if want and got:
+ return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
+ elif want:
+ return 'Expected:\n%sGot nothing\n' % _indent(want)
+ elif got:
+ return 'Expected nothing\nGot:\n%s' % _indent(got)
+ else:
+ return 'Expected nothing\nGot nothing\n'
+
+class DocTestFailure(Exception):
+ """A DocTest example has failed in debugging mode.
+
+ The exception instance has variables:
+
+ - test: the DocTest object being run
+
+ - example: the Example object that failed
+
+ - got: the actual output
+ """
+ def __init__(self, test, example, got):
+ self.test = test
+ self.example = example
+ self.got = got
+
+ def __str__(self):
+ return str(self.test)
+
+class UnexpectedException(Exception):
+ """A DocTest example has encountered an unexpected exception
+
+ The exception instance has variables:
+
+ - test: the DocTest object being run
+
+ - example: the Example object that failed
+
+ - exc_info: the exception info
+ """
+ def __init__(self, test, example, exc_info):
+ self.test = test
+ self.example = example
+ self.exc_info = exc_info
+
+ def __str__(self):
+ return str(self.test)
+
+class DebugRunner(DocTestRunner):
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+ r = DocTestRunner.run(self, test, compileflags, out, False)
+ if clear_globs:
+ test.globs.clear()
+ return r
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ raise UnexpectedException(test, example, exc_info)
+
+ def report_failure(self, out, test, example, got):
+ raise DocTestFailure(test, example, got)
+
+######################################################################
+## 6. Test Functions
+######################################################################
+# These should be backwards compatible.
+
+# For backward compatibility, a global instance of a DocTestRunner
+# class, updated by testmod.
+master = None
+
+def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
+ report=True, optionflags=0, extraglobs=None,
+ raise_on_error=False, exclude_empty=False):
+ """m=None, name=None, globs=None, verbose=None, isprivate=None,
+ report=True, optionflags=0, extraglobs=None, raise_on_error=False,
+ exclude_empty=False
+
+ Test examples in docstrings in functions and classes reachable
+ from module m (or the current module if m is not supplied), starting
+ with m.__doc__. Unless isprivate is specified, private names
+ are not skipped.
+
+ Also test examples reachable from dict m.__test__ if it exists and is
+ not None. m.__test__ maps names to functions, classes and strings;
+ function and class docstrings are tested even if the name is private;
+ strings are tested directly, as if they were docstrings.
+
+ Return (#failures, #tests).
+
+ See doctest.__doc__ for an overview.
+
+ Optional keyword arg "name" gives the name of the module; by default
+ use m.__name__.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use m.__dict__. A copy of this
+ dict is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used. This is new in 2.4.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. This is new in 2.3. Possible values (see the
+ docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Deprecated in Python 2.4:
+ Optional keyword arg "isprivate" specifies a function used to
+ determine whether a name is private. The default function is to
+ treat all functions as public. Optionally, "isprivate" can be
+ set to doctest.is_private to skip over functions marked as private
+ using the underscore naming convention; see its docs for details.
+
+ Advanced tomfoolery: testmod runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ if isprivate is not None:
+ warnings.warn("the isprivate argument is deprecated; "
+ "examine DocTestFinder.find() lists instead",
+ DeprecationWarning)
+
+ # If no module was given, then use __main__.
+ if m is None:
+ # DWA - m will still be None if this wasn't invoked from the command
+ # line, in which case the following TypeError is about as good an error
+ # as we should expect
+ m = sys.modules.get('__main__')
+
+ # Check that we were actually given a module.
+ if not inspect.ismodule(m):
+ raise TypeError("testmod: module required; %r" % (m,))
+
+ # If no name was given, then use the module's name.
+ if name is None:
+ name = m.__name__
+
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return runner.failures, runner.tries
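+
+# Typical self-test idiom (sketch), placed at the bottom of a module:
+#
+#   if __name__ == '__main__':
+#       testmod(verbose=False)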
+
+def testfile(filename, module_relative=True, name=None, package=None,
+ globs=None, verbose=None, report=True, optionflags=0,
+ extraglobs=None, raise_on_error=False, parser=DocTestParser()):
+ """
+ Test examples in the given file. Return (#failures, #tests).
+
+ Optional keyword arg "module_relative" specifies how filenames
+ should be interpreted:
+
+ - If "module_relative" is True (the default), then "filename"
+ specifies a module-relative path. By default, this path is
+ relative to the calling module's directory; but if the
+ "package" argument is specified, then it is relative to that
+ package. To ensure os-independence, "filename" should use
+ "/" characters to separate path segments, and should not
+ be an absolute path (i.e., it may not begin with "/").
+
+ - If "module_relative" is False, then "filename" specifies an
+ os-specific path. The path may be absolute or relative (to
+ the current working directory).
+
+ Optional keyword arg "name" gives the name of the test; by default
+ use the file's basename.
+
+ Optional keyword argument "package" is a Python package or the
+ name of a Python package whose directory should be used as the
+ base directory for a module relative filename. If no package is
+ specified, then the calling module's directory is used as the base
+ directory for module relative filenames. It is an error to
+ specify "package" if "module_relative" is False.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use {}. A copy of this dict
+ is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. Possible values (see the docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Optional keyword arg "parser" specifies a DocTestParser (or
+ subclass) that should be used to extract tests from the files.
+
+ Advanced tomfoolery: testmod runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ if package and not module_relative:
+ raise ValueError("Package may only be specified for module-"
+ "relative paths.")
+
+ # Relativize the path
+ if module_relative:
+ package = _normalize_module(package)
+ filename = _module_relative_path(package, filename)
+
+ # If no name was given, then use the file's name.
+ if name is None:
+ name = os.path.basename(filename)
+
+ # Assemble the globals.
+ if globs is None:
+ globs = {}
+ else:
+ globs = globs.copy()
+ if extraglobs is not None:
+ globs.update(extraglobs)
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ # Read the file, convert it to a test, and run it.
+ s = open(filename).read()
+ test = parser.get_doctest(s, globs, name, filename, 0)
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return runner.failures, runner.tries
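+
+# Example (sketch; 'example.txt' is a placeholder, resolved relative to
+# the calling module by default):
+#
+#   failures, tests = testfile('example.txt', optionflags=ELLIPSIS)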
+
+def run_docstring_examples(f, globs, verbose=False, name="NoName",
+ compileflags=None, optionflags=0):
+ """
+ Test examples in the given object's docstring (`f`), using `globs`
+ as globals. Optional argument `name` is used in failure messages.
+ If the optional argument `verbose` is true, then generate output
+ even if there are no failures.
+
+ `compileflags` gives the set of flags that should be used by the
+ Python compiler when running the examples. If not specified, then
+ it will default to the set of future-import flags that apply to
+ `globs`.
+
+ Optional keyword arg `optionflags` specifies options for the
+ testing and output. See the documentation for `testmod` for more
+ information.
+ """
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(verbose=verbose, recurse=False)
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+ for test in finder.find(f, name, globs=globs):
+ runner.run(test, compileflags=compileflags)
+
+######################################################################
+## 7. Tester
+######################################################################
+# This is provided only for backwards compatibility. It's not
+# actually used in any way.
+
+class Tester:
+ def __init__(self, mod=None, globs=None, verbose=None,
+ isprivate=None, optionflags=0):
+
+ warnings.warn("class Tester is deprecated; "
+ "use class doctest.DocTestRunner instead",
+ DeprecationWarning, stacklevel=2)
+ if mod is None and globs is None:
+ raise TypeError("Tester.__init__: must specify mod or globs")
+ if mod is not None and not inspect.ismodule(mod):
+ raise TypeError("Tester.__init__: mod must be a module; %r" %
+ (mod,))
+ if globs is None:
+ globs = mod.__dict__
+ self.globs = globs
+
+ self.verbose = verbose
+ self.isprivate = isprivate
+ self.optionflags = optionflags
+ self.testfinder = DocTestFinder(_namefilter=isprivate)
+ self.testrunner = DocTestRunner(verbose=verbose,
+ optionflags=optionflags)
+
+ def runstring(self, s, name):
+ test = DocTestParser().get_doctest(s, self.globs, name, None, None)
+ if self.verbose:
+ print "Running string", name
+ (f,t) = self.testrunner.run(test)
+ if self.verbose:
+ print f, "of", t, "examples failed in string", name
+ return (f,t)
+
+ def rundoc(self, object, name=None, module=None):
+ f = t = 0
+ tests = self.testfinder.find(object, name, module=module,
+ globs=self.globs)
+ for test in tests:
+ (f2, t2) = self.testrunner.run(test)
+ (f,t) = (f+f2, t+t2)
+ return (f,t)
+
+ def rundict(self, d, name, module=None):
+ import new
+ m = new.module(name)
+ m.__dict__.update(d)
+ if module is None:
+ module = False
+ return self.rundoc(m, name, module)
+
+ def run__test__(self, d, name):
+ import new
+ m = new.module(name)
+ m.__test__ = d
+ return self.rundoc(m, name)
+
+ def summarize(self, verbose=None):
+ return self.testrunner.summarize(verbose)
+
+ def merge(self, other):
+ self.testrunner.merge(other.testrunner)
+
+######################################################################
+## 8. Unittest Support
+######################################################################
+
+_unittest_reportflags = 0
+
+def set_unittest_reportflags(flags):
+ global _unittest_reportflags
+
+ if (flags & REPORTING_FLAGS) != flags:
+ raise ValueError("Only reporting flags allowed", flags)
+ old = _unittest_reportflags
+ _unittest_reportflags = flags
+ return old
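+
+# Sketch: make doctest-based unittest cases that set no reporting flags
+# of their own report ndiff-style failures:
+#
+#   old_flags = set_unittest_reportflags(REPORT_NDIFF)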
+
+
+class DocTestCase(unittest.TestCase):
+
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None):
+
+ unittest.TestCase.__init__(self)
+ self._dt_optionflags = optionflags
+ self._dt_checker = checker
+ self._dt_test = test
+ self._dt_setUp = setUp
+ self._dt_tearDown = tearDown
+
+ def setUp(self):
+ test = self._dt_test
+
+ if self._dt_setUp is not None:
+ self._dt_setUp(test)
+
+ def tearDown(self):
+ test = self._dt_test
+
+ if self._dt_tearDown is not None:
+ self._dt_tearDown(test)
+
+ test.globs.clear()
+
+ def runTest(self):
+ test = self._dt_test
+ old = sys.stdout
+ new = StringIO()
+ optionflags = self._dt_optionflags
+
+ if not (optionflags & REPORTING_FLAGS):
+ # The option flags don't include any reporting flags,
+ # so add the default reporting flags
+ optionflags |= _unittest_reportflags
+
+ runner = DocTestRunner(optionflags=optionflags,
+ checker=self._dt_checker, verbose=False)
+
+ try:
+ runner.DIVIDER = "-"*70
+ failures, tries = runner.run(
+ test, out=new.write, clear_globs=False)
+ finally:
+ sys.stdout = old
+
+ if failures:
+ raise self.failureException(self.format_failure(new.getvalue()))
+
+ def format_failure(self, err):
+ test = self._dt_test
+ if test.lineno is None:
+ lineno = 'unknown line number'
+ else:
+ lineno = '%s' % test.lineno
+ lname = '.'.join(test.name.split('.')[-1:])
+ return ('Failed doctest test for %s\n'
+ ' File "%s", line %s, in %s\n\n%s'
+ % (test.name, test.filename, lineno, lname, err)
+ )
+
+ def debug(self):
+ self.setUp()
+ runner = DebugRunner(optionflags=self._dt_optionflags,
+ checker=self._dt_checker, verbose=False)
+ runner.run(self._dt_test)
+ self.tearDown()
+
+ def id(self):
+ return self._dt_test.name
+
+ def __repr__(self):
+ name = self._dt_test.name.split('.')
+ return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+
+ __str__ = __repr__
+
+ def shortDescription(self):
+ return "Doctest: " + self._dt_test.name
+
+def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
+ **options):
+ """
+ Convert doctest tests for a module to a unittest test suite.
+
+ This converts each documentation string in a module that
+ contains doctest tests to a unittest test case. If any of the
+ tests in a doc string fail, then the test case fails. An exception
+ is raised showing the name of the file containing the test and a
+ (sometimes approximate) line number.
+
+ The `module` argument provides the module to be tested. The argument
+ can be either a module or a module name.
+
+ If no argument is given, the calling module is used.
+
+ A number of options may be provided as keyword arguments:
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+ """
+
+ if test_finder is None:
+ test_finder = DocTestFinder()
+
+ module = _normalize_module(module)
+ tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
+ if globs is None:
+ globs = module.__dict__
+ if not tests:
+ # Why do we want to do this? Because it reveals a bug that might
+ # otherwise be hidden.
+ raise ValueError(module, "has no tests")
+
+ tests.sort()
+ suite = unittest.TestSuite()
+ for test in tests:
+ if len(test.examples) == 0:
+ continue
+ if not test.filename:
+ filename = module.__file__
+ if filename[-4:] in (".pyc", ".pyo"):
+ filename = filename[:-1]
+ elif sys.platform.startswith('java') and \
+ filename.endswith('$py.class'):
+ filename = '%s.py' % filename[:-9]
+ test.filename = filename
+ suite.addTest(DocTestCase(test, **options))
+
+ return suite
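+
+# Usage sketch (`mymodule` is a placeholder):
+#
+#   suite = DocTestSuite(mymodule, optionflags=NORMALIZE_WHITESPACE)
+#   unittest.TextTestRunner().run(suite)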
+
+class DocFileCase(DocTestCase):
+
+ def id(self):
+ return '_'.join(self._dt_test.name.split('.'))
+
+ def __repr__(self):
+ return self._dt_test.filename
+ __str__ = __repr__
+
+ def format_failure(self, err):
+ return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
+ % (self._dt_test.name, self._dt_test.filename, err)
+ )
+
+def DocFileTest(path, module_relative=True, package=None,
+ globs=None, parser=DocTestParser(), **options):
+ if globs is None:
+ globs = {}
+
+ if package and not module_relative:
+ raise ValueError("Package may only be specified for module-"
+ "relative paths.")
+
+ # Relativize the path.
+ if module_relative:
+ package = _normalize_module(package)
+ path = _module_relative_path(package, path)
+
+ # Find the file and read it.
+ name = os.path.basename(path)
+ doc = open(path).read()
+
+ # Convert it to a test, and wrap it in a DocFileCase.
+ test = parser.get_doctest(doc, globs, name, path, 0)
+ return DocFileCase(test, **options)
+
+def DocFileSuite(*paths, **kw):
+ """A unittest suite for one or more doctest files.
+
+ The path to each doctest file is given as a string; the
+ interpretation of that string depends on the keyword argument
+ "module_relative".
+
+ A number of options may be provided as keyword arguments:
+
+ module_relative
+ If "module_relative" is True, then the given file paths are
+ interpreted as os-independent module-relative paths. By
+ default, these paths are relative to the calling module's
+ directory; but if the "package" argument is specified, then
+ they are relative to that package. To ensure os-independence,
+ "filename" should use "/" characters to separate path
+ segments, and may not be an absolute path (i.e., it may not
+ begin with "/").
+
+ If "module_relative" is False, then the given file paths are
+ interpreted as os-specific paths. These paths may be absolute
+ or relative (to the current working directory).
+
+ package
+ A Python package or the name of a Python package whose directory
+ should be used as the base directory for module relative paths.
+ If "package" is not specified, then the calling module's
+ directory is used as the base directory for module relative
+ filenames. It is an error to specify "package" if
+ "module_relative" is False.
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+
+ parser
+ A DocTestParser (or subclass) that should be used to extract
+ tests from the files.
+ """
+ suite = unittest.TestSuite()
+
+ # We do this here so that _normalize_module is called at the right
+ # level. If it were called in DocFileTest, then this function
+ # would be the caller and we might guess the package incorrectly.
+ if kw.get('module_relative', True):
+ kw['package'] = _normalize_module(kw.get('package'))
+
+ for path in paths:
+ suite.addTest(DocFileTest(path, **kw))
+
+ return suite
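+
+# Usage sketch ('README.txt' is a placeholder, module-relative path):
+#
+#   suite = DocFileSuite('README.txt', optionflags=ELLIPSIS)
+#   unittest.TextTestRunner().run(suite)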
+
+######################################################################
+## 9. Debugging Support
+######################################################################
+
+def script_from_examples(s):
+ output = []
+ for piece in DocTestParser().parse(s):
+ if isinstance(piece, Example):
+ # Add the example's source code (strip trailing NL)
+ output.append(piece.source[:-1])
+ # Add the expected output:
+ want = piece.want
+ if want:
+ output.append('# Expected:')
+ output += ['## '+l for l in want.split('\n')[:-1]]
+ else:
+ # Add non-example text.
+ output += [_comment_line(l)
+ for l in piece.split('\n')[:-1]]
+
+ # Trim junk on both ends.
+ while output and output[-1] == '#':
+ output.pop()
+ while output and output[0] == '#':
+ output.pop(0)
+ # Combine the output, and return it.
+ # Add a courtesy newline to prevent exec from choking (see bug #1172785)
+ return '\n'.join(output) + '\n'
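+
+# Worked example (sketch):
+#
+#   script_from_examples(">>> 2 + 2\n4\n")
+#
+# returns the script
+#
+#   '2 + 2\n# Expected:\n## 4\n'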
+
+def testsource(module, name):
+ """Extract the test sources from a doctest docstring as a script.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the doc string with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ tests = DocTestFinder().find(module)
+ test = [t for t in tests if t.name == name]
+ if not test:
+ raise ValueError(name, "not found in tests")
+ test = test[0]
+ testsrc = script_from_examples(test.docstring)
+ return testsrc
+
+def debug_src(src, pm=False, globs=None):
+ """Debug a single doctest docstring, in argument `src`'"""
+ testsrc = script_from_examples(src)
+ debug_script(testsrc, pm, globs)
+
+def debug_script(src, pm=False, globs=None):
+ "Debug a test script. `src` is the script, as a string."
+ import pdb
+
+ # Note that tempfile.NamedTemporaryFile() cannot be used. As the
+ # docs say, a file so created cannot be opened by name a second time
+ # on modern Windows boxes, and execfile() needs to open it.
+ srcfilename = tempfile.mktemp(".py", "doctestdebug")
+ f = open(srcfilename, 'w')
+ f.write(src)
+ f.close()
+
+ try:
+ if globs:
+ globs = globs.copy()
+ else:
+ globs = {}
+
+ if pm:
+ try:
+ execfile(srcfilename, globs, globs)
+ except:
+ print sys.exc_info()[1]
+ pdb.post_mortem(sys.exc_info()[2])
+ else:
+ # Note that %r is vital here. '%s' instead can, e.g., cause
+ # backslashes to get treated as metacharacters on Windows.
+ pdb.run("execfile(%r)" % srcfilename, globs, globs)
+
+ finally:
+ os.remove(srcfilename)
+
+def debug(module, name, pm=False):
+ """Debug a single doctest docstring.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the docstring with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ testsrc = testsource(module, name)
+ debug_script(testsrc, pm, module.__dict__)
+
+
+__test__ = {}
diff --git a/lib/spack/external/nose/failure.py b/lib/spack/external/nose/failure.py
new file mode 100644
index 0000000000..c5fabfda5e
--- /dev/null
+++ b/lib/spack/external/nose/failure.py
@@ -0,0 +1,42 @@
+import logging
+import unittest
+from traceback import format_tb
+from nose.pyversion import is_base_exception
+
+log = logging.getLogger(__name__)
+
+
+__all__ = ['Failure']
+
+
+class Failure(unittest.TestCase):
+ """Unloadable or unexecutable test.
+
+ A Failure case is placed in a test suite to indicate the presence of a
+ test that could not be loaded or executed. A common example is a test
+ module that fails to import.
+
+ """
+ __test__ = False # do not collect
+ def __init__(self, exc_class, exc_val, tb=None, address=None):
+ log.debug("A failure! %s %s %s", exc_class, exc_val, format_tb(tb))
+ self.exc_class = exc_class
+ self.exc_val = exc_val
+ self.tb = tb
+ self._address = address
+ unittest.TestCase.__init__(self)
+
+ def __str__(self):
+ return "Failure: %s (%s)" % (
+ getattr(self.exc_class, '__name__', self.exc_class), self.exc_val)
+
+ def address(self):
+ return self._address
+
+ def runTest(self):
+ if self.tb is not None:
+ if is_base_exception(self.exc_val):
+ raise self.exc_val, None, self.tb
+ raise self.exc_class, self.exc_val, self.tb
+ else:
+ raise self.exc_class(self.exc_val)
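+
+# Sketch of the intended use: a loader that cannot import a test module
+# wraps the exception so the error surfaces as a failing "test":
+#
+#   try:
+#       module = __import__(name)          # `name` is a placeholder
+#   except Exception:
+#       suite.addTest(Failure(*sys.exc_info()))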
diff --git a/lib/spack/external/nose/importer.py b/lib/spack/external/nose/importer.py
new file mode 100644
index 0000000000..e677658ce6
--- /dev/null
+++ b/lib/spack/external/nose/importer.py
@@ -0,0 +1,167 @@
+"""Implements an importer that looks only in specific path (ignoring
+sys.path), and uses a per-path cache in addition to sys.modules. This is
+necessary because test modules in different directories frequently have the
+same names, which means that the first loaded would mask the rest when using
+the builtin importer.
+"""
+import logging
+import os
+import sys
+from nose.config import Config
+
+from imp import find_module, load_module, acquire_lock, release_lock
+
+log = logging.getLogger(__name__)
+
+try:
+ _samefile = os.path.samefile
+except AttributeError:
+ def _samefile(src, dst):
+ return (os.path.normcase(os.path.realpath(src)) ==
+ os.path.normcase(os.path.realpath(dst)))
+
+
+class Importer(object):
+ """An importer class that does only path-specific imports. That
+ is, the given module is not searched for on sys.path, but only at
+ the path or in the directory specified.
+ """
+ def __init__(self, config=None):
+ if config is None:
+ config = Config()
+ self.config = config
+
+ def importFromPath(self, path, fqname):
+ """Import a dotted-name package whose tail is at path. In other words,
+ given foo.bar and path/to/foo/bar.py, import foo from path/to/foo then
+ bar from path/to/foo/bar, returning bar.
+ """
+ # find the base dir of the package
+ path_parts = os.path.normpath(os.path.abspath(path)).split(os.sep)
+ name_parts = fqname.split('.')
+ if path_parts[-1] == '__init__.py':
+ path_parts.pop()
+ path_parts = path_parts[:-(len(name_parts))]
+ dir_path = os.sep.join(path_parts)
+ # then import fqname starting from that dir
+ return self.importFromDir(dir_path, fqname)
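+
+ # Sketch ('path/to/foo/bar.py' is a placeholder): importing 'foo.bar'
+ # from that file imports foo from path/to, then bar from path/to/foo:
+ #
+ #   mod = Importer().importFromPath('path/to/foo/bar.py', 'foo.bar')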
+
+ def importFromDir(self, dir, fqname):
+ """Import a module *only* from path, ignoring sys.path and
+ reloading if the version in sys.modules is not the one we want.
+ """
+ dir = os.path.normpath(os.path.abspath(dir))
+ log.debug("Import %s from %s", fqname, dir)
+
+ # FIXME reimplement local per-dir cache?
+
+ # special case for __main__
+ if fqname == '__main__':
+ return sys.modules[fqname]
+
+ if self.config.addPaths:
+ add_path(dir, self.config)
+
+ path = [dir]
+ parts = fqname.split('.')
+ part_fqname = ''
+ mod = parent = fh = None
+
+ for part in parts:
+ if part_fqname == '':
+ part_fqname = part
+ else:
+ part_fqname = "%s.%s" % (part_fqname, part)
+ try:
+ acquire_lock()
+ log.debug("find module part %s (%s) in %s",
+ part, part_fqname, path)
+ fh, filename, desc = find_module(part, path)
+ old = sys.modules.get(part_fqname)
+ if old is not None:
+ # test modules frequently have name overlap; make sure
+ # we get a fresh copy of anything we are trying to load
+ # from a new path
+ log.debug("sys.modules has %s as %s", part_fqname, old)
+ if (self.sameModule(old, filename)
+ or (self.config.firstPackageWins and
+ getattr(old, '__path__', None))):
+ mod = old
+ else:
+ del sys.modules[part_fqname]
+ mod = load_module(part_fqname, fh, filename, desc)
+ else:
+ mod = load_module(part_fqname, fh, filename, desc)
+ finally:
+ if fh:
+ fh.close()
+ release_lock()
+ if parent:
+ setattr(parent, part, mod)
+ if hasattr(mod, '__path__'):
+ path = mod.__path__
+ parent = mod
+ return mod
+
+ def _dirname_if_file(self, filename):
+ # We only take the dirname if we have a path to a non-dir,
+ # because taking the dirname of a symlink to a directory does not
+ # give the actual directory parent.
+ if os.path.isdir(filename):
+ return filename
+ else:
+ return os.path.dirname(filename)
+
+ def sameModule(self, mod, filename):
+ mod_paths = []
+ if hasattr(mod, '__path__'):
+ for path in mod.__path__:
+ mod_paths.append(self._dirname_if_file(path))
+ elif hasattr(mod, '__file__'):
+ mod_paths.append(self._dirname_if_file(mod.__file__))
+ else:
+ # builtin or other module-like object that
+ # doesn't have __file__; must be new
+ return False
+ new_path = self._dirname_if_file(filename)
+ for mod_path in mod_paths:
+ log.debug(
+ "module already loaded? mod: %s new: %s",
+ mod_path, new_path)
+ if _samefile(mod_path, new_path):
+ return True
+ return False
+
+
+def add_path(path, config=None):
+ """Ensure that the path, or the root of the current package (if
+ path is in a package), is in sys.path.
+ """
+
+ # FIXME add any src-looking dirs seen too... need to get config for that
+
+ log.debug('Add path %s' % path)
+ if not path:
+ return []
+ added = []
+ parent = os.path.dirname(path)
+ if (parent
+ and os.path.exists(os.path.join(path, '__init__.py'))):
+ added.extend(add_path(parent, config))
+ elif not path in sys.path:
+ log.debug("insert %s into sys.path", path)
+ sys.path.insert(0, path)
+ added.append(path)
+ if config and config.srcDirs:
+ for dirname in config.srcDirs:
+ dirpath = os.path.join(path, dirname)
+ if os.path.isdir(dirpath):
+ sys.path.insert(0, dirpath)
+ added.append(dirpath)
+ return added
+
+
+def remove_path(path):
+ log.debug('Remove path %s' % path)
+ if path in sys.path:
+ sys.path.remove(path)
diff --git a/lib/spack/external/nose/inspector.py b/lib/spack/external/nose/inspector.py
new file mode 100644
index 0000000000..a6c4a3e3b6
--- /dev/null
+++ b/lib/spack/external/nose/inspector.py
@@ -0,0 +1,207 @@
+"""Simple traceback introspection. Used to add additional information to
+AssertionErrors in tests, so that failure messages may be more informative.
+"""
+import inspect
+import logging
+import re
+import sys
+import textwrap
+import tokenize
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+log = logging.getLogger(__name__)
+
+def inspect_traceback(tb):
+ """Inspect a traceback and its frame, returning source for the expression
+ where the exception was raised, with simple variable replacement performed
+ and the line on which the exception was raised marked with '>>'
+ """
+ log.debug('inspect traceback %s', tb)
+
+ # we only want the innermost frame, where the exception was raised
+ while tb.tb_next:
+ tb = tb.tb_next
+
+ frame = tb.tb_frame
+ lines, exc_line = tbsource(tb)
+
+ # figure out the set of lines to grab.
+ inspect_lines, mark_line = find_inspectable_lines(lines, exc_line)
+ src = StringIO(textwrap.dedent(''.join(inspect_lines)))
+ exp = Expander(frame.f_locals, frame.f_globals)
+
+ while inspect_lines:
+ try:
+ for tok in tokenize.generate_tokens(src.readline):
+ exp(*tok)
+ except tokenize.TokenError, e:
+ # this can happen if our inspectable region happens to butt up
+ # against the end of a construct like a docstring with the closing
+ # """ on separate line
+ log.debug("Tokenizer error: %s", e)
+ inspect_lines.pop(0)
+ mark_line -= 1
+ src = StringIO(textwrap.dedent(''.join(inspect_lines)))
+ exp = Expander(frame.f_locals, frame.f_globals)
+ continue
+ break
+ padded = []
+ if exp.expanded_source:
+ exp_lines = exp.expanded_source.split('\n')
+ ep = 0
+ for line in exp_lines:
+ if ep == mark_line:
+ padded.append('>> ' + line)
+ else:
+ padded.append(' ' + line)
+ ep += 1
+ return '\n'.join(padded)
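+
+# Typical call site (sketch), using the current exception's traceback:
+#
+#   try:
+#       assert got == expected             # placeholders
+#   except AssertionError:
+#       print inspect_traceback(sys.exc_info()[2])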
+
+
+def tbsource(tb, context=6):
+ """Get source from a traceback object.
+
+ A tuple of two things is returned: a list of lines of context from
+ the source code, and the index of the current line within that list.
+ The optional second argument specifies the number of lines of context
+ to return, which are centered around the current line.
+
+ .. Note ::
+ This is adapted from inspect.py in the python 2.4 standard library,
+ since a bug in the 2.3 version of inspect prevents it from correctly
+ locating source lines in a traceback frame.
+ """
+
+ lineno = tb.tb_lineno
+ frame = tb.tb_frame
+
+ if context > 0:
+ start = lineno - 1 - context//2
+ log.debug("lineno: %s start: %s", lineno, start)
+
+ try:
+ lines, dummy = inspect.findsource(frame)
+ except IOError:
+ lines, index = [''], 0
+ else:
+ all_lines = lines
+ start = max(start, 1)
+ start = max(0, min(start, len(lines) - context))
+ lines = lines[start:start+context]
+ index = lineno - 1 - start
+
+ # python 2.5 compat: if previous line ends in a continuation,
+ # decrement start by 1 to match 2.4 behavior
+ if sys.version_info >= (2, 5) and index > 0:
+ while lines[index-1].strip().endswith('\\'):
+ start -= 1
+ lines = all_lines[start:start+context]
+ else:
+ lines, index = [''], 0
+ log.debug("tbsource lines '''%s''' around index %s", lines, index)
+ return (lines, index)
+
+
+def find_inspectable_lines(lines, pos):
+ """Find lines in home that are inspectable.
+
+ Walk back from the error line up to 3 lines, but don't walk back over
+ changes in indent level.
+
+ Walk forward up to 3 lines, counting \ separated lines as 1. Don't walk
+ over changes in indent level (unless part of an extended line)
+ """
+ cnt = re.compile(r'\\[\s\n]*$')
+ df = re.compile(r':[\s\n]*$')
+ ind = re.compile(r'^(\s*)')
+ toinspect = []
+ home = lines[pos]
+ home_indent = ind.match(home).groups()[0]
+
+ before = lines[max(pos-3, 0):pos]
+ before.reverse()
+ after = lines[pos+1:min(pos+4, len(lines))]
+
+ for line in before:
+ if ind.match(line).groups()[0] == home_indent:
+ toinspect.append(line)
+ else:
+ break
+ toinspect.reverse()
+ toinspect.append(home)
+ home_pos = len(toinspect)-1
+ continued = cnt.search(home)
+ for line in after:
+ if ((continued or ind.match(line).groups()[0] == home_indent)
+ and not df.search(line)):
+ toinspect.append(line)
+ continued = cnt.search(line)
+ else:
+ break
+ log.debug("Inspecting lines '''%s''' around %s", toinspect, home_pos)
+ return toinspect, home_pos
+
+
+class Expander:
+ """Simple expression expander. Uses tokenize to find the names and
+ expands any that can be looked up in the frame.
+ """
+ def __init__(self, locals, globals):
+ self.locals = locals
+ self.globals = globals
+ self.lpos = None
+ self.expanded_source = ''
+
+ def __call__(self, ttype, tok, start, end, line):
+ # TODO
+ # deal with unicode properly
+
+ # TODO
+ # Dealing with instance members
+ # always keep the last thing seen
+ # if the current token is a dot,
+ # get ready to getattr(lastthing, this thing) on the
+ # next call.
+
+ if self.lpos is not None:
+ if start[1] >= self.lpos:
+ self.expanded_source += ' ' * (start[1]-self.lpos)
+ elif start[1] < self.lpos:
+ # newline, indent correctly
+ self.expanded_source += ' ' * start[1]
+ self.lpos = end[1]
+
+ if ttype == tokenize.INDENT:
+ pass
+ elif ttype == tokenize.NAME:
+ # Clean this junk up
+ try:
+ val = self.locals[tok]
+ if callable(val):
+ val = tok
+ else:
+ val = repr(val)
+ except KeyError:
+ try:
+ val = self.globals[tok]
+ if callable(val):
+ val = tok
+ else:
+ val = repr(val)
+
+ except KeyError:
+ val = tok
+ # FIXME... not sure how to handle things like funcs, classes
+ # FIXME this is broken for some unicode strings
+ self.expanded_source += val
+ else:
+ self.expanded_source += tok
+ # if this is the end of the line and the line ends with
+ # \, then tack a \ and newline onto the output
+ # print line[end[1]:]
+ if re.match(r'\s+\\\n', line[end[1]:]):
+ self.expanded_source += ' \\\n'
diff --git a/lib/spack/external/nose/loader.py b/lib/spack/external/nose/loader.py
new file mode 100644
index 0000000000..3744e54ff6
--- /dev/null
+++ b/lib/spack/external/nose/loader.py
@@ -0,0 +1,623 @@
+"""
+Test Loader
+-----------
+
+nose's test loader implements the same basic functionality as its
+superclass, unittest.TestLoader, but extends it by more liberal
+interpretations of what may be a test and how a test may be named.
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import unittest
+import types
+from inspect import isfunction
+from nose.pyversion import unbound_method, ismethod
+from nose.case import FunctionTestCase, MethodTestCase
+from nose.failure import Failure
+from nose.config import Config
+from nose.importer import Importer, add_path, remove_path
+from nose.selector import defaultSelector, TestAddress
+from nose.util import func_lineno, getpackage, isclass, isgenerator, \
+ ispackage, regex_last_key, resolve_name, transplant_func, \
+ transplant_class, test_address
+from nose.suite import ContextSuiteFactory, ContextList, LazySuite
+from nose.pyversion import sort_list, cmp_to_key
+
+
+log = logging.getLogger(__name__)
+#log.setLevel(logging.DEBUG)
+
+# for efficiency and easier mocking
+op_normpath = os.path.normpath
+op_abspath = os.path.abspath
+op_join = os.path.join
+op_isdir = os.path.isdir
+op_isfile = os.path.isfile
+
+
+__all__ = ['TestLoader', 'defaultTestLoader']
+
+
+class TestLoader(unittest.TestLoader):
+ """Test loader that extends unittest.TestLoader to:
+
+ * Load tests from test-like functions and classes that are not
+ unittest.TestCase subclasses
+ * Find and load test modules in a directory
+ * Support tests that are generators
+ * Support easy extensions of or changes to that behavior through plugins
+ """
+ config = None
+ importer = None
+ workingDir = None
+ selector = None
+ suiteClass = None
+
+ def __init__(self, config=None, importer=None, workingDir=None,
+ selector=None):
+ """Initialize a test loader.
+
+ Parameters (all optional):
+
+ * config: provide a `nose.config.Config`_ or other config class
+ instance; if not provided a `nose.config.Config`_ with
+ default values is used.
+ * importer: provide an importer instance that implements
+ `importFromPath`. If not provided, a
+ `nose.importer.Importer`_ is used.
+ * workingDir: the directory to which file and module names are
+ relative. If not provided, assumed to be the current working
+ directory.
+ * selector: a selector class or instance. If a class is
+ provided, it will be instantiated with one argument, the
+ current config. If not provided, a `nose.selector.Selector`_
+ is used.
+ """
+ if config is None:
+ config = Config()
+ if importer is None:
+ importer = Importer(config=config)
+ if workingDir is None:
+ workingDir = config.workingDir
+ if selector is None:
+ selector = defaultSelector(config)
+ elif isclass(selector):
+ selector = selector(config)
+ self.config = config
+ self.importer = importer
+ self.workingDir = op_normpath(op_abspath(workingDir))
+ self.selector = selector
+ if config.addPaths:
+ add_path(workingDir, config)
+ self.suiteClass = ContextSuiteFactory(config=config)
+
+ self._visitedPaths = set([])
+
+ unittest.TestLoader.__init__(self)
+
+ def getTestCaseNames(self, testCaseClass):
+ """Override to select with selector, unless
+ config.getTestCaseNamesCompat is True
+ """
+ if self.config.getTestCaseNamesCompat:
+ return unittest.TestLoader.getTestCaseNames(self, testCaseClass)
+
+ def wanted(attr, cls=testCaseClass, sel=self.selector):
+ item = getattr(cls, attr, None)
+ if isfunction(item):
+ item = unbound_method(cls, item)
+ elif not ismethod(item):
+ return False
+ return sel.wantMethod(item)
+
+ cases = filter(wanted, dir(testCaseClass))
+
+ # add runTest if nothing else picked
+ if not cases and hasattr(testCaseClass, 'runTest'):
+ cases = ['runTest']
+ if self.sortTestMethodsUsing:
+ sort_list(cases, cmp_to_key(self.sortTestMethodsUsing))
+ return cases
+
+ def _haveVisited(self, path):
+ # For cases where path is None, we always pretend we haven't visited
+ # them.
+ if path is None:
+ return False
+
+ return path in self._visitedPaths
+
+ def _addVisitedPath(self, path):
+ if path is not None:
+ self._visitedPaths.add(path)
+
+ def loadTestsFromDir(self, path):
+ """Load tests from the directory at path. This is a generator
+ -- each suite of tests from a module or other file is yielded
+ and is expected to be executed before the next file is
+ examined.
+ """
+ log.debug("load from dir %s", path)
+ plugins = self.config.plugins
+ plugins.beforeDirectory(path)
+ if self.config.addPaths:
+ paths_added = add_path(path, self.config)
+
+ entries = os.listdir(path)
+ sort_list(entries, regex_last_key(self.config.testMatch))
+ for entry in entries:
+ # this hard-coded initial-dot test will be removed:
+ # http://code.google.com/p/python-nose/issues/detail?id=82
+ if entry.startswith('.'):
+ continue
+ entry_path = op_abspath(op_join(path, entry))
+ is_file = op_isfile(entry_path)
+ wanted = False
+ if is_file:
+ is_dir = False
+ wanted = self.selector.wantFile(entry_path)
+ else:
+ is_dir = op_isdir(entry_path)
+ if is_dir:
+ # this hard-coded initial-underscore test will be removed:
+ # http://code.google.com/p/python-nose/issues/detail?id=82
+ if entry.startswith('_'):
+ continue
+ wanted = self.selector.wantDirectory(entry_path)
+ is_package = ispackage(entry_path)
+
+ # Python 3.3 now implements PEP 420: Implicit Namespace Packages.
+ # As a result, it's now possible that parent paths that have a
+ # segment with the same basename as our package ends up
+ # in module.__path__. So we have to keep track of what we've
+ # visited, and not-revisit them again.
+ if wanted and not self._haveVisited(entry_path):
+ self._addVisitedPath(entry_path)
+ if is_file:
+ plugins.beforeContext()
+ if entry.endswith('.py'):
+ yield self.loadTestsFromName(
+ entry_path, discovered=True)
+ else:
+ yield self.loadTestsFromFile(entry_path)
+ plugins.afterContext()
+ elif is_package:
+ # Load the entry as a package: given the full path,
+ # loadTestsFromName() will figure it out
+ yield self.loadTestsFromName(
+ entry_path, discovered=True)
+ else:
+ # Another test dir in this one: recurse lazily
+ yield self.suiteClass(
+ lambda: self.loadTestsFromDir(entry_path))
+ tests = []
+ for test in plugins.loadTestsFromDir(path):
+ tests.append(test)
+ # TODO: is this try/except needed?
+ try:
+ if tests:
+ yield self.suiteClass(tests)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ yield self.suiteClass([Failure(*sys.exc_info())])
+
+ # pop paths
+ if self.config.addPaths:
+ for p in paths_added:
+ remove_path(p)
+ plugins.afterDirectory(path)
+
+ def loadTestsFromFile(self, filename):
+ """Load tests from a non-module file. Default is to raise a
+ ValueError; plugins may implement `loadTestsFromFile` to
+ provide a list of tests loaded from the file.
+ """
+ log.debug("Load from non-module file %s", filename)
+ try:
+ tests = [test for test in
+ self.config.plugins.loadTestsFromFile(filename)]
+ if tests:
+ # Plugins can yield False to indicate that they were
+ # unable to load tests from a file, but it was not an
+ # error -- the file just had no tests to load.
+ tests = filter(None, tests)
+ return self.suiteClass(tests)
+ else:
+ # Nothing was able to even try to load from this file
+ open(filename, 'r').close() # trigger os error
+ raise ValueError("Unable to load tests from file %s"
+ % filename)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ exc = sys.exc_info()
+ return self.suiteClass(
+ [Failure(exc[0], exc[1], exc[2],
+ address=(filename, None, None))])
+
+ def loadTestsFromGenerator(self, generator, module):
+ """Lazy-load tests from a generator function. The generator function
+ may yield either:
+
+ * a callable, or
+ * a function name resolvable within the same module
+ """
+ def generate(g=generator, m=module):
+ try:
+ for test in g():
+ test_func, arg = self.parseGeneratedTest(test)
+ if not callable(test_func):
+ test_func = getattr(m, test_func)
+ yield FunctionTestCase(test_func, arg=arg, descriptor=g)
+ except KeyboardInterrupt:
+ raise
+ except:
+ exc = sys.exc_info()
+ yield Failure(exc[0], exc[1], exc[2],
+ address=test_address(generator))
+ return self.suiteClass(generate, context=generator, can_split=False)
+
+ def loadTestsFromGeneratorMethod(self, generator, cls):
+ """Lazy-load tests from a generator method.
+
+ This is more complicated than loading from a generator function,
+ since a generator method may yield:
+
+ * a function
+ * a bound or unbound method, or
+ * a method name
+ """
+ # convert the unbound generator method
+ # into a bound method so it can be called below
+ if hasattr(generator, 'im_class'):
+ cls = generator.im_class
+ inst = cls()
+ method = generator.__name__
+ generator = getattr(inst, method)
+
+ def generate(g=generator, c=cls):
+ try:
+ for test in g():
+ test_func, arg = self.parseGeneratedTest(test)
+ if not callable(test_func):
+ test_func = unbound_method(c, getattr(c, test_func))
+ if ismethod(test_func):
+ yield MethodTestCase(test_func, arg=arg, descriptor=g)
+ elif callable(test_func):
+ # In this case we're forcing the 'MethodTestCase'
+ # to run the inline function as its test call,
+ # but using the generator method as the 'method of
+ # record' (so no need to pass it as the descriptor)
+ yield MethodTestCase(g, test=test_func, arg=arg)
+ else:
+ yield Failure(
+ TypeError,
+ "%s is not a callable or method" % test_func)
+ except KeyboardInterrupt:
+ raise
+ except:
+ exc = sys.exc_info()
+ yield Failure(exc[0], exc[1], exc[2],
+ address=test_address(generator))
+ return self.suiteClass(generate, context=generator, can_split=False)
+
+ def loadTestsFromModule(self, module, path=None, discovered=False):
+ """Load all tests from module and return a suite containing
+ them. If the module has been discovered and is not test-like,
+ the suite will be empty by default, though plugins may add
+ their own tests.
+ """
+ log.debug("Load from module %s", module)
+ tests = []
+ test_classes = []
+ test_funcs = []
+ # For *discovered* modules, we only load tests when the module looks
+ # testlike. For modules we've been directed to load, we always
+ # look for tests. (discovered is set to True by loadTestsFromDir)
+ if not discovered or self.selector.wantModule(module):
+ for item in dir(module):
+ test = getattr(module, item, None)
+ # print "Check %s (%s) in %s" % (item, test, module.__name__)
+ if isclass(test):
+ if self.selector.wantClass(test):
+ test_classes.append(test)
+ elif isfunction(test) and self.selector.wantFunction(test):
+ test_funcs.append(test)
+ sort_list(test_classes, lambda x: x.__name__)
+ sort_list(test_funcs, func_lineno)
+ tests = map(lambda t: self.makeTest(t, parent=module),
+ test_classes + test_funcs)
+
+ # Now, descend into packages
+ # FIXME can or should this be lazy?
+ # is this syntax 2.2 compatible?
+ module_paths = getattr(module, '__path__', [])
+
+ if path:
+ path = os.path.normcase(os.path.realpath(path))
+
+ for module_path in module_paths:
+ log.debug("Load tests from module path %s?", module_path)
+ log.debug("path: %s os.path.realpath(%s): %s",
+ path, os.path.normcase(module_path),
+ os.path.realpath(os.path.normcase(module_path)))
+ if (self.config.traverseNamespace or not path) or \
+ os.path.realpath(
+ os.path.normcase(module_path)).startswith(path):
+ # Egg files can be on sys.path, so make sure the path is a
+ # directory before trying to load from it.
+ if os.path.isdir(module_path):
+ tests.extend(self.loadTestsFromDir(module_path))
+
+ for test in self.config.plugins.loadTestsFromModule(module, path):
+ tests.append(test)
+
+ return self.suiteClass(ContextList(tests, context=module))
+
+ def loadTestsFromName(self, name, module=None, discovered=False):
+ """Load tests from the entity with the given name.
+
+ The name may indicate a file, directory, module, or any object
+ within a module. See `nose.util.split_test_name` for details on
+ test name parsing.
+ """
+ # FIXME refactor this method into little bites?
+ log.debug("load from %s (%s)", name, module)
+
+ suite = self.suiteClass
+
+ # give plugins first crack
+ plug_tests = self.config.plugins.loadTestsFromName(name, module)
+ if plug_tests:
+ return suite(plug_tests)
+
+ addr = TestAddress(name, workingDir=self.workingDir)
+ if module:
+ # Two cases:
+ # name is class.foo
+ # The addr will be incorrect, since it thinks class.foo is
+ # a dotted module name. It's actually a dotted attribute
+ # name. In this case we want to use the full submitted
+ # name as the name to load from the module.
+ # name is module:class.foo
+ # The addr will be correct. The part we want is the part after
+ # the :, which is in addr.call.
+ if addr.call:
+ name = addr.call
+ parent, obj = self.resolve(name, module)
+ if (isclass(parent)
+ and getattr(parent, '__module__', None) != module.__name__
+ and not isinstance(obj, Failure)):
+ parent = transplant_class(parent, module.__name__)
+ obj = getattr(parent, obj.__name__)
+ log.debug("parent %s obj %s module %s", parent, obj, module)
+ if isinstance(obj, Failure):
+ return suite([obj])
+ else:
+ return suite(ContextList([self.makeTest(obj, parent)],
+ context=parent))
+ else:
+ if addr.module:
+ try:
+ if addr.filename is None:
+ module = resolve_name(addr.module)
+ else:
+ self.config.plugins.beforeImport(
+ addr.filename, addr.module)
+ # FIXME: to support module.name names,
+ # do what resolve-name does and keep trying to
+ # import, popping tail of module into addr.call,
+ # until we either get an import or run out of
+ # module parts
+ try:
+ module = self.importer.importFromPath(
+ addr.filename, addr.module)
+ finally:
+ self.config.plugins.afterImport(
+ addr.filename, addr.module)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ exc = sys.exc_info()
+ return suite([Failure(exc[0], exc[1], exc[2],
+ address=addr.totuple())])
+ if addr.call:
+ return self.loadTestsFromName(addr.call, module)
+ else:
+ return self.loadTestsFromModule(
+ module, addr.filename,
+ discovered=discovered)
+ elif addr.filename:
+ path = addr.filename
+ if addr.call:
+ package = getpackage(path)
+ if package is None:
+ return suite([
+ Failure(ValueError,
+ "Can't find callable %s in file %s: "
+ "file is not a python module" %
+ (addr.call, path),
+ address=addr.totuple())])
+ return self.loadTestsFromName(addr.call, module=package)
+ else:
+ if op_isdir(path):
+ # In this case we *can* be lazy since we know
+ # that each module in the dir will be fully
+ # loaded before its tests are executed; we
+ # also know that we're not going to be asked
+ # to load from . and ./some_module.py *as part
+ # of this named test load*
+ return LazySuite(
+ lambda: self.loadTestsFromDir(path))
+ elif op_isfile(path):
+ return self.loadTestsFromFile(path)
+ else:
+ return suite([
+ Failure(OSError, "No such file %s" % path,
+ address=addr.totuple())])
+ else:
+ # just a function? what to do? I think it can only be
+ # handled when module is not None
+ return suite([
+ Failure(ValueError, "Unresolvable test name %s" % name,
+ address=addr.totuple())])
+
+ def loadTestsFromNames(self, names, module=None):
+ """Load tests from all names, returning a suite containing all
+ tests.
+ """
+ plug_res = self.config.plugins.loadTestsFromNames(names, module)
+ if plug_res:
+ suite, names = plug_res
+ if suite:
+ return self.suiteClass([
+ self.suiteClass(suite),
+ unittest.TestLoader.loadTestsFromNames(self, names, module)
+ ])
+ return unittest.TestLoader.loadTestsFromNames(self, names, module)
+
+ def loadTestsFromTestCase(self, testCaseClass):
+ """Load tests from a unittest.TestCase subclass.
+ """
+ cases = []
+ plugins = self.config.plugins
+ for case in plugins.loadTestsFromTestCase(testCaseClass):
+ cases.append(case)
+ # For efficiency in the most common case, just call and return from
+ # super. This avoids having to extract cases and rebuild a context
+ # suite when there are no plugin-contributed cases.
+ if not cases:
+ return super(TestLoader, self).loadTestsFromTestCase(testCaseClass)
+ cases.extend(
+ [case for case in
+ super(TestLoader, self).loadTestsFromTestCase(testCaseClass)])
+ return self.suiteClass(cases)
+
+ def loadTestsFromTestClass(self, cls):
+ """Load tests from a test class that is *not* a unittest.TestCase
+ subclass.
+
+ In this case, we can't depend on the class's `__init__` taking method
+ name arguments, so we have to compose a MethodTestCase for each
+ method in the class that looks testlike.
+ """
+ def wanted(attr, cls=cls, sel=self.selector):
+ item = getattr(cls, attr, None)
+ if isfunction(item):
+ item = unbound_method(cls, item)
+ elif not ismethod(item):
+ return False
+ return sel.wantMethod(item)
+ cases = [self.makeTest(getattr(cls, case), cls)
+ for case in filter(wanted, dir(cls))]
+ for test in self.config.plugins.loadTestsFromTestClass(cls):
+ cases.append(test)
+ return self.suiteClass(ContextList(cases, context=cls))
+
+ def makeTest(self, obj, parent=None):
+ try:
+ return self._makeTest(obj, parent)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ exc = sys.exc_info()
+ try:
+ addr = test_address(obj)
+ except KeyboardInterrupt:
+ raise
+ except:
+ addr = None
+ return Failure(exc[0], exc[1], exc[2], address=addr)
+
+ def _makeTest(self, obj, parent=None):
+ """Given a test object and its parent, return a test case
+ or test suite.
+ """
+ plug_tests = []
+ try:
+ addr = test_address(obj)
+ except KeyboardInterrupt:
+ raise
+ except:
+ addr = None
+ for test in self.config.plugins.makeTest(obj, parent):
+ plug_tests.append(test)
+ # TODO: is this try/except needed?
+ try:
+ if plug_tests:
+ return self.suiteClass(plug_tests)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ exc = sys.exc_info()
+ return Failure(exc[0], exc[1], exc[2], address=addr)
+
+ if isfunction(obj) and parent and not isinstance(parent, types.ModuleType):
+ # This is a Python 3.x 'unbound method'. Wrap it with its
+ # associated class..
+ obj = unbound_method(parent, obj)
+
+ if isinstance(obj, unittest.TestCase):
+ return obj
+ elif isclass(obj):
+ if parent and obj.__module__ != parent.__name__:
+ obj = transplant_class(obj, parent.__name__)
+ if issubclass(obj, unittest.TestCase):
+ return self.loadTestsFromTestCase(obj)
+ else:
+ return self.loadTestsFromTestClass(obj)
+ elif ismethod(obj):
+ if parent is None:
+ parent = obj.__class__
+ if issubclass(parent, unittest.TestCase):
+ return parent(obj.__name__)
+ else:
+ if isgenerator(obj):
+ return self.loadTestsFromGeneratorMethod(obj, parent)
+ else:
+ return MethodTestCase(obj)
+ elif isfunction(obj):
+ if parent and obj.__module__ != parent.__name__:
+ obj = transplant_func(obj, parent.__name__)
+ if isgenerator(obj):
+ return self.loadTestsFromGenerator(obj, parent)
+ else:
+ return FunctionTestCase(obj)
+ else:
+ return Failure(TypeError,
+ "Can't make a test from %s" % obj,
+ address=addr)
+
+ def resolve(self, name, module):
+ """Resolve name within module
+ """
+ obj = module
+ parts = name.split('.')
+ for part in parts:
+ parent, obj = obj, getattr(obj, part, None)
+ if obj is None:
+ # no such test
+ obj = Failure(ValueError, "No such test %s" % name)
+ return parent, obj
+
+ def parseGeneratedTest(self, test):
+ """Given the yield value of a test generator, return a func and args.
+
+ This is used in the two loadTestsFromGenerator* methods.
+
+ """
+ if not isinstance(test, tuple): # yield test
+ test_func, arg = (test, tuple())
+ elif len(test) == 1: # yield (test,)
+ test_func, arg = (test[0], tuple())
+ else: # yield test, foo, bar, ...
+ assert len(test) > 1 # sanity check
+ test_func, arg = (test[0], test[1:])
+ return test_func, arg
+
+defaultTestLoader = TestLoader
+
diff --git a/lib/spack/external/nose/plugins/__init__.py b/lib/spack/external/nose/plugins/__init__.py
new file mode 100644
index 0000000000..08ee8f3230
--- /dev/null
+++ b/lib/spack/external/nose/plugins/__init__.py
@@ -0,0 +1,190 @@
+"""
+Writing Plugins
+---------------
+
+nose supports plugins for test collection, selection, observation and
+reporting. There are two basic rules for plugins:
+
+* Plugin classes should subclass :class:`nose.plugins.Plugin`.
+
+* Plugins may implement any of the methods described in the class
+ :doc:`IPluginInterface <interface>` in nose.plugins.base. Please note that
+  this class is for documentation purposes only; plugins may not subclass
+ IPluginInterface.
+
+Hello World
+===========
+
+Here's a basic plugin. It doesn't do much so read on for more ideas or dive
+into the :doc:`IPluginInterface <interface>` to see all available hooks.
+
+.. code-block:: python
+
+ import logging
+ import os
+
+ from nose.plugins import Plugin
+
+ log = logging.getLogger('nose.plugins.helloworld')
+
+ class HelloWorld(Plugin):
+ name = 'helloworld'
+
+ def options(self, parser, env=os.environ):
+ super(HelloWorld, self).options(parser, env=env)
+
+ def configure(self, options, conf):
+ super(HelloWorld, self).configure(options, conf)
+ if not self.enabled:
+ return
+
+ def finalize(self, result):
+ log.info('Hello pluginized world!')
+
+Registering
+===========
+
+.. Note::
+ Important note: the following applies only to the default
+ plugin manager. Other plugin managers may use different means to
+ locate and load plugins.
+
+For nose to find a plugin, it must be part of a package that uses
+setuptools_, and the plugin must be included in the entry points defined
+in the setup.py for the package:
+
+.. code-block:: python
+
+ setup(name='Some plugin',
+ # ...
+ entry_points = {
+ 'nose.plugins.0.10': [
+ 'someplugin = someplugin:SomePlugin'
+ ]
+ },
+ # ...
+ )
+
+Once the package is installed with install or develop, nose will be able
+to load the plugin.
+
+.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
+
+Registering a plugin without setuptools
+=======================================
+
+It is currently possible to register a plugin programmatically by
+creating a custom nose runner like this:
+
+.. code-block:: python
+
+ import nose
+ from yourplugin import YourPlugin
+
+ if __name__ == '__main__':
+ nose.main(addplugins=[YourPlugin()])
+
+Defining options
+================
+
+All plugins must implement the methods ``options(self, parser, env)``
+and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin
+that want the standard options should call the superclass methods.
+
+nose uses optparse.OptionParser from the standard library to parse
+arguments. A plugin's ``options()`` method receives a parser
+instance. It's good form for a plugin to use that instance only to add
+long options (--like-this), not short ones. Most of nose's built-in
+options get their default value from an environment variable.
+
+A plugin's ``configure()`` method receives the parsed ``OptionParser`` options
+object, as well as the current config object. Plugins should configure their
+behavior based on the user-selected settings, and may raise exceptions
+if the configured behavior is nonsensical.
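+
+As a sketch of that pattern (the plugin, option, and environment variable
+names below are hypothetical, not part of nose's API):
+
+.. code-block:: python
+
+    import os
+
+    from nose.plugins import Plugin
+
+    class ReportLevel(Plugin):
+        name = 'report-level'
+
+        def options(self, parser, env=os.environ):
+            super(ReportLevel, self).options(parser, env=env)
+            # Add only a long option; its default comes from an
+            # environment variable, like nose's built-in options.
+            parser.add_option('--report-level', dest='report_level',
+                              default=env.get('NOSE_REPORT_LEVEL', '1'),
+                              help='Report verbosity [NOSE_REPORT_LEVEL]')
+
+        def configure(self, options, conf):
+            super(ReportLevel, self).configure(options, conf)
+            if not self.enabled:
+                return
+            # Raising here is appropriate if the setting is nonsensical
+            self.level = int(options.report_level)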
+
+Logging
+=======
+
+nose uses the logging classes from the standard library. To enable users
+to view debug messages easily, plugins should use ``logging.getLogger()`` to
+acquire a logger in the ``nose.plugins`` namespace.
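+
+For example, a hypothetical plugin might acquire its logger like this:
+
+.. code-block:: python
+
+    import logging
+
+    log = logging.getLogger('nose.plugins.myplugin')
+    log.debug('myplugin loaded')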
+
+Recipes
+=======
+
+* Writing a plugin that monitors or controls test result output
+
+ Implement any or all of ``addError``, ``addFailure``, etc., to monitor test
+ results. If you also want to monitor output, implement
+ ``setOutputStream`` and keep a reference to the output stream. If you
+ want to prevent the builtin ``TextTestResult`` output, implement
+  ``setOutputStream`` and *return a dummy stream*. The default output will go
+ to the dummy stream, while you send your desired output to the real stream.
+
+ Example: `examples/html_plugin/htmlplug.py`_
+
+* Writing a plugin that handles exceptions
+
+ Subclass :doc:`ErrorClassPlugin <errorclasses>`.
+
+ Examples: :doc:`nose.plugins.deprecated <deprecated>`,
+ :doc:`nose.plugins.skip <skip>`
+
+* Writing a plugin that adds detail to error reports
+
+ Implement ``formatError`` and/or ``formatFailure``. The error tuple
+ you return (error class, error message, traceback) will replace the
+ original error tuple.
+
+ Examples: :doc:`nose.plugins.capture <capture>`,
+ :doc:`nose.plugins.failuredetail <failuredetail>`
+
+* Writing a plugin that loads tests from files other than python modules
+
+ Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``,
+ return True for files that you want to examine for tests. In
+ ``loadTestsFromFile``, for those files, return an iterable
+ containing TestCases (or yield them as you find them;
+ ``loadTestsFromFile`` may also be a generator).
+
+ Example: :doc:`nose.plugins.doctests <doctests>`
+
+* Writing a plugin that prints a report
+
+ Implement ``begin`` if you need to perform setup before testing
+ begins. Implement ``report`` and output your report to the provided stream.
+
+ Examples: :doc:`nose.plugins.cover <cover>`, :doc:`nose.plugins.prof <prof>`
+
+* Writing a plugin that selects or rejects tests
+
+ Implement any or all ``want*`` methods. Return False to reject the test
+ candidate, True to accept it -- which means that the test candidate
+ will pass through the rest of the system, so you must be prepared to
+ load tests from it if tests can't be loaded by the core loader or
+  another plugin -- and None if you don't care (see the sketch after
+  this list).
+
+ Examples: :doc:`nose.plugins.attrib <attrib>`,
+ :doc:`nose.plugins.doctests <doctests>`, :doc:`nose.plugins.testid <testid>`
+
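+A minimal sketch of a selection plugin (the plugin name and the
+``scratch_`` filename convention are hypothetical):
+
+.. code-block:: python
+
+    import os
+
+    from nose.plugins import Plugin
+
+    class SkipScratchFiles(Plugin):
+        name = 'skip-scratch'
+
+        def wantFile(self, file):
+            # False rejects the file outright; None defers to other
+            # plugins and the default selector.
+            if os.path.basename(file).startswith('scratch_'):
+                return False
+            return None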
+
+More Examples
+=============
+
+See any builtin plugin or example plugin in the examples_ directory in
+the nose source distribution. There is a list of third-party plugins
+`on jottit`_.
+
+.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py
+.. _examples: http://python-nose.googlecode.com/svn/trunk/examples
+.. _on jottit: http://nose-plugins.jottit.com/
+
+"""
+from nose.plugins.base import Plugin
+from nose.plugins.manager import *
+from nose.plugins.plugintest import PluginTester
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/lib/spack/external/nose/plugins/allmodules.py b/lib/spack/external/nose/plugins/allmodules.py
new file mode 100644
index 0000000000..1ccd7773a7
--- /dev/null
+++ b/lib/spack/external/nose/plugins/allmodules.py
@@ -0,0 +1,45 @@
+"""Use the AllModules plugin by passing ``--all-modules`` or setting the
+NOSE_ALL_MODULES environment variable to enable collection and execution of
+tests in all python modules. Normal nose behavior is to look for tests only in
+modules that match testMatch.
+
+More information: :doc:`../doc_tests/test_allmodules/test_allmodules`
+
+.. warning ::
+
+ This plugin can have surprising interactions with plugins that load tests
+ from what nose normally considers non-test modules, such as
+ the :doc:`doctest plugin <doctests>`. This is because any given
+ object in a module can't be loaded both by a plugin and the normal nose
+ :class:`test loader <nose.loader.TestLoader>`. Also, if you have functions
+ or classes in non-test modules that look like tests but aren't, you will
+ likely see errors as nose attempts to run them as tests.
+
+"""
+
+import os
+from nose.plugins.base import Plugin
+
+class AllModules(Plugin):
+ """Collect tests from all python modules.
+ """
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ env_opt = 'NOSE_ALL_MODULES'
+ parser.add_option('--all-modules',
+ action="store_true",
+ dest=self.enableOpt,
+ default=env.get(env_opt),
+ help="Enable plugin %s: %s [%s]" %
+ (self.__class__.__name__, self.help(), env_opt))
+
+ def wantFile(self, file):
+ """Override to return True for all files ending with .py"""
+ # always want .py files
+ if file.endswith('.py'):
+ return True
+
+ def wantModule(self, module):
+ """Override return True for all modules"""
+ return True
diff --git a/lib/spack/external/nose/plugins/attrib.py b/lib/spack/external/nose/plugins/attrib.py
new file mode 100644
index 0000000000..3d4422a23a
--- /dev/null
+++ b/lib/spack/external/nose/plugins/attrib.py
@@ -0,0 +1,286 @@
+"""Attribute selector plugin.
+
+Oftentimes when testing you will want to select tests based on
+criteria rather than simply by filename. For example, you might want
+to run all tests except for the slow ones. You can do this with the
+Attribute selector plugin by setting attributes on your test methods.
+Here is an example:
+
+.. code-block:: python
+
+ def test_big_download():
+ import urllib
+ # commence slowness...
+
+ test_big_download.slow = 1
+
+Once you've assigned an attribute ``slow = 1`` you can exclude that
+test and all other tests having the slow attribute by running ::
+
+ $ nosetests -a '!slow'
+
+There is also a decorator available for you that will set attributes.
+Here's how to set ``slow=1`` like above with the decorator:
+
+.. code-block:: python
+
+ from nose.plugins.attrib import attr
+ @attr('slow')
+ def test_big_download():
+ import urllib
+ # commence slowness...
+
+And here's how to set an attribute with a specific value:
+
+.. code-block:: python
+
+ from nose.plugins.attrib import attr
+ @attr(speed='slow')
+ def test_big_download():
+ import urllib
+ # commence slowness...
+
+This test could be run with ::
+
+ $ nosetests -a speed=slow
+
+In Python 2.6 and higher, ``@attr`` can be used on a class to set attributes
+on all its test methods at once. For example:
+
+.. code-block:: python
+
+ from nose.plugins.attrib import attr
+ @attr(speed='slow')
+ class MyTestCase:
+ def test_long_integration(self):
+ pass
+ def test_end_to_end_something(self):
+ pass
+
+Below is a reference to the different syntaxes available.
+
+Simple syntax
+-------------
+
+Examples of using the ``-a`` and ``--attr`` options:
+
+* ``nosetests -a status=stable``
+ Only runs tests with attribute "status" having value "stable"
+
+* ``nosetests -a priority=2,status=stable``
+ Runs tests having both attributes and values
+
+* ``nosetests -a priority=2 -a slow``
+ Runs tests that match either attribute
+
+* ``nosetests -a tags=http``
+ If a test's ``tags`` attribute was a list and it contained the value
+ ``http`` then it would be run
+
+* ``nosetests -a slow``
+ Runs tests with the attribute ``slow`` if its value does not equal False
+ (False, [], "", etc...)
+
+* ``nosetests -a '!slow'``
+ Runs tests that do NOT have the attribute ``slow`` or have a ``slow``
+ attribute that is equal to False
+ **NOTE**:
+  if your shell (like bash) interprets '!' as a special character, make sure to
+ put single quotes around it.
+
+Expression Evaluation
+---------------------
+
+Examples using the ``-A`` and ``--eval-attr`` options:
+
+* ``nosetests -A "not slow"``
+ Evaluates the Python expression "not slow" and runs the test if True
+
+* ``nosetests -A "(priority > 5) and not slow"``
+ Evaluates a complex Python expression and runs the test if True
+
+"""
+import inspect
+import logging
+import os
+import sys
+from inspect import isfunction
+from nose.plugins.base import Plugin
+from nose.util import tolist
+
+log = logging.getLogger('nose.plugins.attrib')
+compat_24 = sys.version_info >= (2, 4)
+
+def attr(*args, **kwargs):
+ """Decorator that adds attributes to classes or functions
+ for use with the Attribute (-a) plugin.
+ """
+ def wrap_ob(ob):
+ for name in args:
+ setattr(ob, name, True)
+ for name, value in kwargs.iteritems():
+ setattr(ob, name, value)
+ return ob
+ return wrap_ob
+
+def get_method_attr(method, cls, attr_name, default = False):
+ """Look up an attribute on a method/ function.
+ If the attribute isn't found there, looking it up in the
+ method's class, if any.
+ """
+ Missing = object()
+ value = getattr(method, attr_name, Missing)
+ if value is Missing and cls is not None:
+ value = getattr(cls, attr_name, Missing)
+ if value is Missing:
+ return default
+ return value
+
+
+class ContextHelper:
+ """Object that can act as context dictionary for eval and looks up
+ names as attributes on a method/ function and its class.
+ """
+ def __init__(self, method, cls):
+ self.method = method
+ self.cls = cls
+
+ def __getitem__(self, name):
+ return get_method_attr(self.method, self.cls, name)
+
+
+class AttributeSelector(Plugin):
+ """Selects test cases to be run based on their attributes.
+ """
+
+ def __init__(self):
+ Plugin.__init__(self)
+ self.attribs = []
+
+ def options(self, parser, env):
+ """Register command line options"""
+ parser.add_option("-a", "--attr",
+ dest="attr", action="append",
+ default=env.get('NOSE_ATTR'),
+ metavar="ATTR",
+ help="Run only tests that have attributes "
+ "specified by ATTR [NOSE_ATTR]")
+ # disable in < 2.4: eval can't take needed args
+ if compat_24:
+ parser.add_option("-A", "--eval-attr",
+ dest="eval_attr", metavar="EXPR", action="append",
+ default=env.get('NOSE_EVAL_ATTR'),
+ help="Run only tests for whose attributes "
+ "the Python expression EXPR evaluates "
+ "to True [NOSE_EVAL_ATTR]")
+
+ def configure(self, options, config):
+ """Configure the plugin and system, based on selected options.
+
+ attr and eval_attr may each be lists.
+
+ self.attribs will be a list of lists of tuples. In that list, each
+ list is a group of attributes, all of which must match for the rule to
+ match.
+ """
+ self.attribs = []
+
+ # handle python eval-expression parameter
+ if compat_24 and options.eval_attr:
+ eval_attr = tolist(options.eval_attr)
+ for attr in eval_attr:
+ # "<python expression>"
+ # -> eval(expr) in attribute context must be True
+ def eval_in_context(expr, obj, cls):
+ return eval(expr, None, ContextHelper(obj, cls))
+ self.attribs.append([(attr, eval_in_context)])
+
+ # attribute requirements are a comma separated list of
+ # 'key=value' pairs
+ if options.attr:
+ std_attr = tolist(options.attr)
+ for attr in std_attr:
+ # all attributes within an attribute group must match
+ attr_group = []
+ for attrib in attr.strip().split(","):
+ # don't die on trailing comma
+ if not attrib:
+ continue
+ items = attrib.split("=", 1)
+ if len(items) > 1:
+ # "name=value"
+ # -> 'str(obj.name) == value' must be True
+ key, value = items
+ else:
+ key = items[0]
+ if key[0] == "!":
+ # "!name"
+ # 'bool(obj.name)' must be False
+ key = key[1:]
+ value = False
+ else:
+ # "name"
+ # -> 'bool(obj.name)' must be True
+ value = True
+ attr_group.append((key, value))
+ self.attribs.append(attr_group)
+ if self.attribs:
+ self.enabled = True
+
+ def validateAttrib(self, method, cls = None):
+ """Verify whether a method has the required attributes
+ The method is considered a match if it matches all attributes
+ for any attribute group.
+ ."""
+ # TODO: is there a need for case-sensitive value comparison?
+ any = False
+ for group in self.attribs:
+ match = True
+ for key, value in group:
+ attr = get_method_attr(method, cls, key)
+ if callable(value):
+ if not value(key, method, cls):
+ match = False
+ break
+ elif value is True:
+ # value must exist and be True
+ if not bool(attr):
+ match = False
+ break
+ elif value is False:
+ # value must not exist or be False
+ if bool(attr):
+ match = False
+ break
+ elif type(attr) in (list, tuple):
+ # value must be found in the list attribute
+ if not str(value).lower() in [str(x).lower()
+ for x in attr]:
+ match = False
+ break
+ else:
+ # value must match, convert to string and compare
+ if (value != attr
+ and str(value).lower() != str(attr).lower()):
+ match = False
+ break
+ any = any or match
+ if any:
+ # not True because we don't want to FORCE the selection of the
+ # item, only say that it is acceptable
+ return None
+ return False
+
+ def wantFunction(self, function):
+ """Accept the function if its attributes match.
+ """
+ return self.validateAttrib(function)
+
+ def wantMethod(self, method):
+ """Accept the method if its attributes match.
+ """
+ try:
+ cls = method.im_class
+ except AttributeError:
+ return False
+ return self.validateAttrib(method, cls)
diff --git a/lib/spack/external/nose/plugins/base.py b/lib/spack/external/nose/plugins/base.py
new file mode 100644
index 0000000000..f09beb696f
--- /dev/null
+++ b/lib/spack/external/nose/plugins/base.py
@@ -0,0 +1,725 @@
+import os
+import textwrap
+from optparse import OptionConflictError
+from warnings import warn
+from nose.util import tolist
+
+class Plugin(object):
+ """Base class for nose plugins. It's recommended but not *necessary* to
+ subclass this class to create a plugin, but all plugins *must* implement
+ `options(self, parser, env)` and `configure(self, options, conf)`, and
+ must have the attributes `enabled`, `name` and `score`. The `name`
+ attribute may contain hyphens ('-').
+
+ Plugins should not be enabled by default.
+
+ Subclassing Plugin (and calling the superclass methods in
+ __init__, configure, and options, if you override them) will give
+ your plugin some friendly default behavior:
+
+ * A --with-$name option will be added to the command line interface
+ to enable the plugin, and a corresponding environment variable
+ will be used as the default value. The plugin class's docstring
+ will be used as the help for this option.
+ * The plugin will not be enabled unless this option is selected by
+ the user.
+ """
+ can_configure = False
+ enabled = False
+ enableOpt = None
+ name = None
+ score = 100
+
+ def __init__(self):
+ if self.name is None:
+ self.name = self.__class__.__name__.lower()
+ if self.enableOpt is None:
+ self.enableOpt = "enable_plugin_%s" % self.name.replace('-', '_')
+
+ def addOptions(self, parser, env=None):
+ """Add command-line options for this plugin.
+
+ The base plugin class adds --with-$name by default, used to enable the
+ plugin.
+
+ .. warning :: Don't implement addOptions unless you want to override
+ all default option handling behavior, including
+ warnings for conflicting options. Implement
+ :meth:`options
+ <nose.plugins.base.IPluginInterface.options>`
+ instead.
+ """
+ self.add_options(parser, env)
+
+ def add_options(self, parser, env=None):
+ """Non-camel-case version of func name for backwards compatibility.
+
+ .. warning ::
+
+ DEPRECATED: Do not use this method,
+ use :meth:`options <nose.plugins.base.IPluginInterface.options>`
+ instead.
+
+ """
+ # FIXME raise deprecation warning if wasn't called by wrapper
+ if env is None:
+ env = os.environ
+ try:
+ self.options(parser, env)
+ self.can_configure = True
+ except OptionConflictError, e:
+ warn("Plugin %s has conflicting option string: %s and will "
+ "be disabled" % (self, e), RuntimeWarning)
+ self.enabled = False
+ self.can_configure = False
+
+ def options(self, parser, env):
+ """Register commandline options.
+
+ Implement this method for normal options behavior with protection from
+ OptionConflictErrors. If you override this method and want the default
+ --with-$name option to be registered, be sure to call super().
+ """
+ env_opt = 'NOSE_WITH_%s' % self.name.upper()
+ env_opt = env_opt.replace('-', '_')
+ parser.add_option("--with-%s" % self.name,
+ action="store_true",
+ dest=self.enableOpt,
+ default=env.get(env_opt),
+ help="Enable plugin %s: %s [%s]" %
+ (self.__class__.__name__, self.help(), env_opt))
+
+ def configure(self, options, conf):
+ """Configure the plugin and system, based on selected options.
+
+ The base plugin class sets the plugin to enabled if the enable option
+ for the plugin (self.enableOpt) is true.
+ """
+ if not self.can_configure:
+ return
+ self.conf = conf
+ if hasattr(options, self.enableOpt):
+ self.enabled = getattr(options, self.enableOpt)
+
+ def help(self):
+ """Return help for this plugin. This will be output as the help
+ section of the --with-$name option that enables the plugin.
+ """
+ if self.__class__.__doc__:
+ # doc sections are often indented; compress the spaces
+ return textwrap.dedent(self.__class__.__doc__)
+ return "(no help available)"
+
+    # Compatibility shim
+ def tolist(self, val):
+ warn("Plugin.tolist is deprecated. Use nose.util.tolist instead",
+ DeprecationWarning)
+ return tolist(val)
+
+
+class IPluginInterface(object):
+ """
+ IPluginInterface describes the plugin API. Do not subclass or use this
+ class directly.
+ """
+ def __new__(cls, *arg, **kw):
+ raise TypeError("IPluginInterface class is for documentation only")
+
+ def addOptions(self, parser, env):
+ """Called to allow plugin to register command-line options with the
+ parser. DO NOT return a value from this method unless you want to stop
+ all other plugins from setting their options.
+
+ .. warning ::
+
+ DEPRECATED -- implement
+ :meth:`options <nose.plugins.base.IPluginInterface.options>` instead.
+ """
+ pass
+ add_options = addOptions
+ add_options.deprecated = True
+
+ def addDeprecated(self, test):
+ """Called when a deprecated test is seen. DO NOT return a value
+ unless you want to stop other plugins from seeing the deprecated
+ test.
+
+ .. warning :: DEPRECATED -- check error class in addError instead
+ """
+ pass
+ addDeprecated.deprecated = True
+
+ def addError(self, test, err):
+ """Called when a test raises an uncaught exception. DO NOT return a
+ value unless you want to stop other plugins from seeing that the
+ test has raised an error.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ addError.changed = True
+
+ def addFailure(self, test, err):
+ """Called when a test fails. DO NOT return a value unless you
+ want to stop other plugins from seeing that the test has failed.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+        :param err: sys.exc_info() tuple
+        :type err: 3-tuple
+ """
+ pass
+ addFailure.changed = True
+
+ def addSkip(self, test):
+ """Called when a test is skipped. DO NOT return a value unless
+ you want to stop other plugins from seeing the skipped test.
+
+ .. warning:: DEPRECATED -- check error class in addError instead
+ """
+ pass
+ addSkip.deprecated = True
+
+ def addSuccess(self, test):
+ """Called when a test passes. DO NOT return a value unless you
+ want to stop other plugins from seeing the passing test.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ addSuccess.changed = True
+
+ def afterContext(self):
+ """Called after a context (generally a module) has been
+ lazy-loaded, imported, setup, had its tests loaded and
+ executed, and torn down.
+ """
+ pass
+ afterContext._new = True
+
+ def afterDirectory(self, path):
+ """Called after all tests have been loaded from directory at path
+ and run.
+
+ :param path: the directory that has finished processing
+ :type path: string
+ """
+ pass
+ afterDirectory._new = True
+
+ def afterImport(self, filename, module):
+ """Called after module is imported from filename. afterImport
+ is called even if the import failed.
+
+ :param filename: The file that was loaded
+ :type filename: string
+ :param module: The name of the module
+ :type module: string
+ """
+ pass
+ afterImport._new = True
+
+ def afterTest(self, test):
+ """Called after the test has been run and the result recorded
+ (after stopTest).
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ afterTest._new = True
+
+ def beforeContext(self):
+ """Called before a context (generally a module) is
+ examined. Because the context is not yet loaded, plugins don't
+ get to know what the context is; so any context operations
+ should use a stack that is pushed in `beforeContext` and popped
+ in `afterContext` to ensure they operate symmetrically.
+
+ `beforeContext` and `afterContext` are mainly useful for tracking
+ and restoring global state around possible changes from within a
+ context, whatever the context may be. If you need to operate on
+ contexts themselves, see `startContext` and `stopContext`, which
+ are passed the context in question, but are called after
+ it has been loaded (imported in the module case).
+ """
+ pass
+ beforeContext._new = True
+
+ def beforeDirectory(self, path):
+ """Called before tests are loaded from directory at path.
+
+ :param path: the directory that is about to be processed
+ """
+ pass
+ beforeDirectory._new = True
+
+ def beforeImport(self, filename, module):
+ """Called before module is imported from filename.
+
+ :param filename: The file that will be loaded
+ :param module: The name of the module found in file
+ :type module: string
+ """
+ beforeImport._new = True
+
+ def beforeTest(self, test):
+ """Called before the test is run (before startTest).
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ beforeTest._new = True
+
+ def begin(self):
+ """Called before any tests are collected or run. Use this to
+ perform any setup needed before testing begins.
+ """
+ pass
+
+ def configure(self, options, conf):
+ """Called after the command line has been parsed, with the
+ parsed options and the config container. Here, implement any
+ config storage or changes to state or operation that are set
+ by command line options.
+
+ DO NOT return a value from this method unless you want to
+ stop all other plugins from being configured.
+ """
+ pass
+
+ def finalize(self, result):
+ """Called after all report output, including output from all
+ plugins, has been sent to the stream. Use this to print final
+ test results or perform final cleanup. Return None to allow
+ other plugins to continue printing, or any other value to stop
+ them.
+
+ :param result: test result object
+
+ .. Note:: When tests are run under a test runner other than
+ :class:`nose.core.TextTestRunner`, such as
+ via ``python setup.py test``, this method may be called
+ **before** the default report output is sent.
+ """
+ pass
+
+ def describeTest(self, test):
+ """Return a test description.
+
+ Called by :meth:`nose.case.Test.shortDescription`.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ describeTest._new = True
+
+ def formatError(self, test, err):
+ """Called in result.addError, before plugin.addError. If you
+ want to replace or modify the error tuple, return a new error
+ tuple, otherwise return err, the original error tuple.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ formatError._new = True
+ formatError.chainable = True
+ # test arg is not chainable
+ formatError.static_args = (True, False)
+
+ def formatFailure(self, test, err):
+ """Called in result.addFailure, before plugin.addFailure. If you
+ want to replace or modify the error tuple, return a new error
+ tuple, otherwise return err, the original error tuple.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ formatFailure._new = True
+ formatFailure.chainable = True
+ # test arg is not chainable
+ formatFailure.static_args = (True, False)
+
+ def handleError(self, test, err):
+ """Called on addError. To handle the error yourself and prevent normal
+ error processing, return a true value.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ handleError._new = True
+
+ def handleFailure(self, test, err):
+ """Called on addFailure. To handle the failure yourself and
+ prevent normal failure processing, return a true value.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ handleFailure._new = True
+
+ def loadTestsFromDir(self, path):
+ """Return iterable of tests from a directory. May be a
+ generator. Each item returned must be a runnable
+ unittest.TestCase (or subclass) instance or suite instance.
+ Return None if your plugin cannot collect any tests from
+ directory.
+
+ :param path: The path to the directory.
+ """
+ pass
+ loadTestsFromDir.generative = True
+ loadTestsFromDir._new = True
+
+ def loadTestsFromModule(self, module, path=None):
+ """Return iterable of tests in a module. May be a
+ generator. Each item returned must be a runnable
+ unittest.TestCase (or subclass) instance.
+ Return None if your plugin cannot
+ collect any tests from module.
+
+ :param module: The module object
+ :type module: python module
+ :param path: the path of the module to search, to distinguish from
+ namespace package modules
+
+ .. note::
+
+ NEW. The ``path`` parameter will only be passed by nose 0.11
+ or above.
+ """
+ pass
+ loadTestsFromModule.generative = True
+
+ def loadTestsFromName(self, name, module=None, importPath=None):
+ """Return tests in this file or module. Return None if you are not able
+ to load any tests, or an iterable if you are. May be a
+ generator.
+
+ :param name: The test name. May be a file or module name plus a test
+ callable. Use split_test_name to split into parts. Or it might
+ be some crazy name of your own devising, in which case, do
+ whatever you want.
+ :param module: Module from which the name is to be loaded
+ :param importPath: Path from which file (must be a python module) was
+ found
+
+ .. warning:: DEPRECATED: this argument will NOT be passed.
+ """
+ pass
+ loadTestsFromName.generative = True
+
+ def loadTestsFromNames(self, names, module=None):
+ """Return a tuple of (tests loaded, remaining names). Return
+ None if you are not able to load any tests. Multiple plugins
+ may implement loadTestsFromNames; the remaining name list from
+ each will be passed to the next as input.
+
+ :param names: List of test names.
+ :type names: iterable
+ :param module: Module from which the names are to be loaded
+ """
+ pass
+ loadTestsFromNames._new = True
+ loadTestsFromNames.chainable = True
+
+ def loadTestsFromFile(self, filename):
+ """Return tests in this file. Return None if you are not
+ interested in loading any tests, or an iterable if you are and
+ can load some. May be a generator. *If you are interested in
+ loading tests from the file and encounter no errors, but find
+ no tests, yield False or return [False].*
+
+ .. Note:: This method replaces loadTestsFromPath from the 0.9
+ API.
+
+ :param filename: The full path to the file or directory.
+ """
+ pass
+ loadTestsFromFile.generative = True
+ loadTestsFromFile._new = True
+
+ def loadTestsFromPath(self, path):
+ """
+ .. warning:: DEPRECATED -- use loadTestsFromFile instead
+ """
+ pass
+ loadTestsFromPath.deprecated = True
+
+ def loadTestsFromTestCase(self, cls):
+ """Return tests in this test case class. Return None if you are
+ not able to load any tests, or an iterable if you are. May be a
+ generator.
+
+        :param cls: The test case class. Must be a subclass of
+ :class:`unittest.TestCase`.
+ """
+ pass
+ loadTestsFromTestCase.generative = True
+
+ def loadTestsFromTestClass(self, cls):
+ """Return tests in this test class. Class will *not* be a
+ unittest.TestCase subclass. Return None if you are not able to
+ load any tests, an iterable if you are. May be a generator.
+
+        :param cls: The test case class. Must **not** be a subclass of
+ :class:`unittest.TestCase`.
+ """
+ pass
+ loadTestsFromTestClass._new = True
+ loadTestsFromTestClass.generative = True
+
+ def makeTest(self, obj, parent):
+ """Given an object and its parent, return or yield one or more
+ test cases. Each test must be a unittest.TestCase (or subclass)
+ instance. This is called before default test loading to allow
+ plugins to load an alternate test case or cases for an
+ object. May be a generator.
+
+ :param obj: The object to be made into a test
+ :param parent: The parent of obj (eg, for a method, the class)
+ """
+ pass
+ makeTest._new = True
+ makeTest.generative = True
+
+ def options(self, parser, env):
+ """Called to allow plugin to register command line
+ options with the parser.
+
+ DO NOT return a value from this method unless you want to stop
+ all other plugins from setting their options.
+
+ :param parser: options parser instance
+        :type parser: :class:`optparse.OptionParser`
+ :param env: environment, default is os.environ
+ """
+ pass
+ options._new = True
+
+ def prepareTest(self, test):
+ """Called before the test is run by the test runner. Please
+ note the article *the* in the previous sentence: prepareTest
+ is called *only once*, and is passed the test case or test
+ suite that the test runner will execute. It is *not* called
+ for each individual test case. If you return a non-None value,
+ that return value will be run as the test. Use this hook to
+ wrap or decorate the test with another function. If you need
+ to modify or wrap individual test cases, use `prepareTestCase`
+ instead.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+
+ def prepareTestCase(self, test):
+ """Prepare or wrap an individual test case. Called before
+ execution of the test. The test passed here is a
+ nose.case.Test instance; the case to be executed is in the
+ test attribute of the passed case. To modify the test to be
+ run, you should return a callable that takes one argument (the
+ test result object) -- it is recommended that you *do not*
+ side-effect the nose.case.Test instance you have been passed.
+
+ Keep in mind that when you replace the test callable you are
+ replacing the run() method of the test case -- including the
+ exception handling and result calls, etc.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ prepareTestCase._new = True
+
+ def prepareTestLoader(self, loader):
+ """Called before tests are loaded. To replace the test loader,
+ return a test loader. To allow other plugins to process the
+ test loader, return None. Only one plugin may replace the test
+ loader. Only valid when using nose.TestProgram.
+
+ :param loader: :class:`nose.loader.TestLoader`
+ (or other loader) instance
+ """
+ pass
+ prepareTestLoader._new = True
+
+ def prepareTestResult(self, result):
+ """Called before the first test is run. To use a different
+ test result handler for all tests than the given result,
+ return a test result handler. NOTE however that this handler
+ will only be seen by tests, that is, inside of the result
+ proxy system. The TestRunner and TestProgram -- whether nose's
+ or other -- will continue to see the original result
+ handler. For this reason, it is usually better to monkeypatch
+ the result (for instance, if you want to handle some
+ exceptions in a unique way). Only one plugin may replace the
+ result, but many may monkeypatch it. If you want to
+ monkeypatch and stop other plugins from doing so, monkeypatch
+ and return the patched result.
+
+ :param result: :class:`nose.result.TextTestResult`
+ (or other result) instance
+ """
+ pass
+ prepareTestResult._new = True
+
+ def prepareTestRunner(self, runner):
+ """Called before tests are run. To replace the test runner,
+ return a test runner. To allow other plugins to process the
+ test runner, return None. Only valid when using nose.TestProgram.
+
+ :param runner: :class:`nose.core.TextTestRunner`
+ (or other runner) instance
+ """
+ pass
+ prepareTestRunner._new = True
+
+ def report(self, stream):
+ """Called after all error output has been printed. Print your
+ plugin's report to the provided stream. Return None to allow
+ other plugins to print reports, any other value to stop them.
+
+ :param stream: stream object; send your output here
+ :type stream: file-like object
+ """
+ pass
+
+ def setOutputStream(self, stream):
+ """Called before test output begins. To direct test output to a
+ new stream, return a stream object, which must implement a
+ `write(msg)` method. If you only want to note the stream, not
+ capture or redirect it, then return None.
+
+ :param stream: stream object; send your output here
+ :type stream: file-like object
+ """
+
+ def startContext(self, context):
+ """Called before context setup and the running of tests in the
+ context. Note that tests have already been *loaded* from the
+ context before this call.
+
+ :param context: the context about to be setup. May be a module or
+ class, or any other object that contains tests.
+ """
+ pass
+ startContext._new = True
+
+ def startTest(self, test):
+ """Called before each test is run. DO NOT return a value unless
+ you want to stop other plugins from seeing the test start.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+
+ def stopContext(self, context):
+ """Called after the tests in a context have run and the
+ context has been torn down.
+
+ :param context: the context that has been torn down. May be a module or
+ class, or any other object that contains tests.
+ """
+ pass
+ stopContext._new = True
+
+ def stopTest(self, test):
+ """Called after each test is run. DO NOT return a value unless
+ you want to stop other plugins from seeing that the test has stopped.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+
+ def testName(self, test):
+ """Return a short test name. Called by `nose.case.Test.__str__`.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ testName._new = True
+
+ def wantClass(self, cls):
+ """Return true if you want the main test selector to collect
+ tests from this class, false if you don't, and None if you don't
+ care.
+
+ :param cls: The class being examined by the selector
+ """
+ pass
+
+ def wantDirectory(self, dirname):
+ """Return true if you want test collection to descend into this
+ directory, false if you do not, and None if you don't care.
+
+ :param dirname: Full path to directory being examined by the selector
+ """
+ pass
+
+ def wantFile(self, file):
+ """Return true if you want to collect tests from this file,
+ false if you do not, and None if you don't care.
+
+ Change from 0.9: The optional package parameter is no longer passed.
+
+ :param file: Full path to file being examined by the selector
+ """
+ pass
+
+ def wantFunction(self, function):
+ """Return true to collect this function as a test, false to
+ prevent it from being collected, and None if you don't care.
+
+ :param function: The function object being examined by the selector
+ """
+ pass
+
+ def wantMethod(self, method):
+ """Return true to collect this method as a test, false to
+ prevent it from being collected, and None if you don't care.
+
+ :param method: The method object being examined by the selector
+ :type method: unbound method
+ """
+ pass
+
+ def wantModule(self, module):
+ """Return true if you want to collection to descend into this
+ module, false to prevent the collector from descending into the
+ module, and None if you don't care.
+
+ :param module: The module object being examined by the selector
+ :type module: python module
+ """
+ pass
+
+ def wantModuleTests(self, module):
+ """
+ .. warning:: DEPRECATED -- this method will not be called, it has
+ been folded into wantModule.
+ """
+ pass
+ wantModuleTests.deprecated = True
+
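+# Illustrative sketch (not part of nose): a minimal selector plugin built
+# on the want* hooks documented above. The name "SmokeOnly" and the
+# 'smoke' naming convention are hypothetical; Plugin is defined earlier
+# in this module.
+class SmokeOnly(Plugin):
+    """Collect only files and functions whose names mention 'smoke'."""
+    def wantFile(self, file):
+        # True to collect, False to reject, None to let other plugins
+        # or the default selector decide.
+        if file.endswith('.py'):
+            return ('smoke' in file) or None
+        return None
+
+    def wantFunction(self, function):
+        return ('smoke' in function.__name__) or None
+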
diff --git a/lib/spack/external/nose/plugins/builtin.py b/lib/spack/external/nose/plugins/builtin.py
new file mode 100644
index 0000000000..4fcc0018ad
--- /dev/null
+++ b/lib/spack/external/nose/plugins/builtin.py
@@ -0,0 +1,34 @@
+"""
+Lists builtin plugins.
+"""
+plugins = []
+builtins = (
+ ('nose.plugins.attrib', 'AttributeSelector'),
+ ('nose.plugins.capture', 'Capture'),
+ ('nose.plugins.logcapture', 'LogCapture'),
+ ('nose.plugins.cover', 'Coverage'),
+ ('nose.plugins.debug', 'Pdb'),
+ ('nose.plugins.deprecated', 'Deprecated'),
+ ('nose.plugins.doctests', 'Doctest'),
+ ('nose.plugins.isolate', 'IsolationPlugin'),
+ ('nose.plugins.failuredetail', 'FailureDetail'),
+ ('nose.plugins.prof', 'Profile'),
+ ('nose.plugins.skip', 'Skip'),
+ ('nose.plugins.testid', 'TestId'),
+ ('nose.plugins.multiprocess', 'MultiProcess'),
+ ('nose.plugins.xunit', 'Xunit'),
+ ('nose.plugins.allmodules', 'AllModules'),
+ ('nose.plugins.collect', 'CollectOnly'),
+ )
+
+for module, cls in builtins:
+ try:
+ plugmod = __import__(module, globals(), locals(), [cls])
+ except KeyboardInterrupt:
+ raise
+ except:
+ continue
+ plug = getattr(plugmod, cls)
+ plugins.append(plug)
+ globals()[cls] = plug
+
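+if __name__ == '__main__':
+    # Illustrative check: print the builtin plugin classes that imported
+    # cleanly above; each is also exposed as a module attribute
+    # (e.g. nose.plugins.builtin.Capture).
+    print sorted(p.__name__ for p in plugins)
+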
diff --git a/lib/spack/external/nose/plugins/capture.py b/lib/spack/external/nose/plugins/capture.py
new file mode 100644
index 0000000000..fa4e5dcaaf
--- /dev/null
+++ b/lib/spack/external/nose/plugins/capture.py
@@ -0,0 +1,115 @@
+"""
+This plugin captures stdout during test execution. If the test fails
+or raises an error, the captured output will be appended to the error
+or failure output. It is enabled by default but can be disabled with
+the options ``-s`` or ``--nocapture``.
+
+:Options:
+ ``--nocapture``
+ Don't capture stdout (any stdout output will be printed immediately)
+
+"""
+import logging
+import os
+import sys
+from nose.plugins.base import Plugin
+from nose.pyversion import exc_to_unicode, force_unicode
+from nose.util import ln
+from StringIO import StringIO
+
+
+log = logging.getLogger(__name__)
+
+class Capture(Plugin):
+ """
+ Output capture plugin. Enabled by default. Disable with ``-s`` or
+ ``--nocapture``. This plugin captures stdout during test execution,
+ appending any output captured to the error or failure output,
+ should the test fail or raise an error.
+ """
+ enabled = True
+ env_opt = 'NOSE_NOCAPTURE'
+ name = 'capture'
+ score = 1600
+
+ def __init__(self):
+ self.stdout = []
+ self._buf = None
+
+ def options(self, parser, env):
+ """Register commandline options
+ """
+ parser.add_option(
+ "-s", "--nocapture", action="store_false",
+ default=not env.get(self.env_opt), dest="capture",
+ help="Don't capture stdout (any stdout output "
+ "will be printed immediately) [NOSE_NOCAPTURE]")
+
+ def configure(self, options, conf):
+ """Configure plugin. Plugin is enabled by default.
+ """
+ self.conf = conf
+ if not options.capture:
+ self.enabled = False
+
+ def afterTest(self, test):
+ """Clear capture buffer.
+ """
+ self.end()
+ self._buf = None
+
+ def begin(self):
+ """Replace sys.stdout with capture buffer.
+ """
+ self.start() # get an early handle on sys.stdout
+
+ def beforeTest(self, test):
+ """Flush capture buffer.
+ """
+ self.start()
+
+ def formatError(self, test, err):
+ """Add captured output to error report.
+ """
+ test.capturedOutput = output = self.buffer
+ self._buf = None
+ if not output:
+ # Don't return None, as that would prevent other
+ # formatters from running and discard earlier formatters'
+ # output; instead, return the err we got
+ return err
+ ec, ev, tb = err
+ return (ec, self.addCaptureToErr(ev, output), tb)
+
+ def formatFailure(self, test, err):
+ """Add captured output to failure report.
+ """
+ return self.formatError(test, err)
+
+ def addCaptureToErr(self, ev, output):
+ ev = exc_to_unicode(ev)
+ output = force_unicode(output)
+ return u'\n'.join([ev, ln(u'>> begin captured stdout <<'),
+ output, ln(u'>> end captured stdout <<')])
+
+ def start(self):
+ self.stdout.append(sys.stdout)
+ self._buf = StringIO()
+ sys.stdout = self._buf
+
+ def end(self):
+ if self.stdout:
+ sys.stdout = self.stdout.pop()
+
+ def finalize(self, result):
+ """Restore stdout.
+ """
+ while self.stdout:
+ self.end()
+
+ def _get_buffer(self):
+ if self._buf is not None:
+ return self._buf.getvalue()
+
+ buffer = property(_get_buffer, None, None,
+ """Captured stdout output.""")
diff --git a/lib/spack/external/nose/plugins/collect.py b/lib/spack/external/nose/plugins/collect.py
new file mode 100644
index 0000000000..6f9f0faa77
--- /dev/null
+++ b/lib/spack/external/nose/plugins/collect.py
@@ -0,0 +1,94 @@
+"""
+This plugin bypasses the actual execution of tests, and instead just collects
+test names. Fixtures are also bypassed, so running nosetests with the
+collection plugin enabled should be very quick.
+
+This plugin is useful in combination with the testid plugin (``--with-id``).
+Run both together to get an indexed list of all tests, which will enable you to
+run individual tests by index number.
+
+This plugin is also useful for counting tests in a test suite, and making
+people watching your demo think all of your tests pass.
+"""
+from nose.plugins.base import Plugin
+from nose.case import Test
+import logging
+import unittest
+
+log = logging.getLogger(__name__)
+
+
+class CollectOnly(Plugin):
+ """
+ Collect and output test names only, don't run any tests.
+ """
+ name = "collect-only"
+ enableOpt = 'collect_only'
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ parser.add_option('--collect-only',
+ action='store_true',
+ dest=self.enableOpt,
+ default=env.get('NOSE_COLLECT_ONLY'),
+ help="Enable collect-only: %s [COLLECT_ONLY]" %
+ (self.help()))
+
+ def prepareTestLoader(self, loader):
+ """Install collect-only suite class in TestLoader.
+ """
+ # Disable context awareness
+ log.debug("Preparing test loader")
+ loader.suiteClass = TestSuiteFactory(self.conf)
+
+ def prepareTestCase(self, test):
+ """Replace actual test with dummy that always passes.
+ """
+ # Return something that always passes
+ log.debug("Preparing test case %s", test)
+ if not isinstance(test, Test):
+ return
+ def run(result):
+ # We need to make these plugin calls because there won't be
+ # a result proxy, due to using a stripped-down test suite
+ self.conf.plugins.startTest(test)
+ result.startTest(test)
+ self.conf.plugins.addSuccess(test)
+ result.addSuccess(test)
+ self.conf.plugins.stopTest(test)
+ result.stopTest(test)
+ return run
+
+
+class TestSuiteFactory:
+ """
+ Factory for producing configured test suites.
+ """
+ def __init__(self, conf):
+ self.conf = conf
+
+ def __call__(self, tests=(), **kw):
+ return TestSuite(tests, conf=self.conf)
+
+
+class TestSuite(unittest.TestSuite):
+ """
+ Basic test suite that bypasses most proxy and plugin calls, but does
+ wrap tests in a nose.case.Test so prepareTestCase will be called.
+ """
+ def __init__(self, tests=(), conf=None):
+ self.conf = conf
+ # Exec lazy suites: makes discovery depth-first
+ if callable(tests):
+ tests = tests()
+ log.debug("TestSuite(%r)", tests)
+ unittest.TestSuite.__init__(self, tests)
+
+ def addTest(self, test):
+ log.debug("Add test %s", test)
+ if isinstance(test, unittest.TestSuite):
+ self._tests.append(test)
+ else:
+ self._tests.append(Test(test, config=self.conf))
+
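+if __name__ == '__main__':
+    # Illustrative usage: list test names without running them, the
+    # programmatic equivalent of `nosetests --collect-only -v`.
+    import nose
+    nose.main(argv=['nosetests', '--collect-only', '-v'])
+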
diff --git a/lib/spack/external/nose/plugins/cover.py b/lib/spack/external/nose/plugins/cover.py
new file mode 100644
index 0000000000..fbe2e30dcd
--- /dev/null
+++ b/lib/spack/external/nose/plugins/cover.py
@@ -0,0 +1,271 @@
+"""If you have Ned Batchelder's coverage_ module installed, you may activate a
+coverage report with the ``--with-coverage`` switch or NOSE_WITH_COVERAGE
+environment variable. The coverage report will cover any python source module
+imported after the start of the test run, excluding modules that match
+testMatch. If you want to include those modules too, use the ``--cover-tests``
+switch, or set the NOSE_COVER_TESTS environment variable to a true value. To
+restrict the coverage report to modules from a particular package or packages,
+use the ``--cover-package`` switch or the NOSE_COVER_PACKAGE environment
+variable.
+
+.. _coverage: http://www.nedbatchelder.com/code/modules/coverage.html
+"""
+import logging
+import re
+import sys
+import StringIO
+from nose.plugins.base import Plugin
+from nose.util import src, tolist
+
+log = logging.getLogger(__name__)
+
+
+class Coverage(Plugin):
+ """
+ Activate a coverage report using Ned Batchelder's coverage module.
+ """
+ coverTests = False
+ coverPackages = None
+ coverInstance = None
+ coverErase = False
+ coverMinPercentage = None
+ score = 200
+ status = {}
+
+ def options(self, parser, env):
+ """
+ Add options to command line.
+ """
+ super(Coverage, self).options(parser, env)
+ parser.add_option("--cover-package", action="append",
+ default=env.get('NOSE_COVER_PACKAGE'),
+ metavar="PACKAGE",
+ dest="cover_packages",
+ help="Restrict coverage output to selected packages "
+ "[NOSE_COVER_PACKAGE]")
+ parser.add_option("--cover-erase", action="store_true",
+ default=env.get('NOSE_COVER_ERASE'),
+ dest="cover_erase",
+ help="Erase previously collected coverage "
+ "statistics before run")
+ parser.add_option("--cover-tests", action="store_true",
+ dest="cover_tests",
+ default=env.get('NOSE_COVER_TESTS'),
+ help="Include test modules in coverage report "
+ "[NOSE_COVER_TESTS]")
+ parser.add_option("--cover-min-percentage", action="store",
+ dest="cover_min_percentage",
+ default=env.get('NOSE_COVER_MIN_PERCENTAGE'),
+ help="Minimum percentage of coverage for tests "
+ "to pass [NOSE_COVER_MIN_PERCENTAGE]")
+ parser.add_option("--cover-inclusive", action="store_true",
+ dest="cover_inclusive",
+ default=env.get('NOSE_COVER_INCLUSIVE'),
+ help="Include all python files under working "
+ "directory in coverage report. Useful for "
+ "discovering holes in test coverage if not all "
+ "files are imported by the test suite. "
+ "[NOSE_COVER_INCLUSIVE]")
+ parser.add_option("--cover-html", action="store_true",
+ default=env.get('NOSE_COVER_HTML'),
+ dest='cover_html',
+ help="Produce HTML coverage information")
+ parser.add_option('--cover-html-dir', action='store',
+ default=env.get('NOSE_COVER_HTML_DIR', 'cover'),
+ dest='cover_html_dir',
+ metavar='DIR',
+ help='Produce HTML coverage information in dir')
+ parser.add_option("--cover-branches", action="store_true",
+ default=env.get('NOSE_COVER_BRANCHES'),
+ dest="cover_branches",
+ help="Include branch coverage in coverage report "
+ "[NOSE_COVER_BRANCHES]")
+ parser.add_option("--cover-xml", action="store_true",
+ default=env.get('NOSE_COVER_XML'),
+ dest="cover_xml",
+ help="Produce XML coverage information")
+ parser.add_option("--cover-xml-file", action="store",
+ default=env.get('NOSE_COVER_XML_FILE', 'coverage.xml'),
+ dest="cover_xml_file",
+ metavar="FILE",
+ help="Produce XML coverage information in file")
+
+ def configure(self, options, conf):
+ """
+ Configure plugin.
+ """
+ try:
+ self.status.pop('active')
+ except KeyError:
+ pass
+ super(Coverage, self).configure(options, conf)
+ if self.enabled:
+ try:
+ import coverage
+ if not hasattr(coverage, 'coverage'):
+ raise ImportError("Unable to import coverage module")
+ except ImportError:
+ log.error("Coverage not available: "
+ "unable to import coverage module")
+ self.enabled = False
+ return
+ self.conf = conf
+ self.coverErase = options.cover_erase
+ self.coverTests = options.cover_tests
+ self.coverPackages = []
+ if options.cover_packages:
+ if isinstance(options.cover_packages, (list, tuple)):
+ cover_packages = options.cover_packages
+ else:
+ cover_packages = [options.cover_packages]
+ for pkgs in [tolist(x) for x in cover_packages]:
+ self.coverPackages.extend(pkgs)
+ self.coverInclusive = options.cover_inclusive
+ if self.coverPackages:
+ log.info("Coverage report will include only packages: %s",
+ self.coverPackages)
+ self.coverHtmlDir = None
+ if options.cover_html:
+ self.coverHtmlDir = options.cover_html_dir
+ log.debug('Will put HTML coverage report in %s', self.coverHtmlDir)
+ self.coverBranches = options.cover_branches
+ self.coverXmlFile = None
+ if options.cover_min_percentage:
+ self.coverMinPercentage = int(options.cover_min_percentage.rstrip('%'))
+ if options.cover_xml:
+ self.coverXmlFile = options.cover_xml_file
+ log.debug('Will put XML coverage report in %s', self.coverXmlFile)
+ if self.enabled:
+ self.status['active'] = True
+ self.coverInstance = coverage.coverage(auto_data=False,
+ branch=self.coverBranches, data_suffix=conf.worker,
+ source=self.coverPackages)
+ self.coverInstance._warn_no_data = False
+ self.coverInstance.is_worker = conf.worker
+ self.coverInstance.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
+
+ log.debug("Coverage begin")
+ self.skipModules = sys.modules.keys()[:]
+ if self.coverErase:
+ log.debug("Clearing previously collected coverage statistics")
+ self.coverInstance.combine()
+ self.coverInstance.erase()
+
+ if not self.coverInstance.is_worker:
+ self.coverInstance.load()
+ self.coverInstance.start()
+
+
+ def beforeTest(self, *args, **kwargs):
+ """
+ Begin recording coverage information.
+ """
+
+ if self.coverInstance.is_worker:
+ self.coverInstance.load()
+ self.coverInstance.start()
+
+ def afterTest(self, *args, **kwargs):
+ """
+ Stop recording coverage information.
+ """
+
+ if self.coverInstance.is_worker:
+ self.coverInstance.stop()
+ self.coverInstance.save()
+
+
+ def report(self, stream):
+ """
+ Output code coverage report.
+ """
+ log.debug("Coverage report")
+ self.coverInstance.stop()
+ self.coverInstance.combine()
+ self.coverInstance.save()
+ modules = [module
+ for name, module in sys.modules.items()
+ if self.wantModuleCoverage(name, module)]
+ log.debug("Coverage report will cover modules: %s", modules)
+ self.coverInstance.report(modules, file=stream)
+
+ import coverage
+ if self.coverHtmlDir:
+ log.debug("Generating HTML coverage report")
+ try:
+ self.coverInstance.html_report(modules, self.coverHtmlDir)
+ except coverage.misc.CoverageException, e:
+ log.warning("Failed to generate HTML report: %s" % str(e))
+
+ if self.coverXmlFile:
+ log.debug("Generating XML coverage report")
+ try:
+ self.coverInstance.xml_report(modules, self.coverXmlFile)
+ except coverage.misc.CoverageException, e:
+ log.warning("Failed to generate XML report: %s" % str(e))
+
+ # make sure we have minimum required coverage
+ if self.coverMinPercentage:
+ f = StringIO.StringIO()
+ self.coverInstance.report(modules, file=f)
+
+ multiPackageRe = (r'-------\s\w+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
+ r'\s+(\d+)%\s+\d*\s{0,1}$')
+ singlePackageRe = (r'-------\s[\w./]+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
+ r'\s+(\d+)%(?:\s+[-\d, ]+)\s{0,1}$')
+
+ m = re.search(multiPackageRe, f.getvalue())
+ if m is None:
+ m = re.search(singlePackageRe, f.getvalue())
+
+ if m:
+ percentage = int(m.groups()[0])
+ if percentage < self.coverMinPercentage:
+ log.error('TOTAL Coverage did not reach minimum '
+ 'required: %d%%' % self.coverMinPercentage)
+ sys.exit(1)
+ else:
+ log.error("No total percentage was found in coverage output, "
+ "something went wrong.")
+
+
+ def wantModuleCoverage(self, name, module):
+ if not hasattr(module, '__file__'):
+ log.debug("no coverage of %s: no __file__", name)
+ return False
+ module_file = src(module.__file__)
+ if not module_file or not module_file.endswith('.py'):
+ log.debug("no coverage of %s: not a python file", name)
+ return False
+ if self.coverPackages:
+ for package in self.coverPackages:
+ if (re.findall(r'^%s\b' % re.escape(package), name)
+ and (self.coverTests
+ or not self.conf.testMatch.search(name))):
+ log.debug("coverage for %s", name)
+ return True
+ if name in self.skipModules:
+ log.debug("no coverage for %s: loaded before coverage start",
+ name)
+ return False
+ if self.conf.testMatch.search(name) and not self.coverTests:
+ log.debug("no coverage for %s: is a test", name)
+ return False
+ # accept any package that passed the previous tests, unless
+ # coverPackages is on -- in that case, if we wanted this
+ # module, we would have already returned True
+ return not self.coverPackages
+
+ def wantFile(self, file, package=None):
+ """If inclusive coverage enabled, return true for all source files
+ in wanted packages.
+ """
+ if self.coverInclusive:
+ if file.endswith(".py"):
+ if package and self.coverPackages:
+ for want in self.coverPackages:
+ if package.startswith(want):
+ return True
+ else:
+ return True
+ return None
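+
+if __name__ == '__main__':
+    # Illustrative usage: restrict coverage to one package and fail the
+    # run below a threshold ("mypkg" is a placeholder), equivalent to:
+    #   nosetests --with-coverage --cover-package=mypkg \
+    #             --cover-min-percentage=80
+    import nose
+    nose.main(argv=['nosetests', '--with-coverage',
+                    '--cover-package=mypkg',
+                    '--cover-min-percentage=80'])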
diff --git a/lib/spack/external/nose/plugins/debug.py b/lib/spack/external/nose/plugins/debug.py
new file mode 100644
index 0000000000..78243e60d0
--- /dev/null
+++ b/lib/spack/external/nose/plugins/debug.py
@@ -0,0 +1,67 @@
+"""
+This plugin provides ``--pdb`` and ``--pdb-failures`` options. The ``--pdb``
+option will drop the test runner into pdb when it encounters an error. To
+drop into pdb on failure, use ``--pdb-failures``.
+"""
+
+import pdb
+from nose.plugins.base import Plugin
+
+class Pdb(Plugin):
+ """
+ Provides --pdb and --pdb-failures options that cause the test runner to
+ drop into pdb if it encounters an error or failure, respectively.
+ """
+ enabled_for_errors = False
+ enabled_for_failures = False
+ score = 5 # run last, among builtins
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ parser.add_option(
+ "--pdb", action="store_true", dest="debugBoth",
+ default=env.get('NOSE_PDB', False),
+ help="Drop into debugger on failures or errors")
+ parser.add_option(
+ "--pdb-failures", action="store_true",
+ dest="debugFailures",
+ default=env.get('NOSE_PDB_FAILURES', False),
+ help="Drop into debugger on failures")
+ parser.add_option(
+ "--pdb-errors", action="store_true",
+ dest="debugErrors",
+ default=env.get('NOSE_PDB_ERRORS', False),
+ help="Drop into debugger on errors")
+
+ def configure(self, options, conf):
+ """Configure which kinds of exceptions trigger plugin.
+ """
+ self.conf = conf
+ self.enabled_for_errors = options.debugErrors or options.debugBoth
+ self.enabled_for_failures = options.debugFailures or options.debugBoth
+ self.enabled = self.enabled_for_failures or self.enabled_for_errors
+
+ def addError(self, test, err):
+ """Enter pdb if configured to debug errors.
+ """
+ if not self.enabled_for_errors:
+ return
+ self.debug(err)
+
+ def addFailure(self, test, err):
+ """Enter pdb if configured to debug failures.
+ """
+ if not self.enabled_for_failures:
+ return
+ self.debug(err)
+
+ def debug(self, err):
+ import sys # FIXME why is this import here?
+ ec, ev, tb = err
+ stdout = sys.stdout
+ sys.stdout = sys.__stdout__
+ try:
+ pdb.post_mortem(tb)
+ finally:
+ sys.stdout = stdout
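+
+if __name__ == '__main__':
+    # Illustrative sketch of what --pdb does on an error: hand the
+    # exception info to the debug() method above, which restores the
+    # real stdout and calls pdb.post_mortem on the traceback.
+    import sys
+    try:
+        1 / 0
+    except ZeroDivisionError:
+        Pdb().debug(sys.exc_info())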
diff --git a/lib/spack/external/nose/plugins/deprecated.py b/lib/spack/external/nose/plugins/deprecated.py
new file mode 100644
index 0000000000..461a26be63
--- /dev/null
+++ b/lib/spack/external/nose/plugins/deprecated.py
@@ -0,0 +1,45 @@
+"""
+This plugin installs a DEPRECATED error class for the :class:`DeprecatedTest`
+exception. When :class:`DeprecatedTest` is raised, the exception will be logged
+in the deprecated attribute of the result, ``D`` or ``DEPRECATED`` (verbose)
+will be output, and the exception will not be counted as an error or failure.
+It is enabled by default, but can be turned off by using ``--no-deprecated``.
+"""
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+
+class DeprecatedTest(Exception):
+ """Raise this exception to mark a test as deprecated.
+ """
+ pass
+
+
+class Deprecated(ErrorClassPlugin):
+ """
+ Installs a DEPRECATED error class for the DeprecatedTest exception. Enabled
+ by default.
+ """
+ enabled = True
+ deprecated = ErrorClass(DeprecatedTest,
+ label='DEPRECATED',
+ isfailure=False)
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ env_opt = 'NOSE_WITHOUT_DEPRECATED'
+ parser.add_option('--no-deprecated', action='store_true',
+ dest='noDeprecated', default=env.get(env_opt, False),
+ help="Disable special handling of DeprecatedTest "
+ "exceptions.")
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ if not self.can_configure:
+ return
+ self.conf = conf
+ disable = getattr(options, 'noDeprecated', False)
+ if disable:
+ self.enabled = False
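+
+# Illustrative usage (hypothetical test): raising DeprecatedTest marks the
+# test DEPRECATED instead of counting it as an error or failure.
+def _example_deprecated_test():
+    raise DeprecatedTest("superseded by the v2 API tests")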
diff --git a/lib/spack/external/nose/plugins/doctests.py b/lib/spack/external/nose/plugins/doctests.py
new file mode 100644
index 0000000000..5ef65799f3
--- /dev/null
+++ b/lib/spack/external/nose/plugins/doctests.py
@@ -0,0 +1,455 @@
+"""Use the Doctest plugin with ``--with-doctest`` or the NOSE_WITH_DOCTEST
+environment variable to enable collection and execution of :mod:`doctests
+<doctest>`. Because doctests are usually included in the tested package
+(instead of being grouped into packages or modules of their own), nose only
+looks for them in the non-test packages it discovers in the working directory.
+
+Doctests may also be placed into files other than python modules, in which
+case they can be collected and executed by using the ``--doctest-extension``
+switch or NOSE_DOCTEST_EXTENSION environment variable to indicate which file
+extension(s) to load.
+
+When loading doctests from non-module files, use the ``--doctest-fixtures``
+switch to specify how to find modules containing fixtures for the tests. A
+module name will be produced by appending the value of that switch to the base
+name of each doctest file loaded. For example, a doctest file "widgets.rst"
+with the switch ``--doctest_fixtures=_fixt`` will load fixtures from the module
+``widgets_fixt.py``.
+
+A fixtures module may define any or all of the following functions:
+
+* setup([module]) or setup_module([module])
+
+ Called before the test runs. You may raise SkipTest to skip all tests.
+
+* teardown([module]) or teardown_module([module])
+
+ Called after the test runs, if setup/setup_module did not raise an
+ unhandled exception.
+
+* setup_test(test)
+
+ Called before the test. NOTE: the argument passed is a
+ doctest.DocTest instance, *not* a unittest.TestCase.
+
+* teardown_test(test)
+
+ Called after the test, if setup_test did not raise an exception. NOTE: the
+ argument passed is a doctest.DocTest instance, *not* a unittest.TestCase.
+
+Doctests are run like any other test, with the exception that output
+capture does not work; doctest does its own output capture while running a
+test.
+
+.. note ::
+
+ See :doc:`../doc_tests/test_doctest_fixtures/doctest_fixtures` for
+ additional documentation and examples.
+
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import unittest
+from inspect import getmodule
+from nose.plugins.base import Plugin
+from nose.suite import ContextList
+from nose.util import anyp, getpackage, test_address, resolve_name, \
+ src, tolist, isproperty
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+import sys
+import __builtin__ as builtin_mod
+
+log = logging.getLogger(__name__)
+
+try:
+ import doctest
+ doctest.DocTestCase
+ # system version of doctest is acceptable, but needs a monkeypatch
+except (ImportError, AttributeError):
+ # system version is too old
+ import nose.ext.dtcompat as doctest
+
+
+#
+# Doctest and coverage don't get along, so we need to create
+# a monkeypatch that will replace the part of doctest that
+# interferes with coverage reports.
+#
+# The monkeypatch is based on this zope patch:
+# http://svn.zope.org/Zope3/trunk/src/zope/testing/doctest.py?rev=28679&r1=28703&r2=28705
+#
+_orp = doctest._OutputRedirectingPdb
+
+class NoseOutputRedirectingPdb(_orp):
+ def __init__(self, out):
+ self.__debugger_used = False
+ _orp.__init__(self, out)
+
+ def set_trace(self):
+ self.__debugger_used = True
+ _orp.set_trace(self, sys._getframe().f_back)
+
+ def set_continue(self):
+ # Calling set_continue unconditionally would break unit test
+ # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
+ if self.__debugger_used:
+ _orp.set_continue(self)
+doctest._OutputRedirectingPdb = NoseOutputRedirectingPdb
+
+
+class DoctestSuite(unittest.TestSuite):
+ """
+ Doctest suites are parallelizable at the module or file level only,
+ since they may be attached to objects that are not individually
+ addressable (like properties). This suite subclass is used when
+ loading doctests from a module to ensure that behavior.
+
+ This class is used only if the plugin is not fully prepared;
+ in normal use, the loader's suiteClass is used.
+
+ """
+ can_split = False
+
+ def __init__(self, tests=(), context=None, can_split=False):
+ self.context = context
+ self.can_split = can_split
+ unittest.TestSuite.__init__(self, tests=tests)
+
+ def address(self):
+ return test_address(self.context)
+
+ def __iter__(self):
+ # 2.3 compat
+ return iter(self._tests)
+
+ def __str__(self):
+ return str(self._tests)
+
+
+class Doctest(Plugin):
+ """
+ Activate doctest plugin to find and run doctests in non-test modules.
+ """
+ extension = None
+ suiteClass = DoctestSuite
+
+ def options(self, parser, env):
+ """Register commmandline options.
+ """
+ Plugin.options(self, parser, env)
+ parser.add_option('--doctest-tests', action='store_true',
+ dest='doctest_tests',
+ default=env.get('NOSE_DOCTEST_TESTS'),
+ help="Also look for doctests in test modules. "
+ "Note that classes, methods and functions should "
+ "have either doctests or non-doctest tests, "
+ "not both. [NOSE_DOCTEST_TESTS]")
+ parser.add_option('--doctest-extension', action="append",
+ dest="doctestExtension",
+ metavar="EXT",
+ help="Also look for doctests in files with "
+ "this extension [NOSE_DOCTEST_EXTENSION]")
+ parser.add_option('--doctest-result-variable',
+ dest='doctest_result_var',
+ default=env.get('NOSE_DOCTEST_RESULT_VAR'),
+ metavar="VAR",
+ help="Change the variable name set to the result of "
+ "the last interpreter command from the default '_'. "
+ "Can be used to avoid conflicts with the _() "
+ "function used for text translation. "
+ "[NOSE_DOCTEST_RESULT_VAR]")
+ parser.add_option('--doctest-fixtures', action="store",
+ dest="doctestFixtures",
+ metavar="SUFFIX",
+ help="Find fixtures for a doctest file in module "
+ "with this name appended to the base name "
+ "of the doctest file")
+ parser.add_option('--doctest-options', action="append",
+ dest="doctestOptions",
+ metavar="OPTIONS",
+ help="Specify options to pass to doctest. " +
+ "Eg. '+ELLIPSIS,+NORMALIZE_WHITESPACE'")
+ # Set the default as a list, if given in env; otherwise
+ # an additional value set on the command line will cause
+ # an error.
+ env_setting = env.get('NOSE_DOCTEST_EXTENSION')
+ if env_setting is not None:
+ parser.set_defaults(doctestExtension=tolist(env_setting))
+
+ def configure(self, options, config):
+ """Configure plugin.
+ """
+ Plugin.configure(self, options, config)
+ self.doctest_result_var = options.doctest_result_var
+ self.doctest_tests = options.doctest_tests
+ self.extension = tolist(options.doctestExtension)
+ self.fixtures = options.doctestFixtures
+ self.finder = doctest.DocTestFinder()
+ self.optionflags = 0
+ if options.doctestOptions:
+ flags = ",".join(options.doctestOptions).split(',')
+ for flag in flags:
+ if not flag or flag[0] not in '+-':
+ raise ValueError(
+ "Must specify doctest options with starting " +
+ "'+' or '-'. Got %s" % (flag,))
+ mode, option_name = flag[0], flag[1:]
+ option_flag = doctest.OPTIONFLAGS_BY_NAME.get(option_name)
+ if not option_flag:
+ raise ValueError("Unknown doctest option %s" %
+ (option_name,))
+ if mode == '+':
+ self.optionflags |= option_flag
+ elif mode == '-':
+ self.optionflags &= ~option_flag
+
+ def prepareTestLoader(self, loader):
+ """Capture loader's suiteClass.
+
+ This is used to create test suites from doctest files.
+
+ """
+ self.suiteClass = loader.suiteClass
+
+ def loadTestsFromModule(self, module):
+ """Load doctests from the module.
+ """
+ log.debug("loading from %s", module)
+ if not self.matches(module.__name__):
+ log.debug("Doctest doesn't want module %s", module)
+ return
+ try:
+ tests = self.finder.find(module)
+ except AttributeError:
+ log.exception("Attribute error loading from %s", module)
+ # nose allows module.__test__ = False; doctest does not and throws
+ # AttributeError
+ return
+ if not tests:
+ log.debug("No tests found in %s", module)
+ return
+ tests.sort()
+ module_file = src(module.__file__)
+ # FIXME this breaks the id plugin somehow (tests probably don't
+ # get wrapped in result proxy or something)
+ cases = []
+ for test in tests:
+ if not test.examples:
+ continue
+ if not test.filename:
+ test.filename = module_file
+ cases.append(DocTestCase(test,
+ optionflags=self.optionflags,
+ result_var=self.doctest_result_var))
+ if cases:
+ yield self.suiteClass(cases, context=module, can_split=False)
+
+ def loadTestsFromFile(self, filename):
+ """Load doctests from the file.
+
+ Tests are loaded only if filename's extension matches
+ configured doctest extension.
+
+ """
+ if self.extension and anyp(filename.endswith, self.extension):
+ name = os.path.basename(filename)
+ dh = open(filename)
+ try:
+ doc = dh.read()
+ finally:
+ dh.close()
+
+ fixture_context = None
+ globs = {'__file__': filename}
+ if self.fixtures:
+ base, ext = os.path.splitext(name)
+ dirname = os.path.dirname(filename)
+ sys.path.append(dirname)
+ fixt_mod = base + self.fixtures
+ try:
+ fixture_context = __import__(
+ fixt_mod, globals(), locals(), ["nop"])
+ except ImportError, e:
+ log.debug(
+ "Could not import %s: %s (%s)", fixt_mod, e, sys.path)
+ log.debug("Fixture module %s resolved to %s",
+ fixt_mod, fixture_context)
+ if hasattr(fixture_context, 'globs'):
+ globs = fixture_context.globs(globs)
+ parser = doctest.DocTestParser()
+ test = parser.get_doctest(
+ doc, globs=globs, name=name,
+ filename=filename, lineno=0)
+ if test.examples:
+ case = DocFileCase(
+ test,
+ optionflags=self.optionflags,
+ setUp=getattr(fixture_context, 'setup_test', None),
+ tearDown=getattr(fixture_context, 'teardown_test', None),
+ result_var=self.doctest_result_var)
+ if fixture_context:
+ yield ContextList((case,), context=fixture_context)
+ else:
+ yield case
+ else:
+ yield False # no tests to load
+
+ def makeTest(self, obj, parent):
+ """Look for doctests in the given object, which will be a
+ function, method or class.
+ """
+ name = getattr(obj, '__name__', 'Unnamed %s' % type(obj))
+ doctests = self.finder.find(obj, module=getmodule(parent), name=name)
+ if doctests:
+ for test in doctests:
+ if len(test.examples) == 0:
+ continue
+ yield DocTestCase(test, obj=obj, optionflags=self.optionflags,
+ result_var=self.doctest_result_var)
+
+ def matches(self, name):
+ # FIXME this seems wrong -- nothing is ever going to
+ # fail this test, since we're given a module NAME not FILE
+ if name == '__init__.py':
+ return False
+ # FIXME don't think we need include/exclude checks here?
+ return ((self.doctest_tests or not self.conf.testMatch.search(name)
+ or (self.conf.include
+ and filter(None,
+ [inc.search(name)
+ for inc in self.conf.include])))
+ and (not self.conf.exclude
+ or not filter(None,
+ [exc.search(name)
+ for exc in self.conf.exclude])))
+
+ def wantFile(self, file):
+ """Override to select all modules and any file ending with
+ configured doctest extension.
+ """
+ # always want .py files
+ if file.endswith('.py'):
+ return True
+ # also want files that match my extension
+ if (self.extension
+ and anyp(file.endswith, self.extension)
+ and (not self.conf.exclude
+ or not filter(None,
+ [exc.search(file)
+ for exc in self.conf.exclude]))):
+ return True
+ return None
+
+
+class DocTestCase(doctest.DocTestCase):
+ """Overrides DocTestCase to
+ provide an address() method that returns the correct address for
+ the doctest case. To provide hints for address(), an obj may also
+ be passed -- this will be used as the test object for purposes of
+ determining the test address, if it is provided.
+ """
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None, obj=None, result_var='_'):
+ self._result_var = result_var
+ self._nose_obj = obj
+ super(DocTestCase, self).__init__(
+ test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
+ checker=checker)
+
+ def address(self):
+ if self._nose_obj is not None:
+ return test_address(self._nose_obj)
+ obj = resolve_name(self._dt_test.name)
+
+ if isproperty(obj):
+ # properties have no connection to the class they are in
+ # so we can't just look 'em up, we have to first look up
+ # the class, then stick the prop on the end
+ parts = self._dt_test.name.split('.')
+ class_name = '.'.join(parts[:-1])
+ cls = resolve_name(class_name)
+ base_addr = test_address(cls)
+ return (base_addr[0], base_addr[1],
+ '.'.join([base_addr[2], parts[-1]]))
+ else:
+ return test_address(obj)
+
+ # doctests loaded via find(obj) omit the module name
+ # so we need to override id, __repr__ and shortDescription
+ # bonus: this will squash a 2.3 vs 2.4 incompatibility
+ def id(self):
+ name = self._dt_test.name
+ filename = self._dt_test.filename
+ if filename is not None:
+ pk = getpackage(filename)
+ if pk is None:
+ return name
+ if not name.startswith(pk):
+ name = "%s.%s" % (pk, name)
+ return name
+
+ def __repr__(self):
+ name = self.id()
+ name = name.split('.')
+ return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+ __str__ = __repr__
+
+ def shortDescription(self):
+ return 'Doctest: %s' % self.id()
+
+ def setUp(self):
+ if self._result_var is not None:
+ self._old_displayhook = sys.displayhook
+ sys.displayhook = self._displayhook
+ super(DocTestCase, self).setUp()
+
+ def _displayhook(self, value):
+ if value is None:
+ return
+ setattr(builtin_mod, self._result_var, value)
+ print repr(value)
+
+ def tearDown(self):
+ super(DocTestCase, self).tearDown()
+ if self._result_var is not None:
+ sys.displayhook = self._old_displayhook
+ delattr(builtin_mod, self._result_var)
+
+
+class DocFileCase(doctest.DocFileCase):
+ """Overrides to provide address() method that returns the correct
+ address for the doc file case.
+ """
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None, result_var='_'):
+ self._result_var = result_var
+ super(DocFileCase, self).__init__(
+ test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
+ checker=checker)  # pass the given checker through, not None
+
+ def address(self):
+ return (self._dt_test.filename, None, None)
+
+ def setUp(self):
+ if self._result_var is not None:
+ self._old_displayhook = sys.displayhook
+ sys.displayhook = self._displayhook
+ super(DocFileCase, self).setUp()
+
+ def _displayhook(self, value):
+ if value is None:
+ return
+ setattr(builtin_mod, self._result_var, value)
+ print repr(value)
+
+ def tearDown(self):
+ super(DocFileCase, self).tearDown()
+ if self._result_var is not None:
+ sys.displayhook = self._old_displayhook
+ delattr(builtin_mod, self._result_var)
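+
+# Illustrative sketch of a doctest fixtures module, as described in the
+# module docstring. For a doctest file widgets.rst run with
+# --doctest-fixtures=_fixt, nose imports widgets_fixt.py, which might
+# contain (all names here are placeholders):
+#
+#     def setup_module(module):
+#         # runs once before the doctests in widgets.rst
+#         module.connection = open_test_connection()
+#
+#     def setup_test(test):
+#         # `test` is a doctest.DocTest instance, not a unittest.TestCase
+#         test.globs['answer'] = 42
+#
+#     def teardown_module(module):
+#         module.connection.close()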
diff --git a/lib/spack/external/nose/plugins/errorclass.py b/lib/spack/external/nose/plugins/errorclass.py
new file mode 100644
index 0000000000..d1540e0070
--- /dev/null
+++ b/lib/spack/external/nose/plugins/errorclass.py
@@ -0,0 +1,210 @@
+"""
+ErrorClass Plugins
+------------------
+
+ErrorClass plugins provide an easy way to add support for custom
+handling of particular classes of exceptions.
+
+An ErrorClass plugin defines one or more ErrorClasses and how each is
+handled and reported on. Each error class is stored in a different
+attribute on the result, and reported separately. Each error class must
+indicate the exceptions that fall under that class, the label to use
+for reporting, and whether exceptions of the class should be
+considered as failures for the whole test run.
+
+ErrorClasses use a declarative syntax. Assign an ErrorClass to the
+attribute you wish to add to the result object, defining the
+exceptions, label and isfailure attributes. For example, to declare an
+ErrorClassPlugin that defines TodoErrors (and subclasses of TodoError)
+as an error class with the label 'TODO' that is considered a failure,
+do this:
+
+ >>> class Todo(Exception):
+ ... pass
+ >>> class TodoError(ErrorClassPlugin):
+ ... todo = ErrorClass(Todo, label='TODO', isfailure=True)
+
+The MetaErrorClass metaclass translates the ErrorClass declarations
+into the tuples used by the error handling and reporting functions in
+the result. This is an internal format and subject to change; you
+should always use the declarative syntax for attaching ErrorClasses to
+an ErrorClass plugin.
+
+ >>> TodoError.errorClasses # doctest: +ELLIPSIS
+ ((<class ...Todo...>, ('todo', 'TODO', True)),)
+
+Let's see the plugin in action. First some boilerplate.
+
+ >>> import sys
+ >>> import unittest
+ >>> try:
+ ... # 2.7+
+ ... from unittest.runner import _WritelnDecorator
+ ... except ImportError:
+ ... from unittest import _WritelnDecorator
+ ...
+ >>> buf = _WritelnDecorator(sys.stdout)
+
+Now define a test case that raises a Todo.
+
+ >>> class TestTodo(unittest.TestCase):
+ ... def runTest(self):
+ ... raise Todo("I need to test something")
+ >>> case = TestTodo()
+
+Prepare the result using our plugin. Normally this happens during the
+course of test execution within nose -- you won't be doing this
+yourself. For the purposes of this testing document, I'm stepping
+through the internal process of nose so you can see what happens at
+each step.
+
+ >>> plugin = TodoError()
+ >>> from nose.result import _TextTestResult
+ >>> result = _TextTestResult(stream=buf, descriptions=0, verbosity=2)
+ >>> plugin.prepareTestResult(result)
+
+Now run the test. TODO is printed.
+
+ >>> _ = case(result) # doctest: +ELLIPSIS
+ runTest (....TestTodo) ... TODO: I need to test something
+
+Errors and failures are empty, but todo has our test:
+
+ >>> result.errors
+ []
+ >>> result.failures
+ []
+ >>> result.todo # doctest: +ELLIPSIS
+ [(<....TestTodo testMethod=runTest>, '...Todo: I need to test something\\n')]
+ >>> result.printErrors() # doctest: +ELLIPSIS
+ <BLANKLINE>
+ ======================================================================
+ TODO: runTest (....TestTodo)
+ ----------------------------------------------------------------------
+ Traceback (most recent call last):
+ ...
+ ...Todo: I need to test something
+ <BLANKLINE>
+
+Since we defined a Todo as a failure, the run was not successful.
+
+ >>> result.wasSuccessful()
+ False
+"""
+
+from nose.pyversion import make_instancemethod
+from nose.plugins.base import Plugin
+from nose.result import TextTestResult
+from nose.util import isclass
+
+class MetaErrorClass(type):
+ """Metaclass for ErrorClassPlugins that allows error classes to be
+ set up in a declarative manner.
+ """
+ def __init__(self, name, bases, attr):
+ errorClasses = []
+ for name, detail in attr.items():
+ if isinstance(detail, ErrorClass):
+ attr.pop(name)
+ for cls in detail:
+ errorClasses.append(
+ (cls, (name, detail.label, detail.isfailure)))
+ super(MetaErrorClass, self).__init__(name, bases, attr)
+ self.errorClasses = tuple(errorClasses)
+
+
+class ErrorClass(object):
+ def __init__(self, *errorClasses, **kw):
+ self.errorClasses = errorClasses
+ try:
+ for key in ('label', 'isfailure'):
+ setattr(self, key, kw.pop(key))
+ except KeyError:
+ raise TypeError("%r is a required named argument for ErrorClass"
+ % key)
+
+ def __iter__(self):
+ return iter(self.errorClasses)
+
+
+class ErrorClassPlugin(Plugin):
+ """
+ Base class for ErrorClass plugins. Subclass this class and declare the
+ exceptions that you wish to handle as attributes of the subclass.
+ """
+ __metaclass__ = MetaErrorClass
+ score = 1000
+ errorClasses = ()
+
+ def addError(self, test, err):
+ err_cls, a, b = err
+ if not isclass(err_cls):
+ return
+ classes = [e[0] for e in self.errorClasses]
+ if filter(lambda c: issubclass(err_cls, c), classes):
+ return True
+
+ def prepareTestResult(self, result):
+ if not hasattr(result, 'errorClasses'):
+ self.patchResult(result)
+ for cls, (storage_attr, label, isfail) in self.errorClasses:
+ if cls not in result.errorClasses:
+ storage = getattr(result, storage_attr, [])
+ setattr(result, storage_attr, storage)
+ result.errorClasses[cls] = (storage, label, isfail)
+
+ def patchResult(self, result):
+ result.printLabel = print_label_patch(result)
+ result._orig_addError, result.addError = \
+ result.addError, add_error_patch(result)
+ result._orig_wasSuccessful, result.wasSuccessful = \
+ result.wasSuccessful, wassuccessful_patch(result)
+ if hasattr(result, 'printErrors'):
+ result._orig_printErrors, result.printErrors = \
+ result.printErrors, print_errors_patch(result)
+ if hasattr(result, 'addSkip'):
+ result._orig_addSkip, result.addSkip = \
+ result.addSkip, add_skip_patch(result)
+ result.errorClasses = {}
+
+
+def add_error_patch(result):
+ """Create a new addError method to patch into a result instance
+ that recognizes the errorClasses attribute and deals with
+ errorclasses correctly.
+ """
+ return make_instancemethod(TextTestResult.addError, result)
+
+
+def print_errors_patch(result):
+ """Create a new printErrors method that prints errorClasses items
+ as well.
+ """
+ return make_instancemethod(TextTestResult.printErrors, result)
+
+
+def print_label_patch(result):
+ """Create a new printLabel method that prints errorClasses items
+ as well.
+ """
+ return make_instancemethod(TextTestResult.printLabel, result)
+
+
+def wassuccessful_patch(result):
+ """Create a new wasSuccessful method that checks errorClasses for
+ exceptions that were put into other slots than error or failure
+ but that still count as not success.
+ """
+ return make_instancemethod(TextTestResult.wasSuccessful, result)
+
+
+def add_skip_patch(result):
+ """Create a new addSkip method to patch into a result instance
+ that delegates to addError.
+ """
+ return make_instancemethod(TextTestResult.addSkip, result)
+
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/lib/spack/external/nose/plugins/failuredetail.py b/lib/spack/external/nose/plugins/failuredetail.py
new file mode 100644
index 0000000000..6462865dd0
--- /dev/null
+++ b/lib/spack/external/nose/plugins/failuredetail.py
@@ -0,0 +1,49 @@
+"""
+This plugin provides assert introspection. When the plugin is enabled
+and a test failure occurs, the traceback is displayed with extra context
+around the line in which the exception was raised. Simple variable
+substitution is also performed in the context output to provide more
+debugging information.
+"""
+
+from nose.plugins import Plugin
+from nose.pyversion import exc_to_unicode, force_unicode
+from nose.inspector import inspect_traceback
+
+class FailureDetail(Plugin):
+ """
+ Plugin that provides extra information in tracebacks of test failures.
+ """
+ score = 1600 # before capture
+
+ def options(self, parser, env):
+ """Register commmandline options.
+ """
+ parser.add_option(
+ "-d", "--detailed-errors", "--failure-detail",
+ action="store_true",
+ default=env.get('NOSE_DETAILED_ERRORS'),
+ dest="detailedErrors", help="Add detail to error"
+ " output by attempting to evaluate failed"
+ " asserts [NOSE_DETAILED_ERRORS]")
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ if not self.can_configure:
+ return
+ self.enabled = options.detailedErrors
+ self.conf = conf
+
+ def formatFailure(self, test, err):
+ """Add detail from traceback inspection to error message of a failure.
+ """
+ ec, ev, tb = err
+ tbinfo, str_ev = None, exc_to_unicode(ev)
+
+ if tb:
+ tbinfo = force_unicode(inspect_traceback(tb))
+ str_ev = '\n'.join([str_ev, tbinfo])
+ test.tbinfo = tbinfo
+ return (ec, str_ev, tb)
+
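+if __name__ == '__main__':
+    # Illustrative sketch: inspect_traceback (used above) re-renders the
+    # failing frame with surrounding context and evaluated variables,
+    # which is what -d/--detailed-errors appends to failure output.
+    import sys
+    try:
+        x = 1
+        assert x == 2, "x should be 2"
+    except AssertionError:
+        print inspect_traceback(sys.exc_info()[2])
+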
diff --git a/lib/spack/external/nose/plugins/isolate.py b/lib/spack/external/nose/plugins/isolate.py
new file mode 100644
index 0000000000..13235dfbd1
--- /dev/null
+++ b/lib/spack/external/nose/plugins/isolate.py
@@ -0,0 +1,103 @@
+"""The isolation plugin resets the contents of sys.modules after running
+each test module or package. Use it by setting ``--with-isolation`` or the
+NOSE_WITH_ISOLATION environment variable.
+
+The effects are similar to wrapping the following functions around the
+import and execution of each test module::
+
+ def setup(module):
+ module._mods = sys.modules.copy()
+
+ def teardown(module):
+ to_del = [ m for m in sys.modules.keys() if m not in
+ module._mods ]
+ for mod in to_del:
+ del sys.modules[mod]
+ sys.modules.update(module._mods)
+
+Isolation works only during lazy loading. In normal use, this is only
+during discovery of modules within a directory, where the process of
+importing, loading tests and running tests from each module is
+encapsulated in a single loadTestsFromName call. This plugin
+implements loadTestsFromNames to force the same lazy-loading there,
+which allows isolation to work in directed mode as well as discovery,
+at the cost of some efficiency: lazy-loading names forces full context
+setup and teardown to run for each name, defeating the grouping that
+is normally used to ensure that context setup and teardown are run the
+fewest possible times for a given set of names.
+
+.. warning ::
+
+ This plugin should not be used in conjunction with other plugins
+ that assume that modules, once imported, will stay imported; for
+ instance, it may cause very odd results when used with the coverage
+ plugin.
+
+"""
+
+import logging
+import sys
+
+from nose.plugins import Plugin
+
+
+log = logging.getLogger('nose.plugins.isolation')
+
+class IsolationPlugin(Plugin):
+ """
+ Activate the isolation plugin to isolate changes to external
+ modules to a single test module or package. The isolation plugin
+ resets the contents of sys.modules after each test module or
+ package runs to its state before the test. PLEASE NOTE that this
+ plugin should not be used with the coverage plugin, or in any other case
+ where module reloading may produce undesirable side-effects.
+ """
+ score = 10 # I want to be last
+ name = 'isolation'
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ Plugin.configure(self, options, conf)
+ self._mod_stack = []
+
+ def beforeContext(self):
+ """Copy sys.modules onto my mod stack
+ """
+ mods = sys.modules.copy()
+ self._mod_stack.append(mods)
+
+ def afterContext(self):
+ """Pop my mod stack and restore sys.modules to the state
+ it was in when mod stack was pushed.
+ """
+ mods = self._mod_stack.pop()
+ to_del = [ m for m in sys.modules.keys() if m not in mods ]
+ if to_del:
+ log.debug('removing sys modules entries: %s', to_del)
+ for mod in to_del:
+ del sys.modules[mod]
+ sys.modules.update(mods)
+
+ def loadTestsFromNames(self, names, module=None):
+ """Create a lazy suite that calls beforeContext and afterContext
+ around each name. The side-effect of this is that full context
+ fixtures will be set up and torn down around each test named.
+ """
+ # Fast path for when we don't care
+ if not names or len(names) == 1:
+ return
+ loader = self.loader
+ plugins = self.conf.plugins
+ def lazy():
+ for name in names:
+ plugins.beforeContext()
+ yield loader.loadTestsFromName(name, module=module)
+ plugins.afterContext()
+ return (loader.suiteClass(lazy), [])
+
+ def prepareTestLoader(self, loader):
+ """Get handle on test loader so we can use it in loadTestsFromNames.
+ """
+ self.loader = loader
+
diff --git a/lib/spack/external/nose/plugins/logcapture.py b/lib/spack/external/nose/plugins/logcapture.py
new file mode 100644
index 0000000000..4c9a79f6fd
--- /dev/null
+++ b/lib/spack/external/nose/plugins/logcapture.py
@@ -0,0 +1,245 @@
+"""
+This plugin captures logging statements issued during test execution. When an
+error or failure occurs, the captured log messages are attached to the running
+test in the test.capturedLogging attribute, and displayed with the error failure
+output. It is enabled by default but can be turned off with the option
+``--nologcapture``.
+
+You can filter captured logging statements with the ``--logging-filter`` option.
+If set, it specifies which logger(s) will be captured; statements from loggers
+that do not match are not captured. Example: specifying
+``--logging-filter=sqlalchemy,myapp`` will ensure that only statements logged
+via the sqlalchemy.engine, myapp or myapp.foo.bar loggers will be captured.
+
+You can remove other installed logging handlers with the
+``--logging-clear-handlers`` option.
+"""
+
+import logging
+from logging import Handler
+import threading
+
+from nose.plugins.base import Plugin
+from nose.util import anyp, ln, safe_str
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+log = logging.getLogger(__name__)
+
+class FilterSet(object):
+ def __init__(self, filter_components):
+ self.inclusive, self.exclusive = self._partition(filter_components)
+
+ # @staticmethod
+ def _partition(components):
+ inclusive, exclusive = [], []
+ for component in components:
+ if component.startswith('-'):
+ exclusive.append(component[1:])
+ else:
+ inclusive.append(component)
+ return inclusive, exclusive
+ _partition = staticmethod(_partition)
+
+ def allow(self, record):
+ """returns whether this record should be printed"""
+ if not self:
+ # nothing to filter
+ return True
+ return self._allow(record) and not self._deny(record)
+
+ # @staticmethod
+ def _any_match(matchers, record):
+ """return the bool of whether `record` starts with
+ any item in `matchers`"""
+ def record_matches_key(key):
+ return record == key or record.startswith(key + '.')
+ return anyp(bool, map(record_matches_key, matchers))
+ _any_match = staticmethod(_any_match)
+
+ def _allow(self, record):
+ if not self.inclusive:
+ return True
+ return self._any_match(self.inclusive, record)
+
+ def _deny(self, record):
+ if not self.exclusive:
+ return False
+ return self._any_match(self.exclusive, record)
+
+
+class MyMemoryHandler(Handler):
+ def __init__(self, logformat, logdatefmt, filters):
+ Handler.__init__(self)
+ fmt = logging.Formatter(logformat, logdatefmt)
+ self.setFormatter(fmt)
+ self.filterset = FilterSet(filters)
+ self.buffer = []
+ def emit(self, record):
+ self.buffer.append(self.format(record))
+ def flush(self):
+ pass # do nothing
+ def truncate(self):
+ self.buffer = []
+ def filter(self, record):
+ if self.filterset.allow(record.name):
+ return Handler.filter(self, record)
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ del state['lock']
+ return state
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ self.lock = threading.RLock()
+
+
+class LogCapture(Plugin):
+ """
+ Log capture plugin. Enabled by default. Disable with --nologcapture.
+ This plugin captures logging statements issued during test execution,
+ appending any output captured to the error or failure output,
+ should the test fail or raise an error.
+ """
+ enabled = True
+ env_opt = 'NOSE_NOLOGCAPTURE'
+ name = 'logcapture'
+ score = 500
+ logformat = '%(name)s: %(levelname)s: %(message)s'
+ logdatefmt = None
+ clear = False
+ filters = ['-nose']
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ parser.add_option(
+ "--nologcapture", action="store_false",
+ default=not env.get(self.env_opt), dest="logcapture",
+ help="Disable logging capture plugin. "
+ "Logging configuration will be left intact."
+ " [NOSE_NOLOGCAPTURE]")
+ parser.add_option(
+ "--logging-format", action="store", dest="logcapture_format",
+ default=env.get('NOSE_LOGFORMAT') or self.logformat,
+ metavar="FORMAT",
+ help="Specify custom format to print statements. "
+ "Uses the same format as used by standard logging handlers."
+ " [NOSE_LOGFORMAT]")
+ parser.add_option(
+ "--logging-datefmt", action="store", dest="logcapture_datefmt",
+ default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt,
+ metavar="FORMAT",
+ help="Specify custom date/time format to print statements. "
+ "Uses the same format as used by standard logging handlers."
+ " [NOSE_LOGDATEFMT]")
+ parser.add_option(
+ "--logging-filter", action="store", dest="logcapture_filters",
+ default=env.get('NOSE_LOGFILTER'),
+ metavar="FILTER",
+ help="Specify which statements to filter in/out. "
+ "By default, everything is captured. If the output is too"
+ " verbose,\nuse this option to filter out needless output.\n"
+ "Example: filter=foo will capture statements issued ONLY to\n"
+ " foo or foo.what.ever.sub but not foobar or other logger.\n"
+ "Specify multiple loggers with comma: filter=foo,bar,baz.\n"
+ "If any logger name is prefixed with a minus, eg filter=-foo,\n"
+ "it will be excluded rather than included. Default: "
+ "exclude logging messages from nose itself (-nose)."
+ " [NOSE_LOGFILTER]\n")
+ parser.add_option(
+ "--logging-clear-handlers", action="store_true",
+ default=False, dest="logcapture_clear",
+ help="Clear all other logging handlers")
+ parser.add_option(
+ "--logging-level", action="store",
+ default='NOTSET', dest="logcapture_level",
+ help="Set the log level to capture")
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ self.conf = conf
+ # Disable if explicitly disabled, or if logging is
+ # configured via logging config file
+ if not options.logcapture or conf.loggingConfig:
+ self.enabled = False
+ self.logformat = options.logcapture_format
+ self.logdatefmt = options.logcapture_datefmt
+ self.clear = options.logcapture_clear
+ self.loglevel = options.logcapture_level
+ if options.logcapture_filters:
+ self.filters = options.logcapture_filters.split(',')
+
+ def setupLoghandler(self):
+ # setup our handler with root logger
+ root_logger = logging.getLogger()
+ if self.clear:
+ if hasattr(root_logger, "handlers"):
+ for handler in root_logger.handlers:
+ root_logger.removeHandler(handler)
+ for logger in logging.Logger.manager.loggerDict.values():
+ if hasattr(logger, "handlers"):
+ for handler in logger.handlers:
+ logger.removeHandler(handler)
+ # make sure there isn't one already
+ # you can't simply use "if self.handler not in root_logger.handlers"
+ # since at least in unit tests this doesn't work --
+ # LogCapture() is instantiated for each test case while root_logger
+ # is module global
+ # so we always add new MyMemoryHandler instance
+ for handler in root_logger.handlers[:]:
+ if isinstance(handler, MyMemoryHandler):
+ root_logger.handlers.remove(handler)
+ root_logger.addHandler(self.handler)
+ # to make sure everything gets captured
+ loglevel = getattr(self, "loglevel", "NOTSET")
+ root_logger.setLevel(getattr(logging, loglevel))
+
+ def begin(self):
+ """Set up logging handler before test run begins.
+ """
+ self.start()
+
+ def start(self):
+ self.handler = MyMemoryHandler(self.logformat, self.logdatefmt,
+ self.filters)
+ self.setupLoghandler()
+
+ def end(self):
+ pass
+
+ def beforeTest(self, test):
+ """Clear buffers and handlers before test.
+ """
+ self.setupLoghandler()
+
+ def afterTest(self, test):
+ """Clear buffers after test.
+ """
+ self.handler.truncate()
+
+ def formatFailure(self, test, err):
+ """Add captured log messages to failure output.
+ """
+ return self.formatError(test, err)
+
+ def formatError(self, test, err):
+ """Add captured log messages to error output.
+ """
+ # logic flow copied from Capture.formatError
+ test.capturedLogging = records = self.formatLogRecords()
+ if not records:
+ return err
+ ec, ev, tb = err
+ return (ec, self.addCaptureToErr(ev, records), tb)
+
+ def formatLogRecords(self):
+ return map(safe_str, self.handler.buffer)
+
+ def addCaptureToErr(self, ev, records):
+ return '\n'.join([safe_str(ev), ln('>> begin captured logging <<')] + \
+ records + \
+ [ln('>> end captured logging <<')])
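+
+if __name__ == '__main__':
+    # Illustrative sketch of FilterSet semantics: a leading '-' excludes a
+    # logger subtree; any other entry is an inclusion prefix.
+    fs = FilterSet(['myapp', '-myapp.noisy'])
+    assert fs.allow('myapp')                # included exactly
+    assert fs.allow('myapp.db')             # child of an included logger
+    assert not fs.allow('myapp.noisy.sub')  # excluded subtree
+    assert not fs.allow('other')            # matches no inclusion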
diff --git a/lib/spack/external/nose/plugins/manager.py b/lib/spack/external/nose/plugins/manager.py
new file mode 100644
index 0000000000..4d2ed22b6f
--- /dev/null
+++ b/lib/spack/external/nose/plugins/manager.py
@@ -0,0 +1,460 @@
+"""
+Plugin Manager
+--------------
+
+A plugin manager class is used to load plugins, manage the list of
+loaded plugins, and proxy calls to those plugins.
+
+The plugin managers provided with nose are:
+
+:class:`PluginManager`
+ This manager doesn't implement loadPlugins, so it can only work
+ with a static list of plugins.
+
+:class:`BuiltinPluginManager`
+ This manager loads plugins referenced in ``nose.plugins.builtin``.
+
+:class:`EntryPointPluginManager`
+ This manager uses setuptools entrypoints to load plugins.
+
+:class:`ExtraPluginsPluginManager`
+ This manager loads extra plugins specified with the keyword
+ `addplugins`.
+
+:class:`DefaultPluginManager`
+ This is the manager class that will be used by default. If
+ setuptools is installed, it is a subclass of
+ :class:`EntryPointPluginManager` and :class:`BuiltinPluginManager`;
+ otherwise, an alias to :class:`BuiltinPluginManager`.
+
+:class:`RestrictedPluginManager`
+ This manager is for use in test runs where some plugin calls are
+ not available, such as runs started with ``python setup.py test``,
+ where the test runner is the default unittest :class:`TextTestRunner`. It
+ is a subclass of :class:`DefaultPluginManager`.
+
+Writing a plugin manager
+========================
+
+If you want to load plugins via some other means, you can write a
+plugin manager and pass an instance of your plugin manager class when
+instantiating the :class:`nose.config.Config` instance that you pass to
+:class:`TestProgram` (or :func:`main` or :func:`run`).
+
+To implement your plugin loading scheme, implement ``loadPlugins()``,
+and in that method, call ``addPlugin()`` with an instance of each plugin
+you wish to make available. Make sure to call
+``super(YourManager, self).loadPlugins()`` as well if you have
+subclassed a manager other than ``PluginManager``.
+
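+For example, a minimal manager might look like this (a sketch;
+``MyPlugin`` is a hypothetical plugin instance, not part of nose)::
+
+    class MyPluginManager(PluginManager):
+        def loadPlugins(self):
+            # make one statically known plugin available
+            self.addPlugin(MyPlugin())
+            super(MyPluginManager, self).loadPlugins()
+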
+"""
+import inspect
+import logging
+import os
+import sys
+from itertools import chain as iterchain
+from warnings import warn
+import nose.config
+from nose.failure import Failure
+from nose.plugins.base import IPluginInterface
+from nose.pyversion import sort_list
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+
+__all__ = ['DefaultPluginManager', 'PluginManager', 'EntryPointPluginManager',
+ 'BuiltinPluginManager', 'RestrictedPluginManager']
+
+log = logging.getLogger(__name__)
+
+
+class PluginProxy(object):
+ """Proxy for plugin calls. Essentially a closure bound to the
+ given call and plugin list.
+
+ The plugin proxy also must be bound to a particular plugin
+ interface specification, so that it knows what calls are available
+ and any special handling that is required for each call.
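+
+ For example, as the manager itself does when configuring (a sketch)::
+
+     proxy = PluginProxy('configure', plugins)
+     proxy(options, config)
+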
+ """
+ interface = IPluginInterface
+ def __init__(self, call, plugins):
+ try:
+ self.method = getattr(self.interface, call)
+ except AttributeError:
+ raise AttributeError("%s is not a valid %s method"
+ % (call, self.interface.__name__))
+ self.call = self.makeCall(call)
+ self.plugins = []
+ for p in plugins:
+ self.addPlugin(p, call)
+
+ def __call__(self, *arg, **kw):
+ return self.call(*arg, **kw)
+
+ def addPlugin(self, plugin, call):
+ """Add plugin to my list of plugins to call, if it has the attribute
+ I'm bound to.
+ """
+ meth = getattr(plugin, call, None)
+ if meth is not None:
+ if call == 'loadTestsFromModule' and \
+ len(inspect.getargspec(meth)[0]) == 2:
+ orig_meth = meth
+ meth = lambda module, path, **kwargs: orig_meth(module)
+ self.plugins.append((plugin, meth))
+
+ def makeCall(self, call):
+ if call == 'loadTestsFromNames':
+ # special case -- load tests from names behaves somewhat differently
+ # from other chainable calls, because plugins return a tuple, only
+ # part of which can be chained to the next plugin.
+ return self._loadTestsFromNames
+
+ meth = self.method
+ if getattr(meth, 'generative', False):
+ # call all plugins and return a flattened list of their results
+ return lambda *arg, **kw: list(self.generate(*arg, **kw))
+ elif getattr(meth, 'chainable', False):
+ return self.chain
+ else:
+ # return a value from the first plugin that returns non-None
+ return self.simple
+
+ def chain(self, *arg, **kw):
+ """Call plugins in a chain, where the result of each plugin call is
+ sent to the next plugin as input. The final output result is returned.
+ """
+ result = None
+ # extract the static arguments (if any) from arg so they can
+ # be passed to each plugin call in the chain
+ static = [a for (static, a)
+ in zip(getattr(self.method, 'static_args', []), arg)
+ if static]
+ for p, meth in self.plugins:
+ result = meth(*arg, **kw)
+ arg = static[:]
+ arg.append(result)
+ return result
+
+ def generate(self, *arg, **kw):
+ """Call all plugins, yielding each item in each non-None result.
+ """
+ for p, meth in self.plugins:
+ result = None
+ try:
+ result = meth(*arg, **kw)
+ if result is not None:
+ for r in result:
+ yield r
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ exc = sys.exc_info()
+ yield Failure(*exc)
+ continue
+
+ def simple(self, *arg, **kw):
+ """Call all plugins, returning the first non-None result.
+ """
+ for p, meth in self.plugins:
+ result = meth(*arg, **kw)
+ if result is not None:
+ return result
+
+ def _loadTestsFromNames(self, names, module=None):
+ """Chainable but not quite normal. Plugins return a tuple of
+ (tests, names) after processing the names. The tests are added
+ to a suite that is accumulated throughout the full call, while
+ names are input for the next plugin in the chain.
+ """
+ suite = []
+ for p, meth in self.plugins:
+ result = meth(names, module=module)
+ if result is not None:
+ suite_part, names = result
+ if suite_part:
+ suite.extend(suite_part)
+ return suite, names
+
+
+class NoPlugins(object):
+ """Null Plugin manager that has no plugins."""
+ interface = IPluginInterface
+ def __init__(self):
+ self._plugins = self.plugins = ()
+
+ def __iter__(self):
+ # must return an iterator, not a bare tuple
+ return iter(())
+
+ def _doNothing(self, *args, **kwds):
+ pass
+
+ def _emptyIterator(self, *args, **kwds):
+ return ()
+
+ def __getattr__(self, call):
+ method = getattr(self.interface, call)
+ if getattr(method, "generative", False):
+ return self._emptyIterator
+ else:
+ return self._doNothing
+
+ def addPlugin(self, plug):
+ raise NotImplementedError()
+
+ def addPlugins(self, plugins):
+ raise NotImplementedError()
+
+ def configure(self, options, config):
+ pass
+
+ def loadPlugins(self):
+ pass
+
+ def sort(self):
+ pass
+
+
+class PluginManager(object):
+ """Base class for plugin managers. PluginManager is intended to be
+ used only with a static list of plugins. The loadPlugins() implementation
+ only reloads plugins from _extraplugins to prevent those from being
+ overridden by a subclass.
+
+ The basic functionality of a plugin manager is to proxy all unknown
+ attributes through a ``PluginProxy`` to a list of plugins.
+
+ Note that the list of plugins *may not* be changed after the first plugin
+ call.
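+
+ Typical use (a sketch; ``MyPlugin`` is hypothetical)::
+
+     manager = PluginManager(plugins=[MyPlugin()])
+     manager.configure(options, config)  # drops disabled plugins
+     manager.begin()  # proxied through a PluginProxy to each plugin
+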
+ """
+ proxyClass = PluginProxy
+
+ def __init__(self, plugins=(), proxyClass=None):
+ self._plugins = []
+ self._extraplugins = ()
+ self._proxies = {}
+ if plugins:
+ self.addPlugins(plugins)
+ if proxyClass is not None:
+ self.proxyClass = proxyClass
+
+ def __getattr__(self, call):
+ try:
+ return self._proxies[call]
+ except KeyError:
+ proxy = self.proxyClass(call, self._plugins)
+ self._proxies[call] = proxy
+ return proxy
+
+ def __iter__(self):
+ return iter(self.plugins)
+
+ def addPlugin(self, plug):
+ # allow, for instance, plugins loaded via entry points to
+ # supplant builtin plugins.
+ new_name = getattr(plug, 'name', object())
+ self._plugins[:] = [p for p in self._plugins
+ if getattr(p, 'name', None) != new_name]
+ self._plugins.append(plug)
+
+ def addPlugins(self, plugins=(), extraplugins=()):
+ """extraplugins are maintained in a separate list and
+ re-added by loadPlugins() to prevent their being overwritten
+ by plugins added by a subclass of PluginManager
+ """
+ self._extraplugins = extraplugins
+ for plug in iterchain(plugins, extraplugins):
+ self.addPlugin(plug)
+
+ def configure(self, options, config):
+ """Configure the set of plugins with the given options
+ and config instance. After configuration, disabled plugins
+ are removed from the plugins list.
+ """
+ log.debug("Configuring plugins")
+ self.config = config
+ cfg = PluginProxy('configure', self._plugins)
+ cfg(options, config)
+ enabled = [plug for plug in self._plugins if plug.enabled]
+ self.plugins = enabled
+ self.sort()
+ log.debug("Plugins enabled: %s", enabled)
+
+ def loadPlugins(self):
+ for plug in self._extraplugins:
+ self.addPlugin(plug)
+
+ def sort(self):
+ return sort_list(self._plugins, lambda x: getattr(x, 'score', 1), reverse=True)
+
+ def _get_plugins(self):
+ return self._plugins
+
+ def _set_plugins(self, plugins):
+ self._plugins = []
+ self.addPlugins(plugins)
+
+ plugins = property(_get_plugins, _set_plugins, None,
+ """Access the list of plugins managed by
+ this plugin manager""")
+
+
+class ZeroNinePlugin:
+ """Proxy for 0.9 plugins, adapts 0.10 calls to 0.9 standard.
+ """
+ def __init__(self, plugin):
+ self.plugin = plugin
+
+ def options(self, parser, env=os.environ):
+ self.plugin.add_options(parser, env)
+
+ def addError(self, test, err):
+ if not hasattr(self.plugin, 'addError'):
+ return
+ # dispatch to addSkip or addDeprecated for those exception types
+ from nose.exc import SkipTest, DeprecatedTest
+ ec, ev, tb = err
+ if issubclass(ec, SkipTest):
+ if not hasattr(self.plugin, 'addSkip'):
+ return
+ return self.plugin.addSkip(test.test)
+ elif issubclass(ec, DeprecatedTest):
+ if not hasattr(self.plugin, 'addDeprecated'):
+ return
+ return self.plugin.addDeprecated(test.test)
+ # add capt
+ capt = test.capturedOutput
+ return self.plugin.addError(test.test, err, capt)
+
+ def loadTestsFromFile(self, filename):
+ if hasattr(self.plugin, 'loadTestsFromPath'):
+ return self.plugin.loadTestsFromPath(filename)
+
+ def addFailure(self, test, err):
+ if not hasattr(self.plugin, 'addFailure'):
+ return
+ # add capt and tbinfo
+ capt = test.capturedOutput
+ tbinfo = test.tbinfo
+ return self.plugin.addFailure(test.test, err, capt, tbinfo)
+
+ def addSuccess(self, test):
+ if not hasattr(self.plugin, 'addSuccess'):
+ return
+ capt = test.capturedOutput
+ self.plugin.addSuccess(test.test, capt)
+
+ def startTest(self, test):
+ if not hasattr(self.plugin, 'startTest'):
+ return
+ return self.plugin.startTest(test.test)
+
+ def stopTest(self, test):
+ if not hasattr(self.plugin, 'stopTest'):
+ return
+ return self.plugin.stopTest(test.test)
+
+ def __getattr__(self, val):
+ return getattr(self.plugin, val)
+
+
+class EntryPointPluginManager(PluginManager):
+ """Plugin manager that loads plugins from the `nose.plugins` and
+ `nose.plugins.0.10` entry points.
+ """
+ entry_points = (('nose.plugins.0.10', None),
+ ('nose.plugins', ZeroNinePlugin))
+
+ def loadPlugins(self):
+ """Load plugins by iterating the `nose.plugins` entry point.
+ """
+ from pkg_resources import iter_entry_points
+ loaded = {}
+ for entry_point, adapt in self.entry_points:
+ for ep in iter_entry_points(entry_point):
+ if ep.name in loaded:
+ continue
+ loaded[ep.name] = True
+ log.debug('%s load plugin %s', self.__class__.__name__, ep)
+ try:
+ plugcls = ep.load()
+ except KeyboardInterrupt:
+ raise
+ except Exception, e:
+ # never want a plugin load to kill the test run
+ # but we can't log here because the logger is not yet
+ # configured
+ warn("Unable to load plugin %s: %s" % (ep, e),
+ RuntimeWarning)
+ continue
+ if adapt:
+ plug = adapt(plugcls())
+ else:
+ plug = plugcls()
+ self.addPlugin(plug)
+ super(EntryPointPluginManager, self).loadPlugins()
+
+
+class BuiltinPluginManager(PluginManager):
+ """Plugin manager that loads plugins from the list in
+ `nose.plugins.builtin`.
+ """
+ def loadPlugins(self):
+ """Load plugins in nose.plugins.builtin
+ """
+ from nose.plugins import builtin
+ for plug in builtin.plugins:
+ self.addPlugin(plug())
+ super(BuiltinPluginManager, self).loadPlugins()
+
+try:
+ import pkg_resources
+ class DefaultPluginManager(EntryPointPluginManager, BuiltinPluginManager):
+ pass
+
+except ImportError:
+ class DefaultPluginManager(BuiltinPluginManager):
+ pass
+
+class RestrictedPluginManager(DefaultPluginManager):
+ """Plugin manager that restricts the plugin list to those not
+ excluded by a list of exclude methods. Any plugin that implements
+ an excluded method will be removed from the manager's plugin list
+ after plugins are loaded.
+ """
+ def __init__(self, plugins=(), exclude=(), load=True):
+ DefaultPluginManager.__init__(self, plugins)
+ self.load = load
+ self.exclude = exclude
+ self.excluded = []
+ self._excludedOpts = None
+
+ def excludedOption(self, name):
+ if self._excludedOpts is None:
+ from optparse import OptionParser
+ self._excludedOpts = OptionParser(add_help_option=False)
+ for plugin in self.excluded:
+ plugin.options(self._excludedOpts, env={})
+ return self._excludedOpts.get_option('--' + name)
+
+ def loadPlugins(self):
+ if self.load:
+ DefaultPluginManager.loadPlugins(self)
+ allow = []
+ for plugin in self.plugins:
+ ok = True
+ for method in self.exclude:
+ if hasattr(plugin, method):
+ ok = False
+ self.excluded.append(plugin)
+ break
+ if ok:
+ allow.append(plugin)
+ self.plugins = allow
diff --git a/lib/spack/external/nose/plugins/multiprocess.py b/lib/spack/external/nose/plugins/multiprocess.py
new file mode 100644
index 0000000000..2cae744a11
--- /dev/null
+++ b/lib/spack/external/nose/plugins/multiprocess.py
@@ -0,0 +1,835 @@
+"""
+Overview
+========
+
+The multiprocess plugin enables you to distribute your test run among a set of
+worker processes that run tests in parallel. This can speed up CPU-bound test
+runs (as long as the number of worker processes is around the number of
+processors or cores available), but is mainly useful for IO-bound tests that
+spend most of their time waiting for data to arrive from someplace else.
+
+.. note ::
+
+ See :doc:`../doc_tests/test_multiprocess/multiprocess` for
+ additional documentation and examples. Use of this plugin on python
+ 2.5 or earlier requires the multiprocessing_ module, also available
+ from PyPI.
+
+.. _multiprocessing: http://code.google.com/p/python-multiprocessing/
+
+How tests are distributed
+=========================
+
+The ideal case would be to dispatch each test to a worker process
+separately. This ideal is not attainable in all cases, however, because many
+test suites depend on context (class, module or package) fixtures.
+
+The plugin can't know (unless you tell it -- see below!) if a context fixture
+can be called many times concurrently (is re-entrant), or if it can be shared
+among tests running in different processes. Therefore, if a context has
+fixtures, the default behavior is to dispatch the entire suite to a worker as
+a unit.
+
+Controlling distribution
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two context-level variables that you can use to control this default
+behavior.
+
+If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True``
+in the context, and the plugin will dispatch tests in suites bound to that
+context as if the context had no fixtures. This means that the fixtures will
+execute concurrently and multiple times, typically once per test.
+
+If a context's fixtures can be shared by tests running in different processes
+-- such as a package-level fixture that starts an external http server or
+initializes a shared database -- then set ``_multiprocess_shared_ = True`` in
+the context. These fixtures will then execute in the primary nose process, and
+tests in those contexts will be individually dispatched to run in parallel.
+
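+For example, in a package's ``__init__.py`` (a sketch; the server helpers
+are hypothetical)::
+
+    _multiprocess_shared_ = True  # run package fixtures once, in the parent
+
+    def setup_package():
+        start_shared_http_server()
+
+    def teardown_package():
+        stop_shared_http_server()
+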
+How results are collected and reported
+======================================
+
+As each test or suite executes in a worker process, results (failures, errors,
+and specially handled exceptions like SkipTest) are collected in that
+process. When the worker process finishes, it returns results to the main
+nose process. There, any progress output is printed (dots!), and the
+results from the test run are combined into a consolidated result
+set. When results have been received for all dispatched tests, or all
+workers have died, the result summary is output as normal.
+
+Beware!
+=======
+
+Not all test suites will benefit from, or even operate correctly using, this
+plugin. For example, CPU-bound tests will run more slowly if you don't have
+multiple processors. There are also some differences in plugin
+interactions and behaviors due to the way in which tests are dispatched and
+loaded. In general, test loading under this plugin operates as if it were
+always in directed mode instead of discovered mode. For instance, doctests
+in test modules will always be found when using this plugin with the doctest
+plugin.
+
+But the biggest issue you will face is probably concurrency. Unless you
+have kept your tests as religiously pure unit tests, with no side-effects, no
+ordering issues, and no external dependencies, chances are you will experience
+odd, intermittent and unexplainable failures and errors when using this
+plugin. This doesn't necessarily mean the plugin is broken; it may mean that
+your test suite is not safe for concurrency.
+
+New Features in 1.1.0
+=====================
+
+* functions generated by test generators are now added to the worker
+ queue, so they are distributed across the worker processes as well.
+* fixed timeout functionality: tests will now be terminated with a
+ TimedOutException when they exceed their allotted execution time. The
+ worker processes themselves are not terminated.
+* added the ``--process-restartworker`` option to restart workers once
+ they are done; this helps control memory usage, since otherwise leaks
+ can accumulate and make long runs very difficult.
+* added the global ``_instantiate_plugins`` list to configure which
+ plugins are started on the worker processes (see the sketch below).
+
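+For example (a sketch; ``MyPlugin`` is a hypothetical plugin class)::
+
+    from nose.plugins import multiprocess
+    multiprocess._instantiate_plugins = [MyPlugin]
+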
+"""
+
+import logging
+import os
+import sys
+import time
+import traceback
+import unittest
+import pickle
+import signal
+import nose.case
+from nose.core import TextTestRunner
+from nose import failure
+from nose import loader
+from nose.plugins.base import Plugin
+from nose.pyversion import bytes_
+from nose.result import TextTestResult
+from nose.suite import ContextSuite
+from nose.util import test_address
+try:
+ # 2.7+
+ from unittest.runner import _WritelnDecorator
+except ImportError:
+ from unittest import _WritelnDecorator
+from Queue import Empty
+from warnings import warn
+try:
+ from cStringIO import StringIO
+except ImportError:
+ import StringIO
+
+# this is a list of plugin classes that will be checked for and created inside
+# each worker process
+_instantiate_plugins = None
+
+log = logging.getLogger(__name__)
+
+Process = Queue = Pool = Event = Value = Array = None
+
+# have to inherit KeyboardInterrupt so it will interrupt the process properly
+class TimedOutException(KeyboardInterrupt):
+ def __init__(self, value = "Timed Out"):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+def _import_mp():
+ global Process, Queue, Pool, Event, Value, Array
+ try:
+ from multiprocessing import Manager, Process
+ # The manager starts a server process that holds Python objects and
+ # allows other processes to manipulate them using proxies. Ignore
+ # SIGINT (KeyboardInterrupt) while it starts so that the communication
+ # channel between subprocesses and the main process is still usable
+ # after ctrl+C is received in the main process.
+ old=signal.signal(signal.SIGINT, signal.SIG_IGN)
+ m = Manager()
+ #reset it back so main process will receive a KeyboardInterrupt
+ #exception on ctrl+c
+ signal.signal(signal.SIGINT, old)
+ Queue, Pool, Event, Value, Array = (
+ m.Queue, m.Pool, m.Event, m.Value, m.Array
+ )
+ except ImportError:
+ warn("multiprocessing module is not available, multiprocess plugin "
+ "cannot be used", RuntimeWarning)
+
+
+class TestLet:
+ def __init__(self, case):
+ try:
+ self._id = case.id()
+ except AttributeError:
+ pass
+ self._short_description = case.shortDescription()
+ self._str = str(case)
+
+ def id(self):
+ return self._id
+
+ def shortDescription(self):
+ return self._short_description
+
+ def __str__(self):
+ return self._str
+
+class MultiProcess(Plugin):
+ """
+ Run tests in multiple processes. Requires the multiprocessing module.
+ """
+ score = 1000
+ status = {}
+
+ def options(self, parser, env):
+ """
+ Register command-line options.
+ """
+ parser.add_option("--processes", action="store",
+ default=env.get('NOSE_PROCESSES', 0),
+ dest="multiprocess_workers",
+ metavar="NUM",
+ help="Spread test run among this many processes. "
+ "Set a number equal to the number of processors "
+ "or cores in your machine for best results. "
+ "Pass a negative number to have the number of "
+ "processes automatically set to the number of "
+ "cores. Passing 0 means to disable parallel "
+ "testing. Default is 0 unless NOSE_PROCESSES is "
+ "set. "
+ "[NOSE_PROCESSES]")
+ parser.add_option("--process-timeout", action="store",
+ default=env.get('NOSE_PROCESS_TIMEOUT', 10),
+ dest="multiprocess_timeout",
+ metavar="SECONDS",
+ help="Set timeout for return of results from each "
+ "test runner process. Default is 10. "
+ "[NOSE_PROCESS_TIMEOUT]")
+ parser.add_option("--process-restartworker", action="store_true",
+ default=env.get('NOSE_PROCESS_RESTARTWORKER', False),
+ dest="multiprocess_restartworker",
+ help="If set, will restart each worker process once"
+ " their tests are done, this helps control memory "
+ "leaks from killing the system. "
+ "[NOSE_PROCESS_RESTARTWORKER]")
+
+ def configure(self, options, config):
+ """
+ Configure plugin.
+ """
+ try:
+ self.status.pop('active')
+ except KeyError:
+ pass
+ if not hasattr(options, 'multiprocess_workers'):
+ self.enabled = False
+ return
+ # don't start inside of a worker process
+ if config.worker:
+ return
+ self.config = config
+ try:
+ workers = int(options.multiprocess_workers)
+ except (TypeError, ValueError):
+ workers = 0
+ if workers:
+ _import_mp()
+ if Process is None:
+ self.enabled = False
+ return
+ # Negative number of workers will cause multiprocessing to hang.
+ # Set the number of workers to the CPU count to avoid this.
+ if workers < 0:
+ try:
+ import multiprocessing
+ workers = multiprocessing.cpu_count()
+ except NotImplementedError:
+ self.enabled = False
+ return
+ self.enabled = True
+ self.config.multiprocess_workers = workers
+ t = float(options.multiprocess_timeout)
+ self.config.multiprocess_timeout = t
+ r = int(options.multiprocess_restartworker)
+ self.config.multiprocess_restartworker = r
+ self.status['active'] = True
+
+ def prepareTestLoader(self, loader):
+ """Remember loader class so MultiProcessTestRunner can instantiate
+ the right loader.
+ """
+ self.loaderClass = loader.__class__
+
+ def prepareTestRunner(self, runner):
+ """Replace test runner with MultiProcessTestRunner.
+ """
+ # replace with our runner class
+ return MultiProcessTestRunner(stream=runner.stream,
+ verbosity=self.config.verbosity,
+ config=self.config,
+ loaderClass=self.loaderClass)
+
+def signalhandler(sig, frame):
+ raise TimedOutException()
+
+class MultiProcessTestRunner(TextTestRunner):
+ waitkilltime = 5.0 # max time to wait to terminate a process that does not
+ # respond to SIGILL
+ def __init__(self, **kw):
+ self.loaderClass = kw.pop('loaderClass', loader.defaultTestLoader)
+ super(MultiProcessTestRunner, self).__init__(**kw)
+
+ def collect(self, test, testQueue, tasks, to_teardown, result):
+ # dispatch and collect results
+ # put test addresses, not tests, on the queue because tests aren't picklable
+ for case in self.nextBatch(test):
+ log.debug("Next batch %s (%s)", case, type(case))
+ if (isinstance(case, nose.case.Test) and
+ isinstance(case.test, failure.Failure)):
+ log.debug("Case is a Failure")
+ case(result) # run here to capture the failure
+ continue
+ # handle shared fixtures
+ if isinstance(case, ContextSuite) and case.context is failure.Failure:
+ log.debug("Case is a Failure")
+ case(result) # run here to capture the failure
+ continue
+ elif isinstance(case, ContextSuite) and self.sharedFixtures(case):
+ log.debug("%s has shared fixtures", case)
+ try:
+ case.setUp()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ log.debug("%s setup failed", sys.exc_info())
+ result.addError(case, sys.exc_info())
+ else:
+ to_teardown.append(case)
+ if case.factory:
+ ancestors=case.factory.context.get(case, [])
+ for an in ancestors[:2]:
+ #log.debug('reset ancestor %s', an)
+ if getattr(an, '_multiprocess_shared_', False):
+ an._multiprocess_can_split_=True
+ #an._multiprocess_shared_=False
+ self.collect(case, testQueue, tasks, to_teardown, result)
+
+ else:
+ test_addr = self.addtask(testQueue,tasks,case)
+ log.debug("Queued test %s (%s) to %s",
+ len(tasks), test_addr, testQueue)
+
+ def startProcess(self, iworker, testQueue, resultQueue, shouldStop, result):
+ currentaddr = Value('c',bytes_(''))
+ currentstart = Value('d',time.time())
+ keyboardCaught = Event()
+ p = Process(target=runner,
+ args=(iworker, testQueue,
+ resultQueue,
+ currentaddr,
+ currentstart,
+ keyboardCaught,
+ shouldStop,
+ self.loaderClass,
+ result.__class__,
+ pickle.dumps(self.config)))
+ p.currentaddr = currentaddr
+ p.currentstart = currentstart
+ p.keyboardCaught = keyboardCaught
+ old = signal.signal(signal.SIGILL, signalhandler)
+ p.start()
+ signal.signal(signal.SIGILL, old)
+ return p
+
+ def run(self, test):
+ """
+ Execute the test (which may be a test suite). If the test is a suite,
+ distribute it out among as many processes as have been configured, at
+ as fine a level as is possible given the context fixtures defined in
+ the suite or any sub-suites.
+
+ """
+ log.debug("%s.run(%s) (%s)", self, test, os.getpid())
+ wrapper = self.config.plugins.prepareTest(test)
+ if wrapper is not None:
+ test = wrapper
+
+ # plugins can decorate or capture the output stream
+ wrapped = self.config.plugins.setOutputStream(self.stream)
+ if wrapped is not None:
+ self.stream = wrapped
+
+ testQueue = Queue()
+ resultQueue = Queue()
+ tasks = []
+ completed = []
+ workers = []
+ to_teardown = []
+ shouldStop = Event()
+
+ result = self._makeResult()
+ start = time.time()
+
+ self.collect(test, testQueue, tasks, to_teardown, result)
+
+ log.debug("Starting %s workers", self.config.multiprocess_workers)
+ for i in range(self.config.multiprocess_workers):
+ p = self.startProcess(i, testQueue, resultQueue, shouldStop, result)
+ workers.append(p)
+ log.debug("Started worker process %s", i+1)
+
+ total_tasks = len(tasks)
+ # need to keep track of the next time to check for timeouts in case
+ # more than one process times out at the same time.
+ nexttimeout=self.config.multiprocess_timeout
+ thrownError = None
+
+ try:
+ while tasks:
+ log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs",
+ len(completed), total_tasks,nexttimeout)
+ try:
+ iworker, addr, newtask_addrs, batch_result = resultQueue.get(
+ timeout=nexttimeout)
+ log.debug('Results received for worker %d, %s, new tasks: %d',
+ iworker,addr,len(newtask_addrs))
+ try:
+ try:
+ tasks.remove(addr)
+ except ValueError:
+ log.warn('worker %s failed to remove from tasks: %s',
+ iworker,addr)
+ total_tasks += len(newtask_addrs)
+ tasks.extend(newtask_addrs)
+ except KeyError:
+ log.debug("Got result for unknown task? %s", addr)
+ log.debug("current: %s",str(list(tasks)[0]))
+ else:
+ completed.append([addr,batch_result])
+ self.consolidate(result, batch_result)
+ if (self.config.stopOnError
+ and not result.wasSuccessful()):
+ # set the stop condition
+ shouldStop.set()
+ break
+ if self.config.multiprocess_restartworker:
+ log.debug('joining worker %s',iworker)
+ # wait for the worker, though it is not that important if
+ # the worker cannot be joined; in fact, workers that add to
+ # testQueue will not terminate until all their
+ # items are read
+ workers[iworker].join(timeout=1)
+ if not shouldStop.is_set() and not testQueue.empty():
+ log.debug('starting new process on worker %s',iworker)
+ workers[iworker] = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
+ except Empty:
+ log.debug("Timed out with %s tasks pending "
+ "(empty testQueue=%r): %s",
+ len(tasks),testQueue.empty(),str(tasks))
+ any_alive = False
+ for iworker, w in enumerate(workers):
+ if w.is_alive():
+ worker_addr = bytes_(w.currentaddr.value,'ascii')
+ timeprocessing = time.time() - w.currentstart.value
+ if ( len(worker_addr) == 0
+ and timeprocessing > self.config.multiprocess_timeout-0.1):
+ log.debug('worker %d has finished its work item, '
+ 'but is not exiting? do we wait for it?',
+ iworker)
+ else:
+ any_alive = True
+ if (len(worker_addr) > 0
+ and timeprocessing > self.config.multiprocess_timeout-0.1):
+ log.debug('timed out worker %s: %s',
+ iworker,worker_addr)
+ w.currentaddr.value = bytes_('')
+ # If the process is in C++ code, sending a SIGILL
+ # might not raise a python KeyboardInterrupt exception;
+ # therefore, send multiple signals until an
+ # exception is caught. If this takes too long, then
+ # terminate the process
+ w.keyboardCaught.clear()
+ startkilltime = time.time()
+ while not w.keyboardCaught.is_set() and w.is_alive():
+ if time.time()-startkilltime > self.waitkilltime:
+ # have to terminate...
+ log.error("terminating worker %s",iworker)
+ w.terminate()
+ # there is a small probability that the
+ # terminated process might send a result,
+ # which has to be specially handled or
+ # else processes might get orphaned.
+ workers[iworker] = w = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
+ break
+ os.kill(w.pid, signal.SIGILL)
+ time.sleep(0.1)
+ if not any_alive and testQueue.empty():
+ log.debug("All workers dead")
+ break
+ nexttimeout=self.config.multiprocess_timeout
+ for w in workers:
+ if w.is_alive() and len(w.currentaddr.value) > 0:
+ timeprocessing = time.time()-w.currentstart.value
+ if timeprocessing <= self.config.multiprocess_timeout:
+ nexttimeout = min(nexttimeout,
+ self.config.multiprocess_timeout-timeprocessing)
+ log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks))
+
+ except (KeyboardInterrupt, SystemExit), e:
+ log.info('parent received ctrl-c when waiting for test results')
+ thrownError = e
+ #resultQueue.get(False)
+
+ result.addError(test, sys.exc_info())
+
+ try:
+ for case in to_teardown:
+ log.debug("Tearing down shared fixtures for %s", case)
+ try:
+ case.tearDown()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ result.addError(case, sys.exc_info())
+
+ stop = time.time()
+
+ # write the results first, since shutting down the processes can freeze
+ result.printErrors()
+ result.printSummary(start, stop)
+ self.config.plugins.finalize(result)
+
+ if thrownError is None:
+ log.debug("Tell all workers to stop")
+ for w in workers:
+ if w.is_alive():
+ testQueue.put('STOP', block=False)
+
+ # wait for the workers to end
+ for iworker,worker in enumerate(workers):
+ if worker.is_alive():
+ log.debug('joining worker %s',iworker)
+ worker.join()
+ if worker.is_alive():
+ log.debug('failed to join worker %s',iworker)
+ except (KeyboardInterrupt, SystemExit):
+ log.info('parent received ctrl-c when shutting down: stop all processes')
+ for worker in workers:
+ if worker.is_alive():
+ worker.terminate()
+
+ if thrownError: raise thrownError
+ else: raise
+
+ return result
+
+ def addtask(testQueue,tasks,case):
+ arg = None
+ if isinstance(case,nose.case.Test) and hasattr(case.test,'arg'):
+ # this removes the top level descriptor and allows real function
+ # name to be returned
+ case.test.descriptor = None
+ arg = case.test.arg
+ test_addr = MultiProcessTestRunner.address(case)
+ testQueue.put((test_addr,arg), block=False)
+ if arg is not None:
+ test_addr += str(arg)
+ if tasks is not None:
+ tasks.append(test_addr)
+ return test_addr
+ addtask = staticmethod(addtask)
+
+ def address(case):
+ if hasattr(case, 'address'):
+ file, mod, call = case.address()
+ elif hasattr(case, 'context'):
+ file, mod, call = test_address(case.context)
+ else:
+ raise Exception("Unable to convert %s to address" % case)
+ parts = []
+ if file is None:
+ if mod is None:
+ raise Exception("Unaddressable case %s" % case)
+ else:
+ parts.append(mod)
+ else:
+ # strip __init__.py(c) from end of file part
+ # if present, having it there confuses loader
+ dirname, basename = os.path.split(file)
+ if basename.startswith('__init__'):
+ file = dirname
+ parts.append(file)
+ if call is not None:
+ parts.append(call)
+ return ':'.join(map(str, parts))
+ address = staticmethod(address)
+
+ def nextBatch(self, test):
+ # allows tests or suites to mark themselves as not safe
+ # for multiprocess execution
+ if hasattr(test, 'context'):
+ if not getattr(test.context, '_multiprocess_', True):
+ return
+
+ if ((isinstance(test, ContextSuite)
+ and test.hasFixtures(self.checkCanSplit))
+ or not getattr(test, 'can_split', True)
+ or not isinstance(test, unittest.TestSuite)):
+ # regular test case, or a suite with context fixtures
+
+ # special case: when run like nosetests path/to/module.py
+ # the top-level suite has only one item, and it shares
+ # the same context as that item. In that case, we want the
+ # item, not the top-level suite
+ if isinstance(test, ContextSuite):
+ contained = list(test)
+ if (len(contained) == 1
+ and getattr(contained[0],
+ 'context', None) == test.context):
+ test = contained[0]
+ yield test
+ else:
+ # Suite is without fixtures at this level; but it may have
+ # fixtures at any deeper level, so we need to examine it all
+ # the way down to the case level
+ for case in test:
+ for batch in self.nextBatch(case):
+ yield batch
+
+ def checkCanSplit(context, fixt):
+ """
+ Callback that we use to check whether the fixtures found in a
+ context or ancestor are ones we care about.
+
+ Contexts can tell us that their fixtures are reentrant by setting
+ _multiprocess_can_split_. So if we see that, we return False to
+ disregard those fixtures.
+ """
+ if not fixt:
+ return False
+ if getattr(context, '_multiprocess_can_split_', False):
+ return False
+ return True
+ checkCanSplit = staticmethod(checkCanSplit)
+
+ def sharedFixtures(self, case):
+ context = getattr(case, 'context', None)
+ if not context:
+ return False
+ return getattr(context, '_multiprocess_shared_', False)
+
+ def consolidate(self, result, batch_result):
+ log.debug("batch result is %s" , batch_result)
+ try:
+ output, testsRun, failures, errors, errorClasses = batch_result
+ except ValueError:
+ log.debug("result in unexpected format %s", batch_result)
+ failure.Failure(*sys.exc_info())(result)
+ return
+ self.stream.write(output)
+ result.testsRun += testsRun
+ result.failures.extend(failures)
+ result.errors.extend(errors)
+ for key, (storage, label, isfail) in errorClasses.items():
+ if key not in result.errorClasses:
+ # Ordinarily storage is result attribute
+ # but it's only processed through the errorClasses
+ # dict, so it's ok to fake it here
+ result.errorClasses[key] = ([], label, isfail)
+ mystorage, _junk, _junk = result.errorClasses[key]
+ mystorage.extend(storage)
+ log.debug("Ran %s tests (total: %s)", testsRun, result.testsRun)
+
+
+def runner(ix, testQueue, resultQueue, currentaddr, currentstart,
+ keyboardCaught, shouldStop, loaderClass, resultClass, config):
+ try:
+ try:
+ return __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
+ keyboardCaught, shouldStop, loaderClass, resultClass, config)
+ except KeyboardInterrupt:
+ log.debug('Worker %s keyboard interrupt, stopping',ix)
+ except Empty:
+ log.debug("Worker %s timed out waiting for tasks", ix)
+
+def __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
+ keyboardCaught, shouldStop, loaderClass, resultClass, config):
+
+ config = pickle.loads(config)
+ dummy_parser = config.parserClass()
+ if _instantiate_plugins is not None:
+ for pluginclass in _instantiate_plugins:
+ plugin = pluginclass()
+ plugin.addOptions(dummy_parser,{})
+ config.plugins.addPlugin(plugin)
+ config.plugins.configure(config.options,config)
+ config.plugins.begin()
+ log.debug("Worker %s executing, pid=%d", ix,os.getpid())
+ loader = loaderClass(config=config)
+ loader.suiteClass.suiteClass = NoSharedFixtureContextSuite
+
+ def get():
+ return testQueue.get(timeout=config.multiprocess_timeout)
+
+ def makeResult():
+ stream = _WritelnDecorator(StringIO())
+ result = resultClass(stream, descriptions=1,
+ verbosity=config.verbosity,
+ config=config)
+ plug_result = config.plugins.prepareTestResult(result)
+ if plug_result:
+ return plug_result
+ return result
+
+ def batch(result):
+ failures = [(TestLet(c), err) for c, err in result.failures]
+ errors = [(TestLet(c), err) for c, err in result.errors]
+ errorClasses = {}
+ for key, (storage, label, isfail) in result.errorClasses.items():
+ errorClasses[key] = ([(TestLet(c), err) for c, err in storage],
+ label, isfail)
+ return (
+ result.stream.getvalue(),
+ result.testsRun,
+ failures,
+ errors,
+ errorClasses)
+ for test_addr, arg in iter(get, 'STOP'):
+ if shouldStop.is_set():
+ log.exception('Worker %d STOPPED',ix)
+ break
+ result = makeResult()
+ test = loader.loadTestsFromNames([test_addr])
+ test.testQueue = testQueue
+ test.tasks = []
+ test.arg = arg
+ log.debug("Worker %s Test is %s (%s)", ix, test_addr, test)
+ try:
+ if arg is not None:
+ test_addr = test_addr + str(arg)
+ currentaddr.value = bytes_(test_addr)
+ currentstart.value = time.time()
+ test(result)
+ currentaddr.value = bytes_('')
+ resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+ except KeyboardInterrupt, e: # TimedOutException subclasses KeyboardInterrupt
+ timeout = isinstance(e, TimedOutException)
+ if timeout:
+ keyboardCaught.set()
+ if len(currentaddr.value):
+ if timeout:
+ msg = 'Worker %s timed out, failing current test %s'
+ else:
+ msg = 'Worker %s keyboard interrupt, failing current test %s'
+ log.exception(msg,ix,test_addr)
+ currentaddr.value = bytes_('')
+ failure.Failure(*sys.exc_info())(result)
+ resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+ else:
+ if timeout:
+ msg = 'Worker %s test %s timed out'
+ else:
+ msg = 'Worker %s test %s keyboard interrupt'
+ log.debug(msg,ix,test_addr)
+ resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+ if not timeout:
+ raise
+ except SystemExit:
+ currentaddr.value = bytes_('')
+ log.exception('Worker %s system exit',ix)
+ raise
+ except:
+ currentaddr.value = bytes_('')
+ log.exception("Worker %s error running test or returning "
+ "results",ix)
+ failure.Failure(*sys.exc_info())(result)
+ resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+ if config.multiprocess_restartworker:
+ break
+ log.debug("Worker %s ending", ix)
+
+
+class NoSharedFixtureContextSuite(ContextSuite):
+ """
+ Context suite that never fires shared fixtures.
+
+ When a context sets _multiprocess_shared_, fixtures in that context
+ are executed by the main process. Using this suite class prevents them
+ from executing in the runner process as well.
+
+ """
+ testQueue = None
+ tasks = None
+ arg = None
+ def setupContext(self, context):
+ if getattr(context, '_multiprocess_shared_', False):
+ return
+ super(NoSharedFixtureContextSuite, self).setupContext(context)
+
+ def teardownContext(self, context):
+ if getattr(context, '_multiprocess_shared_', False):
+ return
+ super(NoSharedFixtureContextSuite, self).teardownContext(context)
+ def run(self, result):
+ """Run tests in suite inside of suite fixtures.
+ """
+ # proxy the result for myself
+ log.debug("suite %s (%s) run called, tests: %s",
+ id(self), self, self._tests)
+ if self.resultProxy:
+ result, orig = self.resultProxy(result, self), result
+ else:
+ result, orig = result, result
+ try:
+ #log.debug('setUp for %s', id(self));
+ self.setUp()
+ except KeyboardInterrupt:
+ raise
+ except:
+ self.error_context = 'setup'
+ result.addError(self, self._exc_info())
+ return
+ try:
+ for test in self._tests:
+ if (isinstance(test,nose.case.Test)
+ and self.arg is not None):
+ test.test.arg = self.arg
+ else:
+ test.arg = self.arg
+ test.testQueue = self.testQueue
+ test.tasks = self.tasks
+ if result.shouldStop:
+ log.debug("stopping")
+ break
+ # each nose.case.Test will create its own result proxy
+ # so the cases need the original result, to avoid proxy
+ # chains
+ #log.debug('running test %s in suite %s', test, self);
+ try:
+ test(orig)
+ except KeyboardInterrupt, e:
+ timeout = isinstance(e, TimedOutException)
+ if timeout:
+ msg = 'Timeout when running test %s in suite %s'
+ else:
+ msg = 'KeyboardInterrupt when running test %s in suite %s'
+ log.debug(msg, test, self)
+ err = (TimedOutException,TimedOutException(str(test)),
+ sys.exc_info()[2])
+ test.config.plugins.addError(test,err)
+ orig.addError(test,err)
+ if not timeout:
+ raise
+ finally:
+ self.has_run = True
+ try:
+ #log.debug('tearDown for %s', id(self));
+ self.tearDown()
+ except KeyboardInterrupt:
+ raise
+ except:
+ self.error_context = 'teardown'
+ result.addError(self, self._exc_info())
diff --git a/lib/spack/external/nose/plugins/plugintest.py b/lib/spack/external/nose/plugins/plugintest.py
new file mode 100644
index 0000000000..76d0d2c48c
--- /dev/null
+++ b/lib/spack/external/nose/plugins/plugintest.py
@@ -0,0 +1,416 @@
+"""
+Testing Plugins
+===============
+
+The plugin interface is well-tested enough to safely unit test your
+use of its hooks with some level of confidence. However, there is also
+a mixin for unittest.TestCase called PluginTester that's designed to
+test plugins in their native runtime environment.
+
+Here's a simple example with a do-nothing plugin and a composed suite.
+
+ >>> import unittest
+ >>> from nose.plugins import Plugin, PluginTester
+ >>> class FooPlugin(Plugin):
+ ... pass
+ >>> class TestPluginFoo(PluginTester, unittest.TestCase):
+ ... activate = '--with-foo'
+ ... plugins = [FooPlugin()]
+ ... def test_foo(self):
+ ... for line in self.output:
+ ... # i.e. check for patterns
+ ... pass
+ ...
+ ... # or check for a line containing ...
+ ... assert "ValueError" in self.output
+ ... def makeSuite(self):
+ ... class TC(unittest.TestCase):
+ ... def runTest(self):
+ ... raise ValueError("I hate foo")
+ ... return [TC('runTest')]
+ ...
+ >>> res = unittest.TestResult()
+ >>> case = TestPluginFoo('test_foo')
+ >>> _ = case(res)
+ >>> res.errors
+ []
+ >>> res.failures
+ []
+ >>> res.wasSuccessful()
+ True
+ >>> res.testsRun
+ 1
+
+And here is a more complex example of testing a plugin that has extra
+arguments and reads environment variables.
+
+ >>> import unittest, os
+ >>> from nose.plugins import Plugin, PluginTester
+ >>> class FancyOutputter(Plugin):
+ ... name = "fancy"
+ ... def configure(self, options, conf):
+ ... Plugin.configure(self, options, conf)
+ ... if not self.enabled:
+ ... return
+ ... self.fanciness = 1
+ ... if options.more_fancy:
+ ... self.fanciness = 2
+ ... if 'EVEN_FANCIER' in self.env:
+ ... self.fanciness = 3
+ ...
+ ... def options(self, parser, env=os.environ):
+ ... self.env = env
+ ... parser.add_option('--more-fancy', action='store_true')
+ ... Plugin.options(self, parser, env=env)
+ ...
+ ... def report(self, stream):
+ ... stream.write("FANCY " * self.fanciness)
+ ...
+ >>> class TestFancyOutputter(PluginTester, unittest.TestCase):
+ ... activate = '--with-fancy' # enables the plugin
+ ... plugins = [FancyOutputter()]
+ ... args = ['--more-fancy']
+ ... env = {'EVEN_FANCIER': '1'}
+ ...
+ ... def test_fancy_output(self):
+ ... assert "FANCY FANCY FANCY" in self.output, (
+ ... "got: %s" % self.output)
+ ... def makeSuite(self):
+ ... class TC(unittest.TestCase):
+ ... def runTest(self):
+ ... raise ValueError("I hate fancy stuff")
+ ... return [TC('runTest')]
+ ...
+ >>> res = unittest.TestResult()
+ >>> case = TestFancyOutputter('test_fancy_output')
+ >>> _ = case(res)
+ >>> res.errors
+ []
+ >>> res.failures
+ []
+ >>> res.wasSuccessful()
+ True
+ >>> res.testsRun
+ 1
+
+"""
+
+import re
+import sys
+from warnings import warn
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+__all__ = ['PluginTester', 'run']
+
+from os import getpid
+class MultiProcessFile(object):
+ """
+ helper for testing multiprocessing
+
+ multiprocessing poses a problem for doctests, since the strategy
+ of replacing sys.stdout/stderr with file-like objects then
+ inspecting the results won't work: the child processes will
+ write to the objects, but the data will not be reflected
+ in the parent doctest-ing process.
+
+ The solution is to create file-like objects which will interact with
+ multiprocessing in a more desirable way.
+
+ All processes can write to this object, but only the creator can read.
+ This allows the testing system to see a unified picture of I/O.
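+
+ For example (a sketch)::
+
+     buf = MultiProcessFile()
+     buf.write("hello from any process\n")  # safe in child processes too
+     print buf.getvalue()  # only the creating process sees the output
+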
+ """
+ def __init__(self):
+ # per advice at:
+ # http://docs.python.org/library/multiprocessing.html#all-platforms
+ self.__master = getpid()
+ self.__queue = Manager().Queue()
+ self.__buffer = StringIO()
+ self.softspace = 0
+
+ def buffer(self):
+ if getpid() != self.__master:
+ return
+
+ from Queue import Empty
+ from collections import defaultdict
+ cache = defaultdict(str)
+ while True:
+ try:
+ pid, data = self.__queue.get_nowait()
+ except Empty:
+ break
+ if pid == ():
+ #show parent output after children
+ #this is what users see, usually
+ pid = ( 1e100, ) # googol!
+ cache[pid] += data
+ for pid in sorted(cache):
+ #self.__buffer.write( '%s wrote: %r\n' % (pid, cache[pid]) ) #DEBUG
+ self.__buffer.write( cache[pid] )
+ def write(self, data):
+ # note that these pids are in the form of current_process()._identity
+ # rather than OS pids
+ from multiprocessing import current_process
+ pid = current_process()._identity
+ self.__queue.put((pid, data))
+ def __iter__(self):
+ "getattr doesn't work for iter()"
+ self.buffer()
+ return self.__buffer
+ def seek(self, offset, whence=0):
+ self.buffer()
+ return self.__buffer.seek(offset, whence)
+ def getvalue(self):
+ self.buffer()
+ return self.__buffer.getvalue()
+ def __getattr__(self, attr):
+ return getattr(self.__buffer, attr)
+
+try:
+ from multiprocessing import Manager
+ Buffer = MultiProcessFile
+except ImportError:
+ Buffer = StringIO
+
+class PluginTester(object):
+ """A mixin for testing nose plugins in their runtime environment.
+
+ Subclass this and mix in unittest.TestCase to run integration/functional
+ tests on your plugin. When setUp() is called, the stub test suite is
+ executed with your plugin so that during an actual test you can inspect the
+ artifacts of how your plugin interacted with the stub test suite.
+
+ - activate
+
+ - the argument to send nosetests to activate the plugin
+
+ - suitepath
+
+ - if set, this is the path of the suite to test. Otherwise, you
+ will need to use the hook, makeSuite()
+
+ - plugins
+
+ - the list of plugins to make available during the run. Note
+ that this does not mean these plugins will be *enabled* during
+ the run -- only the plugins enabled by the activate argument
+ or other settings in argv or env will be enabled.
+
+ - args
+
+ - a list of arguments to add to the nosetests command, in addition to
+ the activate argument
+
+ - env
+
+ - optional dict of environment variables to send nosetests
+
+ """
+ activate = None
+ suitepath = None
+ args = None
+ env = {}
+ argv = None
+ plugins = []
+ ignoreFiles = None
+
+ def makeSuite(self):
+ """returns a suite object of tests to run (unittest.TestSuite())
+
+ If self.suitepath is None, this must be implemented. The returned suite
+ object will be executed with all plugins activated. It may return
+ None.
+
+ Here is an example of a basic suite object you can return ::
+
+ >>> import unittest
+ >>> class SomeTest(unittest.TestCase):
+ ... def runTest(self):
+ ... raise ValueError("Now do something, plugin!")
+ ...
+ >>> unittest.TestSuite([SomeTest()]) # doctest: +ELLIPSIS
+ <unittest...TestSuite tests=[<...SomeTest testMethod=runTest>]>
+
+ """
+ raise NotImplementedError
+
+ def _execPlugin(self):
+ """execute the plugin on the internal test suite.
+ """
+ from nose.config import Config
+ from nose.core import TestProgram
+ from nose.plugins.manager import PluginManager
+
+ suite = None
+ stream = Buffer()
+ conf = Config(env=self.env,
+ stream=stream,
+ plugins=PluginManager(plugins=self.plugins))
+ if self.ignoreFiles is not None:
+ conf.ignoreFiles = self.ignoreFiles
+ if not self.suitepath:
+ suite = self.makeSuite()
+
+ self.nose = TestProgram(argv=self.argv, config=conf, suite=suite,
+ exit=False)
+ self.output = AccessDecorator(stream)
+
+ def setUp(self):
+ """runs nosetests with the specified test suite, all plugins
+ activated.
+ """
+ self.argv = ['nosetests', self.activate]
+ if self.args:
+ self.argv.extend(self.args)
+ if self.suitepath:
+ self.argv.append(self.suitepath)
+
+ self._execPlugin()
+
+
+class AccessDecorator(object):
+ stream = None
+ _buf = None
+ def __init__(self, stream):
+ self.stream = stream
+ stream.seek(0)
+ self._buf = stream.read()
+ stream.seek(0)
+ def __contains__(self, val):
+ return val in self._buf
+ def __iter__(self):
+ return iter(self.stream)
+ def __str__(self):
+ return self._buf
+
+
+def blankline_separated_blocks(text):
+ "a bunch of === characters is also considered a blank line"
+ block = []
+ for line in text.splitlines(True):
+ block.append(line)
+ line = line.strip()
+ if not line or line.startswith('===') and not line.strip('='):
+ yield "".join(block)
+ block = []
+ if block:
+ yield "".join(block)
+
+
+def remove_stack_traces(out):
+ # this regexp taken from Python 2.5's doctest
+ traceback_re = re.compile(r"""
+ # Grab the traceback header. Different versions of Python have
+ # said different things on the first traceback line.
+ ^(?P<hdr> Traceback\ \(
+ (?: most\ recent\ call\ last
+ | innermost\ last
+ ) \) :
+ )
+ \s* $ # toss trailing whitespace on the header.
+ (?P<stack> .*?) # don't blink: absorb stuff until...
+ ^(?=\w) # a line *starts* with alphanum.
+ .*?(?P<exception> \w+ ) # exception name
+ (?P<msg> [:\n] .*) # the rest
+ """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+ blocks = []
+ for block in blankline_separated_blocks(out):
+ blocks.append(traceback_re.sub(r"\g<hdr>\n...\n\g<exception>\g<msg>", block))
+ return "".join(blocks)
+
+
+def simplify_warnings(out):
+ warn_re = re.compile(r"""
+ # Cut the file and line no, up to the warning name
+ ^.*:\d+:\s
+ (?P<category>\w+): \s+ # warning category
+ (?P<detail>.+) $ \n? # warning message
+ ^ .* $ # stack frame
+ """, re.VERBOSE | re.MULTILINE)
+ return warn_re.sub(r"\g<category>: \g<detail>", out)
+
+
+def remove_timings(out):
+ return re.sub(
+ r"Ran (\d+ tests?) in [0-9.]+s", r"Ran \1 in ...s", out)
+
+
+def munge_nose_output_for_doctest(out):
+ """Modify nose output to make it easy to use in doctests."""
+ out = remove_stack_traces(out)
+ out = simplify_warnings(out)
+ out = remove_timings(out)
+ return out.strip()
+
+
+def run(*arg, **kw):
+ """
+ Specialized version of nose.run for use inside of doctests that
+ test test runs.
+
+ This version of run() prints the result output to stdout. Before
+ printing, the output is processed by replacing the timing
+ information with an ellipsis (...), removing traceback stacks, and
+ removing trailing whitespace.
+
+ Use this version of run wherever you are writing a doctest that
+ tests nose (or unittest) test result output.
+
+ Note: do not use doctest: +ELLIPSIS when testing nose output,
+ since ellipses ("test_foo ... ok") in your expected test runner
+ output may match multiple lines of output, causing spurious test
+ passes!
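+
+ For example (a sketch; ``some_test_module`` is hypothetical)::
+
+     from nose.plugins.plugintest import run_buffered
+     run_buffered(argv=['nosetests', '-v', 'some_test_module'])
+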
+ """
+ from nose import run
+ from nose.config import Config
+ from nose.plugins.manager import PluginManager
+
+ buffer = Buffer()
+ if 'config' not in kw:
+ plugins = kw.pop('plugins', [])
+ if isinstance(plugins, list):
+ plugins = PluginManager(plugins=plugins)
+ env = kw.pop('env', {})
+ kw['config'] = Config(env=env, plugins=plugins)
+ if 'argv' not in kw:
+ kw['argv'] = ['nosetests', '-v']
+ kw['config'].stream = buffer
+
+ # Set up buffering so that all output goes to our buffer,
+ # or warn user if deprecated behavior is active. If this is not
+ # done, prints and warnings will either be out of place or
+ # disappear.
+ stderr = sys.stderr
+ stdout = sys.stdout
+ if kw.pop('buffer_all', False):
+ sys.stdout = sys.stderr = buffer
+ restore = True
+ else:
+ restore = False
+ warn("The behavior of nose.plugins.plugintest.run() will change in "
+ "the next release of nose. The current behavior does not "
+ "correctly account for output to stdout and stderr. To enable "
+ "correct behavior, use run_buffered() instead, or pass "
+ "the keyword argument buffer_all=True to run().",
+ DeprecationWarning, stacklevel=2)
+ try:
+ run(*arg, **kw)
+ finally:
+ if restore:
+ sys.stderr = stderr
+ sys.stdout = stdout
+ out = buffer.getvalue()
+ print munge_nose_output_for_doctest(out)
+
+
+def run_buffered(*arg, **kw):
+ kw['buffer_all'] = True
+ run(*arg, **kw)
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/lib/spack/external/nose/plugins/prof.py b/lib/spack/external/nose/plugins/prof.py
new file mode 100644
index 0000000000..4d304a934b
--- /dev/null
+++ b/lib/spack/external/nose/plugins/prof.py
@@ -0,0 +1,154 @@
+"""This plugin will run tests using the hotshot profiler, which is part
+of the standard library. To turn it on, use the ``--with-profile`` option
+or set the NOSE_WITH_PROFILE environment variable. Profiler output can be
+controlled with the ``--profile-sort`` and ``--profile-restrict`` options,
+and the profiler output file may be changed with ``--profile-stats-file``.
+
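+For example (a sketch)::
+
+    % nosetests --with-profile --profile-sort=time
+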
+See the `hotshot documentation`_ in the standard library documentation for
+more details on the various output options.
+
+.. _hotshot documentation: http://docs.python.org/library/hotshot.html
+"""
+
+try:
+ import hotshot
+ from hotshot import stats
+except ImportError:
+ hotshot, stats = None, None
+import logging
+import os
+import sys
+import tempfile
+from nose.plugins.base import Plugin
+from nose.util import tolist
+
+log = logging.getLogger('nose.plugins')
+
+class Profile(Plugin):
+ """
+ Use this plugin to run tests using the hotshot profiler.
+ """
+ pfile = None
+ clean_stats_file = False
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ if not self.available():
+ return
+ Plugin.options(self, parser, env)
+ parser.add_option('--profile-sort', action='store', dest='profile_sort',
+ default=env.get('NOSE_PROFILE_SORT', 'cumulative'),
+ metavar="SORT",
+ help="Set sort order for profiler output")
+ parser.add_option('--profile-stats-file', action='store',
+ dest='profile_stats_file',
+ metavar="FILE",
+ default=env.get('NOSE_PROFILE_STATS_FILE'),
+ help='Profiler stats file; default is a new '
+ 'temp file on each run')
+ parser.add_option('--profile-restrict', action='append',
+ dest='profile_restrict',
+ metavar="RESTRICT",
+ default=env.get('NOSE_PROFILE_RESTRICT'),
+ help="Restrict profiler output. See help for "
+ "pstats.Stats for details")
+
+ def available(cls):
+ return hotshot is not None
+ available = classmethod(available)
+
+ def begin(self):
+ """Create profile stats file and load profiler.
+ """
+ if not self.available():
+ return
+ self._create_pfile()
+ self.prof = hotshot.Profile(self.pfile)
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ if not self.available():
+ self.enabled = False
+ return
+ Plugin.configure(self, options, conf)
+ self.conf = conf
+ if options.profile_stats_file:
+ self.pfile = options.profile_stats_file
+ self.clean_stats_file = False
+ else:
+ self.pfile = None
+ self.clean_stats_file = True
+ self.fileno = None
+ self.sort = options.profile_sort
+ self.restrict = tolist(options.profile_restrict)
+
+ def prepareTest(self, test):
+ """Wrap entire test run in :func:`prof.runcall`.
+ """
+ if not self.available():
+ return
+ log.debug('preparing test %s', test)
+ def run_and_profile(result, prof=self.prof, test=test):
+ self._create_pfile()
+ prof.runcall(test, result)
+ return run_and_profile
+
+ def report(self, stream):
+ """Output profiler report.
+ """
+ log.debug('printing profiler report')
+ self.prof.close()
+ prof_stats = stats.load(self.pfile)
+ prof_stats.sort_stats(self.sort)
+
+ # 2.5 has completely different stream handling from 2.4 and earlier.
+ # Before 2.5, stats objects have no stream attribute; in 2.5 and later
+ # a reference to sys.stdout is stored before we can tweak it.
+ compat_25 = hasattr(prof_stats, 'stream')
+ if compat_25:
+ tmp = prof_stats.stream
+ prof_stats.stream = stream
+ else:
+ tmp = sys.stdout
+ sys.stdout = stream
+ try:
+ if self.restrict:
+ log.debug('setting profiler restriction to %s', self.restrict)
+ prof_stats.print_stats(*self.restrict)
+ else:
+ prof_stats.print_stats()
+ finally:
+ if compat_25:
+ prof_stats.stream = tmp
+ else:
+ sys.stdout = tmp
+
+ def finalize(self, result):
+ """Clean up stats file, if configured to do so.
+ """
+ if not self.available():
+ return
+ try:
+ self.prof.close()
+ except AttributeError:
+ # TODO: is this trying to catch just the case where not
+ # hasattr(self.prof, "close")? If so, the function call should be
+ # moved out of the try: suite.
+ pass
+ if self.clean_stats_file:
+ if self.fileno:
+ try:
+ os.close(self.fileno)
+ except OSError:
+ pass
+ try:
+ os.unlink(self.pfile)
+ except OSError:
+ pass
+ return None
+
+ def _create_pfile(self):
+ if not self.pfile:
+ self.fileno, self.pfile = tempfile.mkstemp()
+ self.clean_stats_file = True
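
For reference, the hotshot workflow this plugin wraps around the test run looks
roughly like the sketch below; ``run_tests`` is a hypothetical stand-in for the
wrapped suite call and the stats file name is illustrative::

    import hotshot
    from hotshot import stats

    def run_tests():
        pass  # placeholder for the wrapped test run

    prof = hotshot.Profile('nose.prof')    # begin() builds this from pfile
    prof.runcall(run_tests)                # prepareTest wraps the whole run
    prof.close()

    prof_stats = stats.load('nose.prof')   # report() reloads the stats file
    prof_stats.sort_stats('cumulative')    # the --profile-sort default
    prof_stats.print_stats()               # may be restricted via --profile-restrict
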
diff --git a/lib/spack/external/nose/plugins/skip.py b/lib/spack/external/nose/plugins/skip.py
new file mode 100644
index 0000000000..9d1ac8f604
--- /dev/null
+++ b/lib/spack/external/nose/plugins/skip.py
@@ -0,0 +1,63 @@
+"""
+This plugin installs a SKIP error class for the SkipTest exception.
+When SkipTest is raised, the exception will be logged in the skipped
+attribute of the result, 'S' or 'SKIP' (verbose) will be output, and
+the exception will not be counted as an error or failure. This plugin
+is enabled by default but may be disabled with the ``--no-skip`` option.
+"""
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+
+# on SkipTest:
+# - unittest SkipTest is first preference, but it's only available
+# for >= 2.7
+# - unittest2 SkipTest is second preference for older pythons. This
+# mirrors logic for choosing SkipTest exception in testtools
+# - if none of the above, provide custom class
+try:
+ from unittest.case import SkipTest
+except ImportError:
+ try:
+ from unittest2.case import SkipTest
+ except ImportError:
+ class SkipTest(Exception):
+ """Raise this exception to mark a test as skipped.
+ """
+ pass
+
+
+class Skip(ErrorClassPlugin):
+ """
+ Plugin that installs a SKIP error class for the SkipTest
+ exception. When SkipTest is raised, the exception will be logged
+ in the skipped attribute of the result, 'S' or 'SKIP' (verbose)
+ will be output, and the exception will not be counted as an error
+ or failure.
+ """
+ enabled = True
+ skipped = ErrorClass(SkipTest,
+ label='SKIP',
+ isfailure=False)
+
+ def options(self, parser, env):
+ """
+ Add my options to command line.
+ """
+ env_opt = 'NOSE_WITHOUT_SKIP'
+ parser.add_option('--no-skip', action='store_true',
+ dest='noSkip', default=env.get(env_opt, False),
+ help="Disable special handling of SkipTest "
+ "exceptions.")
+
+ def configure(self, options, conf):
+ """
+ Configure plugin. Skip plugin is enabled by default.
+ """
+ if not self.can_configure:
+ return
+ self.conf = conf
+ disable = getattr(options, 'noSkip', False)
+ if disable:
+ self.enabled = False
+
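
A test opts into this handling simply by raising SkipTest; a minimal sketch::

    from nose.plugins.skip import SkipTest

    def test_requires_network():
        # reported as 'S' (or 'SKIP' in verbose mode), not as a failure
        raise SkipTest('no network available')
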
diff --git a/lib/spack/external/nose/plugins/testid.py b/lib/spack/external/nose/plugins/testid.py
new file mode 100644
index 0000000000..ae8119bd01
--- /dev/null
+++ b/lib/spack/external/nose/plugins/testid.py
@@ -0,0 +1,311 @@
+"""
+This plugin adds a test id (like #1) to each test name output. After
+you've run once to generate test ids, you can re-run individual
+tests by activating the plugin and passing the ids (with or
+without the # prefix) instead of test names.
+
+For example, if your normal test run looks like::
+
+ % nosetests -v
+ tests.test_a ... ok
+ tests.test_b ... ok
+ tests.test_c ... ok
+
+When adding ``--with-id`` you'll see::
+
+ % nosetests -v --with-id
+ #1 tests.test_a ... ok
+ #2 tests.test_b ... ok
+ #3 tests.test_c ... ok
+
+Then you can re-run individual tests by supplying just an id number::
+
+ % nosetests -v --with-id 2
+ #2 tests.test_b ... ok
+
+You can also pass multiple id numbers::
+
+ % nosetests -v --with-id 2 3
+ #2 tests.test_b ... ok
+ #3 tests.test_c ... ok
+
+Since most shells consider '#' a special character, you can leave it out when
+specifying a test id.
+
+Note that when run without the -v switch, no special output is displayed, but
+the ids file is still written.
+
+Looping over failed tests
+-------------------------
+
+This plugin also adds a mode that will direct the test runner to record
+failed tests. Subsequent test runs will then run only the tests that failed
+last time. Activate this mode with the ``--failed`` switch::
+
+ % nosetests -v --failed
+ #1 test.test_a ... ok
+ #2 test.test_b ... ERROR
+ #3 test.test_c ... FAILED
+ #4 test.test_d ... ok
+
+On the second run, only tests #2 and #3 will run::
+
+ % nosetests -v --failed
+ #2 test.test_b ... ERROR
+ #3 test.test_c ... FAILED
+
+As you correct errors and tests pass, they'll drop out of subsequent runs.
+
+First::
+
+ % nosetests -v --failed
+ #2 test.test_b ... ok
+ #3 test.test_c ... FAILED
+
+Second::
+
+ % nosetests -v --failed
+ #3 test.test_c ... FAILED
+
+When all tests pass, the full set will run on the next invocation.
+
+First::
+
+ % nosetests -v --failed
+ #3 test.test_c ... ok
+
+Second::
+
+ % nosetests -v --failed
+ #1 test.test_a ... ok
+ #2 test.test_b ... ok
+ #3 test.test_c ... ok
+ #4 test.test_d ... ok
+
+.. note ::
+
+ If you expect to use ``--failed`` regularly, it's a good idea to always run
+ using the ``--with-id`` option. This will ensure that an id file is always
+ created, allowing you to add ``--failed`` to the command line as soon as
+ you have failing tests. Otherwise, your first run using ``--failed`` will
+ (perhaps surprisingly) run *all* tests, because there won't be an id file
+ containing the record of failed tests from your previous run.
+
+"""
+__test__ = False
+
+import logging
+import os
+from nose.plugins import Plugin
+from nose.util import src, set
+
+try:
+ from cPickle import dump, load
+except ImportError:
+ from pickle import dump, load
+
+log = logging.getLogger(__name__)
+
+
+class TestId(Plugin):
+ """
+ Activate to add a test id (like #1) to each test name output. Activate
+ with --failed to rerun failing tests only.
+ """
+ name = 'id'
+ idfile = None
+ collecting = True
+ loopOnFailed = False
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ Plugin.options(self, parser, env)
+ parser.add_option('--id-file', action='store', dest='testIdFile',
+ default='.noseids', metavar="FILE",
+ help="Store test ids found in test runs in this "
+ "file. Default is the file .noseids in the "
+ "working directory.")
+ parser.add_option('--failed', action='store_true',
+ dest='failed', default=False,
+ help="Run the tests that failed in the last "
+ "test run.")
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ Plugin.configure(self, options, conf)
+ if options.failed:
+ self.enabled = True
+ self.loopOnFailed = True
+ log.debug("Looping on failed tests")
+ self.idfile = os.path.expanduser(options.testIdFile)
+ if not os.path.isabs(self.idfile):
+ self.idfile = os.path.join(conf.workingDir, self.idfile)
+ self.id = 1
+ # Ids and tests are mirror images: ids are {id: test address} and
+ # tests are {test address: id}
+ self.ids = {}
+ self.tests = {}
+ self.failed = []
+ self.source_names = []
+ # used to track ids seen when tests is filled from
+ # loaded ids file
+ self._seen = {}
+ self._write_hashes = conf.verbosity >= 2
+
+ def finalize(self, result):
+ """Save new ids file, if needed.
+ """
+ if result.wasSuccessful():
+ self.failed = []
+ if self.collecting:
+ ids = dict(list(zip(list(self.tests.values()), list(self.tests.keys()))))
+ else:
+ ids = self.ids
+ fh = open(self.idfile, 'wb')
+ dump({'ids': ids,
+ 'failed': self.failed,
+ 'source_names': self.source_names}, fh)
+ fh.close()
+ log.debug('Saved test ids: %s, failed %s to %s',
+ ids, self.failed, self.idfile)
+
+ def loadTestsFromNames(self, names, module=None):
+ """Translate ids in the list of requested names into their
+ test addresses, if they are found in my dict of tests.
+ """
+ log.debug('ltfn %s %s', names, module)
+ try:
+ fh = open(self.idfile, 'rb')
+ data = load(fh)
+ if 'ids' in data:
+ self.ids = data['ids']
+ self.failed = data['failed']
+ self.source_names = data['source_names']
+ else:
+ # old ids field
+ self.ids = data
+ self.failed = []
+ self.source_names = names
+ if self.ids:
+ self.id = max(self.ids) + 1
+ self.tests = dict(list(zip(list(self.ids.values()), list(self.ids.keys()))))
+ else:
+ self.id = 1
+ log.debug(
+ 'Loaded test ids %s tests %s failed %s sources %s from %s',
+ self.ids, self.tests, self.failed, self.source_names,
+ self.idfile)
+ fh.close()
+ except ValueError, e:
+ # load() may throw a ValueError when reading the ids file, if it
+ # was generated with a newer version of Python than we are currently
+ # running.
+ log.debug('Error loading %s : %s', self.idfile, str(e))
+ except IOError:
+ log.debug('IO error reading %s', self.idfile)
+
+ if self.loopOnFailed and self.failed:
+ self.collecting = False
+ names = self.failed
+ self.failed = []
+ # I don't load any tests myself, only translate names like '#2'
+ # into the associated test addresses
+ translated = []
+ new_source = []
+ really_new = []
+ for name in names:
+ trans = self.tr(name)
+ if trans != name:
+ translated.append(trans)
+ else:
+ new_source.append(name)
+ # names that are not ids and that are not in the current
+ # list of source names go into the list for next time
+ if new_source:
+ new_set = set(new_source)
+ old_set = set(self.source_names)
+ log.debug("old: %s new: %s", old_set, new_set)
+ really_new = [s for s in new_source
+ if not s in old_set]
+ if really_new:
+ # remember new sources
+ self.source_names.extend(really_new)
+ if not translated:
+ # new set of source names, no translations
+ # means "run the requested tests"
+ names = new_source
+ else:
+ # no new names to translate and add to id set
+ self.collecting = False
+ log.debug("translated: %s new sources %s names %s",
+ translated, really_new, names)
+ return (None, translated + really_new or names)
+
+ def makeName(self, addr):
+ log.debug("Make name %s", addr)
+ filename, module, call = addr
+ if filename is not None:
+ head = src(filename)
+ else:
+ head = module
+ if call is not None:
+ return "%s:%s" % (head, call)
+ return head
+
+ def setOutputStream(self, stream):
+ """Get handle on output stream so the plugin can print id #s
+ """
+ self.stream = stream
+
+ def startTest(self, test):
+ """Maybe output an id # before the test name.
+
+ Example output::
+
+ #1 test.test ... ok
+ #2 test.test_two ... ok
+
+ """
+ adr = test.address()
+ log.debug('start test %s (%s)', adr, adr in self.tests)
+ if adr in self.tests:
+ if adr in self._seen:
+ self.write(' ')
+ else:
+ self.write('#%s ' % self.tests[adr])
+ self._seen[adr] = 1
+ return
+ self.tests[adr] = self.id
+ self.write('#%s ' % self.id)
+ self.id += 1
+
+ def afterTest(self, test):
+ # None means test never ran, False means failed/err
+ if test.passed is False:
+ try:
+ key = str(self.tests[test.address()])
+ except KeyError:
+ # never saw this test -- startTest didn't run
+ pass
+ else:
+ if key not in self.failed:
+ self.failed.append(key)
+
+ def tr(self, name):
+ log.debug("tr '%s'", name)
+ try:
+ key = int(name.replace('#', ''))
+ except ValueError:
+ return name
+ log.debug("Got key %s", key)
+ # I'm running tests mapped from the ids file,
+ # not collecting new ones
+ if key in self.ids:
+ return self.makeName(self.ids[key])
+ return name
+
+ def write(self, output):
+ if self._write_hashes:
+ self.stream.write(output)
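
The ids file written by finalize() is a pickle of a dict with ``ids``,
``failed`` and ``source_names`` keys; a minimal sketch of inspecting it,
assuming the default ``.noseids`` in the working directory::

    import pickle

    with open('.noseids', 'rb') as fh:
        data = pickle.load(fh)

    print(data['ids'])           # {id: (filename, module, call) test address}
    print(data['failed'])        # ids (as strings) of tests that failed last run
    print(data['source_names'])  # non-id names passed on earlier command lines
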
diff --git a/lib/spack/external/nose/plugins/xunit.py b/lib/spack/external/nose/plugins/xunit.py
new file mode 100644
index 0000000000..90b52f5f61
--- /dev/null
+++ b/lib/spack/external/nose/plugins/xunit.py
@@ -0,0 +1,341 @@
+"""This plugin provides test results in the standard XUnit XML format.
+
+It's designed for the `Jenkins`_ (previously Hudson) continuous build
+system, but will probably work for anything else that understands an
+XUnit-formatted XML representation of test results.
+
+Add this shell command to your builder ::
+
+ nosetests --with-xunit
+
+And by default a file named nosetests.xml will be written to the
+working directory.
+
+In a Jenkins builder, tick the box named "Publish JUnit test result report"
+under the Post-build Actions and enter this value for Test report XMLs::
+
+ **/nosetests.xml
+
+If you need to change the name or location of the file, you can set the
+``--xunit-file`` option.
+
+If you need to change the name of the test suite, you can set the
+``--xunit-testsuite-name`` option.
+
+Here is an abbreviated version of what an XML test report might look like::
+
+ <?xml version="1.0" encoding="UTF-8"?>
+ <testsuite name="nosetests" tests="1" errors="1" failures="0" skip="0">
+ <testcase classname="path_to_test_suite.TestSomething"
+ name="test_it" time="0">
+ <error type="exceptions.TypeError" message="oops, wrong type">
+ Traceback (most recent call last):
+ ...
+ TypeError: oops, wrong type
+ </error>
+ </testcase>
+ </testsuite>
+
+.. _Jenkins: http://jenkins-ci.org/
+
+"""
+import codecs
+import doctest
+import os
+import sys
+import traceback
+import re
+import inspect
+from StringIO import StringIO
+from time import time
+from xml.sax import saxutils
+
+from nose.plugins.base import Plugin
+from nose.exc import SkipTest
+from nose.pyversion import force_unicode, format_exception
+
+# Invalid XML characters, control characters 0-31 sans \t, \n and \r
+CONTROL_CHARACTERS = re.compile(r"[\000-\010\013\014\016-\037]")
+
+TEST_ID = re.compile(r'^(.*?)(\(.*\))$')
+
+def xml_safe(value):
+ """Replaces invalid XML characters with '?'."""
+ return CONTROL_CHARACTERS.sub('?', value)
+
+def escape_cdata(cdata):
+ """Escape a string for an XML CDATA section."""
+ return xml_safe(cdata).replace(']]>', ']]>]]&gt;<![CDATA[')
+
+def id_split(idval):
+ m = TEST_ID.match(idval)
+ if m:
+ name, fargs = m.groups()
+ head, tail = name.rsplit(".", 1)
+ return [head, tail+fargs]
+ else:
+ return idval.rsplit(".", 1)
+
+def nice_classname(obj):
+ """Returns a nice name for class object or class instance.
+
+ >>> nice_classname(Exception()) # doctest: +ELLIPSIS
+ '...Exception'
+ >>> nice_classname(Exception) # doctest: +ELLIPSIS
+ '...Exception'
+
+ """
+ if inspect.isclass(obj):
+ cls_name = obj.__name__
+ else:
+ cls_name = obj.__class__.__name__
+ mod = inspect.getmodule(obj)
+ if mod:
+ name = mod.__name__
+ # jython
+ if name.startswith('org.python.core.'):
+ name = name[len('org.python.core.'):]
+ return "%s.%s" % (name, cls_name)
+ else:
+ return cls_name
+
+def exc_message(exc_info):
+ """Return the exception's message."""
+ exc = exc_info[1]
+ if exc is None:
+ # str exception
+ result = exc_info[0]
+ else:
+ try:
+ result = str(exc)
+ except UnicodeEncodeError:
+ try:
+ result = unicode(exc)
+ except UnicodeError:
+ # Fallback to args as neither str nor
+ # unicode(Exception(u'\xe6')) work in Python < 2.6
+ result = exc.args[0]
+ result = force_unicode(result, 'UTF-8')
+ return xml_safe(result)
+
+class Tee(object):
+ def __init__(self, encoding, *args):
+ self._encoding = encoding
+ self._streams = args
+
+ def write(self, data):
+ data = force_unicode(data, self._encoding)
+ for s in self._streams:
+ s.write(data)
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def flush(self):
+ for s in self._streams:
+ s.flush()
+
+ def isatty(self):
+ return False
+
+
+class Xunit(Plugin):
+ """This plugin provides test results in the standard XUnit XML format."""
+ name = 'xunit'
+ score = 1500
+ encoding = 'UTF-8'
+ error_report_file = None
+
+ def __init__(self):
+ super(Xunit, self).__init__()
+ self._capture_stack = []
+ self._currentStdout = None
+ self._currentStderr = None
+
+ def _timeTaken(self):
+ if hasattr(self, '_timer'):
+ taken = time() - self._timer
+ else:
+            # test died before it ran (probably an error in setup()),
+            # or success/failure was added before the test started,
+            # probably due to custom TestResult munging
+ taken = 0.0
+ return taken
+
+ def _quoteattr(self, attr):
+ """Escape an XML attribute. Value can be unicode."""
+ attr = xml_safe(attr)
+ return saxutils.quoteattr(attr)
+
+ def options(self, parser, env):
+ """Sets additional command line options."""
+ Plugin.options(self, parser, env)
+ parser.add_option(
+ '--xunit-file', action='store',
+ dest='xunit_file', metavar="FILE",
+ default=env.get('NOSE_XUNIT_FILE', 'nosetests.xml'),
+ help=("Path to xml file to store the xunit report in. "
+ "Default is nosetests.xml in the working directory "
+ "[NOSE_XUNIT_FILE]"))
+
+ parser.add_option(
+ '--xunit-testsuite-name', action='store',
+ dest='xunit_testsuite_name', metavar="PACKAGE",
+ default=env.get('NOSE_XUNIT_TESTSUITE_NAME', 'nosetests'),
+ help=("Name of the testsuite in the xunit xml, generated by plugin. "
+ "Default test suite name is nosetests."))
+
+ def configure(self, options, config):
+ """Configures the xunit plugin."""
+ Plugin.configure(self, options, config)
+ self.config = config
+ if self.enabled:
+ self.stats = {'errors': 0,
+ 'failures': 0,
+ 'passes': 0,
+ 'skipped': 0
+ }
+ self.errorlist = []
+ self.error_report_file_name = os.path.realpath(options.xunit_file)
+ self.xunit_testsuite_name = options.xunit_testsuite_name
+
+ def report(self, stream):
+ """Writes an Xunit-formatted XML file
+
+ The file includes a report of test errors and failures.
+
+ """
+ self.error_report_file = codecs.open(self.error_report_file_name, 'w',
+ self.encoding, 'replace')
+ self.stats['encoding'] = self.encoding
+ self.stats['testsuite_name'] = self.xunit_testsuite_name
+ self.stats['total'] = (self.stats['errors'] + self.stats['failures']
+ + self.stats['passes'] + self.stats['skipped'])
+ self.error_report_file.write(
+ u'<?xml version="1.0" encoding="%(encoding)s"?>'
+ u'<testsuite name="%(testsuite_name)s" tests="%(total)d" '
+ u'errors="%(errors)d" failures="%(failures)d" '
+ u'skip="%(skipped)d">' % self.stats)
+ self.error_report_file.write(u''.join([force_unicode(e, self.encoding)
+ for e in self.errorlist]))
+ self.error_report_file.write(u'</testsuite>')
+ self.error_report_file.close()
+ if self.config.verbosity > 1:
+ stream.writeln("-" * 70)
+ stream.writeln("XML: %s" % self.error_report_file.name)
+
+ def _startCapture(self):
+ self._capture_stack.append((sys.stdout, sys.stderr))
+ self._currentStdout = StringIO()
+ self._currentStderr = StringIO()
+ sys.stdout = Tee(self.encoding, self._currentStdout, sys.stdout)
+ sys.stderr = Tee(self.encoding, self._currentStderr, sys.stderr)
+
+ def startContext(self, context):
+ self._startCapture()
+
+ def stopContext(self, context):
+ self._endCapture()
+
+ def beforeTest(self, test):
+ """Initializes a timer before starting a test."""
+ self._timer = time()
+ self._startCapture()
+
+ def _endCapture(self):
+ if self._capture_stack:
+ sys.stdout, sys.stderr = self._capture_stack.pop()
+
+ def afterTest(self, test):
+ self._endCapture()
+ self._currentStdout = None
+ self._currentStderr = None
+
+ def finalize(self, test):
+ while self._capture_stack:
+ self._endCapture()
+
+ def _getCapturedStdout(self):
+ if self._currentStdout:
+ value = self._currentStdout.getvalue()
+ if value:
+ return '<system-out><![CDATA[%s]]></system-out>' % escape_cdata(
+ value)
+ return ''
+
+ def _getCapturedStderr(self):
+ if self._currentStderr:
+ value = self._currentStderr.getvalue()
+ if value:
+ return '<system-err><![CDATA[%s]]></system-err>' % escape_cdata(
+ value)
+ return ''
+
+ def addError(self, test, err, capt=None):
+ """Add error output to Xunit report.
+ """
+ taken = self._timeTaken()
+
+ if issubclass(err[0], SkipTest):
+ type = 'skipped'
+ self.stats['skipped'] += 1
+ else:
+ type = 'error'
+ self.stats['errors'] += 1
+
+ tb = format_exception(err, self.encoding)
+ id = test.id()
+
+ self.errorlist.append(
+ u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
+ u'<%(type)s type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
+ u'</%(type)s>%(systemout)s%(systemerr)s</testcase>' %
+ {'cls': self._quoteattr(id_split(id)[0]),
+ 'name': self._quoteattr(id_split(id)[-1]),
+ 'taken': taken,
+ 'type': type,
+ 'errtype': self._quoteattr(nice_classname(err[0])),
+ 'message': self._quoteattr(exc_message(err)),
+ 'tb': escape_cdata(tb),
+ 'systemout': self._getCapturedStdout(),
+ 'systemerr': self._getCapturedStderr(),
+ })
+
+ def addFailure(self, test, err, capt=None, tb_info=None):
+ """Add failure output to Xunit report.
+ """
+ taken = self._timeTaken()
+ tb = format_exception(err, self.encoding)
+ self.stats['failures'] += 1
+ id = test.id()
+
+ self.errorlist.append(
+ u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
+ u'<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
+ u'</failure>%(systemout)s%(systemerr)s</testcase>' %
+ {'cls': self._quoteattr(id_split(id)[0]),
+ 'name': self._quoteattr(id_split(id)[-1]),
+ 'taken': taken,
+ 'errtype': self._quoteattr(nice_classname(err[0])),
+ 'message': self._quoteattr(exc_message(err)),
+ 'tb': escape_cdata(tb),
+ 'systemout': self._getCapturedStdout(),
+ 'systemerr': self._getCapturedStderr(),
+ })
+
+ def addSuccess(self, test, capt=None):
+ """Add success output to Xunit report.
+ """
+ taken = self._timeTaken()
+ self.stats['passes'] += 1
+ id = test.id()
+ self.errorlist.append(
+ '<testcase classname=%(cls)s name=%(name)s '
+ 'time="%(taken).3f">%(systemout)s%(systemerr)s</testcase>' %
+ {'cls': self._quoteattr(id_split(id)[0]),
+ 'name': self._quoteattr(id_split(id)[-1]),
+ 'taken': taken,
+ 'systemout': self._getCapturedStdout(),
+ 'systemerr': self._getCapturedStderr(),
+ })
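
The ``classname`` and ``name`` attributes in the report come from splitting the
unittest id on its final dot, and the small helpers above keep the XML
well-formed; a sketch of their behavior, assuming they are imported from this
module::

    >>> id_split('pkg.module.TestClass.test_method')
    ['pkg.module.TestClass', 'test_method']
    >>> xml_safe('bad \x07 char')       # control characters become '?'
    'bad ? char'
    >>> escape_cdata('data ]]> more')   # ']]>' cannot appear inside CDATA
    'data ]]>]]&gt;<![CDATA[ more'
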
diff --git a/lib/spack/external/nose/proxy.py b/lib/spack/external/nose/proxy.py
new file mode 100644
index 0000000000..c2676cb195
--- /dev/null
+++ b/lib/spack/external/nose/proxy.py
@@ -0,0 +1,188 @@
+"""
+Result Proxy
+------------
+
+The result proxy wraps the result instance given to each test. It
+performs two functions: enabling extended error/failure reporting
+and calling plugins.
+
+As each result event is fired, plugins are called with the same event;
+however, plugins are called with the nose.case.Test instance that
+wraps the actual test. So when a test fails and calls
+result.addFailure(self, err), the result proxy calls
+addFailure(self.test, err) for each plugin. This allows plugins to
+have a single stable interface for all test types, and also to
+manipulate the test object itself by setting the `test` attribute of
+the nose.case.Test that they receive.
+"""
+import logging
+from nose.config import Config
+
+
+log = logging.getLogger(__name__)
+
+
+def proxied_attribute(local_attr, proxied_attr, doc):
+ """Create a property that proxies attribute ``proxied_attr`` through
+ the local attribute ``local_attr``.
+ """
+ def fget(self):
+ return getattr(getattr(self, local_attr), proxied_attr)
+ def fset(self, value):
+ setattr(getattr(self, local_attr), proxied_attr, value)
+ def fdel(self):
+ delattr(getattr(self, local_attr), proxied_attr)
+ return property(fget, fset, fdel, doc)
+
+
+class ResultProxyFactory(object):
+ """Factory for result proxies. Generates a ResultProxy bound to each test
+ and the result passed to the test.
+ """
+ def __init__(self, config=None):
+ if config is None:
+ config = Config()
+ self.config = config
+ self.__prepared = False
+ self.__result = None
+
+ def __call__(self, result, test):
+ """Return a ResultProxy for the current test.
+
+ On first call, plugins are given a chance to replace the
+ result used for the remaining tests. If a plugin returns a
+ value from prepareTestResult, that object will be used as the
+ result for all tests.
+ """
+ if not self.__prepared:
+ self.__prepared = True
+ plug_result = self.config.plugins.prepareTestResult(result)
+ if plug_result is not None:
+ self.__result = result = plug_result
+ if self.__result is not None:
+ result = self.__result
+ return ResultProxy(result, test, config=self.config)
+
+
+class ResultProxy(object):
+ """Proxy to TestResults (or other results handler).
+
+ One ResultProxy is created for each nose.case.Test. The result
+ proxy calls plugins with the nose.case.Test instance (instead of
+ the wrapped test case) as each result call is made. Finally, the
+ real result method is called, also with the nose.case.Test
+ instance as the test parameter.
+
+ """
+ def __init__(self, result, test, config=None):
+ if config is None:
+ config = Config()
+ self.config = config
+ self.plugins = config.plugins
+ self.result = result
+ self.test = test
+
+ def __repr__(self):
+ return repr(self.result)
+
+ def _prepareErr(self, err):
+ if not isinstance(err[1], Exception) and isinstance(err[0], type):
+ # Turn value back into an Exception (required in Python 3.x).
+ # Plugins do all sorts of crazy things with exception values.
+ # Convert it to a custom subclass of Exception with the same
+ # name as the actual exception to make it print correctly.
+ value = type(err[0].__name__, (Exception,), {})(err[1])
+ err = (err[0], value, err[2])
+ return err
+
+ def assertMyTest(self, test):
+        # The test I was called with must be my .test, my .test's
+        # .test, or my .test.test's ._nose_case
+
+ case = getattr(self.test, 'test', None)
+ assert (test is self.test
+ or test is case
+ or test is getattr(case, '_nose_case', None)), (
+ "ResultProxy for %r (%s) was called with test %r (%s)"
+ % (self.test, id(self.test), test, id(test)))
+
+ def afterTest(self, test):
+ self.assertMyTest(test)
+ self.plugins.afterTest(self.test)
+ if hasattr(self.result, "afterTest"):
+ self.result.afterTest(self.test)
+
+ def beforeTest(self, test):
+ self.assertMyTest(test)
+ self.plugins.beforeTest(self.test)
+ if hasattr(self.result, "beforeTest"):
+ self.result.beforeTest(self.test)
+
+ def addError(self, test, err):
+ self.assertMyTest(test)
+ plugins = self.plugins
+ plugin_handled = plugins.handleError(self.test, err)
+ if plugin_handled:
+ return
+ # test.passed is set in result, to account for error classes
+ formatted = plugins.formatError(self.test, err)
+ if formatted is not None:
+ err = formatted
+ plugins.addError(self.test, err)
+ self.result.addError(self.test, self._prepareErr(err))
+ if not self.result.wasSuccessful() and self.config.stopOnError:
+ self.shouldStop = True
+
+ def addFailure(self, test, err):
+ self.assertMyTest(test)
+ plugins = self.plugins
+ plugin_handled = plugins.handleFailure(self.test, err)
+ if plugin_handled:
+ return
+ self.test.passed = False
+ formatted = plugins.formatFailure(self.test, err)
+ if formatted is not None:
+ err = formatted
+ plugins.addFailure(self.test, err)
+ self.result.addFailure(self.test, self._prepareErr(err))
+ if self.config.stopOnError:
+ self.shouldStop = True
+
+ def addSkip(self, test, reason):
+ # 2.7 compat shim
+ from nose.plugins.skip import SkipTest
+ self.assertMyTest(test)
+ plugins = self.plugins
+ if not isinstance(reason, Exception):
+ # for Python 3.2+
+ reason = Exception(reason)
+ plugins.addError(self.test, (SkipTest, reason, None))
+ self.result.addSkip(self.test, reason)
+
+ def addSuccess(self, test):
+ self.assertMyTest(test)
+ self.plugins.addSuccess(self.test)
+ self.result.addSuccess(self.test)
+
+ def startTest(self, test):
+ self.assertMyTest(test)
+ self.plugins.startTest(self.test)
+ self.result.startTest(self.test)
+
+ def stop(self):
+ self.result.stop()
+
+ def stopTest(self, test):
+ self.assertMyTest(test)
+ self.plugins.stopTest(self.test)
+ self.result.stopTest(self.test)
+
+ # proxied attributes
+ shouldStop = proxied_attribute('result', 'shouldStop',
+ """Should the test run stop?""")
+ errors = proxied_attribute('result', 'errors',
+ """Tests that raised an exception""")
+ failures = proxied_attribute('result', 'failures',
+ """Tests that failed""")
+ testsRun = proxied_attribute('result', 'testsRun',
+ """Number of tests run""")
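
``proxied_attribute`` builds ordinary properties that read and write through
the named local attribute; a minimal standalone sketch of the same pattern,
assuming ``from nose.proxy import proxied_attribute``::

    class Inner(object):
        shouldStop = False

    class Outer(object):
        shouldStop = proxied_attribute('result', 'shouldStop', 'proxied flag')
        def __init__(self, inner):
            self.result = inner

    outer = Outer(Inner())
    outer.shouldStop = True          # stored on the wrapped Inner instance
    assert outer.result.shouldStop
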
diff --git a/lib/spack/external/nose/pyversion.py b/lib/spack/external/nose/pyversion.py
new file mode 100644
index 0000000000..091238da75
--- /dev/null
+++ b/lib/spack/external/nose/pyversion.py
@@ -0,0 +1,215 @@
+"""
+This module contains fixups for using nose under different versions of Python.
+"""
+import sys
+import os
+import traceback
+import types
+import inspect
+import nose.util
+
+__all__ = ['make_instancemethod', 'cmp_to_key', 'sort_list', 'ClassType',
+ 'TypeType', 'UNICODE_STRINGS', 'unbound_method', 'ismethod',
+ 'bytes_', 'is_base_exception', 'force_unicode', 'exc_to_unicode',
+ 'format_exception']
+
+# In Python 3.x, all strings are unicode (the call to 'unicode()' in the 2.x
+# source will be replaced with 'str()' when running 2to3, so this test will
+# then become true)
+UNICODE_STRINGS = (type(unicode()) == type(str()))
+
+if sys.version_info[:2] < (3, 0):
+ def force_unicode(s, encoding='UTF-8'):
+ try:
+ s = unicode(s)
+ except UnicodeDecodeError:
+ s = str(s).decode(encoding, 'replace')
+
+ return s
+else:
+ def force_unicode(s, encoding='UTF-8'):
+ return str(s)
+
+# new.instancemethod() is obsolete for new-style classes (Python 3.x)
+# We need to use descriptor methods instead.
+try:
+ import new
+ def make_instancemethod(function, instance):
+ return new.instancemethod(function.im_func, instance,
+ instance.__class__)
+except ImportError:
+ def make_instancemethod(function, instance):
+ return function.__get__(instance, instance.__class__)
+
+# To be forward-compatible, we do all list sorts using keys instead of cmp
+# functions. However, part of the unittest.TestLoader API involves a
+# user-providable cmp function, so we need some way to convert that.
+def cmp_to_key(mycmp):
+ 'Convert a cmp= function into a key= function'
+ class Key(object):
+ def __init__(self, obj):
+ self.obj = obj
+ def __lt__(self, other):
+ return mycmp(self.obj, other.obj) < 0
+ def __gt__(self, other):
+ return mycmp(self.obj, other.obj) > 0
+ def __eq__(self, other):
+ return mycmp(self.obj, other.obj) == 0
+ return Key
+
+# Python 2.3 also does not support list-sorting by key, so we need to convert
+# keys to cmp functions if we're running on old Python.
+if sys.version_info < (2, 4):
+ def sort_list(l, key, reverse=False):
+ if reverse:
+ return l.sort(lambda a, b: cmp(key(b), key(a)))
+ else:
+ return l.sort(lambda a, b: cmp(key(a), key(b)))
+else:
+ def sort_list(l, key, reverse=False):
+ return l.sort(key=key, reverse=reverse)
+
+# In Python 3.x, all objects are "new style" objects descended from 'type', and
+# thus types.ClassType and types.TypeType don't exist anymore. For
+# compatibility, we make sure they still work.
+if hasattr(types, 'ClassType'):
+ ClassType = types.ClassType
+ TypeType = types.TypeType
+else:
+ ClassType = type
+ TypeType = type
+
+# The following emulates the behavior (we need) of an 'unbound method' under
+# Python 3.x (namely, the ability to have a class associated with a function
+# definition so that things can do stuff based on its associated class)
+class UnboundMethod:
+ def __init__(self, cls, func):
+ # Make sure we have all the same attributes as the original function,
+ # so that the AttributeSelector plugin will work correctly...
+ self.__dict__ = func.__dict__.copy()
+ self._func = func
+ self.__self__ = UnboundSelf(cls)
+ if sys.version_info < (3, 0):
+ self.im_class = cls
+ self.__doc__ = getattr(func, '__doc__', None)
+
+ def address(self):
+ cls = self.__self__.cls
+ modname = cls.__module__
+ module = sys.modules[modname]
+ filename = getattr(module, '__file__', None)
+ if filename is not None:
+ filename = os.path.abspath(filename)
+ return (nose.util.src(filename), modname, "%s.%s" % (cls.__name__,
+ self._func.__name__))
+
+ def __call__(self, *args, **kwargs):
+ return self._func(*args, **kwargs)
+
+ def __getattr__(self, attr):
+ return getattr(self._func, attr)
+
+ def __repr__(self):
+ return '<unbound method %s.%s>' % (self.__self__.cls.__name__,
+ self._func.__name__)
+
+class UnboundSelf:
+ def __init__(self, cls):
+ self.cls = cls
+
+ # We have to do this hackery because Python won't let us override the
+ # __class__ attribute...
+ def __getattribute__(self, attr):
+ if attr == '__class__':
+ return self.cls
+ else:
+ return object.__getattribute__(self, attr)
+
+def unbound_method(cls, func):
+ if inspect.ismethod(func):
+ return func
+ if not inspect.isfunction(func):
+ raise TypeError('%s is not a function' % (repr(func),))
+ return UnboundMethod(cls, func)
+
+def ismethod(obj):
+ return inspect.ismethod(obj) or isinstance(obj, UnboundMethod)
+
+
+# Make a pseudo-bytes function that can be called without the encoding arg:
+if sys.version_info >= (3, 0):
+ def bytes_(s, encoding='utf8'):
+ if isinstance(s, bytes):
+ return s
+ return bytes(s, encoding)
+else:
+ def bytes_(s, encoding=None):
+ return str(s)
+
+
+if sys.version_info[:2] >= (2, 6):
+ def isgenerator(o):
+ if isinstance(o, UnboundMethod):
+ o = o._func
+ return inspect.isgeneratorfunction(o) or inspect.isgenerator(o)
+else:
+ try:
+ from compiler.consts import CO_GENERATOR
+ except ImportError:
+        # IronPython doesn't have a compiler module
+ CO_GENERATOR=0x20
+
+ def isgenerator(func):
+ try:
+ return func.func_code.co_flags & CO_GENERATOR != 0
+ except AttributeError:
+ return False
+
+# Make a function to help check if an exception is derived from BaseException.
+# In Python 2.4, we just use Exception instead.
+if sys.version_info[:2] < (2, 5):
+ def is_base_exception(exc):
+ return isinstance(exc, Exception)
+else:
+ def is_base_exception(exc):
+ return isinstance(exc, BaseException)
+
+if sys.version_info[:2] < (3, 0):
+ def exc_to_unicode(ev, encoding='utf-8'):
+ if is_base_exception(ev):
+ if not hasattr(ev, '__unicode__'):
+ # 2.5-
+ if not hasattr(ev, 'message'):
+ # 2.4
+ msg = len(ev.args) and ev.args[0] or ''
+ else:
+ msg = ev.message
+ msg = force_unicode(msg, encoding=encoding)
+ clsname = force_unicode(ev.__class__.__name__,
+ encoding=encoding)
+ ev = u'%s: %s' % (clsname, msg)
+ elif not isinstance(ev, unicode):
+ ev = repr(ev)
+
+ return force_unicode(ev, encoding=encoding)
+else:
+ def exc_to_unicode(ev, encoding='utf-8'):
+ return str(ev)
+
+def format_exception(exc_info, encoding='UTF-8'):
+ ec, ev, tb = exc_info
+
+ # Our exception object may have been turned into a string, and Python 3's
+ # traceback.format_exception() doesn't take kindly to that (it expects an
+ # actual exception object). So we work around it, by doing the work
+ # ourselves if ev is not an exception object.
+ if not is_base_exception(ev):
+ tb_data = force_unicode(
+ ''.join(traceback.format_tb(tb)),
+ encoding)
+ ev = exc_to_unicode(ev)
+ return tb_data + ev
+ else:
+ return force_unicode(
+ ''.join(traceback.format_exception(*exc_info)),
+ encoding)
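
``cmp_to_key`` lets the key-based sorts above accept a legacy cmp-style
function; a minimal sketch, assuming ``sort_list`` and ``cmp_to_key`` are
imported from nose.pyversion::

    def by_length(a, b):
        # classic cmp() contract: negative, zero or positive
        return len(a) - len(b)

    names = ['ccc', 'a', 'bb']
    sort_list(names, cmp_to_key(by_length))
    assert names == ['a', 'bb', 'ccc']
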
diff --git a/lib/spack/external/nose/result.py b/lib/spack/external/nose/result.py
new file mode 100644
index 0000000000..f974a14ae2
--- /dev/null
+++ b/lib/spack/external/nose/result.py
@@ -0,0 +1,200 @@
+"""
+Test Result
+-----------
+
+Provides a TextTestResult that extends unittest's _TextTestResult to
+provide support for error classes (such as the builtin skip and
+deprecated classes), and hooks for plugins to take over or extend
+reporting.
+"""
+
+import logging
+try:
+ # 2.7+
+ from unittest.runner import _TextTestResult
+except ImportError:
+ from unittest import _TextTestResult
+from nose.config import Config
+from nose.util import isclass, ln as _ln # backwards compat
+
+log = logging.getLogger('nose.result')
+
+
+def _exception_detail(exc):
+ # this is what stdlib module traceback does
+ try:
+ return str(exc)
+ except:
+ return '<unprintable %s object>' % type(exc).__name__
+
+
+class TextTestResult(_TextTestResult):
+ """Text test result that extends unittest's default test result
+ support for a configurable set of errorClasses (eg, Skip,
+ Deprecated, TODO) that extend the errors/failures/success triad.
+ """
+ def __init__(self, stream, descriptions, verbosity, config=None,
+ errorClasses=None):
+ if errorClasses is None:
+ errorClasses = {}
+ self.errorClasses = errorClasses
+ if config is None:
+ config = Config()
+ self.config = config
+ _TextTestResult.__init__(self, stream, descriptions, verbosity)
+
+ def addSkip(self, test, reason):
+ # 2.7 skip compat
+ from nose.plugins.skip import SkipTest
+ if SkipTest in self.errorClasses:
+ storage, label, isfail = self.errorClasses[SkipTest]
+ storage.append((test, reason))
+ self.printLabel(label, (SkipTest, reason, None))
+
+ def addError(self, test, err):
+ """Overrides normal addError to add support for
+ errorClasses. If the exception is a registered class, the
+ error will be added to the list for that class, not errors.
+ """
+ ec, ev, tb = err
+ try:
+ exc_info = self._exc_info_to_string(err, test)
+ except TypeError:
+ # 2.3 compat
+ exc_info = self._exc_info_to_string(err)
+ for cls, (storage, label, isfail) in self.errorClasses.items():
+ #if 'Skip' in cls.__name__ or 'Skip' in ec.__name__:
+ # from nose.tools import set_trace
+ # set_trace()
+ if isclass(ec) and issubclass(ec, cls):
+ if isfail:
+ test.passed = False
+ storage.append((test, exc_info))
+ self.printLabel(label, err)
+ return
+ self.errors.append((test, exc_info))
+ test.passed = False
+ self.printLabel('ERROR')
+
+ # override to bypass changes in 2.7
+ def getDescription(self, test):
+ if self.descriptions:
+ return test.shortDescription() or str(test)
+ else:
+ return str(test)
+
+ def printLabel(self, label, err=None):
+ # Might get patched into a streamless result
+ stream = getattr(self, 'stream', None)
+ if stream is not None:
+ if self.showAll:
+ message = [label]
+ if err:
+ detail = _exception_detail(err[1])
+ if detail:
+ message.append(detail)
+ stream.writeln(": ".join(message))
+ elif self.dots:
+ stream.write(label[:1])
+
+ def printErrors(self):
+ """Overrides to print all errorClasses errors as well.
+ """
+ _TextTestResult.printErrors(self)
+ for cls in self.errorClasses.keys():
+ storage, label, isfail = self.errorClasses[cls]
+ if isfail:
+ self.printErrorList(label, storage)
+ # Might get patched into a result with no config
+ if hasattr(self, 'config'):
+ self.config.plugins.report(self.stream)
+
+ def printSummary(self, start, stop):
+ """Called by the test runner to print the final summary of test
+ run results.
+ """
+ write = self.stream.write
+ writeln = self.stream.writeln
+ taken = float(stop - start)
+ run = self.testsRun
+ plural = run != 1 and "s" or ""
+
+ writeln(self.separator2)
+ writeln("Ran %s test%s in %.3fs" % (run, plural, taken))
+ writeln()
+
+ summary = {}
+ eckeys = self.errorClasses.keys()
+ for cls in eckeys:
+ storage, label, isfail = self.errorClasses[cls]
+ count = len(storage)
+ if not count:
+ continue
+ summary[label] = count
+ if len(self.failures):
+ summary['failures'] = len(self.failures)
+ if len(self.errors):
+ summary['errors'] = len(self.errors)
+
+ if not self.wasSuccessful():
+ write("FAILED")
+ else:
+ write("OK")
+ items = summary.items()
+ if items:
+ items.sort()
+ write(" (")
+ write(", ".join(["%s=%s" % (label, count) for
+ label, count in items]))
+ writeln(")")
+ else:
+ writeln()
+
+ def wasSuccessful(self):
+ """Overrides to check that there are no errors in errorClasses
+ lists that are marked as errors and should cause a run to
+ fail.
+ """
+ if self.errors or self.failures:
+ return False
+ for cls in self.errorClasses.keys():
+ storage, label, isfail = self.errorClasses[cls]
+ if not isfail:
+ continue
+ if storage:
+ return False
+ return True
+
+ def _addError(self, test, err):
+ try:
+ exc_info = self._exc_info_to_string(err, test)
+ except TypeError:
+ # 2.3: does not take test arg
+ exc_info = self._exc_info_to_string(err)
+ self.errors.append((test, exc_info))
+ if self.showAll:
+ self.stream.write('ERROR')
+ elif self.dots:
+ self.stream.write('E')
+
+ def _exc_info_to_string(self, err, test=None):
+ # 2.7 skip compat
+ from nose.plugins.skip import SkipTest
+ if isclass(err[0]) and issubclass(err[0], SkipTest):
+ return str(err[1])
+ # 2.3/2.4 -- 2.4 passes test, 2.3 does not
+ try:
+ return _TextTestResult._exc_info_to_string(self, err, test)
+ except TypeError:
+ # 2.3: does not take test arg
+ return _TextTestResult._exc_info_to_string(self, err)
+
+
+def ln(*arg, **kw):
+ from warnings import warn
+ warn("ln() has moved to nose.util from nose.result and will be removed "
+ "from nose.result in a future release. Please update your imports ",
+ DeprecationWarning)
+ return _ln(*arg, **kw)
+
+
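
Each entry in ``errorClasses`` maps an exception type to a
``(storage, label, isfail)`` triple; a minimal sketch of wiring one up by hand
(normally the ErrorClassPlugin machinery does this), with a throwaway
writeln-style stream::

    import sys
    from nose.result import TextTestResult

    class Todo(Exception):
        pass

    class Stream(object):
        # minimal unittest-style stream with the writeln() the result expects
        def write(self, msg):
            sys.stderr.write(msg)
        def writeln(self, msg=''):
            sys.stderr.write(msg + '\n')

    todos = []                       # collects (test, exc_info) pairs
    result = TextTestResult(Stream(), descriptions=1, verbosity=2,
                            errorClasses={Todo: (todos, 'TODO', False)})
    # a test that raises Todo is appended to todos, printed with the TODO
    # label, and does not make wasSuccessful() return False
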
diff --git a/lib/spack/external/nose/selector.py b/lib/spack/external/nose/selector.py
new file mode 100644
index 0000000000..b63f7af0b1
--- /dev/null
+++ b/lib/spack/external/nose/selector.py
@@ -0,0 +1,251 @@
+"""
+Test Selection
+--------------
+
+Test selection is handled by a Selector. The test loader calls the
+appropriate selector method for each object it encounters that it
+thinks may be a test.
+"""
+import logging
+import os
+import unittest
+from nose.config import Config
+from nose.util import split_test_name, src, getfilename, getpackage, ispackage, is_executable
+
+log = logging.getLogger(__name__)
+
+__all__ = ['Selector', 'defaultSelector', 'TestAddress']
+
+
+# for efficiency and easier mocking
+op_join = os.path.join
+op_basename = os.path.basename
+op_exists = os.path.exists
+op_splitext = os.path.splitext
+op_isabs = os.path.isabs
+op_abspath = os.path.abspath
+
+
+class Selector(object):
+ """Core test selector. Examines test candidates and determines whether,
+ given the specified configuration, the test candidate should be selected
+ as a test.
+ """
+ def __init__(self, config):
+ if config is None:
+ config = Config()
+ self.configure(config)
+
+ def configure(self, config):
+ self.config = config
+ self.exclude = config.exclude
+ self.ignoreFiles = config.ignoreFiles
+ self.include = config.include
+ self.plugins = config.plugins
+ self.match = config.testMatch
+
+ def matches(self, name):
+ """Does the name match my requirements?
+
+ To match, a name must match config.testMatch OR config.include
+ and it must not match config.exclude
+ """
+ return ((self.match.search(name)
+ or (self.include and
+ filter(None,
+ [inc.search(name) for inc in self.include])))
+ and ((not self.exclude)
+ or not filter(None,
+ [exc.search(name) for exc in self.exclude])
+ ))
+
+ def wantClass(self, cls):
+ """Is the class a wanted test class?
+
+ A class must be a unittest.TestCase subclass, or match test name
+ requirements. Classes that start with _ are always excluded.
+ """
+ declared = getattr(cls, '__test__', None)
+ if declared is not None:
+ wanted = declared
+ else:
+ wanted = (not cls.__name__.startswith('_')
+ and (issubclass(cls, unittest.TestCase)
+ or self.matches(cls.__name__)))
+
+ plug_wants = self.plugins.wantClass(cls)
+ if plug_wants is not None:
+ log.debug("Plugin setting selection of %s to %s", cls, plug_wants)
+ wanted = plug_wants
+ log.debug("wantClass %s? %s", cls, wanted)
+ return wanted
+
+ def wantDirectory(self, dirname):
+ """Is the directory a wanted test directory?
+
+ All package directories match, so long as they do not match exclude.
+ All other directories must match test requirements.
+ """
+ tail = op_basename(dirname)
+ if ispackage(dirname):
+ wanted = (not self.exclude
+ or not filter(None,
+ [exc.search(tail) for exc in self.exclude]
+ ))
+ else:
+ wanted = (self.matches(tail)
+ or (self.config.srcDirs
+ and tail in self.config.srcDirs))
+ plug_wants = self.plugins.wantDirectory(dirname)
+ if plug_wants is not None:
+ log.debug("Plugin setting selection of %s to %s",
+ dirname, plug_wants)
+ wanted = plug_wants
+ log.debug("wantDirectory %s? %s", dirname, wanted)
+ return wanted
+
+ def wantFile(self, file):
+ """Is the file a wanted test file?
+
+ The file must be a python source file and match testMatch or
+ include, and not match exclude. Files that match ignore are *never*
+ wanted, regardless of plugin, testMatch, include or exclude settings.
+ """
+ # never, ever load files that match anything in ignore
+ # (.* _* and *setup*.py by default)
+ base = op_basename(file)
+ ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles
+ if ignore_this.search(base) ]
+ if ignore_matches:
+ log.debug('%s matches ignoreFiles pattern; skipped',
+ base)
+ return False
+ if not self.config.includeExe and is_executable(file):
+ log.info('%s is executable; skipped', file)
+ return False
+ dummy, ext = op_splitext(base)
+ pysrc = ext == '.py'
+
+ wanted = pysrc and self.matches(base)
+ plug_wants = self.plugins.wantFile(file)
+ if plug_wants is not None:
+ log.debug("plugin setting want %s to %s", file, plug_wants)
+ wanted = plug_wants
+ log.debug("wantFile %s? %s", file, wanted)
+ return wanted
+
+ def wantFunction(self, function):
+ """Is the function a test function?
+ """
+ try:
+ if hasattr(function, 'compat_func_name'):
+ funcname = function.compat_func_name
+ else:
+ funcname = function.__name__
+ except AttributeError:
+ # not a function
+ return False
+ declared = getattr(function, '__test__', None)
+ if declared is not None:
+ wanted = declared
+ else:
+ wanted = not funcname.startswith('_') and self.matches(funcname)
+ plug_wants = self.plugins.wantFunction(function)
+ if plug_wants is not None:
+ wanted = plug_wants
+ log.debug("wantFunction %s? %s", function, wanted)
+ return wanted
+
+ def wantMethod(self, method):
+ """Is the method a test method?
+ """
+ try:
+ method_name = method.__name__
+ except AttributeError:
+ # not a method
+ return False
+ if method_name.startswith('_'):
+ # never collect 'private' methods
+ return False
+ declared = getattr(method, '__test__', None)
+ if declared is not None:
+ wanted = declared
+ else:
+ wanted = self.matches(method_name)
+ plug_wants = self.plugins.wantMethod(method)
+ if plug_wants is not None:
+ wanted = plug_wants
+ log.debug("wantMethod %s? %s", method, wanted)
+ return wanted
+
+ def wantModule(self, module):
+ """Is the module a test module?
+
+ The tail of the module name must match test requirements. One exception:
+ we always want __main__.
+ """
+ declared = getattr(module, '__test__', None)
+ if declared is not None:
+ wanted = declared
+ else:
+ wanted = self.matches(module.__name__.split('.')[-1]) \
+ or module.__name__ == '__main__'
+ plug_wants = self.plugins.wantModule(module)
+ if plug_wants is not None:
+ wanted = plug_wants
+ log.debug("wantModule %s? %s", module, wanted)
+ return wanted
+
+defaultSelector = Selector
+
+
+class TestAddress(object):
+ """A test address represents a user's request to run a particular
+ test. The user may specify a filename or module (or neither),
+ and/or a callable (a class, function, or method). The naming
+ format for test addresses is:
+
+ filename_or_module:callable
+
+ Filenames that are not absolute will be made absolute relative to
+ the working dir.
+
+ The filename or module part will be considered a module name if it
+ doesn't look like a file, that is, if it doesn't exist on the file
+ system and it doesn't contain any directory separators and it
+ doesn't end in .py.
+
+ Callables may be a class name, function name, method name, or
+ class.method specification.
+ """
+ def __init__(self, name, workingDir=None):
+ if workingDir is None:
+ workingDir = os.getcwd()
+ self.name = name
+ self.workingDir = workingDir
+ self.filename, self.module, self.call = split_test_name(name)
+ log.debug('Test name %s resolved to file %s, module %s, call %s',
+ name, self.filename, self.module, self.call)
+ if self.filename is None:
+ if self.module is not None:
+ self.filename = getfilename(self.module, self.workingDir)
+ if self.filename:
+ self.filename = src(self.filename)
+ if not op_isabs(self.filename):
+ self.filename = op_abspath(op_join(workingDir,
+ self.filename))
+ if self.module is None:
+ self.module = getpackage(self.filename)
+ log.debug(
+ 'Final resolution of test name %s: file %s module %s call %s',
+ name, self.filename, self.module, self.call)
+
+ def totuple(self):
+ return (self.filename, self.module, self.call)
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return "%s: (%s, %s, %s)" % (self.name, self.filename,
+ self.module, self.call)
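
A name such as ``path/to/test_mod.py:TestCase.test_method`` resolves into a
(filename, module, call) tuple; a minimal sketch with illustrative paths, where
the module name shown assumes ``tests`` is not itself a package::

    from nose.selector import TestAddress

    addr = TestAddress('tests/test_spec.py:TestSpec.test_parse',
                       workingDir='/src/project')
    print(addr.totuple())
    # ('/src/project/tests/test_spec.py', 'test_spec', 'TestSpec.test_parse')
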
diff --git a/lib/spack/external/nose/sphinx/__init__.py b/lib/spack/external/nose/sphinx/__init__.py
new file mode 100644
index 0000000000..2ae28399f5
--- /dev/null
+++ b/lib/spack/external/nose/sphinx/__init__.py
@@ -0,0 +1 @@
+pass
diff --git a/lib/spack/external/nose/sphinx/pluginopts.py b/lib/spack/external/nose/sphinx/pluginopts.py
new file mode 100644
index 0000000000..d2b284ab27
--- /dev/null
+++ b/lib/spack/external/nose/sphinx/pluginopts.py
@@ -0,0 +1,189 @@
+"""
+Adds a sphinx directive that can be used to automatically document a plugin.
+
+For example, this::
+
+ .. autoplugin :: nose.plugins.foo
+ :plugin: Pluggy
+
+produces::
+
+ .. automodule :: nose.plugins.foo
+
+ Options
+ -------
+
+ .. cmdoption :: --foo=BAR, --fooble=BAR
+
+ Do the foo thing to the new thing.
+
+ Plugin
+ ------
+
+ .. autoclass :: nose.plugins.foo.Pluggy
+ :members:
+
+ Source
+ ------
+
+ .. include :: path/to/nose/plugins/foo.py
+ :literal:
+
+"""
+import os
+try:
+ from docutils import nodes, utils
+ from docutils.statemachine import ViewList
+ from docutils.parsers.rst import directives
+except ImportError:
+ pass # won't run anyway
+
+from nose.util import resolve_name
+from nose.plugins.base import Plugin
+from nose.plugins.manager import BuiltinPluginManager
+from nose.config import Config
+from nose.core import TestProgram
+from inspect import isclass
+
+
+def autoplugin_directive(dirname, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ mod_name = arguments[0]
+ mod = resolve_name(mod_name)
+ plug_name = options.get('plugin', None)
+ if plug_name:
+ obj = getattr(mod, plug_name)
+ else:
+ for entry in dir(mod):
+ obj = getattr(mod, entry)
+ if isclass(obj) and issubclass(obj, Plugin) and obj is not Plugin:
+ plug_name = '%s.%s' % (mod_name, entry)
+ break
+
+ # mod docstring
+ rst = ViewList()
+ rst.append('.. automodule :: %s\n' % mod_name, '<autodoc>')
+ rst.append('', '<autodoc>')
+
+ # options
+ rst.append('Options', '<autodoc>')
+ rst.append('-------', '<autodoc>')
+ rst.append('', '<autodoc>')
+
+ plug = obj()
+ opts = OptBucket()
+ plug.options(opts, {})
+ for opt in opts:
+ rst.append(opt.options(), '<autodoc>')
+ rst.append(' \n', '<autodoc>')
+ rst.append(' ' + opt.help + '\n', '<autodoc>')
+ rst.append('\n', '<autodoc>')
+
+ # plugin class
+ rst.append('Plugin', '<autodoc>')
+ rst.append('------', '<autodoc>')
+ rst.append('', '<autodoc>')
+
+ rst.append('.. autoclass :: %s\n' % plug_name, '<autodoc>')
+ rst.append(' :members:\n', '<autodoc>')
+ rst.append(' :show-inheritance:\n', '<autodoc>')
+ rst.append('', '<autodoc>')
+
+ # source
+ rst.append('Source', '<autodoc>')
+ rst.append('------', '<autodoc>')
+ rst.append(
+ '.. include :: %s\n' % utils.relative_path(
+ state_machine.document['source'],
+ os.path.abspath(mod.__file__.replace('.pyc', '.py'))),
+ '<autodoc>')
+ rst.append(' :literal:\n', '<autodoc>')
+ rst.append('', '<autodoc>')
+
+ node = nodes.section()
+ node.document = state.document
+ surrounding_title_styles = state.memo.title_styles
+ surrounding_section_level = state.memo.section_level
+ state.memo.title_styles = []
+ state.memo.section_level = 0
+ state.nested_parse(rst, 0, node, match_titles=1)
+ state.memo.title_styles = surrounding_title_styles
+ state.memo.section_level = surrounding_section_level
+
+ return node.children
+
+
+def autohelp_directive(dirname, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ """produces rst from nose help"""
+ config = Config(parserClass=OptBucket,
+ plugins=BuiltinPluginManager())
+ parser = config.getParser(TestProgram.usage())
+ rst = ViewList()
+ for line in parser.format_help().split('\n'):
+ rst.append(line, '<autodoc>')
+
+ rst.append('Options', '<autodoc>')
+ rst.append('-------', '<autodoc>')
+ rst.append('', '<autodoc>')
+ for opt in parser:
+ rst.append(opt.options(), '<autodoc>')
+ rst.append(' \n', '<autodoc>')
+ rst.append(' ' + opt.help + '\n', '<autodoc>')
+ rst.append('\n', '<autodoc>')
+ node = nodes.section()
+ node.document = state.document
+ surrounding_title_styles = state.memo.title_styles
+ surrounding_section_level = state.memo.section_level
+ state.memo.title_styles = []
+ state.memo.section_level = 0
+ state.nested_parse(rst, 0, node, match_titles=1)
+ state.memo.title_styles = surrounding_title_styles
+ state.memo.section_level = surrounding_section_level
+
+ return node.children
+
+
+class OptBucket(object):
+ def __init__(self, doc=None, prog='nosetests'):
+ self.opts = []
+ self.doc = doc
+ self.prog = prog
+
+ def __iter__(self):
+ return iter(self.opts)
+
+ def format_help(self):
+ return self.doc.replace('%prog', self.prog).replace(':\n', '::\n')
+
+ def add_option(self, *arg, **kw):
+ self.opts.append(Opt(*arg, **kw))
+
+
+class Opt(object):
+ def __init__(self, *arg, **kw):
+ self.opts = arg
+ self.action = kw.pop('action', None)
+ self.default = kw.pop('default', None)
+ self.metavar = kw.pop('metavar', None)
+ self.help = kw.pop('help', None)
+
+ def options(self):
+ buf = []
+ for optstring in self.opts:
+ desc = optstring
+ if self.action not in ('store_true', 'store_false'):
+ desc += '=%s' % self.meta(optstring)
+ buf.append(desc)
+ return '.. cmdoption :: ' + ', '.join(buf)
+
+ def meta(self, optstring):
+ # FIXME optparser default metavar?
+ return self.metavar or 'DEFAULT'
+
+
+def setup(app):
+ app.add_directive('autoplugin',
+ autoplugin_directive, 1, (1, 0, 1),
+ plugin=directives.unchanged)
+ app.add_directive('autohelp', autohelp_directive, 0, (0, 0, 1))
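
To make the directives available, the module just needs to be importable and
listed as a Sphinx extension; a minimal sketch of the ``conf.py`` entry and a
document that uses it (plugin module and class names are illustrative)::

    # conf.py
    extensions = ['nose.sphinx.pluginopts']

and in an .rst file::

    .. autoplugin :: nose.plugins.attrib
       :plugin: AttributeSelector
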
diff --git a/lib/spack/external/nose/suite.py b/lib/spack/external/nose/suite.py
new file mode 100644
index 0000000000..a831105e34
--- /dev/null
+++ b/lib/spack/external/nose/suite.py
@@ -0,0 +1,609 @@
+"""
+Test Suites
+-----------
+
+Provides a LazySuite, which is a suite whose test list is a generator
+function, and ContextSuite,which can run fixtures (setup/teardown
+functions or methods) for the context that contains its tests.
+
+"""
+from __future__ import generators
+
+import logging
+import sys
+import unittest
+from nose.case import Test
+from nose.config import Config
+from nose.proxy import ResultProxyFactory
+from nose.util import isclass, resolve_name, try_run
+
+if sys.platform == 'cli':
+ if sys.version_info[:2] < (2, 6):
+ import clr
+ clr.AddReference("IronPython")
+ from IronPython.Runtime.Exceptions import StringException
+ else:
+ class StringException(Exception):
+ pass
+
+log = logging.getLogger(__name__)
+#log.setLevel(logging.DEBUG)
+
+# Singleton for default value -- see ContextSuite.__init__ below
+_def = object()
+
+
+def _strclass(cls):
+ return "%s.%s" % (cls.__module__, cls.__name__)
+
+class MixedContextError(Exception):
+ """Error raised when a context suite sees tests from more than
+ one context.
+ """
+ pass
+
+
+class LazySuite(unittest.TestSuite):
+ """A suite that may use a generator as its list of tests
+ """
+ def __init__(self, tests=()):
+ """Initialize the suite. tests may be an iterable or a generator
+ """
+ super(LazySuite, self).__init__()
+ self._set_tests(tests)
+
+ def __iter__(self):
+ return iter(self._tests)
+
+ def __repr__(self):
+ return "<%s tests=generator (%s)>" % (
+ _strclass(self.__class__), id(self))
+
+ def __hash__(self):
+ return object.__hash__(self)
+
+ __str__ = __repr__
+
+ def addTest(self, test):
+ self._precache.append(test)
+
+ # added to bypass run changes in 2.7's unittest
+ def run(self, result):
+ for test in self._tests:
+ if result.shouldStop:
+ break
+ test(result)
+ return result
+
+ def __nonzero__(self):
+ log.debug("tests in %s?", id(self))
+ if self._precache:
+ return True
+ if self.test_generator is None:
+ return False
+ try:
+ test = self.test_generator.next()
+ if test is not None:
+ self._precache.append(test)
+ return True
+ except StopIteration:
+ pass
+ return False
+
+ def _get_tests(self):
+ log.debug("precache is %s", self._precache)
+ for test in self._precache:
+ yield test
+ if self.test_generator is None:
+ return
+ for test in self.test_generator:
+ yield test
+
+ def _set_tests(self, tests):
+ self._precache = []
+ is_suite = isinstance(tests, unittest.TestSuite)
+ if callable(tests) and not is_suite:
+ self.test_generator = tests()
+ elif is_suite:
+ # Suites need special treatment: they must be called like
+ # tests for their setup/teardown to run (if any)
+ self.addTests([tests])
+ self.test_generator = None
+ else:
+ self.addTests(tests)
+ self.test_generator = None
+
+ _tests = property(_get_tests, _set_tests, None,
+ "Access the tests in this suite. Access is through a "
+ "generator, so iteration may not be repeatable.")
+
+
+class ContextSuite(LazySuite):
+ """A suite with context.
+
+ A ContextSuite executes fixtures (setup and teardown functions or
+ methods) for the context containing its tests.
+
+ The context may be explicitly passed. If it is not, a context (or
+ nested set of contexts) will be constructed by examining the tests
+ in the suite.
+ """
+ failureException = unittest.TestCase.failureException
+ was_setup = False
+ was_torndown = False
+ classSetup = ('setup_class', 'setup_all', 'setupClass', 'setupAll',
+ 'setUpClass', 'setUpAll')
+ classTeardown = ('teardown_class', 'teardown_all', 'teardownClass',
+ 'teardownAll', 'tearDownClass', 'tearDownAll')
+ moduleSetup = ('setup_module', 'setupModule', 'setUpModule', 'setup',
+ 'setUp')
+ moduleTeardown = ('teardown_module', 'teardownModule', 'tearDownModule',
+ 'teardown', 'tearDown')
+ packageSetup = ('setup_package', 'setupPackage', 'setUpPackage')
+ packageTeardown = ('teardown_package', 'teardownPackage',
+ 'tearDownPackage')
+
+ def __init__(self, tests=(), context=None, factory=None,
+ config=None, resultProxy=None, can_split=True):
+ log.debug("Context suite for %s (%s) (%s)", tests, context, id(self))
+ self.context = context
+ self.factory = factory
+ if config is None:
+ config = Config()
+ self.config = config
+ self.resultProxy = resultProxy
+ self.has_run = False
+ self.can_split = can_split
+ self.error_context = None
+ super(ContextSuite, self).__init__(tests)
+
+ def __repr__(self):
+ return "<%s context=%s>" % (
+ _strclass(self.__class__),
+ getattr(self.context, '__name__', self.context))
+ __str__ = __repr__
+
+ def id(self):
+ if self.error_context:
+ return '%s:%s' % (repr(self), self.error_context)
+ else:
+ return repr(self)
+
+ def __hash__(self):
+ return object.__hash__(self)
+
+ # 2.3 compat -- force 2.4 call sequence
+ def __call__(self, *arg, **kw):
+ return self.run(*arg, **kw)
+
+ def exc_info(self):
+ """Hook for replacing error tuple output
+ """
+ return sys.exc_info()
+
+ def _exc_info(self):
+ """Bottleneck to fix up IronPython string exceptions
+ """
+ e = self.exc_info()
+ if sys.platform == 'cli':
+ if isinstance(e[0], StringException):
+ # IronPython throws these StringExceptions, but
+ # traceback checks type(etype) == str. Make a real
+ # string here.
+ e = (str(e[0]), e[1], e[2])
+
+ return e
+
+ def run(self, result):
+ """Run tests in suite inside of suite fixtures.
+ """
+ # proxy the result for myself
+ log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests)
+ #import pdb
+ #pdb.set_trace()
+ if self.resultProxy:
+ result, orig = self.resultProxy(result, self), result
+ else:
+ result, orig = result, result
+ try:
+ self.setUp()
+ except KeyboardInterrupt:
+ raise
+ except:
+ self.error_context = 'setup'
+ result.addError(self, self._exc_info())
+ return
+ try:
+ for test in self._tests:
+ if result.shouldStop:
+ log.debug("stopping")
+ break
+ # each nose.case.Test will create its own result proxy
+ # so the cases need the original result, to avoid proxy
+ # chains
+ test(orig)
+ finally:
+ self.has_run = True
+ try:
+ self.tearDown()
+ except KeyboardInterrupt:
+ raise
+ except:
+ self.error_context = 'teardown'
+ result.addError(self, self._exc_info())
+
+ def hasFixtures(self, ctx_callback=None):
+ context = self.context
+ if context is None:
+ return False
+ if self.implementsAnyFixture(context, ctx_callback=ctx_callback):
+ return True
+ # My context doesn't have any, but its ancestors might
+ factory = self.factory
+ if factory:
+ ancestors = factory.context.get(self, [])
+ for ancestor in ancestors:
+ if self.implementsAnyFixture(
+ ancestor, ctx_callback=ctx_callback):
+ return True
+ return False
+
+ def implementsAnyFixture(self, context, ctx_callback):
+ if isclass(context):
+ names = self.classSetup + self.classTeardown
+ else:
+ names = self.moduleSetup + self.moduleTeardown
+ if hasattr(context, '__path__'):
+ names += self.packageSetup + self.packageTeardown
+ # If my context has any fixture attribute, I have fixtures
+ fixt = False
+ for m in names:
+ if hasattr(context, m):
+ fixt = True
+ break
+ if ctx_callback is None:
+ return fixt
+ return ctx_callback(context, fixt)
+
+ def setUp(self):
+ log.debug("suite %s setUp called, tests: %s", id(self), self._tests)
+ if not self:
+ # I have no tests
+ log.debug("suite %s has no tests", id(self))
+ return
+ if self.was_setup:
+ log.debug("suite %s already set up", id(self))
+ return
+ context = self.context
+ if context is None:
+ return
+ # before running my own context's setup, I need to
+        # ask the factory whether my ancestor contexts' setups have been run
+ factory = self.factory
+ if factory:
+ # get a copy, since we'll be destroying it as we go
+ ancestors = factory.context.get(self, [])[:]
+ while ancestors:
+ ancestor = ancestors.pop()
+ log.debug("ancestor %s may need setup", ancestor)
+ if ancestor in factory.was_setup:
+ continue
+ log.debug("ancestor %s does need setup", ancestor)
+ self.setupContext(ancestor)
+ if not context in factory.was_setup:
+ self.setupContext(context)
+ else:
+ self.setupContext(context)
+ self.was_setup = True
+ log.debug("completed suite setup")
+
+ def setupContext(self, context):
+ self.config.plugins.startContext(context)
+ log.debug("%s setup context %s", self, context)
+ if self.factory:
+ if context in self.factory.was_setup:
+ return
+ # note that I ran the setup for this context, so that I'll run
+ # the teardown in my teardown
+ self.factory.was_setup[context] = self
+ if isclass(context):
+ names = self.classSetup
+ else:
+ names = self.moduleSetup
+ if hasattr(context, '__path__'):
+ names = self.packageSetup + names
+ try_run(context, names)
+
+ def shortDescription(self):
+ if self.context is None:
+ return "test suite"
+ return "test suite for %s" % self.context
+
+ def tearDown(self):
+ log.debug('context teardown')
+ if not self.was_setup or self.was_torndown:
+ log.debug(
+ "No reason to teardown (was_setup? %s was_torndown? %s)"
+ % (self.was_setup, self.was_torndown))
+ return
+ self.was_torndown = True
+ context = self.context
+ if context is None:
+ log.debug("No context to tear down")
+ return
+
+ # for each ancestor... if the ancestor was setup
+ # and I did the setup, I can do teardown
+ factory = self.factory
+ if factory:
+ ancestors = factory.context.get(self, []) + [context]
+ for ancestor in ancestors:
+ log.debug('ancestor %s may need teardown', ancestor)
+ if not ancestor in factory.was_setup:
+ log.debug('ancestor %s was not setup', ancestor)
+ continue
+ if ancestor in factory.was_torndown:
+ log.debug('ancestor %s already torn down', ancestor)
+ continue
+ setup = factory.was_setup[ancestor]
+ log.debug("%s setup ancestor %s", setup, ancestor)
+ if setup is self:
+ self.teardownContext(ancestor)
+ else:
+ self.teardownContext(context)
+
+ def teardownContext(self, context):
+ log.debug("%s teardown context %s", self, context)
+ if self.factory:
+ if context in self.factory.was_torndown:
+ return
+ self.factory.was_torndown[context] = self
+ if isclass(context):
+ names = self.classTeardown
+ else:
+ names = self.moduleTeardown
+ if hasattr(context, '__path__'):
+ names = self.packageTeardown + names
+ try_run(context, names)
+ self.config.plugins.stopContext(context)
+
+ # FIXME the wrapping has to move to the factory?
+ def _get_wrapped_tests(self):
+ for test in self._get_tests():
+ if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
+ yield test
+ else:
+ yield Test(test,
+ config=self.config,
+ resultProxy=self.resultProxy)
+
+ _tests = property(_get_wrapped_tests, LazySuite._set_tests, None,
+ "Access the tests in this suite. Tests are returned "
+ "inside of a context wrapper.")
+
+
+class ContextSuiteFactory(object):
+ """Factory for ContextSuites. Called with a collection of tests,
+ the factory decides on a hierarchy of contexts by introspecting
+ the collection or the tests themselves to find the objects
+ containing the test objects. It always returns one suite, but that
+ suite may consist of a hierarchy of nested suites.
+ """
+ suiteClass = ContextSuite
+ def __init__(self, config=None, suiteClass=None, resultProxy=_def):
+ if config is None:
+ config = Config()
+ self.config = config
+ if suiteClass is not None:
+ self.suiteClass = suiteClass
+ # Using a singleton to represent default instead of None allows
+ # passing resultProxy=None to turn proxying off.
+ if resultProxy is _def:
+ resultProxy = ResultProxyFactory(config=config)
+ self.resultProxy = resultProxy
+ self.suites = {}
+ self.context = {}
+ self.was_setup = {}
+ self.was_torndown = {}
+
+ def __call__(self, tests, **kw):
+ """Return ``ContextSuite`` for tests. ``tests`` may either
+ be a callable (in which case the resulting ContextSuite will
+ have no parent context and be evaluated lazily) or an
+        iterable. In that case the tests will be wrapped in
+        nose.case.Test and examined, the context of each will be found,
+        and a suite of suites will be returned, organized into a stack
+        with the outermost suites belonging to the outermost contexts.
+ """
+ log.debug("Create suite for %s", tests)
+ context = kw.pop('context', getattr(tests, 'context', None))
+ log.debug("tests %s context %s", tests, context)
+ if context is None:
+ tests = self.wrapTests(tests)
+ try:
+ context = self.findContext(tests)
+ except MixedContextError:
+ return self.makeSuite(self.mixedSuites(tests), None, **kw)
+ return self.makeSuite(tests, context, **kw)
+
+ def ancestry(self, context):
+ """Return the ancestry of the context (that is, all of the
+ packages and modules containing the context), in order of
+ descent with the outermost ancestor last.
+ This method is a generator.
+ """
+ log.debug("get ancestry %s", context)
+ if context is None:
+ return
+ # Methods include reference to module they are defined in, we
+ # don't want that, instead want the module the class is in now
+ # (classes are re-ancestored elsewhere).
+ if hasattr(context, 'im_class'):
+ context = context.im_class
+ elif hasattr(context, '__self__'):
+ context = context.__self__.__class__
+ if hasattr(context, '__module__'):
+ ancestors = context.__module__.split('.')
+ elif hasattr(context, '__name__'):
+ ancestors = context.__name__.split('.')[:-1]
+ else:
+ raise TypeError("%s has no ancestors?" % context)
+ while ancestors:
+ log.debug(" %s ancestors %s", context, ancestors)
+ yield resolve_name('.'.join(ancestors))
+ ancestors.pop()
+
+ def findContext(self, tests):
+ if callable(tests) or isinstance(tests, unittest.TestSuite):
+ return None
+ context = None
+ for test in tests:
+ # Don't look at suites for contexts, only tests
+ ctx = getattr(test, 'context', None)
+ if ctx is None:
+ continue
+ if context is None:
+ context = ctx
+ elif context != ctx:
+ raise MixedContextError(
+ "Tests with different contexts in same suite! %s != %s"
+ % (context, ctx))
+ return context
+
+ def makeSuite(self, tests, context, **kw):
+ suite = self.suiteClass(
+ tests, context=context, config=self.config, factory=self,
+ resultProxy=self.resultProxy, **kw)
+ if context is not None:
+ self.suites.setdefault(context, []).append(suite)
+ self.context.setdefault(suite, []).append(context)
+ log.debug("suite %s has context %s", suite,
+ getattr(context, '__name__', None))
+ for ancestor in self.ancestry(context):
+ self.suites.setdefault(ancestor, []).append(suite)
+ self.context[suite].append(ancestor)
+ log.debug("suite %s has ancestor %s", suite, ancestor.__name__)
+ return suite
+
+ def mixedSuites(self, tests):
+ """The complex case where there are tests that don't all share
+ the same context. Groups tests into suites with common ancestors,
+ according to the following (essentially tail-recursive) procedure:
+
+ Starting with the context of the first test, if it is not
+ None, look for tests in the remaining tests that share that
+ ancestor. If any are found, group into a suite with that
+ ancestor as the context, and replace the current suite with
+ that suite. Continue this process for each ancestor of the
+ first test, until all ancestors have been processed. At this
+ point if any tests remain, recurse with those tests as the
+ input, returning a list of the common suite (which may be the
+ suite or test we started with, if no common tests were found)
+ plus the results of recursion.
+ """
+ if not tests:
+ return []
+ head = tests.pop(0)
+ if not tests:
+ return [head] # short circuit when none are left to combine
+ suite = head # the common ancestry suite, so far
+ tail = tests[:]
+ context = getattr(head, 'context', None)
+ if context is not None:
+ ancestors = [context] + [a for a in self.ancestry(context)]
+ for ancestor in ancestors:
+ common = [suite] # tests with ancestor in common, so far
+ remain = [] # tests that remain to be processed
+ for test in tail:
+ found_common = False
+ test_ctx = getattr(test, 'context', None)
+ if test_ctx is None:
+ remain.append(test)
+ continue
+ if test_ctx is ancestor:
+ common.append(test)
+ continue
+ for test_ancestor in self.ancestry(test_ctx):
+ if test_ancestor is ancestor:
+ common.append(test)
+ found_common = True
+ break
+ if not found_common:
+ remain.append(test)
+ if common:
+ suite = self.makeSuite(common, ancestor)
+ tail = self.mixedSuites(remain)
+ return [suite] + tail
+
+ def wrapTests(self, tests):
+ log.debug("wrap %s", tests)
+ if callable(tests) or isinstance(tests, unittest.TestSuite):
+ log.debug("I won't wrap")
+ return tests
+ wrapped = []
+ for test in tests:
+ log.debug("wrapping %s", test)
+ if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
+ wrapped.append(test)
+ elif isinstance(test, ContextList):
+ wrapped.append(self.makeSuite(test, context=test.context))
+ else:
+ wrapped.append(
+ Test(test, config=self.config, resultProxy=self.resultProxy)
+ )
+ return wrapped
+
+
+class ContextList(object):
+ """Not quite a suite -- a group of tests in a context. This is used
+ to hint the ContextSuiteFactory about what context the tests
+ belong to, in cases where it may be ambiguous or missing.
+ """
+ def __init__(self, tests, context=None):
+ self.tests = tests
+ self.context = context
+
+ def __iter__(self):
+ return iter(self.tests)
+
+
+class FinalizingSuiteWrapper(unittest.TestSuite):
+ """Wraps suite and calls final function after suite has
+ executed. Used to call final functions in cases (like running in
+ the standard test runner) where test running is not under nose's
+ control.
+ """
+ def __init__(self, suite, finalize):
+ super(FinalizingSuiteWrapper, self).__init__()
+ self.suite = suite
+ self.finalize = finalize
+
+ def __call__(self, *arg, **kw):
+ return self.run(*arg, **kw)
+
+ # 2.7 compat
+ def __iter__(self):
+ return iter(self.suite)
+
+ def run(self, *arg, **kw):
+ try:
+ return self.suite(*arg, **kw)
+ finally:
+ self.finalize(*arg, **kw)
+
+
+# backwards compat -- sort of
+class TestDir:
+ def __init__(*arg, **kw):
+ raise NotImplementedError(
+ "TestDir is not usable with nose 0.10. The class is present "
+ "in nose.suite for backwards compatibility purposes but it "
+ "may not be used.")
+
+
+class TestModule:
+ def __init__(*arg, **kw):
+ raise NotImplementedError(
+ "TestModule is not usable with nose 0.10. The class is present "
+ "in nose.suite for backwards compatibility purposes but it "
+ "may not be used.")
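
For orientation, a minimal sketch of how LazySuite consumes a generator of
tests (the callable itself is passed in, not its result; names here are
illustrative)::

    import unittest
    from nose.suite import LazySuite

    class _T(unittest.TestCase):
        def runTest(self):
            self.assertTrue(True)

    def gen_tests():
        # LazySuite stores the callable and iterates its result lazily.
        for _ in range(3):
            yield _T()

    suite = LazySuite(gen_tests)     # note: the callable, not gen_tests()
    result = unittest.TestResult()
    suite.run(result)                # runs three tests; result.testsRun == 3
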
diff --git a/lib/spack/external/nose/tools/__init__.py b/lib/spack/external/nose/tools/__init__.py
new file mode 100644
index 0000000000..74dab16a74
--- /dev/null
+++ b/lib/spack/external/nose/tools/__init__.py
@@ -0,0 +1,15 @@
+"""
+Tools for testing
+-----------------
+
+nose.tools provides a few convenience functions to make writing tests
+easier. You don't have to use them; nothing in the rest of nose depends
+on any of these methods.
+
+"""
+from nose.tools.nontrivial import *
+from nose.tools.nontrivial import __all__ as nontrivial_all
+from nose.tools.trivial import *
+from nose.tools.trivial import __all__ as trivial_all
+
+__all__ = trivial_all + nontrivial_all
diff --git a/lib/spack/external/nose/tools/nontrivial.py b/lib/spack/external/nose/tools/nontrivial.py
new file mode 100644
index 0000000000..283973245b
--- /dev/null
+++ b/lib/spack/external/nose/tools/nontrivial.py
@@ -0,0 +1,151 @@
+"""Tools not exempt from being descended into in tracebacks"""
+
+import time
+
+
+__all__ = ['make_decorator', 'raises', 'set_trace', 'timed', 'with_setup',
+ 'TimeExpired', 'istest', 'nottest']
+
+
+class TimeExpired(AssertionError):
+ pass
+
+
+def make_decorator(func):
+ """
+ Wraps a test decorator so as to properly replicate metadata
+ of the decorated function, including nose's additional stuff
+ (namely, setup and teardown).
+ """
+ def decorate(newfunc):
+ if hasattr(func, 'compat_func_name'):
+ name = func.compat_func_name
+ else:
+ name = func.__name__
+ newfunc.__dict__ = func.__dict__
+ newfunc.__doc__ = func.__doc__
+ newfunc.__module__ = func.__module__
+ if not hasattr(newfunc, 'compat_co_firstlineno'):
+ newfunc.compat_co_firstlineno = func.func_code.co_firstlineno
+ try:
+ newfunc.__name__ = name
+ except TypeError:
+ # can't set func name in 2.3
+ newfunc.compat_func_name = name
+ return newfunc
+ return decorate
+
+
+def raises(*exceptions):
+ """Test must raise one of expected exceptions to pass.
+
+ Example use::
+
+ @raises(TypeError, ValueError)
+ def test_raises_type_error():
+ raise TypeError("This test passes")
+
+ @raises(Exception)
+ def test_that_fails_by_passing():
+ pass
+
+ If you want to test many assertions about exceptions in a single test,
+ you may want to use `assert_raises` instead.
+ """
+ valid = ' or '.join([e.__name__ for e in exceptions])
+ def decorate(func):
+ name = func.__name__
+ def newfunc(*arg, **kw):
+ try:
+ func(*arg, **kw)
+ except exceptions:
+ pass
+ except:
+ raise
+ else:
+ message = "%s() did not raise %s" % (name, valid)
+ raise AssertionError(message)
+ newfunc = make_decorator(func)(newfunc)
+ return newfunc
+ return decorate
+
+
+def set_trace():
+ """Call pdb.set_trace in the calling frame, first restoring
+ sys.stdout to the real output stream. Note that sys.stdout is NOT
+ reset to whatever it was before the call once pdb is done!
+ """
+ import pdb
+ import sys
+ stdout = sys.stdout
+ sys.stdout = sys.__stdout__
+ pdb.Pdb().set_trace(sys._getframe().f_back)
+
+
+def timed(limit):
+ """Test must finish within specified time limit to pass.
+
+ Example use::
+
+ @timed(.1)
+ def test_that_fails():
+ time.sleep(.2)
+ """
+ def decorate(func):
+ def newfunc(*arg, **kw):
+ start = time.time()
+ result = func(*arg, **kw)
+ end = time.time()
+ if end - start > limit:
+ raise TimeExpired("Time limit (%s) exceeded" % limit)
+ return result
+ newfunc = make_decorator(func)(newfunc)
+ return newfunc
+ return decorate
+
+
+def with_setup(setup=None, teardown=None):
+ """Decorator to add setup and/or teardown methods to a test function::
+
+ @with_setup(setup, teardown)
+ def test_something():
+ " ... "
+
+ Note that `with_setup` is useful *only* for test functions, not for test
+ methods or inside of TestCase subclasses.
+ """
+ def decorate(func, setup=setup, teardown=teardown):
+ if setup:
+ if hasattr(func, 'setup'):
+ _old_s = func.setup
+ def _s():
+ setup()
+ _old_s()
+ func.setup = _s
+ else:
+ func.setup = setup
+ if teardown:
+ if hasattr(func, 'teardown'):
+ _old_t = func.teardown
+ def _t():
+ _old_t()
+ teardown()
+ func.teardown = _t
+ else:
+ func.teardown = teardown
+ return func
+ return decorate
+
+
+def istest(func):
+ """Decorator to mark a function or method as a test
+ """
+ func.__test__ = True
+ return func
+
+
+def nottest(func):
+ """Decorator to mark a function or method as *not* a test
+ """
+ func.__test__ = False
+ return func
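
A short usage sketch for the decorators above (test and fixture names are
illustrative; nose's loader honors the setup/teardown attributes that
with_setup attaches)::

    from nose.tools import raises, timed, with_setup

    _state = []

    def _setup():
        _state.append('ready')

    def _teardown():
        _state.remove('ready')

    @with_setup(_setup, _teardown)
    def test_uses_fixture():
        assert _state == ['ready']

    @raises(ValueError)
    def test_expected_error():
        int('not a number')   # raises ValueError, so the test passes

    @timed(0.5)
    def test_is_fast():
        pass                  # fails only if it runs longer than 0.5s
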
diff --git a/lib/spack/external/nose/tools/trivial.py b/lib/spack/external/nose/tools/trivial.py
new file mode 100644
index 0000000000..cf83efeda5
--- /dev/null
+++ b/lib/spack/external/nose/tools/trivial.py
@@ -0,0 +1,54 @@
+"""Tools so trivial that tracebacks should not descend into them
+
+We define the ``__unittest`` symbol in their module namespace so unittest will
+skip them when printing tracebacks, just as it does for their corresponding
+methods in ``unittest`` proper.
+
+"""
+import re
+import unittest
+
+
+__all__ = ['ok_', 'eq_']
+
+# Use the same flag as unittest itself to prevent descent into these functions:
+__unittest = 1
+
+
+def ok_(expr, msg=None):
+ """Shorthand for assert. Saves 3 whole characters!
+ """
+ if not expr:
+ raise AssertionError(msg)
+
+
+def eq_(a, b, msg=None):
+    """Shorthand for 'assert a == b, "%r != %r" % (a, b)'
+ """
+ if not a == b:
+ raise AssertionError(msg or "%r != %r" % (a, b))
+
+
+#
+# Expose assert* from unittest.TestCase
+# - give them pep8 style names
+#
+caps = re.compile('([A-Z])')
+
+def pep8(name):
+ return caps.sub(lambda m: '_' + m.groups()[0].lower(), name)
+
+class Dummy(unittest.TestCase):
+ def nop():
+ pass
+_t = Dummy('nop')
+
+for at in [ at for at in dir(_t)
+ if at.startswith('assert') and not '_' in at ]:
+ pepd = pep8(at)
+ vars()[pepd] = getattr(_t, at)
+ __all__.append(pepd)
+
+del Dummy
+del _t
+del pep8
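
The generated pep8-style aliases sit alongside ok_ and eq_; for example
(assert_true and assert_equal come from the dir(TestCase) loop above)::

    from nose.tools import ok_, eq_, assert_true, assert_equal

    ok_(1 + 1 == 2, msg="arithmetic is broken")
    eq_(2 ** 3, 8)
    assert_true(isinstance([], list))   # alias for TestCase.assertTrue
    assert_equal('a' * 3, 'aaa')        # alias for TestCase.assertEqual
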
diff --git a/lib/spack/external/nose/twistedtools.py b/lib/spack/external/nose/twistedtools.py
new file mode 100644
index 0000000000..8d9c6ffe9b
--- /dev/null
+++ b/lib/spack/external/nose/twistedtools.py
@@ -0,0 +1,173 @@
+"""
+Twisted integration
+-------------------
+
+This module provides a very simple way to integrate your tests with the
+Twisted_ event loop.
+
+You must import this module *before* importing anything from Twisted itself!
+
+Example::
+
+ from nose.twistedtools import reactor, deferred
+
+ @deferred()
+ def test_resolve():
+ return reactor.resolve("www.python.org")
+
+Or, more realistically::
+
+ @deferred(timeout=5.0)
+ def test_resolve():
+ d = reactor.resolve("www.python.org")
+ def check_ip(ip):
+ assert ip == "67.15.36.43"
+ d.addCallback(check_ip)
+ return d
+
+.. _Twisted: http://twistedmatrix.com/trac/
+"""
+
+import sys
+from Queue import Queue, Empty
+from nose.tools import make_decorator, TimeExpired
+
+__all__ = [
+ 'threaded_reactor', 'reactor', 'deferred', 'TimeExpired',
+ 'stop_reactor'
+]
+
+_twisted_thread = None
+
+def threaded_reactor():
+ """
+ Start the Twisted reactor in a separate thread, if not already done.
+ Returns the reactor.
+ The thread will automatically be destroyed when all the tests are done.
+ """
+ global _twisted_thread
+ try:
+ from twisted.internet import reactor
+ except ImportError:
+ return None, None
+ if not _twisted_thread:
+ from twisted.python import threadable
+ from threading import Thread
+ _twisted_thread = Thread(target=lambda: reactor.run( \
+ installSignalHandlers=False))
+ _twisted_thread.setDaemon(True)
+ _twisted_thread.start()
+ return reactor, _twisted_thread
+
+# Export global reactor variable, as Twisted does
+reactor, reactor_thread = threaded_reactor()
+
+
+def stop_reactor():
+ """Stop the reactor and join the reactor thread until it stops.
+ Call this function in teardown at the module or package level to
+ reset the twisted system after your tests. You *must* do this if
+ you mix tests using these tools and tests using twisted.trial.
+ """
+ global _twisted_thread
+
+ def stop_reactor():
+        '''Helper for calling stop from within the thread.'''
+ reactor.stop()
+
+ reactor.callFromThread(stop_reactor)
+ reactor_thread.join()
+ for p in reactor.getDelayedCalls():
+ if p.active():
+ p.cancel()
+ _twisted_thread = None
+
+
+def deferred(timeout=None):
+ """
+ By wrapping a test function with this decorator, you can return a
+ twisted Deferred and the test will wait for the deferred to be triggered.
+ The whole test function will run inside the Twisted event loop.
+
+ The optional timeout parameter specifies the maximum duration of the test.
+ The difference with timed() is that timed() will still wait for the test
+ to end, while deferred() will stop the test when its timeout has expired.
+    The latter is more desirable when dealing with network tests, because
+ the result may actually never arrive.
+
+ If the callback is triggered, the test has passed.
+ If the errback is triggered or the timeout expires, the test has failed.
+
+ Example::
+
+ @deferred(timeout=5.0)
+ def test_resolve():
+ return reactor.resolve("www.python.org")
+
+    Attention! If you combine this decorator with other decorators (like
+    "raises"), deferred() must be applied *first*, i.e. it must be the
+    innermost decorator!
+
+ In other words, this is good::
+
+ @raises(DNSLookupError)
+ @deferred()
+ def test_error():
+ return reactor.resolve("xxxjhjhj.biz")
+
+ and this is bad::
+
+ @deferred()
+ @raises(DNSLookupError)
+ def test_error():
+ return reactor.resolve("xxxjhjhj.biz")
+ """
+ reactor, reactor_thread = threaded_reactor()
+ if reactor is None:
+ raise ImportError("twisted is not available or could not be imported")
+ # Check for common syntax mistake
+ # (otherwise, tests can be silently ignored
+ # if one writes "@deferred" instead of "@deferred()")
+ try:
+ timeout is None or timeout + 0
+ except TypeError:
+ raise TypeError("'timeout' argument must be a number or None")
+
+ def decorate(func):
+ def wrapper(*args, **kargs):
+ q = Queue()
+ def callback(value):
+ q.put(None)
+ def errback(failure):
+ # Retrieve and save full exception info
+ try:
+ failure.raiseException()
+ except:
+ q.put(sys.exc_info())
+ def g():
+ try:
+ d = func(*args, **kargs)
+ try:
+ d.addCallbacks(callback, errback)
+ # Check for a common mistake and display a nice error
+ # message
+ except AttributeError:
+ raise TypeError("you must return a twisted Deferred "
+ "from your test case!")
+ # Catch exceptions raised in the test body (from the
+ # Twisted thread)
+ except:
+ q.put(sys.exc_info())
+ reactor.callFromThread(g)
+ try:
+ error = q.get(timeout=timeout)
+ except Empty:
+ raise TimeExpired("timeout expired before end of test (%f s.)"
+ % timeout)
+ # Re-raise all exceptions
+ if error is not None:
+ exc_type, exc_value, tb = error
+ raise exc_type, exc_value, tb
+ wrapper = make_decorator(func)(wrapper)
+ return wrapper
+ return decorate
+
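
A sketch, assuming Twisted is installed: a deferred test plus the
module-level teardown that stop_reactor() is meant for::

    from nose.twistedtools import reactor, deferred, stop_reactor

    @deferred(timeout=2.0)
    def test_fires_callback():
        from twisted.internet import defer
        d = defer.Deferred()
        reactor.callLater(0.1, d.callback, None)  # fire shortly; test passes
        return d

    def teardown_module():
        # Required when mixing these tools with twisted.trial tests.
        stop_reactor()
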
diff --git a/lib/spack/external/nose/usage.txt b/lib/spack/external/nose/usage.txt
new file mode 100644
index 0000000000..bc96894ab7
--- /dev/null
+++ b/lib/spack/external/nose/usage.txt
@@ -0,0 +1,115 @@
+nose collects tests automatically from python source files,
+directories and packages found in its working directory (which
+defaults to the current working directory). Any python source file,
+directory or package that matches the testMatch regular expression
+(by default: `(?:^|[\b_\.-])[Tt]est`) will be collected as a test (or
+source for collection of tests). In addition, all other packages
+found in the working directory will be examined for python source files
+or directories that match testMatch. Package discovery descends all
+the way down the tree, so package.tests and package.sub.tests and
+package.sub.sub2.tests will all be collected.
+
+Within a test directory or package, any python source file matching
+testMatch will be examined for test cases. Within a test module,
+functions and classes whose names match testMatch and TestCase
+subclasses with any name will be loaded and executed as tests. Tests
+may use the assert keyword or raise AssertionErrors to indicate test
+failure. TestCase subclasses may do the same or use the various
+TestCase methods available.
+
+**It is important to note that the default behavior of nose is to
+not include tests from files which are executable.** To include
+tests from such files, remove their executable bit or use
+the --exe flag (see 'Options' section below).
+
+Selecting Tests
+---------------
+
+To specify which tests to run, pass test names on the command line:
+
+ %prog only_test_this.py
+
+Test names specified may be file or module names, and may optionally
+indicate the test case to run by separating the module or file name
+from the test case name with a colon. Filenames may be relative or
+absolute. Examples:
+
+ %prog test.module
+ %prog another.test:TestCase.test_method
+ %prog a.test:TestCase
+ %prog /path/to/test/file.py:test_function
+
+You may also change the working directory where nose looks for tests
+by using the -w switch:
+
+ %prog -w /path/to/tests
+
+Note, however, that support for multiple -w arguments is now deprecated
+and will be removed in a future release. As of nose 0.10, you can get
+the same behavior by specifying the target directories *without*
+the -w switch:
+
+ %prog /path/to/tests /another/path/to/tests
+
+Further customization of test selection and loading is possible
+through the use of plugins.
+
+Test result output is identical to that of unittest, except for
+the additional features (error classes, and plugin-supplied
+features such as output capture and assert introspection) detailed
+in the options below.
+
+Configuration
+-------------
+
+In addition to passing command-line options, you may also put
+configuration options in your project's *setup.cfg* file, or a .noserc
+or nose.cfg file in your home directory. In any of these standard
+ini-style config files, you put your nosetests configuration in a
+``[nosetests]`` section. Options are the same as on the command line,
+with the -- prefix removed. For options that are simple switches, you
+must supply a value:
+
+ [nosetests]
+ verbosity=3
+ with-doctest=1
+
+All configuration files that are found will be loaded and their
+options combined. You can override the standard config file loading
+with the ``-c`` option.
+
+Using Plugins
+-------------
+
+There are numerous nose plugins available via easy_install and
+elsewhere. To use a plugin, just install it. The plugin will add
+command line options to nosetests. To verify that the plugin is installed,
+run:
+
+ nosetests --plugins
+
+You can add -v or -vv to that command to show more information
+about each plugin.
+
+If you are running nose.main() or nose.run() from a script, you
+can specify a list of plugins to use by passing a list of plugins
+with the plugins keyword argument.
+
+0.9 plugins
+-----------
+
+nose 1.0 can use SOME plugins that were written for nose 0.9. The
+default plugin manager inserts a compatibility wrapper around 0.9
+plugins that adapts the changed plugin api calls. However, plugins
+that access nose internals are likely to fail, especially if they
+attempt to access test case or test suite classes. For example,
+plugins that try to determine if a test passed to startTest is an
+individual test or a suite will fail, partly because suites are no
+longer passed to startTest and partly because it's likely that the
+plugin is trying to find out if the test is an instance of a class
+that no longer exists.
+
+0.10 and 0.11 plugins
+---------------------
+
+All plugins written for nose 0.10 and 0.11 should work with nose 1.0.
diff --git a/lib/spack/external/nose/util.py b/lib/spack/external/nose/util.py
new file mode 100644
index 0000000000..bfe16589ea
--- /dev/null
+++ b/lib/spack/external/nose/util.py
@@ -0,0 +1,668 @@
+"""Utility functions and classes used by nose internally.
+"""
+import inspect
+import itertools
+import logging
+import stat
+import os
+import re
+import sys
+import types
+import unittest
+from nose.pyversion import ClassType, TypeType, isgenerator, ismethod
+
+
+log = logging.getLogger('nose')
+
+ident_re = re.compile(r'^[A-Za-z_][A-Za-z0-9_.]*$')
+class_types = (ClassType, TypeType)
+skip_pattern = r"(?:\.svn)|(?:[^.]+\.py[co])|(?:.*~)|(?:.*\$py\.class)|(?:__pycache__)"
+
+try:
+ set()
+ set = set # make from nose.util import set happy
+except NameError:
+ try:
+ from sets import Set as set
+ except ImportError:
+ pass
+
+
+def ls_tree(dir_path="",
+ skip_pattern=skip_pattern,
+ indent="|-- ", branch_indent="| ",
+ last_indent="`-- ", last_branch_indent=" "):
+ # TODO: empty directories look like non-directory files
+ return "\n".join(_ls_tree_lines(dir_path, skip_pattern,
+ indent, branch_indent,
+ last_indent, last_branch_indent))
+
+
+def _ls_tree_lines(dir_path, skip_pattern,
+ indent, branch_indent, last_indent, last_branch_indent):
+ if dir_path == "":
+ dir_path = os.getcwd()
+
+ lines = []
+
+ names = os.listdir(dir_path)
+ names.sort()
+ dirs, nondirs = [], []
+ for name in names:
+ if re.match(skip_pattern, name):
+ continue
+ if os.path.isdir(os.path.join(dir_path, name)):
+ dirs.append(name)
+ else:
+ nondirs.append(name)
+
+ # list non-directories first
+ entries = list(itertools.chain([(name, False) for name in nondirs],
+ [(name, True) for name in dirs]))
+ def ls_entry(name, is_dir, ind, branch_ind):
+ if not is_dir:
+ yield ind + name
+ else:
+ path = os.path.join(dir_path, name)
+ if not os.path.islink(path):
+ yield ind + name
+ subtree = _ls_tree_lines(path, skip_pattern,
+ indent, branch_indent,
+ last_indent, last_branch_indent)
+ for x in subtree:
+ yield branch_ind + x
+ for name, is_dir in entries[:-1]:
+ for line in ls_entry(name, is_dir, indent, branch_indent):
+ yield line
+ if entries:
+ name, is_dir = entries[-1]
+ for line in ls_entry(name, is_dir, last_indent, last_branch_indent):
+ yield line
+
+
+def absdir(path):
+ """Return absolute, normalized path to directory, if it exists; None
+ otherwise.
+ """
+ if not os.path.isabs(path):
+ path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
+ path)))
+ if path is None or not os.path.isdir(path):
+ return None
+ return path
+
+
+def absfile(path, where=None):
+ """Return absolute, normalized path to file (optionally in directory
+ where), or None if the file can't be found either in where or the current
+ working directory.
+ """
+ orig = path
+ if where is None:
+ where = os.getcwd()
+ if isinstance(where, list) or isinstance(where, tuple):
+ for maybe_path in where:
+ maybe_abs = absfile(path, maybe_path)
+ if maybe_abs is not None:
+ return maybe_abs
+ return None
+ if not os.path.isabs(path):
+ path = os.path.normpath(os.path.abspath(os.path.join(where, path)))
+ if path is None or not os.path.exists(path):
+ if where != os.getcwd():
+ # try the cwd instead
+ path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
+ orig)))
+ if path is None or not os.path.exists(path):
+ return None
+ if os.path.isdir(path):
+        # might want an __init__.py from package
+ init = os.path.join(path,'__init__.py')
+ if os.path.isfile(init):
+ return init
+ elif os.path.isfile(path):
+ return path
+ return None
+
+
+def anyp(predicate, iterable):
+ for item in iterable:
+ if predicate(item):
+ return True
+ return False
+
+
+def file_like(name):
+ """A name is file-like if it is a path that exists, or it has a
+ directory part, or it ends in .py, or it isn't a legal python
+ identifier.
+ """
+ return (os.path.exists(name)
+ or os.path.dirname(name)
+ or name.endswith('.py')
+ or not ident_re.match(os.path.splitext(name)[0]))
+
+
+def func_lineno(func):
+ """Get the line number of a function. First looks for
+ compat_co_firstlineno, then func_code.co_first_lineno.
+ """
+ try:
+ return func.compat_co_firstlineno
+ except AttributeError:
+ try:
+ return func.func_code.co_firstlineno
+ except AttributeError:
+ return -1
+
+
+def isclass(obj):
+ """Is obj a class? Inspect's isclass is too liberal and returns True
+ for objects that can't be subclasses of anything.
+ """
+ obj_type = type(obj)
+ return obj_type in class_types or issubclass(obj_type, type)
+
+
+# backwards compat (issue #64)
+is_generator = isgenerator
+
+
+def ispackage(path):
+ """
+ Is this path a package directory?
+
+ >>> ispackage('nose')
+ True
+ >>> ispackage('unit_tests')
+ False
+ >>> ispackage('nose/plugins')
+ True
+ >>> ispackage('nose/loader.py')
+ False
+ """
+ if os.path.isdir(path):
+ # at least the end of the path must be a legal python identifier
+ # and __init__.py[co] must exist
+ end = os.path.basename(path)
+ if ident_re.match(end):
+ for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
+ if os.path.isfile(os.path.join(path, init)):
+ return True
+ if sys.platform.startswith('java') and \
+ os.path.isfile(os.path.join(path, '__init__$py.class')):
+ return True
+ return False
+
+
+def isproperty(obj):
+ """
+ Is this a property?
+
+ >>> class Foo:
+ ... def got(self):
+ ... return 2
+ ... def get(self):
+ ... return 1
+ ... get = property(get)
+
+ >>> isproperty(Foo.got)
+ False
+ >>> isproperty(Foo.get)
+ True
+ """
+ return type(obj) == property
+
+
+def getfilename(package, relativeTo=None):
+ """Find the python source file for a package, relative to a
+ particular directory (defaults to current working directory if not
+ given).
+ """
+ if relativeTo is None:
+ relativeTo = os.getcwd()
+ path = os.path.join(relativeTo, os.sep.join(package.split('.')))
+ if os.path.exists(path + '/__init__.py'):
+ return path
+ filename = path + '.py'
+ if os.path.exists(filename):
+ return filename
+ return None
+
+
+def getpackage(filename):
+ """
+ Find the full dotted package name for a given python source file
+ name. Returns None if the file is not a python source file.
+
+ >>> getpackage('foo.py')
+ 'foo'
+ >>> getpackage('biff/baf.py')
+ 'baf'
+ >>> getpackage('nose/util.py')
+ 'nose.util'
+
+ Works for directories too.
+
+ >>> getpackage('nose')
+ 'nose'
+ >>> getpackage('nose/plugins')
+ 'nose.plugins'
+
+ And __init__ files stuck onto directories
+
+ >>> getpackage('nose/plugins/__init__.py')
+ 'nose.plugins'
+
+ Absolute paths also work.
+
+ >>> path = os.path.abspath(os.path.join('nose', 'plugins'))
+ >>> getpackage(path)
+ 'nose.plugins'
+ """
+ src_file = src(filename)
+ if (os.path.isdir(src_file) or not src_file.endswith('.py')) and not ispackage(src_file):
+ return None
+ base, ext = os.path.splitext(os.path.basename(src_file))
+ if base == '__init__':
+ mod_parts = []
+ else:
+ mod_parts = [base]
+ path, part = os.path.split(os.path.split(src_file)[0])
+ while part:
+ if ispackage(os.path.join(path, part)):
+ mod_parts.append(part)
+ else:
+ break
+ path, part = os.path.split(path)
+ mod_parts.reverse()
+ return '.'.join(mod_parts)
+
+
+def ln(label):
+ """Draw a 70-char-wide divider, with label in the middle.
+
+ >>> ln('hello there')
+ '---------------------------- hello there -----------------------------'
+ """
+ label_len = len(label) + 2
+ chunk = (70 - label_len) // 2
+ out = '%s %s %s' % ('-' * chunk, label, '-' * chunk)
+ pad = 70 - len(out)
+ if pad > 0:
+ out = out + ('-' * pad)
+ return out
+
+
+def resolve_name(name, module=None):
+ """Resolve a dotted name to a module and its parts. This is stolen
+ wholesale from unittest.TestLoader.loadTestByName.
+
+ >>> resolve_name('nose.util') #doctest: +ELLIPSIS
+ <module 'nose.util' from...>
+ >>> resolve_name('nose.util.resolve_name') #doctest: +ELLIPSIS
+ <function resolve_name at...>
+ """
+ parts = name.split('.')
+ parts_copy = parts[:]
+ if module is None:
+ while parts_copy:
+ try:
+ log.debug("__import__ %s", name)
+ module = __import__('.'.join(parts_copy))
+ break
+ except ImportError:
+ del parts_copy[-1]
+ if not parts_copy:
+ raise
+ parts = parts[1:]
+ obj = module
+ log.debug("resolve: %s, %s, %s, %s", parts, name, obj, module)
+ for part in parts:
+ obj = getattr(obj, part)
+ return obj
+
+
+def split_test_name(test):
+ """Split a test name into a 3-tuple containing file, module, and callable
+ names, any of which (but not all) may be blank.
+
+ Test names are in the form:
+
+ file_or_module:callable
+
+ Either side of the : may be dotted. To change the splitting behavior, you
+ can alter nose.util.split_test_re.
+ """
+ norm = os.path.normpath
+ file_or_mod = test
+ fn = None
+ if not ':' in test:
+ # only a file or mod part
+ if file_like(test):
+ return (norm(test), None, None)
+ else:
+ return (None, test, None)
+
+ # could be path|mod:callable, or a : in the file path someplace
+ head, tail = os.path.split(test)
+ if not head:
+ # this is a case like 'foo:bar' -- generally a module
+ # name followed by a callable, but also may be a windows
+ # drive letter followed by a path
+ try:
+ file_or_mod, fn = test.split(':')
+ if file_like(fn):
+ # must be a funny path
+ file_or_mod, fn = test, None
+ except ValueError:
+ # more than one : in the test
+ # this is a case like c:\some\path.py:a_test
+ parts = test.split(':')
+ if len(parts[0]) == 1:
+ file_or_mod, fn = ':'.join(parts[:-1]), parts[-1]
+ else:
+ # nonsense like foo:bar:baz
+ raise ValueError("Test name '%s' could not be parsed. Please "
+ "format test names as path:callable or "
+ "module:callable." % (test,))
+ elif not tail:
+ # this is a case like 'foo:bar/'
+ # : must be part of the file path, so ignore it
+ file_or_mod = test
+ else:
+ if ':' in tail:
+ file_part, fn = tail.split(':')
+ else:
+ file_part = tail
+ file_or_mod = os.sep.join([head, file_part])
+ if file_or_mod:
+ if file_like(file_or_mod):
+ return (norm(file_or_mod), None, fn)
+ else:
+ return (None, file_or_mod, fn)
+ else:
+ return (None, None, fn)
+split_test_name.__test__ = False # do not collect
+
+
+def test_address(test):
+ """Find the test address for a test, which may be a module, filename,
+ class, method or function.
+ """
+ if hasattr(test, "address"):
+ return test.address()
+ # type-based polymorphism sucks in general, but I believe is
+ # appropriate here
+ t = type(test)
+ file = module = call = None
+ if t == types.ModuleType:
+ file = getattr(test, '__file__', None)
+ module = getattr(test, '__name__', None)
+ return (src(file), module, call)
+ if t == types.FunctionType or issubclass(t, type) or t == types.ClassType:
+ module = getattr(test, '__module__', None)
+ if module is not None:
+ m = sys.modules[module]
+ file = getattr(m, '__file__', None)
+ if file is not None:
+ file = os.path.abspath(file)
+ call = getattr(test, '__name__', None)
+ return (src(file), module, call)
+ if t == types.MethodType:
+ cls_adr = test_address(test.im_class)
+ return (src(cls_adr[0]), cls_adr[1],
+ "%s.%s" % (cls_adr[2], test.__name__))
+ # handle unittest.TestCase instances
+ if isinstance(test, unittest.TestCase):
+ if (hasattr(test, '_FunctionTestCase__testFunc') # pre 2.7
+ or hasattr(test, '_testFunc')): # 2.7
+ # unittest FunctionTestCase
+ try:
+ return test_address(test._FunctionTestCase__testFunc)
+ except AttributeError:
+ return test_address(test._testFunc)
+ # regular unittest.TestCase
+ cls_adr = test_address(test.__class__)
+ # 2.5 compat: __testMethodName changed to _testMethodName
+ try:
+ method_name = test._TestCase__testMethodName
+ except AttributeError:
+ method_name = test._testMethodName
+ return (src(cls_adr[0]), cls_adr[1],
+ "%s.%s" % (cls_adr[2], method_name))
+ if (hasattr(test, '__class__') and
+ test.__class__.__module__ not in ('__builtin__', 'builtins')):
+ return test_address(test.__class__)
+ raise TypeError("I don't know what %s is (%s)" % (test, t))
+test_address.__test__ = False # do not collect
+
+
+def try_run(obj, names):
+ """Given a list of possible method names, try to run them with the
+ provided object. Keep going until something works. Used to run
+ setup/teardown methods for module, package, and function tests.
+ """
+ for name in names:
+ func = getattr(obj, name, None)
+ if func is not None:
+ if type(obj) == types.ModuleType:
+ # py.test compatibility
+ if isinstance(func, types.FunctionType):
+ args, varargs, varkw, defaults = \
+ inspect.getargspec(func)
+ else:
+ # Not a function. If it's callable, call it anyway
+ if hasattr(func, '__call__') and not inspect.ismethod(func):
+ func = func.__call__
+ try:
+ args, varargs, varkw, defaults = \
+ inspect.getargspec(func)
+ args.pop(0) # pop the self off
+ except TypeError:
+ raise TypeError("Attribute %s of %r is not a python "
+ "function. Only functions or callables"
+ " may be used as fixtures." %
+ (name, obj))
+ if len(args):
+ log.debug("call fixture %s.%s(%s)", obj, name, obj)
+ return func(obj)
+ log.debug("call fixture %s.%s", obj, name)
+ return func()
+
+
+def src(filename):
+ """Find the python source file for a .pyc, .pyo or $py.class file on
+ jython. Returns the filename provided if it is not a python source
+ file.
+ """
+ if filename is None:
+ return filename
+ if sys.platform.startswith('java') and filename.endswith('$py.class'):
+ return '.'.join((filename[:-9], 'py'))
+ base, ext = os.path.splitext(filename)
+ if ext in ('.pyc', '.pyo', '.py'):
+ return '.'.join((base, 'py'))
+ return filename
+
+
+def regex_last_key(regex):
+ """Sort key function factory that puts items that match a
+ regular expression last.
+
+ >>> from nose.config import Config
+ >>> from nose.pyversion import sort_list
+ >>> c = Config()
+ >>> regex = c.testMatch
+ >>> entries = ['.', '..', 'a_test', 'src', 'lib', 'test', 'foo.py']
+ >>> sort_list(entries, regex_last_key(regex))
+ >>> entries
+ ['.', '..', 'foo.py', 'lib', 'src', 'a_test', 'test']
+ """
+ def k(obj):
+ if regex.search(obj):
+ return (1, obj)
+ return (0, obj)
+ return k
+
+
+def tolist(val):
+ """Convert a value that may be a list or a (possibly comma-separated)
+ string into a list. The exception: None is returned as None, not [None].
+
+ >>> tolist(["one", "two"])
+ ['one', 'two']
+ >>> tolist("hello")
+ ['hello']
+ >>> tolist("separate,values, with, commas, spaces , are ,ok")
+ ['separate', 'values', 'with', 'commas', 'spaces', 'are', 'ok']
+ """
+ if val is None:
+ return None
+ try:
+ # might already be a list
+ val.extend([])
+ return val
+ except AttributeError:
+ pass
+ # might be a string
+ try:
+ return re.split(r'\s*,\s*', val)
+ except TypeError:
+ # who knows...
+ return list(val)
+
+
+class odict(dict):
+ """Simple ordered dict implementation, based on:
+
+ http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
+ """
+ def __init__(self, *arg, **kw):
+ self._keys = []
+ super(odict, self).__init__(*arg, **kw)
+
+ def __delitem__(self, key):
+ super(odict, self).__delitem__(key)
+ self._keys.remove(key)
+
+ def __setitem__(self, key, item):
+ super(odict, self).__setitem__(key, item)
+ if key not in self._keys:
+ self._keys.append(key)
+
+ def __str__(self):
+ return "{%s}" % ', '.join(["%r: %r" % (k, v) for k, v in self.items()])
+
+ def clear(self):
+ super(odict, self).clear()
+ self._keys = []
+
+ def copy(self):
+ d = super(odict, self).copy()
+ d._keys = self._keys[:]
+ return d
+
+ def items(self):
+ return zip(self._keys, self.values())
+
+ def keys(self):
+ return self._keys[:]
+
+ def setdefault(self, key, failobj=None):
+ item = super(odict, self).setdefault(key, failobj)
+ if key not in self._keys:
+ self._keys.append(key)
+ return item
+
+ def update(self, dict):
+ super(odict, self).update(dict)
+ for key in dict.keys():
+ if key not in self._keys:
+ self._keys.append(key)
+
+ def values(self):
+ return map(self.get, self._keys)
+
+
+def transplant_func(func, module):
+ """
+ Make a function imported from module A appear as if it is located
+ in module B.
+
+ >>> from pprint import pprint
+ >>> pprint.__module__
+ 'pprint'
+ >>> pp = transplant_func(pprint, __name__)
+ >>> pp.__module__
+ 'nose.util'
+
+ The original function is not modified.
+
+ >>> pprint.__module__
+ 'pprint'
+
+ Calling the transplanted function calls the original.
+
+ >>> pp([1, 2])
+ [1, 2]
+ >>> pprint([1,2])
+ [1, 2]
+
+ """
+ from nose.tools import make_decorator
+ if isgenerator(func):
+ def newfunc(*arg, **kw):
+ for v in func(*arg, **kw):
+ yield v
+ else:
+ def newfunc(*arg, **kw):
+ return func(*arg, **kw)
+
+ newfunc = make_decorator(func)(newfunc)
+ newfunc.__module__ = module
+ return newfunc
+
+
+def transplant_class(cls, module):
+ """
+ Make a class appear to reside in `module`, rather than the module in which
+ it is actually defined.
+
+ >>> from nose.failure import Failure
+ >>> Failure.__module__
+ 'nose.failure'
+ >>> Nf = transplant_class(Failure, __name__)
+ >>> Nf.__module__
+ 'nose.util'
+ >>> Nf.__name__
+ 'Failure'
+
+ """
+ class C(cls):
+ pass
+ C.__module__ = module
+ C.__name__ = cls.__name__
+ return C
+
+
+def safe_str(val, encoding='utf-8'):
+ try:
+ return str(val)
+ except UnicodeEncodeError:
+ if isinstance(val, Exception):
+ return ' '.join([safe_str(arg, encoding)
+ for arg in val])
+ return unicode(val).encode(encoding)
+
+
+def is_executable(file):
+ if not os.path.exists(file):
+ return False
+ st = os.stat(file)
+ return bool(st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
+
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
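
A few of the helpers above in action (results are what the docstrings imply,
shown for POSIX paths)::

    from nose.util import split_test_name, tolist, ln

    split_test_name('tests/test_foo.py:TestFoo.test_bar')
    # -> ('tests/test_foo.py', None, 'TestFoo.test_bar')
    split_test_name('package.module:some_func')
    # -> (None, 'package.module', 'some_func')

    tolist('separate, values,with,commas')
    # -> ['separate', 'values', 'with', 'commas']

    ln('results')   # 70-character divider with the label centered
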
diff --git a/lib/spack/external/ordereddict.py b/lib/spack/external/ordereddict_backport.py
index 8ddad1477e..8ddad1477e 100644
--- a/lib/spack/external/ordereddict.py
+++ b/lib/spack/external/ordereddict_backport.py
diff --git a/lib/spack/external/pyqver2.py b/lib/spack/external/pyqver2.py
index 4a16e2811e..4690239748 100755
--- a/lib/spack/external/pyqver2.py
+++ b/lib/spack/external/pyqver2.py
@@ -30,7 +30,8 @@ import sys
StandardModules = {
"__future__": (2, 1),
"abc": (2, 6),
- "argparse": (2, 7),
+# skip argparse now that it's in lib/spack/external
+# "argparse": (2, 7),
"ast": (2, 6),
"atexit": (2, 0),
"bz2": (2, 3),
diff --git a/lib/spack/llnl/util/filesystem.py b/lib/spack/llnl/util/filesystem.py
index 03f25d3dff..70d46a7f77 100644
--- a/lib/spack/llnl/util/filesystem.py
+++ b/lib/spack/llnl/util/filesystem.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -25,9 +25,13 @@
__all__ = ['set_install_permissions', 'install', 'install_tree', 'traverse_tree',
'expand_user', 'working_dir', 'touch', 'touchp', 'mkdirp',
'force_remove', 'join_path', 'ancestor', 'can_access', 'filter_file',
- 'FileFilter', 'change_sed_delimiter', 'is_exe', 'force_symlink']
+ 'FileFilter', 'change_sed_delimiter', 'is_exe', 'force_symlink',
+ 'set_executable', 'copy_mode', 'unset_executable_mode',
+ 'remove_dead_links', 'remove_linked_tree', 'find_library_path',
+ 'fix_darwin_install_name']
import os
+import glob
import sys
import re
import shutil
@@ -36,6 +40,7 @@ import errno
import getpass
from contextlib import contextmanager, closing
from tempfile import NamedTemporaryFile
+import subprocess
import llnl.util.tty as tty
from spack.util.compression import ALLOWED_ARCHIVE_TYPES
@@ -152,15 +157,28 @@ def set_install_permissions(path):
def copy_mode(src, dest):
src_mode = os.stat(src).st_mode
dest_mode = os.stat(dest).st_mode
- if src_mode | stat.S_IXUSR: dest_mode |= stat.S_IXUSR
- if src_mode | stat.S_IXGRP: dest_mode |= stat.S_IXGRP
- if src_mode | stat.S_IXOTH: dest_mode |= stat.S_IXOTH
+ if src_mode & stat.S_IXUSR: dest_mode |= stat.S_IXUSR
+ if src_mode & stat.S_IXGRP: dest_mode |= stat.S_IXGRP
+ if src_mode & stat.S_IXOTH: dest_mode |= stat.S_IXOTH
os.chmod(dest, dest_mode)
+def unset_executable_mode(path):
+ mode = os.stat(path).st_mode
+ mode &= ~stat.S_IXUSR
+ mode &= ~stat.S_IXGRP
+ mode &= ~stat.S_IXOTH
+ os.chmod(path, mode)
+
+
def install(src, dest):
"""Manually install a file to a particular location."""
tty.debug("Installing %s to %s" % (src, dest))
+
+    # Expand dest to its eventual full path if it is a directory.
+ if os.path.isdir(dest):
+ dest = join_path(dest, os.path.basename(src))
+
shutil.copy(src, dest)
set_install_permissions(dest)
copy_mode(src, dest)
@@ -235,7 +253,7 @@ def touchp(path):
def force_symlink(src, dest):
try:
os.symlink(src, dest)
- except OSError, e:
+ except OSError as e:
os.remove(dest)
os.symlink(src, dest)
@@ -339,3 +357,82 @@ def traverse_tree(source_root, dest_root, rel_path='', **kwargs):
if order == 'post':
yield (source_path, dest_path)
+
+
+def set_executable(path):
+ st = os.stat(path)
+ os.chmod(path, st.st_mode | stat.S_IEXEC)
+
+
+def remove_dead_links(root):
+ """
+ Removes any dead link that is present in root
+
+ Args:
+ root: path where to search for dead links
+
+ """
+ for file in os.listdir(root):
+ path = join_path(root, file)
+ if os.path.islink(path):
+ real_path = os.path.realpath(path)
+ if not os.path.exists(real_path):
+ os.unlink(path)
+
+def remove_linked_tree(path):
+ """
+ Removes a directory and its contents. If the directory is a
+ symlink, follows the link and removes the real directory before
+ removing the link.
+
+ Args:
+ path: directory to be removed
+
+ """
+ if os.path.exists(path):
+ if os.path.islink(path):
+ shutil.rmtree(os.path.realpath(path), True)
+ os.unlink(path)
+ else:
+ shutil.rmtree(path, True)
+
+
+def fix_darwin_install_name(path):
+ """
+ Fix install name of dynamic libraries on Darwin to have full path.
+    There are two parts to this task:
+    (i) use install_name_tool -id to change the install name of a single lib;
+    (ii) use install_name_tool -change to fix the cross-linking between libs.
+    The function assumes that all libraries are in one folder and currently
+    won't follow subfolders.
+
+ Args:
+        path: directory in which .dylib files are located
+
+ """
+ libs = glob.glob(join_path(path,"*.dylib"))
+ for lib in libs:
+ # fix install name first:
+ subprocess.Popen(["install_name_tool", "-id",lib,lib], stdout=subprocess.PIPE).communicate()[0]
+ long_deps = subprocess.Popen(["otool", "-L",lib], stdout=subprocess.PIPE).communicate()[0].split('\n')
+ deps = [dep.partition(' ')[0][1::] for dep in long_deps[2:-1]]
+ # fix all dependencies:
+ for dep in deps:
+ for loc in libs:
+ if dep == os.path.basename(loc):
+ subprocess.Popen(["install_name_tool", "-change",dep,loc,lib], stdout=subprocess.PIPE).communicate()[0]
+ break
+
+
+def find_library_path(libname, *paths):
+ """Searches for a file called <libname> in each path.
+
+ Return:
+ directory where the library was found, if found. None otherwise.
+
+ """
+ for path in paths:
+ library = join_path(path, libname)
+ if os.path.exists(library):
+ return path
+ return None
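
A sketch of how the new Darwin helpers combine (the directories and library
name are hypothetical, and install_name_tool only exists on Darwin)::

    from llnl.util.filesystem import (find_library_path,
                                      fix_darwin_install_name)

    # Hypothetical search paths; returns the first directory containing
    # the named file, or None.
    libdir = find_library_path('libexample.dylib', '/opt/lib', '/usr/local/lib')
    if libdir is not None:
        # Rewrite install names and cross-library references to full paths.
        fix_darwin_install_name(libdir)
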
diff --git a/lib/spack/llnl/util/lang.py b/lib/spack/llnl/util/lang.py
index 156ee34c9e..3b4e2c8352 100644
--- a/lib/spack/llnl/util/lang.py
+++ b/lib/spack/llnl/util/lang.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -26,6 +26,7 @@ import os
import re
import sys
import functools
+import collections
import inspect
# Ignore emacs backups when listing modules
@@ -112,11 +113,12 @@ def partition_list(elements, predicate):
def caller_locals():
"""This will return the locals of the *parent* of the caller.
- This allows a fucntion to insert variables into its caller's
+ This allows a function to insert variables into its caller's
scope. Yes, this is some black magic, and yes it's useful
for implementing things like depends_on and provides.
"""
- stack = inspect.stack()
+ # Passing zero here skips line context for speed.
+ stack = inspect.stack(0)
try:
return stack[2][0].f_locals
finally:
@@ -127,7 +129,8 @@ def get_calling_module_name():
"""Make sure that the caller is a class definition, and return the
enclosing module's name.
"""
- stack = inspect.stack()
+ # Passing zero here skips line context for speed.
+ stack = inspect.stack(0)
try:
# Make sure locals contain __module__
caller_locals = stack[2][0].f_locals
@@ -167,16 +170,32 @@ def has_method(cls, name):
return False
-def memoized(obj):
+class memoized(object):
"""Decorator that caches the results of a function, storing them
in an attribute of that function."""
- cache = obj.cache = {}
- @functools.wraps(obj)
- def memoizer(*args, **kwargs):
- if args not in cache:
- cache[args] = obj(*args, **kwargs)
- return cache[args]
- return memoizer
+ def __init__(self, func):
+ self.func = func
+ self.cache = {}
+
+
+ def __call__(self, *args):
+ if not isinstance(args, collections.Hashable):
+ # Not hashable, so just call the function.
+ return self.func(*args)
+
+ if args not in self.cache:
+ self.cache[args] = self.func(*args)
+ return self.cache[args]
+
+
+ def __get__(self, obj, objtype):
+ """Support instance methods."""
+ return functools.partial(self.__call__, obj)
+
+
+ def clear(self):
+ """Expunge cache so that self.func will be called again."""
+ self.cache.clear()
def list_modules(directory, **kwargs):
@@ -218,11 +237,11 @@ def key_ordering(cls):
if not has_method(cls, '_cmp_key'):
raise TypeError("'%s' doesn't define _cmp_key()." % cls.__name__)
- setter('__eq__', lambda s,o: o is not None and s._cmp_key() == o._cmp_key())
+ setter('__eq__', lambda s,o: (s is o) or (o is not None and s._cmp_key() == o._cmp_key()))
setter('__lt__', lambda s,o: o is not None and s._cmp_key() < o._cmp_key())
setter('__le__', lambda s,o: o is not None and s._cmp_key() <= o._cmp_key())
- setter('__ne__', lambda s,o: o is None or s._cmp_key() != o._cmp_key())
+ setter('__ne__', lambda s,o: (s is not o) and (o is None or s._cmp_key() != o._cmp_key()))
setter('__gt__', lambda s,o: o is None or s._cmp_key() > o._cmp_key())
setter('__ge__', lambda s,o: o is None or s._cmp_key() >= o._cmp_key())
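
Unlike the old closure, the class-based memoized exposes its cache and can
be reset; a minimal sketch::

    from llnl.util.lang import memoized

    @memoized
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    fib(30)       # each distinct argument tuple is computed once
    fib.cache     # inspectable {args: result} dict
    fib.clear()   # empty the cache explicitly
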
diff --git a/lib/spack/llnl/util/link_tree.py b/lib/spack/llnl/util/link_tree.py
index 583f077b79..6ae8aff75c 100644
--- a/lib/spack/llnl/util/link_tree.py
+++ b/lib/spack/llnl/util/link_tree.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py
index ac3684bd55..a7a9bf6b19 100644
--- a/lib/spack/llnl/util/lock.py
+++ b/lib/spack/llnl/util/lock.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/llnl/util/tty/__init__.py b/lib/spack/llnl/util/tty/__init__.py
index 48368543ff..3ecd3a4ac2 100644
--- a/lib/spack/llnl/util/tty/__init__.py
+++ b/lib/spack/llnl/util/tty/__init__.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -63,35 +63,46 @@ def msg(message, *args):
def info(message, *args, **kwargs):
format = kwargs.get('format', '*b')
stream = kwargs.get('stream', sys.stdout)
+ wrap = kwargs.get('wrap', False)
cprint("@%s{==>} %s" % (format, cescape(str(message))), stream=stream)
for arg in args:
- lines = textwrap.wrap(
- str(arg), initial_indent=indent, subsequent_indent=indent)
- for line in lines:
- stream.write(line + '\n')
+ if wrap:
+ lines = textwrap.wrap(
+ str(arg), initial_indent=indent, subsequent_indent=indent)
+ for line in lines:
+ stream.write(line + '\n')
+ else:
+ stream.write(indent + str(arg) + '\n')
-def verbose(message, *args):
+def verbose(message, *args, **kwargs):
if _verbose:
- info(message, *args, format='c')
+ kwargs.setdefault('format', 'c')
+ info(message, *args, **kwargs)
-def debug(message, *args):
+def debug(message, *args, **kwargs):
if _debug:
- info(message, *args, format='g', stream=sys.stderr)
+ kwargs.setdefault('format', 'g')
+ kwargs.setdefault('stream', sys.stderr)
+ info(message, *args, **kwargs)
-def error(message, *args):
- info("Error: " + str(message), *args, format='*r', stream=sys.stderr)
+def error(message, *args, **kwargs):
+ kwargs.setdefault('format', '*r')
+ kwargs.setdefault('stream', sys.stderr)
+ info("Error: " + str(message), *args, **kwargs)
-def warn(message, *args):
- info("Warning: " + str(message), *args, format='*Y', stream=sys.stderr)
+def warn(message, *args, **kwargs):
+ kwargs.setdefault('format', '*Y')
+ kwargs.setdefault('stream', sys.stderr)
+ info("Warning: " + str(message), *args, **kwargs)
-def die(message, *args):
- error(message, *args)
+def die(message, *args, **kwargs):
+ error(message, *args, **kwargs)
sys.exit(1)
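
The tty helpers above now accept **kwargs and forward them to info() after seeding defaults with dict.setdefault(), so a caller's explicit format= or stream= always wins over the per-helper default. A toy version of the forwarding pattern (simplified signatures, not Spack's actual formatting):

    import sys

    def info(message, *args, **kwargs):
        stream = kwargs.get('stream', sys.stdout)
        stream.write('==> %s\n' % message)

    def warn(message, *args, **kwargs):
        # setdefault only fills in values the caller did not supply.
        kwargs.setdefault('format', '*Y')
        kwargs.setdefault('stream', sys.stderr)
        info('Warning: ' + str(message), *args, **kwargs)

    warn('disk almost full')               # defaults: stderr, yellow
    warn('but log me', stream=sys.stdout)  # caller override is respected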
diff --git a/lib/spack/llnl/util/tty/colify.py b/lib/spack/llnl/util/tty/colify.py
index db928444c7..47c3cc4f8f 100644
--- a/lib/spack/llnl/util/tty/colify.py
+++ b/lib/spack/llnl/util/tty/colify.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -210,6 +210,13 @@ def colify(elts, **options):
def colify_table(table, **options):
+ """Version of colify() for data expressed in rows, (list of lists).
+
+ Same as regular colify but takes a list of lists, where each
+ sub-list must be the same length, and each is interpreted as a
+ row in a table. Regular colify displays a sequential list of
+ values in columns.
+ """
if table is None:
raise TypeError("Can't call colify_table on NoneType")
elif not table or not table[0]:
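
Feeding row-major data to a column-oriented formatter amounts to a transpose; in plain Python that is a single zip call (the data here is illustrative):

    table = [['gcc', '4.9.2'], ['clang', '3.6.0'], ['intel', '16.0.1']]
    columns = zip(*table)
    # [('gcc', 'clang', 'intel'), ('4.9.2', '3.6.0', '16.0.1')]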
diff --git a/lib/spack/llnl/util/tty/color.py b/lib/spack/llnl/util/tty/color.py
index 0d09303da0..167a99d3c2 100644
--- a/lib/spack/llnl/util/tty/color.py
+++ b/lib/spack/llnl/util/tty/color.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/llnl/util/tty/log.py b/lib/spack/llnl/util/tty/log.py
index 5a52d45bc7..22f1087e53 100644
--- a/lib/spack/llnl/util/tty/log.py
+++ b/lib/spack/llnl/util/tty/log.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -122,6 +122,10 @@ class log_output(object):
self.force_color = force_color
self.debug = debug
+ # Default is to try file-descriptor reassignment unless the system
+ # out/err streams do not have an associated file descriptor
+ self.directAssignment = False
+
def trace(self, frame, event, arg):
"""Jumps to __exit__ on the child process."""
raise _SkipWithBlock()
@@ -185,13 +189,21 @@ class log_output(object):
# Child: redirect output, execute the with block.
os.close(read)
- # Save old stdout and stderr
- self._stdout = os.dup(sys.stdout.fileno())
- self._stderr = os.dup(sys.stderr.fileno())
-
- # redirect to the pipe.
- os.dup2(write, sys.stdout.fileno())
- os.dup2(write, sys.stderr.fileno())
+ try:
+ # Save old stdout and stderr
+ self._stdout = os.dup(sys.stdout.fileno())
+ self._stderr = os.dup(sys.stderr.fileno())
+
+ # redirect to the pipe.
+ os.dup2(write, sys.stdout.fileno())
+ os.dup2(write, sys.stderr.fileno())
+ except AttributeError:
+ self.directAssignment = True
+ self._stdout = sys.stdout
+ self._stderr = sys.stderr
+ output_redirect = os.fdopen(write, 'w')
+ sys.stdout = output_redirect
+ sys.stderr = output_redirect
if self.force_color:
color._force_color = True
@@ -218,8 +230,12 @@ class log_output(object):
#
# TODO: think about how this works outside install.
# TODO: ideally would propagate exception to parent...
- os.dup2(self._stdout, sys.stdout.fileno())
- os.dup2(self._stderr, sys.stderr.fileno())
+ if self.directAssignment:
+ sys.stdout = self._stdout
+ sys.stderr = self._stderr
+ else:
+ os.dup2(self._stdout, sys.stdout.fileno())
+ os.dup2(self._stderr, sys.stderr.fileno())
return False
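
The try/except AttributeError above covers streams that have no real file descriptor (for instance, a StringIO substituted by a test harness), where fileno() is missing and os.dup2() cannot be used. A minimal sketch of the dup2-with-fallback idea, independent of Spack:

    import os
    import sys

    def redirect_output_to(write_fd):
        """Route stdout/stderr into write_fd; return what's needed to undo."""
        try:
            saved = (os.dup(sys.stdout.fileno()), os.dup(sys.stderr.fileno()))
            os.dup2(write_fd, sys.stdout.fileno())
            os.dup2(write_fd, sys.stderr.fileno())
            return ('fd', saved)
        except AttributeError:
            # Stream objects without fileno(): rebind the module globals.
            saved = (sys.stdout, sys.stderr)
            sys.stdout = sys.stderr = os.fdopen(write_fd, 'w')
            return ('direct', saved)

Undoing mirrors the two branches, exactly as __exit__ does with directAssignment above.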
diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py
index 1ecf662178..9108e1d0e3 100644
--- a/lib/spack/spack/__init__.py
+++ b/lib/spack/spack/__init__.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,8 +23,11 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
+import sys
import tempfile
+import getpass
from llnl.util.filesystem import *
+import llnl.util.tty as tty
# This lives in $prefix/lib/spack/spack/__file__
spack_root = ancestor(__file__, 4)
@@ -41,6 +44,7 @@ test_path = join_path(module_path, "test")
hooks_path = join_path(module_path, "hooks")
var_path = join_path(spack_root, "var", "spack")
stage_path = join_path(var_path, "stage")
+repos_path = join_path(var_path, "repos")
share_path = join_path(spack_root, "share", "spack")
prefix = spack_root
@@ -49,11 +53,14 @@ install_path = join_path(opt_path, "spack")
etc_path = join_path(prefix, "etc")
#
-# Set up the packages database.
+# Set up the default packages database.
#
-from spack.packages import PackageDB
-packages_path = join_path(var_path, "packages")
-db = PackageDB(packages_path)
+import spack.repository
+try:
+ repo = spack.repository.RepoPath()
+ sys.meta_path.append(repo)
+except spack.error.SpackError, e:
+ tty.die('while initializing Spack RepoPath:', e.message)
#
# Set up the installed packages database
@@ -62,13 +69,10 @@ from spack.database import Database
installed_db = Database(install_path)
#
-# Paths to mock files for testing.
+# Paths to built-in Spack repositories.
#
-mock_packages_path = join_path(var_path, "mock_packages")
-
-mock_config_path = join_path(var_path, "mock_configs")
-mock_site_config = join_path(mock_config_path, "site_spackconfig")
-mock_user_config = join_path(mock_config_path, "user_spackconfig")
+packages_path = join_path(repos_path, "builtin")
+mock_packages_path = join_path(repos_path, "builtin.mock")
#
# This controls how spack lays out install prefixes and
@@ -78,6 +82,20 @@ from spack.directory_layout import YamlDirectoryLayout
install_layout = YamlDirectoryLayout(install_path)
#
+# This controls how packages are sorted when trying to choose
+# the most preferred package. More preferred packages are sorted
+# first.
+#
+from spack.preferred_packages import PreferredPackages
+pkgsort = PreferredPackages()
+
+#
+# This tests ABI compatibility between packages
+#
+from spack.abi import ABI
+abi = ABI()
+
+#
# This controls how things are concretized in spack.
# Replace it with a subclass if you want different
# policies.
@@ -111,9 +129,15 @@ use_tmp_stage = True
# that it can create.
tmp_dirs = []
_default_tmp = tempfile.gettempdir()
-if _default_tmp != os.getcwd():
- tmp_dirs.append(os.path.join(_default_tmp, 'spack-stage'))
-tmp_dirs.append('/nfs/tmp2/%u/spack-stage')
+_tmp_user = getpass.getuser()
+
+_tmp_candidates = (_default_tmp, '/nfs/tmp2', '/tmp', '/var/tmp')
+for path in _tmp_candidates:
+ # don't add a second username if it's already unique by user.
+ if not _tmp_user in path:
+ tmp_dirs.append(join_path(path, '%u', 'spack-stage'))
+ else:
+ tmp_dirs.append(join_path(path, 'spack-stage'))
# Whether spack should allow installation of unsafe versions of
# software. "Unsafe" versions are ones it doesn't have a checksum
@@ -140,7 +164,7 @@ sys_type = None
# When packages call 'from spack import *', this extra stuff is brought in.
#
# Spack internal code should call 'import spack' and accesses other
-# variables (spack.db, paths, etc.) directly.
+# variables (spack.repo, paths, etc.) directly.
#
# TODO: maybe this should be separated out and should go in build_environment.py?
# TODO: it's not clear where all the stuff that needs to be included in packages
@@ -162,3 +186,10 @@ __all__ += spack.directives.__all__
import spack.util.executable
from spack.util.executable import *
__all__ += spack.util.executable.__all__
+
+from spack.package import \
+ install_dependency_symlinks, flatten_dependencies, DependencyConflictError, \
+ InstallError, ExternalPackageError
+__all__ += [
+ 'install_dependency_symlinks', 'flatten_dependencies', 'DependencyConflictError',
+ 'InstallError', 'ExternalPackageError']
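
The staging-directory logic above adds a per-user subdirectory only when the candidate path is not already user-specific. Spack stores a literal %u placeholder that is expanded later; a sketch that substitutes the username directly:

    import getpass
    import os
    import tempfile

    user = getpass.getuser()
    candidates = (tempfile.gettempdir(), '/nfs/tmp2', '/tmp', '/var/tmp')

    tmp_dirs = []
    for path in candidates:
        if user not in path:
            # e.g. /tmp -> /tmp/<user>/spack-stage
            tmp_dirs.append(os.path.join(path, user, 'spack-stage'))
        else:
            # already unique per user; don't double the username
            tmp_dirs.append(os.path.join(path, 'spack-stage'))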
diff --git a/lib/spack/spack/abi.py b/lib/spack/spack/abi.py
new file mode 100644
index 0000000000..7e565bcbf9
--- /dev/null
+++ b/lib/spack/spack/abi.py
@@ -0,0 +1,128 @@
+##############################################################################
+# Copyright (c) 2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+
+import os
+import spack
+import spack.spec
+from spack.spec import CompilerSpec
+from spack.util.executable import Executable, ProcessError
+from llnl.util.lang import memoized
+
+class ABI(object):
+ """This class provides methods to test ABI compatibility between specs.
+ The current implementation is rather rough and could be improved."""
+
+ def architecture_compatible(self, parent, child):
+ """Returns true iff the parent and child specs have ABI compatible architectures."""
+ return not parent.architecture or not child.architecture or parent.architecture == child.architecture
+
+
+ @memoized
+ def _gcc_get_libstdcxx_version(self, version):
+ """Returns gcc ABI compatibility info by getting the library version of
+ a compiler's libstdc++.so or libgcc_s.so"""
+ spec = CompilerSpec("gcc", version)
+ compilers = spack.compilers.compilers_for_spec(spec)
+ if not compilers:
+ return None
+ compiler = compilers[0]
+ rungcc = None
+ libname = None
+ output = None
+ if compiler.cxx:
+ rungcc = Executable(compiler.cxx)
+ libname = "libstdc++.so"
+ elif compiler.cc:
+ rungcc = Executable(compiler.cc)
+ libname = "libgcc_s.so"
+ else:
+ return None
+ try:
+ output = rungcc("--print-file-name=%s" % libname, return_output=True)
+ except ProcessError, e:
+ return None
+ if not output:
+ return None
+ libpath = os.readlink(output.strip())
+ if not libpath:
+ return None
+ return os.path.basename(libpath)
+
+
+ @memoized
+ def _gcc_compiler_compare(self, pversion, cversion):
+ """Returns true iff the gcc version pversion and cversion
+ are ABI compatible."""
+ plib = self._gcc_get_libstdcxx_version(pversion)
+ clib = self._gcc_get_libstdcxx_version(cversion)
+ if not plib or not clib:
+ return False
+ return plib == clib
+
+
+ def _intel_compiler_compare(self, pversion, cversion):
+ """Returns true iff the intel version pversion and cversion
+ are ABI compatible"""
+
+ # Test major and minor versions. Ignore build version.
+ if (len(pversion.version) < 2 or len(cversion.version) < 2):
+ return False
+ return pversion.version[:2] == cversion.version[:2]
+
+
+ def compiler_compatible(self, parent, child, **kwargs):
+ """Returns true iff the compilers for parent and child specs are ABI compatible"""
+ if not parent.compiler or not child.compiler:
+ return True
+
+ if parent.compiler.name != child.compiler.name:
+ # Different compiler families are assumed ABI incompatible
+ return False
+
+ if kwargs.get('loose', False):
+ return True
+
+ # TODO: Can we move the specialized ABI matching stuff
+ # TODO: into compiler classes?
+ for pversion in parent.compiler.versions:
+ for cversion in child.compiler.versions:
+            # For a few compilers use specialized comparisons. Otherwise
+            # fall back to direct version matching.
+ if pversion.satisfies(cversion):
+ return True
+ elif (parent.compiler.name == "gcc" and
+ self._gcc_compiler_compare(pversion, cversion)):
+ return True
+ elif (parent.compiler.name == "intel" and
+ self._intel_compiler_compare(pversion, cversion)):
+ return True
+ return False
+
+
+ def compatible(self, parent, child, **kwargs):
+ """Returns true iff a parent and child spec are ABI compatible"""
+ loosematch = kwargs.get('loose', False)
+ return self.architecture_compatible(parent, child) and \
+ self.compiler_compatible(parent, child, loose=loosematch)
diff --git a/lib/spack/spack/architecture.py b/lib/spack/spack/architecture.py
index 6c874e30be..2701fab90c 100644
--- a/lib/spack/spack/architecture.py
+++ b/lib/spack/spack/architecture.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py
index f30a5b0702..e456f11292 100644
--- a/lib/spack/spack/build_environment.py
+++ b/lib/spack/spack/build_environment.py
@@ -3,7 +3,7 @@ This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
-There are two parts to the bulid environment:
+There are two parts to the build environment:
1. Python build environment (i.e. install() method)
@@ -13,7 +13,7 @@ There are two parts to the bulid environment:
    the package's module scope. This allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
- readable and more like the shell script from whcih someone is
+ readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
@@ -35,10 +35,14 @@ import platform
from llnl.util.filesystem import *
import spack
+import llnl.util.tty as tty
+from llnl.util.filesystem import *
+from spack.environment import EnvironmentModifications, validate
import spack.compilers as compilers
import spack.compiler as Compiler
from spack.util.executable import Executable, which
from spack.util.environment import *
+from spack.util.executable import Executable, which
#
# This can be set by the user to globally disable parallel builds.
@@ -58,6 +62,12 @@ SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
+
+# Platform-specific library suffix.
+dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
+
+
+
class MakeExecutable(Executable):
"""Special callable executable object for make so the user can
specify parallel or not on a per-invocation basis. Using
@@ -83,76 +93,95 @@ class MakeExecutable(Executable):
return super(MakeExecutable, self).__call__(*args, **kwargs)
-def set_compiler_environment_variables(pkg):
- assert(pkg.spec.concrete)
+def set_compiler_environment_variables(pkg, env):
+ assert pkg.spec.concrete
compiler = pkg.compiler
flags = pkg.spec.compiler_flags
# Set compiler variables used by CMake and autotools
- os.environ['CC'] = join_path(spack.build_env_path, 'cc')
- os.environ['CXX'] = join_path(spack.build_env_path, 'c++')
- os.environ['F77'] = join_path(spack.build_env_path, 'f77')
- os.environ['FC'] = join_path(spack.build_env_path, 'f90')
+ assert all(key in pkg.compiler.link_paths for key in ('cc', 'cxx', 'f77', 'fc'))
+
+ # Populate an object with the list of environment modifications
+ # and return it
+ # TODO : add additional kwargs for better diagnostics, like requestor, ttyout, ttyerr, etc.
+ link_dir = spack.build_env_path
+ env.set('CC', join_path(link_dir, pkg.compiler.link_paths['cc']))
+ env.set('CXX', join_path(link_dir, pkg.compiler.link_paths['cxx']))
+ env.set('F77', join_path(link_dir, pkg.compiler.link_paths['f77']))
+ env.set('FC', join_path(link_dir, pkg.compiler.link_paths['fc']))
# Set SPACK compiler variables so that our wrapper knows what to call
if compiler.cc:
- os.environ['SPACK_CC'] = compiler.cc
+ env.set('SPACK_CC', compiler.cc)
if compiler.cxx:
- os.environ['SPACK_CXX'] = compiler.cxx
+ env.set('SPACK_CXX', compiler.cxx)
if compiler.f77:
- os.environ['SPACK_F77'] = compiler.f77
+ env.set('SPACK_F77', compiler.f77)
if compiler.fc:
- os.environ['SPACK_FC'] = compiler.fc
-
+ env.set('SPACK_FC', compiler.fc)
# Add every valid compiler flag to the environment, prefaced by "SPACK_"
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Concreteness guarantees key safety here
- if flags[flag] != '':
- os.environ['SPACK_'+flag.upper()] = ' '.join(f for f in flags[flag])
+ if flags[flag] != []:
+ env.set('SPACK_'+flag.upper(), ' '.join(f for f in flags[flag]))
- os.environ['SPACK_COMPILER_SPEC'] = str(pkg.spec.compiler)
+ env.set('SPACK_COMPILER_SPEC', str(pkg.spec.compiler))
+ return env
-def set_build_environment_variables(pkg):
- """This ensures a clean install environment when we build packages.
+def set_build_environment_variables(pkg, env):
+ """
+ This ensures a clean install environment when we build packages
"""
# Add spack build environment path with compiler wrappers first in
- # the path. We handle case sensitivity conflicts like "CC" and
- # "cc" by putting one in the <build_env_path>/case-insensitive
+ # the path. We add both spack.env_path, which includes default
+ # wrappers (cc, c++, f77, f90), AND a subdirectory containing
+ # compiler-specific symlinks. The latter ensures that builds that
+ # are sensitive to the *name* of the compiler see the right name
+ # when we're building with the wrappers.
+ #
+ # Conflicts on case-insensitive systems (like "CC" and "cc") are
+ # handled by putting one in the <build_env_path>/case-insensitive
# directory. Add that to the path too.
- env_paths = [spack.build_env_path,
- join_path(spack.build_env_path, 'case-insensitive')]
- path_put_first("PATH", env_paths)
- path_set(SPACK_ENV_PATH, env_paths)
-
- # Prefixes of all of the package's dependencies go in
- # SPACK_DEPENDENCIES
+ env_paths = []
+ for item in [spack.build_env_path, join_path(spack.build_env_path, pkg.compiler.name)]:
+ env_paths.append(item)
+ ci = join_path(item, 'case-insensitive')
+ if os.path.isdir(ci):
+ env_paths.append(ci)
+
+ for item in reversed(env_paths):
+ env.prepend_path('PATH', item)
+ env.set_path(SPACK_ENV_PATH, env_paths)
+
+ # Prefixes of all of the package's dependencies go in SPACK_DEPENDENCIES
dep_prefixes = [d.prefix for d in pkg.spec.traverse(root=False)]
- path_set(SPACK_DEPENDENCIES, dep_prefixes)
+ env.set_path(SPACK_DEPENDENCIES, dep_prefixes)
+ env.set_path('CMAKE_PREFIX_PATH', dep_prefixes) # Add dependencies to CMAKE_PREFIX_PATH
# Install prefix
- os.environ[SPACK_PREFIX] = pkg.prefix
+ env.set(SPACK_PREFIX, pkg.prefix)
# Install root prefix
- os.environ[SPACK_INSTALL] = spack.install_path
+ env.set(SPACK_INSTALL, spack.install_path)
- # Remove these vars from the environment during build becaus they
+ # Remove these vars from the environment during build because they
# can affect how some packages find libraries. We want to make
# sure that builds never pull in unintended external dependencies.
- pop_keys(os.environ, "LD_LIBRARY_PATH", "LD_RUN_PATH", "DYLD_LIBRARY_PATH")
+ env.unset('LD_LIBRARY_PATH')
+ env.unset('LD_RUN_PATH')
+ env.unset('DYLD_LIBRARY_PATH')
# Add bin directories from dependencies to the PATH for the build.
- bin_dirs = ['%s/bin' % prefix for prefix in dep_prefixes]
- path_put_first('PATH', [bin for bin in bin_dirs if os.path.isdir(bin)])
+ bin_dirs = reversed(filter(os.path.isdir, ['%s/bin' % prefix for prefix in dep_prefixes]))
+ for item in bin_dirs:
+ env.prepend_path('PATH', item)
# Working directory for the spack command itself, for debug logs.
if spack.debug:
- os.environ[SPACK_DEBUG] = "TRUE"
- os.environ[SPACK_SHORT_SPEC] = pkg.spec.short_spec
- os.environ[SPACK_DEBUG_LOG_DIR] = spack.spack_working_dir
-
- # Add dependencies to CMAKE_PREFIX_PATH
- path_set("CMAKE_PREFIX_PATH", dep_prefixes)
+ env.set(SPACK_DEBUG, 'TRUE')
+ env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
+ env.set(SPACK_DEBUG_LOG_DIR, spack.spack_working_dir)
# Add any pkgconfig directories to PKG_CONFIG_PATH
pkg_config_dirs = []
@@ -161,21 +190,23 @@ def set_build_environment_variables(pkg):
pcdir = join_path(p, libdir, 'pkgconfig')
if os.path.isdir(pcdir):
pkg_config_dirs.append(pcdir)
- path_set("PKG_CONFIG_PATH", pkg_config_dirs)
+ env.set_path('PKG_CONFIG_PATH', pkg_config_dirs)
+
+ return env
-def set_module_variables_for_package(pkg):
+def set_module_variables_for_package(pkg, module):
"""Populate the module scope of install() with some useful functions.
This makes things easier for package writers.
"""
- m = pkg.module
-
    # number of jobs spack will build with.
jobs = multiprocessing.cpu_count()
if not pkg.parallel:
jobs = 1
elif pkg.make_jobs:
jobs = pkg.make_jobs
+
+ m = module
m.make_jobs = jobs
# TODO: make these build deps that can be installed if not found.
@@ -193,7 +224,7 @@ def set_module_variables_for_package(pkg):
# TODO: of build dependencies, as opposed to link dependencies.
# TODO: Currently, everything is a link dependency, but tools like
# TODO: this shouldn't be.
- m.cmake = which("cmake")
+ m.cmake = Executable('cmake')
# standard CMake arguments
m.std_cmake_args = ['-DCMAKE_INSTALL_PREFIX=%s' % pkg.prefix,
@@ -205,6 +236,13 @@ def set_module_variables_for_package(pkg):
m.std_cmake_args.append('-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=FALSE')
m.std_cmake_args.append('-DCMAKE_INSTALL_RPATH=%s' % ":".join(get_rpaths(pkg)))
+ # Put spack compiler paths in module scope.
+ link_dir = spack.build_env_path
+ m.spack_cc = join_path(link_dir, pkg.compiler.link_paths['cc'])
+ m.spack_cxx = join_path(link_dir, pkg.compiler.link_paths['cxx'])
+ m.spack_f77 = join_path(link_dir, pkg.compiler.link_paths['f77'])
+ m.spack_fc = join_path(link_dir, pkg.compiler.link_paths['fc'])
+
# Emulate some shell commands for convenience
m.pwd = os.getcwd
m.cd = os.chdir
@@ -224,27 +262,82 @@ def set_module_variables_for_package(pkg):
# a Prefix object.
m.prefix = pkg.prefix
+ # Platform-specific library suffix.
+ m.dso_suffix = dso_suffix
+
def get_rpaths(pkg):
"""Get a list of all the rpaths for a package."""
rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
- rpaths.extend(d.prefix.lib for d in pkg.spec.traverse(root=False)
+ rpaths.extend(d.prefix.lib for d in pkg.spec.dependencies.values()
if os.path.isdir(d.prefix.lib))
- rpaths.extend(d.prefix.lib64 for d in pkg.spec.traverse(root=False)
+ rpaths.extend(d.prefix.lib64 for d in pkg.spec.dependencies.values()
if os.path.isdir(d.prefix.lib64))
return rpaths
+def parent_class_modules(cls):
+ """Get list of super class modules that are all descend from spack.Package"""
+ if not issubclass(cls, spack.Package) or issubclass(spack.Package, cls):
+ return []
+ result = []
+ module = sys.modules.get(cls.__module__)
+ if module:
+ result = [ module ]
+ for c in cls.__bases__:
+ result.extend(parent_class_modules(c))
+ return result
+
+
def setup_package(pkg):
"""Execute all environment setup routines."""
- set_compiler_environment_variables(pkg)
- set_build_environment_variables(pkg)
- set_module_variables_for_package(pkg)
-
- # Allow dependencies to set up environment as well.
- for dep_spec in pkg.spec.traverse(root=False):
- dep_spec.package.setup_dependent_environment(
- pkg.module, dep_spec, pkg.spec)
+ spack_env = EnvironmentModifications()
+ run_env = EnvironmentModifications()
+
+ # Before proceeding, ensure that specs and packages are consistent
+ #
+ # This is a confusing behavior due to how packages are
+ # constructed. `setup_dependent_package` may set attributes on
+ # specs in the DAG for use by other packages' install
+ # method. However, spec.package will look up a package via
+ # spack.repo, which defensively copies specs into packages. This
+ # code ensures that all packages in the DAG have pieces of the
+ # same spec object at build time.
+ #
+ # This is safe for the build process, b/c the build process is a
+ # throwaway environment, but it is kind of dirty.
+ #
+ # TODO: Think about how to avoid this fix and do something cleaner.
+ for s in pkg.spec.traverse(): s.package.spec = s
+
+ set_compiler_environment_variables(pkg, spack_env)
+ set_build_environment_variables(pkg, spack_env)
+
+ # traverse in postorder so package can use vars from its dependencies
+ spec = pkg.spec
+ for dspec in pkg.spec.traverse(order='post', root=False):
+ # If a user makes their own package repo, e.g.
+ # spack.repos.mystuff.libelf.Libelf, and they inherit from
+ # an existing class like spack.repos.original.libelf.Libelf,
+ # then set the module variables for both classes so the
+ # parent class can still use them if it gets called.
+ spkg = dspec.package
+ modules = parent_class_modules(spkg.__class__)
+ for mod in modules:
+ set_module_variables_for_package(spkg, mod)
+ set_module_variables_for_package(spkg, spkg.module)
+
+ # Allow dependencies to modify the module
+ dpkg = dspec.package
+ dpkg.setup_dependent_package(pkg.module, spec)
+ dpkg.setup_dependent_environment(spack_env, run_env, spec)
+
+ set_module_variables_for_package(pkg, pkg.module)
+ pkg.setup_environment(spack_env, run_env)
+
+ # Make sure nothing's strange about the Spack environment.
+ validate(spack_env, tty.warn)
+ spack_env.apply_modifications()
def fork(pkg, function):
@@ -261,23 +354,23 @@ def fork(pkg, function):
# do stuff
build_env.fork(pkg, child_fun)
- Forked processes are run with the build environemnt set up by
+ Forked processes are run with the build environment set up by
spack.build_environment. This allows package authors to have
- full control over the environment, etc. without offecting
+ full control over the environment, etc. without affecting
other builds that might be executed in the same spack call.
- If something goes wrong, the child process is expected toprint
+ If something goes wrong, the child process is expected to print
the error and the parent process will exit with error as
well. If things go well, the child exits and the parent
carries on.
"""
try:
pid = os.fork()
- except OSError, e:
+ except OSError as e:
raise InstallError("Unable to fork build process: %s" % e)
if pid == 0:
- # Give the child process the package's build environemnt.
+ # Give the child process the package's build environment.
setup_package(pkg)
try:
@@ -288,7 +381,7 @@ def fork(pkg, function):
# which interferes with unit tests.
os._exit(0)
- except spack.error.SpackError, e:
+ except spack.error.SpackError as e:
e.die()
except:
@@ -303,8 +396,7 @@ def fork(pkg, function):
# message. Just make the parent exit with an error code.
pid, returncode = os.waitpid(pid, 0)
if returncode != 0:
- raise InstallError("Installation process had nonzero exit code."
- .format(str(returncode)))
+ raise InstallError("Installation process had nonzero exit code.".format(str(returncode)))
class InstallError(spack.error.SpackError):
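
The biggest change in this file is that environment setup no longer pokes os.environ directly: modifications are recorded on EnvironmentModifications objects (env.set, env.unset, env.prepend_path, env.set_path), validated, and applied in one batch at the end of setup_package(). A toy record-then-apply class showing the shape of that pattern (EnvMods is a stand-in, not Spack's spack.environment API):

    import os

    class EnvMods(object):
        """Collect environment edits, then apply them all at once."""
        def __init__(self):
            self.ops = []

        def set(self, name, value):
            self.ops.append(('set', name, value))

        def unset(self, name):
            self.ops.append(('unset', name, None))

        def prepend_path(self, name, path):
            self.ops.append(('prepend', name, path))

        def apply_modifications(self):
            for op, name, value in self.ops:
                if op == 'set':
                    os.environ[name] = value
                elif op == 'unset':
                    os.environ.pop(name, None)
                elif op == 'prepend':
                    old = os.environ.get(name, '')
                    os.environ[name] = value + (os.pathsep + old if old else '')

    env = EnvMods()
    env.set('SPACK_CC', '/usr/bin/gcc')
    env.unset('LD_LIBRARY_PATH')
    env.prepend_path('PATH', '/opt/spack/env')
    env.apply_modifications()  # os.environ is untouched before this line

Deferring the writes is what lets validate() inspect the whole set of changes before anything takes effect.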
diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py
index 6ce6fa0960..6c635a1e6c 100644
--- a/lib/spack/spack/cmd/__init__.py
+++ b/lib/spack/spack/cmd/__init__.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -31,6 +31,15 @@ from llnl.util.lang import attr_setdefault
import spack
import spack.spec
+import spack.config
+
+#
+# Settings for commands that modify configuration
+#
+# Commands that modify configuration by default modify the *highest* priority scope.
+default_modify_scope = spack.config.highest_precedence_scope().name
+# Commands that list configuration list *all* scopes by default.
+default_list_scope = None
# cmd has a submodule called "list" so preserve the python list module
python_list = list
diff --git a/lib/spack/spack/cmd/activate.py b/lib/spack/spack/cmd/activate.py
index 1004f1f8e6..bcd01f2a28 100644
--- a/lib/spack/spack/cmd/activate.py
+++ b/lib/spack/spack/cmd/activate.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import llnl.util.tty as tty
import spack
import spack.cmd
diff --git a/lib/spack/spack/cmd/arch.py b/lib/spack/spack/cmd/arch.py
index f0e88d1849..db27544ffd 100644
--- a/lib/spack/spack/cmd/arch.py
+++ b/lib/spack/spack/cmd/arch.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/cmd/bootstrap.py b/lib/spack/spack/cmd/bootstrap.py
index f75b68b00a..bdbd623b39 100644
--- a/lib/spack/spack/cmd/bootstrap.py
+++ b/lib/spack/spack/cmd/bootstrap.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -42,7 +42,7 @@ def get_origin_url():
git = which('git', required=True)
origin_url = git(
'--git-dir=%s' % git_dir, 'config', '--get', 'remote.origin.url',
- return_output=True)
+ output=str)
return origin_url.strip()
diff --git a/lib/spack/spack/cmd/cd.py b/lib/spack/spack/cmd/cd.py
index 24d56db7d0..16cbe6555a 100644
--- a/lib/spack/spack/cmd/cd.py
+++ b/lib/spack/spack/cmd/cd.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/cmd/checksum.py b/lib/spack/spack/cmd/checksum.py
index 8a448450c2..518d2703dc 100644
--- a/lib/spack/spack/cmd/checksum.py
+++ b/lib/spack/spack/cmd/checksum.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,23 +22,18 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import os
-import re
-from external import argparse
+import argparse
import hashlib
-from pprint import pprint
-from subprocess import CalledProcessError
import llnl.util.tty as tty
-from llnl.util.tty.colify import colify
-
import spack
import spack.cmd
import spack.util.crypto
from spack.stage import Stage, FailedDownloadError
from spack.version import *
-description ="Checksum available versions of a package."
+description = "Checksum available versions of a package."
+
def setup_parser(subparser):
subparser.add_argument(
@@ -58,30 +53,28 @@ def get_checksums(versions, urls, **kwargs):
tty.msg("Downloading...")
hashes = []
- for i, (url, version) in enumerate(zip(urls, versions)):
- stage = Stage(url)
+ i = 0
+ for url, version in zip(urls, versions):
try:
- stage.fetch()
- if i == 0 and first_stage_function:
- first_stage_function(stage)
-
- hashes.append(
- spack.util.crypto.checksum(hashlib.md5, stage.archive_file))
- except FailedDownloadError, e:
+ with Stage(url, keep=keep_stage) as stage:
+ stage.fetch()
+ if i == 0 and first_stage_function:
+ first_stage_function(stage)
+
+ hashes.append((version,
+ spack.util.crypto.checksum(hashlib.md5, stage.archive_file)))
+ i += 1
+ except FailedDownloadError as e:
tty.msg("Failed to fetch %s" % url)
- continue
-
- finally:
- if not keep_stage:
- stage.destroy()
-
- return zip(versions, hashes)
+ except Exception as e:
+ tty.msg('Something failed on %s, skipping.\n (%s)' % (url, e))
+ return hashes
def checksum(parser, args):
# get the package we're going to generate checksums for
- pkg = spack.db.get(args.package)
+ pkg = spack.repo.get(args.package)
# If the user asked for specific versions, use those.
if args.versions:
@@ -95,13 +88,13 @@ def checksum(parser, args):
else:
versions = pkg.fetch_remote_versions()
if not versions:
- tty.die("Could not fetch any versions for %s." % pkg.name)
+ tty.die("Could not fetch any versions for %s" % pkg.name)
sorted_versions = sorted(versions, reverse=True)
- tty.msg("Found %s versions of %s." % (len(versions), pkg.name),
+ tty.msg("Found %s versions of %s" % (len(versions), pkg.name),
*spack.cmd.elide_list(
- ["%-10s%s" % (v, versions[v]) for v in sorted_versions]))
+ ["%-10s%s" % (v, versions[v]) for v in sorted_versions]))
print
archives_to_fetch = tty.get_number(
"How many would you like to checksum?", default=5, abort='q')
@@ -116,7 +109,7 @@ def checksum(parser, args):
keep_stage=args.keep_stage)
if not version_hashes:
- tty.die("Could not fetch any versions for %s." % pkg.name)
+ tty.die("Could not fetch any versions for %s" % pkg.name)
version_lines = [" version('%s', '%s')" % (v, h) for v, h in version_hashes]
tty.msg("Checksummed new versions of %s:" % pkg.name, *version_lines)
diff --git a/lib/spack/spack/cmd/clean.py b/lib/spack/spack/cmd/clean.py
index c20136ebe5..6e7179122c 100644
--- a/lib/spack/spack/cmd/clean.py
+++ b/lib/spack/spack/cmd/clean.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import llnl.util.tty as tty
@@ -42,5 +42,5 @@ def clean(parser, args):
specs = spack.cmd.parse_specs(args.packages, concretize=True)
for spec in specs:
- package = spack.db.get(spec)
+ package = spack.repo.get(spec)
package.do_clean()
diff --git a/lib/spack/spack/cmd/compiler.py b/lib/spack/spack/cmd/compiler.py
index 2a64dc914e..3e58e82184 100644
--- a/lib/spack/spack/cmd/compiler.py
+++ b/lib/spack/spack/cmd/compiler.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,8 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import sys
+import argparse
import llnl.util.tty as tty
from llnl.util.tty.color import colorize
@@ -41,17 +42,32 @@ def setup_parser(subparser):
sp = subparser.add_subparsers(
metavar='SUBCOMMAND', dest='compiler_command')
- update_parser = sp.add_parser(
- 'add', help='Add compilers to the Spack configuration.')
- update_parser.add_argument('add_paths', nargs=argparse.REMAINDER)
-
- remove_parser = sp.add_parser('remove', help='remove compiler')
- remove_parser.add_argument('path')
-
- list_parser = sp.add_parser('list', help='list available compilers')
-
- info_parser = sp.add_parser('info', help='Show compiler paths.')
+ scopes = spack.config.config_scopes
+
+ # Add
+ add_parser = sp.add_parser('add', help='Add compilers to the Spack configuration.')
+ add_parser.add_argument('add_paths', nargs=argparse.REMAINDER)
+ add_parser.add_argument('--scope', choices=scopes, default=spack.cmd.default_modify_scope,
+ help="Configuration scope to modify.")
+
+ # Remove
+ remove_parser = sp.add_parser('remove', aliases=['rm'], help='Remove compiler by spec.')
+ remove_parser.add_argument(
+ '-a', '--all', action='store_true', help='Remove ALL compilers that match spec.')
+ remove_parser.add_argument('compiler_spec')
+ remove_parser.add_argument('--scope', choices=scopes, default=spack.cmd.default_modify_scope,
+ help="Configuration scope to modify.")
+
+ # List
+ list_parser = sp.add_parser('list', help='list available compilers')
+ list_parser.add_argument('--scope', choices=scopes, default=spack.cmd.default_list_scope,
+ help="Configuration scope to read from.")
+
+ # Info
+ info_parser = sp.add_parser('info', help='Show compiler paths.')
info_parser.add_argument('compiler_spec')
+ info_parser.add_argument('--scope', choices=scopes, default=spack.cmd.default_list_scope,
+ help="Configuration scope to read from.")
def compiler_add(args):
@@ -62,29 +78,43 @@ def compiler_add(args):
paths = get_path('PATH')
compilers = [c for c in spack.compilers.find_compilers(*args.add_paths)
- if c.spec not in spack.compilers.all_compilers()]
+ if c.spec not in spack.compilers.all_compilers(scope=args.scope)]
if compilers:
- spack.compilers.add_compilers_to_config('user', *compilers)
+ spack.compilers.add_compilers_to_config(compilers, scope=args.scope)
n = len(compilers)
- tty.msg("Added %d new compiler%s to %s" % (
- n, 's' if n > 1 else '', spack.config.get_config_scope_filename('user', 'compilers')))
+ s = 's' if n > 1 else ''
+ filename = spack.config.get_config_filename(args.scope, 'compilers')
+ tty.msg("Added %d new compiler%s to %s" % (n, s, filename))
colify(reversed(sorted(c.spec for c in compilers)), indent=4)
else:
tty.msg("Found no new compilers")
def compiler_remove(args):
- pass
+ cspec = CompilerSpec(args.compiler_spec)
+ compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)
+
+ if not compilers:
+ tty.die("No compilers match spec %s" % cspec)
+ elif not args.all and len(compilers) > 1:
+ tty.error("Multiple compilers match spec %s. Choose one:" % cspec)
+ colify(reversed(sorted([c.spec for c in compilers])), indent=4)
+ tty.msg("Or, you can use `spack compiler remove -a` to remove all of them.")
+ sys.exit(1)
+
+ for compiler in compilers:
+ spack.compilers.remove_compiler_from_config(compiler.spec, scope=args.scope)
+ tty.msg("Removed compiler %s" % compiler.spec)
def compiler_info(args):
"""Print info about all compilers matching a spec."""
cspec = CompilerSpec(args.compiler_spec)
- compilers = spack.compilers.compilers_for_spec(cspec)
+ compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)
if not compilers:
- tty.error("No compilers match spec %s." % cspec)
+ tty.error("No compilers match spec %s" % cspec)
else:
for c in compilers:
print str(c.spec) + ":"
@@ -96,7 +126,7 @@ def compiler_info(args):
def compiler_list(args):
tty.msg("Available compilers")
- index = index_by(spack.compilers.all_compilers(), 'name')
+ index = index_by(spack.compilers.all_compilers(scope=args.scope), 'name')
for i, (name, compilers) in enumerate(index.items()):
if i >= 1: print
@@ -108,6 +138,7 @@ def compiler_list(args):
def compiler(parser, args):
action = { 'add' : compiler_add,
'remove' : compiler_remove,
+ 'rm' : compiler_remove,
'info' : compiler_info,
'list' : compiler_list }
action[args.compiler_command](args)
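
Each subcommand above carries its own --scope option: choices come from the configured scopes, and the default differs for commands that modify configuration (highest-precedence scope) versus commands that read it (None, meaning all scopes). The generic argparse shape, with illustrative scope names:

    import argparse

    scopes = ['site', 'user']  # stand-in for spack.config.config_scopes

    parser = argparse.ArgumentParser(prog='spack compiler')
    sp = parser.add_subparsers(dest='compiler_command', metavar='SUBCOMMAND')

    add_p = sp.add_parser('add', help='Add compilers.')
    add_p.add_argument('--scope', choices=scopes, default='user',
                       help='Configuration scope to modify.')

    list_p = sp.add_parser('list', help='List compilers.')
    list_p.add_argument('--scope', choices=scopes, default=None,
                        help='Scope to read from; all scopes when omitted.')

    args = parser.parse_args(['add', '--scope', 'site'])
    print(args.scope)  # site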
diff --git a/lib/spack/spack/cmd/compilers.py b/lib/spack/spack/cmd/compilers.py
index 8d046bfd7c..7e09016f2d 100644
--- a/lib/spack/spack/cmd/compilers.py
+++ b/lib/spack/spack/cmd/compilers.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -26,9 +26,14 @@ import llnl.util.tty as tty
from llnl.util.tty.colify import colify
from llnl.util.lang import index_by
+import spack
from spack.cmd.compiler import compiler_list
description = "List available compilers. Same as 'spack compiler list'."
+def setup_parser(subparser):
+ subparser.add_argument('--scope', choices=spack.config.config_scopes,
+ help="Configuration scope to read/modify.")
+
def compilers(parser, args):
compiler_list(args)
diff --git a/lib/spack/spack/cmd/config.py b/lib/spack/spack/cmd/config.py
index 8c18f88b64..5e6d4e4d7d 100644
--- a/lib/spack/spack/cmd/config.py
+++ b/lib/spack/spack/cmd/config.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,7 +23,7 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import sys
-from external import argparse
+import argparse
import llnl.util.tty as tty
@@ -44,22 +44,22 @@ def setup_parser(subparser):
sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='config_command')
get_parser = sp.add_parser('get', help='Print configuration values.')
- get_parser.add_argument('category', help="Configuration category to print.")
+ get_parser.add_argument('section', help="Configuration section to print.")
edit_parser = sp.add_parser('edit', help='Edit configuration file.')
- edit_parser.add_argument('category', help="Configuration category to edit")
+ edit_parser.add_argument('section', help="Configuration section to edit")
def config_get(args):
- spack.config.print_category(args.category)
+ spack.config.print_section(args.section)
def config_edit(args):
if not args.scope:
args.scope = 'user'
- if not args.category:
- args.category = None
- config_file = spack.config.get_config_scope_filename(args.scope, args.category)
+ if not args.section:
+ args.section = None
+ config_file = spack.config.get_config_filename(args.scope, args.section)
spack.editor(config_file)
diff --git a/lib/spack/spack/cmd/create.py b/lib/spack/spack/cmd/create.py
index 46e6bcec14..f0cd50b8df 100644
--- a/lib/spack/spack/cmd/create.py
+++ b/lib/spack/spack/cmd/create.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -27,16 +27,18 @@ import os
import hashlib
import re
-from external.ordereddict import OrderedDict
+from ordereddict_backport import OrderedDict
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp
import spack
import spack.cmd
import spack.cmd.checksum
-import spack.package
import spack.url
+import spack.util.web
+from spack.spec import Spec
from spack.util.naming import *
+from spack.repository import Repo, RepoError
import spack.util.crypto as crypto
from spack.util.executable import which
@@ -85,18 +87,34 @@ ${versions}
""")
+def make_version_calls(ver_hash_tuples):
+ """Adds a version() call to the package for each version found."""
+ max_len = max(len(str(v)) for v, h in ver_hash_tuples)
+ format = " version(%%-%ds, '%%s')" % (max_len + 2)
+ return '\n'.join(format % ("'%s'" % v, h) for v, h in ver_hash_tuples)
+
+
def setup_parser(subparser):
subparser.add_argument('url', nargs='?', help="url of package archive")
subparser.add_argument(
- '--keep-stage', action='store_true', dest='keep_stage',
+ '--keep-stage', action='store_true',
help="Don't clean up staging area when command completes.")
subparser.add_argument(
- '-n', '--name', dest='alternate_name', default=None,
+ '-n', '--name', dest='alternate_name', default=None, metavar='NAME',
help="Override the autodetected name for the created package.")
subparser.add_argument(
+ '-r', '--repo', default=None,
+ help="Path to a repository where the package should be created.")
+ subparser.add_argument(
+ '-N', '--namespace',
+ help="Specify a namespace for the package. Must be the namespace of "
+ "a repository registered with Spack.")
+ subparser.add_argument(
'-f', '--force', action='store_true', dest='force',
help="Overwrite any existing package file with the same name.")
+ setup_parser.subparser = subparser
+
class ConfigureGuesser(object):
def __call__(self, stage):
@@ -114,7 +132,7 @@ class ConfigureGuesser(object):
# Peek inside the tarball.
tar = which('tar')
output = tar(
- "--exclude=*/*/*", "-tf", stage.archive_file, return_output=True)
+ "--exclude=*/*/*", "-tf", stage.archive_file, output=str)
lines = output.split("\n")
# Set the configure line to the one that matched.
@@ -134,20 +152,11 @@ class ConfigureGuesser(object):
self.build_system = build_system
-def make_version_calls(ver_hash_tuples):
- """Adds a version() call to the package for each version found."""
- max_len = max(len(str(v)) for v, h in ver_hash_tuples)
- format = " version(%%-%ds, '%%s')" % (max_len + 2)
- return '\n'.join(format % ("'%s'" % v, h) for v, h in ver_hash_tuples)
-
-
-def create(parser, args):
- url = args.url
-
+def guess_name_and_version(url, args):
# Try to deduce name and version of the new package from the URL
version = spack.url.parse_version(url)
if not version:
- tty.die("Couldn't guess a version string from %s." % url)
+ tty.die("Couldn't guess a version string from %s" % url)
# Try to guess a name. If it doesn't work, allow the user to override.
if args.alternate_name:
@@ -160,13 +169,53 @@ def create(parser, args):
tty.die("Couldn't guess a name for this package. Try running:", "",
"spack create --name <name> <url>")
- if not valid_module_name(name):
+ if not valid_fully_qualified_module_name(name):
tty.die("Package name can only contain A-Z, a-z, 0-9, '_' and '-'")
- tty.msg("This looks like a URL for %s version %s." % (name, version))
- tty.msg("Creating template for package %s" % name)
+ return name, version
+
- versions = spack.package.find_versions_of_archive(url)
+def find_repository(spec, args):
+ # figure out namespace for spec
+ if spec.namespace and args.namespace and spec.namespace != args.namespace:
+ tty.die("Namespaces '%s' and '%s' do not match." % (spec.namespace, args.namespace))
+
+ if not spec.namespace and args.namespace:
+ spec.namespace = args.namespace
+
+ # Figure out where the new package should live.
+ repo_path = args.repo
+ if repo_path is not None:
+ try:
+ repo = Repo(repo_path)
+ if spec.namespace and spec.namespace != repo.namespace:
+ tty.die("Can't create package with namespace %s in repo with namespace %s"
+ % (spec.namespace, repo.namespace))
+ except RepoError as e:
+ tty.die(str(e))
+ else:
+ if spec.namespace:
+ repo = spack.repo.get_repo(spec.namespace, None)
+ if not repo:
+ tty.die("Unknown namespace: %s" % spec.namespace)
+ else:
+ repo = spack.repo.first_repo()
+
+ # Set the namespace on the spec if it's not there already
+ if not spec.namespace:
+ spec.namespace = repo.namespace
+
+ return repo
+
+
+def fetch_tarballs(url, name, version):
+ """Try to find versions of the supplied archive by scraping the web.
+
+    Prompts the user to select how many to download if many are found.
+    """
+ versions = spack.util.web.find_versions_of_archive(url)
rkeys = sorted(versions.keys(), reverse=True)
versions = OrderedDict(zip(rkeys, (versions[v] for v in rkeys)))
@@ -184,25 +233,47 @@ def create(parser, args):
default=5, abort='q')
if not archives_to_fetch:
- tty.msg("Aborted.")
- return
+ tty.die("Aborted.")
+
+ sorted_versions = sorted(versions.keys(), reverse=True)
+ sorted_urls = [versions[v] for v in sorted_versions]
+ return sorted_versions[:archives_to_fetch], sorted_urls[:archives_to_fetch]
+
+
+def create(parser, args):
+ url = args.url
+ if not url:
+ setup_parser.subparser.print_help()
+ return
+
+ # Figure out a name and repo for the package.
+ name, version = guess_name_and_version(url, args)
+ spec = Spec(name)
+ name = spec.name # factors out namespace, if any
+ repo = find_repository(spec, args)
+
+ tty.msg("This looks like a URL for %s version %s" % (name, version))
+ tty.msg("Creating template for package %s" % name)
+
+ # Fetch tarballs (prompting user if necessary)
+ versions, urls = fetch_tarballs(url, name, version)
+ # Try to guess what configure system is used.
guesser = ConfigureGuesser()
ver_hash_tuples = spack.cmd.checksum.get_checksums(
- versions.keys()[:archives_to_fetch],
- [versions[v] for v in versions.keys()[:archives_to_fetch]],
+ versions, urls,
first_stage_function=guesser,
keep_stage=args.keep_stage)
if not ver_hash_tuples:
- tty.die("Could not fetch any tarballs for %s." % name)
+ tty.die("Could not fetch any tarballs for %s" % name)
# Prepend 'py-' to python package names, by convention.
if guesser.build_system == 'python':
name = 'py-%s' % name
# Create a directory for the new package.
- pkg_path = spack.db.filename_for_package_name(name)
+ pkg_path = repo.filename_for_package_name(name)
if os.path.exists(pkg_path) and not args.force:
tty.die("%s already exists." % pkg_path)
else:
@@ -220,4 +291,4 @@ def create(parser, args):
# If everything checks out, go ahead and edit.
spack.editor(pkg_path)
- tty.msg("Created package %s." % pkg_path)
+ tty.msg("Created package %s" % pkg_path)
diff --git a/lib/spack/spack/cmd/deactivate.py b/lib/spack/spack/cmd/deactivate.py
index 1f0e303cdf..d6b23d6a08 100644
--- a/lib/spack/spack/cmd/deactivate.py
+++ b/lib/spack/spack/cmd/deactivate.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import llnl.util.tty as tty
import spack
@@ -37,7 +37,7 @@ def setup_parser(subparser):
help="Run deactivation even if spec is NOT currently activated.")
subparser.add_argument(
'-a', '--all', action='store_true',
- help="Deactivate all extensions of an extendable pacakge, or "
+ help="Deactivate all extensions of an extendable package, or "
"deactivate an extension AND its dependencies.")
subparser.add_argument(
'spec', nargs=argparse.REMAINDER, help="spec of package extension to deactivate.")
diff --git a/lib/spack/spack/cmd/dependents.py b/lib/spack/spack/cmd/dependents.py
index 652f243b98..de76098d2f 100644
--- a/lib/spack/spack/cmd/dependents.py
+++ b/lib/spack/spack/cmd/dependents.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import llnl.util.tty as tty
diff --git a/lib/spack/spack/cmd/diy.py b/lib/spack/spack/cmd/diy.py
index 9f8a6d39db..45f13e4463 100644
--- a/lib/spack/spack/cmd/diy.py
+++ b/lib/spack/spack/cmd/diy.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -24,7 +24,7 @@
##############################################################################
import sys
import os
-from external import argparse
+import argparse
import llnl.util.tty as tty
@@ -46,6 +46,9 @@ def setup_parser(subparser):
'--skip-patch', action='store_true',
help="Skip patching for the DIY build.")
subparser.add_argument(
+ '-q', '--quiet', action='store_true', dest='quiet',
+ help="Do not display verbose build output while installing.")
+ subparser.add_argument(
'spec', nargs=argparse.REMAINDER,
help="specs to use for install. Must contain package AND verison.")
@@ -61,7 +64,7 @@ def diy(self, args):
# Take a write lock before checking for existence.
with spack.installed_db.write_transaction():
spec = specs[0]
- if not spack.db.exists(spec.name):
+ if not spack.repo.exists(spec.name):
tty.warn("No such package: %s" % spec.name)
create = tty.get_yes_or_no("Create this package?", default=False)
if not create:
@@ -69,14 +72,14 @@ def diy(self, args):
sys.exit(1)
else:
tty.msg("Running 'spack edit -f %s'" % spec.name)
- edit_package(spec.name, True)
+ edit_package(spec.name, spack.repo.first_repo(), None, True)
return
- if not spec.version.concrete:
- tty.die("spack diy spec must have a single, concrete version.")
+ if not spec.versions.concrete:
+ tty.die("spack diy spec must have a single, concrete version. Did you forget a package version number?")
spec.concretize()
- package = spack.db.get(spec)
+ package = spack.repo.get(spec)
if package.installed:
tty.error("Already installed in %s" % package.prefix)
@@ -92,4 +95,5 @@ def diy(self, args):
package.do_install(
keep_prefix=args.keep_prefix,
ignore_deps=args.ignore_deps,
+ verbose=not args.quiet,
keep_stage=True) # don't remove source dir for DIY.
diff --git a/lib/spack/spack/cmd/doc.py b/lib/spack/spack/cmd/doc.py
index 601ae26e5e..29cadec94f 100644
--- a/lib/spack/spack/cmd/doc.py
+++ b/lib/spack/spack/cmd/doc.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/cmd/edit.py b/lib/spack/spack/cmd/edit.py
index b8764ba391..a20e40df9b 100644
--- a/lib/spack/spack/cmd/edit.py
+++ b/lib/spack/spack/cmd/edit.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -30,6 +30,8 @@ from llnl.util.filesystem import mkdirp, join_path
import spack
import spack.cmd
+from spack.spec import Spec
+from spack.repository import Repo
from spack.util.naming import mod_to_class
description = "Open package files in $EDITOR"
@@ -53,9 +55,16 @@ class ${class_name}(Package):
""")
-def edit_package(name, force=False):
- path = spack.db.filename_for_package_name(name)
+def edit_package(name, repo_path, namespace, force=False):
+ if repo_path:
+ repo = Repo(repo_path)
+ elif namespace:
+ repo = spack.repo.get_repo(namespace)
+ else:
+ repo = spack.repo
+ path = repo.filename_for_package_name(name)
+ spec = Spec(name)
if os.path.exists(path):
if not os.path.isfile(path):
tty.die("Something's wrong. '%s' is not a file!" % path)
@@ -63,13 +72,13 @@ def edit_package(name, force=False):
tty.die("Insufficient permissions on '%s'!" % path)
elif not force:
tty.die("No package '%s'. Use spack create, or supply -f/--force "
- "to edit a new file." % name)
+ "to edit a new file." % spec.name)
else:
mkdirp(os.path.dirname(path))
with open(path, "w") as pkg_file:
pkg_file.write(
package_template.substitute(
- name=name, class_name=mod_to_class(name)))
+ name=spec.name, class_name=mod_to_class(spec.name)))
spack.editor(path)
@@ -78,9 +87,26 @@ def setup_parser(subparser):
subparser.add_argument(
'-f', '--force', dest='force', action='store_true',
help="Open a new file in $EDITOR even if package doesn't exist.")
- subparser.add_argument(
- '-c', '--command', dest='edit_command', action='store_true',
- help="Edit the command with the supplied name instead of a package.")
+
+ excl_args = subparser.add_mutually_exclusive_group()
+
+ # Various filetypes you can edit directly from the cmd line.
+ excl_args.add_argument(
+ '-c', '--command', dest='path', action='store_const',
+ const=spack.cmd.command_path, help="Edit the command with the supplied name.")
+ excl_args.add_argument(
+ '-t', '--test', dest='path', action='store_const',
+ const=spack.test_path, help="Edit the test with the supplied name.")
+ excl_args.add_argument(
+ '-m', '--module', dest='path', action='store_const',
+ const=spack.module_path, help="Edit the main spack module with the supplied name.")
+
+ # Options for editing packages
+ excl_args.add_argument(
+ '-r', '--repo', default=None, help="Path to repo to edit package in.")
+ excl_args.add_argument(
+ '-N', '--namespace', default=None, help="Namespace of package to edit.")
+
subparser.add_argument(
'name', nargs='?', default=None, help="name of package to edit")
@@ -88,19 +114,17 @@ def setup_parser(subparser):
def edit(parser, args):
name = args.name
- if args.edit_command:
- if not name:
- path = spack.cmd.command_path
- else:
- path = join_path(spack.cmd.command_path, name + ".py")
- if not os.path.exists(path):
+ path = spack.packages_path
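+    # -c/-t/-m store an alternate root in args.path; otherwise edit packages.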
+ if args.path:
+ path = args.path
+ if name:
+ path = join_path(path, name + ".py")
+ if not args.force and not os.path.exists(path):
tty.die("No command named '%s'." % name)
spack.editor(path)
+ elif name:
+ edit_package(name, args.repo, args.namespace, args.force)
else:
# By default open the directory where packages or commands live.
- if not name:
- path = spack.packages_path
- spack.editor(path)
- else:
- edit_package(name, args.force)
+ spack.editor(path)
diff --git a/lib/spack/spack/cmd/env.py b/lib/spack/spack/cmd/env.py
index bde76b5daf..525e955a00 100644
--- a/lib/spack/spack/cmd/env.py
+++ b/lib/spack/spack/cmd/env.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,7 +23,7 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
-from external import argparse
+import argparse
import llnl.util.tty as tty
import spack.cmd
import spack.build_environment as build_env
diff --git a/lib/spack/spack/cmd/extensions.py b/lib/spack/spack/cmd/extensions.py
index 7cadc424b0..ccb0fe4e1f 100644
--- a/lib/spack/spack/cmd/extensions.py
+++ b/lib/spack/spack/cmd/extensions.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,7 +23,7 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import sys
-from external import argparse
+import argparse
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
@@ -37,7 +37,7 @@ description = "List extensions for package."
def setup_parser(subparser):
format_group = subparser.add_mutually_exclusive_group()
format_group.add_argument(
- '-l', '--long', action='store_const', dest='mode', const='long',
+ '-l', '--long', action='store_true', dest='long',
help='Show dependency hashes as well as versions.')
format_group.add_argument(
'-p', '--paths', action='store_const', dest='mode', const='paths',
@@ -54,7 +54,9 @@ def extensions(parser, args):
if not args.spec:
tty.die("extensions requires a package spec.")
+ #
# Checks
+ #
spec = spack.cmd.parse_specs(args.spec)
if len(spec) > 1:
tty.die("Can only list extensions for one package.")
@@ -70,8 +72,9 @@ def extensions(parser, args):
if not args.mode:
args.mode = 'short'
+ #
# List package names of extensions
- extensions = spack.db.extensions_for(spec)
+ extensions = spack.repo.extensions_for(spec)
if not extensions:
tty.msg("%s has no extensions." % spec.cshort_spec)
return
@@ -79,7 +82,9 @@ def extensions(parser, args):
tty.msg("%d extensions:" % len(extensions))
colify(ext.name for ext in extensions)
+ #
# List specs of installed extensions.
+ #
installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)]
print
if not installed:
@@ -88,11 +93,13 @@ def extensions(parser, args):
tty.msg("%d installed:" % len(installed))
spack.cmd.find.display_specs(installed, mode=args.mode)
+ #
# List specs of activated extensions.
+ #
activated = spack.install_layout.extension_map(spec)
print
if not activated:
tty.msg("None activated.")
return
tty.msg("%d currently activated:" % len(activated))
- spack.cmd.find.display_specs(activated.values(), mode=args.mode)
+ spack.cmd.find.display_specs(activated.values(), mode=args.mode, long=args.long)
diff --git a/lib/spack/spack/cmd/fetch.py b/lib/spack/spack/cmd/fetch.py
index 0ccebd9486..adad545cae 100644
--- a/lib/spack/spack/cmd/fetch.py
+++ b/lib/spack/spack/cmd/fetch.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import spack
import spack.cmd
@@ -34,9 +34,12 @@ def setup_parser(subparser):
'-n', '--no-checksum', action='store_true', dest='no_checksum',
help="Do not check packages against checksum")
subparser.add_argument(
+ '-m', '--missing', action='store_true', help="Also fetch all missing dependencies")
+ subparser.add_argument(
+ '-D', '--dependencies', action='store_true', help="Also fetch all dependencies")
+ subparser.add_argument(
'packages', nargs=argparse.REMAINDER, help="specs of packages to fetch")
-
def fetch(parser, args):
if not args.packages:
tty.die("fetch requires at least one package argument")
@@ -46,5 +49,13 @@ def fetch(parser, args):
specs = spack.cmd.parse_specs(args.packages, concretize=True)
for spec in specs:
- package = spack.db.get(spec)
+ if args.missing or args.dependencies:
+            # Walk the full dependency DAG, fetching each package; with
+            # --missing, packages that are already installed are skipped.
+ for s in spec.traverse():
+ package = spack.repo.get(s)
+ if args.missing and package.installed:
+ continue
+ package.do_fetch()
+
+ package = spack.repo.get(spec)
package.do_fetch()
diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py
index 6697e52de2..ef582a1680 100644
--- a/lib/spack/spack/cmd/find.py
+++ b/lib/spack/spack/cmd/find.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -25,7 +25,7 @@
import sys
import collections
import itertools
-from external import argparse
+import argparse
from StringIO import StringIO
import llnl.util.tty as tty
@@ -41,6 +41,9 @@ description ="Find installed spack packages"
def setup_parser(subparser):
format_group = subparser.add_mutually_exclusive_group()
format_group.add_argument(
+ '-s', '--short', action='store_const', dest='mode', const='short',
+ help='Show only specs (default)')
+ format_group.add_argument(
'-p', '--paths', action='store_const', dest='mode', const='paths',
help='Show paths to package install directories')
format_group.add_argument(
@@ -66,6 +69,9 @@ def setup_parser(subparser):
subparser.add_argument(
'-M', '--only-missing', action='store_true', dest='only_missing',
help='Show only missing dependencies.')
+ subparser.add_argument(
+ '-N', '--namespace', action='store_true',
+ help='Show fully qualified package names.')
subparser.add_argument(
'query_specs', nargs=argparse.REMAINDER,
@@ -79,16 +85,18 @@ def gray_hash(spec, length):
def display_specs(specs, **kwargs):
mode = kwargs.get('mode', 'short')
hashes = kwargs.get('long', False)
+ namespace = kwargs.get('namespace', False)
hlen = 7
if kwargs.get('very_long', False):
hashes = True
hlen = None
- format_string = '$_$@$+'
+ nfmt = '.' if namespace else '_'
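+    # '$.' formats the fully qualified (namespace-prefixed) name, '$_' the plain name.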
+    format_string = '$%s$@$+' % nfmt
flags = kwargs.get('show_flags', False)
if flags:
- format_string = '$_$@$%+$+'
+ format_string = '$.$@$%+$+' if nfmt == '.' else '$_$@$%+$+'
# Make a dict with specs keyed by architecture and compiler.
index = index_by(specs, ('architecture', 'compiler'))
@@ -132,7 +140,7 @@ def display_specs(specs, **kwargs):
string = ""
if hashes:
string += gray_hash(s, hlen) + ' '
- string += s.format('$-_$@$+', color=True)
+ string += s.format('$-%s$@$+' % nfmt, color=True)
return string
colify(fmt(s) for s in specs)
@@ -153,7 +161,7 @@ def find(parser, args):
# Filter out specs that don't exist.
query_specs = spack.cmd.parse_specs(args.query_specs)
query_specs, nonexisting = partition_list(
- query_specs, lambda s: spack.db.exists(s.name) or s.name == "")
+ query_specs, lambda s: spack.repo.exists(s.name) or s.name == "")
if nonexisting:
msg = "No such package%s: " % ('s' if len(nonexisting) > 1 else '')
diff --git a/lib/spack/spack/cmd/graph.py b/lib/spack/spack/cmd/graph.py
index cb93a1b543..586a852351 100644
--- a/lib/spack/spack/cmd/graph.py
+++ b/lib/spack/spack/cmd/graph.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import spack
import spack.cmd
diff --git a/lib/spack/spack/cmd/help.py b/lib/spack/spack/cmd/help.py
index eae3aabd97..841a0d5bcb 100644
--- a/lib/spack/spack/cmd/help.py
+++ b/lib/spack/spack/cmd/help.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/cmd/info.py b/lib/spack/spack/cmd/info.py
index 085e4db44d..c93db55c63 100644
--- a/lib/spack/spack/cmd/info.py
+++ b/lib/spack/spack/cmd/info.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -52,7 +52,7 @@ def print_text_info(pkg):
print "Safe versions: "
if not pkg.versions:
- print("None")
+ print(" None")
else:
pad = padder(pkg.versions, 4)
for v in reversed(sorted(pkg.versions)):
@@ -62,7 +62,7 @@ def print_text_info(pkg):
print
print "Variants:"
if not pkg.variants:
- print "None"
+ print " None"
else:
pad = padder(pkg.variants, 4)
@@ -105,5 +105,5 @@ def print_text_info(pkg):
def info(parser, args):
- pkg = spack.db.get(args.name)
+ pkg = spack.repo.get(args.name)
print_text_info(pkg)
diff --git a/lib/spack/spack/cmd/install.py b/lib/spack/spack/cmd/install.py
index 836a6260c8..1fef750c80 100644
--- a/lib/spack/spack/cmd/install.py
+++ b/lib/spack/spack/cmd/install.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import llnl.util.tty as tty
@@ -70,7 +70,7 @@ def install(parser, args):
specs = spack.cmd.parse_specs(args.packages, concretize=True)
for spec in specs:
- package = spack.db.get(spec)
+ package = spack.repo.get(spec)
with spack.installed_db.write_transaction():
package.do_install(
keep_prefix=args.keep_prefix,
diff --git a/lib/spack/spack/cmd/list.py b/lib/spack/spack/cmd/list.py
index 1f0978a18e..7c50ccb9cd 100644
--- a/lib/spack/spack/cmd/list.py
+++ b/lib/spack/spack/cmd/list.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -24,7 +24,7 @@
##############################################################################
import sys
import llnl.util.tty as tty
-from external import argparse
+import argparse
from llnl.util.tty.colify import colify
import spack
@@ -43,7 +43,7 @@ def setup_parser(subparser):
def list(parser, args):
# Start with all package names.
- pkgs = spack.db.all_package_names()
+ pkgs = spack.repo.all_package_names()
# filter if a filter arg was provided
if args.filter:
diff --git a/lib/spack/spack/cmd/load.py b/lib/spack/spack/cmd/load.py
index 06574d9725..30d86c3b01 100644
--- a/lib/spack/spack/cmd/load.py
+++ b/lib/spack/spack/cmd/load.py
@@ -6,7 +6,7 @@
# Written by David Beckingsale, david@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import spack.modules
description ="Add package to environment using modules."
diff --git a/lib/spack/spack/cmd/location.py b/lib/spack/spack/cmd/location.py
index e8e9c3f277..307ee8982d 100644
--- a/lib/spack/spack/cmd/location.py
+++ b/lib/spack/spack/cmd/location.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -24,7 +24,7 @@
##############################################################################
import os
import sys
-from external import argparse
+import argparse
import llnl.util.tty as tty
from llnl.util.filesystem import join_path
@@ -32,7 +32,7 @@ from llnl.util.filesystem import join_path
import spack
import spack.cmd
-description="Print out locations of various diectories used by Spack"
+description="Print out locations of various directories used by Spack"
def setup_parser(subparser):
global directories
@@ -72,7 +72,7 @@ def location(parser, args):
print spack.prefix
elif args.packages:
- print spack.db.root
+ print spack.repo.root
elif args.stages:
print spack.stage_path
@@ -94,12 +94,12 @@ def location(parser, args):
if args.package_dir:
# This one just needs the spec name.
- print join_path(spack.db.root, spec.name)
+ print join_path(spack.repo.root, spec.name)
else:
# These versions need concretized specs.
spec.concretize()
- pkg = spack.db.get(spec)
+ pkg = spack.repo.get(spec)
if args.stage_dir:
print pkg.stage.path
diff --git a/lib/spack/spack/cmd/md5.py b/lib/spack/spack/cmd/md5.py
index dfa1be412b..f99fc0f8c2 100644
--- a/lib/spack/spack/cmd/md5.py
+++ b/lib/spack/spack/cmd/md5.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,32 +22,51 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import os
+import argparse
import hashlib
-from external import argparse
+import os
import llnl.util.tty as tty
-from llnl.util.filesystem import *
-
import spack.util.crypto
+from spack.stage import Stage, FailedDownloadError
+
+description = "Calculate md5 checksums for files/urls."
-description = "Calculate md5 checksums for files."
def setup_parser(subparser):
setup_parser.parser = subparser
subparser.add_argument('files', nargs=argparse.REMAINDER,
help="Files to checksum.")
+
+def compute_md5_checksum(url):
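+    # A non-file argument is treated as a URL: stage it, fetch it, and
+    # checksum the downloaded archive; local files are hashed directly.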
+ if not os.path.isfile(url):
+ with Stage(url) as stage:
+ stage.fetch()
+ value = spack.util.crypto.checksum(hashlib.md5, stage.archive_file)
+ else:
+ value = spack.util.crypto.checksum(hashlib.md5, url)
+ return value
+
+
def md5(parser, args):
if not args.files:
setup_parser.parser.print_help()
return 1
- for f in args.files:
- if not os.path.isfile(f):
- tty.die("Not a file: %s" % f)
- if not can_access(f):
- tty.die("Cannot read file: %s" % f)
+ results = []
+ for url in args.files:
+ try:
+ checksum = compute_md5_checksum(url)
+ results.append((checksum, url))
+ except FailedDownloadError as e:
+ tty.warn("Failed to fetch %s" % url)
+ tty.warn("%s" % e)
+ except IOError as e:
+ tty.warn("Error when reading %s" % url)
+ tty.warn("%s" % e)
- checksum = spack.util.crypto.checksum(hashlib.md5, f)
- print "%s %s" % (checksum, f)
+    # Print the MD5s at the end so they are not interleaved with download output.
+ tty.msg("%d MD5 checksums:" % len(results))
+ for checksum, url in results:
+ print "%s %s" % (checksum, url)
diff --git a/lib/spack/spack/cmd/mirror.py b/lib/spack/spack/cmd/mirror.py
index 2356170a9a..fcd15a6a90 100644
--- a/lib/spack/spack/cmd/mirror.py
+++ b/lib/spack/spack/cmd/mirror.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -26,7 +26,7 @@ import os
import sys
from datetime import datetime
-from external import argparse
+import argparse
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
@@ -36,6 +36,7 @@ import spack.config
import spack.mirror
from spack.spec import Spec
from spack.error import SpackError
+from spack.util.spack_yaml import syaml_dict
description = "Manage mirrors."
@@ -47,26 +48,46 @@ def setup_parser(subparser):
sp = subparser.add_subparsers(
metavar='SUBCOMMAND', dest='mirror_command')
+ # Create
create_parser = sp.add_parser('create', help=mirror_create.__doc__)
create_parser.add_argument('-d', '--directory', default=None,
help="Directory in which to create mirror.")
create_parser.add_argument(
- 'specs', nargs=argparse.REMAINDER, help="Specs of packages to put in mirror")
+ 'specs', nargs=argparse.REMAINDER,
+ help="Specs of packages to put in mirror")
create_parser.add_argument(
'-f', '--file', help="File with specs of packages to put in mirror.")
create_parser.add_argument(
+ '-D', '--dependencies', action='store_true',
+ help="Also fetch all dependencies")
+ create_parser.add_argument(
'-o', '--one-version-per-spec', action='store_const', const=1, default=0,
help="Only fetch one 'preferred' version per spec, not all known versions.")
+ scopes = spack.config.config_scopes
+
+ # Add
add_parser = sp.add_parser('add', help=mirror_add.__doc__)
add_parser.add_argument('name', help="Mnemonic name for mirror.")
add_parser.add_argument(
'url', help="URL of mirror directory created by 'spack mirror create'.")
+ add_parser.add_argument(
+ '--scope', choices=scopes, default=spack.cmd.default_modify_scope,
+ help="Configuration scope to modify.")
- remove_parser = sp.add_parser('remove', help=mirror_remove.__doc__)
+ # Remove
+ remove_parser = sp.add_parser('remove', aliases=['rm'],
+ help=mirror_remove.__doc__)
remove_parser.add_argument('name')
+ remove_parser.add_argument(
+ '--scope', choices=scopes, default=spack.cmd.default_modify_scope,
+ help="Configuration scope to modify.")
+ # List
list_parser = sp.add_parser('list', help=mirror_list.__doc__)
+ list_parser.add_argument(
+ '--scope', choices=scopes, default=spack.cmd.default_list_scope,
+ help="Configuration scope to read from.")
def mirror_add(args):
@@ -75,50 +96,72 @@ def mirror_add(args):
if url.startswith('/'):
url = 'file://' + url
- mirror_dict = { args.name : url }
- spack.config.add_to_mirror_config({ args.name : url })
+ mirrors = spack.config.get_config('mirrors', scope=args.scope)
+ if not mirrors:
+ mirrors = syaml_dict()
+
+ for name, u in mirrors.items():
+ if name == args.name:
+ tty.die("Mirror with name %s already exists." % name)
+ if u == url:
+ tty.die("Mirror with url %s already exists." % url)
+        # Each mirror entry should map exactly one name to one url.
+
+ items = [(n,u) for n,u in mirrors.items()]
+ items.insert(0, (args.name, url))
+ mirrors = syaml_dict(items)
+ spack.config.update_config('mirrors', mirrors, scope=args.scope)
def mirror_remove(args):
"""Remove a mirror by name."""
name = args.name
- rmd_something = spack.config.remove_from_config('mirrors', name)
- if not rmd_something:
- tty.die("No such mirror: %s" % name)
+ mirrors = spack.config.get_config('mirrors', scope=args.scope)
+ if not mirrors:
+ mirrors = syaml_dict()
+
+ if not name in mirrors:
+ tty.die("No mirror with name %s" % name)
+
+ old_value = mirrors.pop(name)
+ spack.config.update_config('mirrors', mirrors, scope=args.scope)
+ tty.msg("Removed mirror %s with url %s" % (name, old_value))
def mirror_list(args):
"""Print out available mirrors to the console."""
- sec_names = spack.config.get_mirror_config()
- if not sec_names:
+ mirrors = spack.config.get_config('mirrors', scope=args.scope)
+ if not mirrors:
tty.msg("No mirrors configured.")
return
- max_len = max(len(s) for s in sec_names)
+ max_len = max(len(n) for n in mirrors.keys())
fmt = "%%-%ds%%s" % (max_len + 4)
- for name, val in sec_names.iteritems():
- print fmt % (name, val)
+ for name in mirrors:
+ print fmt % (name, mirrors[name])
def _read_specs_from_file(filename):
+ specs = []
with open(filename, "r") as stream:
for i, string in enumerate(stream):
try:
s = Spec(string)
s.package
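+                # Touching s.package verifies the named package actually exists.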
- args.specs.append(s)
+ specs.append(s)
except SpackError, e:
tty.die("Parse error in %s, line %d:" % (args.file, i+1),
">>> " + string, str(e))
+ return specs
def mirror_create(args):
"""Create a directory to be used as a spack mirror, and fill it with
package archives."""
# try to parse specs from the command line first.
- specs = spack.cmd.parse_specs(args.specs)
+ specs = spack.cmd.parse_specs(args.specs, concretize=True)
# If there is a file, parse each line as a spec and add it to the list.
if args.file:
@@ -128,9 +171,18 @@ def mirror_create(args):
# If nothing is passed, use all packages.
if not specs:
- specs = [Spec(n) for n in spack.db.all_package_names()]
+ specs = [Spec(n) for n in spack.repo.all_package_names()]
specs.sort(key=lambda s: s.format("$_$@").lower())
+ # If the user asked for dependencies, traverse spec DAG get them.
+ if args.dependencies:
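+        # Concretize each spec so its full dependency DAG can be traversed.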
+ new_specs = set()
+ for spec in specs:
+ spec.concretize()
+ for s in spec.traverse():
+ new_specs.add(s)
+ specs = list(new_specs)
+
# Default name for directory is spack-mirror-<DATESTAMP>
directory = args.directory
if not directory:
@@ -151,7 +203,7 @@ def mirror_create(args):
verb = "updated" if existed else "created"
tty.msg(
- "Successfully %s mirror in %s." % (verb, directory),
+ "Successfully %s mirror in %s" % (verb, directory),
"Archive stats:",
" %-4d already present" % p,
" %-4d added" % m,
@@ -165,6 +217,7 @@ def mirror(parser, args):
action = { 'create' : mirror_create,
'add' : mirror_add,
'remove' : mirror_remove,
+ 'rm' : mirror_remove,
'list' : mirror_list }
action[args.mirror_command](args)
diff --git a/lib/spack/spack/cmd/module.py b/lib/spack/spack/cmd/module.py
index 654b0cb2fa..a67f5c0c13 100644
--- a/lib/spack/spack/cmd/module.py
+++ b/lib/spack/spack/cmd/module.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,21 +22,16 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import sys
import os
import shutil
-from external import argparse
+import sys
import llnl.util.tty as tty
-from llnl.util.lang import partition_list
-from llnl.util.filesystem import mkdirp
-
import spack.cmd
+from llnl.util.filesystem import mkdirp
from spack.modules import module_types
from spack.util.string import *
-from spack.spec import Spec
-
description ="Manipulate modules and dotkits."
@@ -58,7 +53,7 @@ def module_find(mtype, spec_array):
should type to use that package's module.
"""
if mtype not in module_types:
- tty.die("Invalid module type: '%s'. Options are %s." % (mtype, comma_or(module_types)))
+ tty.die("Invalid module type: '%s'. Options are %s" % (mtype, comma_or(module_types)))
specs = spack.cmd.parse_specs(spec_array)
if len(specs) > 1:
@@ -78,9 +73,9 @@ def module_find(mtype, spec_array):
mt = module_types[mtype]
mod = mt(specs[0])
if not os.path.isfile(mod.file_name):
- tty.die("No %s module is installed for %s." % (mtype, spec))
+ tty.die("No %s module is installed for %s" % (mtype, spec))
- print mod.use_name
+ print(mod.use_name)
def module_refresh():
@@ -94,11 +89,10 @@ def module_refresh():
shutil.rmtree(cls.path, ignore_errors=False)
mkdirp(cls.path)
for spec in specs:
- tty.debug(" Writing file for %s." % spec)
+ tty.debug(" Writing file for %s" % spec)
cls(spec).write()
-
def module(parser, args):
if args.module_command == 'refresh':
module_refresh()
diff --git a/lib/spack/spack/cmd/package-list.py b/lib/spack/spack/cmd/package-list.py
index f048482845..5e37d5c16b 100644
--- a/lib/spack/spack/cmd/package-list.py
+++ b/lib/spack/spack/cmd/package-list.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -34,7 +34,7 @@ description = "Print a list of all packages in reStructuredText."
def github_url(pkg):
"""Link to a package file on github."""
- return ("https://github.com/scalability-llnl/spack/blob/master/var/spack/packages/%s/package.py" %
+ return ("https://github.com/llnl/spack/blob/master/var/spack/packages/%s/package.py" %
pkg.name)
@@ -48,7 +48,7 @@ def rst_table(elts):
def print_rst_package_list():
"""Print out information on all packages in restructured text."""
- pkgs = sorted(spack.db.all_packages(), key=lambda s:s.name.lower())
+ pkgs = sorted(spack.repo.all_packages(), key=lambda s:s.name.lower())
print ".. _package-list:"
print
diff --git a/lib/spack/spack/cmd/patch.py b/lib/spack/spack/cmd/patch.py
index a6556c4828..b04b402738 100644
--- a/lib/spack/spack/cmd/patch.py
+++ b/lib/spack/spack/cmd/patch.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,8 +22,9 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
+import llnl.util.tty as tty
import spack.cmd
import spack
@@ -47,5 +48,5 @@ def patch(parser, args):
specs = spack.cmd.parse_specs(args.packages, concretize=True)
for spec in specs:
- package = spack.db.get(spec)
+ package = spack.repo.get(spec)
package.do_patch()
diff --git a/lib/spack/spack/cmd/pkg.py b/lib/spack/spack/cmd/pkg.py
index 055b7c2062..20a3fc5fc2 100644
--- a/lib/spack/spack/cmd/pkg.py
+++ b/lib/spack/spack/cmd/pkg.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -24,7 +24,7 @@
##############################################################################
import os
-from external import argparse
+import argparse
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
@@ -77,15 +77,16 @@ def get_git():
def list_packages(rev):
git = get_git()
- relpath = spack.packages_path[len(spack.prefix + os.path.sep):] + os.path.sep
+ pkgpath = os.path.join(spack.packages_path, 'packages')
+ relpath = pkgpath[len(spack.prefix + os.path.sep):] + os.path.sep
output = git('ls-tree', '--full-tree', '--name-only', rev, relpath,
- return_output=True)
+ output=str)
return sorted(line[len(relpath):] for line in output.split('\n') if line)
def pkg_add(args):
for pkg_name in args.packages:
- filename = spack.db.filename_for_package_name(pkg_name)
+ filename = spack.repo.filename_for_package_name(pkg_name)
if not os.path.isfile(filename):
tty.die("No such package: %s. Path does not exist:" % pkg_name, filename)
diff --git a/lib/spack/spack/cmd/providers.py b/lib/spack/spack/cmd/providers.py
index 2bcdc9fba2..49d6ac192a 100644
--- a/lib/spack/spack/cmd/providers.py
+++ b/lib/spack/spack/cmd/providers.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,7 +23,7 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
-from external import argparse
+import argparse
from llnl.util.tty.colify import colify
@@ -39,4 +39,4 @@ def setup_parser(subparser):
def providers(parser, args):
for spec in spack.cmd.parse_specs(args.vpkg_spec):
- colify(sorted(spack.db.providers_for(spec)), indent=4)
+ colify(sorted(spack.repo.providers_for(spec)), indent=4)
diff --git a/lib/spack/spack/cmd/purge.py b/lib/spack/spack/cmd/purge.py
index 9b96937149..d5d7513c46 100644
--- a/lib/spack/spack/cmd/purge.py
+++ b/lib/spack/spack/cmd/purge.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/cmd/python.py b/lib/spack/spack/cmd/python.py
index 7bd2e45ce0..5325e8fd9a 100644
--- a/lib/spack/spack/cmd/python.py
+++ b/lib/spack/spack/cmd/python.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -25,20 +25,23 @@
import os
import sys
import code
-from external import argparse
+import argparse
import platform
import spack
def setup_parser(subparser):
subparser.add_argument(
+ '-c', dest='python_command', help='Command to execute.')
+ subparser.add_argument(
'python_args', nargs=argparse.REMAINDER, help="File to run plus arguments.")
description = "Launch an interpreter as spack would launch a command"
def python(parser, args):
# Fake a main python shell by setting __name__ to __main__.
- console = code.InteractiveConsole({'__name__' : '__main__'})
+ console = code.InteractiveConsole({'__name__' : '__main__',
+ 'spack' : spack})
if "PYTHONSTARTUP" in os.environ:
startup_file = os.environ["PYTHONSTARTUP"]
@@ -47,7 +50,10 @@ def python(parser, args):
console.runsource(startup.read(), startup_file, 'exec')
python_args = args.python_args
- if python_args:
+ python_command = args.python_command
+ if python_command:
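+        # A -c one-liner takes precedence over any file arguments.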
+ console.runsource(python_command)
+ elif python_args:
sys.argv = python_args
with open(python_args[0]) as file:
console.runsource(file.read(), python_args[0], 'exec')
diff --git a/lib/spack/spack/cmd/reindex.py b/lib/spack/spack/cmd/reindex.py
index b584729ea4..2b30ef8814 100644
--- a/lib/spack/spack/cmd/reindex.py
+++ b/lib/spack/spack/cmd/reindex.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import spack
description = "Rebuild Spack's package database."
diff --git a/lib/spack/spack/cmd/repo.py b/lib/spack/spack/cmd/repo.py
new file mode 100644
index 0000000000..87c782000f
--- /dev/null
+++ b/lib/spack/spack/cmd/repo.py
@@ -0,0 +1,174 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import os
+import re
+import shutil
+
+import argparse
+import llnl.util.tty as tty
+from llnl.util.filesystem import join_path, mkdirp
+
+import spack.spec
+import spack.config
+from spack.util.environment import get_path
+from spack.repository import *
+
+description = "Manage package source repositories."
+
+def setup_parser(subparser):
+ sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='repo_command')
+ scopes = spack.config.config_scopes
+
+ # Create
+ create_parser = sp.add_parser('create', help=repo_create.__doc__)
+ create_parser.add_argument(
+ 'directory', help="Directory to create the repo in.")
+ create_parser.add_argument(
+ 'namespace', help="Namespace to identify packages in the repository. "
+ "Defaults to the directory name.", nargs='?')
+
+ # List
+ list_parser = sp.add_parser('list', help=repo_list.__doc__)
+ list_parser.add_argument(
+ '--scope', choices=scopes, default=spack.cmd.default_list_scope,
+ help="Configuration scope to read from.")
+
+ # Add
+ add_parser = sp.add_parser('add', help=repo_add.__doc__)
+ add_parser.add_argument('path', help="Path to a Spack package repository directory.")
+ add_parser.add_argument(
+ '--scope', choices=scopes, default=spack.cmd.default_modify_scope,
+ help="Configuration scope to modify.")
+
+ # Remove
+ remove_parser = sp.add_parser('remove', help=repo_remove.__doc__, aliases=['rm'])
+ remove_parser.add_argument(
+ 'path_or_namespace',
+ help="Path or namespace of a Spack package repository.")
+ remove_parser.add_argument(
+ '--scope', choices=scopes, default=spack.cmd.default_modify_scope,
+ help="Configuration scope to modify.")
+
+
+def repo_create(args):
+ """Create a new package repository."""
+ full_path, namespace = create_repo(args.directory, args.namespace)
+ tty.msg("Created repo with namespace '%s'." % namespace)
+ tty.msg("To register it with spack, run this command:",
+ 'spack repo add %s' % full_path)
+
+
+def repo_add(args):
+ """Add a package source to Spack's configuration."""
+ path = args.path
+
+    # canonicalize_path returns an absolute path and expands substitution variables.
+ canon_path = canonicalize_path(path)
+
+ # check if the path exists
+ if not os.path.exists(canon_path):
+ tty.die("No such file or directory: %s" % path)
+
+ # Make sure the path is a directory.
+ if not os.path.isdir(canon_path):
+ tty.die("Not a Spack repository: %s" % path)
+
+ # Make sure it's actually a spack repository by constructing it.
+ repo = Repo(canon_path)
+
+ # If that succeeds, finally add it to the configuration.
+ repos = spack.config.get_config('repos', args.scope)
+    if not repos:
+        repos = []
+
+ if repo.root in repos or path in repos:
+ tty.die("Repository is already registered with Spack: %s" % path)
+
+ repos.insert(0, canon_path)
+ spack.config.update_config('repos', repos, args.scope)
+ tty.msg("Created repo with namespace '%s'." % repo.namespace)
+
+
+def repo_remove(args):
+ """Remove a repository from Spack's configuration."""
+ repos = spack.config.get_config('repos', args.scope)
+ path_or_namespace = args.path_or_namespace
+
+ # If the argument is a path, remove that repository from config.
+ canon_path = canonicalize_path(path_or_namespace)
+ for repo_path in repos:
+ repo_canon_path = canonicalize_path(repo_path)
+ if canon_path == repo_canon_path:
+ repos.remove(repo_path)
+ spack.config.update_config('repos', repos, args.scope)
+ tty.msg("Removed repository %s" % repo_path)
+ return
+
+ # If it is a namespace, remove corresponding repo
+ for path in repos:
+ try:
+ repo = Repo(path)
+ if repo.namespace == path_or_namespace:
+ repos.remove(path)
+ spack.config.update_config('repos', repos, args.scope)
+ tty.msg("Removed repository %s with namespace '%s'."
+ % (repo.root, repo.namespace))
+ return
+        except RepoError:
+ continue
+
+ tty.die("No repository with path or namespace: %s"
+ % path_or_namespace)
+
+
+def repo_list(args):
+ """Show registered repositories and their namespaces."""
+ roots = spack.config.get_config('repos', args.scope)
+ repos = []
+ for r in roots:
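+        # Skip configured paths that are not valid repositories.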
+ try:
+ repos.append(Repo(r))
+        except RepoError:
+ continue
+
+ msg = "%d package repositor" % len(repos)
+ msg += "y." if len(repos) == 1 else "ies."
+ tty.msg(msg)
+
+ if not repos:
+ return
+
+ max_ns_len = max(len(r.namespace) for r in repos)
+    fmt = "%%-%ds%%s" % (max_ns_len + 4)
+    for repo in repos:
+        print fmt % (repo.namespace, repo.root)
+
+
+def repo(parser, args):
+ action = { 'create' : repo_create,
+ 'list' : repo_list,
+ 'add' : repo_add,
+ 'remove' : repo_remove,
+ 'rm' : repo_remove}
+ action[args.repo_command](args)
diff --git a/lib/spack/spack/cmd/restage.py b/lib/spack/spack/cmd/restage.py
index e735a12c32..540c2ef2a5 100644
--- a/lib/spack/spack/cmd/restage.py
+++ b/lib/spack/spack/cmd/restage.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import llnl.util.tty as tty
@@ -42,5 +42,5 @@ def restage(parser, args):
specs = spack.cmd.parse_specs(args.packages, concretize=True)
for spec in specs:
- package = spack.db.get(spec)
+ package = spack.repo.get(spec)
package.do_restage()
diff --git a/lib/spack/spack/cmd/spec.py b/lib/spack/spack/cmd/spec.py
index 407519313c..43a106ea37 100644
--- a/lib/spack/spack/cmd/spec.py
+++ b/lib/spack/spack/cmd/spec.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import spack.cmd
import llnl.util.tty as tty
diff --git a/lib/spack/spack/cmd/stage.py b/lib/spack/spack/cmd/stage.py
index f3dc97be17..975bb54ef7 100644
--- a/lib/spack/spack/cmd/stage.py
+++ b/lib/spack/spack/cmd/stage.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,7 +23,7 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
-from external import argparse
+import argparse
import llnl.util.tty as tty
import spack
@@ -35,6 +35,9 @@ def setup_parser(subparser):
subparser.add_argument(
'-n', '--no-checksum', action='store_true', dest='no_checksum',
help="Do not check downloaded packages against checksum")
+ subparser.add_argument(
+ '-p', '--path', dest='path',
+ help="Path to stage package, does not add to spack tree")
subparser.add_argument(
'specs', nargs=argparse.REMAINDER, help="specs of packages to stage")
@@ -49,5 +52,7 @@ def stage(parser, args):
specs = spack.cmd.parse_specs(args.specs, concretize=True)
for spec in specs:
- package = spack.db.get(spec)
+ package = spack.repo.get(spec)
+ if args.path:
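+            # Stage into the user-supplied path instead of the default stage tree.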
+ package.path = args.path
package.do_stage()
diff --git a/lib/spack/spack/cmd/test-install.py b/lib/spack/spack/cmd/test-install.py
index 68b761d5dc..656873a2f0 100644
--- a/lib/spack/spack/cmd/test-install.py
+++ b/lib/spack/spack/cmd/test-install.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import xml.etree.ElementTree as ET
import itertools
import re
@@ -37,20 +37,20 @@ from spack.build_environment import InstallError
from spack.fetch_strategy import FetchError
import spack.cmd
-description = "Treat package installations as unit tests and output formatted test results"
+description = "Run package installation as a unit test, output formatted results."
def setup_parser(subparser):
subparser.add_argument(
'-j', '--jobs', action='store', type=int,
help="Explicitly set number of make jobs. Default is #cpus.")
-
+
subparser.add_argument(
'-n', '--no-checksum', action='store_true', dest='no_checksum',
help="Do not check packages against checksum")
-
+
subparser.add_argument(
'-o', '--output', action='store', help="test output goes in this file")
-
+
subparser.add_argument(
'package', nargs=argparse.REMAINDER, help="spec of package to install")
@@ -59,10 +59,10 @@ class JunitResultFormat(object):
def __init__(self):
self.root = ET.Element('testsuite')
self.tests = []
-
+
def add_test(self, buildId, testResult, buildInfo=None):
self.tests.append((buildId, testResult, buildInfo))
-
+
def write_to(self, stream):
self.root.set('tests', '{0}'.format(len(self.tests)))
for buildId, testResult, buildInfo in self.tests:
@@ -84,25 +84,25 @@ class TestResult(object):
PASSED = 0
FAILED = 1
SKIPPED = 2
-
+
class BuildId(object):
def __init__(self, spec):
self.name = spec.name
self.version = spec.version
self.hashId = spec.dag_hash()
-
+
def stringId(self):
return "-".join(str(x) for x in (self.name, self.version, self.hashId))
def __hash__(self):
return hash((self.name, self.version, self.hashId))
-
+
def __eq__(self, other):
if not isinstance(other, BuildId):
return False
-
- return ((self.name, self.version, self.hashId) ==
+
+ return ((self.name, self.version, self.hashId) ==
(other.name, other.version, other.hashId))
@@ -114,19 +114,19 @@ def fetch_log(path):
def failed_dependencies(spec):
- return set(childSpec for childSpec in spec.dependencies.itervalues() if not
- spack.db.get(childSpec).installed)
+ return set(childSpec for childSpec in spec.dependencies.itervalues() if not
+ spack.repo.get(childSpec).installed)
def create_test_output(topSpec, newInstalls, output, getLogFunc=fetch_log):
- # Post-order traversal is not strictly required but it makes sense to output
+ # Post-order traversal is not strictly required but it makes sense to output
# tests for dependencies first.
for spec in topSpec.traverse(order='post'):
if spec not in newInstalls:
continue
failedDeps = failed_dependencies(spec)
- package = spack.db.get(spec)
+ package = spack.repo.get(spec)
if failedDeps:
result = TestResult.SKIPPED
dep = iter(failedDeps).next()
@@ -143,12 +143,12 @@ def create_test_output(topSpec, newInstalls, output, getLogFunc=fetch_log):
re.search('error:', line, re.IGNORECASE))
errOutput = errMessages if errMessages else lines[-10:]
errOutput = '\n'.join(itertools.chain(
- [spec.to_yaml(), "Errors:"], errOutput,
+ [spec.to_yaml(), "Errors:"], errOutput,
["Build Log:", package.build_log_path]))
else:
result = TestResult.PASSED
errOutput = None
-
+
bId = BuildId(spec)
output.add_test(bId, result, errOutput)
@@ -163,18 +163,18 @@ def test_install(parser, args):
if args.no_checksum:
spack.do_checksum = False # TODO: remove this global.
-
+
specs = spack.cmd.parse_specs(args.package, concretize=True)
if len(specs) > 1:
tty.die("Only 1 top-level package can be specified")
topSpec = iter(specs).next()
-
+
newInstalls = set()
for spec in topSpec.traverse():
- package = spack.db.get(spec)
+ package = spack.repo.get(spec)
if not package.installed:
newInstalls.add(spec)
-
+
if not args.output:
bId = BuildId(topSpec)
outputDir = join_path(os.getcwd(), "test-output")
@@ -183,12 +183,12 @@ def test_install(parser, args):
outputFpath = join_path(outputDir, "test-{0}.xml".format(bId.stringId()))
else:
outputFpath = args.output
-
+
for spec in topSpec.traverse(order='post'):
# Calling do_install for the top-level package would be sufficient but
# this attempts to keep going if any package fails (other packages which
# are not dependents may succeed)
- package = spack.db.get(spec)
+ package = spack.repo.get(spec)
if (not failed_dependencies(spec)) and (not package.installed):
try:
package.do_install(
@@ -202,7 +202,7 @@ def test_install(parser, args):
pass
except FetchError:
pass
-
+
jrf = JunitResultFormat()
handled = {}
create_test_output(topSpec, newInstalls, jrf)
diff --git a/lib/spack/spack/cmd/test.py b/lib/spack/spack/cmd/test.py
index b1418ac2f1..ddc6cb4fce 100644
--- a/lib/spack/spack/cmd/test.py
+++ b/lib/spack/spack/cmd/test.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,8 +22,10 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+import os
from pprint import pprint
+from llnl.util.filesystem import join_path, mkdirp
from llnl.util.tty.colify import colify
from llnl.util.lang import list_modules
@@ -38,6 +40,12 @@ def setup_parser(subparser):
subparser.add_argument(
'-l', '--list', action='store_true', dest='list', help="Show available tests")
subparser.add_argument(
+ '--createXmlOutput', action='store_true', dest='createXmlOutput',
+ help="Create JUnit XML from test results")
+ subparser.add_argument(
+ '--xmlOutputDir', dest='xmlOutputDir',
+ help="Nose creates XML files in this directory")
+ subparser.add_argument(
'-v', '--verbose', action='store_true', dest='verbose',
help="verbose output")
@@ -48,4 +56,14 @@ def test(parser, args):
colify(spack.test.list_tests(), indent=2)
else:
- spack.test.run(args.names, args.verbose)
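+        # JUnit XML output is opt-in; without it, no output directory is made.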
+ if not args.createXmlOutput:
+ outputDir = None
+ else:
+ if not args.xmlOutputDir:
+ outputDir = join_path(os.getcwd(), "test-output")
+ else:
+ outputDir = os.path.abspath(args.xmlOutputDir)
+
+ if not os.path.exists(outputDir):
+ mkdirp(outputDir)
+ spack.test.run(args.names, outputDir, args.verbose)
diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py
index e80f2d2636..8b5b46bbee 100644
--- a/lib/spack/spack/cmd/uninstall.py
+++ b/lib/spack/spack/cmd/uninstall.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,19 +22,34 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import sys
-from external import argparse
+from __future__ import print_function
-import llnl.util.tty as tty
-from llnl.util.tty.colify import colify
+import argparse
+import llnl.util.tty as tty
import spack
import spack.cmd
-import spack.packages
+import spack.repository
from spack.cmd.find import display_specs
-from spack.package import PackageStillNeededError
-description="Remove an installed package"
+description = "Remove an installed package"
+
+error_message = """You can either:
+ a) Use a more specific spec, or
+ b) use spack uninstall -a to uninstall ALL matching specs.
+"""
+
+
+def ask_for_confirmation(message):
+ while True:
+ tty.msg(message + '[y/n]')
+ choice = raw_input().lower()
+ if choice == 'y':
+ break
+ elif choice == 'n':
+ raise SystemExit('Operation aborted')
+ tty.warn('Please reply either "y" or "n"')
+
def setup_parser(subparser):
subparser.add_argument(
@@ -42,11 +57,102 @@ def setup_parser(subparser):
help="Remove regardless of whether other packages depend on this one.")
subparser.add_argument(
'-a', '--all', action='store_true', dest='all',
- help="USE CAREFULLY. Remove ALL installed packages that match each supplied spec. " +
- "i.e., if you say uninstall libelf, ALL versions of libelf are uninstalled. " +
- "This is both useful and dangerous, like rm -r.")
+ help="USE CAREFULLY. Remove ALL installed packages that match each " +
+ "supplied spec. i.e., if you say uninstall libelf, ALL versions of " +
+ "libelf are uninstalled. This is both useful and dangerous, like rm -r.")
subparser.add_argument(
- 'packages', nargs=argparse.REMAINDER, help="specs of packages to uninstall")
+ '-d', '--dependents', action='store_true', dest='dependents',
+ help='Also uninstall any packages that depend on the ones given via command line.'
+ )
+ subparser.add_argument(
+ '-y', '--yes-to-all', action='store_true', dest='yes_to_all',
+ help='Assume "yes" is the answer to every confirmation asked to the user.'
+
+ )
+ subparser.add_argument('packages', nargs=argparse.REMAINDER, help="specs of packages to uninstall")
+
+
+def concretize_specs(specs, allow_multiple_matches=False, force=False):
+ """
+    Returns a list of specs matching the not-necessarily-concretized specs given on the command line
+
+ Args:
+ specs: list of specs to be matched against installed packages
+        allow_multiple_matches : boolean (if True, multiple matches are allowed for each item in specs)
+
+ Return:
+ list of specs
+ """
+ specs_from_cli = [] # List of specs that match expressions given via command line
+ has_errors = False
+ for spec in specs:
+ matching = spack.installed_db.query(spec)
+ # For each spec provided, make sure it refers to only one package.
+ # Fail and ask user to be unambiguous if it doesn't
+ if not allow_multiple_matches and len(matching) > 1:
+ tty.error("%s matches multiple packages:" % spec)
+ print()
+ display_specs(matching, long=True, show_flags=True)
+ print()
+ has_errors = True
+
+ # No installed package matches the query
+ if len(matching) == 0 and not force:
+ tty.error("%s does not match any installed packages." % spec)
+ has_errors = True
+
+ specs_from_cli.extend(matching)
+ if has_errors:
+ tty.die(error_message)
+
+ return specs_from_cli
+
+
+def installed_dependents(specs):
+ """
+    Returns a dictionary that maps each spec to a list of its installed dependents
+
+ Args:
+ specs: list of specs to be checked for dependents
+
+ Returns:
+ dictionary of installed dependents
+ """
+ dependents = {}
+ for item in specs:
+ lst = [x for x in item.package.installed_dependents if x not in specs]
+ if lst:
+ lst = list(set(lst))
+ dependents[item] = lst
+ return dependents
+
+
+def do_uninstall(specs, force):
+ """
+ Uninstalls all the specs in a list.
+
+ Args:
+ specs: list of specs to be uninstalled
+ force: force uninstallation (boolean)
+ """
+ packages = []
+ for item in specs:
+ try:
+ # should work if package is known to spack
+ packages.append(item.package)
+ except spack.repository.UnknownPackageError as e:
+ # The package.py file has gone away -- but still
+ # want to uninstall.
+ spack.Package(item).do_uninstall(force=True)
+
+ # Sort packages to be uninstalled by the number of installed dependents
+ # This ensures we do things in the right order
+ def num_installed_deps(pkg):
+ return len(pkg.installed_dependents)
+
+ packages.sort(key=num_installed_deps)
+ for item in packages:
+ item.do_uninstall(force=force)
def uninstall(parser, args):
@@ -55,50 +161,34 @@ def uninstall(parser, args):
with spack.installed_db.write_transaction():
specs = spack.cmd.parse_specs(args.packages)
+        # Get the list of installed specs that match the ones given via cli
+        uninstall_list = concretize_specs(specs, args.all, args.force)  # handles '-a' if given on the cli
+        dependent_list = installed_dependents(uninstall_list)  # handles '-d'
- # For each spec provided, make sure it refers to only one package.
- # Fail and ask user to be unambiguous if it doesn't
- pkgs = []
- for spec in specs:
- matching_specs = spack.installed_db.query(spec)
- if not args.all and len(matching_specs) > 1:
- tty.error("%s matches multiple packages:" % spec)
- print
- display_specs(matching_specs, long=True)
- print
- print "You can either:"
- print " a) Use a more specific spec, or"
- print " b) use spack uninstall -a to uninstall ALL matching specs."
- sys.exit(1)
-
- if len(matching_specs) == 0:
- if args.force: continue
- tty.die("%s does not match any installed packages." % spec)
-
- for s in matching_specs:
- try:
- # should work if package is known to spack
- pkgs.append(s.package)
-
- except spack.packages.UnknownPackageError, e:
- # The package.py file has gone away -- but still want to uninstall.
- spack.Package(s).do_uninstall(force=True)
-
- # Sort packages to be uninstalled by the number of installed dependents
- # This ensures we do things in the right order
- def num_installed_deps(pkg):
- return len(pkg.installed_dependents)
- pkgs.sort(key=num_installed_deps)
-
- # Uninstall packages in order now.
- for pkg in pkgs:
- try:
- pkg.do_uninstall(force=args.force)
- except PackageStillNeededError, e:
- tty.error("Will not uninstall %s" % e.spec.format("$_$@$%@$#", color=True))
- print
- print "The following packages depend on it:"
- display_specs(e.dependents, long=True)
- print
- print "You can use spack uninstall -f to force this action."
- sys.exit(1)
+ # Process dependent_list and update uninstall_list
+ has_error = False
+ if dependent_list and not args.dependents and not args.force:
+ for spec, lst in dependent_list.items():
+ tty.error("Will not uninstall %s" % spec.format("$_$@$%@$#", color=True))
+ print('')
+ print("The following packages depend on it:")
+ display_specs(lst, long=True)
+ print('')
+ has_error = True
+ elif args.dependents:
+ for key, lst in dependent_list.items():
+ uninstall_list.extend(lst)
+ uninstall_list = list(set(uninstall_list))
+
+ if has_error:
+ tty.die('You can use spack uninstall --dependents to uninstall these dependencies as well')
+
+ if not args.yes_to_all:
+ tty.msg("The following packages will be uninstalled : ")
+ print('')
+ display_specs(uninstall_list, long=True, show_flags=True)
+ print('')
+        ask_for_confirmation('Do you want to proceed? ')
+
+ # Uninstall everything on the list
+ do_uninstall(uninstall_list, args.force)
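
Taken together, the refactor splits the command into three composable helpers. A condensed sketch of the resulting flow (``args`` stands in for the parsed argparse namespace):

    specs = spack.cmd.parse_specs(args.packages)
    # Match command-line specs against the installed database ('-a' semantics).
    uninstall_list = concretize_specs(specs, args.all, args.force)
    # Collect installed packages that still depend on anything in the list.
    dependent_list = installed_dependents(uninstall_list)
    if args.dependents:
        # '-d': pull the dependents into the uninstall list as well.
        for lst in dependent_list.values():
            uninstall_list.extend(lst)
        uninstall_list = list(set(uninstall_list))
    do_uninstall(uninstall_list, args.force)
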
diff --git a/lib/spack/spack/cmd/unload.py b/lib/spack/spack/cmd/unload.py
index 6442c48cb1..cfb640ee6f 100644
--- a/lib/spack/spack/cmd/unload.py
+++ b/lib/spack/spack/cmd/unload.py
@@ -6,7 +6,7 @@
# Written by David Beckingsale, david@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import spack.modules
description ="Remove package from environment using module."
diff --git a/lib/spack/spack/cmd/unuse.py b/lib/spack/spack/cmd/unuse.py
index 2a7229a3a0..06176a976b 100644
--- a/lib/spack/spack/cmd/unuse.py
+++ b/lib/spack/spack/cmd/unuse.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import spack.modules
description ="Remove package from environment using dotkit."
diff --git a/lib/spack/spack/cmd/url-parse.py b/lib/spack/spack/cmd/url-parse.py
new file mode 100644
index 0000000000..077c793d2e
--- /dev/null
+++ b/lib/spack/spack/cmd/url-parse.py
@@ -0,0 +1,75 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import sys
+
+import llnl.util.tty as tty
+
+import spack
+import spack.url
+from spack.util.web import find_versions_of_archive
+
+description = "Show parsing of a URL, optionally spider web for other versions."
+
+def setup_parser(subparser):
+ subparser.add_argument('url', help="url of a package archive")
+ subparser.add_argument(
+ '-s', '--spider', action='store_true', help="Spider the source page for versions.")
+
+
+def print_name_and_version(url):
+ name, ns, nl, ntup, ver, vs, vl, vtup = spack.url.substitution_offsets(url)
+ underlines = [" "] * max(ns+nl, vs+vl)
+ for i in range(ns, ns+nl):
+ underlines[i] = '-'
+ for i in range(vs, vs+vl):
+ underlines[i] = '~'
+
+ print " %s" % url
+ print " %s" % ''.join(underlines)
+
+
+def url_parse(parser, args):
+ url = args.url
+
+ ver, vs, vl = spack.url.parse_version_offset(url)
+ name, ns, nl = spack.url.parse_name_offset(url, ver)
+
+ tty.msg("Parsing URL:")
+ try:
+ print_name_and_version(url)
+ except spack.url.UrlParseError as e:
+ tty.error(str(e))
+
+ print
+ tty.msg("Substituting version 9.9.9b:")
+ newurl = spack.url.substitute_version(url, '9.9.9b')
+ print_name_and_version(newurl)
+
+ if args.spider:
+ print
+ tty.msg("Spidering for versions:")
+ versions = find_versions_of_archive(url)
+ for v in sorted(versions):
+ print "%-20s%s" % (v, versions[v])
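
For a typical archive URL, ``print_name_and_version`` underlines the detected package name with ``-`` and the version with ``~``; an illustrative rendering (the URL is hypothetical, and the exact offsets come from ``substitution_offsets``):

    http://www.example.com/libelf-0.8.13.tar.gz
                           ------ ~~~~~~
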
diff --git a/lib/spack/spack/cmd/urls.py b/lib/spack/spack/cmd/urls.py
index 417ce3ab68..ebab65f7d1 100644
--- a/lib/spack/spack/cmd/urls.py
+++ b/lib/spack/spack/cmd/urls.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -41,7 +41,7 @@ def setup_parser(subparser):
def urls(parser, args):
urls = set()
- for pkg in spack.db.all_packages():
+ for pkg in spack.repo.all_packages():
url = getattr(pkg.__class__, 'url', None)
if url:
urls.add(url)
diff --git a/lib/spack/spack/cmd/use.py b/lib/spack/spack/cmd/use.py
index e34c194739..c09695cfd3 100644
--- a/lib/spack/spack/cmd/use.py
+++ b/lib/spack/spack/cmd/use.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from external import argparse
+import argparse
import spack.modules
description ="Add package to environment using dotkit."
diff --git a/lib/spack/spack/cmd/versions.py b/lib/spack/spack/cmd/versions.py
index ed16728261..bba75dae96 100644
--- a/lib/spack/spack/cmd/versions.py
+++ b/lib/spack/spack/cmd/versions.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -34,7 +34,7 @@ def setup_parser(subparser):
def versions(parser, args):
- pkg = spack.db.get(args.package)
+ pkg = spack.repo.get(args.package)
safe_versions = pkg.versions
fetched_versions = pkg.fetch_remote_versions()
diff --git a/lib/spack/spack/compiler.py b/lib/spack/spack/compiler.py
index 9041c6f759..24b79a5a42 100644
--- a/lib/spack/spack/compiler.py
+++ b/lib/spack/spack/compiler.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -51,7 +51,7 @@ _version_cache = {}
def get_compiler_version(compiler_path, version_arg, regex='(.*)'):
if not compiler_path in _version_cache:
compiler = Executable(compiler_path)
- output = compiler(version_arg, return_output=True, error=None)
+ output = compiler(version_arg, output=str, error=str)
match = re.search(regex, output)
_version_cache[compiler_path] = match.group(1) if match else 'unknown'
@@ -264,12 +264,12 @@ class Compiler(object):
def __repr__(self):
- """Return a string represntation of the compiler toolchain."""
+ """Return a string representation of the compiler toolchain."""
return self.__str__()
def __str__(self):
- """Return a string represntation of the compiler toolchain."""
+ """Return a string representation of the compiler toolchain."""
return "%s(%s)" % (
self.name, '\n '.join((str(s) for s in (self.cc, self.cxx, self.f77, self.fc))))
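
The ``get_compiler_version`` change above moves to the newer ``Executable`` calling convention: passing ``output=str`` (and ``error=str``) asks the wrapper to capture the corresponding stream and hand back the captured text instead of letting it pass through to the terminal. A minimal sketch, assuming a gcc binary at the illustrated path:

    from spack.util.executable import Executable

    gcc = Executable('/usr/bin/gcc')   # path is illustrative
    text = gcc('--version', output=str, error=str)
    # 'text' now holds the captured version banner, ready for re.search().
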
diff --git a/lib/spack/spack/compilers/__init__.py b/lib/spack/spack/compilers/__init__.py
index 895bf8bc95..d625949ceb 100644
--- a/lib/spack/spack/compilers/__init__.py
+++ b/lib/spack/spack/compilers/__init__.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -27,6 +27,7 @@ system and configuring Spack to use multiple compilers.
"""
import imp
import os
+import platform
from llnl.util.lang import memoized, list_modules
from llnl.util.filesystem import join_path
@@ -35,6 +36,7 @@ import spack
import spack.error
import spack.spec
import spack.config
+import spack.architecture
from spack.util.multiproc import parmap
import spack.compiler as Comp
@@ -46,50 +48,136 @@ from spack.util.environment import get_path
_imported_compilers_module = 'spack.compilers'
_required_instance_vars = ['cc', 'cxx', 'f77', 'fc']
-_default_order = ['gcc', 'intel', 'pgi', 'clang', 'xlc']
+# TODO: customize order in config file
+if platform.system() == 'Darwin':
+ _default_order = ['clang', 'gcc', 'intel']
+else:
+ _default_order = ['gcc', 'intel', 'pgi', 'clang', 'xlc', 'nag']
+
def _auto_compiler_spec(function):
- def converter(cspec_like):
+ def converter(cspec_like, *args, **kwargs):
if not isinstance(cspec_like, spack.spec.CompilerSpec):
cspec_like = spack.spec.CompilerSpec(cspec_like)
- return function(cspec_like)
+ return function(cspec_like, *args, **kwargs)
return converter
-def _get_config():
- """Get a Spack config, but make sure it has compiler configuration
- first."""
- # If any configuration file has compilers, just stick with the
- # ones already configured.
- config = spack.config.get_compilers_config()
- existing = [spack.spec.CompilerSpec(s)
- for s in config]
- if existing:
- return config
+def _to_dict(compiler):
+ """Return a dict version of compiler suitable to insert in YAML."""
+ return {
+ str(compiler.spec) : dict(
+ (attr, getattr(compiler, attr, None))
+ for attr in _required_instance_vars)
+ }
- compilers = find_compilers(*get_path('PATH'))
- add_compilers_to_config('user', *compilers)
- # After writing compilers to the user config, return a full config
- # from all files.
- return spack.config.get_compilers_config()
+def get_compiler_config(arch=None, scope=None):
+ """Return the compiler configuration for the specified architecture.
+ """
+ # Check whether we're on a front-end (native) architecture.
+ my_arch = spack.architecture.sys_type()
+ if arch is None:
+ arch = my_arch
+
+ def init_compiler_config():
+ """Compiler search used when Spack has no compilers."""
+ config[arch] = {}
+ compilers = find_compilers(*get_path('PATH'))
+ for compiler in compilers:
+ config[arch].update(_to_dict(compiler))
+ spack.config.update_config('compilers', config, scope=scope)
+
+ config = spack.config.get_config('compilers', scope=scope)
+
+ # Update the configuration if there are currently no compilers
+ # configured. Avoid updating automatically if there ARE site
+ # compilers configured but no user ones.
+ if arch == my_arch and arch not in config:
+ if scope is None:
+ # We know no compilers were configured in any scope.
+ init_compiler_config()
+ elif scope == 'user':
+ # Check the site config and update the user config if
+ # nothing is configured at the site level.
+ site_config = spack.config.get_config('compilers', scope='site')
+ if not site_config:
+ init_compiler_config()
+
+ return config[arch] if arch in config else {}
+
+
+def add_compilers_to_config(compilers, arch=None, scope=None):
+ """Add compilers to the config for the specified architecture.
+
+ Arguments:
+ - compilers: a list of Compiler objects.
+ - arch: arch to add compilers for.
+ - scope: configuration scope to modify.
+ """
+ if arch is None:
+ arch = spack.architecture.sys_type()
+
+ compiler_config = get_compiler_config(arch, scope)
+ for compiler in compilers:
+ compiler_config[str(compiler.spec)] = dict(
+ (c, getattr(compiler, c, "None"))
+ for c in _required_instance_vars)
+
+ update = { arch : compiler_config }
+ spack.config.update_config('compilers', update, scope)
+
+
+@_auto_compiler_spec
+def remove_compiler_from_config(compiler_spec, arch=None, scope=None):
+ """Remove compilers from the config, by spec.
+
+ Arguments:
+ - compiler_specs: a list of CompilerSpec objects.
+ - arch: arch to add compilers for.
+ - scope: configuration scope to modify.
+ """
+ if arch is None:
+ arch = spack.architecture.sys_type()
+
+ compiler_config = get_compiler_config(arch, scope)
+ del compiler_config[str(compiler_spec)]
+ update = { arch : compiler_config }
+
+ spack.config.update_config('compilers', update, scope)
+
+
+def all_compilers_config(arch=None, scope=None):
+ """Return a set of specs for all the compiler versions currently
+ available to build with. These are instances of CompilerSpec.
+ """
+ # Get compilers for this architecture.
+ arch_config = get_compiler_config(arch, scope)
+
+ # Merge 'all' compilers with arch-specific ones.
+ # Arch-specific compilers have higher precedence.
+ merged_config = get_compiler_config('all', scope=scope)
+ merged_config = spack.config._merge_yaml(merged_config, arch_config)
+
+ return merged_config
+
+
+def all_compilers(arch=None, scope=None):
+ # Return compiler specs from the merged config.
+ return [spack.spec.CompilerSpec(s)
+ for s in all_compilers_config(arch, scope)]
-_cached_default_compiler = None
def default_compiler():
- global _cached_default_compiler
- if _cached_default_compiler:
- return _cached_default_compiler
versions = []
- for name in _default_order: # TODO: customize order.
+ for name in _default_order:
versions = find(name)
- if versions: break
-
- if not versions:
+ if versions:
+ break
+ else:
raise NoCompilersError()
- _cached_default_compiler = sorted(versions)[-1]
- return _cached_default_compiler
+ return sorted(versions)[-1]
def find_compilers(*path):
@@ -124,20 +212,6 @@ def find_compilers(*path):
return clist
-def add_compilers_to_config(scope, *compilers):
- compiler_config_tree = {}
- for compiler in compilers:
- compiler_entry = {}
- for c in _required_instance_vars:
- val = getattr(compiler, c)
- if not val:
- val = "None"
- compiler_entry[c] = val
- compiler_config_tree[str(compiler.spec)] = compiler_entry
- spack.config.add_to_compiler_config(compiler_config_tree, scope)
-
-
-
def supported_compilers():
"""Return a set of names of compilers supported by Spack.
@@ -153,27 +227,19 @@ def supported(compiler_spec):
return compiler_spec.name in supported_compilers()
-def all_compilers():
- """Return a set of specs for all the compiler versions currently
- available to build with. These are instances of CompilerSpec.
- """
- configuration = _get_config()
- return [spack.spec.CompilerSpec(s) for s in configuration]
-
-
@_auto_compiler_spec
-def find(compiler_spec):
+def find(compiler_spec, arch=None, scope=None):
"""Return specs of available compilers that match the supplied
- compiler spec. Return an empty list if nothing found."""
- return [c for c in all_compilers() if c.satisfies(compiler_spec)]
+    compiler spec. Return an empty list if nothing found."""
+ return [c for c in all_compilers(arch, scope) if c.satisfies(compiler_spec)]
@_auto_compiler_spec
-def compilers_for_spec(compiler_spec):
+def compilers_for_spec(compiler_spec, arch=None, scope=None):
"""This gets all compilers that satisfy the supplied CompilerSpec.
Returns an empty list if none are found.
"""
- config = _get_config()
+ config = all_compilers_config(arch, scope)
def get_compiler(cspec):
items = config[str(cspec)]
@@ -196,7 +262,7 @@ def compilers_for_spec(compiler_spec):
flags[f] = items[f]
return cls(cspec, *compiler_paths, **flags)
- matches = find(compiler_spec)
+ matches = find(compiler_spec, arch, scope)
return [get_compiler(cspec) for cspec in matches]
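
For reference, ``_to_dict`` above produces the per-compiler mapping that ``update_config`` stores under each architecture key; the shape for a hypothetical gcc entry would be:

    {'gcc@4.4.7': {'cc':  '/usr/bin/gcc',
                   'cxx': '/usr/bin/g++',
                   'f77': '/usr/bin/gfortran',
                   'fc':  '/usr/bin/gfortran'}}
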
diff --git a/lib/spack/spack/compilers/clang.py b/lib/spack/spack/compilers/clang.py
index 790901c86e..e406d86a24 100644
--- a/lib/spack/spack/compilers/clang.py
+++ b/lib/spack/spack/compilers/clang.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,10 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+import re
+import spack.compiler as cpr
from spack.compiler import *
+from spack.util.executable import *
class Clang(Compiler):
# Subclasses use possible names of C compiler
@@ -37,15 +40,44 @@ class Clang(Compiler):
# Subclasses use possible names of Fortran 90 compiler
fc_names = []
+ # Named wrapper links within spack.build_env_path
+ link_paths = { 'cc' : 'clang/clang',
+ 'cxx' : 'clang/clang++',
+ # Use default wrappers for fortran, in case provided in compilers.yaml
+ 'f77' : 'f77',
+ 'fc' : 'f90' }
@classmethod
def default_version(self, comp):
"""The '--version' option works for clang compilers.
- Output looks like this::
+ On most platforms, output looks like this::
clang version 3.1 (trunk 149096)
Target: x86_64-unknown-linux-gnu
Thread model: posix
+
+           On Mac OS X, it looks like this::
+
+ Apple LLVM version 7.0.2 (clang-700.1.81)
+ Target: x86_64-apple-darwin15.2.0
+ Thread model: posix
+
"""
- return get_compiler_version(
- comp, '--version', r'(?:clang version|based on LLVM) ([^ )]+)')
+ if comp not in cpr._version_cache:
+ compiler = Executable(comp)
+ output = compiler('--version', output=str, error=str)
+
+ ver = 'unknown'
+ match = re.search(r'^Apple LLVM version ([^ )]+)', output)
+ if match:
+ # Apple's LLVM compiler has its own versions, so suffix them.
+ ver = match.group(1) + '-apple'
+ else:
+ # Normal clang compiler versions are left as-is
+ match = re.search(r'^clang version ([^ )]+)', output)
+ if match:
+ ver = match.group(1)
+
+ cpr._version_cache[comp] = ver
+
+ return cpr._version_cache[comp]
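
A self-contained sketch of the suffixing rule the new ``default_version`` applies (sample strings taken from the docstring above; the helper name is hypothetical):

    import re

    def clang_version(banner):
        # Apple's LLVM uses its own numbering, so tag it to keep it
        # distinguishable from upstream clang releases.
        match = re.search(r'^Apple LLVM version ([^ )]+)', banner)
        if match:
            return match.group(1) + '-apple'
        match = re.search(r'^clang version ([^ )]+)', banner)
        return match.group(1) if match else 'unknown'

    clang_version('Apple LLVM version 7.0.2 (clang-700.1.81)')  # '7.0.2-apple'
    clang_version('clang version 3.1 (trunk 149096)')           # '3.1'
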
diff --git a/lib/spack/spack/compilers/gcc.py b/lib/spack/spack/compilers/gcc.py
index f0d27d590e..64214db32d 100644
--- a/lib/spack/spack/compilers/gcc.py
+++ b/lib/spack/spack/compilers/gcc.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -40,7 +40,14 @@ class Gcc(Compiler):
fc_names = ['gfortran']
# MacPorts builds gcc versions with prefixes and -mp-X.Y suffixes.
- suffixes = [r'-mp-\d\.\d']
+ # Homebrew and Linuxes may build gcc with -X, -X.Y suffixes
+ suffixes = [r'-mp-\d\.\d', r'-\d\.\d', r'-\d']
+
+ # Named wrapper links within spack.build_env_path
+ link_paths = {'cc' : 'gcc/gcc',
+ 'cxx' : 'gcc/g++',
+ 'f77' : 'gcc/gfortran',
+ 'fc' : 'gcc/gfortran' }
@property
def cxx11_flag(self):
diff --git a/lib/spack/spack/compilers/intel.py b/lib/spack/spack/compilers/intel.py
index 2a72c4eaea..69e9764790 100644
--- a/lib/spack/spack/compilers/intel.py
+++ b/lib/spack/spack/compilers/intel.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -37,6 +37,12 @@ class Intel(Compiler):
# Subclasses use possible names of Fortran 90 compiler
fc_names = ['ifort']
+ # Named wrapper links within spack.build_env_path
+ link_paths = { 'cc' : 'intel/icc',
+ 'cxx' : 'intel/icpc',
+ 'f77' : 'intel/ifort',
+ 'fc' : 'intel/ifort' }
+
@property
def cxx11_flag(self):
if self.version < ver('11.1'):
diff --git a/lib/spack/spack/compilers/nag.py b/lib/spack/spack/compilers/nag.py
new file mode 100644
index 0000000000..527a05a090
--- /dev/null
+++ b/lib/spack/spack/compilers/nag.py
@@ -0,0 +1,33 @@
+from spack.compiler import *
+
+class Nag(Compiler):
+ # Subclasses use possible names of C compiler
+ cc_names = []
+
+ # Subclasses use possible names of C++ compiler
+ cxx_names = []
+
+ # Subclasses use possible names of Fortran 77 compiler
+ f77_names = ['nagfor']
+
+ # Subclasses use possible names of Fortran 90 compiler
+ fc_names = ['nagfor']
+
+ # Named wrapper links within spack.build_env_path
+ link_paths = { # Use default wrappers for C and C++, in case provided in compilers.yaml
+ 'cc' : 'cc',
+ 'cxx' : 'c++',
+ 'f77' : 'nag/nagfor',
+ 'fc' : 'nag/nagfor' }
+
+ @classmethod
+ def default_version(self, comp):
+ """The '-V' option works for nag compilers.
+ Output looks like this::
+
+ NAG Fortran Compiler Release 6.0(Hibiya) Build 1037
+ Product NPL6A60NA for x86-64 Linux
+ Copyright 1990-2015 The Numerical Algorithms Group Ltd., Oxford, U.K.
+ """
+ return get_compiler_version(
+ comp, '-V', r'NAG Fortran Compiler Release ([0-9.]+)')
diff --git a/lib/spack/spack/compilers/pgi.py b/lib/spack/spack/compilers/pgi.py
index d97f24c12e..c6a1078bd9 100644
--- a/lib/spack/spack/compilers/pgi.py
+++ b/lib/spack/spack/compilers/pgi.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -29,22 +29,28 @@ class Pgi(Compiler):
cc_names = ['pgcc']
# Subclasses use possible names of C++ compiler
- cxx_names = ['pgCC']
+ cxx_names = ['pgc++', 'pgCC']
# Subclasses use possible names of Fortran 77 compiler
- f77_names = ['pgf77']
+ f77_names = ['pgfortran', 'pgf77']
# Subclasses use possible names of Fortran 90 compiler
- fc_names = ['pgf95', 'pgf90']
+ fc_names = ['pgfortran', 'pgf95', 'pgf90']
+
+ # Named wrapper links within spack.build_env_path
+ link_paths = { 'cc' : 'pgi/pgcc',
+ 'cxx' : 'pgi/pgc++',
+ 'f77' : 'pgi/pgfortran',
+ 'fc' : 'pgi/pgfortran' }
@classmethod
def default_version(cls, comp):
"""The '-V' option works for all the PGI compilers.
Output looks like this::
- pgf95 10.2-0 64-bit target on x86-64 Linux -tp nehalem-64
- Copyright 1989-2000, The Portland Group, Inc. All Rights Reserved.
- Copyright 2000-2010, STMicroelectronics, Inc. All Rights Reserved.
+ pgcc 15.10-0 64-bit target on x86-64 Linux -tp sandybridge
+ The Portland Group - PGI Compilers and Tools
+ Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
"""
return get_compiler_version(
comp, '-V', r'pg[^ ]* ([^ ]+) \d\d\d?-bit target')
diff --git a/lib/spack/spack/compilers/xl.py b/lib/spack/spack/compilers/xl.py
index 562186b865..c1d55109a3 100644
--- a/lib/spack/spack/compilers/xl.py
+++ b/lib/spack/spack/compilers/xl.py
@@ -7,7 +7,7 @@
# Written by François Bissey, francois.bissey@canterbury.ac.nz, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -38,6 +38,12 @@ class Xl(Compiler):
# Subclasses use possible names of Fortran 90 compiler
fc_names = ['xlf90','xlf90_r','xlf95','xlf95_r','xlf2003','xlf2003_r','xlf2008','xlf2008_r']
+ # Named wrapper links within spack.build_env_path
+ link_paths = { 'cc' : 'xl/xlc',
+ 'cxx' : 'xl/xlc++',
+ 'f77' : 'xl/xlf',
+ 'fc' : 'xl/xlf90' }
+
@property
def cxx11_flag(self):
if self.version < ver('13.1'):
diff --git a/lib/spack/spack/concretize.py b/lib/spack/spack/concretize.py
index 484da97e90..6c0fda2d59 100644
--- a/lib/spack/spack/concretize.py
+++ b/lib/spack/spack/concretize.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -33,11 +33,16 @@ or user preferences.
TODO: make this customizable and allow users to configure
concretization policies.
"""
+import spack
import spack.spec
import spack.compilers
import spack.architecture
import spack.error
from spack.version import *
+from functools import partial
+from spec import DependencyMap
+from itertools import chain
+from spack.config import *
import spack.compiler as Compiler
@@ -47,10 +52,92 @@ class DefaultConcretizer(object):
default concretization strategies, or you can override all of them.
"""
+ def _valid_virtuals_and_externals(self, spec):
+ """Returns a list of candidate virtual dep providers and external
+           packages that could be used to concretize a spec."""
+ # First construct a list of concrete candidates to replace spec with.
+ candidates = [spec]
+ if spec.virtual:
+ providers = spack.repo.providers_for(spec)
+ if not providers:
+ raise UnsatisfiableProviderSpecError(providers[0], spec)
+ spec_w_preferred_providers = find_spec(
+ spec, lambda(x): spack.pkgsort.spec_has_preferred_provider(x.name, spec.name))
+ if not spec_w_preferred_providers:
+ spec_w_preferred_providers = spec
+ provider_cmp = partial(spack.pkgsort.provider_compare, spec_w_preferred_providers.name, spec.name)
+ candidates = sorted(providers, cmp=provider_cmp)
+
+ # For each candidate package, if it has externals, add those to the usable list.
+ # if it's not buildable, then *only* add the externals.
+ usable = []
+ for cspec in candidates:
+ if is_spec_buildable(cspec):
+ usable.append(cspec)
+ externals = spec_externals(cspec)
+ for ext in externals:
+ if ext.satisfies(spec):
+ usable.append(ext)
+
+ # If nothing is in the usable list now, it's because we aren't
+ # allowed to build anything.
+ if not usable:
+ raise NoBuildError(spec)
+
+ def cmp_externals(a, b):
+ if a.name != b.name:
+ # We're choosing between different providers, so
+ # maintain order from provider sort
+ return candidates.index(a) - candidates.index(b)
+
+ result = cmp_specs(a, b)
+ if result != 0:
+ return result
+
+ # prefer external packages to internal packages.
+ if a.external is None or b.external is None:
+ return -cmp(a.external, b.external)
+ else:
+ return cmp(a.external, b.external)
+
+ usable.sort(cmp=cmp_externals)
+ return usable
+
+
+ def choose_virtual_or_external(self, spec):
+ """Given a list of candidate virtual and external packages, try to
+ find one that is most ABI compatible.
+ """
+ candidates = self._valid_virtuals_and_externals(spec)
+ if not candidates:
+ return candidates
+
+ # Find the nearest spec in the dag that has a compiler. We'll
+ # use that spec to calibrate compiler compatibility.
+ abi_exemplar = find_spec(spec, lambda(x): x.compiler)
+ if not abi_exemplar:
+ abi_exemplar = spec.root
+
+ # Make a list including ABI compatibility of specs with the exemplar.
+ strict = [spack.abi.compatible(c, abi_exemplar) for c in candidates]
+ loose = [spack.abi.compatible(c, abi_exemplar, loose=True) for c in candidates]
+ keys = zip(strict, loose, candidates)
+
+ # Sort candidates from most to least compatibility.
+ # Note:
+ # 1. We reverse because True > False.
+ # 2. Sort is stable, so c's keep their order.
+ keys.sort(key=lambda k:k[:2], reverse=True)
+
+ # Pull the candidates back out and return them in order
+ candidates = [c for s,l,c in keys]
+ return candidates
+
+
def concretize_version(self, spec):
"""If the spec is already concrete, return. Otherwise take
- the most recent available version, and default to the package's
- version if there are no avaialble versions.
+ the preferred version from spackconfig, and default to the package's
+ version if there are no available versions.
TODO: In many cases we probably want to look for installed
versions of each package and use an installed version
@@ -68,12 +155,18 @@ class DefaultConcretizer(object):
# If there are known available versions, return the most recent
# version that satisfies the spec
pkg = spec.package
+ cmp_versions = partial(spack.pkgsort.version_compare, spec.name)
valid_versions = sorted(
[v for v in pkg.versions
- if any(v.satisfies(sv) for sv in spec.versions)])
+ if any(v.satisfies(sv) for sv in spec.versions)],
+ cmp=cmp_versions)
+
+ def prefer_key(v):
+ return pkg.versions.get(Version(v)).get('preferred', False)
+ valid_versions.sort(key=prefer_key, reverse=True)
if valid_versions:
- spec.versions = ver([valid_versions[-1]])
+ spec.versions = ver([valid_versions[0]])
else:
# We don't know of any SAFE versions that match the given
# spec. Grab the spec's versions and grab the highest
@@ -127,7 +220,7 @@ class DefaultConcretizer(object):
the default variants from the package specification.
"""
changed = False
- for name, variant in spec.package.variants.items():
+ for name, variant in spec.package_class.variants.items():
if name not in spec.variants:
spec.variants[name] = spack.spec.VariantSpec(name, variant.default)
changed = True
@@ -138,10 +231,10 @@ class DefaultConcretizer(object):
"""If the spec already has a compiler, we're done. If not, then take
the compiler used for the nearest ancestor with a compiler
spec and use that. If the ancestor's compiler is not
- concrete, then give it a valid version. If there is no
- ancestor with a compiler, use the system default compiler.
+           concrete, then use the preferred compiler as specified in
+ spackconfig.
- Intuition: Use the system default if no package that depends on
+ Intuition: Use the spackconfig default if no package that depends on
this one has a strict compiler requirement. Otherwise, try to
build with the compiler that will be used by libraries that
link to this one, to maximize compatibility.
@@ -153,25 +246,28 @@ class DefaultConcretizer(object):
spec.compiler in all_compilers):
return False
- try:
- nearest = next(p for p in spec.traverse(direction='parents')
- if p.compiler is not None).compiler
-
- if not nearest in all_compilers:
- # Take the newest compiler that saisfies the spec
- matches = sorted(spack.compilers.find(nearest))
- if not matches:
- raise UnavailableCompilerVersionError(nearest)
-
- # copy concrete version into nearest spec
- nearest.versions = matches[-1].versions.copy()
- assert(nearest.concrete)
-
- spec.compiler = nearest.copy()
-
- except StopIteration:
- spec.compiler = spack.compilers.default_compiler().copy()
-
+        # Find another spec that has a compiler, or use the root if none does
+ other_spec = spec if spec.compiler else find_spec(spec, lambda(x) : x.compiler)
+ if not other_spec:
+ other_spec = spec.root
+ other_compiler = other_spec.compiler
+ assert(other_spec)
+
+ # Check if the compiler is already fully specified
+ if other_compiler in all_compilers:
+ spec.compiler = other_compiler.copy()
+ return True
+
+ # Filter the compilers into a sorted list based on the compiler_order from spackconfig
+ compiler_list = all_compilers if not other_compiler else spack.compilers.find(other_compiler)
+ cmp_compilers = partial(spack.pkgsort.compiler_compare, other_spec.name)
+ matches = sorted(compiler_list, cmp=cmp_compilers)
+ if not matches:
+ raise UnavailableCompilerVersionError(other_compiler)
+
+ # copy concrete version into other_compiler
+ spec.compiler = matches[0].copy()
+ assert(spec.compiler.concrete)
return True # things changed.
@@ -229,18 +325,78 @@ class DefaultConcretizer(object):
return ret
- def choose_provider(self, spec, providers):
- """This is invoked for virtual specs. Given a spec with a virtual name,
- say "mpi", and a list of specs of possible providers of that spec,
- select a provider and return it.
- """
- assert(spec.virtual)
- assert(providers)
-
- index = spack.spec.index_specs(providers)
- first_key = sorted(index.keys())[0]
- latest_version = sorted(index[first_key])[-1]
- return latest_version
+# def choose_provider(self, spec, providers):
+# """This is invoked for virtual specs. Given a spec with a virtual name,
+# say "mpi", and a list of specs of possible providers of that spec,
+# select a provider and return it.
+# """
+# assert(spec.virtual)
+# assert(providers)
+# index = spack.spec.index_specs(providers)
+# first_key = sorted(index.keys())[0]
+# latest_version = sorted(index[first_key])[-1]
+# return latest_version
+
+
+def find_spec(spec, condition):
+ """Searches the dag from spec in an intelligent order and looks
+ for a spec that matches a condition"""
+ # First search parents, then search children
+ dagiter = chain(spec.traverse(direction='parents', root=False),
+ spec.traverse(direction='children', root=False))
+ visited = set()
+ for relative in dagiter:
+ if condition(relative):
+ return relative
+ visited.add(id(relative))
+
+ # Then search all other relatives in the DAG *except* spec
+ for relative in spec.root.traverse():
+ if relative is spec: continue
+ if id(relative) in visited: continue
+ if condition(relative):
+ return relative
+
+ # Finally search spec itself.
+ if condition(spec):
+ return spec
+
+ return None # Nothing matched the condition.
+
+
+def cmp_specs(lhs, rhs):
+ # Package name sort order is not configurable, always goes alphabetical
+ if lhs.name != rhs.name:
+ return cmp(lhs.name, rhs.name)
+
+ # Package version is second in compare order
+ pkgname = lhs.name
+ if lhs.versions != rhs.versions:
+ return spack.pkgsort.version_compare(
+ pkgname, lhs.versions, rhs.versions)
+
+ # Compiler is third
+ if lhs.compiler != rhs.compiler:
+ return spack.pkgsort.compiler_compare(
+ pkgname, lhs.compiler, rhs.compiler)
+
+ # Variants
+ if lhs.variants != rhs.variants:
+ return spack.pkgsort.variant_compare(
+ pkgname, lhs.variants, rhs.variants)
+
+ # Architecture
+ if lhs.architecture != rhs.architecture:
+ return spack.pkgsort.architecture_compare(
+ pkgname, lhs.architecture, rhs.architecture)
+
+ # Dependency is not configurable
+ lhash, rhash = hash(lhs), hash(rhs)
+ if lhash != rhash:
+ return -1 if lhash < rhash else 1
+
+ # Equal specs
+ return 0
class UnavailableCompilerVersionError(spack.error.SpackError):
@@ -258,3 +414,11 @@ class NoValidVersionError(spack.error.SpackError):
def __init__(self, spec):
super(NoValidVersionError, self).__init__(
"There are no valid versions for %s that match '%s'" % (spec.name, spec.versions))
+
+
+class NoBuildError(spack.error.SpackError):
+ """Raised when a package is configured with the buildable option False, but
+ no satisfactory external versions can be found"""
+ def __init__(self, spec):
+ super(NoBuildError, self).__init__(
+ "The spec '%s' is configured as not buildable, and no matching external installs were found" % spec.name)
diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py
index 3e91958c2c..14e5aaf4fb 100644
--- a/lib/spack/spack/config.py
+++ b/lib/spack/spack/config.py
@@ -1,12 +1,12 @@
##############################################################################
-# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -45,11 +45,11 @@ several configuration files, such as compilers.yaml or mirrors.yaml.
Configuration file format
===============================
-Configuration files are formatted using YAML syntax.
-This format is implemented by Python's
-yaml class, and it's easy to read and versatile.
+Configuration files are formatted using YAML syntax. This format is
+implemented by libyaml (included with Spack as an external module),
+and it's easy to read and versatile.
-The config files are structured as trees, like this ``compiler`` section::
+Config files are structured as trees, like this ``compiler`` section::
compilers:
chaos_5_x86_64_ib:
@@ -67,274 +67,574 @@ In this example, entries like ''compilers'' and ''xlc@12.1'' are used to
categorize entries beneath them in the tree. At the root of the tree,
entries like ''cc'' and ''cxx'' are specified as name/value pairs.
-Spack returns these trees as nested dicts. The dict for the above example
-would looks like:
-
- { 'compilers' :
- { 'chaos_5_x86_64_ib' :
- { 'gcc@4.4.7' :
- { 'cc' : '/usr/bin/gcc',
- 'cxx' : '/usr/bin/g++'
- 'f77' : '/usr/bin/gfortran'
- 'fc' : '/usr/bin/gfortran' }
- }
- { 'bgqos_0' :
- { 'cc' : '/usr/local/bin/mpixlc' }
- }
- }
-
-Some routines, like get_mirrors_config and get_compilers_config may strip
-off the top-levels of the tree and return subtrees.
+``config.get_config()`` returns these trees as nested dicts, but it
+strips the first level off. So, ``config.get_config('compilers')``
+would return something like this for the above example:
+
+ { 'chaos_5_x86_64_ib' :
+ { 'gcc@4.4.7' :
+ { 'cc' : '/usr/bin/gcc',
+ 'cxx' : '/usr/bin/g++'
+ 'f77' : '/usr/bin/gfortran'
+ 'fc' : '/usr/bin/gfortran' }
+ }
+ { 'bgqos_0' :
+ { 'cc' : '/usr/local/bin/mpixlc' } }
+
+Likewise, the ``mirrors.yaml`` file's first line must be ``mirrors:``,
+but ``get_config()`` strips that off too.
+
+Precedence
+===============================
+
+``config.py`` routines attempt to recursively merge configuration
+across scopes. So if there are ``compilers.yaml`` files in both the
+site scope and the user scope, ``get_config('compilers')`` will return
+merged dictionaries of *all* the compilers available. If a user
+compiler conflicts with a site compiler, Spack will overwrite the site
+configuration with the user configuration. If both the user and site
+``mirrors.yaml`` files contain lists of mirrors, then ``get_config()``
+will return a concatenated list of mirrors, with the user config items
+first.
+
+Sometimes, it is useful to *completely* override a site setting with a
+user one. To accomplish this, you can use *two* colons at the end of
+a key in a configuration file. For example, this:
+
+ compilers::
+ chaos_5_x86_64_ib:
+ gcc@4.4.7:
+ cc: /usr/bin/gcc
+ cxx: /usr/bin/g++
+ f77: /usr/bin/gfortran
+ fc: /usr/bin/gfortran
+ bgqos_0:
+ xlc@12.1:
+ cc: /usr/local/bin/mpixlc
+ ...
+
+This will make Spack take compilers *only* from the user configuration, and
+the site configuration will be ignored.
+
"""
import os
-import exceptions
+import re
import sys
+import copy
+import jsonschema
+from jsonschema import Draft4Validator, validators
+import yaml
+from yaml.error import MarkedYAMLError
+from ordereddict_backport import OrderedDict
-from external.ordereddict import OrderedDict
-from llnl.util.lang import memoized
-import spack.error
-
-from external import yaml
-from external.yaml.error import MarkedYAMLError
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp
+import copy
+
+import spack
+from spack.error import SpackError
+
+# Hacked yaml for configuration files preserves line numbers.
+import spack.util.spack_yaml as syaml
+
+
+"""Dict from section names -> schema for that section."""
+section_schemas = {
+ 'compilers': {
+ '$schema': 'http://json-schema.org/schema#',
+ 'title': 'Spack compiler configuration file schema',
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'patternProperties': {
+ 'compilers:?': { # optional colon for overriding site config.
+ 'type': 'object',
+ 'default': {},
+ 'additionalProperties': False,
+ 'patternProperties': {
+ r'\w[\w-]*': { # architecture
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'patternProperties': {
+ r'\w[\w-]*@\w[\w-]*': { # compiler spec
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'required': ['cc', 'cxx', 'f77', 'fc'],
+ 'properties': {
+ 'cc': { 'anyOf': [ {'type' : 'string' },
+ {'type' : 'null' }]},
+ 'cxx': { 'anyOf': [ {'type' : 'string' },
+ {'type' : 'null' }]},
+ 'f77': { 'anyOf': [ {'type' : 'string' },
+ {'type' : 'null' }]},
+ 'fc': { 'anyOf': [ {'type' : 'string' },
+ {'type' : 'null' }]},
+ },},},},},},},},
+
+ 'mirrors': {
+ '$schema': 'http://json-schema.org/schema#',
+ 'title': 'Spack mirror configuration file schema',
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'patternProperties': {
+ r'mirrors:?': {
+ 'type': 'object',
+ 'default': {},
+ 'additionalProperties': False,
+ 'patternProperties': {
+ r'\w[\w-]*': {
+ 'type': 'string'},},},},},
+
+ 'repos': {
+ '$schema': 'http://json-schema.org/schema#',
+ 'title': 'Spack repository configuration file schema',
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'patternProperties': {
+ r'repos:?': {
+ 'type': 'array',
+ 'default': [],
+ 'items': {
+ 'type': 'string'},},},},
+ 'packages': {
+ '$schema': 'http://json-schema.org/schema#',
+ 'title': 'Spack package configuration file schema',
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'patternProperties': {
+ r'packages:?': {
+ 'type': 'object',
+ 'default': {},
+ 'additionalProperties': False,
+ 'patternProperties': {
+ r'\w[\w-]*': { # package name
+ 'type': 'object',
+ 'default': {},
+ 'additionalProperties': False,
+ 'properties': {
+ 'version': {
+ 'type' : 'array',
+ 'default' : [],
+ 'items' : { 'anyOf' : [ { 'type' : 'string' },
+ { 'type' : 'number'}]}}, #version strings
+ 'compiler': {
+ 'type' : 'array',
+ 'default' : [],
+ 'items' : { 'type' : 'string' } }, #compiler specs
+ 'buildable': {
+ 'type': 'boolean',
+ 'default': True,
+ },
+ 'providers': {
+ 'type': 'object',
+ 'default': {},
+ 'additionalProperties': False,
+ 'patternProperties': {
+ r'\w[\w-]*': {
+ 'type' : 'array',
+ 'default' : [],
+ 'items' : { 'type' : 'string' },},},},
+ 'paths': {
+ 'type' : 'object',
+ 'default' : {},
+ }
+ },},},},},},
+ 'modules': {
+ '$schema': 'http://json-schema.org/schema#',
+ 'title': 'Spack module file configuration file schema',
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'patternProperties': {
+ r'modules:?': {
+ 'type': 'object',
+ 'default': {},
+ 'additionalProperties': False,
+ 'properties': {
+ 'enable': {
+ 'type': 'array',
+ 'default': [],
+ 'items': {
+ 'type': 'string'
+ }
+ }
+ }
+ },
+ },
+ },
+}
+
+"""OrderedDict of config scopes keyed by name.
+ Later scopes will override earlier scopes.
+"""
+config_scopes = OrderedDict()
+
+
+def validate_section_name(section):
+ """Raise a ValueError if the section is not a valid section."""
+ if section not in section_schemas:
+ raise ValueError("Invalid config section: '%s'. Options are %s"
+ % (section, section_schemas))
+
+
+def extend_with_default(validator_class):
+ """Add support for the 'default' attribute for properties and patternProperties.
+
+ jsonschema does not handle this out of the box -- it only
+ validates. This allows us to set default values for configs
+ where certain fields are `None` b/c they're deleted or
+ commented out.
+
+ """
+ validate_properties = validator_class.VALIDATORS["properties"]
+ validate_pattern_properties = validator_class.VALIDATORS["patternProperties"]
+
+ def set_defaults(validator, properties, instance, schema):
+ for property, subschema in properties.iteritems():
+ if "default" in subschema:
+ instance.setdefault(property, subschema["default"])
+ for err in validate_properties(validator, properties, instance, schema):
+ yield err
+
+ def set_pp_defaults(validator, properties, instance, schema):
+ for property, subschema in properties.iteritems():
+ if "default" in subschema:
+ if isinstance(instance, dict):
+ for key, val in instance.iteritems():
+ if re.match(property, key) and val is None:
+ instance[key] = subschema["default"]
+
+ for err in validate_pattern_properties(validator, properties, instance, schema):
+ yield err
+
+ return validators.extend(validator_class, {
+ "properties" : set_defaults,
+ "patternProperties" : set_pp_defaults
+ })
+
-_config_sections = {}
-class _ConfigCategory:
- name = None
- filename = None
- merge = True
- def __init__(self, n, f, m):
- self.name = n
- self.filename = f
- self.merge = m
- self.files_read_from = []
- self.result_dict = {}
- _config_sections[n] = self
-
-_ConfigCategory('compilers', 'compilers.yaml', True)
-_ConfigCategory('mirrors', 'mirrors.yaml', True)
-_ConfigCategory('view', 'views.yaml', True)
-_ConfigCategory('order', 'orders.yaml', True)
-
-"""Names of scopes and their corresponding configuration files."""
-config_scopes = [('site', os.path.join(spack.etc_path, 'spack')),
- ('user', os.path.expanduser('~/.spack'))]
-
-_compiler_by_arch = {}
-_read_config_file_result = {}
-def _read_config_file(filename):
- """Read a given YAML configuration file"""
- global _read_config_file_result
- if filename in _read_config_file_result:
- return _read_config_file_result[filename]
+DefaultSettingValidator = extend_with_default(Draft4Validator)
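
A minimal sketch of what the extended validator adds, using a toy schema rather than one of the real section schemas above:

    schema = {'type': 'object',
              'properties': {'enable': {'type': 'array', 'default': []}}}

    data = {}
    DefaultSettingValidator(schema).validate(data)
    # data == {'enable': []} -- the 'default' was filled in during validation.
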
+def validate_section(data, schema):
+ """Validate data read in from a Spack YAML file.
+
+ This leverages the line information (start_mark, end_mark) stored
+ on Spack YAML structures.
+
+ """
try:
- with open(filename) as f:
- ydict = yaml.load(f)
- except MarkedYAMLError, e:
- tty.die("Error parsing yaml%s: %s" % (str(e.context_mark), e.problem))
- except exceptions.IOError, e:
- _read_config_file_result[filename] = None
+ DefaultSettingValidator(schema).validate(data)
+ except jsonschema.ValidationError as e:
+ raise ConfigFormatError(e, data)
+
+
+class ConfigScope(object):
+ """This class represents a configuration scope.
+
+ A scope is one directory containing named configuration files.
+ Each file is a config "section" (e.g., mirrors, compilers, etc).
+ """
+
+ def __init__(self, name, path):
+ self.name = name # scope name.
+ self.path = path # path to directory containing configs.
+ self.sections = {} # sections read from config files.
+
+ # Register in a dict of all ConfigScopes
+ # TODO: make this cleaner. Mocking up for testing is brittle.
+ global config_scopes
+ config_scopes[name] = self
+
+ def get_section_filename(self, section):
+ validate_section_name(section)
+ return os.path.join(self.path, "%s.yaml" % section)
+
+
+ def get_section(self, section):
+ if not section in self.sections:
+ path = self.get_section_filename(section)
+ schema = section_schemas[section]
+ data = _read_config_file(path, schema)
+ self.sections[section] = data
+ return self.sections[section]
+
+
+ def write_section(self, section):
+ filename = self.get_section_filename(section)
+ data = self.get_section(section)
+ try:
+ mkdirp(self.path)
+ with open(filename, 'w') as f:
+ validate_section(data, section_schemas[section])
+ syaml.dump(data, stream=f, default_flow_style=False)
+ except jsonschema.ValidationError as e:
+ raise ConfigSanityError(e, data)
+ except (yaml.YAMLError, IOError) as e:
+ raise ConfigFileError("Error writing to config file: '%s'" % str(e))
+
+
+ def clear(self):
+ """Empty cached config information."""
+ self.sections = {}
+
+
+ConfigScope('site', os.path.join(spack.etc_path, 'spack')),
+ConfigScope('user', os.path.expanduser('~/.spack'))
+
+
+def highest_precedence_scope():
+ """Get the scope with highest precedence (prefs will override others)."""
+ return config_scopes.values()[-1]
+
+
+def validate_scope(scope):
+ """Ensure that scope is valid, and return a valid scope if it is None.
+
+ This should be used by routines in ``config.py`` to validate
+ scope name arguments, and to determine a default scope where no
+ scope is specified.
+
+ """
+ if scope is None:
+ # default to the scope with highest precedence.
+ return highest_precedence_scope()
+
+ elif scope in config_scopes:
+ return config_scopes[scope]
+
+ else:
+ raise ValueError("Invalid config scope: '%s'. Must be one of %s"
+ % (scope, config_scopes.keys()))
+
+
+def _read_config_file(filename, schema):
+ """Read a YAML configuration file."""
+ # Ignore nonexisting files.
+ if not os.path.exists(filename):
return None
- _read_config_file_result[filename] = ydict
- return ydict
+
+ elif not os.path.isfile(filename):
+ raise ConfigFileError(
+ "Invlaid configuration. %s exists but is not a file." % filename)
+
+ elif not os.access(filename, os.R_OK):
+ raise ConfigFileError("Config file is not readable: %s" % filename)
+
+ try:
+ tty.debug("Reading config file %s" % filename)
+ with open(filename) as f:
+ data = syaml.load(f)
+
+ if data:
+ validate_section(data, schema)
+ return data
+
+ except MarkedYAMLError as e:
+ raise ConfigFileError(
+ "Error parsing yaml%s: %s" % (str(e.context_mark), e.problem))
+
+ except IOError as e:
+ raise ConfigFileError(
+ "Error reading configuration file %s: %s" % (filename, str(e)))
def clear_config_caches():
"""Clears the caches for configuration files, which will cause them
to be re-read upon the next request"""
- for key,s in _config_sections.iteritems():
- s.files_read_from = []
- s.result_dict = {}
- spack.config._read_config_file_result = {}
- spack.config._compiler_by_arch = {}
- spack.compilers._cached_default_compiler = None
-
-
-def _merge_dicts(d1, d2):
- """Recursively merges two configuration trees, with entries
- in d2 taking precedence over d1"""
- if not d1:
- return d2.copy()
- if not d2:
- return d1
-
- for key2, val2 in d2.iteritems():
- if not key2 in d1:
- d1[key2] = val2
- continue
- val1 = d1[key2]
- if isinstance(val1, dict) and isinstance(val2, dict):
- d1[key2] = _merge_dicts(val1, val2)
- continue
- if isinstance(val1, list) and isinstance(val2, list):
- val1.extend(val2)
- seen = set()
- d1[key2] = [ x for x in val1 if not (x in seen or seen.add(x)) ]
- continue
- d1[key2] = val2
- return d1
-
-
-def get_config(category_name):
- """Get the confguration tree for the names category. Strips off the
- top-level category entry from the dict"""
- global config_scopes
- category = _config_sections[category_name]
- if category.result_dict:
- return category.result_dict
-
- category.result_dict = {}
- for scope, scope_path in config_scopes:
- path = os.path.join(scope_path, category.filename)
- result = _read_config_file(path)
- if not result:
- continue
- if not category_name in result:
- continue
- category.files_read_from.insert(0, path)
- result = result[category_name]
- if category.merge:
- category.result_dict = _merge_dicts(category.result_dict, result)
- else:
- category.result_dict = result
- return category.result_dict
-
-
-def get_compilers_config(arch=None):
- """Get the compiler configuration from config files for the given
- architecture. Strips off the architecture component of the
- configuration"""
- global _compiler_by_arch
- if not arch:
- arch = spack.architecture.sys_type()
- if arch in _compiler_by_arch:
- return _compiler_by_arch[arch]
-
- cc_config = get_config('compilers')
- if arch in cc_config and 'all' in cc_config:
- arch_compiler = dict(cc_config[arch])
- _compiler_by_arch[arch] = _merge_dict(arch_compiler, cc_config['all'])
- elif arch in cc_config:
- _compiler_by_arch[arch] = cc_config[arch]
- elif 'all' in cc_config:
- _compiler_by_arch[arch] = cc_config['all']
+ for scope in config_scopes.values():
+ scope.clear()
+
+
+def _merge_yaml(dest, source):
+ """Merges source into dest; entries in source take precedence over dest.
+
+ This routine may modify dest and should be assigned to dest, in
+ case dest was None to begin with, e.g.:
+
+ dest = _merge_yaml(dest, source)
+
+ Config file authors can optionally end any attribute in a dict
+ with `::` instead of `:`, and the key will override that of the
+ parent instead of merging.
+
+ """
+ def they_are(t):
+ return isinstance(dest, t) and isinstance(source, t)
+
+ # If both are None, handle specially and return None.
+ if source is None and dest is None:
+ return None
+
+    # If only source is None, it still overrides dest (the result is None).
+ elif source is None:
+ return None
+
+ # Source list is prepended (for precedence)
+ if they_are(list):
+ seen = set(source)
+ dest[:] = source + [x for x in dest if x not in seen]
+ return dest
+
+ # Source dict is merged into dest.
+ elif they_are(dict):
+ for sk, sv in source.iteritems():
+            if sk not in dest:
+ dest[sk] = copy.copy(sv)
+ else:
+ dest[sk] = _merge_yaml(dest[sk], source[sk])
+ return dest
+
+ # In any other case, overwrite with a copy of the source value.
else:
- _compiler_by_arch[arch] = {}
- return _compiler_by_arch[arch]
-
-
-def get_mirror_config():
- """Get the mirror configuration from config files"""
- return get_config('mirrors')
-
-
-def get_config_scope_dirname(scope):
- """For a scope return the config directory"""
- global config_scopes
- for s,p in config_scopes:
- if s == scope:
- return p
- tty.die("Unknown scope %s. Valid options are %s" %
- (scope, ", ".join([s for s,p in config_scopes])))
-
-
-def get_config_scope_filename(scope, category_name):
- """For some scope and category, get the name of the configuration file"""
- if not category_name in _config_sections:
- tty.die("Unknown config category %s. Valid options are: %s" %
- (category_name, ", ".join([s for s in _config_sections])))
- return os.path.join(get_config_scope_dirname(scope), _config_sections[category_name].filename)
-
-
-def add_to_config(category_name, addition_dict, scope=None):
- """Merge a new dict into a configuration tree and write the new
- configuration to disk"""
- global _read_config_file_result
- get_config(category_name)
- category = _config_sections[category_name]
-
- #If scope is specified, use it. Otherwise use the last config scope that
- #we successfully parsed data from.
- file = None
- path = None
- if not scope and not category.files_read_from:
- scope = 'user'
- if scope:
- try:
- dir = get_config_scope_dirname(scope)
- if not os.path.exists(dir):
- mkdirp(dir)
- path = os.path.join(dir, category.filename)
- file = open(path, 'w')
- except exceptions.IOError, e:
- pass
+ return copy.copy(source)
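A small worked example of these merge semantics (values are hypothetical): dicts merge key by key with source winning on conflicts, while lists are prepended and de-duplicated:

    dest = {'compilers': {'gcc': '4.9'}, 'mirrors': ['http://a']}
    source = {'compilers': {'clang': '3.6'}, 'mirrors': ['http://b', 'http://a']}

    merged = _merge_yaml(dest, source)
    # merged == {'compilers': {'gcc': '4.9', 'clang': '3.6'},
    #            'mirrors': ['http://b', 'http://a']}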
+
+
+def get_config(section, scope=None):
+ """Get configuration settings for a section.
+
+ Strips off the top-level section name from the YAML dict.
+ """
+ validate_section_name(section)
+ merged_section = syaml.syaml_dict()
+
+ if scope is None:
+ scopes = config_scopes.values()
else:
- for p in category.files_read_from:
- try:
- file = open(p, 'w')
- except exceptions.IOError, e:
- pass
- if file:
- path = p
- break;
- if not file:
- tty.die('Unable to write to config file %s' % path)
-
- #Merge the new information into the existing file info, then write to disk
- new_dict = _read_config_file_result[path]
- if new_dict and category_name in new_dict:
- new_dict = new_dict[category_name]
- new_dict = _merge_dicts(new_dict, addition_dict)
- new_dict = { category_name : new_dict }
- _read_config_file_result[path] = new_dict
- yaml.dump(new_dict, stream=file, default_flow_style=False)
- file.close()
-
- #Merge the new information into the cached results
- category.result_dict = _merge_dicts(category.result_dict, addition_dict)
-
-
-def add_to_mirror_config(addition_dict, scope=None):
- """Add mirrors to the configuration files"""
- add_to_config('mirrors', addition_dict, scope)
-
-
-def add_to_compiler_config(addition_dict, scope=None, arch=None):
- """Add compilerss to the configuration files"""
- if not arch:
- arch = spack.architecture.sys_type()
- add_to_config('compilers', { arch : addition_dict }, scope)
- clear_config_caches()
-
-
-def remove_from_config(category_name, key_to_rm, scope=None):
- """Remove a configuration key and write a new configuration to disk"""
- global config_scopes
- get_config(category_name)
- scopes_to_rm_from = [scope] if scope else [s for s,p in config_scopes]
- category = _config_sections[category_name]
-
- rmd_something = False
- for s in scopes_to_rm_from:
- path = get_config_scope_filename(scope, category_name)
- result = _read_config_file(path)
- if not result:
+ scopes = [validate_scope(scope)]
+
+ for scope in scopes:
+ # read potentially cached data from the scope.
+ data = scope.get_section(section)
+
+ # Skip empty configs
+ if not data or not isinstance(data, dict):
continue
- if not key_to_rm in result[category_name]:
+
+ # Allow complete override of site config with '<section>::'
+ override_key = section + ':'
+ if not (section in data or override_key in data):
+ tty.warn("Skipping bad configuration file: '%s'" % scope.path)
continue
- with open(path, 'w') as f:
- result[category_name].pop(key_to_rm, None)
- yaml.dump(result, stream=f, default_flow_style=False)
- category.result_dict.pop(key_to_rm, None)
- rmd_something = True
- return rmd_something
-
-
-"""Print a configuration to stdout"""
-def print_category(category_name):
- if not category_name in _config_sections:
- tty.die("Unknown config category %s. Valid options are: %s" %
- (category_name, ", ".join([s for s in _config_sections])))
- yaml.dump(get_config(category_name), stream=sys.stdout, default_flow_style=False)
+
+ if override_key in data:
+ merged_section = data[override_key]
+ else:
+ merged_section = _merge_yaml(merged_section, data[section])
+
+ return merged_section
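A usage sketch (file contents hypothetical): scopes are merged from lowest to highest precedence, and a section written as 'mirrors::' in a higher-precedence file replaces the merged result accumulated so far instead of merging into it:

    # Merge the 'mirrors' section across all scopes (site first, then user):
    mirrors = get_config('mirrors')

    # Read from a single scope only:
    user_mirrors = get_config('mirrors', scope='user')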
+
+
+def get_config_filename(scope, section):
+ """For some scope and section, get the name of the configuration file"""
+ scope = validate_scope(scope)
+ return scope.get_section_filename(section)
+
+
+def update_config(section, update_data, scope=None):
+ """Update the configuration file for a particular scope.
+
+ Overwrites contents of a section in a scope with update_data,
+ then writes out the config file.
+
+ update_data should have the top-level section name stripped off
+ (it will be re-added). Data itself can be a list, dict, or any
+ other yaml-ish structure.
+
+ """
+ # read in the config to ensure we've got current data
+ get_config(section)
+
+ validate_section_name(section) # validate section name
+ scope = validate_scope(scope) # get ConfigScope object from string.
+
+    # replace the requested section's data with the update.
+ scope.sections[section] = { section : update_data }
+ scope.write_section(section)
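A hypothetical call; note that update_data carries no top-level section key, since update_config() re-wraps it before writing <scope.path>/mirrors.yaml:

    update_config('mirrors', {'local': 'file:///srv/spack-mirror'}, scope='user')
    # Written to ~/.spack/mirrors.yaml as:
    #   mirrors:
    #     local: file:///srv/spack-mirror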
+
+
+def print_section(section):
+ """Print a configuration to stdout."""
+ try:
+ data = syaml.syaml_dict()
+ data[section] = get_config(section)
+ syaml.dump(data, stream=sys.stdout, default_flow_style=False)
+ except (yaml.YAMLError, IOError) as e:
+        raise ConfigError("Error reading configuration: %s: %s" % (section, str(e)))
+
+
+def spec_externals(spec):
+ """Return a list of external specs (with external directory path filled in),
+ one for each known external installation."""
+ allpkgs = get_config('packages')
+ name = spec.name
+
+ external_specs = []
+ pkg_paths = allpkgs.get(name, {}).get('paths', None)
+ if not pkg_paths:
+ return []
+
+ for external_spec, path in pkg_paths.iteritems():
+ if not path:
+ # skip entries without paths (avoid creating extra Specs)
+ continue
+
+ external_spec = spack.spec.Spec(external_spec, external=path)
+ if external_spec.satisfies(spec):
+ external_specs.append(external_spec)
+ return external_specs
+
+
+def is_spec_buildable(spec):
+    """Return True if the spec is configured as buildable."""
+    allpkgs = get_config('packages')
+    if spec.name not in allpkgs:
+        return True
+    if 'buildable' not in allpkgs[spec.name]:
+        return True
+    return allpkgs[spec.name]['buildable']
+
+
+class ConfigError(SpackError): pass
+class ConfigFileError(ConfigError): pass
+
+def get_path(path, data):
+ if path:
+ return get_path(path[1:], data[path[0]])
+ else:
+ return data
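get_path() just walks nested containers one key at a time, for example (data hypothetical):

    data = {'packages': {'mpich': {'buildable': False}}}
    assert get_path(['packages', 'mpich', 'buildable'], data) is False
    assert get_path([], data) is data  # an empty path returns data itself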
+
+class ConfigFormatError(ConfigError):
+ """Raised when a configuration format does not match its schema."""
+ def __init__(self, validation_error, data):
+ # Try to get line number from erroneous instance and its parent
+ instance_mark = getattr(validation_error.instance, '_start_mark', None)
+ parent_mark = getattr(validation_error.parent, '_start_mark', None)
+        path = [str(s) for s in getattr(validation_error, 'path', [])]
+
+        # Try really hard to get the parent (which sometimes is not
+        # set). This digs it out of the validated structure if it's
+        # not on the validation_error.
+ if path and not parent_mark:
+ parent_path = list(path)[:-1]
+ parent = get_path(parent_path, data)
+ if path[-1] in parent:
+ if isinstance(parent, dict):
+ keylist = parent.keys()
+ elif isinstance(parent, list):
+ keylist = parent
+ idx = keylist.index(path[-1])
+ parent_mark = getattr(keylist[idx], '_start_mark', None)
+
+ if instance_mark:
+ location = '%s:%d' % (instance_mark.name, instance_mark.line + 1)
+ elif parent_mark:
+ location = '%s:%d' % (parent_mark.name, parent_mark.line + 1)
+ elif path:
+ location = 'At ' + ':'.join(path)
+ else:
+ location = '<unknown line>'
+
+ message = '%s: %s' % (location, validation_error.message)
+        super(ConfigFormatError, self).__init__(message)
+
+class ConfigSanityError(ConfigFormatError):
+ """Same as ConfigFormatError, raised when config is written by Spack."""
diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py
index e0c14a0455..089d29325e 100644
--- a/lib/spack/spack/database.py
+++ b/lib/spack/spack/database.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -43,8 +43,8 @@ import os
import time
import socket
-from external import yaml
-from external.yaml.error import MarkedYAMLError, YAMLError
+import yaml
+from yaml.error import MarkedYAMLError, YAMLError
import llnl.util.tty as tty
from llnl.util.filesystem import *
@@ -54,6 +54,7 @@ import spack.spec
from spack.version import Version
from spack.spec import Spec
from spack.error import SpackError
+from spack.repository import UnknownPackageError
# DB goes in this directory underneath the root
_db_dirname = '.spack-db'
@@ -211,6 +212,9 @@ class Database(object):
child = self._read_spec_from_yaml(dep_hash, installs, hash_key)
spec._add_dependency(child)
+ # Specs from the database need to be marked concrete because
+ # they represent actual installations.
+ spec._mark_concrete()
return spec
@@ -326,7 +330,7 @@ class Database(object):
found = rec.ref_count
if not expected == found:
raise AssertionError(
- "Invalid ref_count: %s: %d (expected %d), in DB %s."
+ "Invalid ref_count: %s: %d (expected %d), in DB %s"
% (key, found, expected, self._index_path))
@@ -485,7 +489,7 @@ class Database(object):
1. Marks the spec as not installed.
2. Removes the spec if it has no more dependents.
3. If removed, recursively updates dependencies' ref counts
- and remvoes them if they are no longer needed.
+ and removes them if they are no longer needed.
"""
# Take a lock around the entire removal.
@@ -549,7 +553,7 @@ class Database(object):
for key, rec in self._data.items():
if installed is not any and rec.installed != installed:
continue
- if known is not any and spack.db.exists(rec.spec.name) != known:
+ if known is not any and spack.repo.exists(rec.spec.name) != known:
continue
if query_spec is any or rec.spec.satisfies(query_spec):
results.append(rec.spec)
diff --git a/lib/spack/spack/directives.py b/lib/spack/spack/directives.py
index 78039ac6f9..61cd303012 100644
--- a/lib/spack/spack/directives.py
+++ b/lib/spack/spack/directives.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -42,15 +42,19 @@ The available directives are:
* ``extends``
* ``patch``
* ``variant``
+ * ``resource``
"""
-__all__ = [ 'depends_on', 'extends', 'provides', 'patch', 'version',
- 'variant' ]
+__all__ = ['depends_on', 'extends', 'provides', 'patch', 'version',
+ 'variant', 'resource']
import re
import inspect
+import os.path
+import functools
from llnl.util.lang import *
+from llnl.util.filesystem import join_path
import spack
import spack.spec
@@ -60,7 +64,8 @@ from spack.version import Version
from spack.patch import Patch
from spack.variant import Variant
from spack.spec import Spec, parse_anonymous_spec
-
+from spack.resource import Resource
+from spack.fetch_strategy import from_kwargs
#
# This is a list of all directives, built up as they are defined in
@@ -79,8 +84,8 @@ class directive(object):
"""Decorator for Spack directives.
Spack directives allow you to modify a package while it is being
- defined, e.g. to add version or depenency information. Directives
- are one of the key pieces of Spack's package "langauge", which is
+ defined, e.g. to add version or dependency information. Directives
+ are one of the key pieces of Spack's package "language", which is
embedded in python.
Here's an example directive:
@@ -120,7 +125,7 @@ class directive(object):
dicts = (dicts,)
elif type(dicts) not in (list, tuple):
raise TypeError(
- "dicts arg must be list, tuple, or string. Found %s."
+ "dicts arg must be list, tuple, or string. Found %s"
% type(dicts))
self.dicts = dicts
@@ -141,6 +146,7 @@ class directive(object):
def __call__(self, directive_function):
directives[directive_function.__name__] = self
+ @functools.wraps(directive_function)
def wrapped(*args, **kwargs):
pkg = DictWrapper(caller_locals())
self.ensure_dicts(pkg)
@@ -168,7 +174,11 @@ def version(pkg, ver, checksum=None, **kwargs):
def _depends_on(pkg, spec, when=None):
- if when is None:
+ # If when is False do nothing
+ if when is False:
+ return
+ # If when is None or True make sure the condition is always satisfied
+ if when is None or when is True:
when = pkg.name
when_spec = parse_anonymous_spec(when, pkg.name)
@@ -238,11 +248,10 @@ def patch(pkg, url_or_filename, level=1, when=None):
if when is None:
when = pkg.name
when_spec = parse_anonymous_spec(when, pkg.name)
-
cur_patches = pkg.patches.setdefault(when_spec, [])
# if this spec is identical to some other, then append this
# patch to the existing list.
- cur_patches.append(Patch(pkg.name, url_or_filename, level))
+ cur_patches.append(Patch(pkg, url_or_filename, level))
@directive('variants')
@@ -259,6 +268,43 @@ def variant(pkg, name, default=False, description=""):
pkg.variants[name] = Variant(default, description)
+@directive('resources')
+def resource(pkg, **kwargs):
+ """
+ Define an external resource to be fetched and staged when building the package. Based on the keywords present in the
+ dictionary the appropriate FetchStrategy will be used for the resource. Resources are fetched and staged in their
+ own folder inside spack stage area, and then linked into the stage area of the package that needs them.
+
+ List of recognized keywords:
+
+ * 'when' : (optional) represents the condition upon which the resource is needed
+ * 'destination' : (optional) path where to link the resource. This path must be relative to the main package stage
+ area.
+ * 'placement' : (optional) gives the possibility to fine tune how the resource is linked into the main package stage
+ area.
+ """
+ when = kwargs.get('when', pkg.name)
+ destination = kwargs.get('destination', "")
+ placement = kwargs.get('placement', None)
+ # Check if the path is relative
+ if os.path.isabs(destination):
+ message = "The destination keyword of a resource directive can't be an absolute path.\n"
+ message += "\tdestination : '{dest}\n'".format(dest=destination)
+ raise RuntimeError(message)
+ # Check if the path falls within the main package stage area
+ test_path = 'stage_folder_root/'
+ normalized_destination = os.path.normpath(join_path(test_path, destination)) # Normalized absolute path
+ if test_path not in normalized_destination:
+ message = "The destination folder of a resource must fall within the main package stage directory.\n"
+ message += "\tdestination : '{dest}'\n".format(dest=destination)
+ raise RuntimeError(message)
+ when_spec = parse_anonymous_spec(when, pkg.name)
+ resources = pkg.resources.setdefault(when_spec, [])
+ name = kwargs.get('name')
+ fetcher = from_kwargs(**kwargs)
+ resources.append(Resource(name, fetcher, destination, placement))
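A sketch of how a package might use the directive (package name, URL, and checksum are hypothetical); the url/md5 keywords make from_kwargs() select a URLFetchStrategy for the resource:

    class Mylib(Package):  # hypothetical package
        # ... versions, variants, dependencies ...
        resource(name='extra-data',
                 url='http://example.com/extra-data-1.0.tar.gz',
                 md5='0123456789abcdef0123456789abcdef',
                 destination='third-party',  # relative to the stage area
                 when='+extras')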
+
+
class DirectiveError(spack.error.SpackError):
"""This is raised when something is wrong with a package directive."""
def __init__(self, directive, message):
@@ -271,5 +317,5 @@ class CircularReferenceError(DirectiveError):
def __init__(self, directive, package):
super(CircularReferenceError, self).__init__(
directive,
- "Package '%s' cannot pass itself to %s." % (package, directive))
+ "Package '%s' cannot pass itself to %s" % (package, directive))
self.package = package
diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py
index da8f4187cc..da8f1aa1bc 100644
--- a/lib/spack/spack/directory_layout.py
+++ b/lib/spack/spack/directory_layout.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -29,7 +29,7 @@ import hashlib
import shutil
import glob
import tempfile
-from external import yaml
+import yaml
import llnl.util.tty as tty
from llnl.util.filesystem import join_path, mkdirp
@@ -85,6 +85,16 @@ class DirectoryLayout(object):
raise NotImplementedError()
+ def check_installed(self, spec):
+ """Checks whether a spec is installed.
+
+ Return the spec's prefix, if it is installed, None otherwise.
+
+ Raise an exception if the install is inconsistent or corrupt.
+ """
+ raise NotImplementedError()
+
+
def extension_map(self, spec):
"""Get a dict of currently installed extension packages for a spec.
@@ -140,7 +150,7 @@ class DirectoryLayout(object):
if os.path.exists(path):
try:
shutil.rmtree(path)
- except exceptions.OSError, e:
+ except exceptions.OSError as e:
raise RemoveFailedError(spec, path, e)
path = os.path.dirname(path)
@@ -173,7 +183,9 @@ class YamlDirectoryLayout(DirectoryLayout):
self.spec_file_name = 'spec.yaml'
self.extension_file_name = 'extensions.yaml'
- self.build_log_name = 'build.out' # TODO: use config file.
+ self.build_log_name = 'build.out' # build log.
+ self.build_env_name = 'build.env' # build environment
+ self.packages_dir = 'repos' # archive of package.py files
# Cache of already written/read extension maps.
self._extension_maps = {}
@@ -186,6 +198,10 @@ class YamlDirectoryLayout(DirectoryLayout):
def relative_path_for_spec(self, spec):
_check_concrete(spec)
+
+ if spec.external:
+ return spec.external
+
dir_name = "%s-%s-%s" % (
spec.name,
spec.version,
@@ -212,8 +228,7 @@ class YamlDirectoryLayout(DirectoryLayout):
spec = Spec.from_yaml(f)
# Specs read from actual installations are always concrete
- spec._normal = True
- spec._concrete = True
+ spec._mark_concrete()
return spec
@@ -232,29 +247,49 @@ class YamlDirectoryLayout(DirectoryLayout):
self.build_log_name)
+ def build_env_path(self, spec):
+ return join_path(self.path_for_spec(spec), self.metadata_dir,
+ self.build_env_name)
+
+
+ def build_packages_path(self, spec):
+ return join_path(self.path_for_spec(spec), self.metadata_dir,
+ self.packages_dir)
+
+
def create_install_directory(self, spec):
_check_concrete(spec)
+ prefix = self.check_installed(spec)
+ if prefix:
+ raise InstallDirectoryAlreadyExistsError(prefix)
+
+ mkdirp(self.metadata_path(spec))
+ self.write_spec(spec, self.spec_file_path(spec))
+
+
+ def check_installed(self, spec):
+ _check_concrete(spec)
path = self.path_for_spec(spec)
spec_file_path = self.spec_file_path(spec)
- if os.path.isdir(path):
- if not os.path.isfile(spec_file_path):
- raise InconsistentInstallDirectoryError(
- 'No spec file found at path %s' % spec_file_path)
+ if not os.path.isdir(path):
+ return None
- installed_spec = self.read_spec(spec_file_path)
- if installed_spec == self.spec:
- raise InstallDirectoryAlreadyExistsError(path)
+ if not os.path.isfile(spec_file_path):
+ raise InconsistentInstallDirectoryError(
+ 'Inconsistent state: install prefix exists but contains no spec.yaml:',
+ " " + path)
- if spec.dag_hash() == installed_spec.dag_hash():
- raise SpecHashCollisionError(installed_hash, spec_hash)
- else:
- raise InconsistentInstallDirectoryError(
- 'Spec file in %s does not match hash!' % spec_file_path)
+ installed_spec = self.read_spec(spec_file_path)
+ if installed_spec == spec:
+ return path
- mkdirp(self.metadata_path(spec))
- self.write_spec(spec, spec_file_path)
+ if spec.dag_hash() == installed_spec.dag_hash():
+            raise SpecHashCollisionError(installed_spec, spec)
+ else:
+ raise InconsistentInstallDirectoryError(
+ 'Spec file in %s does not match hash!' % spec_file_path)
def all_specs(self):
@@ -324,7 +359,7 @@ class YamlDirectoryLayout(DirectoryLayout):
if not dag_hash in by_hash:
raise InvalidExtensionSpecError(
- "Spec %s not found in %s." % (dag_hash, prefix))
+ "Spec %s not found in %s" % (dag_hash, prefix))
ext_spec = by_hash[dag_hash]
if not prefix == ext_spec.prefix:
@@ -388,8 +423,8 @@ class YamlDirectoryLayout(DirectoryLayout):
class DirectoryLayoutError(SpackError):
"""Superclass for directory layout errors."""
- def __init__(self, message):
- super(DirectoryLayoutError, self).__init__(message)
+ def __init__(self, message, long_msg=None):
+ super(DirectoryLayoutError, self).__init__(message, long_msg)
class SpecHashCollisionError(DirectoryLayoutError):
@@ -411,8 +446,8 @@ class RemoveFailedError(DirectoryLayoutError):
class InconsistentInstallDirectoryError(DirectoryLayoutError):
"""Raised when a package seems to be installed to the wrong place."""
- def __init__(self, message):
- super(InconsistentInstallDirectoryError, self).__init__(message)
+ def __init__(self, message, long_msg=None):
+ super(InconsistentInstallDirectoryError, self).__init__(message, long_msg)
class InstallDirectoryAlreadyExistsError(DirectoryLayoutError):
@@ -439,7 +474,7 @@ class ExtensionConflictError(DirectoryLayoutError):
"""Raised when an extension is added to a package that already has it."""
def __init__(self, spec, ext_spec, conflict):
super(ExtensionConflictError, self).__init__(
- "%s cannot be installed in %s because it conflicts with %s."% (
+ "%s cannot be installed in %s because it conflicts with %s"% (
ext_spec.short_spec, spec.short_spec, conflict.short_spec))
diff --git a/lib/spack/spack/environment.py b/lib/spack/spack/environment.py
new file mode 100644
index 0000000000..72aafa4e2d
--- /dev/null
+++ b/lib/spack/spack/environment.py
@@ -0,0 +1,252 @@
+import os
+import os.path
+import collections
+import inspect
+
+
+class NameModifier(object):
+ def __init__(self, name, **kwargs):
+ self.name = name
+ self.args = {'name': name}
+ self.args.update(kwargs)
+
+
+class NameValueModifier(object):
+ def __init__(self, name, value, **kwargs):
+ self.name = name
+ self.value = value
+ self.args = {'name': name, 'value': value}
+ self.args.update(kwargs)
+
+
+class SetEnv(NameValueModifier):
+ def execute(self):
+ os.environ[self.name] = str(self.value)
+
+
+class UnsetEnv(NameModifier):
+ def execute(self):
+ os.environ.pop(self.name, None) # Avoid throwing if the variable was not set
+
+
+class SetPath(NameValueModifier):
+ def execute(self):
+ string_path = concatenate_paths(self.value)
+ os.environ[self.name] = string_path
+
+
+class AppendPath(NameValueModifier):
+ def execute(self):
+ environment_value = os.environ.get(self.name, '')
+ directories = environment_value.split(':') if environment_value else []
+ directories.append(os.path.normpath(self.value))
+ os.environ[self.name] = ':'.join(directories)
+
+
+class PrependPath(NameValueModifier):
+ def execute(self):
+ environment_value = os.environ.get(self.name, '')
+ directories = environment_value.split(':') if environment_value else []
+ directories = [os.path.normpath(self.value)] + directories
+ os.environ[self.name] = ':'.join(directories)
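The difference between the two path modifiers, on hypothetical values:

    os.environ['PATH'] = '/usr/bin'
    PrependPath('PATH', '/opt/tool/bin').execute()
    # PATH == '/opt/tool/bin:/usr/bin'
    AppendPath('PATH', '/opt/extra/bin').execute()
    # PATH == '/opt/tool/bin:/usr/bin:/opt/extra/bin'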
+
+
+class RemovePath(NameValueModifier):
+ def execute(self):
+ environment_value = os.environ.get(self.name, '')
+ directories = environment_value.split(':') if environment_value else []
+ directories = [os.path.normpath(x) for x in directories if x != os.path.normpath(self.value)]
+ os.environ[self.name] = ':'.join(directories)
+
+
+class EnvironmentModifications(object):
+ """
+ Keeps track of requests to modify the current environment.
+
+    Each call to a method that modifies the environment also stores information about the caller in the request:
+    - 'filename' : filename of the module where the caller is defined
+    - 'lineno' : line number where the request occurred
+    - 'context' : line of code that issued the request
+ """
+
+ def __init__(self, other=None):
+ """
+ Initializes a new instance, copying commands from other if it is not None
+
+ Args:
+            other: (optional) another instance of EnvironmentModifications to copy commands from
+ """
+ self.env_modifications = []
+ if other is not None:
+ self.extend(other)
+
+ def __iter__(self):
+ return iter(self.env_modifications)
+
+ def __len__(self):
+ return len(self.env_modifications)
+
+ def extend(self, other):
+ self._check_other(other)
+ self.env_modifications.extend(other.env_modifications)
+
+ @staticmethod
+ def _check_other(other):
+ if not isinstance(other, EnvironmentModifications):
+ raise TypeError('other must be an instance of EnvironmentModifications')
+
+ def _get_outside_caller_attributes(self):
+ stack = inspect.stack()
+ try:
+ _, filename, lineno, _, context, index = stack[2]
+ context = context[index].strip()
+ except Exception:
+ filename, lineno, context = 'unknown file', 'unknown line', 'unknown context'
+ args = {
+ 'filename': filename,
+ 'lineno': lineno,
+ 'context': context
+ }
+ return args
+
+ def set(self, name, value, **kwargs):
+ """
+ Stores in the current object a request to set an environment variable
+
+ Args:
+ name: name of the environment variable to be set
+ value: value of the environment variable
+ """
+ kwargs.update(self._get_outside_caller_attributes())
+ item = SetEnv(name, value, **kwargs)
+ self.env_modifications.append(item)
+
+ def unset(self, name, **kwargs):
+ """
+ Stores in the current object a request to unset an environment variable
+
+ Args:
+            name: name of the environment variable to be unset
+ """
+ kwargs.update(self._get_outside_caller_attributes())
+ item = UnsetEnv(name, **kwargs)
+ self.env_modifications.append(item)
+
+ def set_path(self, name, elts, **kwargs):
+ """
+ Stores a request to set a path generated from a list.
+
+ Args:
+            name: name of the environment variable to be set.
+ elts: elements of the path to set.
+ """
+ kwargs.update(self._get_outside_caller_attributes())
+ item = SetPath(name, elts, **kwargs)
+ self.env_modifications.append(item)
+
+ def append_path(self, name, path, **kwargs):
+ """
+ Stores in the current object a request to append a path to a path list
+
+ Args:
+ name: name of the path list in the environment
+ path: path to be appended
+ """
+ kwargs.update(self._get_outside_caller_attributes())
+ item = AppendPath(name, path, **kwargs)
+ self.env_modifications.append(item)
+
+ def prepend_path(self, name, path, **kwargs):
+ """
+ Same as `append_path`, but the path is pre-pended
+
+ Args:
+ name: name of the path list in the environment
+ path: path to be pre-pended
+ """
+ kwargs.update(self._get_outside_caller_attributes())
+ item = PrependPath(name, path, **kwargs)
+ self.env_modifications.append(item)
+
+ def remove_path(self, name, path, **kwargs):
+ """
+ Stores in the current object a request to remove a path from a path list
+
+ Args:
+ name: name of the path list in the environment
+ path: path to be removed
+ """
+ kwargs.update(self._get_outside_caller_attributes())
+ item = RemovePath(name, path, **kwargs)
+ self.env_modifications.append(item)
+
+ def group_by_name(self):
+ """
+ Returns a dict of the modifications grouped by variable name
+
+ Returns:
+ dict mapping the environment variable name to the modifications to be done on it
+ """
+ modifications = collections.defaultdict(list)
+ for item in self:
+ modifications[item.name].append(item)
+ return modifications
+
+ def clear(self):
+ """
+ Clears the current list of modifications
+ """
+        del self.env_modifications[:]  # list.clear() does not exist in Python 2
+
+ def apply_modifications(self):
+ """
+        Applies the modifications to the current environment, one variable at a time
+ """
+ modifications = self.group_by_name()
+ # Apply the modifications to the environment variables one variable at a time
+ for name, actions in sorted(modifications.items()):
+ for x in actions:
+ x.execute()
+
+
+def concatenate_paths(paths):
+ """
+    Concatenates an iterable of paths into a string of colon-separated paths
+
+ Args:
+ paths: iterable of paths
+
+ Returns:
+ string
+ """
+ return ':'.join(str(item) for item in paths)
+
+
+def set_or_unset_not_first(variable, changes, errstream):
+ """
+    Warn if a variable is set or unset after other modifications to the same variable have already been requested
+ """
+ indexes = [ii for ii, item in enumerate(changes) if ii != 0 and type(item) in [SetEnv, UnsetEnv]]
+ if indexes:
+ good = '\t \t{context} at {filename}:{lineno}'
+ nogood = '\t--->\t{context} at {filename}:{lineno}'
+ errstream('Suspicious requests to set or unset the variable \'{var}\' found'.format(var=variable))
+ for ii, item in enumerate(changes):
+ print_format = nogood if ii in indexes else good
+ errstream(print_format.format(**item.args))
+
+
+def validate(env, errstream):
+ """
+    Validates the environment modifications to check for the presence of suspicious patterns. Prints a warning
+    for each one found.
+
+ Current checks:
+ - set or unset variables after other changes on the same variable
+
+ Args:
+ env: list of environment modifications
+ """
+ modifications = env.group_by_name()
+ for variable, list_of_changes in sorted(modifications.items()):
+ set_or_unset_not_first(variable, list_of_changes, errstream)
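A minimal end-to-end sketch (values hypothetical): modifications are only recorded at first, can be validated, and mutate os.environ only when apply_modifications() runs:

    import sys

    def errstream(msg):
        sys.stderr.write(msg + '\n')

    env = EnvironmentModifications()
    env.set('CC', '/usr/bin/gcc')
    env.prepend_path('PATH', '/opt/tool/bin')
    env.unset('CC')            # suspicious: CC was already set above

    validate(env, errstream)   # warns about the set-then-unset of CC
    env.apply_modifications()  # now os.environ actually changes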
diff --git a/lib/spack/spack/error.py b/lib/spack/spack/error.py
index b3b24e6105..0c2e7eb53c 100644
--- a/lib/spack/spack/error.py
+++ b/lib/spack/spack/error.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/fetch_strategy.py b/lib/spack/spack/fetch_strategy.py
index b810023c5a..4ea87bea7e 100644
--- a/lib/spack/spack/fetch_strategy.py
+++ b/lib/spack/spack/fetch_strategy.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -44,6 +44,7 @@ import os
import sys
import re
import shutil
+import copy
from functools import wraps
import llnl.util.tty as tty
from llnl.util.filesystem import *
@@ -55,23 +56,28 @@ from spack.util.string import *
from spack.version import Version, ver
from spack.util.compression import decompressor_for, extension
+import spack.util.pattern as pattern
+
"""List of all fetch strategies, created by FetchStrategy metaclass."""
all_strategies = []
+
def _needs_stage(fun):
"""Many methods on fetch strategies require a stage to be set
using set_stage(). This decorator adds a check for self.stage."""
+
@wraps(fun)
def wrapper(self, *args, **kwargs):
if not self.stage:
raise NoStageError(fun)
return fun(self, *args, **kwargs)
+
return wrapper
class FetchStrategy(object):
"""Superclass of all fetch strategies."""
- enabled = False # Non-abstract subclasses should be enabled.
+ enabled = False # Non-abstract subclasses should be enabled.
required_attributes = None # Attributes required in version() args.
class __metaclass__(type):
@@ -80,28 +86,28 @@ class FetchStrategy(object):
type.__init__(cls, name, bases, dict)
if cls.enabled: all_strategies.append(cls)
-
def __init__(self):
# The stage is initialized late, so that fetch strategies can be constructed
# at package construction time. This is where things will be fetched.
self.stage = None
-
def set_stage(self, stage):
"""This is called by Stage before any of the fetching
methods are called on the stage."""
self.stage = stage
-
# Subclasses need to implement these methods
def fetch(self): pass # Return True on success, False on fail.
+
def check(self): pass # Do checksum.
+
def expand(self): pass # Expand archive.
+
def reset(self): pass # Revert to freshly downloaded state.
def archive(self, destination): pass # Used to create tarball for mirror.
- def __str__(self): # Should be human readable URL.
+ def __str__(self): # Should be human readable URL.
return "FetchStrategy.__str___"
# This method is used to match fetch strategies to version()
@@ -111,6 +117,15 @@ class FetchStrategy(object):
return any(k in args for k in cls.required_attributes)
+@pattern.composite(interface=FetchStrategy)
+class FetchStrategyComposite(object):
+ """
+ Composite for a FetchStrategy object. Implements the GoF composite pattern.
+ """
+ matches = FetchStrategy.matches
+ set_stage = FetchStrategy.set_stage
+
+
class URLFetchStrategy(FetchStrategy):
"""FetchStrategy that pulls source code from a URL for an archive,
checks the archive against a checksum,and decompresses the archive.
@@ -129,6 +144,8 @@ class URLFetchStrategy(FetchStrategy):
self.digest = kwargs.get('md5', None)
if not self.digest: self.digest = digest
+ self.expand_archive = kwargs.get('expand', True)
+
if not self.url:
raise ValueError("URLFetchStrategy requires a url for fetching.")
@@ -137,24 +154,24 @@ class URLFetchStrategy(FetchStrategy):
self.stage.chdir()
if self.archive_file:
- tty.msg("Already downloaded %s." % self.archive_file)
+ tty.msg("Already downloaded %s" % self.archive_file)
return
tty.msg("Trying to fetch from %s" % self.url)
- curl_args = ['-O', # save file to disk
- '-f', # fail on >400 errors
- '-D', '-', # print out HTML headers
- '-L', self.url,]
+ curl_args = ['-O', # save file to disk
+ '-f', # fail on >400 errors
+ '-D', '-', # print out HTML headers
+ '-L', self.url, ]
if sys.stdout.isatty():
curl_args.append('-#') # status bar when using a tty
else:
- curl_args.append('-sS') # just errors when not.
+ curl_args.append('-sS') # just errors when not.
# Run curl but grab the mime type from the http headers
headers = spack.curl(
- *curl_args, return_output=True, fail_on_error=False)
+ *curl_args, output=str, fail_on_error=False)
if spack.curl.returncode != 0:
# clean up archive on failure.
@@ -164,24 +181,23 @@ class URLFetchStrategy(FetchStrategy):
if spack.curl.returncode == 22:
# This is a 404. Curl will print the error.
raise FailedDownloadError(
- self.url, "URL %s was not found!" % self.url)
+ self.url, "URL %s was not found!" % self.url)
elif spack.curl.returncode == 60:
# This is a certificate error. Suggest spack -k
raise FailedDownloadError(
- self.url,
- "Curl was unable to fetch due to invalid certificate. "
- "This is either an attack, or your cluster's SSL configuration "
- "is bad. If you believe your SSL configuration is bad, you "
- "can try running spack -k, which will not check SSL certificates."
- "Use this at your own risk.")
+ self.url,
+ "Curl was unable to fetch due to invalid certificate. "
+ "This is either an attack, or your cluster's SSL configuration "
+ "is bad. If you believe your SSL configuration is bad, you "
+ "can try running spack -k, which will not check SSL certificates."
+ "Use this at your own risk.")
else:
# This is some other curl error. Curl will print the
# error, but print a spack message too
raise FailedDownloadError(
- self.url, "Curl failed with error %d" % spack.curl.returncode)
-
+ self.url, "Curl failed with error %d" % spack.curl.returncode)
# Check if we somehow got an HTML file rather than the archive we
# asked for. We only look at the last content type, to handle
@@ -190,13 +206,12 @@ class URLFetchStrategy(FetchStrategy):
if content_types and 'text/html' in content_types[-1]:
tty.warn("The contents of " + self.archive_file + " look like HTML.",
"The checksum will likely be bad. If it is, you can use",
- "'spack clean --dist' to remove the bad archive, then fix",
+ "'spack clean <package>' to remove the bad archive, then fix",
"your internet gateway issue and install again.")
if not self.archive_file:
raise FailedDownloadError(self.url)
-
@property
def archive_file(self):
"""Path to the source archive within this stage directory."""
@@ -204,12 +219,16 @@ class URLFetchStrategy(FetchStrategy):
@_needs_stage
def expand(self):
+ if not self.expand_archive:
+ tty.msg("Skipping expand step for %s" % self.archive_file)
+ return
+
tty.msg("Staging archive: %s" % self.archive_file)
self.stage.chdir()
if not self.archive_file:
raise NoArchiveFileError("URLFetchStrategy couldn't find archive file",
- "Failed on expand() for URL %s" % self.url)
+ "Failed on expand() for URL %s" % self.url)
decompress = decompressor_for(self.archive_file)
@@ -241,7 +260,6 @@ class URLFetchStrategy(FetchStrategy):
# Set the wd back to the stage when done.
self.stage.chdir()
-
def archive(self, destination):
"""Just moves this archive to the destination."""
if not self.archive_file:
@@ -252,7 +270,6 @@ class URLFetchStrategy(FetchStrategy):
shutil.move(self.archive_file, destination)
-
@_needs_stage
def check(self):
"""Check the downloaded archive against a checksum digest.
@@ -263,9 +280,8 @@ class URLFetchStrategy(FetchStrategy):
checker = crypto.Checker(self.digest)
if not checker.check(self.archive_file):
raise ChecksumError(
- "%s checksum failed for %s." % (checker.hash_name, self.archive_file),
- "Expected %s but got %s." % (self.digest, checker.sum))
-
+ "%s checksum failed for %s" % (checker.hash_name, self.archive_file),
+ "Expected %s but got %s" % (self.digest, checker.sum))
@_needs_stage
def reset(self):
@@ -273,16 +289,20 @@ class URLFetchStrategy(FetchStrategy):
if not self.archive_file:
raise NoArchiveFileError("Tried to reset URLFetchStrategy before fetching",
"Failed on reset() for URL %s" % self.url)
- if self.stage.source_path:
- shutil.rmtree(self.stage.source_path, ignore_errors=True)
- self.expand()
+        # Remove everything but the archive from the stage
+ for filename in os.listdir(self.stage.path):
+ abspath = os.path.join(self.stage.path, filename)
+ if abspath != self.archive_file:
+ shutil.rmtree(abspath, ignore_errors=True)
+
+ # Expand the archive again
+ self.expand()
def __repr__(self):
url = self.url if self.url else "no url"
return "URLFetchStrategy<%s>" % url
-
def __str__(self):
if self.url:
return self.url
@@ -298,33 +318,30 @@ class VCSFetchStrategy(FetchStrategy):
# Set a URL based on the type of fetch strategy.
self.url = kwargs.get(name, None)
if not self.url: raise ValueError(
- "%s requires %s argument." % (self.__class__, name))
+ "%s requires %s argument." % (self.__class__, name))
# Ensure that there's only one of the rev_types
if sum(k in kwargs for k in rev_types) > 1:
raise FetchStrategyError(
- "Supply only one of %s to fetch with %s." % (
- comma_or(rev_types), name))
+ "Supply only one of %s to fetch with %s" % (
+ comma_or(rev_types), name))
# Set attributes for each rev type.
for rt in rev_types:
setattr(self, rt, kwargs.get(rt, None))
-
@_needs_stage
def check(self):
- tty.msg("No checksum needed when fetching with %s." % self.name)
-
+ tty.msg("No checksum needed when fetching with %s" % self.name)
@_needs_stage
def expand(self):
tty.debug("Source fetched with %s is already expanded." % self.name)
-
@_needs_stage
def archive(self, destination, **kwargs):
- assert(extension(destination) == 'tar.gz')
- assert(self.stage.source_path.startswith(self.stage.path))
+ assert (extension(destination) == 'tar.gz')
+ assert (self.stage.source_path.startswith(self.stage.path))
tar = which('tar', required=True)
@@ -338,16 +355,13 @@ class VCSFetchStrategy(FetchStrategy):
self.stage.chdir()
tar('-czf', destination, os.path.basename(self.stage.source_path))
-
def __str__(self):
return "VCS: %s" % self.url
-
def __repr__(self):
return "%s<%s>" % (self.__class__, self.url)
-
class GitFetchStrategy(VCSFetchStrategy):
"""Fetch strategy that gets source code from a git repository.
Use like this in a package:
@@ -368,30 +382,31 @@ class GitFetchStrategy(VCSFetchStrategy):
required_attributes = ('git',)
def __init__(self, **kwargs):
+ # Discards the keywords in kwargs that may conflict with the next call to __init__
+ forwarded_args = copy.copy(kwargs)
+ forwarded_args.pop('name', None)
+
super(GitFetchStrategy, self).__init__(
- 'git', 'tag', 'branch', 'commit', **kwargs)
+ 'git', 'tag', 'branch', 'commit', **forwarded_args)
self._git = None
-
@property
def git_version(self):
- vstring = self.git('--version', return_output=True).lstrip('git version ')
+ vstring = self.git('--version', output=str).lstrip('git version ')
return Version(vstring)
-
@property
def git(self):
if not self._git:
self._git = which('git', required=True)
return self._git
-
@_needs_stage
def fetch(self):
self.stage.chdir()
if self.stage.source_path:
- tty.msg("Already fetched %s." % self.stage.source_path)
+ tty.msg("Already fetched %s" % self.stage.source_path)
return
args = []
@@ -418,7 +433,7 @@ class GitFetchStrategy(VCSFetchStrategy):
if self.branch:
args.extend(['--branch', self.branch])
elif self.tag and self.git_version >= ver('1.8.5.2'):
- args.extend(['--branch', self.tag])
+ args.extend(['--branch', self.tag])
# Try to be efficient if we're using a new enough git.
# This checks out only one branch's history
@@ -429,7 +444,7 @@ class GitFetchStrategy(VCSFetchStrategy):
# Yet more efficiency, only download a 1-commit deep tree
if self.git_version >= ver('1.7.1'):
try:
- self.git(*(args + ['--depth','1', self.url]))
+ self.git(*(args + ['--depth', '1', self.url]))
cloned = True
except spack.error.SpackError:
# This will fail with the dumb HTTP transport
@@ -452,18 +467,15 @@ class GitFetchStrategy(VCSFetchStrategy):
self.git('pull', '--tags', ignore_errors=1)
self.git('checkout', self.tag)
-
def archive(self, destination):
super(GitFetchStrategy, self).archive(destination, exclude='.git')
-
@_needs_stage
def reset(self):
self.stage.chdir_to_source()
self.git('checkout', '.')
self.git('clean', '-f')
-
def __str__(self):
return "[git] %s" % self.url
@@ -483,26 +495,28 @@ class SvnFetchStrategy(VCSFetchStrategy):
required_attributes = ['svn']
def __init__(self, **kwargs):
+ # Discards the keywords in kwargs that may conflict with the next call to __init__
+ forwarded_args = copy.copy(kwargs)
+ forwarded_args.pop('name', None)
+
super(SvnFetchStrategy, self).__init__(
- 'svn', 'revision', **kwargs)
+ 'svn', 'revision', **forwarded_args)
self._svn = None
if self.revision is not None:
self.revision = str(self.revision)
-
@property
def svn(self):
if not self._svn:
self._svn = which('svn', required=True)
return self._svn
-
@_needs_stage
def fetch(self):
self.stage.chdir()
if self.stage.source_path:
- tty.msg("Already fetched %s." % self.stage.source_path)
+ tty.msg("Already fetched %s" % self.stage.source_path)
return
tty.msg("Trying to check out svn repository: %s" % self.url)
@@ -515,10 +529,9 @@ class SvnFetchStrategy(VCSFetchStrategy):
self.svn(*args)
self.stage.chdir_to_source()
-
def _remove_untracked_files(self):
"""Removes untracked files in an svn repository."""
- status = self.svn('status', '--no-ignore', return_output=True)
+ status = self.svn('status', '--no-ignore', output=str)
self.svn('status', '--no-ignore')
for line in status.split('\n'):
if not re.match('^[I?]', line):
@@ -529,23 +542,19 @@ class SvnFetchStrategy(VCSFetchStrategy):
elif os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
-
def archive(self, destination):
super(SvnFetchStrategy, self).archive(destination, exclude='.svn')
-
@_needs_stage
def reset(self):
self.stage.chdir_to_source()
self._remove_untracked_files()
self.svn('revert', '.', '-R')
-
def __str__(self):
return "[svn] %s" % self.url
-
class HgFetchStrategy(VCSFetchStrategy):
"""Fetch strategy that gets source code from a Mercurial repository.
Use like this in a package:
@@ -567,11 +576,14 @@ class HgFetchStrategy(VCSFetchStrategy):
required_attributes = ['hg']
def __init__(self, **kwargs):
+ # Discards the keywords in kwargs that may conflict with the next call to __init__
+ forwarded_args = copy.copy(kwargs)
+ forwarded_args.pop('name', None)
+
super(HgFetchStrategy, self).__init__(
- 'hg', 'revision', **kwargs)
+ 'hg', 'revision', **forwarded_args)
self._hg = None
-
@property
def hg(self):
if not self._hg:
@@ -583,7 +595,7 @@ class HgFetchStrategy(VCSFetchStrategy):
self.stage.chdir()
if self.stage.source_path:
- tty.msg("Already fetched %s." % self.stage.source_path)
+ tty.msg("Already fetched %s" % self.stage.source_path)
return
args = []
@@ -597,11 +609,9 @@ class HgFetchStrategy(VCSFetchStrategy):
self.hg(*args)
-
def archive(self, destination):
super(HgFetchStrategy, self).archive(destination, exclude='.hg')
-
@_needs_stage
def reset(self):
self.stage.chdir()
@@ -619,7 +629,6 @@ class HgFetchStrategy(VCSFetchStrategy):
shutil.move(scrubbed, source_path)
self.stage.chdir_to_source()
-
def __str__(self):
return "[hg] %s" % self.url
@@ -634,6 +643,22 @@ def from_url(url):
return URLFetchStrategy(url)
+def from_kwargs(**kwargs):
+ """
+ Construct the appropriate FetchStrategy from the given keyword arguments.
+
+ :param kwargs: dictionary of keyword arguments
+    :return: a fetcher, or raise FetchError if no known strategy matches
+ """
+ for fetcher in all_strategies:
+ if fetcher.matches(kwargs):
+ return fetcher(**kwargs)
+ # Raise an error in case we can't instantiate any known strategy
+ message = "Cannot instantiate any FetchStrategy"
+    long_message = message + " from the given arguments: {arguments}".format(arguments=kwargs)
+ raise FetchError(message, long_message)
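A hypothetical call: the presence of 'url' matches URLFetchStrategy, while e.g. git= plus tag= would match GitFetchStrategy instead:

    fetcher = from_kwargs(url='http://example.com/foo-1.0.tar.gz',
                          md5='0123456789abcdef0123456789abcdef')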
+
+
def args_are_for(args, fetcher):
fetcher.matches(args)
@@ -671,15 +696,16 @@ def for_package_version(pkg, version):
class FetchError(spack.error.SpackError):
- def __init__(self, msg, long_msg):
+ def __init__(self, msg, long_msg=None):
super(FetchError, self).__init__(msg, long_msg)
class FailedDownloadError(FetchError):
"""Raised wen a download fails."""
+
def __init__(self, url, msg=""):
super(FailedDownloadError, self).__init__(
- "Failed to fetch file from URL: %s" % url, msg)
+ "Failed to fetch file from URL: %s" % url, msg)
self.url = url
@@ -689,7 +715,7 @@ class NoArchiveFileError(FetchError):
class NoDigestError(FetchError):
- def __init__(self, msg, long_msg):
+ def __init__(self, msg, long_msg=None):
super(NoDigestError, self).__init__(msg, long_msg)
@@ -702,12 +728,14 @@ class InvalidArgsError(FetchError):
class ChecksumError(FetchError):
"""Raised when archive fails to checksum."""
+
def __init__(self, message, long_msg=None):
super(ChecksumError, self).__init__(message, long_msg)
class NoStageError(FetchError):
"""Raised when fetch operations are called before set_stage()."""
+
def __init__(self, method):
super(NoStageError, self).__init__(
- "Must call FetchStrategy.set_stage() before calling %s" % method.__name__)
+ "Must call FetchStrategy.set_stage() before calling %s" % method.__name__)
diff --git a/lib/spack/spack/graph.py b/lib/spack/spack/graph.py
index 5fb6a9cd23..f3732dfbff 100644
--- a/lib/spack/spack/graph.py
+++ b/lib/spack/spack/graph.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -523,7 +523,7 @@ def graph_dot(*specs, **kwargs):
return '"%s"' % string
if not specs:
- specs = [p.name for p in spack.db.all_packages()]
+ specs = [p.name for p in spack.repo.all_packages()]
else:
roots = specs
specs = set()
diff --git a/lib/spack/spack/hooks/__init__.py b/lib/spack/spack/hooks/__init__.py
index 1c44e8abaa..2765f7be39 100644
--- a/lib/spack/spack/hooks/__init__.py
+++ b/lib/spack/spack/hooks/__init__.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/hooks/dotkit.py b/lib/spack/spack/hooks/dotkit.py
index 4e748ff80a..9123637356 100644
--- a/lib/spack/spack/hooks/dotkit.py
+++ b/lib/spack/spack/hooks/dotkit.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/hooks/extensions.py b/lib/spack/spack/hooks/extensions.py
index cf87a78c8c..627184cabd 100644
--- a/lib/spack/spack/hooks/extensions.py
+++ b/lib/spack/spack/hooks/extensions.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -27,9 +27,7 @@ import spack
def pre_uninstall(pkg):
- # Need to do this b/c uninstall does not automatically do it.
- # TODO: store full graph info in stored .spec file.
- pkg.spec.normalize()
+ assert(pkg.spec.concrete)
if pkg.is_extension:
if pkg.activated:
diff --git a/lib/spack/spack/hooks/sbang.py b/lib/spack/spack/hooks/sbang.py
new file mode 100644
index 0000000000..d78adb576e
--- /dev/null
+++ b/lib/spack/spack/hooks/sbang.py
@@ -0,0 +1,98 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import os
+
+from llnl.util.filesystem import *
+import llnl.util.tty as tty
+
+import spack
+import spack.modules
+
+# Character limit for shebang line. Using Linux's 127 characters
+# here, as it is the shortest I could find on a modern OS.
+shebang_limit = 127
+
+def shebang_too_long(path):
+ """Detects whether a file has a shebang line that is too long."""
+ with open(path, 'r') as script:
+ bytes = script.read(2)
+ if bytes != '#!':
+ return False
+
+ line = bytes + script.readline()
+ return len(line) > shebang_limit
+
+
+def filter_shebang(path):
+ """Adds a second shebang line, using sbang, at the beginning of a file."""
+ with open(path, 'r') as original_file:
+ original = original_file.read()
+
+ # This line will be prepended to file
+ new_sbang_line = '#!/bin/bash %s/bin/sbang\n' % spack.spack_root
+
+ # Skip files that are already using sbang.
+ if original.startswith(new_sbang_line):
+ return
+
+ backup = path + ".shebang.bak"
+ os.rename(path, backup)
+
+ with open(path, 'w') as new_file:
+ new_file.write(new_sbang_line)
+ new_file.write(original)
+
+ copy_mode(backup, path)
+ unset_executable_mode(backup)
+
+ tty.warn("Patched overly long shebang in %s" % path)
+
+
+def filter_shebangs_in_directory(directory):
+ for file in os.listdir(directory):
+ path = os.path.join(directory, file)
+
+ # only handle files
+ if not os.path.isfile(path):
+ continue
+
+ # only handle links that resolve within THIS package's prefix.
+ if os.path.islink(path):
+ real_path = os.path.realpath(path)
+ if not real_path.startswith(directory + os.sep):
+ continue
+
+ # test the file for a long shebang, and filter
+ if shebang_too_long(path):
+ filter_shebang(path)
+
+
+def post_install(pkg):
+ """This hook edits scripts so that they call /bin/bash
+ $spack_prefix/bin/sbang instead of something longer than the
+ shebang limit."""
+ if not os.path.isdir(pkg.prefix.bin):
+ return
+ filter_shebangs_in_directory(pkg.prefix.bin)
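A quick check of the detection logic on a throwaway script (path and contents hypothetical); any shebang longer than 127 characters triggers the filter:

    import tempfile

    with tempfile.NamedTemporaryFile('w', suffix='.sh', delete=False) as f:
        f.write('#!' + '/x' * 70 + '\necho hello\n')  # ~142-character shebang

    assert shebang_too_long(f.name)
    # filter_shebang(f.name) would then prepend the sbang line shown above.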
diff --git a/lib/spack/spack/hooks/tclmodule.py b/lib/spack/spack/hooks/tclmodule.py
index 0b9fd5a67c..8b315f27a2 100644
--- a/lib/spack/spack/hooks/tclmodule.py
+++ b/lib/spack/spack/hooks/tclmodule.py
@@ -6,7 +6,7 @@
# Written by David Beckingsale, david@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/mirror.py b/lib/spack/spack/mirror.py
index 306c8085aa..6981f69ac0 100644
--- a/lib/spack/spack/mirror.py
+++ b/lib/spack/spack/mirror.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -26,7 +26,7 @@
This file contains code for creating spack mirror directories. A
mirror is an organized hierarchy containing specially named archive
files. This enables spack to know where to find files in a mirror if
-the main server for a particualr package is down. Or, if the computer
+the main server for a particular package is down. Or, if the computer
where spack is run is not connected to the internet, it allows spack
to download packages directly from a mirror (e.g., on an intranet).
"""
@@ -42,28 +42,34 @@ import spack.fetch_strategy as fs
from spack.spec import Spec
from spack.stage import Stage
from spack.version import *
-from spack.util.compression import extension
+from spack.util.compression import extension, allowed_archive
-def mirror_archive_filename(spec):
+def mirror_archive_filename(spec, fetcher):
"""Get the name of the spec's archive in the mirror."""
if not spec.version.concrete:
raise ValueError("mirror.path requires spec with concrete version.")
- fetcher = spec.package.fetcher
if isinstance(fetcher, fs.URLFetchStrategy):
- # If we fetch this version with a URLFetchStrategy, use URL's archive type
- ext = url.downloaded_file_extension(fetcher.url)
+ if fetcher.expand_archive:
+ # If we fetch this version with a URLFetchStrategy, use URL's archive type
+ ext = url.downloaded_file_extension(fetcher.url)
+ else:
+ # If the archive shouldn't be expanded, don't check for its extension.
+ ext = None
else:
# Otherwise we'll make a .tar.gz ourselves
ext = 'tar.gz'
- return "%s-%s.%s" % (spec.package.name, spec.version, ext)
+ filename = "%s-%s" % (spec.package.name, spec.version)
+ if ext:
+ filename += ".%s" % ext
+ return filename
-def mirror_archive_path(spec):
+def mirror_archive_path(spec, fetcher):
"""Get the relative path to the spec's archive within a mirror."""
- return join_path(spec.name, mirror_archive_filename(spec))
+ return join_path(spec.name, mirror_archive_filename(spec, fetcher))
def get_matching_versions(specs, **kwargs):
@@ -74,10 +80,11 @@ def get_matching_versions(specs, **kwargs):
# Skip any package that has no known versions.
if not pkg.versions:
- tty.msg("No safe (checksummed) versions for package %s." % pkg.name)
+ tty.msg("No safe (checksummed) versions for package %s" % pkg.name)
continue
num_versions = kwargs.get('num_versions', 0)
+ matching_spec = []
for i, v in enumerate(reversed(sorted(pkg.versions))):
# Generate no more than num_versions versions for each spec.
if num_versions and i >= num_versions:
@@ -87,11 +94,29 @@ def get_matching_versions(specs, **kwargs):
if v.satisfies(spec.versions):
s = Spec(pkg.name)
s.versions = VersionList([v])
- matching.append(s)
+ s.variants = spec.variants.copy()
+ matching_spec.append(s)
+
+ if not matching_spec:
+ tty.warn("No known version matches spec: %s" % spec)
+ matching.extend(matching_spec)
return matching
+def suggest_archive_basename(resource):
+ """
+ Return a tentative basename for an archive; raise an exception if the basename is not an allowed archive type.
+
+ :param resource: resource whose fetcher URL provides the archive basename
+ :return: tentative basename for the archive
+ """
+ basename = os.path.basename(resource.fetcher.url)
+ if not allowed_archive(basename):
+ raise RuntimeError("%s is not an allowed archive type" % basename)
+ return basename
+
+
def create(path, specs, **kwargs):
"""Create a directory to be used as a spack mirror, and fill it with
package archives.
@@ -108,7 +133,7 @@ def create(path, specs, **kwargs):
Return Value:
Returns a tuple of lists: (present, mirrored, error)
- * present: Package specs that were already prsent.
+ * present: Package specs that were already present.
* mirrored: Package specs that were successfully mirrored.
* error: Package specs that failed to mirror due to some error.
@@ -132,59 +157,72 @@ def create(path, specs, **kwargs):
# Get the absolute path of the root before we start jumping around.
mirror_root = os.path.abspath(path)
if not os.path.isdir(mirror_root):
- mkdirp(mirror_root)
+ try:
+ mkdirp(mirror_root)
+ except OSError as e:
+ raise MirrorError(
+ "Cannot create directory '%s':" % mirror_root, str(e))
# Things to keep track of while parsing specs.
- present = []
- mirrored = []
- error = []
+ categories = {
+ 'present': [],
+ 'mirrored': [],
+ 'error': []
+ }
# Iterate through packages and download all the safe tarballs for each of them
for spec in version_specs:
- pkg = spec.package
-
- stage = None
- try:
- # create a subdirectory for the current package@version
- archive_path = os.path.abspath(join_path(path, mirror_archive_path(spec)))
- subdir = os.path.dirname(archive_path)
- mkdirp(subdir)
-
- if os.path.exists(archive_path):
- tty.msg("Already added %s" % spec.format("$_$@"))
- present.append(spec)
- continue
-
- # Set up a stage and a fetcher for the download
- unique_fetch_name = spec.format("$_$@")
- fetcher = fs.for_package_version(pkg, pkg.version)
- stage = Stage(fetcher, name=unique_fetch_name)
- fetcher.set_stage(stage)
-
- # Do the fetch and checksum if necessary
- fetcher.fetch()
- if not kwargs.get('no_checksum', False):
- fetcher.check()
- tty.msg("Checksum passed for %s@%s" % (pkg.name, pkg.version))
-
- # Fetchers have to know how to archive their files. Use
- # that to move/copy/create an archive in the mirror.
- fetcher.archive(archive_path)
- tty.msg("Added %s." % spec.format("$_$@"))
- mirrored.append(spec)
-
- except Exception, e:
- if spack.debug:
- sys.excepthook(*sys.exc_info())
- else:
- tty.warn("Error while fetching %s." % spec.format('$_$@'), e.message)
- error.append(spec)
-
- finally:
- if stage:
- stage.destroy()
-
- return (present, mirrored, error)
+ add_single_spec(spec, mirror_root, categories, **kwargs)
+
+ return categories['present'], categories['mirrored'], categories['error']
+
+
+def add_single_spec(spec, mirror_root, categories, **kwargs):
+ tty.msg("Adding package {pkg} to mirror".format(pkg=spec.format("$_$@")))
+ spec_exists_in_mirror = True
+ try:
+ with spec.package.stage:
+ for ii, stage in enumerate(spec.package.stage):
+ fetcher = stage.fetcher
+ if ii == 0:
+ # create a subdirectory for the current package@version
+ archive_path = os.path.abspath(join_path(mirror_root, mirror_archive_path(spec, fetcher)))
+ name = spec.format("$_$@")
+ else:
+ resource = stage.resource
+ archive_path = join_path(subdir, suggest_archive_basename(resource))
+ name = "{resource} ({pkg})".format(resource=resource.name, pkg=spec.format("$_$@"))
+ subdir = os.path.dirname(archive_path)
+ mkdirp(subdir)
+
+ if os.path.exists(archive_path):
+ tty.msg("{name} : already added".format(name=name))
+ else:
+ spec_exists_in_mirror = False
+ fetcher.fetch()
+ if not kwargs.get('no_checksum', False):
+ fetcher.check()
+ tty.msg("{name} : checksum passed".format(name=name))
+
+ # Fetchers have to know how to archive their files. Use
+ # that to move/copy/create an archive in the mirror.
+ fetcher.archive(archive_path)
+ tty.msg("{name} : added".format(name=name))
+
+ if spec_exists_in_mirror:
+ categories['present'].append(spec)
+ else:
+ categories['mirrored'].append(spec)
+ except Exception as e:
+ if spack.debug:
+ sys.excepthook(*sys.exc_info())
+ else:
+ tty.warn("Error while fetching %s" % spec.format('$_$@'), e.message)
+ categories['error'].append(spec)
class MirrorError(spack.error.SpackError):
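
A hedged sketch of how the pieces above fit together when called directly (the mirror path and spec are hypothetical; the `spack mirror` command is the usual entry point):

    import spack.mirror
    from spack.spec import Spec

    # Mirror the known, checksummed versions of libelf under /data/mirror,
    # using the <name>/<name>-<version>.<ext> layout produced by
    # mirror_archive_path() above.
    present, mirrored, error = spack.mirror.create('/data/mirror', [Spec('libelf')])
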
diff --git a/lib/spack/spack/modules.py b/lib/spack/spack/modules.py
index 56a61adefb..61624fbd70 100644
--- a/lib/spack/spack/modules.py
+++ b/lib/spack/spack/modules.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,44 +22,43 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-"""This module contains code for creating environment modules, which
-can include dotkits, tcl modules, lmod, and others.
+"""
+This module contains code for creating environment modules, which can include dotkits, tcl modules, lmod, and others.
-The various types of modules are installed by post-install hooks and
-removed after an uninstall by post-uninstall hooks. This class
-consolidates the logic for creating an abstract description of the
-information that module systems need. Currently that includes a
-number directories to be appended to paths in the user's environment:
+The various types of modules are installed by post-install hooks and removed after an uninstall by post-uninstall hooks.
+This class consolidates the logic for creating an abstract description of the information that module systems need.
+Currently that includes a number of directories to be appended to paths in the user's environment:
* /bin directories to be appended to PATH
* /lib* directories for LD_LIBRARY_PATH
- * /man* and /share/man* directories for LD_LIBRARY_PATH
+ * /include directories for CPATH
+ * /man* and /share/man* directories for MANPATH
* the package prefix for CMAKE_PREFIX_PATH
-This module also includes logic for coming up with unique names for
-the module files so that they can be found by the various
-shell-support files in $SPACK/share/spack/setup-env.*.
+This module also includes logic for coming up with unique names for the module files so that they can be found by the
+various shell-support files in $SPACK/share/spack/setup-env.*.
-Each hook in hooks/ implements the logic for writing its specific type
-of module file.
+Each hook in hooks/ implements the logic for writing its specific type of module file.
"""
-__all__ = ['EnvModule', 'Dotkit', 'TclModule']
-
import os
+import os.path
import re
-import textwrap
import shutil
-from glob import glob
+import textwrap
import llnl.util.tty as tty
+import spack
+import spack.config
from llnl.util.filesystem import join_path, mkdirp
+from spack.environment import *
-import spack
+__all__ = ['EnvModule', 'Dotkit', 'TclModule']
-"""Registry of all types of modules. Entries created by EnvModule's
- metaclass."""
+# Registry of all types of modules. Entries created by EnvModule's metaclass
module_types = {}
+CONFIGURATION = spack.config.get_config('modules')
+
def print_help():
"""For use by commands to tell user how to activate shell support."""
@@ -78,74 +77,76 @@ def print_help():
"")
+def inspect_path(prefix):
+ """
+ Inspects the prefix of an installation for common directory layouts and requests the
+ corresponding environment modifications for each item found.
+
+ Args:
+ prefix: prefix of the installation
+
+ Returns:
+ instance of EnvironmentModifications containing the requested modifications
+ """
+ env = EnvironmentModifications()
+ # Inspect the prefix to check for the existence of common directories
+ prefix_inspections = {
+ 'bin': ('PATH',),
+ 'man': ('MANPATH',),
+ 'lib': ('LIBRARY_PATH', 'LD_LIBRARY_PATH'),
+ 'lib64': ('LIBRARY_PATH', 'LD_LIBRARY_PATH'),
+ 'include': ('CPATH',)
+ }
+ for attribute, variables in prefix_inspections.items():
+ expected = getattr(prefix, attribute)
+ if os.path.isdir(expected):
+ for variable in variables:
+ env.prepend_path(variable, expected)
+ # PKGCONFIG
+ for expected in (join_path(prefix.lib, 'pkgconfig'), join_path(prefix.lib64, 'pkgconfig')):
+ if os.path.isdir(expected):
+ env.prepend_path('PKG_CONFIG_PATH', expected)
+ # CMake related variables
+ env.prepend_path('CMAKE_PREFIX_PATH', prefix)
+ return env
+
+
class EnvModule(object):
name = 'env_module'
+ formats = {}
class __metaclass__(type):
def __init__(cls, name, bases, dict):
type.__init__(cls, name, bases, dict)
- if cls.name != 'env_module':
+ if cls.name != 'env_module' and cls.name in CONFIGURATION['enable']:
module_types[cls.name] = cls
-
def __init__(self, spec=None):
- # category in the modules system
- # TODO: come up with smarter category names.
- self.category = "spack"
+ self.spec = spec
+ self.pkg = spec.package # Just stored for convenience
- # Descriptions for the module system's UI
- self.short_description = ""
- self.long_description = ""
+ # short description default is just the package + version
+ # packages can provide this optional attribute
+ self.short_description = spec.format("$_ $@")
+ if hasattr(self.pkg, 'short_description'):
+ self.short_description = self.pkg.short_description
- # dict pathname -> list of directories to be prepended to in
- # the module file.
- self._paths = None
- self.spec = spec
+ # long description is the docstring with reduced whitespace.
+ self.long_description = None
+ if self.spec.package.__doc__:
+ self.long_description = re.sub(r'\s+', ' ', self.spec.package.__doc__)
@property
- def paths(self):
- if self._paths is None:
- self._paths = {}
-
- def add_path(path_name, directory):
- path = self._paths.setdefault(path_name, [])
- path.append(directory)
-
- # Add paths if they exist.
- for var, directory in [
- ('PATH', self.spec.prefix.bin),
- ('MANPATH', self.spec.prefix.man),
- ('MANPATH', self.spec.prefix.share_man),
- ('LIBRARY_PATH', self.spec.prefix.lib),
- ('LIBRARY_PATH', self.spec.prefix.lib64),
- ('LD_LIBRARY_PATH', self.spec.prefix.lib),
- ('LD_LIBRARY_PATH', self.spec.prefix.lib64),
- ('PKG_CONFIG_PATH', join_path(self.spec.prefix.lib, 'pkgconfig')),
- ('PKG_CONFIG_PATH', join_path(self.spec.prefix.lib64, 'pkgconfig'))]:
-
- if os.path.isdir(directory):
- add_path(var, directory)
-
- # Add python path unless it's an actual python installation
- # TODO: is there a better way to do this?
- if self.spec.name != 'python':
- site_packages = glob(join_path(self.spec.prefix.lib, "python*/site-packages"))
- if site_packages:
- add_path('PYTHONPATH', site_packages[0])
-
- if self.spec.package.extends(spack.spec.Spec('ruby')):
- add_path('GEM_PATH', self.spec.prefix)
-
- # short description is just the package + version
- # TODO: maybe packages can optionally provide it.
- self.short_description = self.spec.format("$_ $@")
-
- # long description is the docstring with reduced whitespace.
- if self.spec.package.__doc__:
- self.long_description = re.sub(r'\s+', ' ', self.spec.package.__doc__)
-
- return self._paths
+ def category(self):
+ # Anything defined at the package level takes precedence
+ if hasattr(self.pkg, 'category'):
+ return self.pkg.category
+ # Extensions
+ for extendee in self.pkg.extendees:
+ return '{extendee} extension'.format(extendee=extendee)
+ # Not very descriptive fallback
+ return 'spack installed package'
def write(self):
@@ -154,18 +155,46 @@ class EnvModule(object):
if not os.path.exists(module_dir):
mkdirp(module_dir)
- # If there are no paths, no need for a dotkit.
- if not self.paths:
+ # Environment modifications guessed by inspecting the
+ # installation prefix
+ env = inspect_path(self.spec.prefix)
+
+ # Let the extendee modify their extensions before asking for
+ # package-specific modifications
+ spack_env = EnvironmentModifications()
+ for item in self.pkg.extendees:
+ try:
+ package = self.spec[item].package
+ package.setup_dependent_package(self.pkg.module, self.spec)
+ package.setup_dependent_environment(spack_env, env, self.spec)
+ except:
+ # The extends was conditional, so it doesn't count here
+ # eg: extends('python', when='+python')
+ pass
+
+ # Package-specific environment modifications
+ self.spec.package.setup_environment(spack_env, env)
+
+ # TODO : implement site-specific modifications and filters
+ if not env:
return
with open(self.file_name, 'w') as f:
- self._write(f)
-
+ self.write_header(f)
+ for line in self.process_environment_command(env):
+ f.write(line)
- def _write(self, stream):
- """To be implemented by subclasses."""
+ def write_header(self, stream):
raise NotImplementedError()
+ def process_environment_command(self, env):
+ for command in env:
+ try:
+ yield self.formats[type(command)].format(**command.args)
+ except KeyError:
+ tty.warn('Cannot handle command of type {command} : skipping request'.format(command=type(command)))
+ tty.warn('{context} at {filename}:{lineno}'.format(**command.args))
+
@property
def file_name(self):
@@ -173,35 +202,43 @@ class EnvModule(object):
where this module lives."""
raise NotImplementedError()
-
@property
def use_name(self):
"""Subclasses should implement this to return the name the
module command uses to refer to the package."""
raise NotImplementedError()
-
def remove(self):
mod_file = self.file_name
if os.path.exists(mod_file):
- shutil.rmtree(mod_file, ignore_errors=True)
+ try:
+ os.remove(mod_file) # Remove the module file
+ os.removedirs(os.path.dirname(mod_file)) # Remove all the empty directories from the leaf up
+ except OSError:
+ pass # removedirs throws OSError on first non-empty directory found
class Dotkit(EnvModule):
name = 'dotkit'
path = join_path(spack.share_path, "dotkit")
+ formats = {
+ PrependPath: 'dk_alter {name} {value}\n',
+ SetEnv: 'dk_setenv {name} {value}\n'
+ }
+
@property
def file_name(self):
- return join_path(Dotkit.path, self.spec.architecture,
- self.spec.format('$_$@$%@$+$#.dk'))
+ return join_path(Dotkit.path, self.spec.architecture, '%s.dk' % self.use_name)
@property
def use_name(self):
- return self.spec.format('$_$@$%@$+$#')
+ return "%s-%s-%s-%s-%s" % (self.spec.name, self.spec.version,
+ self.spec.compiler.name,
+ self.spec.compiler.version,
+ self.spec.dag_hash())
-
- def _write(self, dk_file):
+ def write_header(self, dk_file):
# Category
if self.category:
dk_file.write('#c %s\n' % self.category)
@@ -215,47 +252,41 @@ class Dotkit(EnvModule):
for line in textwrap.wrap(self.long_description, 72):
dk_file.write("#h %s\n" % line)
- # Path alterations
- for var, dirs in self.paths.items():
- for directory in dirs:
- dk_file.write("dk_alter %s %s\n" % (var, directory))
-
- # Let CMake find this package.
- dk_file.write("dk_alter CMAKE_PREFIX_PATH %s\n" % self.spec.prefix)
-
class TclModule(EnvModule):
name = 'tcl'
path = join_path(spack.share_path, "modules")
+ formats = {
+ PrependPath: 'prepend-path {name} \"{value}\"\n',
+ AppendPath: 'append-path {name} \"{value}\"\n',
+ RemovePath: 'remove-path {name} \"{value}\"\n',
+ SetEnv: 'setenv {name} \"{value}\"\n',
+ UnsetEnv: 'unsetenv {name}\n'
+ }
+
@property
def file_name(self):
return join_path(TclModule.path, self.spec.architecture, self.use_name)
-
@property
def use_name(self):
- return self.spec.format('$_$@$%@$+$#')
-
-
- def _write(self, m_file):
- # TODO: cateogry?
- m_file.write('#%Module1.0\n')
-
+ return "%s-%s-%s-%s-%s" % (self.spec.name, self.spec.version,
+ self.spec.compiler.name,
+ self.spec.compiler.version,
+ self.spec.dag_hash())
+
+ def write_header(self, module_file):
+ # TCL Modulefile header
+ module_file.write('#%Module1.0\n')
+ # TODO : category ?
# Short description
if self.short_description:
- m_file.write('module-whatis \"%s\"\n\n' % self.short_description)
+ module_file.write('module-whatis \"%s\"\n\n' % self.short_description)
# Long description
if self.long_description:
- m_file.write('proc ModulesHelp { } {\n')
- doc = re.sub(r'"', '\"', self.long_description)
- m_file.write("puts stderr \"%s\"\n" % doc)
- m_file.write('}\n\n')
-
- # Path alterations
- for var, dirs in self.paths.items():
- for directory in dirs:
- m_file.write("prepend-path %s \"%s\"\n" % (var, directory))
-
- m_file.write("prepend-path CMAKE_PREFIX_PATH \"%s\"\n" % self.spec.prefix)
+ module_file.write('proc ModulesHelp { } {\n')
+ for line in textwrap.wrap(self.long_description, 72):
+ module_file.write("puts stderr \"%s\"\n" % line)
+ module_file.write('}\n\n')
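
To make the formats lookup concrete, a small sketch (the prefix is hypothetical; the format string is TclModule's PrependPath entry defined above):

    from spack.environment import EnvironmentModifications

    env = EnvironmentModifications()
    env.prepend_path('PATH', '/example/prefix/bin')  # hypothetical prefix

    # process_environment_command() keys on type(command), so the request
    # above is rendered into a TCL module file as:
    #   prepend-path PATH "/example/prefix/bin"
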
diff --git a/lib/spack/spack/multimethod.py b/lib/spack/spack/multimethod.py
index 892619c6ac..3cd17e796a 100644
--- a/lib/spack/spack/multimethod.py
+++ b/lib/spack/spack/multimethod.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -138,7 +138,7 @@ class when(object):
methods like install() that depend on the package's spec.
For example:
- .. code-block::
+ .. code-block:: python
class SomePackage(Package):
...
@@ -163,26 +163,28 @@ class when(object):
if you only have part of the install that is platform specific, you
could do this:
- class SomePackage(Package):
- ...
- # virtual dependence on MPI.
- # could resolve to mpich, mpich2, OpenMPI
- depends_on('mpi')
+ .. code-block:: python
- def setup(self):
- # do nothing in the default case
- pass
+ class SomePackage(Package):
+ ...
+ # virtual dependence on MPI.
+ # could resolve to mpich, mpich2, OpenMPI
+ depends_on('mpi')
+
+ def setup(self):
+ # do nothing in the default case
+ pass
- @when('^openmpi')
- def setup(self):
- # do something special when this is built with OpenMPI for
- # its MPI implementations.
+ @when('^openmpi')
+ def setup(self):
+ # do something special when this is built with OpenMPI for
+ # its MPI implementations.
- def install(self, prefix):
- # Do common install stuff
- self.setup()
- # Do more common install stuff
+ def install(self, prefix):
+ # Do common install stuff
+ self.setup()
+ # Do more common install stuff
There must be one (and only one) @when clause that matches the
package's spec. If there is more than one, or if none match,
@@ -193,10 +195,11 @@ class when(object):
platform-specific versions. There's not much we can do to get
around this because of the way decorators work.
"""
-class when(object):
def __init__(self, spec):
pkg = get_calling_module_name()
- self.spec = parse_anonymous_spec(spec, pkg)
+ if spec is True:
+ spec = pkg
+ self.spec = parse_anonymous_spec(spec, pkg) if spec is not False else None
def __call__(self, method):
# Get the first definition of the method in the calling scope
@@ -207,7 +210,9 @@ class when(object):
if not type(original_method) == SpecMultiMethod:
original_method = SpecMultiMethod(original_method)
- original_method.register(self.spec, method)
+ if self.spec is not None:
+ original_method.register(self.spec, method)
+
return original_method
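
The boolean handling added above lets a package decide a condition eagerly, at class-definition time; a hedged sketch (the package and condition are hypothetical, with names imported as in a regular package file):

    import sys
    from spack import *  # brings Package, when, etc. into scope

    class Example(Package):                # hypothetical package
        @when(sys.platform == 'darwin')    # True: registered for the package's own spec
        def setup(self):
            pass                           # Darwin-specific behavior

        @when(sys.platform != 'darwin')    # False: this variant is never registered
        def setup(self):
            pass
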
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index c631a35bf3..4065553131 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -35,35 +35,33 @@ README.
"""
import os
import re
-import time
-import itertools
-import subprocess
-import platform as py_platform
-import multiprocessing
-from urlparse import urlparse, urljoin
import textwrap
-from StringIO import StringIO
+import time
+import glob
import llnl.util.tty as tty
-from llnl.util.tty.log import log_output
-from llnl.util.link_tree import LinkTree
-from llnl.util.filesystem import *
-from llnl.util.lang import *
-
import spack
-import spack.error
+import spack.build_environment
import spack.compilers
-import spack.mirror
-import spack.hooks
import spack.directives
-import spack.build_environment
+import spack.error
+import spack.fetch_strategy as fs
+import spack.hooks
+import spack.mirror
+import spack.repository
import spack.url
import spack.util.web
-import spack.fetch_strategy as fs
-from spack.version import *
-from spack.stage import Stage
-from spack.util.compression import allowed_archive, extension
+from StringIO import StringIO
+from llnl.util.filesystem import *
+from llnl.util.lang import *
+from llnl.util.link_tree import LinkTree
+from llnl.util.tty.log import log_output
+from spack.stage import Stage, ResourceStage, StageComposite
+from spack.util.compression import allowed_archive
+from spack.util.environment import dump_environment
from spack.util.executable import ProcessError
+from spack.version import *
+from urlparse import urlparse
"""Allowed URL schemes for spack packages."""
_ALLOWED_URL_SCHEMES = ["http", "https", "ftp", "file", "git"]
@@ -314,6 +312,18 @@ class Package(object):
"""Most packages are NOT extendable. Set to True if you want extensions."""
extendable = False
+ """List of prefix-relative file paths (or a single path). If these do
+ not exist after install, or if they exist but are not files,
+ sanity checks fail.
+ """
+ sanity_check_is_file = []
+
+ """List of prefix-relative directory paths (or a single path). If
+ these do not exist after install, or if they exist but are not
+ directories, sanity checks will fail.
+ """
+ sanity_check_is_dir = []
+
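
For illustration, a hypothetical package opting into these checks:

    class Libexample(Package):  # hypothetical package
        # Fail the install if these are missing from the prefix afterwards.
        sanity_check_is_file = ['include/example.h']
        sanity_check_is_dir = ['lib', 'share/man']
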
def __init__(self, spec):
# this determines how the package should be built.
@@ -325,6 +335,9 @@ class Package(object):
if '.' in self.name:
self.name = self.name[self.name.rindex('.') + 1:]
+ # Allow custom staging paths for packages
+ self.path = None
+
# Sanity check attributes required by Spack directives.
spack.directives.ensure_dicts(type(self))
@@ -372,7 +385,7 @@ class Package(object):
self._total_time = 0.0
if self.is_extension:
- spack.db.get(self.extendee_spec)._check_extendable()
+ spack.repo.get(self.extendee_spec)._check_extendable()
@property
@@ -431,23 +444,52 @@ class Package(object):
return spack.url.substitute_version(self.nearest_url(version),
self.url_version(version))
+ def _make_resource_stage(self, root_stage, fetcher, resource):
+ resource_stage_folder = self._resource_stage(resource)
+ resource_mirror = join_path(self.name, os.path.basename(fetcher.url))
+ stage = ResourceStage(resource.fetcher, root=root_stage, resource=resource,
+ name=resource_stage_folder, mirror_path=resource_mirror,
+ path=self.path)
+ return stage
+
+ def _make_root_stage(self, fetcher):
+ # Construct a mirror path (TODO: get this out of package.py)
+ mp = spack.mirror.mirror_archive_path(self.spec, fetcher)
+ # Construct a path where the stage should build..
+ s = self.spec
+ stage_name = "%s-%s-%s" % (s.name, s.version, s.dag_hash())
+ # Build the composite stage
+ stage = Stage(fetcher, mirror_path=mp, name=stage_name, path=self.path)
+ return stage
+
+ def _make_stage(self):
+ # Construct a composite stage on top of the composite FetchStrategy
+ composite_fetcher = self.fetcher
+ composite_stage = StageComposite()
+ resources = self._get_needed_resources()
+ for ii, fetcher in enumerate(composite_fetcher):
+ if ii == 0:
+ # Construct root stage first
+ stage = self._make_root_stage(fetcher)
+ else:
+ # Construct resource stage
+ resource = resources[ii - 1] # ii == 0 is root!
+ stage = self._make_resource_stage(composite_stage[0], fetcher, resource)
+ # Append the item to the composite
+ composite_stage.append(stage)
+
+ # Create stage on first access. Needed because fetch, stage,
+ # patch, and install can be called independently of each
+ # other, so `with self.stage:` in do_install isn't sufficient.
+ composite_stage.create()
+ return composite_stage
@property
def stage(self):
if not self.spec.concrete:
raise ValueError("Can only get a stage for a concrete package.")
-
if self._stage is None:
- # Construct a mirror path (TODO: get this out of package.py)
- mp = spack.mirror.mirror_archive_path(self.spec)
-
- # Construct a path where the stage should build..
- s = self.spec
- stage_name = "%s-%s-%s" % (s.name, s.version, s.dag_hash())
-
- # Build the stage
- self._stage = Stage(self.fetcher, mirror_path=mp, name=stage_name)
-
+ self._stage = self._make_stage()
return self._stage
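
For context, the composite machinery above exists so that a package can declare extra sources alongside its main archive; a hedged sketch of such a declaration (URLs and checksums are hypothetical):

    class Example(Package):  # hypothetical package
        url = 'http://example.com/example-1.0.tar.gz'
        version('1.0', '0123456789abcdef0123456789abcdef')

        # Each resource adds one fetcher to the composite fetcher and one
        # ResourceStage to the composite stage assembled by _make_stage().
        resource(name='extras',
                 url='http://example.com/extras-1.0.tar.gz',
                 md5='fedcba9876543210fedcba9876543210',
                 destination='extras')
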
@@ -457,17 +499,27 @@ class Package(object):
self._stage = stage
+ def _make_fetcher(self):
+ # Construct a composite fetcher that always contains at least
+ # one element (the root package). In case there are resources
+ # associated with the package, append their fetcher to the
+ # composite.
+ root_fetcher = fs.for_package_version(self, self.version)
+ fetcher = fs.FetchStrategyComposite() # Composite fetcher
+ fetcher.append(root_fetcher) # Root fetcher is always present
+ resources = self._get_needed_resources()
+ for resource in resources:
+ fetcher.append(resource.fetcher)
+ return fetcher
+
@property
def fetcher(self):
if not self.spec.versions.concrete:
- raise ValueError(
- "Can only get a fetcher for a package with concrete versions.")
-
+ raise ValueError("Can only get a fetcher for a package with concrete versions.")
if not self._fetcher:
- self._fetcher = fs.for_package_version(self, self.version)
+ self._fetcher = self._make_fetcher()
return self._fetcher
-
@fetcher.setter
def fetcher(self, f):
self._fetcher = f
@@ -487,9 +539,15 @@ class Package(object):
if name == dep.name:
return dep
- # Otherwise return the spec from the extends() directive
- spec, kwargs = self.extendees[name]
- return spec
+ # if the spec is concrete already, then it extends something
+ # that is an *optional* dependency, and the dep isn't there.
+ if self.spec._concrete:
+ return None
+ else:
+ # If it's not concrete, then return the spec from the
+ # extends() directive since that is all we know so far.
+ spec, kwargs = self.extendees[name]
+ return spec
@property
@@ -497,18 +555,28 @@ class Package(object):
"""Spec of the extendee of this package, or None if it is not an extension."""
if not self.extendees:
return None
+
+ # TODO: allow multiple extendees.
name = next(iter(self.extendees))
return self.extendees[name][1]
@property
def is_extension(self):
- return len(self.extendees) > 0
+ # if it is concrete, it's only an extension if it actually
+ # depends on the extendee.
+ if self.spec._concrete:
+ return self.extendee_spec is not None
+ else:
+ # If not, then it's an extension if it *could* be an extension
+ return bool(self.extendees)
def extends(self, spec):
- return (spec.name in self.extendees and
- spec.satisfies(self.extendees[spec.name][0]))
+ if not spec.name in self.extendees:
+ return False
+ s = self.extendee_spec
+ return s and s.satisfies(spec)
@property
@@ -548,7 +616,7 @@ class Package(object):
yield spec
continue
- for pkg in spack.db.get(name).preorder_traversal(visited, **kwargs):
+ for pkg in spack.repo.get(name).preorder_traversal(visited, **kwargs):
yield pkg
@@ -613,8 +681,8 @@ class Package(object):
spack.install_layout.remove_install_directory(self.spec)
- def do_fetch(self):
- """Creates a stage directory and downloads the taball for this package.
+ def do_fetch(self, mirror_only=False):
+ """Creates a stage directory and downloads the tarball for this package.
Working directory will be set to the stage directory.
"""
if not self.spec.concrete:
@@ -636,29 +704,23 @@ class Package(object):
if not ignore_checksum:
raise FetchError(
- "Will not fetch %s." % self.spec.format('$_$@'), checksum_msg)
+ "Will not fetch %s" % self.spec.format('$_$@'), checksum_msg)
+
+ self.stage.fetch(mirror_only)
- self.stage.fetch()
self._fetch_time = time.time() - start_time
if spack.do_checksum and self.version in self.versions:
self.stage.check()
-
- def do_stage(self):
+ def do_stage(self, mirror_only=False):
"""Unpacks the fetched tarball, then changes into the expanded tarball
directory."""
if not self.spec.concrete:
raise ValueError("Can only stage concrete packages.")
- self.do_fetch()
-
- archive_dir = self.stage.source_path
- if not archive_dir:
- self.stage.expand_archive()
- tty.msg("Created stage in %s." % self.stage.path)
- else:
- tty.msg("Already staged %s in %s." % (self.name, self.stage.path))
+ self.do_fetch(mirror_only)
+ self.stage.expand_archive()
self.stage.chdir_to_source()
@@ -676,19 +738,20 @@ class Package(object):
# If there are no patches, note it.
if not self.patches and not has_patch_fun:
- tty.msg("No patches needed for %s." % self.name)
+ tty.msg("No patches needed for %s" % self.name)
return
# Construct paths to special files in the archive dir used to
# keep track of whether patches were successfully applied.
- archive_dir = self.stage.source_path
- good_file = join_path(archive_dir, '.spack_patched')
- bad_file = join_path(archive_dir, '.spack_patch_failed')
+ archive_dir = self.stage.source_path
+ good_file = join_path(archive_dir, '.spack_patched')
+ no_patches_file = join_path(archive_dir, '.spack_no_patches')
+ bad_file = join_path(archive_dir, '.spack_patch_failed')
# If we encounter an archive that failed to patch, restage it
# so that we can apply all the patches again.
if os.path.isfile(bad_file):
- tty.msg("Patching failed last time. Restaging.")
+ tty.msg("Patching failed last time. Restaging.")
self.stage.restage()
self.stage.chdir_to_source()
@@ -697,29 +760,52 @@ class Package(object):
if os.path.isfile(good_file):
tty.msg("Already patched %s" % self.name)
return
+ elif os.path.isfile(no_patches_file):
+ tty.msg("No patches needed for %s" % self.name)
+ return
# Apply all the patches for specs that match this one
+ patched = False
for spec, patch_list in self.patches.items():
if self.spec.satisfies(spec):
for patch in patch_list:
- tty.msg('Applying patch %s' % patch.path_or_url)
try:
patch.apply(self.stage)
+ tty.msg('Applied patch %s' % patch.path_or_url)
+ patched = True
except:
# Touch bad file if anything goes wrong.
+ tty.msg('Patch %s failed.' % patch.path_or_url)
touch(bad_file)
raise
- # patch succeeded. Get rid of failed file & touch good file so we
- # don't try to patch again again next time.
+ if has_patch_fun:
+ try:
+ self.patch()
+ tty.msg("Ran patch() for %s" % self.name)
+ patched = True
+ except:
+ tty.msg("patch() function failed for %s" % self.name)
+ touch(bad_file)
+ raise
+
+ # Get rid of any old failed file -- patches have either succeeded
+ # or are not needed. This is mostly defensive -- it's needed
+ # if the restage() method doesn't clean *everything* (e.g., for a repo)
if os.path.isfile(bad_file):
os.remove(bad_file)
- touch(good_file)
- if has_patch_fun:
- self.patch()
+ # touch good or no patches file so that we skip next time.
+ if patched:
+ touch(good_file)
+ else:
+ touch(no_patches_file)
- tty.msg("Patched %s" % self.name)
+
+ @property
+ def namespace(self):
+ namespace, dot, module = self.__module__.rpartition('.')
+ return namespace
def do_fake_install(self):
@@ -730,9 +816,21 @@ class Package(object):
mkdirp(self.prefix.man1)
- def _build_logger(self, log_path):
- """Create a context manager to log build output."""
+ def _get_needed_resources(self):
+ resources = []
+ # Select the resources that are needed for this build
+ for when_spec, resource_list in self.resources.items():
+ if when_spec in self.spec:
+ resources.extend(resource_list)
+ # Sort the resources by the length of their destination path. Since any nested
+ # resource's destination must contain its parent resource's name, this stages
+ # parent resources before the resources nested inside them.
+ resources = sorted(resources, key=lambda res: len(res.destination))
+ return resources
+ def _resource_stage(self, resource):
+ pieces = ['resource', resource.name, self.spec.dag_hash()]
+ resource_stage_folder = '-'.join(pieces)
+ return resource_stage_folder
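
A worked illustration of the ordering rule in _get_needed_resources above (the destinations are hypothetical):

    # Sorting by destination length stages parent resources before nested ones.
    destinations = ['deps/nested/tool', 'deps', 'deps/nested']
    print(sorted(destinations, key=len))
    # -> ['deps', 'deps/nested', 'deps/nested/tool']
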
def do_install(self,
@@ -745,7 +843,9 @@ class Package(object):
Args:
keep_prefix -- Keep install prefix on failure. By default, destroys it.
- keep_stage -- Keep stage on successful build. By default, destroys it.
+ keep_stage -- By default, stage is destroyed only if there are no
+ exceptions during build. Set to True to keep the stage
+ even with exceptions.
ignore_deps -- Do not install dependencies before installing this package.
fake -- Don't really build -- install fake stub files instead.
skip_patch -- Skip patch stage of build if True.
@@ -755,109 +855,127 @@ class Package(object):
if not self.spec.concrete:
raise ValueError("Can only install concrete packages.")
- if os.path.exists(self.prefix):
- tty.msg("%s is already installed in %s." % (self.name, self.prefix))
+ # No installation needed if package is external
+ if self.spec.external:
+ tty.msg("%s is externally installed in %s" % (self.name, self.spec.external))
+ return
+
+ # Ensure package is not already installed
+ if spack.install_layout.check_installed(self.spec):
+ tty.msg("%s is already installed in %s" % (self.name, self.prefix))
return
tty.msg("Installing %s" % self.name)
+ # First, install dependencies recursively.
if not ignore_deps:
self.do_install_dependencies(
keep_prefix=keep_prefix, keep_stage=keep_stage, ignore_deps=ignore_deps,
- fake=fake, skip_patch=skip_patch, verbose=verbose,
- make_jobs=make_jobs)
-
- start_time = time.time()
- if not fake:
- if not skip_patch:
- self.do_patch()
- else:
- self.do_stage()
+ fake=fake, skip_patch=skip_patch, verbose=verbose, make_jobs=make_jobs)
- # create the install directory. The install layout
- # handles this in case so that it can use whatever
- # package naming scheme it likes.
- spack.install_layout.create_install_directory(self.spec)
-
- def cleanup():
- if not keep_prefix:
- # If anything goes wrong, remove the install prefix
- self.remove_prefix()
- else:
- tty.warn("Keeping install prefix in place despite error.",
- "Spack will think this package is installed." +
- "Manually remove this directory to fix:",
- self.prefix)
+ # Set parallelism before starting build.
+ self.make_jobs = make_jobs
+ # Then install the package itself.
+ def build_process():
+ """Forked for each build. Has its own process and python
+ module space set up by build_environment.fork()."""
+ start_time = time.time()
+ if not fake:
+ if not skip_patch:
+ self.do_patch()
+ else:
+ self.do_stage()
- def real_work():
- try:
- tty.msg("Building %s." % self.name)
+ tty.msg("Building %s" % self.name)
+ self.stage.keep = keep_stage
+ with self.stage:
# Run the pre-install hook in the child process after
# the directory is created.
spack.hooks.pre_install(self)
- # Set up process's build environment before running install.
if fake:
self.do_fake_install()
else:
# Do the real install in the source directory.
- self.stage.chdir_to_source()
+ self.stage.chdir_to_source()
- # This redirects I/O to a build log (and optionally to the terminal)
- log_path = join_path(os.getcwd(), 'spack-build.out')
- log_file = open(log_path, 'w')
- with log_output(log_file, verbose, sys.stdout.isatty(), True):
- self.install(self.spec, self.prefix)
+ # Save the build environment in a file before building.
+ env_path = join_path(os.getcwd(), 'spack-build.env')
- # Ensure that something was actually installed.
- self._sanity_check_install()
+ try:
+ # Redirect I/O to a build log (and optionally to the terminal)
+ log_path = join_path(os.getcwd(), 'spack-build.out')
+ log_file = open(log_path, 'w')
+ with log_output(log_file, verbose, sys.stdout.isatty(), True):
+ dump_environment(env_path)
+ self.install(self.spec, self.prefix)
- # Move build log into install directory on success
- if not fake:
- log_install_path = spack.install_layout.build_log_path(self.spec)
- install(log_path, log_install_path)
+ except ProcessError as e:
+ # Annotate ProcessErrors with the location of the build log.
+ e.build_log = log_path
+ raise e
- # On successful install, remove the stage.
- if not keep_stage:
- self.stage.destroy()
+ # Ensure that something was actually installed.
+ self.sanity_check_prefix()
- # Stop timer.
- self._total_time = time.time() - start_time
- build_time = self._total_time - self._fetch_time
+ # Copy provenance into the install directory on success
+ log_install_path = spack.install_layout.build_log_path(self.spec)
+ env_install_path = spack.install_layout.build_env_path(self.spec)
+ packages_dir = spack.install_layout.build_packages_path(self.spec)
- tty.msg("Successfully installed %s." % self.name,
- "Fetch: %s. Build: %s. Total: %s."
- % (_hms(self._fetch_time), _hms(build_time), _hms(self._total_time)))
- print_pkg(self.prefix)
+ install(log_path, log_install_path)
+ install(env_path, env_install_path)
+ dump_packages(self.spec, packages_dir)
- except ProcessError, e:
- # Annotate with location of build log.
- e.build_log = log_path
- cleanup()
- raise e
+ # Run post install hooks before build stage is removed.
+ spack.hooks.post_install(self)
- except:
- # other exceptions just clean up and raise.
- cleanup()
- raise
+ # Stop timer.
+ self._total_time = time.time() - start_time
+ build_time = self._total_time - self._fetch_time
- # Set parallelism before starting build.
- self.make_jobs = make_jobs
+ tty.msg("Successfully installed %s" % self.name,
+ "Fetch: %s. Build: %s. Total: %s."
+ % (_hms(self._fetch_time), _hms(build_time), _hms(self._total_time)))
+ print_pkg(self.prefix)
- # Do the build.
- spack.build_environment.fork(self, real_work)
+ try:
+ # Create the install prefix and fork the build process.
+ spack.install_layout.create_install_directory(self.spec)
+ spack.build_environment.fork(self, build_process)
+ except:
+ # remove the install prefix if anything went wrong during install.
+ if not keep_prefix:
+ self.remove_prefix()
+ else:
+ tty.warn("Keeping install prefix in place despite error.",
+ "Spack will think this package is installed. " +
+ "Manually remove this directory to fix:",
+ self.prefix, wrap=True)
+ raise
# note: PARENT of the build process adds the new package to
# the database, so that we don't need to re-read from file.
spack.installed_db.add(self.spec, self.prefix)
- # Once everything else is done, run post install hooks
- spack.hooks.post_install(self)
+ def sanity_check_prefix(self):
+ """This function checks whether install succeeded."""
+ def check_paths(path_list, filetype, predicate):
+ if isinstance(path_list, basestring):
+ path_list = [path_list]
+
+ for path in path_list:
+ abs_path = os.path.join(self.prefix, path)
+ if not predicate(abs_path):
+ raise InstallError("Install failed for %s. No such %s in prefix: %s"
+ % (self.name, filetype, path))
+
+ check_paths(self.sanity_check_is_file, 'file', os.path.isfile)
+ check_paths(self.sanity_check_is_dir, 'directory', os.path.isdir)
- def _sanity_check_install(self):
installed = set(os.listdir(self.prefix))
installed.difference_update(spack.install_layout.hidden_file_paths)
if not installed:
@@ -887,38 +1005,127 @@ class Package(object):
return __import__(self.__class__.__module__,
fromlist=[self.__class__.__name__])
+ def setup_environment(self, spack_env, run_env):
+ """Set up the compile and runtime environments for a package.
+
+ `spack_env` and `run_env` are `EnvironmentModifications`
+ objects. Package authors can call methods on them to alter
+ the environment within Spack and at runtime.
- def setup_dependent_environment(self, module, spec, dependent_spec):
- """Called before the install() method of dependents.
+ Both `spack_env` and `run_env` are applied within the build
+ process, before this package's `install()` method is called.
+
+ Modifications in `run_env` will *also* be added to the
+ generated environment modules for this package.
Default implementation does nothing, but this can be
- overridden by an extendable package to set up the install
- environment for its extensions. This is useful if there are
- some common steps to installing all extensions for a
- certain package.
+ overridden if the package needs a particular environment.
- Some examples:
+ Examples:
- 1. Installing python modules generally requires PYTHONPATH to
- point to the lib/pythonX.Y/site-packages directory in the
- module's install prefix. This could set that variable.
+ 1. Qt extensions need `QTDIR` set.
- 2. Extensions often need to invoke the 'python' interpreter
- from the Python installation being extended. This routine can
- put a 'python' Execuable object in the module scope for the
- extension package to simplify extension installs.
+ Args:
+ spack_env (EnvironmentModifications): list of
+ modifications to be applied when this package is built
+ within Spack.
- 3. A lot of Qt extensions need QTDIR set. This can be used to do that.
+ run_env (EnvironmentModifications): list of environment
+ changes to be applied when this package is run outside
+ of Spack.
"""
pass
+ def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
+ """Set up the environment of packages that depend on this one.
+
+ This is similar to `setup_environment`, but it is used to
+ modify the compile and runtime environments of packages that
+ *depend* on this one. This gives packages like Python and
+ others that follow the extension model a way to implement
+ common environment or compile-time settings for dependencies.
+
+ By default, this delegates to self.setup_environment()
+
+ Example:
+
+ 1. Installing python modules generally requires
+ `PYTHONPATH` to point to the lib/pythonX.Y/site-packages
+ directory in the module's install prefix. This could
+ set that variable.
+
+ Args:
+
+ spack_env (EnvironmentModifications): list of
+ modifications to be applied when the dependent package
+ is built within Spack.
+
+ run_env (EnvironmentModifications): list of environment
+ changes to be applied when the dependent package is
+ run outside of Spack.
+
+ dependent_spec (Spec): The spec of the dependent package
+ about to be built. This allows the extendee (self) to
+ query the dependent's state. Note that *this*
+ package's spec is available as `self.spec`.
+
+ This is useful if there are some common steps to installing
+ all extensions for a certain package.
+
+ """
+ self.setup_environment(spack_env, run_env)
+
+
+ def setup_dependent_package(self, module, dependent_spec):
+ """Set up Python module-scope variables for dependent packages.
+
+ Called before the install() method of dependents.
+
+ Default implementation does nothing, but this can be
+ overridden by an extendable package to set up the module of
+ its extensions. This is useful if there are some common steps
+ to installing all extensions for a certain package.
+
+ Example:
+
+ 1. Extensions often need to invoke the `python`
+ interpreter from the Python installation being
+ extended. This routine can put a 'python' Executable
+ object in the module scope for the extension package to
+ simplify extension installs.
+
+ 2. MPI compilers could set some variables in the
+ dependent's scope that point to `mpicc`, `mpicxx`,
+ etc., allowing them to be called by common names
+ regardless of which MPI is used.
+
+ 3. BLAS/LAPACK implementations can set some variables
+ indicating the path to their libraries, since these
+ paths differ by BLAS/LAPACK implementation.
+
+ Args:
+
+ module (module): The Python `module` object of the
+ dependent package. Packages can use this to set
+ module-scope variables for the dependent to use.
+
+ dependent_spec (Spec): The spec of the dependent package
+ about to be built. This allows the extendee (self) to
+ query the dependent's state. Note that *this*
+ package's spec is available as `self.spec`.
+
+ """
+ pass
+
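
Tying these hooks together, a hedged sketch of how a Python-like extendable package might implement them (the class, paths, and names are illustrative, not the real python package):

    class Python(Package):  # hypothetical package
        def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
            # Expose extensions at build time (spack_env) and run time (run_env).
            site = join_path(self.prefix.lib, 'python2.7/site-packages')
            spack_env.prepend_path('PYTHONPATH', site)
            run_env.prepend_path('PYTHONPATH', site)

        def setup_dependent_package(self, module, dependent_spec):
            # Give dependents a ready-made `python` callable in module scope.
            module.python = Executable(join_path(self.prefix.bin, 'python'))
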
def install(self, spec, prefix):
"""Package implementations override this with their own build configuration."""
raise InstallError("Package %s provides no install method!" % self.name)
-
def do_uninstall(self, force=False):
if not self.installed:
raise InstallError(str(self.spec) + " is not installed.")
@@ -934,7 +1141,7 @@ class Package(object):
# Uninstalling in Spack only requires removing the prefix.
self.remove_prefix()
spack.installed_db.remove(self.spec)
- tty.msg("Successfully uninstalled %s." % self.spec.short_spec)
+ tty.msg("Successfully uninstalled %s" % self.spec.short_spec)
# Once everything else is done, run post install hooks
spack.hooks.post_uninstall(self)
@@ -981,7 +1188,7 @@ class Package(object):
self.extendee_spec.package.activate(self, **self.extendee_args)
spack.install_layout.add_extension(self.extendee_spec, self.spec)
- tty.msg("Activated extension %s for %s."
+ tty.msg("Activated extension %s for %s"
% (self.spec.short_spec, self.extendee_spec.format("$_$@$+$%@")))
@@ -1033,7 +1240,7 @@ class Package(object):
if self.activated:
spack.install_layout.remove_extension(self.extendee_spec, self.spec)
- tty.msg("Deactivated extension %s for %s."
+ tty.msg("Deactivated extension %s for %s"
% (self.spec.short_spec, self.extendee_spec.format("$_$@$+$%@")))
@@ -1061,8 +1268,7 @@ class Package(object):
def do_clean(self):
"""Removes the package's build stage and source tarball."""
- if os.path.exists(self.stage.path):
- self.stage.destroy()
+ self.stage.destroy()
def format_doc(self, **kwargs):
@@ -1099,9 +1305,9 @@ class Package(object):
raise VersionFetchError(self.__class__)
try:
- return find_versions_of_archive(
+ return spack.util.web.find_versions_of_archive(
*self.all_urls, list_url=self.list_url, list_depth=self.list_depth)
- except spack.error.NoNetworkConnectionError, e:
+ except spack.error.NoNetworkConnectionError as e:
tty.die("Package.fetch_versions couldn't connect to:",
e.url, e.message)
@@ -1119,51 +1325,29 @@ class Package(object):
@property
def rpath_args(self):
- """Get the rpath args as a string, with -Wl,-rpath= for each element."""
- return " ".join("-Wl,-rpath=%s" % p for p in self.rpath)
-
-
-def find_versions_of_archive(*archive_urls, **kwargs):
- list_url = kwargs.get('list_url', None)
- list_depth = kwargs.get('list_depth', 1)
-
- # Generate a list of list_urls based on archive urls and any
- # explicitly listed list_url in the package
- list_urls = set()
- if list_url:
- list_urls.add(list_url)
- for aurl in archive_urls:
- list_urls.add(spack.url.find_list_url(aurl))
-
- # Grab some web pages to scrape.
- page_map = {}
- for lurl in list_urls:
- pages = spack.util.web.get_pages(lurl, depth=list_depth)
- page_map.update(pages)
-
- # Scrape them for archive URLs
- regexes = []
- for aurl in archive_urls:
- # This creates a regex from the URL with a capture group for
- # the version part of the URL. The capture group is converted
- # to a generic wildcard, so we can use this to extract things
- # on a page that look like archive URLs.
- url_regex = spack.url.wildcard_version(aurl)
-
- # We'll be a bit more liberal and just look for the archive
- # part, not the full path.
- regexes.append(os.path.basename(url_regex))
-
- # Build a version list from all the matches we find
- versions = {}
- for page_url, content in page_map.iteritems():
- # extract versions from matches.
- for regex in regexes:
- versions.update(
- (Version(m.group(1)), urljoin(page_url, m.group(0)))
- for m in re.finditer(regex, content))
-
- return versions
+ """Get the rpath args as a string, with -Wl,-rpath, for each element."""
+ return " ".join("-Wl,-rpath,%s" % p for p in self.rpath)
+
+
+def install_dependency_symlinks(pkg, spec, prefix):
+ """Execute a dummy install and flatten dependencies"""
+ flatten_dependencies(spec, prefix)
+
+def flatten_dependencies(spec, flat_dir):
+ """Make each dependency of spec present in dir via symlink."""
+ for dep in spec.traverse(root=False):
+ name = dep.name
+
+ dep_path = spack.install_layout.path_for_spec(dep)
+ dep_files = LinkTree(dep_path)
+
+ os.mkdir(flat_dir+'/'+name)
+
+ conflict = dep_files.find_conflict(flat_dir+'/'+name)
+ if conflict:
+ raise DependencyConflictError(conflict)
+
+ dep_files.merge(flat_dir+'/'+name)
def validate_package_url(url_string):
@@ -1176,6 +1360,52 @@ def validate_package_url(url_string):
tty.die("Invalid file type in URL: '%s'" % url_string)
+def dump_packages(spec, path):
+ """Dump all package information for a spec and its dependencies.
+
+ This creates a package repository within path for every
+ namespace in the spec DAG, and fills the repos with package
+ files and patch files for every node in the DAG.
+ """
+ mkdirp(path)
+
+ # Copy in package.py files from any dependencies.
+ # Note that we copy them in as they are in the *install* directory
+ # NOT as they are in the repository, because we want a snapshot of
+ # how *this* particular build was done.
+ for node in spec.traverse():
+ if node is not spec:
+ # Locate the dependency package in the install tree and find
+ # its provenance information.
+ source = spack.install_layout.build_packages_path(node)
+ source_repo_root = join_path(source, node.namespace)
+
+ # There's no provenance installed for the source package. Skip it.
+ # User can always get something current from the builtin repo.
+ if not os.path.isdir(source_repo_root):
+ continue
+
+ # Create a source repo and get the pkg directory out of it.
+ try:
+ source_repo = spack.repository.Repo(source_repo_root)
+ source_pkg_dir = source_repo.dirname_for_package_name(node.name)
+ except spack.repository.RepoError:
+ tty.warn("Couldn't copy in provenance for %s" % node.name)
+ continue
+
+ # Create a destination repository
+ dest_repo_root = join_path(path, node.namespace)
+ if not os.path.exists(dest_repo_root):
+ spack.repository.create_repo(dest_repo_root)
+ repo = spack.repository.Repo(dest_repo_root)
+
+ # Get the location of the package in the dest repo.
+ dest_pkg_dir = repo.dirname_for_package_name(node.name)
+ if node is not spec:
+ install_tree(source_pkg_dir, dest_pkg_dir)
+ else:
+ spack.repo.dump_provenance(node, dest_pkg_dir)
+
+
def print_pkg(message):
"""Outputs a message with a package icon."""
from llnl.util.tty.color import cwrite
@@ -1207,6 +1437,10 @@ class InstallError(spack.error.SpackError):
super(InstallError, self).__init__(message, long_msg)
+class ExternalPackageError(InstallError):
+ """Raised by install() when a package is only for external use."""
+
+
class PackageStillNeededError(InstallError):
"""Raised when package is still needed by another on uninstall."""
def __init__(self, spec, dependents):
@@ -1226,7 +1460,7 @@ class PackageVersionError(PackageError):
"""Raised when a version URL cannot automatically be determined."""
def __init__(self, version):
super(PackageVersionError, self).__init__(
- "Cannot determine a URL automatically for version %s." % version,
+ "Cannot determine a URL automatically for version %s" % version,
"Please provide a url for this version in the package.py file.")
@@ -1257,3 +1491,11 @@ class ExtensionConflictError(ExtensionError):
class ActivationError(ExtensionError):
def __init__(self, msg, long_msg=None):
super(ActivationError, self).__init__(msg, long_msg)
+
+
+class DependencyConflictError(spack.error.SpackError):
+ """Raised when the dependencies cannot be flattened as asked for."""
+ def __init__(self, conflict):
+ super(DependencyConflictError, self).__init__(
+ "%s conflicts with another file in the flattened directory." %(
+ conflict))
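
A usage sketch for the flattening helpers above (the spec and target directory are hypothetical):

    import os
    from spack.spec import Spec
    from spack.package import flatten_dependencies

    # Expose each dependency of a concrete spec as a symlink tree under one
    # flat directory, with one subdirectory per dependency.
    spec = Spec('mpileaks').concretized()
    flat = '/tmp/flat-mpileaks'
    os.mkdir(flat)
    flatten_dependencies(spec, flat)
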
diff --git a/lib/spack/spack/packages.py b/lib/spack/spack/packages.py
deleted file mode 100644
index 2e90e22c29..0000000000
--- a/lib/spack/spack/packages.py
+++ /dev/null
@@ -1,211 +0,0 @@
-##############################################################################
-# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
-# Produced at the Lawrence Livermore National Laboratory.
-#
-# This file is part of Spack.
-# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
-# LLNL-CODE-647188
-#
-# For details, see https://scalability-llnl.github.io/spack
-# Please also see the LICENSE file for our notice and the LGPL.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (as published by
-# the Free Software Foundation) version 2.1 dated February 1999.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
-# conditions of the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-##############################################################################
-import os
-import sys
-import inspect
-import glob
-import imp
-
-import llnl.util.tty as tty
-from llnl.util.filesystem import join_path
-from llnl.util.lang import *
-
-import spack.error
-import spack.spec
-from spack.virtual import ProviderIndex
-from spack.util.naming import mod_to_class, validate_module_name
-
-# Name of module under which packages are imported
-_imported_packages_module = 'spack.packages'
-
-# Name of the package file inside a package directory
-_package_file_name = 'package.py'
-
-
-def _autospec(function):
- """Decorator that automatically converts the argument of a single-arg
- function to a Spec."""
- def converter(self, spec_like, **kwargs):
- if not isinstance(spec_like, spack.spec.Spec):
- spec_like = spack.spec.Spec(spec_like)
- return function(self, spec_like, **kwargs)
- return converter
-
-
-class PackageDB(object):
- def __init__(self, root):
- """Construct a new package database from a root directory."""
- self.root = root
- self.instances = {}
- self.provider_index = None
-
-
- @_autospec
- def get(self, spec, **kwargs):
- if spec.virtual:
- raise UnknownPackageError(spec.name)
-
- if kwargs.get('new', False):
- if spec in self.instances:
- del self.instances[spec]
-
- if not spec in self.instances:
- package_class = self.get_class_for_package_name(spec.name)
- try:
- copy = spec.copy()
- self.instances[copy] = package_class(copy)
- except Exception, e:
- if spack.debug:
- sys.excepthook(*sys.exc_info())
- raise FailedConstructorError(spec.name, e)
-
- return self.instances[spec]
-
-
- @_autospec
- def delete(self, spec):
- """Force a package to be recreated."""
- del self.instances[spec]
-
-
- def purge(self):
- """Clear entire package instance cache."""
- self.instances.clear()
-
-
- @_autospec
- def providers_for(self, vpkg_spec):
- if self.provider_index is None:
- self.provider_index = ProviderIndex(self.all_package_names())
-
- providers = self.provider_index.providers_for(vpkg_spec)
- if not providers:
- raise UnknownPackageError(vpkg_spec.name)
- return providers
-
-
- @_autospec
- def extensions_for(self, extendee_spec):
- return [p for p in self.all_packages() if p.extends(extendee_spec)]
-
-
- def dirname_for_package_name(self, pkg_name):
- """Get the directory name for a particular package. This is the
- directory that contains its package.py file."""
- return join_path(self.root, pkg_name)
-
-
- def filename_for_package_name(self, pkg_name):
- """Get the filename for the module we should load for a particular
- package. Packages for a pacakge DB live in
- ``$root/<package_name>/package.py``
-
- This will return a proper package.py path even if the
- package doesn't exist yet, so callers will need to ensure
- the package exists before importing.
- """
- validate_module_name(pkg_name)
- pkg_dir = self.dirname_for_package_name(pkg_name)
- return join_path(pkg_dir, _package_file_name)
-
-
- @memoized
- def all_package_names(self):
- """Generator function for all packages. This looks for
- ``<pkg_name>/package.py`` files within the root direcotry"""
- all_package_names = []
- for pkg_name in os.listdir(self.root):
- pkg_dir = join_path(self.root, pkg_name)
- pkg_file = join_path(pkg_dir, _package_file_name)
- if os.path.isfile(pkg_file):
- all_package_names.append(pkg_name)
- all_package_names.sort()
- return all_package_names
-
-
- def all_packages(self):
- for name in self.all_package_names():
- yield self.get(name)
-
-
- @memoized
- def exists(self, pkg_name):
- """Whether a package with the supplied name exists ."""
- if pkg_name == "":
- return False
- return os.path.exists(self.filename_for_package_name(pkg_name))
-
-
- @memoized
- def get_class_for_package_name(self, pkg_name):
- """Get an instance of the class for a particular package.
-
- This method uses Python's ``imp`` package to load python
- source from a Spack package's ``package.py`` file. A
- normal python import would only load each package once, but
- because we do this dynamically, the method needs to be
- memoized to ensure there is only ONE package class
- instance, per package, per database.
- """
- file_path = self.filename_for_package_name(pkg_name)
-
- if os.path.exists(file_path):
- if not os.path.isfile(file_path):
- tty.die("Something's wrong. '%s' is not a file!" % file_path)
- if not os.access(file_path, os.R_OK):
- tty.die("Cannot read '%s'!" % file_path)
- else:
- raise UnknownPackageError(pkg_name)
-
- class_name = mod_to_class(pkg_name)
- try:
- module_name = _imported_packages_module + '.' + pkg_name
- module = imp.load_source(module_name, file_path)
-
- except ImportError, e:
- tty.die("Error while importing %s from %s:\n%s" % (
- pkg_name, file_path, e.message))
-
- cls = getattr(module, class_name)
- if not inspect.isclass(cls):
- tty.die("%s.%s is not a class" % (pkg_name, class_name))
-
- return cls
-
-
-class UnknownPackageError(spack.error.SpackError):
- """Raised when we encounter a package spack doesn't have."""
- def __init__(self, name):
- super(UnknownPackageError, self).__init__("Package '%s' not found." % name)
- self.name = name
-
-
-class FailedConstructorError(spack.error.SpackError):
- """Raised when a package's class constructor fails."""
- def __init__(self, name, reason):
- super(FailedConstructorError, self).__init__(
- "Class constructor failed for package '%s'." % name,
- str(reason))
- self.name = name
diff --git a/lib/spack/spack/parse.py b/lib/spack/spack/parse.py
index bc12ec258c..e9467aa685 100644
--- a/lib/spack/spack/parse.py
+++ b/lib/spack/spack/parse.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/patch.py b/lib/spack/spack/patch.py
index b1b6e07738..b82a047753 100644
--- a/lib/spack/spack/patch.py
+++ b/lib/spack/spack/patch.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -41,8 +41,8 @@ class Patch(object):
"""This class describes a patch to be applied to some expanded
source code."""
- def __init__(self, pkg_name, path_or_url, level):
- self.pkg_name = pkg_name
+ def __init__(self, pkg, path_or_url, level):
+ self.pkg_name = pkg.name
self.path_or_url = path_or_url
self.path = None
self.url = None
@@ -54,7 +54,7 @@ class Patch(object):
if '://' in path_or_url:
self.url = path_or_url
else:
- pkg_dir = spack.db.dirname_for_package_name(pkg_name)
+ pkg_dir = spack.repo.dirname_for_package_name(self.pkg_name)
self.path = join_path(pkg_dir, path_or_url)
if not os.path.isfile(self.path):
-                raise NoSuchPatchFileError(pkg_name, self.path)
+                raise NoSuchPatchFileError(self.pkg_name, self.path)
diff --git a/lib/spack/spack/preferred_packages.py b/lib/spack/spack/preferred_packages.py
new file mode 100644
index 0000000000..4d8526c75f
--- /dev/null
+++ b/lib/spack/spack/preferred_packages.py
@@ -0,0 +1,175 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+
+import spack
+from spack.version import *
+
+class PreferredPackages(object):
+ _default_order = {'compiler' : [ 'gcc', 'intel', 'clang', 'pgi', 'xlc' ] } # Arbitrary, but consistent
+
+ def __init__(self):
+ self.preferred = spack.config.get_config('packages')
+ self._spec_for_pkgname_cache = {}
+
+    # Given a package name, a sort component (e.g., version, compiler, ...),
+    # and a second_key (used by providers), return the configured list of
+    # preferred values for that component.
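+    # For example (hypothetical config), a packages.yaml containing
+    #
+    #   packages:
+    #     scorep:
+    #       providers:
+    #         mpi: [mvapich, openmpi]
+    #
+    # would make _order_for_package('scorep', 'providers', 'mpi') return
+    # ['mvapich', 'openmpi'].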
+ def _order_for_package(self, pkgname, component, second_key, test_all=True):
+ pkglist = [pkgname]
+ if test_all:
+ pkglist.append('all')
+ for pkg in pkglist:
+ order = self.preferred.get(pkg, {}).get(component, {})
+ if type(order) is dict:
+ order = order.get(second_key, {})
+ if not order:
+ continue
+ return [str(s).strip() for s in order]
+ return []
+
+
+ # A generic sorting function. Given a package name and sort
+ # component, return less-than-0, 0, or greater-than-0 if
+ # a is respectively less-than, equal to, or greater than b.
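+    # For example (hypothetical preference), if the configured order for
+    # ('foo', 'variant') is ['debug', 'shared'], then
+    # _component_compare('foo', 'variant', 'debug', 'shared', False, None)
+    # returns a negative number, since 'debug' comes first in the list.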
+ def _component_compare(self, pkgname, component, a, b, reverse_natural_compare, second_key):
+ if a is None:
+ return -1
+ if b is None:
+ return 1
+ orderlist = self._order_for_package(pkgname, component, second_key)
+ a_in_list = str(a) in orderlist
+ b_in_list = str(b) in orderlist
+ if a_in_list and not b_in_list:
+ return -1
+ elif b_in_list and not a_in_list:
+ return 1
+
+ cmp_a = None
+ cmp_b = None
+ reverse = None
+ if not a_in_list and not b_in_list:
+ cmp_a = a
+ cmp_b = b
+ reverse = -1 if reverse_natural_compare else 1
+ else:
+ cmp_a = orderlist.index(str(a))
+ cmp_b = orderlist.index(str(b))
+ reverse = 1
+
+ if cmp_a < cmp_b:
+ return -1 * reverse
+ elif cmp_a > cmp_b:
+ return 1 * reverse
+ else:
+ return 0
+
+
+ # A sorting function for specs. Similar to component_compare, but
+ # a and b are considered to match entries in the sorting list if they
+ # satisfy the list component.
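+    # For example (hypothetical preference), with a 'version' order of
+    # ['3.1', '2.8'] for 'mpich', a spec that satisfies 3.1 sorts before one
+    # that satisfies 2.8: entries match by satisfaction, not string equality.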
+ def _spec_compare(self, pkgname, component, a, b, reverse_natural_compare, second_key):
+ if not a or not a.concrete:
+ return -1
+ if not b or not b.concrete:
+ return 1
+ specs = self._spec_for_pkgname(pkgname, component, second_key)
+ a_index = None
+ b_index = None
+ reverse = -1 if reverse_natural_compare else 1
+        for i, cspec in enumerate(specs):
+            if a_index is None and (cspec.satisfies(a) or a.satisfies(cspec)):
+                a_index = i
+                if b_index is not None:
+                    break
+            if b_index is None and (cspec.satisfies(b) or b.satisfies(cspec)):
+                b_index = i
+                if a_index is not None:
+                    break
+
+        if a_index is not None and b_index is None:
+            return -1
+        elif a_index is None and b_index is not None:
+            return 1
+        elif a_index is not None and b_index == a_index:
+            return -1 * cmp(a, b)
+        elif a_index is not None and b_index is not None and a_index != b_index:
+            return cmp(a_index, b_index)
+        else:
+            return cmp(a, b) * reverse
+
+
+
+ # Given a sort order specified by the pkgname/component/second_key, return
+ # a list of CompilerSpecs, VersionLists, or Specs for that sorting list.
+ def _spec_for_pkgname(self, pkgname, component, second_key):
+ key = (pkgname, component, second_key)
+        if key not in self._spec_for_pkgname_cache:
+ pkglist = self._order_for_package(pkgname, component, second_key)
+ if not pkglist:
+ if component in self._default_order:
+ pkglist = self._default_order[component]
+ if component == 'compiler':
+ self._spec_for_pkgname_cache[key] = [spack.spec.CompilerSpec(s) for s in pkglist]
+ elif component == 'version':
+ self._spec_for_pkgname_cache[key] = [VersionList(s) for s in pkglist]
+ else:
+ self._spec_for_pkgname_cache[key] = [spack.spec.Spec(s) for s in pkglist]
+ return self._spec_for_pkgname_cache[key]
+
+
+ def provider_compare(self, pkgname, provider_str, a, b):
+        """Return less-than-0, 0, or greater-than-0 if a is respectively
+           less-than, equal-to, or greater-than b, where a and b are possible
+           implementations of provider_str.
+           One provider is less-than another if it is preferred over the other.
+           For example, provider_compare('scorep', 'mpi', 'mvapich', 'openmpi')
+           would return -1 if mvapich should be preferred over openmpi for scorep."""
+ return self._spec_compare(pkgname, 'providers', a, b, False, provider_str)
+
+
+ def spec_has_preferred_provider(self, pkgname, provider_str):
+        """Return True iff the named package has a list of preferred providers."""
+ return bool(self._order_for_package(pkgname, 'providers', provider_str, False))
+
+
+ def version_compare(self, pkgname, a, b):
+        """Return less-than-0, 0, or greater-than-0 if version a of pkgname is
+           respectively less-than, equal-to, or greater-than version b of pkgname.
+           One version is less-than another if it is preferred over the other."""
+ return self._spec_compare(pkgname, 'version', a, b, True, None)
+
+
+ def variant_compare(self, pkgname, a, b):
+        """Return less-than-0, 0, or greater-than-0 if variant a of pkgname is
+           respectively less-than, equal-to, or greater-than variant b of pkgname.
+           One variant is less-than another if it is preferred over the other."""
+ return self._component_compare(pkgname, 'variant', a, b, False, None)
+
+
+ def architecture_compare(self, pkgname, a, b):
+        """Return less-than-0, 0, or greater-than-0 if architecture a of pkgname is
+           respectively less-than, equal-to, or greater-than architecture b of pkgname.
+           One architecture is less-than another if it is preferred over the other."""
+ return self._component_compare(pkgname, 'architecture', a, b, False, None)
+
+
+ def compiler_compare(self, pkgname, a, b):
+        """Return less-than-0, 0, or greater-than-0 if compiler a of pkgname is
+           respectively less-than, equal-to, or greater-than compiler b of pkgname.
+           One compiler is less-than another if it is preferred over the other."""
+ return self._spec_compare(pkgname, 'compiler', a, b, False, None)
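+
+
+# Usage sketch (package name hypothetical): the *_compare methods are
+# Python 2 style cmp functions, suitable for list.sort(cmp=...):
+#
+#   pref = PreferredPackages()
+#   versions.sort(cmp=lambda a, b: pref.version_compare('mpich', a, b))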
diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py
new file mode 100644
index 0000000000..d2fdc937f7
--- /dev/null
+++ b/lib/spack/spack/repository.py
@@ -0,0 +1,855 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://software.llnl.gov/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import os
+import exceptions
+import sys
+import inspect
+import imp
+import re
+import shutil
+import traceback
+from bisect import bisect_left
+from external import yaml
+
+import llnl.util.tty as tty
+from llnl.util.filesystem import *
+
+import spack.error
+import spack.config
+import spack.spec
+from spack.virtual import ProviderIndex
+from spack.util.naming import *
+
+#
+# Super-namespace for all packages.
+# Package modules are imported as spack.pkg.<namespace>.<pkg-name>.
+#
+repo_namespace = 'spack.pkg'
+
+#
+# These names describe how repos should be laid out in the filesystem.
+#
+repo_config_name = 'repo.yaml' # Top-level filename for repo config.
+packages_dir_name = 'packages' # Top-level repo directory containing pkgs.
+package_file_name = 'package.py' # Filename for packages in a repository.
+
+# Guaranteed unused default value for some functions.
+NOT_PROVIDED = object()
+
+
+def _autospec(function):
+ """Decorator that automatically converts the argument of a single-arg
+ function to a Spec."""
+ def converter(self, spec_like, *args, **kwargs):
+ if not isinstance(spec_like, spack.spec.Spec):
+ spec_like = spack.spec.Spec(spec_like)
+ return function(self, spec_like, *args, **kwargs)
+ return converter
+
+
+def _make_namespace_module(ns):
+ module = imp.new_module(ns)
+ module.__file__ = "(spack namespace)"
+ module.__path__ = []
+ module.__package__ = ns
+ return module
+
+
+def substitute_spack_prefix(path):
+ """Replaces instances of $spack with Spack's prefix."""
+ return re.sub(r'^\$spack', spack.prefix, path)
+
+
+def canonicalize_path(path):
+ """Substitute $spack, expand user home, take abspath."""
+ path = substitute_spack_prefix(path)
+ path = os.path.expanduser(path)
+ path = os.path.abspath(path)
+ return path
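+
+# For example, with Spack installed under /opt/spack (path hypothetical):
+#   canonicalize_path('$spack/var/repos')  ->  '/opt/spack/var/repos'
+#   canonicalize_path('~/my-repo')         ->  '/home/<user>/my-repo'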
+
+
+class RepoPath(object):
+ """A RepoPath is a list of repos that function as one.
+
+ It functions exactly like a Repo, but it operates on the
+ combined results of the Repos in its list instead of on a
+ single package repository.
+ """
+ def __init__(self, *repo_dirs, **kwargs):
+ # super-namespace for all packages in the RepoPath
+ self.super_namespace = kwargs.get('namespace', repo_namespace)
+
+ self.repos = []
+ self.by_namespace = NamespaceTrie()
+ self.by_path = {}
+
+ self._all_package_names = []
+ self._provider_index = None
+
+ # If repo_dirs is empty, just use the configuration
+ if not repo_dirs:
+ repo_dirs = spack.config.get_config('repos')
+ if not repo_dirs:
+ raise NoRepoConfiguredError(
+ "Spack configuration contains no package repositories.")
+
+ # Add each repo to this path.
+ for root in repo_dirs:
+ try:
+ repo = Repo(root, self.super_namespace)
+ self.put_last(repo)
+ except RepoError as e:
+ tty.warn("Failed to initialize repository at '%s'." % root,
+ e.message,
+ "To remove the bad repository, run this command:",
+ " spack repo rm %s" % root)
+
+
+ def swap(self, other):
+        """Convenience function to make swapping repositories easier.
+
+ This is currently used by mock tests.
+ TODO: Maybe there is a cleaner way.
+
+ """
+ attrs = ['repos',
+ 'by_namespace',
+ 'by_path',
+ '_all_package_names',
+ '_provider_index']
+ for attr in attrs:
+ tmp = getattr(self, attr)
+ setattr(self, attr, getattr(other, attr))
+ setattr(other, attr, tmp)
+
+
+ def _add(self, repo):
+ """Add a repository to the namespace and path indexes.
+
+        Checks for duplicates -- two repos can't have the same root
+        directory, and no two repos may provide the same namespace.
+
+ """
+ if repo.root in self.by_path:
+ raise DuplicateRepoError("Duplicate repository: '%s'" % repo.root)
+
+ if repo.namespace in self.by_namespace:
+ raise DuplicateRepoError(
+ "Package repos '%s' and '%s' both provide namespace %s"
+ % (repo.root, self.by_namespace[repo.namespace].root, repo.namespace))
+
+ # Add repo to the pkg indexes
+ self.by_namespace[repo.full_namespace] = repo
+ self.by_path[repo.root] = repo
+
+ # add names to the cached name list
+ new_pkgs = set(repo.all_package_names())
+ new_pkgs.update(set(self._all_package_names))
+ self._all_package_names = sorted(new_pkgs, key=lambda n:n.lower())
+
+
+ def put_first(self, repo):
+ """Add repo first in the search path."""
+ self._add(repo)
+ self.repos.insert(0, repo)
+
+
+ def put_last(self, repo):
+ """Add repo last in the search path."""
+ self._add(repo)
+ self.repos.append(repo)
+
+
+ def remove(self, repo):
+ """Remove a repo from the search path."""
+ if repo in self.repos:
+ self.repos.remove(repo)
+
+
+ def get_repo(self, namespace, default=NOT_PROVIDED):
+ """Get a repository by namespace.
+ Arguments
+ namespace
+ Look up this namespace in the RepoPath, and return
+ it if found.
+
+ Optional Arguments
+ default
+ If default is provided, return it when the namespace
+ isn't found. If not, raise an UnknownNamespaceError.
+ """
+ fullspace = '%s.%s' % (self.super_namespace, namespace)
+ if fullspace not in self.by_namespace:
+            if default is NOT_PROVIDED:
+ raise UnknownNamespaceError(namespace)
+ return default
+ return self.by_namespace[fullspace]
+
+
+ def first_repo(self):
+ """Get the first repo in precedence order."""
+ return self.repos[0] if self.repos else None
+
+
+ def all_package_names(self):
+ """Return all unique package names in all repositories."""
+ return self._all_package_names
+
+
+ def all_packages(self):
+ for name in self.all_package_names():
+ yield self.get(name)
+
+
+ @_autospec
+ def providers_for(self, vpkg_spec):
+ if self._provider_index is None:
+ self._provider_index = ProviderIndex(self.all_package_names())
+
+ providers = self._provider_index.providers_for(vpkg_spec)
+ if not providers:
+ raise UnknownPackageError(vpkg_spec.name)
+ return providers
+
+
+ @_autospec
+ def extensions_for(self, extendee_spec):
+ return [p for p in self.all_packages() if p.extends(extendee_spec)]
+
+
+ def find_module(self, fullname, path=None):
+ """Implements precedence for overlaid namespaces.
+
+ Loop checks each namespace in self.repos for packages, and
+ also handles loading empty containing namespaces.
+
+ """
+ # namespaces are added to repo, and package modules are leaves.
+ namespace, dot, module_name = fullname.rpartition('.')
+
+ # If it's a module in some repo, or if it is the repo's
+ # namespace, let the repo handle it.
+ for repo in self.repos:
+ if namespace == repo.full_namespace:
+ if repo.real_name(module_name):
+ return repo
+ elif fullname == repo.full_namespace:
+ return repo
+
+ # No repo provides the namespace, but it is a valid prefix of
+ # something in the RepoPath.
+ if self.by_namespace.is_prefix(fullname):
+ return self
+
+ return None
+
+
+ def load_module(self, fullname):
+ """Handles loading container namespaces when necessary.
+
+ See ``Repo`` for how actual package modules are loaded.
+ """
+ if fullname in sys.modules:
+ return sys.modules[fullname]
+
+ # partition fullname into prefix and module name.
+ namespace, dot, module_name = fullname.rpartition('.')
+
+ if not self.by_namespace.is_prefix(fullname):
+ raise ImportError("No such Spack repo: %s" % fullname)
+
+ module = _make_namespace_module(namespace)
+ module.__loader__ = self
+ sys.modules[fullname] = module
+ return module
+
+
+ @_autospec
+ def repo_for_pkg(self, spec):
+ """Given a spec, get the repository for its package."""
+ # If the spec already has a namespace, then return the
+ # corresponding repo if we know about it.
+ if spec.namespace:
+ fullspace = '%s.%s' % (self.super_namespace, spec.namespace)
+ if fullspace not in self.by_namespace:
+ raise UnknownNamespaceError(spec.namespace)
+ return self.by_namespace[fullspace]
+
+ # If there's no namespace, search in the RepoPath.
+ for repo in self.repos:
+ if spec.name in repo:
+ return repo
+
+ # If the package isn't in any repo, return the one with
+ # highest precedence. This is for commands like `spack edit`
+ # that can operate on packages that don't exist yet.
+ return self.first_repo()
+
+
+ @_autospec
+ def get(self, spec, new=False):
+ """Find a repo that contains the supplied spec's package.
+
+ Raises UnknownPackageError if not found.
+ """
+ return self.repo_for_pkg(spec).get(spec)
+
+
+ def get_pkg_class(self, pkg_name):
+ """Find a class for the spec's package and return the class object."""
+ return self.repo_for_pkg(pkg_name).get_pkg_class(pkg_name)
+
+
+ @_autospec
+ def dump_provenance(self, spec, path):
+ """Dump provenance information for a spec to a particular path.
+
+ This dumps the package file and any associated patch files.
+ Raises UnknownPackageError if not found.
+ """
+ return self.repo_for_pkg(spec).dump_provenance(spec, path)
+
+
+ def dirname_for_package_name(self, pkg_name):
+ return self.repo_for_pkg(pkg_name).dirname_for_package_name(pkg_name)
+
+
+ def filename_for_package_name(self, pkg_name):
+ return self.repo_for_pkg(pkg_name).filename_for_package_name(pkg_name)
+
+
+ def exists(self, pkg_name):
+ return any(repo.exists(pkg_name) for repo in self.repos)
+
+
+ def __contains__(self, pkg_name):
+ return self.exists(pkg_name)
+
+
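+# Usage sketch (paths hypothetical): repos earlier in the path take
+# precedence when two repos define the same package:
+#
+#   repo_path = RepoPath('/opt/site-repo', '$spack/var/spack/repos/builtin')
+#   pkg = repo_path.get('mpich')   # from /opt/site-repo if it defines mpich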
+
+class Repo(object):
+ """Class representing a package repository in the filesystem.
+
+ Each package repository must have a top-level configuration file
+ called `repo.yaml`.
+
+    Currently, `repo.yaml` must define:
+
+ `namespace`:
+ A Python namespace where the repository's packages should live.
+
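+    For example, a minimal `repo.yaml` (namespace illustrative) looks like::
+
+        repo:
+          namespace: 'myrepo'
+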
+ """
+ def __init__(self, root, namespace=repo_namespace):
+ """Instantiate a package repository from a filesystem path.
+
+ Arguments:
+ root The root directory of the repository.
+
+ namespace A super-namespace that will contain the repo-defined
+                   namespace (this is generally just `spack.pkg`). The
+ super-namespace is Spack's way of separating repositories
+ from other python namespaces.
+
+ """
+        # Root directory, containing repo.yaml and package dirs.
+        # Allow roots to be spack-relative by starting with '$spack'.
+ self.root = canonicalize_path(root)
+
+ # super-namespace for all packages in the Repo
+ self.super_namespace = namespace
+
+ # check and raise BadRepoError on fail.
+ def check(condition, msg):
+ if not condition: raise BadRepoError(msg)
+
+ # Validate repository layout.
+ self.config_file = join_path(self.root, repo_config_name)
+ check(os.path.isfile(self.config_file),
+ "No %s found in '%s'" % (repo_config_name, root))
+ self.packages_path = join_path(self.root, packages_dir_name)
+ check(os.path.isdir(self.packages_path),
+              "No directory '%s' found in '%s'" % (packages_dir_name, root))
+
+ # Read configuration and validate namespace
+ config = self._read_config()
+ check('namespace' in config, '%s must define a namespace.'
+ % join_path(root, repo_config_name))
+
+ self.namespace = config['namespace']
+ check(re.match(r'[a-zA-Z][a-zA-Z0-9_.]+', self.namespace),
+ ("Invalid namespace '%s' in repo '%s'. " % (self.namespace, self.root)) +
+ "Namespaces must be valid python identifiers separated by '.'")
+
+ # Set up 'full_namespace' to include the super-namespace
+ if self.super_namespace:
+ self.full_namespace = "%s.%s" % (self.super_namespace, self.namespace)
+ else:
+ self.full_namespace = self.namespace
+
+ # Keep name components around for checking prefixes.
+ self._names = self.full_namespace.split('.')
+
+ # These are internal cache variables.
+ self._modules = {}
+ self._classes = {}
+ self._instances = {}
+ self._provider_index = None
+ self._all_package_names = None
+
+ # make sure the namespace for packages in this repo exists.
+ self._create_namespace()
+
+
+ def _create_namespace(self):
+ """Create this repo's namespace module and insert it into sys.modules.
+
+ Ensures that modules loaded via the repo have a home, and that
+ we don't get runtime warnings from Python's module system.
+
+ """
+ parent = None
+ for l in range(1, len(self._names)+1):
+ ns = '.'.join(self._names[:l])
+            if ns not in sys.modules:
+ module = _make_namespace_module(ns)
+ module.__loader__ = self
+ sys.modules[ns] = module
+
+            # Ensure the namespace is an attribute of its parent,
+ # if it has not been set by something else already.
+ #
+ # This ensures that we can do things like:
+ # import spack.pkg.builtin.mpich as mpich
+ if parent:
+ modname = self._names[l-1]
+ if not hasattr(parent, modname):
+ setattr(parent, modname, module)
+ else:
+ # no need to set up a module, but keep track of the parent.
+ module = sys.modules[ns]
+ parent = module
+
+
+ def real_name(self, import_name):
+ """Allow users to import Spack packages using Python identifiers.
+
+ A python identifier might map to many different Spack package
+ names due to hyphen/underscore ambiguity.
+
+ Easy example:
+ num3proxy -> 3proxy
+
+ Ambiguous:
+ foo_bar -> foo_bar, foo-bar
+
+ More ambiguous:
+ foo_bar_baz -> foo_bar_baz, foo-bar-baz, foo_bar-baz, foo-bar_baz
+ """
+ if import_name in self:
+ return import_name
+
+ options = possible_spack_module_names(import_name)
+ options.remove(import_name)
+ for name in options:
+ if name in self:
+ return name
+ return None
+
+
+ def is_prefix(self, fullname):
+ """True if fullname is a prefix of this Repo's namespace."""
+ parts = fullname.split('.')
+ return self._names[:len(parts)] == parts
+
+
+ def find_module(self, fullname, path=None):
+ """Python find_module import hook.
+
+ Returns this Repo if it can load the module; None if not.
+ """
+ if self.is_prefix(fullname):
+ return self
+
+ namespace, dot, module_name = fullname.rpartition('.')
+ if namespace == self.full_namespace:
+ if self.real_name(module_name):
+ return self
+
+ return None
+
+
+ def load_module(self, fullname):
+ """Python importer load hook.
+
+ Tries to load the module; raises an ImportError if it can't.
+ """
+ if fullname in sys.modules:
+ return sys.modules[fullname]
+
+ namespace, dot, module_name = fullname.rpartition('.')
+
+ if self.is_prefix(fullname):
+ module = _make_namespace_module(fullname)
+
+ elif namespace == self.full_namespace:
+ real_name = self.real_name(module_name)
+ if not real_name:
+ raise ImportError("No module %s in %s" % (module_name, self))
+ module = self._get_pkg_module(real_name)
+
+ else:
+ raise ImportError("No module %s in %s" % (fullname, self))
+
+ module.__loader__ = self
+ sys.modules[fullname] = module
+ return module
+
+
+ def _read_config(self):
+ """Check for a YAML config file in this db's root directory."""
+ try:
+ with open(self.config_file) as reponame_file:
+ yaml_data = yaml.load(reponame_file)
+
+ if (not yaml_data or 'repo' not in yaml_data or
+ not isinstance(yaml_data['repo'], dict)):
+ tty.die("Invalid %s in repository %s"
+ % (repo_config_name, self.root))
+
+ return yaml_data['repo']
+
+ except exceptions.IOError, e:
+ tty.die("Error reading %s when opening %s"
+ % (self.config_file, self.root))
+
+
+ @_autospec
+ def get(self, spec, new=False):
+ if spec.virtual:
+ raise UnknownPackageError(spec.name)
+
+ if spec.namespace and spec.namespace != self.namespace:
+ raise UnknownPackageError("Repository %s does not contain package %s"
+ % (self.namespace, spec.fullname))
+
+ key = hash(spec)
+ if new or key not in self._instances:
+ package_class = self.get_pkg_class(spec.name)
+ try:
+ copy = spec.copy() # defensive copy. Package owns its spec.
+ self._instances[key] = package_class(copy)
+ except Exception, e:
+ if spack.debug:
+ sys.excepthook(*sys.exc_info())
+ raise FailedConstructorError(spec.fullname, *sys.exc_info())
+
+ return self._instances[key]
+
+
+ @_autospec
+ def dump_provenance(self, spec, path):
+ """Dump provenance information for a spec to a particular path.
+
+ This dumps the package file and any associated patch files.
+ Raises UnknownPackageError if not found.
+ """
+ # Some preliminary checks.
+ if spec.virtual:
+ raise UnknownPackageError(spec.name)
+
+ if spec.namespace and spec.namespace != self.namespace:
+ raise UnknownPackageError("Repository %s does not contain package %s."
+ % (self.namespace, spec.fullname))
+
+ # Install any patch files needed by packages.
+ mkdirp(path)
+        for when_spec, patches in spec.package.patches.items():
+ for patch in patches:
+ if patch.path:
+ if os.path.exists(patch.path):
+ install(patch.path, path)
+ else:
+ tty.warn("Patch file did not exist: %s" % patch.path)
+
+ # Install the package.py file itself.
+ install(self.filename_for_package_name(spec), path)
+
+
+ def purge(self):
+ """Clear entire package instance cache."""
+ self._instances.clear()
+
+
+ @_autospec
+ def providers_for(self, vpkg_spec):
+ if self._provider_index is None:
+ self._provider_index = ProviderIndex(self.all_package_names())
+
+ providers = self._provider_index.providers_for(vpkg_spec)
+ if not providers:
+ raise UnknownPackageError(vpkg_spec.name)
+ return providers
+
+
+ @_autospec
+ def extensions_for(self, extendee_spec):
+ return [p for p in self.all_packages() if p.extends(extendee_spec)]
+
+
+ def _check_namespace(self, spec):
+ """Check that the spec's namespace is the same as this repository's."""
+ if spec.namespace and spec.namespace != self.namespace:
+ raise UnknownNamespaceError(spec.namespace)
+
+
+ @_autospec
+ def dirname_for_package_name(self, spec):
+ """Get the directory name for a particular package. This is the
+ directory that contains its package.py file."""
+ self._check_namespace(spec)
+ return join_path(self.packages_path, spec.name)
+
+
+ @_autospec
+ def filename_for_package_name(self, spec):
+ """Get the filename for the module we should load for a particular
+ package. Packages for a Repo live in
+ ``$root/<package_name>/package.py``
+
+ This will return a proper package.py path even if the
+ package doesn't exist yet, so callers will need to ensure
+ the package exists before importing.
+ """
+ self._check_namespace(spec)
+ pkg_dir = self.dirname_for_package_name(spec.name)
+ return join_path(pkg_dir, package_file_name)
+
+
+ def all_package_names(self):
+ """Returns a sorted list of all package names in the Repo."""
+ if self._all_package_names is None:
+ self._all_package_names = []
+
+ for pkg_name in os.listdir(self.packages_path):
+ # Skip non-directories in the package root.
+ pkg_dir = join_path(self.packages_path, pkg_name)
+ if not os.path.isdir(pkg_dir):
+ continue
+
+ # Skip directories without a package.py in them.
+ pkg_file = join_path(self.packages_path, pkg_name, package_file_name)
+ if not os.path.isfile(pkg_file):
+ continue
+
+ # Warn about invalid names that look like packages.
+ if not valid_module_name(pkg_name):
+ tty.warn("Skipping package at %s. '%s' is not a valid Spack module name."
+ % (pkg_dir, pkg_name))
+ continue
+
+ # All checks passed. Add it to the list.
+ self._all_package_names.append(pkg_name)
+ self._all_package_names.sort()
+
+ return self._all_package_names
+
+
+ def all_packages(self):
+ for name in self.all_package_names():
+ yield self.get(name)
+
+
+ def exists(self, pkg_name):
+ """Whether a package with the supplied name exists."""
+ # This does a binary search in the sorted list.
+ idx = bisect_left(self.all_package_names(), pkg_name)
+ return (idx < len(self._all_package_names) and
+ self._all_package_names[idx] == pkg_name)
+
+
+ def _get_pkg_module(self, pkg_name):
+ """Create a module for a particular package.
+
+ This caches the module within this Repo *instance*. It does
+ *not* add it to ``sys.modules``. So, you can construct
+ multiple Repos for testing and ensure that the module will be
+ loaded once per repo.
+
+ """
+ if pkg_name not in self._modules:
+ file_path = self.filename_for_package_name(pkg_name)
+
+ if not os.path.exists(file_path):
+ raise UnknownPackageError(pkg_name, self)
+
+ if not os.path.isfile(file_path):
+ tty.die("Something's wrong. '%s' is not a file!" % file_path)
+
+ if not os.access(file_path, os.R_OK):
+ tty.die("Cannot read '%s'!" % file_path)
+
+ # e.g., spack.pkg.builtin.mpich
+ fullname = "%s.%s" % (self.full_namespace, pkg_name)
+
+ module = imp.load_source(fullname, file_path)
+ module.__package__ = self.full_namespace
+ module.__loader__ = self
+ self._modules[pkg_name] = module
+
+ return self._modules[pkg_name]
+
+
+ def get_pkg_class(self, pkg_name):
+ """Get the class for the package out of its module.
+
+ First loads (or fetches from cache) a module for the
+ package. Then extracts the package class from the module
+ according to Spack's naming convention.
+ """
+ class_name = mod_to_class(pkg_name)
+ module = self._get_pkg_module(pkg_name)
+
+ cls = getattr(module, class_name)
+ if not inspect.isclass(cls):
+ tty.die("%s.%s is not a class" % (pkg_name, class_name))
+
+ return cls
+
+
+ def __str__(self):
+ return "[Repo '%s' at '%s']" % (self.namespace, self.root)
+
+
+ def __repr__(self):
+ return self.__str__()
+
+
+ def __contains__(self, pkg_name):
+ return self.exists(pkg_name)
+
+
+def create_repo(root, namespace=None):
+ """Create a new repository in root with the specified namespace.
+
+ If the namespace is not provided, use basename of root.
+ Return the canonicalized path and the namespace of the created repository.
+ """
+ root = canonicalize_path(root)
+ if not namespace:
+ namespace = os.path.basename(root)
+
+ if not re.match(r'\w[\.\w-]*', namespace):
+ raise InvalidNamespaceError("'%s' is not a valid namespace." % namespace)
+
+ existed = False
+ if os.path.exists(root):
+ if os.path.isfile(root):
+ raise BadRepoError('File %s already exists and is not a directory' % root)
+ elif os.path.isdir(root):
+ if not os.access(root, os.R_OK | os.W_OK):
+ raise BadRepoError('Cannot create new repo in %s: cannot access directory.' % root)
+ if os.listdir(root):
+ raise BadRepoError('Cannot create new repo in %s: directory is not empty.' % root)
+ existed = True
+
+ full_path = os.path.realpath(root)
+ parent = os.path.dirname(full_path)
+ if not os.access(parent, os.R_OK | os.W_OK):
+ raise BadRepoError("Cannot create repository in %s: can't access parent!" % root)
+
+ try:
+ config_path = os.path.join(root, repo_config_name)
+ packages_path = os.path.join(root, packages_dir_name)
+
+ mkdirp(packages_path)
+ with open(config_path, 'w') as config:
+ config.write("repo:\n")
+ config.write(" namespace: '%s'\n" % namespace)
+
+    except (IOError, OSError) as e:
+        # try to clean up, then report the failure.
+        if existed:
+            shutil.rmtree(config_path, ignore_errors=True)
+            shutil.rmtree(packages_path, ignore_errors=True)
+        else:
+            shutil.rmtree(root, ignore_errors=True)
+
+        raise BadRepoError('Failed to create new repository in %s.' % root,
+                           "Caused by %s: %s" % (type(e), e))
+
+ return full_path, namespace
+
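+# Example (path hypothetical): create_repo('~/my-repo', 'myrepo') creates
+# ~/my-repo/repo.yaml declaring namespace 'myrepo', plus an empty
+# ~/my-repo/packages directory, and returns the canonical path and namespace.
+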
+
+class RepoError(spack.error.SpackError):
+ """Superclass for repository-related errors."""
+
+
+class NoRepoConfiguredError(RepoError):
+ """Raised when there are no repositories configured."""
+
+
+class InvalidNamespaceError(RepoError):
+ """Raised when an invalid namespace is encountered."""
+
+
+class BadRepoError(RepoError):
+ """Raised when repo layout is invalid."""
+
+
+class DuplicateRepoError(RepoError):
+ """Raised when duplicate repos are added to a RepoPath."""
+
+
+class PackageLoadError(spack.error.SpackError):
+ """Superclass for errors related to loading packages."""
+
+
+class UnknownPackageError(PackageLoadError):
+ """Raised when we encounter a package spack doesn't have."""
+ def __init__(self, name, repo=None):
+ msg = None
+ if repo:
+ msg = "Package %s not found in repository %s" % (name, repo)
+ else:
+ msg = "Package %s not found." % name
+ super(UnknownPackageError, self).__init__(msg)
+ self.name = name
+
+
+class UnknownNamespaceError(PackageLoadError):
+ """Raised when we encounter an unknown namespace"""
+ def __init__(self, namespace):
+ super(UnknownNamespaceError, self).__init__(
+ "Unknown namespace: %s" % namespace)
+
+
+class FailedConstructorError(PackageLoadError):
+ """Raised when a package's class constructor fails."""
+ def __init__(self, name, exc_type, exc_obj, exc_tb):
+ super(FailedConstructorError, self).__init__(
+ "Class constructor failed for package '%s'." % name,
+ '\nCaused by:\n' +
+ ('%s: %s\n' % (exc_type.__name__, exc_obj)) +
+ ''.join(traceback.format_tb(exc_tb)))
+ self.name = name
diff --git a/lib/spack/spack/resource.py b/lib/spack/spack/resource.py
new file mode 100644
index 0000000000..ddfaaf4cb0
--- /dev/null
+++ b/lib/spack/spack/resource.py
@@ -0,0 +1,41 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://software.llnl.gov/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""Describes an optional resource needed for a build.
+
+Typically a bunch of sources that can be built in-tree within another
+package to enable optional features.
+
+"""
+
+
+class Resource(object):
+ """
+    Represents an optional resource. Aggregates a name, a fetcher, a
+    destination, and a placement.
+ """
+ def __init__(self, name, fetcher, destination, placement):
+ self.name = name
+ self.fetcher = fetcher
+ self.destination = destination
+ self.placement = placement
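+
+
+# Usage sketch (names hypothetical): a package that builds a bundled
+# dependency in-tree might describe it as
+#
+#   Resource('libfoo', fetcher, destination='externals', placement='libfoo')
+#
+# where `fetcher` is the fetch strategy used to retrieve the resource.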
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index db5282f40b..2bd5d85c57 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -72,7 +72,9 @@ Here is the EBNF grammar for a spec::
dep_list = { ^ spec }
spec = id [ options ]
options = { @version-list | +variant | -variant | ~variant |
- %compiler | +arch=architecture | +flag=value}
+ %compiler | arch=architecture | [ flag ]=value}
+ flag = { cflags | cxxflags | fflags | cppflags | ldflags |
+ ldlibs }
variant = id
architecture = id
compiler = id [ version-list ]
@@ -80,6 +82,9 @@ Here is the EBNF grammar for a spec::
version = id | id: | :id | id:id
id = [A-Za-z0-9_][A-Za-z0-9_.-]*
+Identifiers using the <name>=<value> syntax, such as architectures and
+compiler flags, require a space before the name.
+
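+For example (names illustrative), a spec such as
+mpileaks@1.2 %gcc@4.7 arch=linux-x86_64 cflags="-O3"
+attaches an architecture and C compiler flags; note the space before
+arch= and cflags=.
+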
There is one context-sensitive part: ids in versions may contain '.', while
other ids may not.
@@ -96,8 +101,8 @@ import hashlib
import base64
from StringIO import StringIO
from operator import attrgetter
-from external import yaml
-from external.yaml.error import MarkedYAMLError
+import yaml
+from yaml.error import MarkedYAMLError
import llnl.util.tty as tty
from llnl.util.lang import *
@@ -210,7 +215,7 @@ class CompilerSpec(object):
else:
raise TypeError(
- "__init__ takes 1, 2, or 3 arguments. (%d given)" % nargs)
+ "__init__ takes 1 or 2 arguments. (%d given)" % nargs)
def _add_version(self, version):
@@ -315,7 +320,7 @@ class VariantSpec(object):
out = '+' if self.value else '~'
return out + self.name
else:
- return '+' + self.name + "=" + self.value
+ return ' ' + self.name + "=" + self.value
class VariantMap(HashableMap):
@@ -357,7 +362,7 @@ class VariantMap(HashableMap):
@property
def concrete(self):
return self.spec._concrete or all(
- v in self for v in self.spec.package.variants)
+ v in self for v in self.spec.package_class.variants)
def copy(self):
@@ -372,7 +377,7 @@ class VariantMap(HashableMap):
return ''.join(str(self[key]) for key in sorted_keys)
-_valid_compiler_flags = ['cflags', 'cxxflags', 'fflags', 'ldflags', 'cppflags']
+_valid_compiler_flags = ['cflags', 'cxxflags', 'fflags', 'ldflags', 'ldlibs', 'cppflags']
class FlagMap(HashableMap):
def __init__(self, spec):
super(FlagMap, self).__init__()
@@ -381,16 +386,17 @@ class FlagMap(HashableMap):
def satisfies(self, other, strict=False):
#"strict" makes no sense if this works, but it matches how we need it. Maybe
        if strict:
            return all(f in self and set(self[f]) == set(other[f])
-                       for f in other if other[f] != [])
+                       for f in other)
else:
- if other.spec and other.spec.concrete:
- return all(f in self and set(self[f]) == set(other[f])
- for f in other)
- else:
- return all(f in self and set(self[f]) >= set(other[f])
- for f in other)
+ return all(f in self and set(self[f]) == set(other[f])
+ for f in other if other[f] != [])
def constrain(self, other):
@@ -433,9 +439,8 @@ class FlagMap(HashableMap):
def __str__(self):
sorted_keys = filter(lambda flag: self[flag] != [], sorted(self.keys()))
- cond_symbol = '+' if len(sorted_keys)>0 else ''
-# return '+' + '+'.join(str(key) + '=\"' + ' '.join(str(f) for f in self[key]) + '\"' for key in self)
- return cond_symbol + '+'.join(str(key) + '=\"' + ' '.join(str(f) for f in self[key]) + '\"' for key in sorted_keys)
+ cond_symbol = ' ' if len(sorted_keys)>0 else ''
+ return cond_symbol + ' '.join(str(key) + '=\"' + ' '.join(str(f) for f in self[key]) + '\"' for key in sorted_keys)
class DependencyMap(HashableMap):
@@ -483,14 +488,18 @@ class Spec(object):
self.dependencies = other.dependencies
self.variants = other.variants
self.variants.spec = self
+ self.namespace = other.namespace
# Specs are by default not assumed to be normal, but in some
# cases we've read them from a file want to assume normal.
# This allows us to manipulate specs that Spack doesn't have
# package.py files for.
- self._normal = kwargs.get('normal', False)
+ self._normal = kwargs.get('normal', False)
self._concrete = kwargs.get('concrete', False)
+ # Allow a spec to be constructed with an external path.
+ self.external = kwargs.get('external', None)
+
# This allows users to construct a spec DAG with literals.
# Note that given two specs a and b, Spec(a) copies a, but
# Spec(a, b) will copy a but just add b as a dep.
@@ -548,6 +557,13 @@ class Spec(object):
self.dependencies[spec.name] = spec
spec.dependents[self.name] = self
+ #
+ # Public interface
+ #
+ @property
+ def fullname(self):
+ return '%s.%s' % (self.namespace, self.name) if self.namespace else self.name
+
@property
def root(self):
@@ -570,7 +586,15 @@ class Spec(object):
@property
def package(self):
- return spack.db.get(self)
+ return spack.repo.get(self)
+
+
+ @property
+ def package_class(self):
+ """Internal package call gets only the class object for a package.
+ Use this to just get package metadata.
+ """
+ return spack.repo.get_pkg_class(self.name)
@property
@@ -588,7 +612,7 @@ class Spec(object):
@staticmethod
def is_virtual(name):
"""Test if a name is virtual without requiring a Spec."""
- return name != "" and not spack.db.exists(name)
+ return name != '' and not spack.repo.exists(name)
@property
@@ -601,6 +625,7 @@ class Spec(object):
return True
self._concrete = bool(not self.virtual
+ and self.namespace is not None
and self.versions.concrete
and self.variants.concrete
and self.architecture
@@ -743,6 +768,12 @@ class Spec(object):
for d in sorted(self.dependencies)),
'compiler_flags' : dict((name, value) for name, value in self.compiler_flags.items())
}
+
+ # Older concrete specs do not have a namespace. Omit for
+ # consistent hashing.
+ if not self.concrete or self.namespace:
+ d['namespace'] = self.namespace
+
if self.compiler:
d.update(self.compiler.to_dict())
else:
@@ -767,6 +798,7 @@ class Spec(object):
node = node[name]
spec = Spec(name)
+ spec.namespace = node.get('namespace', None)
spec.versions = VersionList.from_dict(node)
spec.architecture = node['arch']
@@ -859,8 +891,30 @@ class Spec(object):
"""Replace this virtual spec with a concrete spec."""
assert(self.virtual)
for name, dependent in self.dependents.items():
+ # remove self from all dependents.
del dependent.dependencies[self.name]
- dependent._add_dependency(concrete)
+
+ # add the replacement, unless it is already a dep of dependent.
+ if concrete.name not in dependent.dependencies:
+ dependent._add_dependency(concrete)
+
+
+ def _replace_node(self, replacement):
+ """Replace this spec with another.
+
+ Connects all dependents of this spec to its replacement, and
+ disconnects this spec from any dependencies it has. New spec
+ will have any dependencies the replacement had, and may need
+ to be normalized.
+
+ """
+ for name, dependent in self.dependents.items():
+ del dependent.dependencies[self.name]
+ dependent._add_dependency(replacement)
+
+ for name, dep in self.dependencies.items():
+ del dep.dependents[self.name]
+ del self.dependencies[dep.name]
def _expand_virtual_packages(self):
@@ -880,23 +934,81 @@ class Spec(object):
this are infrequent, but should implement this before it is
a problem.
"""
+ # Make an index of stuff this spec already provides
+ self_index = ProviderIndex(self.traverse(), restrict=True)
+
changed = False
- while True:
- virtuals =[v for v in self.traverse() if v.virtual]
- if not virtuals:
- return changed
-
- for spec in virtuals:
- providers = spack.db.providers_for(spec)
- concrete = spack.concretizer.choose_provider(spec, providers)
- concrete = concrete.copy()
- spec._replace_with(concrete)
- changed = True
+ done = False
+ while not done:
+ done = True
+ for spec in list(self.traverse()):
+ replacement = None
+ if spec.virtual:
+ replacement = self._find_provider(spec, self_index)
+ if replacement:
+ # TODO: may break if in-place on self but
+ # shouldn't happen if root is traversed first.
+ spec._replace_with(replacement)
+ done=False
+ break
+
+ if not replacement:
+ # Get a list of possible replacements in order of preference.
+ candidates = spack.concretizer.choose_virtual_or_external(spec)
+
+ # Try the replacements in order, skipping any that cause
+ # satisfiability problems.
+ for replacement in candidates:
+ if replacement is spec:
+ break
+
+ # Replace spec with the candidate and normalize
+ copy = self.copy()
+ copy[spec.name]._dup(replacement.copy(deps=False))
+
+ try:
+ # If there are duplicate providers or duplicate provider
+ # deps, consolidate them and merge constraints.
+ copy.normalize(force=True)
+ break
+ except SpecError as e:
+ # On error, we'll try the next replacement.
+ continue
+
+ # If replacement is external then trim the dependencies
+ if replacement.external:
+                    if spec.dependencies:
+ changed = True
+ spec.dependencies = DependencyMap()
+ replacement.dependencies = DependencyMap()
+
+ # TODO: could this and the stuff in _dup be cleaned up?
+ def feq(cfield, sfield):
+ return (not cfield) or (cfield == sfield)
+
+ if replacement is spec or (feq(replacement.name, spec.name) and
+ feq(replacement.versions, spec.versions) and
+ feq(replacement.compiler, spec.compiler) and
+ feq(replacement.architecture, spec.architecture) and
+ feq(replacement.dependencies, spec.dependencies) and
+ feq(replacement.variants, spec.variants) and
+ feq(replacement.external, spec.external)):
+ continue
+
+ # Refine this spec to the candidate. This uses
+ # replace_with AND dup so that it can work in
+ # place. TODO: make this more efficient.
+ if spec.virtual:
+ spec._replace_with(replacement)
+ changed = True
+ if spec._dup(replacement, deps=False, cleardeps=False):
+ changed = True
+ self_index.update(spec)
+ done=False
+ break
- # If there are duplicate providers or duplicate provider deps, this
- # consolidates them and merge constraints.
- changed |= self.normalize(force=True)
+ return changed
def concretize(self):
@@ -921,23 +1033,37 @@ class Spec(object):
force = False
while changed:
-#debugging code
-# print "pre-a"
-# a = self.normalize(force=force)
-# print self, "normal"
-# b = self._expand_virtual_packages()
-# print self, "expanded"
-# c = self._concretize_helper()
-# print self, "concrete-ish"
-# changes = (a,b,c)
-# print a, b, c
- changes = (self.normalize(force=force),
+ changes = (self.normalize(force),
self._expand_virtual_packages(),
self._concretize_helper())
changed = any(changes)
force=True
- self._concrete = True
+ for s in self.traverse():
+ # After concretizing, assign namespaces to anything left.
+ # Note that this doesn't count as a "change". The repository
+ # configuration is constant throughout a spack run, and
+ # normalize and concretize evaluate Packages using Repo.get(),
+ # which respects precedence. So, a namespace assignment isn't
+ # changing how a package name would have been interpreted and
+ # we can do it as late as possible to allow as much
+ # compatibility across repositories as possible.
+ if s.namespace is None:
+ s.namespace = spack.repo.repo_for_pkg(s.name).namespace
+
+ # Mark everything in the spec as concrete, as well.
+ self._mark_concrete()
+
+
+ def _mark_concrete(self):
+ """Mark this spec and its dependencies as concrete.
+
+ Only for internal use -- client code should use "concretize"
+ unless there is a need to force a spec to be concrete.
+ """
+ for s in self.traverse():
+ s._normal = True
+ s._concrete = True
def concretized(self):
@@ -1012,7 +1138,7 @@ class Spec(object):
the dependency. If no conditions are True (and we don't
depend on it), return None.
"""
- pkg = spack.db.get(self.name)
+ pkg = spack.repo.get(self.fullname)
conditions = pkg.dependencies[name]
# evaluate when specs to figure out constraints on the dependency.
@@ -1033,8 +1159,8 @@ class Spec(object):
def _find_provider(self, vdep, provider_index):
"""Find provider for a virtual spec in the provider index.
- Raise an exception if there is a conflicting virtual
- dependency already in this spec.
+ Raise an exception if there is a conflicting virtual
+ dependency already in this spec.
"""
assert(vdep.virtual)
providers = provider_index.providers_for(vdep)
@@ -1075,16 +1201,14 @@ class Spec(object):
"""
changed = False
- # If it's a virtual dependency, try to find a provider and
- # merge that.
+ # If it's a virtual dependency, try to find an existing
+ # provider in the spec, and merge that.
if dep.virtual:
visited.add(dep.name)
provider = self._find_provider(dep, provider_index)
if provider:
dep = provider
else:
- # if it's a real dependency, check whether it provides
- # something already required in the spec.
index = ProviderIndex([dep], restrict=True)
for vspec in (v for v in spec_deps.values() if v.virtual):
if index.providers_for(vspec):
@@ -1129,7 +1253,7 @@ class Spec(object):
# if we descend into a virtual spec, there's nothing more
# to normalize. Concretize will finish resolving it later.
- if self.virtual:
+ if self.virtual or self.external:
return False
# Combine constraints from package deps with constraints from
@@ -1137,7 +1261,7 @@ class Spec(object):
any_change = False
changed = True
- pkg = spack.db.get(self.name)
+ pkg = spack.repo.get(self.fullname)
while changed:
changed = False
for dep_name in pkg.dependencies:
@@ -1157,16 +1281,14 @@ class Spec(object):
the root, and ONLY the ones that were explicitly provided are there.
Normalization turns a partial flat spec into a DAG, where:
- 1. ALL dependencies of the root package are in the DAG.
- 2. Each node's dependencies dict only contains its direct deps.
+ 1. Known dependencies of the root package are in the DAG.
+ 2. Each node's dependencies dict only contains its known direct deps.
3. There is only ONE unique spec for each package in the DAG.
* This includes virtual packages. If there a non-virtual
package that provides a virtual package that is in the spec,
then we replace the virtual package with the non-virtual one.
- 4. The spec DAG matches package DAG, including default variant values.
-
TODO: normalize should probably implement some form of cycle detection,
to ensure that the spec is actually a DAG.
"""
@@ -1182,13 +1304,14 @@ class Spec(object):
# Get all the dependencies into one DependencyMap
spec_deps = self.flat_dependencies(copy=False)
- # Initialize index of virtual dependency providers
- index = ProviderIndex(spec_deps.values(), restrict=True)
+ # Initialize index of virtual dependency providers if
+ # concretize didn't pass us one already
+ provider_index = ProviderIndex(spec_deps.values(), restrict=True)
# traverse the package DAG and fill out dependencies according
# to package files & their 'when' specs
visited = set()
- any_change = self._normalize_helper(visited, spec_deps, index)
+ any_change = self._normalize_helper(visited, spec_deps, provider_index)
# If there are deps specified but not visited, they're not
# actually deps of this package. Raise an error.
@@ -1215,9 +1338,9 @@ class Spec(object):
UnsupportedCompilerError.
"""
for spec in self.traverse():
- # Don't get a package for a virtual name or an anonymous name
- if (not spec.virtual) and spack.db.exists(spec.name):
- spack.db.get(spec.name)
+ # Don't get a package for a virtual name.
+ if not spec.virtual and spec.name != '':
+ spack.repo.get(spec.fullname)
# validate compiler in addition to the package name.
if spec.compiler:
@@ -1226,7 +1349,7 @@ class Spec(object):
# Ensure that variants all exist.
for vname, variant in spec.variants.items():
- if vname not in spec.package.variants:
+ if vname not in spec.package_class.variants:
raise UnknownVariantError(spec.name, vname)
@@ -1240,6 +1363,10 @@ class Spec(object):
if not (self.name == other.name or self.name == "" or other.name == ""):
raise UnsatisfiableSpecNameError(self.name, other.name)
+ if other.namespace is not None:
+ if self.namespace is not None and other.namespace != self.namespace:
+ raise UnsatisfiableSpecNameError(self.fullname, other.fullname)
+
if not self.versions.overlaps(other.versions):
raise UnsatisfiableVersionSpecError(self.versions, other.versions)
@@ -1285,7 +1412,7 @@ class Spec(object):
# TODO: might want more detail than this, e.g. specific deps
# in violation. if this becomes a priority get rid of this
- # check and be more specici about what's wrong.
+ # check and be more specific about what's wrong.
if not other.satisfies_dependencies(self):
raise UnsatisfiableDependencySpecError(other, self)
@@ -1312,6 +1439,13 @@ class Spec(object):
return common
+ def constrained(self, other, deps=True):
+ """Return a constrained copy without modifying this spec."""
+ clone = self.copy(deps=deps)
+ clone.constrain(other, deps)
+ return clone
+
+
def dep_difference(self, other):
"""Returns dependencies in self that are not in other."""
mine = set(s.name for s in self.traverse(root=False))
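The `constrained()` helper added above follows the common copy-then-mutate pattern: mutate a clone, never `self`. A minimal, self-contained sketch of that pattern (hypothetical `Box` class, not Spack's API):

```
import copy

class Box(object):
    """Toy stand-in for an object with an in-place constrain()."""
    def __init__(self, items):
        self.items = set(items)

    def constrain(self, other):
        self.items |= other.items      # merge constraints in place

    def constrained(self, other):
        clone = copy.deepcopy(self)    # never mutate self
        clone.constrain(other)
        return clone

a, b = Box({'x'}), Box({'y'})
c = a.constrained(b)
print(sorted(a.items))  # ['x']       -- original untouched
print(sorted(c.items))  # ['x', 'y']  -- constrained copy
```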
@@ -1354,7 +1488,7 @@ class Spec(object):
# A concrete provider can satisfy a virtual dependency.
if not self.virtual and other.virtual:
- pkg = spack.db.get(self.name)
+ pkg = spack.repo.get(self.fullname)
if pkg.provides(other.name):
for provided, when_spec in pkg.provided.items():
if self.satisfies(when_spec, deps=False, strict=strict):
@@ -1366,6 +1500,11 @@ class Spec(object):
if self.name != other.name and self.name != "" and other.name != "":
return False
+ # namespaces either match, or other doesn't require one.
+ if other.namespace is not None:
+ if self.namespace is not None and self.namespace != other.namespace:
+ return False
+
if self.versions and other.versions:
if not self.versions.satisfies(other.versions, strict=strict):
return False
@@ -1465,16 +1604,27 @@ class Spec(object):
Whether deps should be copied too. Set to false to copy a
spec but not its dependencies.
"""
+ # We don't count dependencies as changes here
+ changed = True
+ if hasattr(self, 'name'):
+            changed = (self.name != other.name or self.versions != other.versions or
+                       self.architecture != other.architecture or self.compiler != other.compiler or
+                       self.variants != other.variants or self._normal != other._normal or
+                       self.concrete != other.concrete or self.external != other.external)
+
# Local node attributes get copied first.
self.name = other.name
self.versions = other.versions.copy()
self.architecture = other.architecture
self.compiler = other.compiler.copy() if other.compiler else None
+ if kwargs.get('cleardeps', True):
+ self.dependents = DependencyMap()
+ self.dependencies = DependencyMap()
self.compiler_flags = other.compiler_flags.copy()
- self.dependents = DependencyMap()
- self.dependencies = DependencyMap()
self.variants = other.variants.copy()
self.variants.spec = self
+ self.external = other.external
+ self.namespace = other.namespace
# If we copy dependencies, preserve DAG structure in the new spec
if kwargs.get('deps', True):
@@ -1492,6 +1642,8 @@ class Spec(object):
# Since we preserved structure, we can copy _normal safely.
self._normal = other._normal
self._concrete = other._concrete
+ self.external = other.external
+ return changed
def copy(self, **kwargs):
@@ -1592,11 +1744,14 @@ class Spec(object):
def _cmp_node(self):
"""Comparison key for just *this node* and not its deps."""
-# if self.compiler:
-# return (self.name, self.versions, self.variants,
-# self.architecture, self.compiler._cmp_key())
- return (self.name, self.versions, self.variants,
- self.architecture, self.compiler, self.compiler_flags)
+ return (self.name,
+ self.namespace,
+ self.versions,
+ self.variants,
+ self.architecture,
+ self.compiler,
+ self.compiler_flags)
+
def eq_node(self, other):
"""Equality with another spec, not including dependencies."""
@@ -1609,11 +1764,15 @@ class Spec(object):
def _cmp_key(self):
- """Comparison key for this node and all dependencies *without*
- considering structure. This is the default, as
- normalization will restore structure.
+ """This returns a key for the spec *including* DAG structure.
+
+ The key is the concatenation of:
+ 1. A tuple describing this node in the DAG.
+ 2. The hash of each of this node's dependencies' cmp_keys.
"""
- return self._cmp_node() + (self.sorted_deps())
+ return self._cmp_node() + (
+ tuple(hash(self.dependencies[name])
+ for name in sorted(self.dependencies)),)
def colorized(self):
@@ -1625,16 +1784,32 @@ class Spec(object):
in the format string. The format strings you can provide are::
$_ Package name
- $@ Version
- $% Compiler
- $%@ Compiler & compiler version
- $%+ Compiler and compiler flags
- $%@+ Compiler, compiler version, and compiler flags
+ $. Full package name (with namespace)
+ $@ Version with '@' prefix
+ $% Compiler with '%' prefix
+ $%@ Compiler with '%' prefix & compiler version with '@' prefix
+ $%+ Compiler with '%' prefix & compiler flags prefixed by name
+ $%@+ Compiler, compiler version, and compiler flags with same prefixes as above
$+ Options
- $= Architecture
- $# 7-char prefix of DAG hash
+ $= Architecture prefixed by 'arch='
+ $# 7-char prefix of DAG hash with '-' prefix
$$ $
+ You can also use full-string versions, which leave off the prefixes:
+
+ ${PACKAGE} Package name
+ ${VERSION} Version
+ ${COMPILER} Full compiler string
+ ${COMPILERNAME} Compiler name
+ ${COMPILERVER} Compiler version
+ ${COMPILERFLAGS} Compiler flags
+ ${OPTIONS} Options
+ ${ARCHITECTURE} Architecture
+           ${SHA1}                 Dependencies 7-char sha1 prefix
+
+ ${SPACK_ROOT} The spack root directory
+ ${SPACK_INSTALL} The default spack install directory, ${SPACK_PREFIX}/opt
+
Optionally you can provide a width, e.g. $20_ for a 20-wide name.
Like printf, you can provide '-' for left justification, e.g.
$-20_ for a left-justified name.
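A hedged illustration of the tokens above. This needs a Spack environment to run, so it is shown as comments; the spec and the outputs are hypothetical:

```
# s = Spec('mpileaks@2.3%gcc@4.9+debug')
# s.format('$_$@$%@$+')              # -> 'mpileaks@2.3%gcc@4.9+debug'
# s.format('${PACKAGE}-${VERSION}')  # -> 'mpileaks-2.3'
# s.format('$-20_')                  # -> 'mpileaks            ' (left-justified)
```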
@@ -1650,7 +1825,8 @@ class Spec(object):
color = kwargs.get('color', False)
length = len(format_string)
out = StringIO()
- escape = compiler = False
+ named = escape = compiler = False
+ named_str = fmt = ''
def write(s, c):
if color:
@@ -1674,6 +1850,8 @@ class Spec(object):
if c == '_':
out.write(fmt % self.name)
+ elif c == '.':
+ out.write(fmt % self.fullname)
elif c == '@':
if self.versions and self.versions != _any_version:
write(fmt % (c + str(self.versions)), c)
@@ -1686,13 +1864,16 @@ class Spec(object):
write(fmt % str(self.variants), c)
elif c == '=':
if self.architecture:
- write(fmt % ('+arch' + c + str(self.architecture)), c)
+ write(fmt % (' arch' + c + str(self.architecture)), c)
elif c == '#':
out.write('-' + fmt % (self.dag_hash(7)))
elif c == '$':
- if fmt != '':
+ if fmt != '%s':
raise ValueError("Can't use format width with $$.")
out.write('$')
+ elif c == '{':
+ named = True
+ named_str = ''
escape = False
elif compiler:
@@ -1711,6 +1892,45 @@ class Spec(object):
out.write(c)
compiler = False
+ elif named:
+ if not c == '}':
+ if i == length - 1:
+ raise ValueError("Error: unterminated ${ in format: '%s'"
+ % format_string)
+ named_str += c
+                    continue
+ if named_str == 'PACKAGE':
+ write(fmt % self.name, '@')
+                elif named_str == 'VERSION':
+ if self.versions and self.versions != _any_version:
+ write(fmt % str(self.versions), '@')
+ elif named_str == 'COMPILER':
+ if self.compiler:
+ write(fmt % self.compiler, '%')
+ elif named_str == 'COMPILERNAME':
+ if self.compiler:
+ write(fmt % self.compiler.name, '%')
+ elif named_str == 'COMPILERVER':
+ if self.compiler:
+ write(fmt % self.compiler.versions, '%')
+ elif named_str == 'COMPILERFLAGS':
+ if self.compiler:
+ write(fmt % str(self.compiler_flags), '%')
+ elif named_str == 'OPTIONS':
+ if self.variants:
+ write(fmt % str(self.variants), '+')
+ elif named_str == 'ARCHITECTURE':
+ if self.architecture:
+ write(fmt % str(self.architecture), '=')
+ elif named_str == 'SHA1':
+ if self.dependencies:
+ out.write(fmt % str(self.dag_hash(7)))
+ elif named_str == 'SPACK_ROOT':
+ out.write(fmt % spack.prefix)
+ elif named_str == 'SPACK_INSTALL':
+ out.write(fmt % spack.install_path)
+
+ named = False
elif c == '$':
escape = True
@@ -1878,19 +2098,25 @@ class SpecParser(spack.parse.Parser):
def spec(self, name, check_valid_token = False):
"""Parse a spec out of the input. If a spec is supplied, then initialize
and return it instead of creating a new one."""
- if name != '':
- self.check_identifier()
+ spec_namespace, dot, spec_name = name.rpartition('.')
+ if not spec_namespace:
+ spec_namespace = None
+
+ if spec_name != '':
+ self.check_identifier(spec_name)
# This will init the spec without calling __init__.
spec = Spec.__new__(Spec)
- spec.name = name
+ spec.name = spec_name
spec.versions = VersionList()
spec.variants = VariantMap(spec)
spec.architecture = None
spec.compiler = None
+ spec.external = None
spec.compiler_flags = FlagMap(spec)
spec.dependents = DependencyMap()
spec.dependencies = DependencyMap()
+ spec.namespace = spec_namespace
spec._normal = False
spec._concrete = False
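The namespace split above relies on `str.rpartition`, which returns empty strings when the separator is absent, so the namespace falsy-checks to `None` for plain package names. A runnable stdlib demonstration:

```
for name in ('builtin.mpich', 'mpich'):
    namespace, _, pkg = name.rpartition('.')
    # rpartition returns ('', '', name) when there is no '.'
    print('%s -> namespace=%r, name=%r' % (name, namespace or None, pkg))

# builtin.mpich -> namespace='builtin', name='mpich'
# mpich -> namespace=None, name='mpich'
```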
@@ -2001,7 +2227,6 @@ class SpecParser(spack.parse.Parser):
compiler = CompilerSpec.__new__(CompilerSpec)
compiler.name = self.token.value
compiler.versions = VersionList()
-# compiler.flags = {'cflags':None,'cxxflags':None,'fflags':None,'ldflags':None}
if self.accept(AT):
vlist = self.version_list()
for version in vlist:
@@ -2011,12 +2236,14 @@ class SpecParser(spack.parse.Parser):
return compiler
- def check_identifier(self):
+ def check_identifier(self, id=None):
"""The only identifiers that can contain '.' are versions, but version
ids are context-sensitive so we have to check on a case-by-case
basis. Call this if we detect a version id where it shouldn't be.
"""
- if '.' in self.token.value:
+ if not id:
+ id = self.token.value
+ if '.' in id:
self.last_token_error("Identifier cannot contain '.'")
@@ -2145,7 +2372,6 @@ class MultipleProviderError(SpecError):
self.vpkg = vpkg
self.providers = providers
-
class UnsatisfiableSpecError(SpecError):
"""Raised when a spec conflicts with package constraints.
Provide the requirement that was violated when raising."""
diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py
index 78930ecb5b..d711752c20 100644
--- a/lib/spack/spack/stage.py
+++ b/lib/spack/spack/stage.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,53 +23,73 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
-import re
+import errno
import shutil
import tempfile
+from urlparse import urljoin
import llnl.util.tty as tty
from llnl.util.filesystem import *
+import spack.util.pattern as pattern
+
import spack
import spack.config
import spack.fetch_strategy as fs
import spack.error
-
STAGE_PREFIX = 'spack-stage-'
class Stage(object):
- """A Stage object manaages a directory where some source code is
- downloaded and built before being installed. It handles
- fetching the source code, either as an archive to be expanded
- or by checking it out of a repository. A stage's lifecycle
- looks like this:
-
- Stage()
- Constructor creates the stage directory.
- fetch()
- Fetch a source archive into the stage.
- expand_archive()
- Expand the source archive.
- <install>
- Build and install the archive. This is handled by the Package class.
- destroy()
- Remove the stage once the package has been installed.
-
- If spack.use_tmp_stage is True, spack will attempt to create stages
- in a tmp directory. Otherwise, stages are created directly in
- spack.stage_path.
-
- There are two kinds of stages: named and unnamed. Named stages can
- persist between runs of spack, e.g. if you fetched a tarball but
- didn't finish building it, you won't have to fetch it again.
-
- Unnamed stages are created using standard mkdtemp mechanisms or
- similar, and are intended to persist for only one run of spack.
+ """Manages a temporary stage directory for building.
+
+ A Stage object is a context manager that handles a directory where
+ some source code is downloaded and built before being installed.
+ It handles fetching the source code, either as an archive to be
+ expanded or by checking it out of a repository. A stage's
+ lifecycle looks like this:
+
+ ```
+ with Stage() as stage: # Context manager creates and destroys the stage directory
+ stage.fetch() # Fetch a source archive into the stage.
+ stage.expand_archive() # Expand the source archive.
+ <install> # Build and install the archive. (handled by user of Stage)
+ ```
+
+ When used as a context manager, the stage is automatically
+ destroyed if no exception is raised by the context. If an
+    exception is raised, the stage is left in the filesystem and NOT
+ destroyed, for potential reuse later.
+
+ You can also use the stage's create/destroy functions manually,
+ like this:
+
+ ```
+ stage = Stage()
+ try:
+ stage.create() # Explicitly create the stage directory.
+ stage.fetch() # Fetch a source archive into the stage.
+ stage.expand_archive() # Expand the source archive.
+ <install> # Build and install the archive. (handled by user of Stage)
+ finally:
+ stage.destroy() # Explicitly destroy the stage directory.
+ ```
+
+ If spack.use_tmp_stage is True, spack will attempt to create
+ stages in a tmp directory. Otherwise, stages are created directly
+ in spack.stage_path.
+
+ There are two kinds of stages: named and unnamed. Named stages
+ can persist between runs of spack, e.g. if you fetched a tarball
+ but didn't finish building it, you won't have to fetch it again.
+
+ Unnamed stages are created using standard mkdtemp mechanisms or
+ similar, and are intended to persist for only one run of spack.
"""
- def __init__(self, url_or_fetch_strategy, **kwargs):
+ def __init__(self, url_or_fetch_strategy,
+ name=None, mirror_path=None, keep=False, path=None):
"""Create a stage object.
Parameters:
url_or_fetch_strategy
@@ -81,32 +101,77 @@ class Stage(object):
and will persist between runs (or if you construct another
stage object later). If name is not provided, then this
stage will be given a unique name automatically.
+
+ mirror_path
+ If provided, Stage will search Spack's mirrors for
+ this archive at the mirror_path, before using the
+ default fetch strategy.
+
+ keep
+ By default, when used as a context manager, the Stage
+ is deleted on exit when no exceptions are raised.
+ Pass True to keep the stage intact even if no
+ exceptions are raised.
"""
+ # TODO: fetch/stage coupling needs to be reworked -- the logic
+ # TODO: here is convoluted and not modular enough.
if isinstance(url_or_fetch_strategy, basestring):
self.fetcher = fs.from_url(url_or_fetch_strategy)
elif isinstance(url_or_fetch_strategy, fs.FetchStrategy):
self.fetcher = url_or_fetch_strategy
else:
raise ValueError("Can't construct Stage without url or fetch strategy")
-
self.fetcher.set_stage(self)
- self.name = kwargs.get('name')
- self.mirror_path = kwargs.get('mirror_path')
-
+ self.default_fetcher = self.fetcher # self.fetcher can change with mirrors.
+ self.skip_checksum_for_mirror = True # used for mirrored archives of repositories.
+
+        # TODO : this uses a protected member of tempfile, but it seemed the only way to get a temporary name
+        # TODO : also, the temporary link name won't be the same as the temporary stage area in tmp_root
+ self.name = name
+ if name is None:
+ self.name = STAGE_PREFIX + next(tempfile._get_candidate_names())
+ self.mirror_path = mirror_path
self.tmp_root = find_tmp_root()
- self.path = None
- self._setup()
+        # Construct the path to the stage directory. If this is a
+        # named stage, then construct a named path.
+ if path is not None:
+ self.path = path
+ else:
+ self.path = join_path(spack.stage_path, self.name)
+
+ # Flag to decide whether to delete the stage folder on exit or not
+ self.keep = keep
+
+
+ def __enter__(self):
+ """
+ Entering a stage context will create the stage directory
+
+ Returns:
+ self
+ """
+ self.create()
+ return self
+
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ """
+ Exiting from a stage context will delete the stage directory unless:
+ - it was explicitly requested not to do so
+ - an exception has been raised
+ Args:
+ exc_type: exception type
+ exc_val: exception value
+ exc_tb: exception traceback
- def _cleanup_dead_links(self):
- """Remove any dead links in the stage directory."""
- for file in os.listdir(spack.stage_path):
- path = join_path(spack.stage_path, file)
- if os.path.islink(path):
- real_path = os.path.realpath(path)
- if not os.path.exists(path):
- os.unlink(path)
+ Returns:
+            None -- any exception raised in the context is propagated
+ """
+ # Delete when there are no exceptions, unless asked to keep.
+ if exc_type is None and not self.keep:
+ self.destroy()
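A generic, self-contained analogue of this exit policy (not Spack's API): destroy on a clean exit unless asked to keep.

```
class Scratch(object):
    def __init__(self, keep=False):
        self.keep = keep

    def __enter__(self):
        print('create')
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Delete only on a clean exit, unless told to keep.
        if exc_type is None and not self.keep:
            print('destroy')

with Scratch():
    pass            # prints: create, destroy
with Scratch(keep=True):
    pass            # prints: create (directory survives)
```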
def _need_to_create_path(self):
@@ -126,11 +191,11 @@ class Stage(object):
# Path looks ok, but need to check the target of the link.
if os.path.islink(self.path):
real_path = os.path.realpath(self.path)
- real_tmp = os.path.realpath(self.tmp_root)
+ real_tmp = os.path.realpath(self.tmp_root)
if spack.use_tmp_stage:
- # If we're using a tmp dir, it's a link, and it points at the right spot,
- # then keep it.
+ # If we're using a tmp dir, it's a link, and it points at the
+ # right spot, then keep it.
if (real_path.startswith(real_tmp) and os.path.exists(real_path)):
return False
else:
@@ -145,139 +210,128 @@ class Stage(object):
return False
-
- def _setup(self):
- """Creates the stage directory.
- If spack.use_tmp_stage is False, the stage directory is created
- directly under spack.stage_path.
-
- If spack.use_tmp_stage is True, this will attempt to create a
- stage in a temporary directory and link it into spack.stage_path.
- Spack will use the first writable location in spack.tmp_dirs to
- create a stage. If there is no valid location in tmp_dirs, fall
- back to making the stage inside spack.stage_path.
- """
- # Create the top-level stage directory
- mkdirp(spack.stage_path)
- self._cleanup_dead_links()
-
- # If this is a named stage, then construct a named path.
- if self.name is not None:
- self.path = join_path(spack.stage_path, self.name)
-
- # If this is a temporary stage, them make the temp directory
- tmp_dir = None
- if self.tmp_root:
- if self.name is None:
- # Unnamed tmp root. Link the path in
- tmp_dir = tempfile.mkdtemp('', STAGE_PREFIX, self.tmp_root)
- self.name = os.path.basename(tmp_dir)
- self.path = join_path(spack.stage_path, self.name)
- if self._need_to_create_path():
- os.symlink(tmp_dir, self.path)
-
- else:
- if self._need_to_create_path():
- tmp_dir = tempfile.mkdtemp('', STAGE_PREFIX, self.tmp_root)
- os.symlink(tmp_dir, self.path)
-
- # if we're not using a tmp dir, create the stage directly in the
- # stage dir, rather than linking to it.
- else:
- if self.name is None:
- self.path = tempfile.mkdtemp('', STAGE_PREFIX, spack.stage_path)
- self.name = os.path.basename(self.path)
- else:
- if self._need_to_create_path():
- mkdirp(self.path)
-
- # Make sure we can actually do something with the stage we made.
- ensure_access(self.path)
-
-
@property
def archive_file(self):
"""Path to the source archive within this stage directory."""
- if not isinstance(self.fetcher, fs.URLFetchStrategy):
- return None
+ paths = []
+ if isinstance(self.fetcher, fs.URLFetchStrategy):
+ paths.append(os.path.join(self.path, os.path.basename(self.fetcher.url)))
- paths = [os.path.join(self.path, os.path.basename(self.fetcher.url))]
if self.mirror_path:
paths.append(os.path.join(self.path, os.path.basename(self.mirror_path)))
for path in paths:
if os.path.exists(path):
return path
- return None
-
+ else:
+ return None
@property
def source_path(self):
- """Returns the path to the expanded/checked out source code
- within this fetch strategy's path.
+ """Returns the path to the expanded/checked out source code.
+
+ To find the source code, this method searches for the first
+ subdirectory of the stage that it can find, and returns it.
+ This assumes nothing besides the archive file will be in the
+ stage path, but it has the advantage that we don't need to
+ know the name of the archive or its contents.
- This assumes nothing else is going ot be put in the
- FetchStrategy's path. It searches for the first
- subdirectory of the path it can find, then returns that.
+ If the fetch strategy is not supposed to expand the downloaded
+ file, it will just return the stage path. If the archive needs
+ to be expanded, it will return None when no archive is found.
"""
+ if isinstance(self.fetcher, fs.URLFetchStrategy):
+ if not self.fetcher.expand_archive:
+ return self.path
+
for p in [os.path.join(self.path, f) for f in os.listdir(self.path)]:
if os.path.isdir(p):
return p
return None
-
def chdir(self):
"""Changes directory to the stage path. Or dies if it is not set up."""
if os.path.isdir(self.path):
os.chdir(self.path)
else:
- tty.die("Setup failed: no such directory: " + self.path)
-
+ raise ChdirError("Setup failed: no such directory: " + self.path)
- def fetch(self):
+ def fetch(self, mirror_only=False):
"""Downloads an archive or checks out code from a repository."""
self.chdir()
- fetchers = [self.fetcher]
+ fetchers = []
+ if not mirror_only:
+ fetchers.append(self.default_fetcher)
# TODO: move mirror logic out of here and clean it up!
+ # TODO: Or @alalazo may have some ideas about how to use a
+ # TODO: CompositeFetchStrategy here.
+ self.skip_checksum_for_mirror = True
if self.mirror_path:
- urls = ["%s/%s" % (m, self.mirror_path) for m in _get_mirrors()]
-
+ mirrors = spack.config.get_config('mirrors')
+
+            # Join URLs of mirror roots with mirror paths. urljoin()
+            # strips everything past the final '/' in the root, so we
+            # add a '/' if it is not present.
+ mirror_roots = [root if root.endswith('/') else root + '/'
+ for root in mirrors.values()]
+ urls = [urljoin(root, self.mirror_path) for root in mirror_roots]
+
+ # If this archive is normally fetched from a tarball URL,
+ # then use the same digest. `spack mirror` ensures that
+ # the checksum will be the same.
digest = None
- if isinstance(self.fetcher, fs.URLFetchStrategy):
- digest = self.fetcher.digest
- fetchers = [fs.URLFetchStrategy(url, digest)
- for url in urls] + fetchers
- for f in fetchers:
- f.set_stage(self)
+ if isinstance(self.default_fetcher, fs.URLFetchStrategy):
+ digest = self.default_fetcher.digest
+
+ # Have to skip the checksum for things archived from
+ # repositories. How can this be made safer?
+ self.skip_checksum_for_mirror = not bool(digest)
+
+ # Add URL strategies for all the mirrors with the digest
+ for url in urls:
+ fetchers.insert(0, fs.URLFetchStrategy(url, digest))
for fetcher in fetchers:
try:
- fetcher.fetch()
+ fetcher.set_stage(self)
+ self.fetcher = fetcher
+ self.fetcher.fetch()
break
- except spack.error.SpackError, e:
+ except spack.error.SpackError as e:
tty.msg("Fetching from %s failed." % fetcher)
tty.debug(e)
continue
else:
errMessage = "All fetchers failed for %s" % self.name
+ self.fetcher = self.default_fetcher
raise fs.FetchError(errMessage, None)
-
def check(self):
"""Check the downloaded archive against a checksum digest.
No-op if this stage checks code out of a repository."""
- self.fetcher.check()
-
+ if self.fetcher is not self.default_fetcher and self.skip_checksum_for_mirror:
+ tty.warn("Fetching from mirror without a checksum!",
+ "This package is normally checked out from a version "
+ "control system, but it has been archived on a spack "
+ "mirror. This means we cannot know a checksum for the "
+ "tarball in advance. Be sure that your connection to "
+ "this mirror is secure!.")
+ else:
+ self.fetcher.check()
def expand_archive(self):
"""Changes to the stage directory and attempt to expand the downloaded
archive. Fail if the stage is not set up or if the archive is not yet
downloaded.
"""
- self.fetcher.expand()
-
+ archive_dir = self.source_path
+ if not archive_dir:
+ self.fetcher.expand()
+ tty.msg("Created stage in %s" % self.path)
+ else:
+ tty.msg("Already staged %s in %s" % (self.name, self.path))
def chdir_to_source(self):
"""Changes directory to the expanded archive directory.
@@ -291,16 +345,41 @@ class Stage(object):
if not os.listdir(path):
tty.die("Archive was empty for %s" % self.name)
-
def restage(self):
"""Removes the expanded archive path if it exists, then re-expands
the archive.
"""
self.fetcher.reset()
+ def create(self):
+ """
+ Creates the stage directory
+
+ If self.tmp_root evaluates to False, the stage directory is
+ created directly under spack.stage_path, otherwise this will
+ attempt to create a stage in a temporary directory and link it
+ into spack.stage_path.
+
+ Spack will use the first writable location in spack.tmp_dirs
+ to create a stage. If there is no valid location in tmp_dirs,
+ fall back to making the stage inside spack.stage_path.
+ """
+ # Create the top-level stage directory
+ mkdirp(spack.stage_path)
+ remove_dead_links(spack.stage_path)
+        # If a tmp_root exists, create the stage directory there and
+        # link it into the stage area; otherwise create the stage
+        # directory directly in self.path.
+ if self._need_to_create_path():
+ if self.tmp_root:
+ tmp_dir = tempfile.mkdtemp('', STAGE_PREFIX, self.tmp_root)
+ os.symlink(tmp_dir, self.path)
+ else:
+ mkdirp(self.path)
+ # Make sure we can actually do something with the stage we made.
+ ensure_access(self.path)
def destroy(self):
- """Remove this stage directory."""
+ """Removes this stage directory."""
remove_linked_tree(self.path)
# Make sure we don't end up in a removed directory
@@ -310,8 +389,82 @@ class Stage(object):
os.chdir(os.path.dirname(self.path))
+class ResourceStage(Stage):
+ def __init__(self, url_or_fetch_strategy, root, resource, **kwargs):
+ super(ResourceStage, self).__init__(url_or_fetch_strategy, **kwargs)
+ self.root_stage = root
+ self.resource = resource
+
+ def expand_archive(self):
+ super(ResourceStage, self).expand_archive()
+ root_stage = self.root_stage
+ resource = self.resource
+ placement = os.path.basename(self.source_path) if resource.placement is None else resource.placement
+ if not isinstance(placement, dict):
+ placement = {'': placement}
+ # Make the paths in the dictionary absolute and link
+ for key, value in placement.iteritems():
+ target_path = join_path(root_stage.source_path, resource.destination)
+ destination_path = join_path(target_path, value)
+ source_path = join_path(self.source_path, key)
+
+ try:
+ os.makedirs(target_path)
+ except OSError as err:
+ if err.errno == errno.EEXIST and os.path.isdir(target_path):
+ pass
+ else:
+ raise
+
+ if not os.path.exists(destination_path):
+                # Move the expanded resource into place
+ tty.info('Moving resource stage\n\tsource : {stage}\n\tdestination : {destination}'.format(
+ stage=source_path, destination=destination_path
+ ))
+ shutil.move(source_path, destination_path)
+
+
+@pattern.composite(method_list=['fetch', 'create', 'check', 'expand_archive', 'restage', 'destroy'])
+class StageComposite:
+ """
+ Composite for Stage type objects. The first item in this composite is considered to be the root package, and
+ operations that return a value are forwarded to it.
+ """
+ #
+ # __enter__ and __exit__ delegate to all stages in the composite.
+ #
+ def __enter__(self):
+ for item in self:
+ item.__enter__()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ for item in reversed(self):
+ item.keep = getattr(self, 'keep', False)
+ item.__exit__(exc_type, exc_val, exc_tb)
+
+ #
+ # Below functions act only on the *first* stage in the composite.
+ #
+ @property
+ def source_path(self):
+ return self[0].source_path
+
+ @property
+ def path(self):
+ return self[0].path
+
+ def chdir_to_source(self):
+ return self[0].chdir_to_source()
+
+ @property
+ def archive_file(self):
+ return self[0].archive_file
+
+
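A minimal sketch of the composite idea applied here (hypothetical classes, not the real machinery behind the `pattern.composite` decorator): mutating calls broadcast to every child, while value-returning queries go to the first (root) item.

```
class Composite(list):
    def fetch(self):
        for item in self:
            item.fetch()               # broadcast to all children

    @property
    def source_path(self):
        return self[0].source_path     # root item answers queries
```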
class DIYStage(object):
"""Simple class that allows any directory to be a spack stage."""
+
def __init__(self, path):
self.archive_file = None
self.path = path
@@ -321,12 +474,16 @@ class DIYStage(object):
if os.path.isdir(self.path):
os.chdir(self.path)
else:
- tty.die("Setup failed: no such directory: " + self.path)
+ raise ChdirError("Setup failed: no such directory: " + self.path)
+
+ # DIY stages do nothing as context managers.
+ def __enter__(self): pass
+ def __exit__(self, exc_type, exc_val, exc_tb): pass
def chdir_to_source(self):
self.chdir()
- def fetch(self):
+    def fetch(self, mirror_only=False):
tty.msg("No need to fetch for DIY.")
def check(self):
@@ -345,30 +502,16 @@ class DIYStage(object):
def _get_mirrors():
"""Get mirrors from spack configuration."""
- config = spack.config.get_mirror_config()
+ config = spack.config.get_config('mirrors')
return [val for name, val in config.iteritems()]
-
def ensure_access(file=spack.stage_path):
"""Ensure we can access a directory and die with an error if we can't."""
if not can_access(file):
tty.die("Insufficient permissions for %s" % file)
-def remove_linked_tree(path):
- """Removes a directory and its contents. If the directory is a symlink,
- follows the link and reamoves the real directory before removing the
- link.
- """
- if os.path.exists(path):
- if os.path.islink(path):
- shutil.rmtree(os.path.realpath(path), True)
- os.unlink(path)
- else:
- shutil.rmtree(path, True)
-
-
def purge():
"""Remove all build directories in the top-level stage path."""
if os.path.isdir(spack.stage_path):
@@ -397,19 +540,15 @@ def find_tmp_root():
class StageError(spack.error.SpackError):
- def __init__(self, message, long_message=None):
- super(self, StageError).__init__(message, long_message)
+ """"Superclass for all errors encountered during staging."""
class RestageError(StageError):
- def __init__(self, message, long_msg=None):
- super(RestageError, self).__init__(message, long_msg)
+ """"Error encountered during restaging."""
class ChdirError(StageError):
- def __init__(self, message, long_msg=None):
- super(ChdirError, self).__init__(message, long_msg)
-
+ """Raised when Spack can't change directories."""
# Keep this in namespace for convenience
FailedDownloadError = fs.FailedDownloadError
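The trailing-'/' handling in `Stage.fetch()` above matters because `urljoin` drops the last path component of a base URL that lacks one. A runnable demonstration (Python 2's `urlparse`, matching this codebase; the mirror URL is hypothetical):

```
from urlparse import urljoin  # Python 3: from urllib.parse import urljoin

base = 'http://mirror.example.com/spack'
print(urljoin(base, 'openmpi/openmpi-1.10.tar.gz'))
# -> http://mirror.example.com/openmpi/openmpi-1.10.tar.gz  (root lost!)

print(urljoin(base + '/', 'openmpi/openmpi-1.10.tar.gz'))
# -> http://mirror.example.com/spack/openmpi/openmpi-1.10.tar.gz
```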
diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py
index 2c4517d343..b710204ece 100644
--- a/lib/spack/spack/test/__init__.py
+++ b/lib/spack/spack/test/__init__.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -24,7 +24,10 @@
##############################################################################
import sys
import unittest
+import nose
+from spack.test.tally_plugin import Tally
+from llnl.util.filesystem import join_path
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
@@ -45,6 +48,7 @@ test_names = ['versions',
'package_sanity',
'config',
'directory_layout',
+ 'pattern',
'python_version',
'git_fetch',
'svn_fetch',
@@ -60,7 +64,12 @@ test_names = ['versions',
'unit_install',
'lock',
'database',
- 'cflags']
+ 'namespace_trie',
+ 'yaml',
+ 'sbang',
+ 'environment',
+ 'cmd.uninstall']
+# 'cflags']
def list_tests():
@@ -68,7 +77,7 @@ def list_tests():
return test_names
-def run(names, verbose=False):
+def run(names, outputDir, verbose=False):
"""Run tests with the supplied names. Names should be a list. If
it's empty, run ALL of Spack's tests."""
verbosity = 1 if not verbose else 2
@@ -83,27 +92,30 @@ def run(names, verbose=False):
colify(sorted(test_names), indent=4)
sys.exit(1)
- runner = unittest.TextTestRunner(verbosity=verbosity)
-
- testsRun = errors = failures = 0
+ tally = Tally()
for test in names:
module = 'spack.test.' + test
print module
- suite = unittest.defaultTestLoader.loadTestsFromName(module)
tty.msg("Running test: %s" % test)
- result = runner.run(suite)
- testsRun += result.testsRun
- errors += len(result.errors)
- failures += len(result.failures)
- succeeded = not errors and not failures
+ runOpts = ["--with-%s" % spack.test.tally_plugin.Tally.name]
+
+ if outputDir:
+ xmlOutputFname = "unittests-{0}.xml".format(test)
+ xmlOutputPath = join_path(outputDir, xmlOutputFname)
+ runOpts += ["--with-xunit",
+ "--xunit-file={0}".format(xmlOutputPath)]
+ argv = [""] + runOpts + [module]
+ result = nose.run(argv=argv, addplugins=[tally])
+
+ succeeded = not tally.failCount and not tally.errorCount
tty.msg("Tests Complete.",
- "%5d tests run" % testsRun,
- "%5d failures" % failures,
- "%5d errors" % errors)
+ "%5d tests run" % tally.numberOfTestsRun,
+ "%5d failures" % tally.failCount,
+ "%5d errors" % tally.errorCount)
- if not errors and not failures:
+ if succeeded:
tty.info("OK", format='g')
else:
tty.info("FAIL", format='r')
diff --git a/lib/spack/spack/test/cc.py b/lib/spack/spack/test/cc.py
index aa16f9b351..0b1aeb2a8f 100644
--- a/lib/spack/spack/test/cc.py
+++ b/lib/spack/spack/test/cc.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -28,6 +28,8 @@ arguments correctly.
"""
import os
import unittest
+import tempfile
+import shutil
from llnl.util.filesystem import *
import spack
@@ -39,11 +41,11 @@ test_command = [
'arg1',
'-Wl,--start-group',
'arg2',
- '-Wl,-rpath=/first/rpath', 'arg3', '-Wl,-rpath', '-Wl,/second/rpath',
+ '-Wl,-rpath,/first/rpath', 'arg3', '-Wl,-rpath', '-Wl,/second/rpath',
'-llib1', '-llib2',
'arg4',
'-Wl,--end-group',
- '-Xlinker,-rpath', '-Xlinker,/third/rpath', '-Xlinker,-rpath=/fourth/rpath',
+ '-Xlinker', '-rpath', '-Xlinker', '/third/rpath', '-Xlinker', '-rpath', '-Xlinker', '/fourth/rpath',
'-llib3', '-llib4',
'arg5', 'arg6']
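The change from `-Wl,-rpath=...` to `-Wl,-rpath,...` in the test command matches how compiler drivers forward linker arguments: everything after `-Wl,` is split on commas and passed to `ld` as separate words. A small stdlib illustration:

```
arg = '-Wl,-rpath,/first/rpath'
ld_words = arg[len('-Wl,'):].split(',')
print(ld_words)  # ['-rpath', '/first/rpath'] -- what ld receives
```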
@@ -55,27 +57,54 @@ class CompilerTest(unittest.TestCase):
self.ld = Executable(join_path(spack.build_env_path, "ld"))
self.cpp = Executable(join_path(spack.build_env_path, "cpp"))
- os.environ['SPACK_CC'] = "/bin/mycc"
- os.environ['SPACK_PREFIX'] = "/usr"
+ self.realcc = "/bin/mycc"
+ self.prefix = "/spack-test-prefix"
+
+ os.environ['SPACK_CC'] = self.realcc
+ os.environ['SPACK_PREFIX'] = self.prefix
os.environ['SPACK_ENV_PATH']="test"
os.environ['SPACK_DEBUG_LOG_DIR'] = "."
os.environ['SPACK_COMPILER_SPEC'] = "gcc@4.4.7"
os.environ['SPACK_SHORT_SPEC'] = "foo@1.2"
+ # Make some fake dependencies
+ self.tmp_deps = tempfile.mkdtemp()
+ self.dep1 = join_path(self.tmp_deps, 'dep1')
+ self.dep2 = join_path(self.tmp_deps, 'dep2')
+ self.dep3 = join_path(self.tmp_deps, 'dep3')
+ self.dep4 = join_path(self.tmp_deps, 'dep4')
+
+ mkdirp(join_path(self.dep1, 'include'))
+ mkdirp(join_path(self.dep1, 'lib'))
+
+ mkdirp(join_path(self.dep2, 'lib64'))
+
+ mkdirp(join_path(self.dep3, 'include'))
+ mkdirp(join_path(self.dep3, 'lib64'))
+
+ mkdirp(join_path(self.dep4, 'include'))
+
+ if 'SPACK_DEPENDENCIES' in os.environ:
+ del os.environ['SPACK_DEPENDENCIES']
+
+
+ def tearDown(self):
+ shutil.rmtree(self.tmp_deps, True)
+
def check_cc(self, command, args, expected):
os.environ['SPACK_TEST_COMMAND'] = command
- self.assertEqual(self.cc(*args, return_output=True).strip(), expected)
+ self.assertEqual(self.cc(*args, output=str).strip(), expected)
def check_ld(self, command, args, expected):
os.environ['SPACK_TEST_COMMAND'] = command
- self.assertEqual(self.ld(*args, return_output=True).strip(), expected)
+ self.assertEqual(self.ld(*args, output=str).strip(), expected)
def check_cpp(self, command, args, expected):
os.environ['SPACK_TEST_COMMAND'] = command
- self.assertEqual(self.cpp(*args, return_output=True).strip(), expected)
+ self.assertEqual(self.cpp(*args, output=str).strip(), expected)
def test_vcheck_mode(self):
@@ -92,39 +121,101 @@ class CompilerTest(unittest.TestCase):
self.check_cpp('dump-mode', [], "cpp")
+ def test_as_mode(self):
+ self.check_cc('dump-mode', ['-S'], "as")
+
+
def test_ccld_mode(self):
self.check_cc('dump-mode', [], "ccld")
self.check_cc('dump-mode', ['foo.c', '-o', 'foo'], "ccld")
- self.check_cc('dump-mode', ['foo.c', '-o', 'foo', '-Wl,-rpath=foo'], "ccld")
- self.check_cc('dump-mode', ['foo.o', 'bar.o', 'baz.o', '-o', 'foo', '-Wl,-rpath=foo'], "ccld")
+ self.check_cc('dump-mode', ['foo.c', '-o', 'foo', '-Wl,-rpath,foo'], "ccld")
+ self.check_cc('dump-mode', ['foo.o', 'bar.o', 'baz.o', '-o', 'foo', '-Wl,-rpath,foo'], "ccld")
def test_ld_mode(self):
self.check_ld('dump-mode', [], "ld")
- self.check_ld('dump-mode', ['foo.o', 'bar.o', 'baz.o', '-o', 'foo', '-Wl,-rpath=foo'], "ld")
+ self.check_ld('dump-mode', ['foo.o', 'bar.o', 'baz.o', '-o', 'foo', '-Wl,-rpath,foo'], "ld")
+
+
+ def test_dep_rpath(self):
+ """Ensure RPATHs for root package are added."""
+ self.check_cc('dump-args', test_command,
+ self.realcc + ' ' +
+ '-Wl,-rpath,' + self.prefix + '/lib ' +
+ '-Wl,-rpath,' + self.prefix + '/lib64 ' +
+ ' '.join(test_command))
+
+
+ def test_dep_include(self):
+ """Ensure a single dependency include directory is added."""
+ os.environ['SPACK_DEPENDENCIES'] = self.dep4
+ self.check_cc('dump-args', test_command,
+ self.realcc + ' ' +
+ '-Wl,-rpath,' + self.prefix + '/lib ' +
+ '-Wl,-rpath,' + self.prefix + '/lib64 ' +
+ '-I' + self.dep4 + '/include ' +
+ ' '.join(test_command))
+
+
+ def test_dep_lib(self):
+ """Ensure a single dependency RPATH is added."""
+ os.environ['SPACK_DEPENDENCIES'] = self.dep2
+ self.check_cc('dump-args', test_command,
+ self.realcc + ' ' +
+ '-Wl,-rpath,' + self.prefix + '/lib ' +
+ '-Wl,-rpath,' + self.prefix + '/lib64 ' +
+ '-L' + self.dep2 + '/lib64 ' +
+ '-Wl,-rpath,' + self.dep2 + '/lib64 ' +
+ ' '.join(test_command))
+
+
+ def test_all_deps(self):
+ """Ensure includes and RPATHs for all deps are added. """
+ os.environ['SPACK_DEPENDENCIES'] = ':'.join([
+ self.dep1, self.dep2, self.dep3, self.dep4])
+
+ # This is probably more constrained than it needs to be; it
+ # checks order within prepended args and doesn't strictly have
+ # to. We could loosen that if it becomes necessary
+ self.check_cc('dump-args', test_command,
+ self.realcc + ' ' +
+ '-Wl,-rpath,' + self.prefix + '/lib ' +
+ '-Wl,-rpath,' + self.prefix + '/lib64 ' +
+
+ '-I' + self.dep4 + '/include ' +
+
+ '-L' + self.dep3 + '/lib64 ' +
+ '-Wl,-rpath,' + self.dep3 + '/lib64 ' +
+ '-I' + self.dep3 + '/include ' +
+
+ '-L' + self.dep2 + '/lib64 ' +
+ '-Wl,-rpath,' + self.dep2 + '/lib64 ' +
+ '-L' + self.dep1 + '/lib ' +
+ '-Wl,-rpath,' + self.dep1 + '/lib ' +
+ '-I' + self.dep1 + '/include ' +
- def test_includes(self):
- self.check_cc('dump-includes', test_command,
- "\n".join(["/test/include", "/other/include"]))
+ ' '.join(test_command))
- def test_libraries(self):
- self.check_cc('dump-libraries', test_command,
- "\n".join(["/test/lib", "/other/lib"]))
+ def test_ld_deps(self):
+ """Ensure no (extra) -I args or -Wl, are passed in ld mode."""
+ os.environ['SPACK_DEPENDENCIES'] = ':'.join([
+ self.dep1, self.dep2, self.dep3, self.dep4])
+ self.check_ld('dump-args', test_command,
+ 'ld ' +
+ '-rpath ' + self.prefix + '/lib ' +
+ '-rpath ' + self.prefix + '/lib64 ' +
- def test_libs(self):
- self.check_cc('dump-libs', test_command,
- "\n".join(["lib1", "lib2", "lib3", "lib4"]))
+ '-L' + self.dep3 + '/lib64 ' +
+ '-rpath ' + self.dep3 + '/lib64 ' +
+ '-L' + self.dep2 + '/lib64 ' +
+ '-rpath ' + self.dep2 + '/lib64 ' +
- def test_rpaths(self):
- self.check_cc('dump-rpaths', test_command,
- "\n".join(["/first/rpath", "/second/rpath", "/third/rpath", "/fourth/rpath"]))
+ '-L' + self.dep1 + '/lib ' +
+ '-rpath ' + self.dep1 + '/lib ' +
+ ' '.join(test_command))
- def test_other_args(self):
- self.check_cc('dump-other-args', test_command,
- "\n".join(["arg1", "-Wl,--start-group", "arg2", "arg3", "arg4",
- "-Wl,--end-group", "arg5", "arg6"]))
diff --git a/lib/spack/spack/test/cmd/__init__.py b/lib/spack/spack/test/cmd/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/spack/spack/test/cmd/__init__.py
diff --git a/lib/spack/spack/test/cmd/uninstall.py b/lib/spack/spack/test/cmd/uninstall.py
new file mode 100644
index 0000000000..80efe06d36
--- /dev/null
+++ b/lib/spack/spack/test/cmd/uninstall.py
@@ -0,0 +1,37 @@
+import spack.test.mock_database
+
+from spack.cmd.uninstall import uninstall
+
+
+class MockArgs(object):
+ def __init__(self, packages, all=False, force=False, dependents=False):
+ self.packages = packages
+ self.all = all
+ self.force = force
+ self.dependents = dependents
+ self.yes_to_all = True
+
+
+class TestUninstall(spack.test.mock_database.MockDatabase):
+ def test_uninstall(self):
+ parser = None
+ # Multiple matches
+ args = MockArgs(['mpileaks'])
+ self.assertRaises(SystemExit, uninstall, parser, args)
+ # Installed dependents
+ args = MockArgs(['libelf'])
+ self.assertRaises(SystemExit, uninstall, parser, args)
+ # Recursive uninstall
+ args = MockArgs(['callpath'], all=True, dependents=True)
+ uninstall(parser, args)
+
+ all_specs = spack.install_layout.all_specs()
+ self.assertEqual(len(all_specs), 7)
+ # query specs with multiple configurations
+ mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
+ callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
+ mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
+
+ self.assertEqual(len(mpileaks_specs), 0)
+ self.assertEqual(len(callpath_specs), 0)
+ self.assertEqual(len(mpi_specs), 3)
diff --git a/lib/spack/spack/test/concretize.py b/lib/spack/spack/test/concretize.py
index a651e29718..f75d1d2b7c 100644
--- a/lib/spack/spack/test/concretize.py
+++ b/lib/spack/spack/test/concretize.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,10 +22,10 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import unittest
-
import spack
from spack.spec import Spec, CompilerSpec
+from spack.version import ver
+from spack.concretize import find_spec
from spack.test.mock_packages_test import *
class ConcretizeTest(MockPackagesTest):
@@ -78,6 +78,14 @@ class ConcretizeTest(MockPackagesTest):
self.check_concretize('mpich')
+ def test_concretize_preferred_version(self):
+ spec = self.check_concretize('python')
+ self.assertEqual(spec.versions, ver('2.7.11'))
+
+ spec = self.check_concretize('python@3.5.1')
+ self.assertEqual(spec.versions, ver('3.5.1'))
+
+
def test_concretize_with_virtual(self):
self.check_concretize('mpileaks ^mpi')
self.check_concretize('mpileaks ^mpi@:1.1')
@@ -125,22 +133,50 @@ class ConcretizeTest(MockPackagesTest):
we ask for some advanced version.
"""
self.assertTrue(not any(spec.satisfies('mpich2@:1.0')
- for spec in spack.db.providers_for('mpi@2.1')))
+ for spec in spack.repo.providers_for('mpi@2.1')))
self.assertTrue(not any(spec.satisfies('mpich2@:1.1')
- for spec in spack.db.providers_for('mpi@2.2')))
+ for spec in spack.repo.providers_for('mpi@2.2')))
self.assertTrue(not any(spec.satisfies('mpich2@:1.1')
- for spec in spack.db.providers_for('mpi@2.2')))
+ for spec in spack.repo.providers_for('mpi@2.2')))
self.assertTrue(not any(spec.satisfies('mpich@:1')
- for spec in spack.db.providers_for('mpi@2')))
+ for spec in spack.repo.providers_for('mpi@2')))
self.assertTrue(not any(spec.satisfies('mpich@:1')
- for spec in spack.db.providers_for('mpi@3')))
+ for spec in spack.repo.providers_for('mpi@3')))
self.assertTrue(not any(spec.satisfies('mpich2')
- for spec in spack.db.providers_for('mpi@3')))
+ for spec in spack.repo.providers_for('mpi@3')))
+
+
+ def test_concretize_two_virtuals(self):
+ """Test a package with multiple virtual dependencies."""
+ s = Spec('hypre').concretize()
+
+
+ def test_concretize_two_virtuals_with_one_bound(self):
+ """Test a package with multiple virtual dependencies and one preset."""
+ s = Spec('hypre ^openblas').concretize()
+
+
+ def test_concretize_two_virtuals_with_two_bound(self):
+ """Test a package with multiple virtual dependencies and two of them preset."""
+ s = Spec('hypre ^openblas ^netlib-lapack').concretize()
+
+
+ def test_concretize_two_virtuals_with_dual_provider(self):
+ """Test a package with multiple virtual dependencies and force a provider
+ that provides both."""
+ s = Spec('hypre ^openblas-with-lapack').concretize()
+
+
+ def test_concretize_two_virtuals_with_dual_provider_and_a_conflict(self):
+ """Test a package with multiple virtual dependencies and force a provider
+ that provides both, and another conflicting package that provides one."""
+ s = Spec('hypre ^openblas-with-lapack ^netlib-lapack')
+ self.assertRaises(spack.spec.MultipleProviderError, s.concretize)
def test_virtual_is_fully_expanded_for_callpath(self):
@@ -192,3 +228,100 @@ class ConcretizeTest(MockPackagesTest):
# TODO: not exactly the syntax I would like.
self.assertTrue(spec['libdwarf'].compiler.satisfies('clang'))
self.assertTrue(spec['libelf'].compiler.satisfies('clang'))
+
+
+ def test_external_package(self):
+ spec = Spec('externaltool%gcc')
+ spec.concretize()
+
+ self.assertEqual(spec['externaltool'].external, '/path/to/external_tool')
+ self.assertFalse('externalprereq' in spec)
+ self.assertTrue(spec['externaltool'].compiler.satisfies('gcc'))
+
+
+ def test_nobuild_package(self):
+ got_error = False
+ spec = Spec('externaltool%clang')
+ try:
+ spec.concretize()
+ except spack.concretize.NoBuildError:
+ got_error = True
+ self.assertTrue(got_error)
+
+
+ def test_external_and_virtual(self):
+ spec = Spec('externaltest')
+ spec.concretize()
+ self.assertEqual(spec['externaltool'].external, '/path/to/external_tool')
+ self.assertEqual(spec['stuff'].external, '/path/to/external_virtual_gcc')
+ self.assertTrue(spec['externaltool'].compiler.satisfies('gcc'))
+ self.assertTrue(spec['stuff'].compiler.satisfies('gcc'))
+
+
+ def test_find_spec_parents(self):
+ """Tests the spec finding logic used by concretization. """
+ s = Spec('a +foo',
+ Spec('b +foo',
+ Spec('c'),
+ Spec('d +foo')),
+ Spec('e +foo'))
+
+ self.assertEqual('a', find_spec(s['b'], lambda s: '+foo' in s).name)
+
+
+ def test_find_spec_children(self):
+ s = Spec('a',
+ Spec('b +foo',
+ Spec('c'),
+ Spec('d +foo')),
+ Spec('e +foo'))
+ self.assertEqual('d', find_spec(s['b'], lambda s: '+foo' in s).name)
+ s = Spec('a',
+ Spec('b +foo',
+ Spec('c +foo'),
+ Spec('d')),
+ Spec('e +foo'))
+ self.assertEqual('c', find_spec(s['b'], lambda s: '+foo' in s).name)
+
+
+ def test_find_spec_sibling(self):
+ s = Spec('a',
+ Spec('b +foo',
+ Spec('c'),
+ Spec('d')),
+ Spec('e +foo'))
+ self.assertEqual('e', find_spec(s['b'], lambda s: '+foo' in s).name)
+ self.assertEqual('b', find_spec(s['e'], lambda s: '+foo' in s).name)
+
+ s = Spec('a',
+ Spec('b +foo',
+ Spec('c'),
+ Spec('d')),
+ Spec('e',
+ Spec('f +foo')))
+ self.assertEqual('f', find_spec(s['b'], lambda s: '+foo' in s).name)
+
+
+ def test_find_spec_self(self):
+ s = Spec('a',
+ Spec('b +foo',
+ Spec('c'),
+ Spec('d')),
+ Spec('e'))
+ self.assertEqual('b', find_spec(s['b'], lambda s: '+foo' in s).name)
+
+
+ def test_find_spec_none(self):
+ s = Spec('a',
+ Spec('b',
+ Spec('c'),
+ Spec('d')),
+ Spec('e'))
+ self.assertEqual(None, find_spec(s['b'], lambda s: '+foo' in s))
+
+
+ def test_compiler_child(self):
+ s = Spec('mpileaks%clang ^dyninst%gcc')
+ s.concretize()
+ self.assertTrue(s['mpileaks'].satisfies('%clang'))
+ self.assertTrue(s['dyninst'].satisfies('%gcc'))
diff --git a/lib/spack/spack/test/config.py b/lib/spack/spack/test/config.py
index 790b22f3b0..0562d2d620 100644
--- a/lib/spack/spack/test/config.py
+++ b/lib/spack/spack/test/config.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,53 +22,99 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import unittest
-import shutil
import os
+import shutil
from tempfile import mkdtemp
+
import spack
-from spack.packages import PackageDB
+import spack.config
+from ordereddict_backport import OrderedDict
from spack.test.mock_packages_test import *
+# Some sample compiler config data
+a_comps = {
+ "all": {
+ "gcc@4.7.3" : {
+ "cc" : "/gcc473",
+ "cxx": "/g++473",
+ "f77": None,
+ "fc" : None },
+ "gcc@4.5.0" : {
+ "cc" : "/gcc450",
+ "cxx": "/g++450",
+ "f77": "/gfortran",
+ "fc" : "/gfortran" },
+ "clang@3.3" : {
+ "cc" : "<overwritten>",
+ "cxx": "<overwritten>",
+ "f77": "<overwritten>",
+ "fc" : "<overwritten>" }
+ }
+}
+
+b_comps = {
+ "all": {
+ "icc@10.0" : {
+ "cc" : "/icc100",
+ "cxx": "/icc100",
+ "f77": None,
+ "fc" : None },
+ "icc@11.1" : {
+ "cc" : "/icc111",
+ "cxx": "/icp111",
+ "f77": "/ifort",
+ "fc" : "/ifort" },
+ "clang@3.3" : {
+ "cc" : "/clang",
+ "cxx": "/clang++",
+ "f77": None,
+ "fc" : None}
+ }
+}
+
class ConfigTest(MockPackagesTest):
def setUp(self):
- self.initmock()
+ super(ConfigTest, self).setUp()
self.tmp_dir = mkdtemp('.tmp', 'spack-config-test-')
- spack.config.config_scopes = [('test_low_priority', os.path.join(self.tmp_dir, 'low')),
- ('test_high_priority', os.path.join(self.tmp_dir, 'high'))]
+ spack.config.config_scopes = OrderedDict()
+ spack.config.ConfigScope('test_low_priority', os.path.join(self.tmp_dir, 'low'))
+ spack.config.ConfigScope('test_high_priority', os.path.join(self.tmp_dir, 'high'))
def tearDown(self):
- self.cleanmock()
+ super(ConfigTest, self).tearDown()
shutil.rmtree(self.tmp_dir, True)
- def check_config(self, comps):
- config = spack.config.get_compilers_config()
- compiler_list = ['cc', 'cxx', 'f77', 'f90']
- for key in comps:
+
+ def check_config(self, comps, *compiler_names):
+ """Check that named compilers in comps match Spack's config."""
+ config = spack.config.get_config('compilers')
+ compiler_list = ['cc', 'cxx', 'f77', 'fc']
+ for key in compiler_names:
for c in compiler_list:
- if comps[key][c] == '/bad':
- continue
- self.assertEqual(comps[key][c], config[key][c])
+ expected = comps['all'][key][c]
+ actual = config['all'][key][c]
+ self.assertEqual(expected, actual)
- def test_write_key(self):
- a_comps = {"gcc@4.7.3" : { "cc" : "/gcc473", "cxx" : "/g++473", "f77" : None, "f90" : None },
- "gcc@4.5.0" : { "cc" : "/gcc450", "cxx" : "/g++450", "f77" : "/gfortran", "f90" : "/gfortran" },
- "clang@3.3" : { "cc" : "/bad", "cxx" : "/bad", "f77" : "/bad", "f90" : "/bad" }}
+ def test_write_key_in_memory(self):
+ # Write b_comps "on top of" a_comps.
+ spack.config.update_config('compilers', a_comps, 'test_low_priority')
+ spack.config.update_config('compilers', b_comps, 'test_high_priority')
- b_comps = {"icc@10.0" : { "cc" : "/icc100", "cxx" : "/icc100", "f77" : None, "f90" : None },
- "icc@11.1" : { "cc" : "/icc111", "cxx" : "/icp111", "f77" : "/ifort", "f90" : "/ifort" },
- "clang@3.3" : { "cc" : "/clang", "cxx" : "/clang++", "f77" : None, "f90" : None}}
+ # Make sure the config looks how we expect.
+ self.check_config(a_comps, 'gcc@4.7.3', 'gcc@4.5.0')
+ self.check_config(b_comps, 'icc@10.0', 'icc@11.1', 'clang@3.3')
- spack.config.add_to_compiler_config(a_comps, 'test_low_priority')
- spack.config.add_to_compiler_config(b_comps, 'test_high_priority')
- self.check_config(a_comps)
- self.check_config(b_comps)
+ def test_write_key_to_disk(self):
+ # Write b_comps "on top of" a_comps.
+ spack.config.update_config('compilers', a_comps, 'test_low_priority')
+ spack.config.update_config('compilers', b_comps, 'test_high_priority')
+ # Clear caches so we're forced to read from disk.
spack.config.clear_config_caches()
- self.check_config(a_comps)
- self.check_config(b_comps)
-
+ # Same check again, to ensure consistency.
+ self.check_config(a_comps, 'gcc@4.7.3', 'gcc@4.5.0')
+ self.check_config(b_comps, 'icc@10.0', 'icc@11.1', 'clang@3.3')
diff --git a/lib/spack/spack/test/configure_guess.py b/lib/spack/spack/test/configure_guess.py
index 766dd51d52..2440d120e5 100644
--- a/lib/spack/spack/test/configure_guess.py
+++ b/lib/spack/spack/test/configure_guess.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,20 +23,15 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
-import unittest
import shutil
import tempfile
+import unittest
from llnl.util.filesystem import *
-
from spack.cmd.create import ConfigureGuesser
from spack.stage import Stage
-
-from spack.fetch_strategy import URLFetchStrategy
-from spack.directory_layout import YamlDirectoryLayout
-from spack.util.executable import which
from spack.test.mock_packages_test import *
-from spack.test.mock_repo import MockArchive
+from spack.util.executable import which
class InstallTest(unittest.TestCase):
@@ -52,8 +47,6 @@ class InstallTest(unittest.TestCase):
def tearDown(self):
shutil.rmtree(self.tmpdir, ignore_errors=True)
- if self.stage:
- self.stage.destroy()
os.chdir(self.orig_dir)
@@ -64,12 +57,12 @@ class InstallTest(unittest.TestCase):
url = 'file://' + join_path(os.getcwd(), 'archive.tar.gz')
print url
- self.stage = Stage(url)
- self.stage.fetch()
+ with Stage(url) as stage:
+ stage.fetch()
- guesser = ConfigureGuesser()
- guesser(self.stage)
- self.assertEqual(system, guesser.build_system)
+ guesser = ConfigureGuesser()
+ guesser(stage)
+ self.assertEqual(system, guesser.build_system)
def test_python(self):
diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py
index 8416143f2d..465263d057 100644
--- a/lib/spack/spack/test/database.py
+++ b/lib/spack/spack/test/database.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -26,19 +26,15 @@
These tests check the database is functioning properly,
both in memory and in its file
"""
-import tempfile
-import shutil
+import os.path
import multiprocessing
-from llnl.util.lock import *
-from llnl.util.filesystem import join_path
-
import spack
-from spack.database import Database
-from spack.directory_layout import YamlDirectoryLayout
-from spack.test.mock_packages_test import *
-
+from llnl.util.filesystem import join_path
+from llnl.util.lock import *
from llnl.util.tty.colify import colify
+from spack.test.mock_database import MockDatabase
+
def _print_ref_counts():
"""Print out all ref counts for the graph used here, for debugging"""
@@ -75,79 +71,7 @@ def _print_ref_counts():
colify(recs, cols=3)
-class DatabaseTest(MockPackagesTest):
-
- def _mock_install(self, spec):
- s = Spec(spec)
- pkg = spack.db.get(s.concretized())
- pkg.do_install(fake=True)
-
-
- def _mock_remove(self, spec):
- specs = spack.installed_db.query(spec)
- assert(len(specs) == 1)
- spec = specs[0]
- spec.package.do_uninstall(spec)
-
-
- def setUp(self):
- super(DatabaseTest, self).setUp()
- #
- # TODO: make the mockup below easier.
- #
-
- # Make a fake install directory
- self.install_path = tempfile.mkdtemp()
- self.spack_install_path = spack.install_path
- spack.install_path = self.install_path
-
- self.install_layout = YamlDirectoryLayout(self.install_path)
- self.spack_install_layout = spack.install_layout
- spack.install_layout = self.install_layout
-
- # Make fake database and fake install directory.
- self.installed_db = Database(self.install_path)
- self.spack_installed_db = spack.installed_db
- spack.installed_db = self.installed_db
-
- # make a mock database with some packages installed note that
- # the ref count for dyninst here will be 3, as it's recycled
- # across each install.
- #
- # Here is what the mock DB looks like:
- #
- # o mpileaks o mpileaks' o mpileaks''
- # |\ |\ |\
- # | o callpath | o callpath' | o callpath''
- # |/| |/| |/|
- # o | mpich o | mpich2 o | zmpi
- # | | o | fake
- # | | |
- # | |______________/
- # | .____________/
- # |/
- # o dyninst
- # |\
- # | o libdwarf
- # |/
- # o libelf
- #
-
- # Transaction used to avoid repeated writes.
- with spack.installed_db.write_transaction():
- self._mock_install('mpileaks ^mpich')
- self._mock_install('mpileaks ^mpich2')
- self._mock_install('mpileaks ^zmpi')
-
-
- def tearDown(self):
- super(DatabaseTest, self).tearDown()
- shutil.rmtree(self.install_path)
- spack.install_path = self.spack_install_path
- spack.install_layout = self.spack_install_layout
- spack.installed_db = self.spack_installed_db
-
-
+class DatabaseTest(MockDatabase):
def test_005_db_exists(self):
"""Make sure db cache file exists after creating."""
index_file = join_path(self.install_path, '.spack-db', 'index.yaml')
@@ -156,7 +80,6 @@ class DatabaseTest(MockPackagesTest):
self.assertTrue(os.path.exists(index_file))
self.assertTrue(os.path.exists(lock_file))
-
def test_010_all_install_sanity(self):
"""Ensure that the install layout reflects what we think it does."""
all_specs = spack.install_layout.all_specs()
diff --git a/lib/spack/spack/test/directory_layout.py b/lib/spack/spack/test/directory_layout.py
index b3ad8efec4..8ad8f1a360 100644
--- a/lib/spack/spack/test/directory_layout.py
+++ b/lib/spack/spack/test/directory_layout.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -25,32 +25,33 @@
"""\
This test verifies that the Spack directory layout works properly.
"""
-import unittest
-import tempfile
-import shutil
import os
-
-from llnl.util.filesystem import *
+import shutil
+import tempfile
import spack
-from spack.spec import Spec
-from spack.packages import PackageDB
+from llnl.util.filesystem import *
from spack.directory_layout import YamlDirectoryLayout
+from spack.repository import RepoPath
+from spack.spec import Spec
+from spack.test.mock_packages_test import *
# number of packages to test (to reduce test time)
max_packages = 10
-class DirectoryLayoutTest(unittest.TestCase):
+class DirectoryLayoutTest(MockPackagesTest):
"""Tests that a directory layout works correctly and produces a
consistent install path."""
def setUp(self):
+ super(DirectoryLayoutTest, self).setUp()
self.tmpdir = tempfile.mkdtemp()
self.layout = YamlDirectoryLayout(self.tmpdir)
def tearDown(self):
+ super(DirectoryLayoutTest, self).tearDown()
shutil.rmtree(self.tmpdir, ignore_errors=True)
self.layout = None
@@ -62,9 +63,12 @@ class DirectoryLayoutTest(unittest.TestCase):
finally that the directory can be removed by the directory
layout.
"""
- packages = list(spack.db.all_packages())[:max_packages]
+ packages = list(spack.repo.all_packages())[:max_packages]
for pkg in packages:
+ if pkg.name.startswith('external'):
+                # External packages cannot be installed, so skip them
+ continue
spec = pkg.spec
# If a spec fails to concretize, just skip it. If it is a
@@ -123,17 +127,17 @@ class DirectoryLayoutTest(unittest.TestCase):
information about installed packages' specs to uninstall
or query them again if the package goes away.
"""
- mock_db = PackageDB(spack.mock_packages_path)
+ mock_db = RepoPath(spack.mock_packages_path)
not_in_mock = set.difference(
- set(spack.db.all_package_names()),
+ set(spack.repo.all_package_names()),
set(mock_db.all_package_names()))
packages = list(not_in_mock)[:max_packages]
# Create all the packages that are not in mock.
installed_specs = {}
for pkg_name in packages:
- spec = spack.db.get(pkg_name).spec
+ spec = spack.repo.get(pkg_name).spec
# If a spec fails to concretize, just skip it. If it is a
# real error, it will be caught by concretization tests.
@@ -145,8 +149,7 @@ class DirectoryLayoutTest(unittest.TestCase):
self.layout.create_install_directory(spec)
installed_specs[spec] = self.layout.path_for_spec(spec)
- tmp = spack.db
- spack.db = mock_db
+ spack.repo.swap(mock_db)
# Now check that even without the package files, we know
# enough to read a spec from the spec file.
@@ -161,16 +164,19 @@ class DirectoryLayoutTest(unittest.TestCase):
self.assertTrue(spec.eq_dag(spec_from_file))
self.assertEqual(spec.dag_hash(), spec_from_file.dag_hash())
- spack.db = tmp
+ spack.repo.swap(mock_db)
def test_find(self):
"""Test that finding specs within an install layout works."""
- packages = list(spack.db.all_packages())[:max_packages]
+ packages = list(spack.repo.all_packages())[:max_packages]
# Create install prefixes for all packages in the list
installed_specs = {}
for pkg in packages:
+ if pkg.name.startswith('external'):
+                # External packages cannot be installed, so skip them
+ continue
spec = pkg.spec.concretized()
installed_specs[spec.name] = spec
self.layout.create_install_directory(spec)
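These hunks replace direct reassignment of the global spack.db with RepoPath and spack.repo.swap(). The tests call swap() a second time with the same object to restore the original repository, so swap() evidently exchanges state with its argument. A sketch under that assumption:

    import spack
    from spack.repository import RepoPath

    mock = RepoPath(spack.mock_packages_path)
    spack.repo.swap(mock)      # install the mock repository
    try:
        names = spack.repo.all_package_names()
    finally:
        spack.repo.swap(mock)  # second swap restores the real repository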
diff --git a/lib/spack/spack/test/environment.py b/lib/spack/spack/test/environment.py
new file mode 100644
index 0000000000..6c8f5ea43c
--- /dev/null
+++ b/lib/spack/spack/test/environment.py
@@ -0,0 +1,73 @@
+import unittest
+import os
+from spack.environment import EnvironmentModifications
+
+
+class EnvironmentTest(unittest.TestCase):
+ def setUp(self):
+ os.environ.clear()
+ os.environ['UNSET_ME'] = 'foo'
+ os.environ['EMPTY_PATH_LIST'] = ''
+ os.environ['PATH_LIST'] = '/path/second:/path/third'
+ os.environ['REMOVE_PATH_LIST'] = '/a/b:/duplicate:/a/c:/remove/this:/a/d:/duplicate/:/f/g'
+
+ def test_set(self):
+ env = EnvironmentModifications()
+ env.set('A', 'dummy value')
+ env.set('B', 3)
+ env.apply_modifications()
+ self.assertEqual('dummy value', os.environ['A'])
+ self.assertEqual(str(3), os.environ['B'])
+
+ def test_unset(self):
+ env = EnvironmentModifications()
+ self.assertEqual('foo', os.environ['UNSET_ME'])
+ env.unset('UNSET_ME')
+ env.apply_modifications()
+ self.assertRaises(KeyError, os.environ.__getitem__, 'UNSET_ME')
+
+ def test_set_path(self):
+ env = EnvironmentModifications()
+ env.set_path('A', ['foo', 'bar', 'baz'])
+ env.apply_modifications()
+ self.assertEqual('foo:bar:baz', os.environ['A'])
+
+ def test_path_manipulation(self):
+ env = EnvironmentModifications()
+
+ env.append_path('PATH_LIST', '/path/last')
+ env.prepend_path('PATH_LIST', '/path/first')
+
+ env.append_path('EMPTY_PATH_LIST', '/path/middle')
+ env.append_path('EMPTY_PATH_LIST', '/path/last')
+ env.prepend_path('EMPTY_PATH_LIST', '/path/first')
+
+ env.append_path('NEWLY_CREATED_PATH_LIST', '/path/middle')
+ env.append_path('NEWLY_CREATED_PATH_LIST', '/path/last')
+ env.prepend_path('NEWLY_CREATED_PATH_LIST', '/path/first')
+
+ env.remove_path('REMOVE_PATH_LIST', '/remove/this')
+ env.remove_path('REMOVE_PATH_LIST', '/duplicate/')
+
+ env.apply_modifications()
+ self.assertEqual('/path/first:/path/second:/path/third:/path/last', os.environ['PATH_LIST'])
+ self.assertEqual('/path/first:/path/middle:/path/last', os.environ['EMPTY_PATH_LIST'])
+ self.assertEqual('/path/first:/path/middle:/path/last', os.environ['NEWLY_CREATED_PATH_LIST'])
+ self.assertEqual('/a/b:/a/c:/a/d:/f/g', os.environ['REMOVE_PATH_LIST'])
+
+ def test_extra_arguments(self):
+ env = EnvironmentModifications()
+ env.set('A', 'dummy value', who='Pkg1')
+ for x in env:
+ assert 'who' in x.args
+ env.apply_modifications()
+ self.assertEqual('dummy value', os.environ['A'])
+
+ def test_extend(self):
+ env = EnvironmentModifications()
+ env.set('A', 'dummy value')
+ env.set('B', 3)
+ copy_construct = EnvironmentModifications(env)
+ self.assertEqual(len(copy_construct), 2)
+ for x, y in zip(env, copy_construct):
+ assert x is y
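The new test file exercises EnvironmentModifications, which records environment edits and replays them in one pass. A minimal usage sketch built only from the calls shown above (variable names are illustrative):

    import os
    from spack.environment import EnvironmentModifications

    env = EnvironmentModifications()
    env.set('CC', '/usr/bin/gcc')               # plain assignment
    env.prepend_path('PATH', '/opt/tools/bin')  # colon-separated list edit
    env.remove_path('PATH', '/stale/entry')
    env.apply_modifications()                   # mutates os.environ
    assert os.environ['CC'] == '/usr/bin/gcc'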
diff --git a/lib/spack/spack/test/git_fetch.py b/lib/spack/spack/test/git_fetch.py
index 9700bd7533..3578044116 100644
--- a/lib/spack/spack/test/git_fetch.py
+++ b/lib/spack/spack/test/git_fetch.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,19 +23,12 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
-import unittest
-import shutil
-import tempfile
-
-from llnl.util.filesystem import *
import spack
-from spack.version import ver
-from spack.stage import Stage
-from spack.util.executable import which
-
+from llnl.util.filesystem import *
from spack.test.mock_packages_test import *
from spack.test.mock_repo import MockGitRepo
+from spack.version import ver
class GitFetchTest(MockPackagesTest):
@@ -50,24 +43,17 @@ class GitFetchTest(MockPackagesTest):
spec = Spec('git-test')
spec.concretize()
- self.pkg = spack.db.get(spec, new=True)
-
+ self.pkg = spack.repo.get(spec, new=True)
def tearDown(self):
"""Destroy the stage space used by this test."""
super(GitFetchTest, self).tearDown()
-
- if self.repo.stage is not None:
- self.repo.stage.destroy()
-
- self.pkg.do_clean()
-
+ self.repo.destroy()
def assert_rev(self, rev):
"""Check that the current git revision is equal to the supplied rev."""
self.assertEqual(self.repo.rev_hash('HEAD'), self.repo.rev_hash(rev))
-
def try_fetch(self, rev, test_file, args):
"""Tries to:
1. Fetch the repo using a fetch strategy constructed with
@@ -79,26 +65,27 @@ class GitFetchTest(MockPackagesTest):
"""
self.pkg.versions[ver('git')] = args
- self.pkg.do_stage()
- self.assert_rev(rev)
+ with self.pkg.stage:
+ self.pkg.do_stage()
+ self.assert_rev(rev)
- file_path = join_path(self.pkg.stage.source_path, test_file)
- self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
- self.assertTrue(os.path.isfile(file_path))
+ file_path = join_path(self.pkg.stage.source_path, test_file)
+ self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
+ self.assertTrue(os.path.isfile(file_path))
- os.unlink(file_path)
- self.assertFalse(os.path.isfile(file_path))
+ os.unlink(file_path)
+ self.assertFalse(os.path.isfile(file_path))
- untracked_file = 'foobarbaz'
- touch(untracked_file)
- self.assertTrue(os.path.isfile(untracked_file))
- self.pkg.do_restage()
- self.assertFalse(os.path.isfile(untracked_file))
+ untracked_file = 'foobarbaz'
+ touch(untracked_file)
+ self.assertTrue(os.path.isfile(untracked_file))
+ self.pkg.do_restage()
+ self.assertFalse(os.path.isfile(untracked_file))
- self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
- self.assertTrue(os.path.isfile(file_path))
+ self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
+ self.assertTrue(os.path.isfile(file_path))
- self.assert_rev(rev)
+ self.assert_rev(rev)
def test_fetch_master(self):
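As in the other fetch tests, staging now happens inside a with pkg.stage: block so the stage lives exactly as long as the test body. A condensed sketch of the flow, where pkg stands for any mock package from these tests:

    with pkg.stage:
        pkg.do_stage()     # fetch and expand into stage.source_path
        # ... mutate the source tree ...
        pkg.do_restage()   # restore a pristine checkout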
diff --git a/lib/spack/spack/test/hg_fetch.py b/lib/spack/spack/test/hg_fetch.py
index 531dfabaa1..b8a0c1ec46 100644
--- a/lib/spack/spack/test/hg_fetch.py
+++ b/lib/spack/spack/test/hg_fetch.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,16 +23,12 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
-import unittest
-
-from llnl.util.filesystem import *
-
import spack
+
from spack.version import ver
-from spack.stage import Stage
-from spack.util.executable import which
-from spack.test.mock_packages_test import *
from spack.test.mock_repo import MockHgRepo
+from llnl.util.filesystem import *
+from spack.test.mock_packages_test import *
class HgFetchTest(MockPackagesTest):
@@ -47,18 +43,12 @@ class HgFetchTest(MockPackagesTest):
spec = Spec('hg-test')
spec.concretize()
- self.pkg = spack.db.get(spec, new=True)
-
+ self.pkg = spack.repo.get(spec, new=True)
def tearDown(self):
"""Destroy the stage space used by this test."""
super(HgFetchTest, self).tearDown()
-
- if self.repo.stage is not None:
- self.repo.stage.destroy()
-
- self.pkg.do_clean()
-
+ self.repo.destroy()
def try_fetch(self, rev, test_file, args):
"""Tries to:
@@ -71,26 +61,27 @@ class HgFetchTest(MockPackagesTest):
"""
self.pkg.versions[ver('hg')] = args
- self.pkg.do_stage()
- self.assertEqual(self.repo.get_rev(), rev)
+ with self.pkg.stage:
+ self.pkg.do_stage()
+ self.assertEqual(self.repo.get_rev(), rev)
- file_path = join_path(self.pkg.stage.source_path, test_file)
- self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
- self.assertTrue(os.path.isfile(file_path))
+ file_path = join_path(self.pkg.stage.source_path, test_file)
+ self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
+ self.assertTrue(os.path.isfile(file_path))
- os.unlink(file_path)
- self.assertFalse(os.path.isfile(file_path))
+ os.unlink(file_path)
+ self.assertFalse(os.path.isfile(file_path))
- untracked = 'foobarbaz'
- touch(untracked)
- self.assertTrue(os.path.isfile(untracked))
- self.pkg.do_restage()
- self.assertFalse(os.path.isfile(untracked))
+ untracked = 'foobarbaz'
+ touch(untracked)
+ self.assertTrue(os.path.isfile(untracked))
+ self.pkg.do_restage()
+ self.assertFalse(os.path.isfile(untracked))
- self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
- self.assertTrue(os.path.isfile(file_path))
+ self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
+ self.assertTrue(os.path.isfile(file_path))
- self.assertEqual(self.repo.get_rev(), rev)
+ self.assertEqual(self.repo.get_rev(), rev)
def test_fetch_default(self):
diff --git a/lib/spack/spack/test/install.py b/lib/spack/spack/test/install.py
index 5659e97a4d..fc5b7e67df 100644
--- a/lib/spack/spack/test/install.py
+++ b/lib/spack/spack/test/install.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,18 +22,13 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import os
-import unittest
import shutil
import tempfile
-from llnl.util.filesystem import *
-
import spack
-from spack.stage import Stage
-from spack.fetch_strategy import URLFetchStrategy
+from llnl.util.filesystem import *
from spack.directory_layout import YamlDirectoryLayout
-from spack.util.executable import which
+from spack.fetch_strategy import URLFetchStrategy, FetchStrategyComposite
from spack.test.mock_packages_test import *
from spack.test.mock_repo import MockArchive
@@ -59,9 +54,7 @@ class InstallTest(MockPackagesTest):
def tearDown(self):
super(InstallTest, self).tearDown()
-
- if self.repo.stage is not None:
- self.repo.stage.destroy()
+ self.repo.destroy()
# Turn checksumming back on
spack.do_checksum = True
@@ -71,17 +64,23 @@ class InstallTest(MockPackagesTest):
shutil.rmtree(self.tmpdir, ignore_errors=True)
- def test_install_and_uninstall(self):
+ def fake_fetchify(self, pkg):
+ """Fake the URL for a package so it downloads from a file."""
+ fetcher = FetchStrategyComposite()
+ fetcher.append(URLFetchStrategy(self.repo.url))
+ pkg.fetcher = fetcher
+
+
+ def ztest_install_and_uninstall(self):
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial_install_test_package')
spec.concretize()
self.assertTrue(spec.concrete)
# Get the package
- pkg = spack.db.get(spec)
+ pkg = spack.repo.get(spec)
- # Fake the URL for the package so it downloads from a file.
- pkg.fetcher = URLFetchStrategy(self.repo.url)
+ self.fake_fetchify(pkg)
try:
pkg.do_install()
@@ -89,3 +88,17 @@ class InstallTest(MockPackagesTest):
except Exception, e:
pkg.remove_prefix()
raise
+
+
+ def test_install_environment(self):
+ spec = Spec('cmake-client').concretized()
+
+ for s in spec.traverse():
+ self.fake_fetchify(s.package)
+
+ pkg = spec.package
+ try:
+ pkg.do_install()
+ except Exception, e:
+ pkg.remove_prefix()
+ raise
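fake_fetchify wraps a URLFetchStrategy in a FetchStrategyComposite before assigning it, matching the composite fetcher interface packages now expect. A sketch of the same move outside the test class (the archive path is hypothetical):

    from spack.fetch_strategy import URLFetchStrategy, FetchStrategyComposite

    fetcher = FetchStrategyComposite()
    fetcher.append(URLFetchStrategy('file:///tmp/mock-archive.tar.gz'))
    pkg.fetcher = fetcher  # pkg now fetches from the local file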
diff --git a/lib/spack/spack/test/link_tree.py b/lib/spack/spack/test/link_tree.py
index 9e887ecc49..ee37e765c7 100644
--- a/lib/spack/spack/test/link_tree.py
+++ b/lib/spack/spack/test/link_tree.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -24,8 +24,6 @@
##############################################################################
import os
import unittest
-import shutil
-import tempfile
from llnl.util.filesystem import *
from llnl.util.link_tree import LinkTree
@@ -38,6 +36,7 @@ class LinkTreeTest(unittest.TestCase):
def setUp(self):
self.stage = Stage('link-tree-test')
+ self.stage.create()
with working_dir(self.stage.path):
touchp('source/1')
@@ -51,10 +50,8 @@ class LinkTreeTest(unittest.TestCase):
source_path = os.path.join(self.stage.path, 'source')
self.link_tree = LinkTree(source_path)
-
def tearDown(self):
- if self.stage:
- self.stage.destroy()
+ self.stage.destroy()
def check_file_link(self, filename):
diff --git a/lib/spack/spack/test/lock.py b/lib/spack/spack/test/lock.py
index 5664e71b03..3b11d18da4 100644
--- a/lib/spack/spack/test/lock.py
+++ b/lib/spack/spack/test/lock.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -25,15 +25,13 @@
"""
These tests ensure that our lock works correctly.
"""
-import unittest
-import os
-import tempfile
import shutil
+import tempfile
+import unittest
from multiprocessing import Process
-from llnl.util.lock import *
from llnl.util.filesystem import join_path, touch
-
+from llnl.util.lock import *
from spack.util.multiproc import Barrier
# This is the longest a failed test will take, as the barriers will
diff --git a/lib/spack/spack/test/make_executable.py b/lib/spack/spack/test/make_executable.py
index c4bfeb2a03..a2606acf19 100644
--- a/lib/spack/spack/test/make_executable.py
+++ b/lib/spack/spack/test/make_executable.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -28,13 +28,13 @@ Tests for Spack's built-in parallel make support.
This just tests whether the right args are getting passed to make.
"""
import os
-import unittest
-import tempfile
import shutil
+import tempfile
+import unittest
from llnl.util.filesystem import *
-from spack.util.environment import path_put_first
from spack.build_environment import MakeExecutable
+from spack.util.environment import path_put_first
class MakeExecutableTest(unittest.TestCase):
@@ -56,47 +56,47 @@ class MakeExecutableTest(unittest.TestCase):
def test_make_normal(self):
make = MakeExecutable('make', 8)
- self.assertEqual(make(return_output=True).strip(), '-j8')
- self.assertEqual(make('install', return_output=True).strip(), '-j8 install')
+ self.assertEqual(make(output=str).strip(), '-j8')
+ self.assertEqual(make('install', output=str).strip(), '-j8 install')
def test_make_explicit(self):
make = MakeExecutable('make', 8)
- self.assertEqual(make(parallel=True, return_output=True).strip(), '-j8')
- self.assertEqual(make('install', parallel=True, return_output=True).strip(), '-j8 install')
+ self.assertEqual(make(parallel=True, output=str).strip(), '-j8')
+ self.assertEqual(make('install', parallel=True, output=str).strip(), '-j8 install')
def test_make_one_job(self):
make = MakeExecutable('make', 1)
- self.assertEqual(make(return_output=True).strip(), '')
- self.assertEqual(make('install', return_output=True).strip(), 'install')
+ self.assertEqual(make(output=str).strip(), '')
+ self.assertEqual(make('install', output=str).strip(), 'install')
def test_make_parallel_false(self):
make = MakeExecutable('make', 8)
- self.assertEqual(make(parallel=False, return_output=True).strip(), '')
- self.assertEqual(make('install', parallel=False, return_output=True).strip(), 'install')
+ self.assertEqual(make(parallel=False, output=str).strip(), '')
+ self.assertEqual(make('install', parallel=False, output=str).strip(), 'install')
def test_make_parallel_disabled(self):
make = MakeExecutable('make', 8)
os.environ['SPACK_NO_PARALLEL_MAKE'] = 'true'
- self.assertEqual(make(return_output=True).strip(), '')
- self.assertEqual(make('install', return_output=True).strip(), 'install')
+ self.assertEqual(make(output=str).strip(), '')
+ self.assertEqual(make('install', output=str).strip(), 'install')
os.environ['SPACK_NO_PARALLEL_MAKE'] = '1'
- self.assertEqual(make(return_output=True).strip(), '')
- self.assertEqual(make('install', return_output=True).strip(), 'install')
+ self.assertEqual(make(output=str).strip(), '')
+ self.assertEqual(make('install', output=str).strip(), 'install')
# These don't disable (false and random string)
os.environ['SPACK_NO_PARALLEL_MAKE'] = 'false'
- self.assertEqual(make(return_output=True).strip(), '-j8')
- self.assertEqual(make('install', return_output=True).strip(), '-j8 install')
+ self.assertEqual(make(output=str).strip(), '-j8')
+ self.assertEqual(make('install', output=str).strip(), '-j8 install')
os.environ['SPACK_NO_PARALLEL_MAKE'] = 'foobar'
- self.assertEqual(make(return_output=True).strip(), '-j8')
- self.assertEqual(make('install', return_output=True).strip(), '-j8 install')
+ self.assertEqual(make(output=str).strip(), '-j8')
+ self.assertEqual(make('install', output=str).strip(), '-j8 install')
del os.environ['SPACK_NO_PARALLEL_MAKE']
@@ -106,20 +106,20 @@ class MakeExecutableTest(unittest.TestCase):
# These should work
os.environ['SPACK_NO_PARALLEL_MAKE'] = 'true'
- self.assertEqual(make(parallel=True, return_output=True).strip(), '')
- self.assertEqual(make('install', parallel=True, return_output=True).strip(), 'install')
+ self.assertEqual(make(parallel=True, output=str).strip(), '')
+ self.assertEqual(make('install', parallel=True, output=str).strip(), 'install')
os.environ['SPACK_NO_PARALLEL_MAKE'] = '1'
- self.assertEqual(make(parallel=True, return_output=True).strip(), '')
- self.assertEqual(make('install', parallel=True, return_output=True).strip(), 'install')
+ self.assertEqual(make(parallel=True, output=str).strip(), '')
+ self.assertEqual(make('install', parallel=True, output=str).strip(), 'install')
# These don't disable (false and random string)
os.environ['SPACK_NO_PARALLEL_MAKE'] = 'false'
- self.assertEqual(make(parallel=True, return_output=True).strip(), '-j8')
- self.assertEqual(make('install', parallel=True, return_output=True).strip(), '-j8 install')
+ self.assertEqual(make(parallel=True, output=str).strip(), '-j8')
+ self.assertEqual(make('install', parallel=True, output=str).strip(), '-j8 install')
os.environ['SPACK_NO_PARALLEL_MAKE'] = 'foobar'
- self.assertEqual(make(parallel=True, return_output=True).strip(), '-j8')
- self.assertEqual(make('install', parallel=True, return_output=True).strip(), '-j8 install')
+ self.assertEqual(make(parallel=True, output=str).strip(), '-j8')
+ self.assertEqual(make('install', parallel=True, output=str).strip(), '-j8 install')
del os.environ['SPACK_NO_PARALLEL_MAKE']
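Every call site here swaps return_output=True for output=str, the new Executable convention for capturing stdout as a string. A sketch of the call pattern:

    from spack.build_environment import MakeExecutable

    make = MakeExecutable('make', 8)     # 8 parallel jobs
    out = make('install', output=str)    # capture output instead of echoing it
    assert out.strip() == '-j8 install'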
diff --git a/lib/spack/spack/test/mirror.py b/lib/spack/spack/test/mirror.py
index 89ab14359e..e707adfe9d 100644
--- a/lib/spack/spack/test/mirror.py
+++ b/lib/spack/spack/test/mirror.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,11 +23,10 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
-from filecmp import dircmp
-
import spack
import spack.mirror
-from spack.util.compression import decompressor_for
+
+from filecmp import dircmp
from spack.test.mock_packages_test import *
from spack.test.mock_repo import *
@@ -44,8 +43,16 @@ class MirrorTest(MockPackagesTest):
self.repos = {}
+ def tearDown(self):
+ """Destroy all the stages created by the repos in setup."""
+ super(MirrorTest, self).tearDown()
+ for repo in self.repos.values():
+ repo.destroy()
+ self.repos.clear()
+
+
def set_up_package(self, name, MockRepoClass, url_attr):
- """Use this to set up a mock package to be mirrored.
+ """Set up a mock package to be mirrored.
Each package needs us to:
1. Set up a mock repo/archive to fetch from.
2. Point the package's version args at that repo.
@@ -55,7 +62,7 @@ class MirrorTest(MockPackagesTest):
spec.concretize()
# Get the package and fix its fetch args to point to a mock repo
- pkg = spack.db.get(spec)
+ pkg = spack.repo.get(spec)
repo = MockRepoClass()
self.repos[name] = repo
@@ -65,22 +72,15 @@ class MirrorTest(MockPackagesTest):
pkg.versions[v][url_attr] = repo.url
- def tearDown(self):
- """Destroy all the stages created by the repos in setup."""
- super(MirrorTest, self).tearDown()
-
- for name, repo in self.repos.items():
- if repo.stage:
- pass #repo.stage.destroy()
-
- self.repos.clear()
+ def check_mirror(self):
+ with Stage('spack-mirror-test') as stage:
+ mirror_root = join_path(stage.path, 'test-mirror')
+ # register mirror with spack config
+ mirrors = { 'spack-mirror-test' : 'file://' + mirror_root }
+ spack.config.update_config('mirrors', mirrors)
- def check_mirror(self):
- stage = Stage('spack-mirror-test')
- mirror_root = join_path(stage.path, 'test-mirror')
- try:
os.chdir(stage.path)
spack.mirror.create(
mirror_root, self.repos, no_checksum=True)
@@ -88,7 +88,7 @@ class MirrorTest(MockPackagesTest):
# Stage directory exists
self.assertTrue(os.path.isdir(mirror_root))
- # subdirs for each package
+ # check that there are subdirs for each package
for name in self.repos:
subdir = join_path(mirror_root, name)
self.assertTrue(os.path.isdir(subdir))
@@ -96,40 +96,28 @@ class MirrorTest(MockPackagesTest):
files = os.listdir(subdir)
self.assertEqual(len(files), 1)
- # Decompress archive in the mirror
- archive = files[0]
- archive_path = join_path(subdir, archive)
- decomp = decompressor_for(archive_path)
-
- with working_dir(subdir):
- decomp(archive_path)
-
- # Find the untarred archive directory.
- files = os.listdir(subdir)
- self.assertEqual(len(files), 2)
- self.assertTrue(archive in files)
- files.remove(archive)
-
- expanded_archive = join_path(subdir, files[0])
- self.assertTrue(os.path.isdir(expanded_archive))
-
- # Compare the original repo with the expanded archive
- repo = self.repos[name]
- if not 'svn' in name:
- original_path = repo.path
- else:
- co = 'checked_out'
- svn('checkout', repo.url, co)
- original_path = join_path(subdir, co)
-
- dcmp = dircmp(original_path, expanded_archive)
-
- # make sure there are no new files in the expanded tarball
- self.assertFalse(dcmp.right_only)
- self.assertTrue(all(l in exclude for l in dcmp.left_only))
-
- finally:
- pass #stage.destroy()
+ # Now try to fetch each package.
+ for name, mock_repo in self.repos.items():
+ spec = Spec(name).concretized()
+ pkg = spec.package
+
+ saved_checksum_setting = spack.do_checksum
+ with pkg.stage:
+ # Stage the archive from the mirror and cd to it.
+ spack.do_checksum = False
+ pkg.do_stage(mirror_only=True)
+ # Compare the original repo with the expanded archive
+ original_path = mock_repo.path
+ if 'svn' in name:
+ # have to check out the svn repo to compare.
+ original_path = join_path(mock_repo.path, 'checked_out')
+ svn('checkout', mock_repo.url, original_path)
+ dcmp = dircmp(original_path, pkg.stage.source_path)
+ # make sure there are no new files in the expanded tarball
+ self.assertFalse(dcmp.right_only)
+ # and that all original files are present.
+ self.assertTrue(all(l in exclude for l in dcmp.left_only))
+ spack.do_checksum = saved_checksum_setting
def test_git_mirror(self):
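check_mirror now registers the mirror through spack.config and verifies it by staging each package with mirror_only=True, instead of untarring and diffing archives by hand. The essential steps, sketched with a hypothetical mirror path and mock package name:

    mirrors = {'spack-mirror-test': 'file:///tmp/test-mirror'}
    spack.config.update_config('mirrors', mirrors)

    spec = Spec('git-test').concretized()        # any mirrored mock package
    with spec.package.stage:
        spec.package.do_stage(mirror_only=True)  # must come from the mirror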
diff --git a/lib/spack/spack/test/mock_database.py b/lib/spack/spack/test/mock_database.py
new file mode 100644
index 0000000000..82ba59fc48
--- /dev/null
+++ b/lib/spack/spack/test/mock_database.py
@@ -0,0 +1,80 @@
+import shutil
+import tempfile
+
+import spack
+from spack.spec import Spec
+from spack.database import Database
+from spack.directory_layout import YamlDirectoryLayout
+from spack.test.mock_packages_test import MockPackagesTest
+
+
+class MockDatabase(MockPackagesTest):
+ def _mock_install(self, spec):
+ s = Spec(spec)
+ s.concretize()
+ pkg = spack.repo.get(s)
+ pkg.do_install(fake=True)
+
+ def _mock_remove(self, spec):
+ specs = spack.installed_db.query(spec)
+ assert len(specs) == 1
+ spec = specs[0]
+ spec.package.do_uninstall(spec)
+
+ def setUp(self):
+ super(MockDatabase, self).setUp()
+ #
+ # TODO: make the mockup below easier.
+ #
+
+ # Make a fake install directory
+ self.install_path = tempfile.mkdtemp()
+ self.spack_install_path = spack.install_path
+ spack.install_path = self.install_path
+
+ self.install_layout = YamlDirectoryLayout(self.install_path)
+ self.spack_install_layout = spack.install_layout
+ spack.install_layout = self.install_layout
+
+ # Make fake database and fake install directory.
+ self.installed_db = Database(self.install_path)
+ self.spack_installed_db = spack.installed_db
+ spack.installed_db = self.installed_db
+
+    # Make a mock database with some packages installed. Note that
+ # the ref count for dyninst here will be 3, as it's recycled
+ # across each install.
+ #
+ # Here is what the mock DB looks like:
+ #
+ # o mpileaks o mpileaks' o mpileaks''
+ # |\ |\ |\
+ # | o callpath | o callpath' | o callpath''
+ # |/| |/| |/|
+ # o | mpich o | mpich2 o | zmpi
+ # | | o | fake
+ # | | |
+ # | |______________/
+ # | .____________/
+ # |/
+ # o dyninst
+ # |\
+ # | o libdwarf
+ # |/
+ # o libelf
+ #
+
+ # Transaction used to avoid repeated writes.
+ with spack.installed_db.write_transaction():
+ self._mock_install('mpileaks ^mpich')
+ self._mock_install('mpileaks ^mpich2')
+ self._mock_install('mpileaks ^zmpi')
+
+ def tearDown(self):
+ for spec in spack.installed_db.query():
+ spec.package.do_uninstall(spec)
+ super(MockDatabase, self).tearDown()
+ shutil.rmtree(self.install_path)
+ spack.install_path = self.spack_install_path
+ spack.install_layout = self.spack_install_layout
+ spack.installed_db = self.spack_installed_db
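MockDatabase factors the fake install tree and pre-populated database out of database.py so other tests can reuse the fixture. A sketch of a test built on it; the expected count follows from the three mpileaks installs in setUp:

    import spack
    from spack.test.mock_database import MockDatabase

    class ExampleDBTest(MockDatabase):
        def test_query_mpileaks(self):
            specs = spack.installed_db.query('mpileaks')
            self.assertEqual(len(specs), 3)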
diff --git a/lib/spack/spack/test/mock_packages_test.py b/lib/spack/spack/test/mock_packages_test.py
index 00f81114af..6d24a84150 100644
--- a/lib/spack/spack/test/mock_packages_test.py
+++ b/lib/spack/spack/test/mock_packages_test.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,43 +22,108 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+import os
+import shutil
+import tempfile
import unittest
import spack
import spack.config
-from spack.packages import PackageDB
+from llnl.util.filesystem import mkdirp
+from ordereddict_backport import OrderedDict
+from spack.repository import RepoPath
from spack.spec import Spec
+mock_compiler_config = """\
+compilers:
+ all:
+ clang@3.3:
+ cc: /path/to/clang
+ cxx: /path/to/clang++
+ f77: None
+ fc: None
+ gcc@4.5.0:
+ cc: /path/to/gcc
+ cxx: /path/to/g++
+ f77: /path/to/gfortran
+ fc: /path/to/gfortran
+"""
-def set_pkg_dep(pkg, spec):
- """Alters dependence information for a package.
- Use this to mock up constraints.
- """
- spec = Spec(spec)
- spack.db.get(pkg).dependencies[spec.name] = { Spec(pkg) : spec }
-
+mock_packages_config = """\
+packages:
+ externaltool:
+ buildable: False
+ paths:
+ externaltool@1.0%gcc@4.5.0: /path/to/external_tool
+ externalvirtual:
+ buildable: False
+ paths:
+ externalvirtual@2.0%clang@3.3: /path/to/external_virtual_clang
+ externalvirtual@1.0%gcc@4.5.0: /path/to/external_virtual_gcc
+"""
class MockPackagesTest(unittest.TestCase):
def initmock(self):
# Use the mock packages database for these tests. This allows
# us to set up contrived packages that don't interfere with
# real ones.
- self.real_db = spack.db
- spack.db = PackageDB(spack.mock_packages_path)
+ self.db = RepoPath(spack.mock_packages_path)
+ spack.repo.swap(self.db)
spack.config.clear_config_caches()
self.real_scopes = spack.config.config_scopes
- spack.config.config_scopes = [
- ('site', spack.mock_site_config),
- ('user', spack.mock_user_config)]
+
+ # Mock up temporary configuration directories
+ self.temp_config = tempfile.mkdtemp()
+ self.mock_site_config = os.path.join(self.temp_config, 'site')
+ self.mock_user_config = os.path.join(self.temp_config, 'user')
+ mkdirp(self.mock_site_config)
+ mkdirp(self.mock_user_config)
+ for confs in [('compilers.yaml', mock_compiler_config), ('packages.yaml', mock_packages_config)]:
+ conf_yaml = os.path.join(self.mock_site_config, confs[0])
+ with open(conf_yaml, 'w') as f:
+ f.write(confs[1])
+
+ # TODO: Mocking this up is kind of brittle b/c ConfigScope
+ # TODO: constructor modifies config_scopes. Make it cleaner.
+ spack.config.config_scopes = OrderedDict()
+ spack.config.ConfigScope('site', self.mock_site_config)
+ spack.config.ConfigScope('user', self.mock_user_config)
+
+ # Store changes to the package's dependencies so we can
+ # restore later.
+ self.saved_deps = {}
+
+
+ def set_pkg_dep(self, pkg_name, spec):
+ """Alters dependence information for a package.
+
+ Adds a dependency on <spec> to pkg.
+ Use this to mock up constraints.
+ """
+ spec = Spec(spec)
+
+ # Save original dependencies before making any changes.
+ pkg = spack.repo.get(pkg_name)
+ if pkg_name not in self.saved_deps:
+ self.saved_deps[pkg_name] = (pkg, pkg.dependencies.copy())
+
+ # Change dep spec
+ pkg.dependencies[spec.name] = { Spec(pkg_name) : spec }
def cleanmock(self):
"""Restore the real packages path after any test."""
- spack.db = self.real_db
+ spack.repo.swap(self.db)
spack.config.config_scopes = self.real_scopes
+ shutil.rmtree(self.temp_config, ignore_errors=True)
spack.config.clear_config_caches()
+ # Restore dependency changes that happened during the test
+ for pkg_name, (pkg, deps) in self.saved_deps.items():
+ pkg.dependencies.clear()
+ pkg.dependencies.update(deps)
+
def setUp(self):
self.initmock()
@@ -66,5 +131,3 @@ class MockPackagesTest(unittest.TestCase):
def tearDown(self):
self.cleanmock()
-
-
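set_pkg_dep is now an instance method that saves a package's original dependencies so cleanmock can restore them after each test. A sketch of how a test would use it (the version constraint is illustrative):

    from spack.spec import Spec
    from spack.test.mock_packages_test import MockPackagesTest

    class ExampleTest(MockPackagesTest):
        def test_forced_dependency(self):
            self.set_pkg_dep('mpileaks', 'mpich@2.0')  # undone by cleanmock
            spec = Spec('mpileaks').concretized()
            self.assertTrue('mpich@2.0' in spec)       # constraint took effect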
diff --git a/lib/spack/spack/test/mock_repo.py b/lib/spack/spack/test/mock_repo.py
index fd184e64bc..a8bdfb5571 100644
--- a/lib/spack/spack/test/mock_repo.py
+++ b/lib/spack/spack/test/mock_repo.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -26,13 +26,9 @@ import os
import shutil
from llnl.util.filesystem import *
-
-import spack
-from spack.version import ver
from spack.stage import Stage
from spack.util.executable import which
-
#
# VCS Systems used by mock repo code.
#
@@ -55,6 +51,12 @@ class MockRepo(object):
mkdirp(self.path)
+ def destroy(self):
+ """Destroy resources associated with this mock repo."""
+ if self.stage:
+ self.stage.destroy()
+
+
class MockArchive(MockRepo):
"""Creates a very simple archive directory with a configure script and a
makefile that installs to a prefix. Tars it up into an archive."""
@@ -141,7 +143,7 @@ class MockGitRepo(MockVCSRepo):
self.url = self.path
def rev_hash(self, rev):
- return git('rev-parse', rev, return_output=True).strip()
+ return git('rev-parse', rev, output=str).strip()
class MockSvnRepo(MockVCSRepo):
@@ -193,4 +195,4 @@ class MockHgRepo(MockVCSRepo):
def get_rev(self):
"""Get current mercurial revision."""
- return hg('id', '-i', return_output=True).strip()
+ return hg('id', '-i', output=str).strip()
diff --git a/lib/spack/spack/test/multimethod.py b/lib/spack/spack/test/multimethod.py
index 4b09e7d2b4..2125f673df 100644
--- a/lib/spack/spack/test/multimethod.py
+++ b/lib/spack/spack/test/multimethod.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -33,97 +33,98 @@ from spack.version import *
from spack.spec import Spec
from spack.multimethod import when
from spack.test.mock_packages_test import *
+from spack.version import *
class MultiMethodTest(MockPackagesTest):
def test_no_version_match(self):
- pkg = spack.db.get('multimethod@2.0')
+ pkg = spack.repo.get('multimethod@2.0')
self.assertRaises(NoSuchMethodError, pkg.no_version_2)
def test_one_version_match(self):
- pkg = spack.db.get('multimethod@1.0')
+ pkg = spack.repo.get('multimethod@1.0')
self.assertEqual(pkg.no_version_2(), 1)
- pkg = spack.db.get('multimethod@3.0')
+ pkg = spack.repo.get('multimethod@3.0')
self.assertEqual(pkg.no_version_2(), 3)
- pkg = spack.db.get('multimethod@4.0')
+ pkg = spack.repo.get('multimethod@4.0')
self.assertEqual(pkg.no_version_2(), 4)
def test_version_overlap(self):
- pkg = spack.db.get('multimethod@2.0')
+ pkg = spack.repo.get('multimethod@2.0')
self.assertEqual(pkg.version_overlap(), 1)
- pkg = spack.db.get('multimethod@5.0')
+ pkg = spack.repo.get('multimethod@5.0')
self.assertEqual(pkg.version_overlap(), 2)
def test_mpi_version(self):
- pkg = spack.db.get('multimethod^mpich@3.0.4')
+ pkg = spack.repo.get('multimethod^mpich@3.0.4')
self.assertEqual(pkg.mpi_version(), 3)
- pkg = spack.db.get('multimethod^mpich2@1.2')
+ pkg = spack.repo.get('multimethod^mpich2@1.2')
self.assertEqual(pkg.mpi_version(), 2)
- pkg = spack.db.get('multimethod^mpich@1.0')
+ pkg = spack.repo.get('multimethod^mpich@1.0')
self.assertEqual(pkg.mpi_version(), 1)
def test_undefined_mpi_version(self):
- pkg = spack.db.get('multimethod^mpich@0.4')
+ pkg = spack.repo.get('multimethod^mpich@0.4')
self.assertEqual(pkg.mpi_version(), 1)
- pkg = spack.db.get('multimethod^mpich@1.4')
+ pkg = spack.repo.get('multimethod^mpich@1.4')
self.assertEqual(pkg.mpi_version(), 1)
def test_default_works(self):
- pkg = spack.db.get('multimethod%gcc')
+ pkg = spack.repo.get('multimethod%gcc')
self.assertEqual(pkg.has_a_default(), 'gcc')
- pkg = spack.db.get('multimethod%intel')
+ pkg = spack.repo.get('multimethod%intel')
self.assertEqual(pkg.has_a_default(), 'intel')
- pkg = spack.db.get('multimethod%pgi')
+ pkg = spack.repo.get('multimethod%pgi')
self.assertEqual(pkg.has_a_default(), 'default')
def test_architecture_match(self):
- pkg = spack.db.get('multimethod arch=x86_64')
+ pkg = spack.repo.get('multimethod arch=x86_64')
self.assertEqual(pkg.different_by_architecture(), 'x86_64')
- pkg = spack.db.get('multimethod arch=ppc64')
+ pkg = spack.repo.get('multimethod arch=ppc64')
self.assertEqual(pkg.different_by_architecture(), 'ppc64')
- pkg = spack.db.get('multimethod arch=ppc32')
+ pkg = spack.repo.get('multimethod arch=ppc32')
self.assertEqual(pkg.different_by_architecture(), 'ppc32')
- pkg = spack.db.get('multimethod arch=arm64')
+ pkg = spack.repo.get('multimethod arch=arm64')
self.assertEqual(pkg.different_by_architecture(), 'arm64')
- pkg = spack.db.get('multimethod arch=macos')
+ pkg = spack.repo.get('multimethod arch=macos')
self.assertRaises(NoSuchMethodError, pkg.different_by_architecture)
def test_dependency_match(self):
- pkg = spack.db.get('multimethod^zmpi')
+ pkg = spack.repo.get('multimethod^zmpi')
self.assertEqual(pkg.different_by_dep(), 'zmpi')
- pkg = spack.db.get('multimethod^mpich')
+ pkg = spack.repo.get('multimethod^mpich')
self.assertEqual(pkg.different_by_dep(), 'mpich')
# If we try to switch on some entirely different dep, it's ambiguous,
# but should take the first option
- pkg = spack.db.get('multimethod^foobar')
+ pkg = spack.repo.get('multimethod^foobar')
self.assertEqual(pkg.different_by_dep(), 'mpich')
def test_virtual_dep_match(self):
- pkg = spack.db.get('multimethod^mpich2')
+ pkg = spack.repo.get('multimethod^mpich2')
self.assertEqual(pkg.different_by_virtual_dep(), 2)
- pkg = spack.db.get('multimethod^mpich@1.0')
+ pkg = spack.repo.get('multimethod^mpich@1.0')
self.assertEqual(pkg.different_by_virtual_dep(), 1)
diff --git a/lib/spack/spack/test/namespace_trie.py b/lib/spack/spack/test/namespace_trie.py
new file mode 100644
index 0000000000..2023ba6d96
--- /dev/null
+++ b/lib/spack/spack/test/namespace_trie.py
@@ -0,0 +1,115 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://software.llnl.gov/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import unittest
+
+from spack.util.naming import NamespaceTrie
+
+
+class NamespaceTrieTest(unittest.TestCase):
+
+ def setUp(self):
+ self.trie = NamespaceTrie()
+
+
+ def test_add_single(self):
+ self.trie['foo'] = 'bar'
+
+ self.assertTrue(self.trie.is_prefix('foo'))
+ self.assertTrue(self.trie.has_value('foo'))
+ self.assertEqual(self.trie['foo'], 'bar')
+
+
+ def test_add_multiple(self):
+ self.trie['foo.bar'] = 'baz'
+
+ self.assertFalse(self.trie.has_value('foo'))
+ self.assertTrue(self.trie.is_prefix('foo'))
+
+ self.assertTrue(self.trie.is_prefix('foo.bar'))
+ self.assertTrue(self.trie.has_value('foo.bar'))
+ self.assertEqual(self.trie['foo.bar'], 'baz')
+
+ self.assertFalse(self.trie.is_prefix('foo.bar.baz'))
+ self.assertFalse(self.trie.has_value('foo.bar.baz'))
+
+
+ def test_add_three(self):
+ # add a three-level namespace
+ self.trie['foo.bar.baz'] = 'quux'
+
+ self.assertTrue(self.trie.is_prefix('foo'))
+ self.assertFalse(self.trie.has_value('foo'))
+
+ self.assertTrue(self.trie.is_prefix('foo.bar'))
+ self.assertFalse(self.trie.has_value('foo.bar'))
+
+ self.assertTrue(self.trie.is_prefix('foo.bar.baz'))
+ self.assertTrue(self.trie.has_value('foo.bar.baz'))
+ self.assertEqual(self.trie['foo.bar.baz'], 'quux')
+
+ self.assertFalse(self.trie.is_prefix('foo.bar.baz.quux'))
+ self.assertFalse(self.trie.has_value('foo.bar.baz.quux'))
+
+ # Try to add a second element in a prefix namespace
+ self.trie['foo.bar'] = 'blah'
+
+ self.assertTrue(self.trie.is_prefix('foo'))
+ self.assertFalse(self.trie.has_value('foo'))
+
+ self.assertTrue(self.trie.is_prefix('foo.bar'))
+ self.assertTrue(self.trie.has_value('foo.bar'))
+ self.assertEqual(self.trie['foo.bar'], 'blah')
+
+ self.assertTrue(self.trie.is_prefix('foo.bar.baz'))
+ self.assertTrue(self.trie.has_value('foo.bar.baz'))
+ self.assertEqual(self.trie['foo.bar.baz'], 'quux')
+
+ self.assertFalse(self.trie.is_prefix('foo.bar.baz.quux'))
+ self.assertFalse(self.trie.has_value('foo.bar.baz.quux'))
+
+
+ def test_add_none_single(self):
+ self.trie['foo'] = None
+ self.assertTrue(self.trie.is_prefix('foo'))
+ self.assertTrue(self.trie.has_value('foo'))
+ self.assertEqual(self.trie['foo'], None)
+
+ self.assertFalse(self.trie.is_prefix('foo.bar'))
+ self.assertFalse(self.trie.has_value('foo.bar'))
+
+
+ def test_add_none_multiple(self):
+ self.trie['foo.bar'] = None
+
+ self.assertTrue(self.trie.is_prefix('foo'))
+ self.assertFalse(self.trie.has_value('foo'))
+
+ self.assertTrue(self.trie.is_prefix('foo.bar'))
+ self.assertTrue(self.trie.has_value('foo.bar'))
+ self.assertEqual(self.trie['foo.bar'], None)
+
+ self.assertFalse(self.trie.is_prefix('foo.bar.baz'))
+ self.assertFalse(self.trie.has_value('foo.bar.baz'))
diff --git a/lib/spack/spack/test/optional_deps.py b/lib/spack/spack/test/optional_deps.py
index fbee0cfa8f..55f35ea4c9 100644
--- a/lib/spack/spack/test/optional_deps.py
+++ b/lib/spack/spack/test/optional_deps.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,10 +22,8 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import unittest
-import spack
-from spack.spec import Spec, CompilerSpec
+from spack.spec import Spec
from spack.test.mock_packages_test import *
class ConcretizeTest(MockPackagesTest):
diff --git a/lib/spack/spack/test/package_sanity.py b/lib/spack/spack/test/package_sanity.py
index 6222e7b5f8..ee09040d0d 100644
--- a/lib/spack/spack/test/package_sanity.py
+++ b/lib/spack/spack/test/package_sanity.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -28,16 +28,15 @@ This test does sanity checks on Spack's builtin package database.
import unittest
import spack
-import spack.url as url
-from spack.packages import PackageDB
+from spack.repository import RepoPath
class PackageSanityTest(unittest.TestCase):
def check_db(self):
"""Get all packages in a DB to make sure they work."""
- for name in spack.db.all_package_names():
- spack.db.get(name)
+ for name in spack.repo.all_package_names():
+ spack.repo.get(name)
def test_get_all_packages(self):
@@ -47,15 +46,15 @@ class PackageSanityTest(unittest.TestCase):
def test_get_all_mock_packages(self):
"""Get the mock packages once each too."""
- tmp = spack.db
- spack.db = PackageDB(spack.mock_packages_path)
+ db = RepoPath(spack.mock_packages_path)
+ spack.repo.swap(db)
self.check_db()
- spack.db = tmp
+ spack.repo.swap(db)
def test_url_versions(self):
"""Check URLs for regular packages, if they are explicitly defined."""
- for pkg in spack.db.all_packages():
+ for pkg in spack.repo.all_packages():
for v, vattrs in pkg.versions.items():
if 'url' in vattrs:
# If there is a url for the version check it.
diff --git a/lib/spack/spack/test/packages.py b/lib/spack/spack/test/packages.py
index a8183cf6a6..f0b5e05f3b 100644
--- a/lib/spack/spack/test/packages.py
+++ b/lib/spack/spack/test/packages.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,40 +22,43 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import unittest
-
-from llnl.util.filesystem import join_path
import spack
-import spack.packages as packages
-from spack.util.naming import mod_to_class
+from llnl.util.filesystem import join_path
+from spack.repository import Repo
from spack.test.mock_packages_test import *
+from spack.util.naming import mod_to_class
class PackagesTest(MockPackagesTest):
def test_load_package(self):
- pkg = spack.db.get('mpich')
+ pkg = spack.repo.get('mpich')
def test_package_name(self):
- pkg = spack.db.get('mpich')
+ pkg = spack.repo.get('mpich')
self.assertEqual(pkg.name, 'mpich')
def test_package_filename(self):
- filename = spack.db.filename_for_package_name('mpich')
- self.assertEqual(filename, join_path(spack.mock_packages_path, 'mpich', 'package.py'))
+ repo = Repo(spack.mock_packages_path)
+ filename = repo.filename_for_package_name('mpich')
+ self.assertEqual(filename,
+ join_path(spack.mock_packages_path, 'packages', 'mpich', 'package.py'))
def test_package_name(self):
- pkg = spack.db.get('mpich')
+ pkg = spack.repo.get('mpich')
self.assertEqual(pkg.name, 'mpich')
def test_nonexisting_package_filename(self):
- filename = spack.db.filename_for_package_name('some-nonexisting-package')
- self.assertEqual(filename, join_path(spack.mock_packages_path, 'some-nonexisting-package', 'package.py'))
+ repo = Repo(spack.mock_packages_path)
+ filename = repo.filename_for_package_name('some-nonexisting-package')
+ self.assertEqual(
+ filename,
+ join_path(spack.mock_packages_path, 'packages', 'some-nonexisting-package', 'package.py'))
def test_package_class_names(self):
@@ -64,3 +67,38 @@ class PackagesTest(MockPackagesTest):
self.assertEqual('PmgrCollective', mod_to_class('pmgr-collective'))
self.assertEqual('Pmgrcollective', mod_to_class('PmgrCollective'))
self.assertEqual('_3db', mod_to_class('3db'))
+
+
+ #
+ # Below tests target direct imports of spack packages from the
+ # spack.pkg namespace
+ #
+
+ def test_import_package(self):
+ import spack.pkg.builtin.mock.mpich
+
+
+ def test_import_package_as(self):
+ import spack.pkg.builtin.mock.mpich as mp
+
+
+ def test_import_class_from_package(self):
+ from spack.pkg.builtin.mock.mpich import Mpich
+
+
+ def test_import_module_from_package(self):
+ from spack.pkg.builtin.mock import mpich
+
+
+ def test_import_namespace_container_modules(self):
+ import spack.pkg
+ import spack.pkg as p
+ from spack import pkg
+
+ import spack.pkg.builtin
+ import spack.pkg.builtin as b
+ from spack.pkg import builtin
+
+ import spack.pkg.builtin.mock
+ import spack.pkg.builtin.mock as m
+ from spack.pkg.builtin import mock
diff --git a/lib/spack/spack/test/pattern.py b/lib/spack/spack/test/pattern.py
new file mode 100644
index 0000000000..6c783c6a5f
--- /dev/null
+++ b/lib/spack/spack/test/pattern.py
@@ -0,0 +1,104 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+
+import unittest
+
+import spack.util.pattern as pattern
+
+
+class CompositeTest(unittest.TestCase):
+
+ def setUp(self):
+ class Base:
+ counter = 0
+
+ def add(self):
+            raise NotImplementedError('add not implemented')
+
+ def subtract(self):
+            raise NotImplementedError('subtract not implemented')
+
+ class One(Base):
+ def add(self):
+ Base.counter += 1
+
+ def subtract(self):
+ Base.counter -= 1
+
+ class Two(Base):
+ def add(self):
+ Base.counter += 2
+
+ def subtract(self):
+ Base.counter -= 2
+
+ self.Base = Base
+ self.One = One
+ self.Two = Two
+
+ def test_composite_from_method_list(self):
+
+ @pattern.composite(method_list=['add', 'subtract'])
+ class CompositeFromMethodList:
+ pass
+
+ composite = CompositeFromMethodList()
+ composite.append(self.One())
+ composite.append(self.Two())
+ composite.add()
+ self.assertEqual(self.Base.counter, 3)
+ composite.pop()
+ composite.subtract()
+ self.assertEqual(self.Base.counter, 2)
+
+ def test_composite_from_interface(self):
+
+ @pattern.composite(interface=self.Base)
+ class CompositeFromInterface:
+ pass
+
+ composite = CompositeFromInterface()
+ composite.append(self.One())
+ composite.append(self.Two())
+ composite.add()
+ self.assertEqual(self.Base.counter, 3)
+ composite.pop()
+ composite.subtract()
+ self.assertEqual(self.Base.counter, 2)
+
+ def test_error_conditions(self):
+
+ def wrong_container():
+ @pattern.composite(interface=self.Base, container=2)
+ class CompositeFromInterface:
+ pass
+
+ def no_methods():
+ @pattern.composite()
+ class CompositeFromInterface:
+ pass
+
+ self.assertRaises(TypeError, wrong_container)
+ self.assertRaises(TypeError, no_methods)
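pattern.composite decorates a class so that its instances act as containers whose listed methods fan out to every child; the forwarded names come either from method_list or from an interface class. A self-contained sketch under that reading of the tests above:

    import spack.util.pattern as pattern

    class Shouter(object):
        def add(self):
            print 'add'        # Python 2 print, matching the codebase
        def subtract(self):
            print 'subtract'

    @pattern.composite(method_list=['add', 'subtract'])
    class Calculator(object):
        pass

    calc = Calculator()
    calc.append(Shouter())     # container API comes from the default list
    calc.append(Shouter())
    calc.add()                 # each child's add() runs in insertion order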
diff --git a/lib/spack/spack/test/python_version.py b/lib/spack/spack/test/python_version.py
index 5779d31ed2..4294975304 100644
--- a/lib/spack/spack/test/python_version.py
+++ b/lib/spack/spack/test/python_version.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -28,13 +28,12 @@ This test ensures that all Spack files are Python version 2.6 or less.
Spack was originally 2.7, but enough systems in 2014 are still using
2.6 on their frontend nodes that we need 2.6 to get adopted.
"""
-import unittest
import os
import re
+import unittest
import llnl.util.tty as tty
-
-from external import pyqver2
+import pyqver2
import spack
spack_max_version = (2,6)
@@ -54,8 +53,8 @@ class PythonVersionTest(unittest.TestCase):
def package_py_files(self):
- for name in spack.db.all_package_names():
- yield spack.db.filename_for_package_name(name)
+ for name in spack.repo.all_package_names():
+ yield spack.repo.filename_for_package_name(name)
def check_python_versions(self, *files):
@@ -63,10 +62,6 @@ class PythonVersionTest(unittest.TestCase):
all_issues = {}
for fn in files:
- if fn != '/Users/gamblin2/src/spack/var/spack/packages/vim/package.py':
- continue
- print fn
-
with open(fn) as pyfile:
versions = pyqver2.get_versions(pyfile.read())
for ver, reasons in versions.items():
diff --git a/lib/spack/spack/test/sbang.py b/lib/spack/spack/test/sbang.py
new file mode 100644
index 0000000000..825bc4be98
--- /dev/null
+++ b/lib/spack/spack/test/sbang.py
@@ -0,0 +1,93 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""\
+Test that Spack's shebang filtering works correctly.
+"""
+import os
+import unittest
+import tempfile
+import shutil
+
+from llnl.util.filesystem import *
+from spack.hooks.sbang import filter_shebangs_in_directory
+import spack
+
+short_line = "#!/this/is/short/bin/bash\n"
+long_line = "#!/this/" + ('x' * 200) + "/is/long\n"
+sbang_line = '#!/bin/bash %s/bin/sbang\n' % spack.spack_root
+last_line = "last!\n"
+
+class SbangTest(unittest.TestCase):
+ def setUp(self):
+ self.tempdir = tempfile.mkdtemp()
+
+ # make sure we can ignore non-files
+ directory = os.path.join(self.tempdir, 'dir')
+ mkdirp(directory)
+
+ # Script with short shebang
+ self.short_shebang = os.path.join(self.tempdir, 'short')
+ with open(self.short_shebang, 'w') as f:
+ f.write(short_line)
+ f.write(last_line)
+
+ # Script with long shebang
+ self.long_shebang = os.path.join(self.tempdir, 'long')
+ with open(self.long_shebang, 'w') as f:
+ f.write(long_line)
+ f.write(last_line)
+
+ # Script already using sbang.
+ self.has_shebang = os.path.join(self.tempdir, 'shebang')
+ with open(self.has_shebang, 'w') as f:
+ f.write(sbang_line)
+ f.write(long_line)
+ f.write(last_line)
+
+
+ def tearDown(self):
+ shutil.rmtree(self.tempdir, ignore_errors=True)
+
+
+ def test_shebang_handling(self):
+ filter_shebangs_in_directory(self.tempdir)
+
+ # Make sure this is untouched
+ with open(self.short_shebang, 'r') as f:
+ self.assertEqual(f.readline(), short_line)
+ self.assertEqual(f.readline(), last_line)
+
+ # Make sure this got patched.
+ with open(self.long_shebang, 'r') as f:
+ self.assertEqual(f.readline(), sbang_line)
+ self.assertEqual(f.readline(), long_line)
+ self.assertEqual(f.readline(), last_line)
+
+ # Make sure this is untouched
+ with open(self.has_shebang, 'r') as f:
+ self.assertEqual(f.readline(), sbang_line)
+ self.assertEqual(f.readline(), long_line)
+ self.assertEqual(f.readline(), last_line)
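
The three fixtures above pin down the expected behavior: shebang lines short
enough for the kernel are left alone, over-long ones get the sbang line
prepended (with the original shebang kept on the next line), and scripts
already using sbang are not patched twice. A hedged sketch of that logic,
inferred only from these assertions (the real code is the
filter_shebangs_in_directory hook in spack/hooks/sbang.py and may differ;
the 127-character limit is the traditional kernel constraint and is an
assumption here):

    SBANG = '#!/bin/bash /path/to/spack/bin/sbang\n'   # placeholder prefix

    def patch_shebang(path):
        # Hypothetical re-implementation, for illustration only.
        with open(path, 'r') as f:
            lines = f.readlines()
        if not lines or not lines[0].startswith('#!'):
            return                      # not a script
        if lines[0] == SBANG:
            return                      # already patched
        if len(lines[0]) <= 127:
            return                      # short shebangs work natively
        with open(path, 'w') as f:      # prepend sbang, keep old shebang
            f.writelines([SBANG] + lines)
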
diff --git a/lib/spack/spack/test/spec_dag.py b/lib/spack/spack/test/spec_dag.py
index 9576c362b8..e6b701870c 100644
--- a/lib/spack/spack/test/spec_dag.py
+++ b/lib/spack/spack/test/spec_dag.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -40,8 +40,8 @@ from spack.test.mock_packages_test import *
class SpecDagTest(MockPackagesTest):
def test_conflicting_package_constraints(self):
- set_pkg_dep('mpileaks', 'mpich@1.0')
- set_pkg_dep('callpath', 'mpich@2.0')
+ self.set_pkg_dep('mpileaks', 'mpich@1.0')
+ self.set_pkg_dep('callpath', 'mpich@2.0')
spec = Spec('mpileaks ^mpich ^callpath ^dyninst ^libelf ^libdwarf')
@@ -223,25 +223,25 @@ class SpecDagTest(MockPackagesTest):
def test_unsatisfiable_version(self):
- set_pkg_dep('mpileaks', 'mpich@1.0')
+ self.set_pkg_dep('mpileaks', 'mpich@1.0')
spec = Spec('mpileaks ^mpich@2.0 ^callpath ^dyninst ^libelf ^libdwarf')
self.assertRaises(spack.spec.UnsatisfiableVersionSpecError, spec.normalize)
def test_unsatisfiable_compiler(self):
- set_pkg_dep('mpileaks', 'mpich%gcc')
+ self.set_pkg_dep('mpileaks', 'mpich%gcc')
spec = Spec('mpileaks ^mpich%intel ^callpath ^dyninst ^libelf ^libdwarf')
self.assertRaises(spack.spec.UnsatisfiableCompilerSpecError, spec.normalize)
def test_unsatisfiable_compiler_version(self):
- set_pkg_dep('mpileaks', 'mpich%gcc@4.6')
+ self.set_pkg_dep('mpileaks', 'mpich%gcc@4.6')
spec = Spec('mpileaks ^mpich%gcc@4.5 ^callpath ^dyninst ^libelf ^libdwarf')
self.assertRaises(spack.spec.UnsatisfiableCompilerSpecError, spec.normalize)
def test_unsatisfiable_architecture(self):
- set_pkg_dep('mpileaks', 'mpich arch=bgqos_0')
+ self.set_pkg_dep('mpileaks', 'mpich arch=bgqos_0')
spec = Spec('mpileaks ^mpich arch=sles_10_ppc64 ^callpath ^dyninst ^libelf ^libdwarf')
self.assertRaises(spack.spec.UnsatisfiableArchitectureSpecError, spec.normalize)
@@ -336,20 +336,22 @@ class SpecDagTest(MockPackagesTest):
self.assertEqual(lhs, rhs)
self.assertEqual(str(lhs), str(rhs))
- # test that equal and equal_dag are doing the right thing
+ # Test that equal and equal_dag are doing the right thing
self.assertEqual(spec, expected_flat)
self.assertTrue(spec.eq_dag(expected_flat))
- self.assertEqual(spec, expected_normalized)
+ # Normalized has different DAG structure, so NOT equal.
+ self.assertNotEqual(spec, expected_normalized)
self.assertFalse(spec.eq_dag(expected_normalized))
- self.assertEqual(spec, non_unique_nodes)
+ # Again, different DAG structure so not equal.
+ self.assertNotEqual(spec, non_unique_nodes)
self.assertFalse(spec.eq_dag(non_unique_nodes))
spec.normalize()
# After normalizing, spec_dag_equal should match the normalized spec.
- self.assertEqual(spec, expected_flat)
+ self.assertNotEqual(spec, expected_flat)
self.assertFalse(spec.eq_dag(expected_flat))
self.assertEqual(spec, expected_normalized)
@@ -437,5 +439,3 @@ class SpecDagTest(MockPackagesTest):
orig_ids = set(id(s) for s in orig.traverse())
copy_ids = set(id(s) for s in copy.traverse())
self.assertFalse(orig_ids.intersection(copy_ids))
-
-# LocalWords: libdwarf
diff --git a/lib/spack/spack/test/spec_semantics.py b/lib/spack/spack/test/spec_semantics.py
index e39bba252b..7875d45f2a 100644
--- a/lib/spack/spack/test/spec_semantics.py
+++ b/lib/spack/spack/test/spec_semantics.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -35,7 +35,10 @@ class SpecSematicsTest(MockPackagesTest):
# ================================================================================
def check_satisfies(self, spec, anon_spec, concrete=False):
left = Spec(spec, concrete=concrete)
- right = parse_anonymous_spec(anon_spec, left.name)
+ try:
+ right = Spec(anon_spec) # if it's not anonymous, allow it.
+ except:
+ right = parse_anonymous_spec(anon_spec, left.name)
# Satisfies is one-directional.
self.assertTrue(left.satisfies(right))
@@ -48,7 +51,10 @@ class SpecSematicsTest(MockPackagesTest):
def check_unsatisfiable(self, spec, anon_spec, concrete=False):
left = Spec(spec, concrete=concrete)
- right = parse_anonymous_spec(anon_spec, left.name)
+ try:
+ right = Spec(anon_spec) # if it's not anonymous, allow it.
+ except:
+ right = parse_anonymous_spec(anon_spec, left.name)
self.assertFalse(left.satisfies(right))
self.assertFalse(left.satisfies(anon_spec))
@@ -88,6 +94,28 @@ class SpecSematicsTest(MockPackagesTest):
self.check_satisfies('libdwarf^libelf@0.8.13', '^libelf@0:1')
+ def test_satisfies_namespace(self):
+ self.check_satisfies('builtin.mpich', 'mpich')
+ self.check_satisfies('builtin.mock.mpich', 'mpich')
+
+ # TODO: only works for deps now, but shouldn't we allow this for root spec?
+ # self.check_satisfies('builtin.mock.mpich', 'mpi')
+
+ self.check_satisfies('builtin.mock.mpich', 'builtin.mock.mpich')
+
+ self.check_unsatisfiable('builtin.mock.mpich', 'builtin.mpich')
+
+
+ def test_satisfies_namespaced_dep(self):
+ """Ensure spec from same or unspecified namespace satisfies namespace constraint."""
+ self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpich')
+
+ self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpi')
+ self.check_satisfies('mpileaks ^builtin.mock.mpich', '^builtin.mock.mpich')
+
+ self.check_unsatisfiable('mpileaks ^builtin.mock.mpich', '^builtin.mpich')
+
+
def test_satisfies_compiler(self):
self.check_satisfies('foo%gcc', '%gcc')
self.check_satisfies('foo%intel', '%intel')
@@ -190,11 +218,23 @@ class SpecSematicsTest(MockPackagesTest):
def test_satisfies_virtual(self):
+ # Don't use check_satisfies: it checks constrain() too, and
+ # you can't constrain a non-virtual by a virtual.
self.assertTrue(Spec('mpich').satisfies(Spec('mpi')))
self.assertTrue(Spec('mpich2').satisfies(Spec('mpi')))
self.assertTrue(Spec('zmpi').satisfies(Spec('mpi')))
+ def test_satisfies_virtual_dep_with_virtual_constraint(self):
+ """Ensure we can satisfy virtual constraints when there are multiple
+ vdep providers in the specs."""
+ self.assertTrue(Spec('netlib-lapack ^openblas').satisfies('netlib-lapack ^openblas'))
+ self.assertFalse(Spec('netlib-lapack ^netlib-blas').satisfies('netlib-lapack ^openblas'))
+
+ self.assertFalse(Spec('netlib-lapack ^openblas').satisfies('netlib-lapack ^netlib-blas'))
+ self.assertTrue(Spec('netlib-lapack ^netlib-blas').satisfies('netlib-lapack ^netlib-blas'))
+
+
# ================================================================================
# Indexing specs
# ================================================================================
diff --git a/lib/spack/spack/test/spec_syntax.py b/lib/spack/spack/test/spec_syntax.py
index 404f38906e..6e08e30e13 100644
--- a/lib/spack/spack/test/spec_syntax.py
+++ b/lib/spack/spack/test/spec_syntax.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,9 +23,10 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import unittest
+
import spack.spec
-from spack.spec import *
from spack.parse import Token
+from spack.spec import *
# Sample output for a complex lexing.
complex_lex = [Token(ID, 'mvapich_foo'),
diff --git a/lib/spack/spack/test/spec_yaml.py b/lib/spack/spack/test/spec_yaml.py
index 869befc02a..11987ea1b3 100644
--- a/lib/spack/spack/test/spec_yaml.py
+++ b/lib/spack/spack/test/spec_yaml.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/test/stage.py b/lib/spack/spack/test/stage.py
index 8cff8f7960..ea425127c4 100644
--- a/lib/spack/spack/test/stage.py
+++ b/lib/spack/spack/test/stage.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -25,15 +25,13 @@
"""\
Test that the Stage class works correctly.
"""
-import unittest
-import shutil
import os
-import getpass
+import shutil
+import unittest
from contextlib import *
-from llnl.util.filesystem import *
-
import spack
+from llnl.util.filesystem import *
from spack.stage import Stage
from spack.util.executable import which
@@ -192,116 +190,125 @@ class StageTest(unittest.TestCase):
def test_setup_and_destroy_name_with_tmp(self):
with use_tmp(True):
- stage = Stage(archive_url, name=stage_name)
- self.check_setup(stage, stage_name)
-
- stage.destroy()
+ with Stage(archive_url, name=stage_name) as stage:
+ self.check_setup(stage, stage_name)
self.check_destroy(stage, stage_name)
def test_setup_and_destroy_name_without_tmp(self):
with use_tmp(False):
- stage = Stage(archive_url, name=stage_name)
- self.check_setup(stage, stage_name)
-
- stage.destroy()
+ with Stage(archive_url, name=stage_name) as stage:
+ self.check_setup(stage, stage_name)
self.check_destroy(stage, stage_name)
def test_setup_and_destroy_no_name_with_tmp(self):
with use_tmp(True):
- stage = Stage(archive_url)
- self.check_setup(stage, None)
-
- stage.destroy()
+ with Stage(archive_url) as stage:
+ self.check_setup(stage, None)
self.check_destroy(stage, None)
def test_setup_and_destroy_no_name_without_tmp(self):
with use_tmp(False):
- stage = Stage(archive_url)
- self.check_setup(stage, None)
-
- stage.destroy()
+ with Stage(archive_url) as stage:
+ self.check_setup(stage, None)
self.check_destroy(stage, None)
def test_chdir(self):
- stage = Stage(archive_url, name=stage_name)
-
- stage.chdir()
- self.check_setup(stage, stage_name)
- self.check_chdir(stage, stage_name)
-
- stage.destroy()
+ with Stage(archive_url, name=stage_name) as stage:
+ stage.chdir()
+ self.check_setup(stage, stage_name)
+ self.check_chdir(stage, stage_name)
self.check_destroy(stage, stage_name)
def test_fetch(self):
- stage = Stage(archive_url, name=stage_name)
-
- stage.fetch()
- self.check_setup(stage, stage_name)
- self.check_chdir(stage, stage_name)
- self.check_fetch(stage, stage_name)
-
- stage.destroy()
+ with Stage(archive_url, name=stage_name) as stage:
+ stage.fetch()
+ self.check_setup(stage, stage_name)
+ self.check_chdir(stage, stage_name)
+ self.check_fetch(stage, stage_name)
self.check_destroy(stage, stage_name)
def test_expand_archive(self):
- stage = Stage(archive_url, name=stage_name)
-
- stage.fetch()
- self.check_setup(stage, stage_name)
- self.check_fetch(stage, stage_name)
-
- stage.expand_archive()
- self.check_expand_archive(stage, stage_name)
-
- stage.destroy()
+ with Stage(archive_url, name=stage_name) as stage:
+ stage.fetch()
+ self.check_setup(stage, stage_name)
+ self.check_fetch(stage, stage_name)
+ stage.expand_archive()
+ self.check_expand_archive(stage, stage_name)
self.check_destroy(stage, stage_name)
     def test_expand_archive_and_chdir(self):
- stage = Stage(archive_url, name=stage_name)
+ with Stage(archive_url, name=stage_name) as stage:
+ stage.fetch()
+ self.check_setup(stage, stage_name)
+ self.check_fetch(stage, stage_name)
+ stage.expand_archive()
+ stage.chdir_to_source()
+ self.check_expand_archive(stage, stage_name)
+ self.check_chdir_to_source(stage, stage_name)
+ self.check_destroy(stage, stage_name)
- stage.fetch()
- self.check_setup(stage, stage_name)
- self.check_fetch(stage, stage_name)
- stage.expand_archive()
- stage.chdir_to_source()
- self.check_expand_archive(stage, stage_name)
- self.check_chdir_to_source(stage, stage_name)
+ def test_restage(self):
+ with Stage(archive_url, name=stage_name) as stage:
+ stage.fetch()
+ stage.expand_archive()
+ stage.chdir_to_source()
+ self.check_expand_archive(stage, stage_name)
+ self.check_chdir_to_source(stage, stage_name)
+
+ # Try to make a file in the old archive dir
+ with open('foobar', 'w') as file:
+ file.write("this file is to be destroyed.")
+
+ self.assertTrue('foobar' in os.listdir(stage.source_path))
+
+ # Make sure the file is not there after restage.
+ stage.restage()
+ self.check_chdir(stage, stage_name)
+ self.check_fetch(stage, stage_name)
+ stage.chdir_to_source()
+ self.check_chdir_to_source(stage, stage_name)
+ self.assertFalse('foobar' in os.listdir(stage.source_path))
+ self.check_destroy(stage, stage_name)
- stage.destroy()
+
+ def test_no_keep_without_exceptions(self):
+ with Stage(archive_url, name=stage_name, keep=False) as stage:
+ pass
self.check_destroy(stage, stage_name)
- def test_restage(self):
- stage = Stage(archive_url, name=stage_name)
+ def test_keep_without_exceptions(self):
+ with Stage(archive_url, name=stage_name, keep=True) as stage:
+ pass
+ path = self.get_stage_path(stage, stage_name)
+ self.assertTrue(os.path.isdir(path))
- stage.fetch()
- stage.expand_archive()
- stage.chdir_to_source()
- self.check_expand_archive(stage, stage_name)
- self.check_chdir_to_source(stage, stage_name)
- # Try to make a file in the old archive dir
- with open('foobar', 'w') as file:
- file.write("this file is to be destroyed.")
+ def test_no_keep_with_exceptions(self):
+ try:
+ with Stage(archive_url, name=stage_name, keep=False) as stage:
+ raise Exception()
- self.assertTrue('foobar' in os.listdir(stage.source_path))
+ path = self.get_stage_path(stage, stage_name)
+ self.assertTrue(os.path.isdir(path))
+ except:
+ pass # ignore here.
- # Make sure the file is not there after restage.
- stage.restage()
- self.check_chdir(stage, stage_name)
- self.check_fetch(stage, stage_name)
- stage.chdir_to_source()
- self.check_chdir_to_source(stage, stage_name)
- self.assertFalse('foobar' in os.listdir(stage.source_path))
+ def test_keep_exceptions(self):
+ try:
+ with Stage(archive_url, name=stage_name, keep=True) as stage:
+ raise Exception()
- stage.destroy()
- self.check_destroy(stage, stage_name)
+ path = self.get_stage_path(stage, stage_name)
+ self.assertTrue(os.path.isdir(path))
+ except:
+ pass # ignore here.
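
Read together, these tests define the context-manager contract that Stage now
implements: on a clean exit the stage directory is destroyed unless keep=True,
and when an exception propagates the directory is kept regardless of keep, so
a failed build can be inspected. A minimal sketch of just that protocol
(assuming this reading of the tests; the real class also manages fetching,
expansion, and temp-directory symlinks):

    class StageLike(object):
        """Hypothetical illustration of Stage's enter/exit semantics."""

        def __init__(self, keep=False):
            self.keep = keep

        def __enter__(self):
            self.create()               # set up the stage directory
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            # Keep the stage if the caller asked for it *or* if an
            # exception is propagating, so failures can be inspected.
            if exc_type is None and not self.keep:
                self.destroy()

        def create(self):               # stand-ins for the real methods
            pass

        def destroy(self):
            pass
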
diff --git a/lib/spack/spack/test/svn_fetch.py b/lib/spack/spack/test/svn_fetch.py
index 184fe8faa1..1ee4ee700e 100644
--- a/lib/spack/spack/test/svn_fetch.py
+++ b/lib/spack/spack/test/svn_fetch.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -24,18 +24,12 @@
##############################################################################
import os
import re
-import unittest
-import shutil
-import tempfile
-
-from llnl.util.filesystem import *
-
import spack
+
+from spack.test.mock_repo import svn, MockSvnRepo
from spack.version import ver
-from spack.stage import Stage
-from spack.util.executable import which
from spack.test.mock_packages_test import *
-from spack.test.mock_repo import svn, MockSvnRepo
+from llnl.util.filesystem import *
class SvnFetchTest(MockPackagesTest):
@@ -49,23 +43,17 @@ class SvnFetchTest(MockPackagesTest):
spec = Spec('svn-test')
spec.concretize()
- self.pkg = spack.db.get(spec, new=True)
-
+ self.pkg = spack.repo.get(spec, new=True)
def tearDown(self):
"""Destroy the stage space used by this test."""
super(SvnFetchTest, self).tearDown()
-
- if self.repo.stage is not None:
- self.repo.stage.destroy()
-
- self.pkg.do_clean()
-
+ self.repo.destroy()
def assert_rev(self, rev):
"""Check that the current revision is equal to the supplied rev."""
def get_rev():
- output = svn('info', return_output=True)
+ output = svn('info', output=str)
self.assertTrue("Revision" in output)
for line in output.split('\n'):
match = re.match(r'Revision: (\d+)', line)
@@ -73,7 +61,6 @@ class SvnFetchTest(MockPackagesTest):
return match.group(1)
self.assertEqual(get_rev(), rev)
-
def try_fetch(self, rev, test_file, args):
"""Tries to:
1. Fetch the repo using a fetch strategy constructed with
@@ -85,26 +72,27 @@ class SvnFetchTest(MockPackagesTest):
"""
self.pkg.versions[ver('svn')] = args
- self.pkg.do_stage()
- self.assert_rev(rev)
+ with self.pkg.stage:
+ self.pkg.do_stage()
+ self.assert_rev(rev)
- file_path = join_path(self.pkg.stage.source_path, test_file)
- self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
- self.assertTrue(os.path.isfile(file_path))
+ file_path = join_path(self.pkg.stage.source_path, test_file)
+ self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
+ self.assertTrue(os.path.isfile(file_path))
- os.unlink(file_path)
- self.assertFalse(os.path.isfile(file_path))
+ os.unlink(file_path)
+ self.assertFalse(os.path.isfile(file_path))
- untracked = 'foobarbaz'
- touch(untracked)
- self.assertTrue(os.path.isfile(untracked))
- self.pkg.do_restage()
- self.assertFalse(os.path.isfile(untracked))
+ untracked = 'foobarbaz'
+ touch(untracked)
+ self.assertTrue(os.path.isfile(untracked))
+ self.pkg.do_restage()
+ self.assertFalse(os.path.isfile(untracked))
- self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
- self.assertTrue(os.path.isfile(file_path))
+ self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
+ self.assertTrue(os.path.isfile(file_path))
- self.assert_rev(rev)
+ self.assert_rev(rev)
def test_fetch_default(self):
diff --git a/lib/spack/spack/test/tally_plugin.py b/lib/spack/spack/test/tally_plugin.py
new file mode 100644
index 0000000000..4163ab95dd
--- /dev/null
+++ b/lib/spack/spack/test/tally_plugin.py
@@ -0,0 +1,59 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import os
+
+from nose.plugins import Plugin
+
+class Tally(Plugin):
+ name = 'tally'
+
+ def __init__(self):
+ super(Tally, self).__init__()
+ self.successCount = 0
+ self.failCount = 0
+ self.errorCount = 0
+
+ @property
+ def numberOfTestsRun(self):
+ """Excludes skipped tests"""
+ return self.errorCount + self.failCount + self.successCount
+
+ def options(self, parser, env=os.environ):
+ super(Tally, self).options(parser, env=env)
+
+ def configure(self, options, conf):
+ super(Tally, self).configure(options, conf)
+
+ def addSuccess(self, test):
+ self.successCount += 1
+
+ def addError(self, test, err):
+ self.errorCount += 1
+
+ def addFailure(self, test, err):
+ self.failCount += 1
+
+ def finalize(self, result):
+ pass
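
A nose plugin like this is registered with the runner explicitly rather than
through setuptools entry points. A hedged usage sketch (the argv is
illustrative; --with-tally enables the plugin under the name it declares):

    import nose
    from spack.test.tally_plugin import Tally

    tally = Tally()
    # addplugins hands plugin instances directly to nose's test runner.
    nose.run(argv=['nosetests', '--with-tally'], addplugins=[tally])
    print("%d run: %d failures, %d errors" % (
        tally.numberOfTestsRun, tally.failCount, tally.errorCount))
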
diff --git a/lib/spack/spack/test/unit_install.py b/lib/spack/spack/test/unit_install.py
index c4b9092f05..18615b7efe 100644
--- a/lib/spack/spack/test/unit_install.py
+++ b/lib/spack/spack/test/unit_install.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -22,20 +22,21 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import unittest
import itertools
+import unittest
import spack
-test_install = __import__("spack.cmd.test-install",
+
+test_install = __import__("spack.cmd.test-install",
fromlist=["BuildId", "create_test_output", "TestResult"])
class MockOutput(object):
def __init__(self):
self.results = {}
-
+
def add_test(self, buildId, passed=True, buildInfo=None):
self.results[buildId] = passed
-
+
def write_to(self, stream):
pass
@@ -45,14 +46,14 @@ class MockSpec(object):
self.name = name
self.version = version
self.hash = hashStr if hashStr else hash((name, version))
-
+
def traverse(self, order=None):
- allDeps = itertools.chain.from_iterable(i.traverse() for i in
+ allDeps = itertools.chain.from_iterable(i.traverse() for i in
self.dependencies.itervalues())
return set(itertools.chain([self], allDeps))
-
+
def dag_hash(self):
- return self.hash
+ return self.hash
def to_yaml(self):
return "<<<MOCK YAML {0}>>>".format(test_install.BuildId(self).stringId())
@@ -75,47 +76,51 @@ class UnitInstallTest(unittest.TestCase):
def setUp(self):
super(UnitInstallTest, self).setUp()
-
+
pkgX.installed = False
pkgY.installed = False
+ self.saved_db = spack.repo
pkgDb = MockPackageDb({specX:pkgX, specY:pkgY})
- spack.db = pkgDb
+ spack.repo = pkgDb
+
def tearDown(self):
super(UnitInstallTest, self).tearDown()
-
+
+ spack.repo = self.saved_db
+
def test_installing_both(self):
mo = MockOutput()
-
+
pkgX.installed = True
pkgY.installed = True
- test_install.create_test_output(specX, [specX, specY], mo, getLogFunc=test_fetch_log)
-
- self.assertEqual(mo.results,
- {bIdX:test_install.TestResult.PASSED,
- bIdY:test_install.TestResult.PASSED})
+ test_install.create_test_output(specX, [specX, specY], mo, getLogFunc=mock_fetch_log)
+
+ self.assertEqual(mo.results,
+ {bIdX:test_install.TestResult.PASSED,
+ bIdY:test_install.TestResult.PASSED})
+
def test_dependency_already_installed(self):
mo = MockOutput()
-
+
pkgX.installed = True
pkgY.installed = True
- test_install.create_test_output(specX, [specX], mo, getLogFunc=test_fetch_log)
-
+ test_install.create_test_output(specX, [specX], mo, getLogFunc=mock_fetch_log)
self.assertEqual(mo.results, {bIdX:test_install.TestResult.PASSED})
#TODO: add test(s) where Y fails to install
+
class MockPackageDb(object):
def __init__(self, init=None):
self.specToPkg = {}
if init:
self.specToPkg.update(init)
-
+
def get(self, spec):
return self.specToPkg[spec]
-def test_fetch_log(path):
+def mock_fetch_log(path):
return []
-
diff --git a/lib/spack/spack/test/url_extrapolate.py b/lib/spack/spack/test/url_extrapolate.py
index 00d8216020..068a335b49 100644
--- a/lib/spack/spack/test/url_extrapolate.py
+++ b/lib/spack/spack/test/url_extrapolate.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -25,10 +25,7 @@
"""\
Tests ability of spack to extrapolate URL versions from existing versions.
"""
-import spack
import spack.url as url
-from spack.spec import Spec
-from spack.version import ver
from spack.test.mock_packages_test import *
diff --git a/lib/spack/spack/test/url_parse.py b/lib/spack/spack/test/url_parse.py
index ae1d559f7c..561d4658a1 100644
--- a/lib/spack/spack/test/url_parse.py
+++ b/lib/spack/spack/test/url_parse.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -27,8 +27,8 @@ This file has a bunch of versions tests taken from the excellent version
detection in Homebrew.
"""
import unittest
+
import spack.url as url
-from pprint import pprint
class UrlParseTest(unittest.TestCase):
diff --git a/lib/spack/spack/test/url_substitution.py b/lib/spack/spack/test/url_substitution.py
index db7ddd251d..2be38af0d3 100644
--- a/lib/spack/spack/test/url_substitution.py
+++ b/lib/spack/spack/test/url_substitution.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -27,9 +27,7 @@ This test does sanity checks on substituting new versions into URLs
"""
import unittest
-import spack
import spack.url as url
-from spack.packages import PackageDB
class PackageSanityTest(unittest.TestCase):
diff --git a/lib/spack/spack/test/versions.py b/lib/spack/spack/test/versions.py
index 20e946e90e..2732006eb3 100644
--- a/lib/spack/spack/test/versions.py
+++ b/lib/spack/spack/test/versions.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -28,6 +28,7 @@ We try to maintain compatibility with RPM's version semantics
where it makes sense.
"""
import unittest
+
from spack.version import *
diff --git a/lib/spack/spack/test/yaml.py b/lib/spack/spack/test/yaml.py
new file mode 100644
index 0000000000..b930c022f2
--- /dev/null
+++ b/lib/spack/spack/test/yaml.py
@@ -0,0 +1,94 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""
+Test Spack's custom YAML format.
+"""
+import unittest
+
+import spack.util.spack_yaml as syaml
+
+test_file = """\
+config_file:
+ x86_64:
+ foo: /path/to/foo
+ bar: /path/to/bar
+ baz: /path/to/baz
+ some_list:
+ - item 1
+ - item 2
+ - item 3
+ another_list:
+ [ 1, 2, 3 ]
+ some_key: some_string
+"""
+
+test_data = {
+ 'config_file' : syaml.syaml_dict([
+ ('x86_64', syaml.syaml_dict([
+ ('foo', '/path/to/foo'),
+ ('bar', '/path/to/bar'),
+ ('baz', '/path/to/baz' )])),
+ ('some_list', [ 'item 1', 'item 2', 'item 3' ]),
+ ('another_list', [ 1, 2, 3 ]),
+ ('some_key', 'some_string')
+ ])}
+
+class YamlTest(unittest.TestCase):
+
+ def setUp(self):
+ self.data = syaml.load(test_file)
+
+
+ def test_parse(self):
+ self.assertEqual(test_data, self.data)
+
+
+ def test_dict_order(self):
+ self.assertEqual(
+ ['x86_64', 'some_list', 'another_list', 'some_key'],
+ self.data['config_file'].keys())
+
+ self.assertEqual(
+ ['foo', 'bar', 'baz'],
+ self.data['config_file']['x86_64'].keys())
+
+
+ def test_line_numbers(self):
+ def check(obj, start_line, end_line):
+ self.assertEqual(obj._start_mark.line, start_line)
+ self.assertEqual(obj._end_mark.line, end_line)
+
+ check(self.data, 0, 12)
+ check(self.data['config_file'], 1, 12)
+ check(self.data['config_file']['x86_64'], 2, 5)
+ check(self.data['config_file']['x86_64']['foo'], 2, 2)
+ check(self.data['config_file']['x86_64']['bar'], 3, 3)
+ check(self.data['config_file']['x86_64']['baz'], 4, 4)
+ check(self.data['config_file']['some_list'], 6, 9)
+ check(self.data['config_file']['some_list'][0], 6, 6)
+ check(self.data['config_file']['some_list'][1], 7, 7)
+ check(self.data['config_file']['some_list'][2], 8, 8)
+ check(self.data['config_file']['another_list'], 10, 10)
+ check(self.data['config_file']['some_key'], 11, 11)
diff --git a/lib/spack/spack/url.py b/lib/spack/spack/url.py
index 6adbfe156d..f51f05cad7 100644
--- a/lib/spack/spack/url.py
+++ b/lib/spack/spack/url.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -70,7 +70,7 @@ def find_list_url(url):
"""
url_types = [
- # e.g. https://github.com/scalability-llnl/callpath/archive/v1.0.1.tar.gz
+ # e.g. https://github.com/llnl/callpath/archive/v1.0.1.tar.gz
(r'^(https://github.com/[^/]+/[^/]+)/archive/', lambda m: m.group(1) + '/releases')
]
@@ -142,7 +142,7 @@ def split_url_extension(path):
def downloaded_file_extension(path):
"""This returns the type of archive a URL refers to. This is
- sometimes confusing becasue of URLs like:
+ sometimes confusing because of URLs like:
(1) https://github.com/petdance/ack/tarball/1.93_02
@@ -225,7 +225,7 @@ def parse_version_offset(path):
(r'_((\d+\.)+\d+[a-z]?)[.]orig$', stem),
# e.g. http://www.openssl.org/source/openssl-0.9.8s.tar.gz
- (r'-([^-]+(-alpha|-beta)?)', stem),
+ (r'-v?([^-]+(-alpha|-beta)?)', stem),
# e.g. astyle_1.23_macosx.tar.gz
(r'_([^_]+(_alpha|_beta)?)', stem),
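
The `-v?` added to the openssl-style pattern lets a version carry an optional
leading 'v' after the dash, so a stem like foo-v1.2.3 parses the same as
foo-1.2.3. Checking just that pattern in isolation (outside the full
parse_version_offset machinery):

    import re

    pattern = re.compile(r'-v?([^-]+(-alpha|-beta)?)')
    for stem in ['openssl-0.9.8s', 'foo-v1.2.3']:
        match = pattern.search(stem)
        print(stem + ' -> ' + match.group(1))
    # openssl-0.9.8s -> 0.9.8s
    # foo-v1.2.3 -> 1.2.3
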
diff --git a/lib/spack/spack/util/__init__.py b/lib/spack/spack/util/__init__.py
index 1c388f31c5..b54691b67c 100644
--- a/lib/spack/spack/util/__init__.py
+++ b/lib/spack/spack/util/__init__.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/util/compression.py b/lib/spack/spack/util/compression.py
index fd17785ad0..5ae5867428 100644
--- a/lib/spack/spack/util/compression.py
+++ b/lib/spack/spack/util/compression.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -27,13 +27,12 @@ import os
from itertools import product
from spack.util.executable import which
-# Supported archvie extensions.
+# Supported archive extensions.
PRE_EXTS = ["tar"]
EXTS = ["gz", "bz2", "xz", "Z", "zip", "tgz"]
-# Add EXTS last so that .tar.gz is matched *before* tar.gz
-ALLOWED_ARCHIVE_TYPES = [".".join(l) for l in product(PRE_EXTS, EXTS)] + EXTS
-
+# Add PRE_EXTS and EXTS last so that .tar.gz is matched *before* .tar or .gz
+ALLOWED_ARCHIVE_TYPES = [".".join(l) for l in product(PRE_EXTS, EXTS)] + PRE_EXTS + EXTS
def allowed_archive(path):
return any(path.endswith(t) for t in ALLOWED_ARCHIVE_TYPES)
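
With PRE_EXTS and EXTS now appended individually, bare '.tar' files and
single-extension archives are accepted as well, while the combined 'tar.*'
entries still come first in the list. The effect, using the definitions
above verbatim:

    from itertools import product

    PRE_EXTS = ["tar"]
    EXTS = ["gz", "bz2", "xz", "Z", "zip", "tgz"]
    ALLOWED_ARCHIVE_TYPES = ([".".join(l) for l in product(PRE_EXTS, EXTS)]
                             + PRE_EXTS + EXTS)

    def allowed_archive(path):
        return any(path.endswith(t) for t in ALLOWED_ARCHIVE_TYPES)

    assert allowed_archive('foo-1.0.tar.gz')   # matched by 'tar.gz' first
    assert allowed_archive('foo-1.0.tar')      # bare .tar: newly allowed
    assert not allowed_archive('foo-1.0.txt')
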
diff --git a/lib/spack/spack/util/crypto.py b/lib/spack/spack/util/crypto.py
index 8a8574cd3d..5269260284 100644
--- a/lib/spack/spack/util/crypto.py
+++ b/lib/spack/spack/util/crypto.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/util/debug.py b/lib/spack/spack/util/debug.py
index 37985eccdd..7930753f6f 100644
--- a/lib/spack/spack/util/debug.py
+++ b/lib/spack/spack/util/debug.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/util/environment.py b/lib/spack/spack/util/environment.py
index 7a4ff919ad..55e653fd2f 100644
--- a/lib/spack/spack/util/environment.py
+++ b/lib/spack/spack/util/environment.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -59,7 +59,8 @@ def path_put_first(var_name, directories):
path_set(var_name, new_path)
-def pop_keys(dictionary, *keys):
- for key in keys:
- if key in dictionary:
- dictionary.pop(key)
+def dump_environment(path):
+ """Dump the current environment out to a file."""
+ with open(path, 'w') as env_file:
+ for key, val in sorted(os.environ.items()):
+ env_file.write("%s=%s\n" % (key, val))
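
dump_environment replaces the now-unused pop_keys helper: it snapshots the
whole environment, sorted by key, one KEY=value pair per line. Usage is just
(the path is illustrative):

    from spack.util.environment import dump_environment

    dump_environment('/tmp/spack-build-env.txt')
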
diff --git a/lib/spack/spack/util/executable.py b/lib/spack/spack/util/executable.py
index d1dfb62ffb..fc27b789d0 100644
--- a/lib/spack/spack/util/executable.py
+++ b/lib/spack/spack/util/executable.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -55,24 +55,80 @@ class Executable(object):
def __call__(self, *args, **kwargs):
- """Run the executable with subprocess.check_output, return output."""
- return_output = kwargs.get("return_output", False)
- fail_on_error = kwargs.get("fail_on_error", True)
- ignore_errors = kwargs.get("ignore_errors", ())
+ """Run this executable in a subprocess.
- output = kwargs.get("output", sys.stdout)
- error = kwargs.get("error", sys.stderr)
- input = kwargs.get("input", None)
+ Arguments
+ args
+ command line arguments to the executable to run.
+
+ Optional arguments
+
+ fail_on_error
+
+ Raise an exception if the subprocess returns an
+          error. Default is True. When False, the return code is
+          available as `exe.returncode`.
+
+ ignore_errors
+
+ An optional list/tuple of error codes that can be
+ *ignored*. i.e., if these codes are returned, this will
+ not raise an exception when `fail_on_error` is `True`.
+
+ output, error
+
+ These arguments allow you to specify new stdout and stderr
+ values. They default to `None`, which means the
+ subprocess will inherit the parent's file descriptors.
+
+ You can set these to:
+ - python streams, e.g. open Python file objects, or os.devnull;
+ - filenames, which will be automatically opened for writing; or
+ - `str`, as in the Python string type. If you set these to `str`,
+ output and error will be written to pipes and returned as
+ a string. If both `output` and `error` are set to `str`,
+ then one string is returned containing output concatenated
+ with error.
+
+ input
+
+ Same as output, error, but `str` is not an allowed value.
+
+ Deprecated arguments
+
+ return_output[=False]
+
+ Setting this to True is the same as setting output=str.
+ This argument may be removed in future Spack versions.
+
+ """
+ fail_on_error = kwargs.pop("fail_on_error", True)
+ ignore_errors = kwargs.pop("ignore_errors", ())
+
+ # TODO: This is deprecated. Remove in a future version.
+ return_output = kwargs.pop("return_output", False)
+
+        # A default of None means to keep the parent's file descriptors.
+ if return_output:
+ output = str
+ else:
+ output = kwargs.pop("output", None)
+
+ error = kwargs.pop("error", None)
+ input = kwargs.pop("input", None)
+ if input is str:
+ raise ValueError("Cannot use `str` as input stream.")
def streamify(arg, mode):
if isinstance(arg, basestring):
return open(arg, mode), True
- elif arg is None and mode != 'r':
- return open(os.devnull, mode), True
- return arg, False
- output, ostream = streamify(output, 'w')
- error, estream = streamify(error, 'w')
- input, istream = streamify(input, 'r')
+ elif arg is str:
+ return subprocess.PIPE, False
+ else:
+ return arg, False
+ ostream, close_ostream = streamify(output, 'w')
+ estream, close_estream = streamify(error, 'w')
+ istream, close_istream = streamify(input, 'r')
# if they just want to ignore one error code, make it a tuple.
if isinstance(ignore_errors, int):
@@ -93,19 +149,19 @@ class Executable(object):
try:
proc = subprocess.Popen(
- cmd,
- stdin=input,
- stderr=error,
- stdout=subprocess.PIPE if return_output else output)
+ cmd, stdin=istream, stderr=estream, stdout=ostream)
out, err = proc.communicate()
- self.returncode = proc.returncode
- rc = proc.returncode
+ rc = self.returncode = proc.returncode
if fail_on_error and rc != 0 and (rc not in ignore_errors):
raise ProcessError("Command exited with status %d:"
% proc.returncode, cmd_line)
- if return_output:
- return out
+
+ if output is str or error is str:
+ result = ''
+ if output is str: result += out
+ if error is str: result += err
+ return result
except OSError, e:
raise ProcessError(
@@ -120,9 +176,9 @@ class Executable(object):
% (proc.returncode, cmd_line))
finally:
- if ostream: output.close()
- if estream: error.close()
- if istream: input.close()
+ if close_ostream: output.close()
+ if close_estream: error.close()
+ if close_istream: input.close()
def __eq__(self, other):
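
In terms of the new keyword semantics documented above, capturing command
output now looks like this (a hedged sketch; 'echo' stands in for any
command on the PATH):

    from spack.util.executable import which

    echo = which('echo')               # returns an Executable wrapper
    out = echo('hello', output=str)    # output=str captures stdout
    assert out == 'hello\n'

    # Deprecated spelling, still accepted for now:
    out = echo('hello', return_output=True)
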
diff --git a/lib/spack/spack/util/multiproc.py b/lib/spack/spack/util/multiproc.py
index 21cd6f543d..8ca82df011 100644
--- a/lib/spack/spack/util/multiproc.py
+++ b/lib/spack/spack/util/multiproc.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/util/naming.py b/lib/spack/spack/util/naming.py
index 782afbd4bb..5025f15027 100644
--- a/lib/spack/spack/util/naming.py
+++ b/lib/spack/spack/util/naming.py
@@ -1,13 +1,22 @@
# Need this because of spack.util.string
from __future__ import absolute_import
import string
+import itertools
import re
+from StringIO import StringIO
import spack
+__all__ = ['mod_to_class', 'spack_module_to_python_module', 'valid_module_name',
+ 'valid_fully_qualified_module_name', 'validate_fully_qualified_module_name',
+ 'validate_module_name', 'possible_spack_module_names', 'NamespaceTrie']
+
# Valid module names can contain '-' but can't start with it.
_valid_module_re = r'^\w[\w-]*$'
+# Valid fully qualified module names are valid module names joined by '.'.
+_valid_fully_qualified_module_re = r'^(\w[\w-]*)(\.\w[\w-]*)*$'
+
def mod_to_class(mod_name):
"""Convert a name from module style to class name style. Spack mostly
@@ -42,20 +51,160 @@ def mod_to_class(mod_name):
return class_name
+def spack_module_to_python_module(mod_name):
+ """Given a Spack module name, returns the name by which it can be
+ imported in Python.
+ """
+ if re.match(r'[0-9]', mod_name):
+ mod_name = 'num' + mod_name
+
+ return mod_name.replace('-', '_')
+
+
+def possible_spack_module_names(python_mod_name):
+ """Given a Python module name, return a list of all possible spack module
+ names that could correspond to it."""
+ mod_name = re.sub(r'^num(\d)', r'\1', python_mod_name)
+
+ parts = re.split(r'(_)', mod_name)
+ options = [['_', '-']] * mod_name.count('_')
+
+ results = []
+ for subs in itertools.product(*options):
+ s = list(parts)
+ s[1::2] = subs
+ results.append(''.join(s))
+
+ return results
+
+
def valid_module_name(mod_name):
- """Return whether the mod_name is valid for use in Spack."""
+ """Return whether mod_name is valid for use in Spack."""
return bool(re.match(_valid_module_re, mod_name))
+def valid_fully_qualified_module_name(mod_name):
+ """Return whether mod_name is a valid namespaced module name."""
+ return bool(re.match(_valid_fully_qualified_module_re, mod_name))
+
+
def validate_module_name(mod_name):
"""Raise an exception if mod_name is not valid."""
if not valid_module_name(mod_name):
raise InvalidModuleNameError(mod_name)
+def validate_fully_qualified_module_name(mod_name):
+ """Raise an exception if mod_name is not a valid namespaced module name."""
+ if not valid_fully_qualified_module_name(mod_name):
+ raise InvalidFullyQualifiedModuleNameError(mod_name)
+
+
class InvalidModuleNameError(spack.error.SpackError):
"""Raised when we encounter a bad module name."""
def __init__(self, name):
super(InvalidModuleNameError, self).__init__(
"Invalid module name: " + name)
self.name = name
+
+
+class InvalidFullyQualifiedModuleNameError(spack.error.SpackError):
+ """Raised when we encounter a bad full package name."""
+ def __init__(self, name):
+ super(InvalidFullyQualifiedModuleNameError, self).__init__(
+ "Invalid fully qualified package name: " + name)
+ self.name = name
+
+
+class NamespaceTrie(object):
+ class Element(object):
+ def __init__(self, value):
+ self.value = value
+
+
+ def __init__(self, separator='.'):
+ self._subspaces = {}
+ self._value = None
+ self._sep = separator
+
+
+ def __setitem__(self, namespace, value):
+ first, sep, rest = namespace.partition(self._sep)
+
+ if not first:
+ self._value = NamespaceTrie.Element(value)
+ return
+
+ if first not in self._subspaces:
+ self._subspaces[first] = NamespaceTrie()
+
+ self._subspaces[first][rest] = value
+
+
+ def _get_helper(self, namespace, full_name):
+ first, sep, rest = namespace.partition(self._sep)
+ if not first:
+ if not self._value:
+ raise KeyError("Can't find namespace '%s' in trie" % full_name)
+ return self._value.value
+ elif first not in self._subspaces:
+ raise KeyError("Can't find namespace '%s' in trie" % full_name)
+ else:
+ return self._subspaces[first]._get_helper(rest, full_name)
+
+
+ def __getitem__(self, namespace):
+ return self._get_helper(namespace, namespace)
+
+
+ def is_prefix(self, namespace):
+ """True if the namespace has a value, or if it's the prefix of one that does."""
+ first, sep, rest = namespace.partition(self._sep)
+ if not first:
+ return True
+ elif first not in self._subspaces:
+ return False
+ else:
+ return self._subspaces[first].is_prefix(rest)
+
+
+ def is_leaf(self, namespace):
+ """True if this namespace has no children in the trie."""
+ first, sep, rest = namespace.partition(self._sep)
+ if not first:
+            return not self._subspaces
+ elif first not in self._subspaces:
+ return False
+ else:
+ return self._subspaces[first].is_leaf(rest)
+
+
+ def has_value(self, namespace):
+ """True if there is a value set for the given namespace."""
+ first, sep, rest = namespace.partition(self._sep)
+ if not first:
+ return self._value is not None
+ elif first not in self._subspaces:
+ return False
+ else:
+ return self._subspaces[first].has_value(rest)
+
+
+ def __contains__(self, namespace):
+ """Returns whether a value has been set for the namespace."""
+ return self.has_value(namespace)
+
+
+ def _str_helper(self, stream, level=0):
+ indent = (level * ' ')
+ for name in sorted(self._subspaces):
+ stream.write(indent + name + '\n')
+ if self._value:
+ stream.write(indent + ' ' + repr(self._value.value))
+            self._subspaces[name]._str_helper(stream, level + 1)
+
+
+ def __str__(self):
+ stream = StringIO()
+ self._str_helper(stream)
+ return stream.getvalue()
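
A hedged sketch of how the new helpers fit together:
spack_module_to_python_module maps one direction,
possible_spack_module_names enumerates the ambiguous inverse, and
NamespaceTrie stores values under dotted namespaces:

    from spack.util.naming import (NamespaceTrie,
                                   possible_spack_module_names,
                                   spack_module_to_python_module)

    # '-' is not legal in Python identifiers, so it becomes '_' ...
    assert spack_module_to_python_module('py-numpy') == 'py_numpy'
    # ... and the inverse is ambiguous, so all candidates come back.
    assert sorted(possible_spack_module_names('py_numpy')) == \
        ['py-numpy', 'py_numpy']

    trie = NamespaceTrie()
    trie['builtin.mock'] = 'a repo object'   # illustrative value
    assert 'builtin.mock' in trie            # i.e., has_value()
    assert trie.is_prefix('builtin')         # on the path to a value
    assert trie['builtin.mock'] == 'a repo object'
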
diff --git a/lib/spack/spack/util/pattern.py b/lib/spack/spack/util/pattern.py
new file mode 100644
index 0000000000..17a126498b
--- /dev/null
+++ b/lib/spack/spack/util/pattern.py
@@ -0,0 +1,116 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import inspect
+import collections
+import functools
+
+
+def composite(interface=None, method_list=None, container=list):
+ """
+ Returns a class decorator that patches a class adding all the methods it needs to be a composite for a given
+ interface.
+
+ :param interface: class exposing the interface to which the composite object must conform. Only non-private and
+ non-special methods will be taken into account
+
+ :param method_list: names of methods that should be part of the composite
+
+ :param container: container for the composite object (default = list). Must fulfill the MutableSequence contract.
+ The composite class will expose the container API to manage object composition
+
+ :return: class decorator
+ """
+ # Check if container fulfills the MutableSequence contract and raise an exception if it doesn't
+ # The patched class returned by the decorator will inherit from the container class to expose the
+ # interface needed to manage objects composition
+ if not issubclass(container, collections.MutableSequence):
+ raise TypeError("Container must fulfill the MutableSequence contract")
+
+ # Check if at least one of the 'interface' or the 'method_list' arguments are defined
+ if interface is None and method_list is None:
+ raise TypeError("Either 'interface' or 'method_list' must be defined on a call to composite")
+
+ def cls_decorator(cls):
+ # Retrieve the base class of the composite. Inspect its methods and decide which ones will be overridden
+ def no_special_no_private(x):
+ return inspect.ismethod(x) and not x.__name__.startswith('_')
+
+ # Patch the behavior of each of the methods in the previous list. This is done associating an instance of the
+ # descriptor below to any method that needs to be patched.
+ class IterateOver(object):
+ """
+            Descriptor that patches a method of a composite: it iterates
+            over all the items held by the composite instance and calls
+            the same-named attribute on each of them.
+ """
+ def __init__(self, name, func=None):
+ self.name = name
+ self.func = func
+
+ def __get__(self, instance, owner):
+ def getter(*args, **kwargs):
+ for item in instance:
+ getattr(item, self.name)(*args, **kwargs)
+ # If we are using this descriptor to wrap a method from an interface, then we must conditionally
+ # use the `functools.wraps` decorator to set the appropriate fields.
+ if self.func is not None:
+ getter = functools.wraps(self.func)(getter)
+ return getter
+
+ dictionary_for_type_call = {}
+ # Construct a dictionary with the methods explicitly passed as name
+ if method_list is not None:
+ # python@2.7: method_list_dict = {name: IterateOver(name) for name in method_list}
+ method_list_dict = {}
+ for name in method_list:
+ method_list_dict[name] = IterateOver(name)
+ dictionary_for_type_call.update(method_list_dict)
+ # Construct a dictionary with the methods inspected from the interface
+ if interface is not None:
+ ##########
+ # python@2.7: interface_methods = {name: method for name, method in inspect.getmembers(interface, predicate=no_special_no_private)}
+ interface_methods = {}
+ for name, method in inspect.getmembers(interface, predicate=no_special_no_private):
+ interface_methods[name] = method
+ ##########
+ # python@2.7: interface_methods_dict = {name: IterateOver(name, method) for name, method in interface_methods.iteritems()}
+ interface_methods_dict = {}
+ for name, method in interface_methods.iteritems():
+ interface_methods_dict[name] = IterateOver(name, method)
+ ##########
+ dictionary_for_type_call.update(interface_methods_dict)
+ # Get the methods that are defined in the scope of the composite class and override any previous definition
+ ##########
+ # python@2.7: cls_method = {name: method for name, method in inspect.getmembers(cls, predicate=inspect.ismethod)}
+ cls_method = {}
+ for name, method in inspect.getmembers(cls, predicate=inspect.ismethod):
+ cls_method[name] = method
+ ##########
+ dictionary_for_type_call.update(cls_method)
+ # Generate the new class on the fly and return it
+ # FIXME : inherit from interface if we start to use ABC classes?
+ wrapper_class = type(cls.__name__, (cls, container), dictionary_for_type_call)
+ return wrapper_class
+
+ return cls_decorator
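
The net effect of the decorator (exercised by the new composite tests earlier
in this diff) is easiest to see on a tiny interface: the generated class
subclasses both the decorated class and the container, and every public
interface method fans out, in order, to each child. A sketch with
illustrative names:

    import spack.util.pattern as pattern

    class Counter(object):                 # the interface; bump() is public
        def bump(self):
            raise NotImplementedError('bump not implemented')

    class One(Counter):
        total = 0

        def bump(self):
            One.total += 1

    @pattern.composite(interface=Counter)
    class CounterGroup(object):
        pass

    group = CounterGroup()                 # also a list (default container)
    group.append(One())
    group.append(One())
    group.bump()                           # dispatches to both children
    assert One.total == 2
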
diff --git a/lib/spack/spack/util/prefix.py b/lib/spack/spack/util/prefix.py
index 7bd63c16ca..c613ca5182 100644
--- a/lib/spack/spack/util/prefix.py
+++ b/lib/spack/spack/util/prefix.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py
new file mode 100644
index 0000000000..728e86b8ee
--- /dev/null
+++ b/lib/spack/spack/util/spack_yaml.py
@@ -0,0 +1,201 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""Enhanced YAML parsing for Spack.
+
+- ``load()`` preserves YAML Marks on returned objects -- this allows
+ us to access file and line information later.
+
+- Our load methods use the ``OrderedDict`` class instead of YAML's
+ default unordered dict.
+
+"""
+import yaml
+from yaml.nodes import *
+from yaml.constructor import ConstructorError
+from yaml.representer import SafeRepresenter
+from ordereddict_backport import OrderedDict
+
+# Only export load and dump
+__all__ = ['load', 'dump']
+
+# Make new classes so we can add custom attributes.
+# Also, use OrderedDict instead of just dict.
+class syaml_dict(OrderedDict):
+ def __repr__(self):
+ mappings = ('%r: %r' % (k,v) for k,v in self.items())
+ return '{%s}' % ', '.join(mappings)
+class syaml_list(list):
+ __repr__ = list.__repr__
+class syaml_str(str):
+ __repr__ = str.__repr__
+
+def mark(obj, node):
+ """Add start and end markers to an object."""
+ obj._start_mark = node.start_mark
+ obj._end_mark = node.end_mark
+
+
+class OrderedLineLoader(yaml.Loader):
+ """YAML loader that preserves order and line numbers.
+
+ Mappings read in by this loader behave like an ordered dict.
+ Sequences, mappings, and strings also have new attributes,
+ ``_start_mark`` and ``_end_mark``, that preserve YAML line
+ information in the output data.
+
+ """
+ #
+ # Override construct_yaml_* so that they build our derived types,
+ # which allows us to add new attributes to them.
+ #
+ # The standard YAML constructors return empty instances and fill
+ # them in with their contents later. We preserve this behavior.
+ #
+ def construct_yaml_str(self, node):
+ value = self.construct_scalar(node)
+ try:
+ value = value.encode('ascii')
+ except UnicodeEncodeError:
+ pass
+ value = syaml_str(value)
+ mark(value, node)
+ return value
+
+
+ def construct_yaml_seq(self, node):
+ data = syaml_list()
+ mark(data, node)
+ yield data
+ data.extend(self.construct_sequence(node))
+
+
+ def construct_yaml_map(self, node):
+ data = syaml_dict()
+ mark(data, node)
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ #
+ # Override the ``construct_*`` routines. These fill in the empty
+ # objects after they are yielded by the above ``construct_yaml_*`` methods.
+ #
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ value = syaml_list(self.construct_object(child, deep=deep)
+ for child in node.value)
+ mark(value, node)
+ return value
+
+
+ def construct_mapping(self, node, deep=False):
+ """Store mappings as OrderedDicts instead of as regular python
+ dictionaries to preserve file ordering."""
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+
+ mapping = syaml_dict()
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ if key in mapping:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found already in-use key (%s)" % key, key_node.start_mark)
+ mapping[key] = value
+
+ mark(mapping, node)
+ return mapping
+
+# register above new constructors
+OrderedLineLoader.add_constructor(u'tag:yaml.org,2002:map', OrderedLineLoader.construct_yaml_map)
+OrderedLineLoader.add_constructor(u'tag:yaml.org,2002:seq', OrderedLineLoader.construct_yaml_seq)
+OrderedLineLoader.add_constructor(u'tag:yaml.org,2002:str', OrderedLineLoader.construct_yaml_str)
+
+
+
+class OrderedLineDumper(yaml.Dumper):
+ """Dumper that preserves ordering and formats ``syaml_*`` objects.
+
+ This dumper preserves the insertion order of ``syaml_dict`` objects
+ when they're written out. It also has some custom formatters
+ for ``syaml_*`` objects so that they are formatted like their
+ regular Python equivalents, instead of ugly YAML pyobjects.
+
+ """
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ # if it's a syaml_dict, preserve OrderedDict order.
+ # Otherwise do the default thing.
+ sort = not isinstance(mapping, syaml_dict)
+ mapping = mapping.items()
+ if sort:
+ mapping.sort()
+
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+# Make our special objects look like normal YAML ones.
+OrderedLineDumper.add_representer(syaml_dict, OrderedLineDumper.represent_dict)
+OrderedLineDumper.add_representer(syaml_list, OrderedLineDumper.represent_list)
+OrderedLineDumper.add_representer(syaml_str, OrderedLineDumper.represent_str)
+
+
+def load(*args, **kwargs):
+ """Load but modify the loader instance so that it will add __line__
+ atrributes to the returned object."""
+ kwargs['Loader'] = OrderedLineLoader
+ return yaml.load(*args, **kwargs)
+
+
+def dump(*args, **kwargs):
+ """Dump with a dumper that preserves ``syaml_dict`` insertion order."""
+ kwargs['Dumper'] = OrderedLineDumper
+ return yaml.dump(*args, **kwargs)
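Taken together, the new module can be exercised roughly like this (a sketch; the import path follows the file location above, and the sample YAML is illustrative):

    import spack.util.spack_yaml as syaml

    text = ("packages:\n"
            "  mpich:\n"
            "    version: [3.0.4]\n")

    data = syaml.load(text)

    # Keys come back in file order instead of hash order.
    print(data.keys())                        # ['packages']

    # Every container remembers where it appeared in the source.
    print(data['packages']._start_mark.line)  # 1 (zero-based source line)

    # Dumping preserves the original key order instead of sorting.
    print(syaml.dump(data, default_flow_style=False))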
diff --git a/lib/spack/spack/util/string.py b/lib/spack/spack/util/string.py
index 234163bf52..1556ce6d29 100644
--- a/lib/spack/spack/util/string.py
+++ b/lib/spack/spack/util/string.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/util/web.py b/lib/spack/spack/util/web.py
index 1420d62a77..73f4858b02 100644
--- a/lib/spack/spack/util/web.py
+++ b/lib/spack/spack/util/web.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -23,6 +23,7 @@
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import re
+import os
import sys
import subprocess
import urllib2, cookielib
@@ -70,7 +71,9 @@ def _spider(args):
"""
url, visited, root, opener, depth, max_depth, raise_on_error = args
- pages = {}
+ pages = {} # dict from page URL -> text content.
+ links = set() # set of all links seen on visited pages.
+
try:
# Make a HEAD request first to check the content type. This lets
# us ignore tarballs and gigantic files.
@@ -83,12 +86,12 @@ def _spider(args):
if not "Content-type" in resp.headers:
tty.debug("ignoring page " + url)
- return pages
+ return pages, links
if not resp.headers["Content-type"].startswith('text/html'):
tty.debug("ignoring page " + url + " with content type " +
resp.headers["Content-type"])
- return pages
+ return pages, links
# Do the real GET request when we know it's just HTML.
req.get_method = lambda: "GET"
@@ -99,42 +102,45 @@ def _spider(args):
page = response.read()
pages[response_url] = page
- # If we're not at max depth, parse out the links in the page
- if depth < max_depth:
- link_parser = LinkParser()
- subcalls = []
- link_parser.feed(page)
-
- while link_parser.links:
- raw_link = link_parser.links.pop()
+ # Parse out the links in the page
+ link_parser = LinkParser()
+ subcalls = []
+ link_parser.feed(page)
- # Skip stuff that looks like an archive
- if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES):
- continue
+ while link_parser.links:
+ raw_link = link_parser.links.pop()
+ abs_link = urlparse.urljoin(response_url, raw_link)
- # Evaluate the link relative to the page it came from.
- abs_link = urlparse.urljoin(response_url, raw_link)
+ links.add(abs_link)
- # Skip things outside the root directory
- if not abs_link.startswith(root):
- continue
+ # Skip stuff that looks like an archive
+ if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES):
+ continue
- # Skip already-visited links
- if abs_link in visited:
- continue
+ # Skip things outside the root directory
+ if not abs_link.startswith(root):
+ continue
- subcalls.append((abs_link, visited, root, None, depth+1, max_depth, raise_on_error))
- visited.add(abs_link)
+ # Skip already-visited links
+ if abs_link in visited:
+ continue
- if subcalls:
- try:
- pool = Pool(processes=len(subcalls))
- dicts = pool.map(_spider, subcalls)
- for d in dicts:
- pages.update(d)
- finally:
- pool.terminate()
- pool.join()
+ # If we're not at max depth, follow links.
+ if depth < max_depth:
+ subcalls.append((abs_link, visited, root, None,
+ depth+1, max_depth, raise_on_error))
+ visited.add(abs_link)
+
+ if subcalls:
+ try:
+ pool = Pool(processes=len(subcalls))
+ results = pool.map(_spider, subcalls)
+ for sub_pages, sub_links in results:
+ pages.update(sub_pages)
+ links.update(sub_links)
+ finally:
+ pool.terminate()
+ pool.join()
except urllib2.URLError, e:
tty.debug(e)
@@ -155,10 +161,10 @@ def _spider(args):
# Other types of errors are completely ignored, except in debug mode.
tty.debug("Error in _spider: %s" % e)
- return pages
+ return pages, links
-def get_pages(root_url, **kwargs):
+def spider(root_url, **kwargs):
"""Gets web pages from a root URL.
If depth is specified (e.g., depth=2), then this will also fetch pages
linked from the root and its children up to that depth.
@@ -167,5 +173,69 @@ def get_pages(root_url, **kwargs):
performance over a sequential fetch.
"""
max_depth = kwargs.setdefault('depth', 1)
- pages = _spider((root_url, set(), root_url, None, 1, max_depth, False))
- return pages
+ pages, links = _spider((root_url, set(), root_url, None, 1, max_depth, False))
+ return pages, links
+
+
+def find_versions_of_archive(*archive_urls, **kwargs):
+ """Scrape web pages for new versions of a tarball.
+
+ Arguments:
+ archive_urls:
+ URLs for different versions of a package. Typically these
+ are just the tarball URLs from the package file itself. By
+ default, this searches the parent directories of archives.
+
+ Keyword Arguments:
+ list_url:
+ URL for a listing of archives. Spack will scrape these
+ pages for download links that look like the archive URL.
+
+ list_depth:
+ Max depth to follow links on list_url pages.
+
+ """
+ list_url = kwargs.get('list_url', None)
+ list_depth = kwargs.get('list_depth', 1)
+
+ # Generate a list of list_urls based on archive urls and any
+ # explicitly listed list_url in the package
+ list_urls = set()
+ if list_url:
+ list_urls.add(list_url)
+ for aurl in archive_urls:
+ list_urls.add(spack.url.find_list_url(aurl))
+
+ # Grab some web pages to scrape.
+ pages = {}
+ links = set()
+ for lurl in list_urls:
+ p, l = spider(lurl, depth=list_depth)
+ pages.update(p)
+ links.update(l)
+
+ # Scrape them for archive URLs
+ regexes = []
+ for aurl in archive_urls:
+ # This creates a regex from the URL with a capture group for
+ # the version part of the URL. The capture group is converted
+ # to a generic wildcard, so we can use this to extract things
+ # on a page that look like archive URLs.
+ url_regex = spack.url.wildcard_version(aurl)
+
+ # We'll be a bit more liberal and just look for the archive
+ # part, not the full path.
+ regexes.append(os.path.basename(url_regex))
+
+ # Build a dict version -> URL from any links that match the wildcards.
+ versions = {}
+ for url in links:
+ if any(re.search(r, url) for r in regexes):
+ try:
+ ver = spack.url.parse_version(url)
+ versions[ver] = url
+ except spack.url.UndetectableVersionError:
+ continue
+
+ return versions
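A caller's-eye sketch of the reworked API — the URLs are hypothetical; only ``spider`` and ``find_versions_of_archive`` come from this diff:

    from spack.util.web import spider, find_versions_of_archive

    # spider() now returns page text *and* every link it saw, so callers
    # that only need links no longer have to re-parse the pages.
    pages, links = spider('http://example.com/downloads/', depth=2)

    # Scrape listing pages for anything that looks like another version
    # of a known archive; map parsed versions to their download URLs.
    versions = find_versions_of_archive(
        'http://example.com/downloads/foo-1.0.tar.gz',
        list_url='http://example.com/downloads/',
        list_depth=2)

    for v in sorted(versions):
        print('%s -> %s' % (v, versions[v]))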
diff --git a/lib/spack/spack/variant.py b/lib/spack/spack/variant.py
index c572560206..f31b4e044c 100644
--- a/lib/spack/spack/variant.py
+++ b/lib/spack/spack/variant.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
diff --git a/lib/spack/spack/version.py b/lib/spack/spack/version.py
index 35db05e018..e8a0a261c9 100644
--- a/lib/spack/spack/version.py
+++ b/lib/spack/spack/version.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -48,7 +48,7 @@ import sys
import re
from bisect import bisect_left
from functools import wraps
-from external.functools import total_ordering
+from functools_backport import total_ordering
# Valid version characters
VALID_VERSION = r'[A-Za-z0-9_.-]'
diff --git a/lib/spack/spack/virtual.py b/lib/spack/spack/virtual.py
index 2241e5a1cd..3a005c35d2 100644
--- a/lib/spack/spack/virtual.py
+++ b/lib/spack/spack/virtual.py
@@ -6,7 +6,7 @@
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
@@ -122,12 +122,13 @@ class ProviderIndex(object):
return sorted(providers)
- # TODO: this is pretty darned nasty, and inefficient.
+ # TODO: this is pretty darned nasty, and inefficient, but there
+ # are not that many vdeps in most specs.
def _cross_provider_maps(self, lmap, rmap):
result = {}
for lspec, rspec in itertools.product(lmap, rmap):
try:
- constrained = lspec.copy().constrain(rspec)
+ constrained = lspec.constrained(rspec)
except spack.spec.UnsatisfiableSpecError:
continue
@@ -135,7 +136,7 @@ class ProviderIndex(object):
for lp_spec, rp_spec in itertools.product(lmap[lspec], rmap[rspec]):
if lp_spec.name == rp_spec.name:
try:
- const = lp_spec.copy().constrain(rp_spec,deps=False)
+ const = lp_spec.constrained(rp_spec, deps=False)
result.setdefault(constrained, set()).add(const)
except spack.spec.UnsatisfiableSpecError:
continue
@@ -162,4 +163,4 @@ class ProviderIndex(object):
if crossed:
result[name] = crossed
- return bool(result)
+ return all(c in result for c in common)
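The final hunk also tightens the satisfiability check: where ``bool(result)`` passed as soon as any one virtual package crossed successfully, the new expression requires every virtual name the two maps share to survive. A toy illustration (assuming ``common`` holds the shared virtual names, as the surrounding loop suggests):

    common = set(['mpi', 'blas'])        # vdeps named in both provider maps
    result = {'mpi': set(['mpich@3'])}   # 'blas' providers failed to cross

    print(bool(result))                      # True  -- old, looser check
    print(all(c in result for c in common))  # False -- new, stricter check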