Diffstat (limited to 'lib')
-rw-r--r--  lib/spack/docs/conf.py | 8
-rw-r--r--  lib/spack/docs/contribution_guide.rst | 2
-rw-r--r--  lib/spack/docs/developer_guide.rst | 8
-rw-r--r--  lib/spack/docs/getting_started.rst | 4
-rw-r--r--  lib/spack/docs/index.rst | 6
-rw-r--r--  lib/spack/docs/packaging_guide.rst | 26
-rw-r--r--  lib/spack/external/_pytest/pytester.py | 2
-rw-r--r--  lib/spack/external/functools_backport.py | 17
-rw-r--r--  lib/spack/external/ordereddict_backport.py | 8
-rwxr-xr-x  lib/spack/external/pyqver2.py | 70
-rwxr-xr-x  lib/spack/external/pyqver3.py | 248
-rw-r--r--  lib/spack/external/six.py | 886
-rw-r--r--  lib/spack/external/yaml/README | 2
-rw-r--r--  lib/spack/external/yaml/lib/yaml/__init__.py (renamed from lib/spack/external/yaml/__init__.py) | 2
-rw-r--r--  lib/spack/external/yaml/lib/yaml/composer.py (renamed from lib/spack/external/yaml/composer.py) | 0
-rw-r--r--  lib/spack/external/yaml/lib/yaml/constructor.py (renamed from lib/spack/external/yaml/constructor.py) | 3
-rw-r--r--  lib/spack/external/yaml/lib/yaml/cyaml.py | 85
-rw-r--r--  lib/spack/external/yaml/lib/yaml/dumper.py (renamed from lib/spack/external/yaml/dumper.py) | 0
-rw-r--r--  lib/spack/external/yaml/lib/yaml/emitter.py (renamed from lib/spack/external/yaml/emitter.py) | 0
-rw-r--r--  lib/spack/external/yaml/lib/yaml/error.py (renamed from lib/spack/external/yaml/error.py) | 0
-rw-r--r--  lib/spack/external/yaml/lib/yaml/events.py (renamed from lib/spack/external/yaml/events.py) | 0
-rw-r--r--  lib/spack/external/yaml/lib/yaml/loader.py (renamed from lib/spack/external/yaml/loader.py) | 0
-rw-r--r--  lib/spack/external/yaml/lib/yaml/nodes.py (renamed from lib/spack/external/yaml/nodes.py) | 0
-rw-r--r--  lib/spack/external/yaml/lib/yaml/parser.py (renamed from lib/spack/external/yaml/parser.py) | 0
-rw-r--r--  lib/spack/external/yaml/lib/yaml/reader.py (renamed from lib/spack/external/yaml/reader.py) | 9
-rw-r--r--  lib/spack/external/yaml/lib/yaml/representer.py (renamed from lib/spack/external/yaml/representer.py) | 4
-rw-r--r--  lib/spack/external/yaml/lib/yaml/resolver.py (renamed from lib/spack/external/yaml/resolver.py) | 5
-rw-r--r--  lib/spack/external/yaml/lib/yaml/scanner.py (renamed from lib/spack/external/yaml/scanner.py) | 8
-rw-r--r--  lib/spack/external/yaml/lib/yaml/serializer.py (renamed from lib/spack/external/yaml/serializer.py) | 0
-rw-r--r--  lib/spack/external/yaml/lib/yaml/tokens.py (renamed from lib/spack/external/yaml/tokens.py) | 0
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/__init__.py | 312
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/composer.py | 139
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/constructor.py | 686
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/cyaml.py | 85
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/dumper.py | 62
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/emitter.py | 1137
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/error.py | 75
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/events.py | 86
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/loader.py | 40
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/nodes.py | 49
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/parser.py | 589
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/reader.py | 192
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/representer.py | 387
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/resolver.py | 227
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/scanner.py | 1444
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/serializer.py | 111
-rw-r--r--  lib/spack/external/yaml/lib3/yaml/tokens.py | 104
-rw-r--r--  lib/spack/llnl/util/filesystem.py | 4
-rw-r--r--  lib/spack/llnl/util/lang.py | 11
-rw-r--r--  lib/spack/llnl/util/tty/__init__.py | 8
-rw-r--r--  lib/spack/llnl/util/tty/colify.py | 20
-rw-r--r--  lib/spack/llnl/util/tty/log.py | 8
-rw-r--r--  lib/spack/spack/__init__.py | 7
-rw-r--r--  lib/spack/spack/architecture.py | 2
-rw-r--r--  lib/spack/spack/build_environment.py | 8
-rw-r--r--  lib/spack/spack/build_systems/autotools.py | 10
-rw-r--r--  lib/spack/spack/build_systems/perl.py | 117
-rw-r--r--  lib/spack/spack/build_systems/python.py | 80
-rw-r--r--  lib/spack/spack/cmd/__init__.py | 6
-rw-r--r--  lib/spack/spack/cmd/arch.py | 6
-rw-r--r--  lib/spack/spack/cmd/build.py | 3
-rw-r--r--  lib/spack/spack/cmd/common/arguments.py | 2
-rw-r--r--  lib/spack/spack/cmd/compiler.py | 35
-rw-r--r--  lib/spack/spack/cmd/configure.py | 4
-rw-r--r--  lib/spack/spack/cmd/create.py | 70
-rw-r--r--  lib/spack/spack/cmd/dependents.py | 2
-rw-r--r--  lib/spack/spack/cmd/env.py | 5
-rw-r--r--  lib/spack/spack/cmd/flake8.py | 26
-rw-r--r--  lib/spack/spack/cmd/graph.py | 5
-rw-r--r--  lib/spack/spack/cmd/info.py | 55
-rw-r--r--  lib/spack/spack/cmd/list.py | 30
-rw-r--r--  lib/spack/spack/cmd/location.py | 19
-rw-r--r--  lib/spack/spack/cmd/md5.py | 2
-rw-r--r--  lib/spack/spack/cmd/mirror.py | 4
-rw-r--r--  lib/spack/spack/cmd/pkg.py | 15
-rw-r--r--  lib/spack/spack/cmd/repo.py | 4
-rw-r--r--  lib/spack/spack/cmd/spec.py | 23
-rw-r--r--  lib/spack/spack/cmd/test.py | 6
-rw-r--r--  lib/spack/spack/cmd/url.py | 92
-rw-r--r--  lib/spack/spack/cmd/versions.py | 6
-rw-r--r--  lib/spack/spack/compiler.py | 4
-rw-r--r--  lib/spack/spack/compilers/__init__.py | 4
-rw-r--r--  lib/spack/spack/concretize.py | 203
-rw-r--r--  lib/spack/spack/config.py | 14
-rw-r--r--  lib/spack/spack/database.py | 15
-rw-r--r--  lib/spack/spack/directives.py | 30
-rw-r--r--  lib/spack/spack/directory_layout.py | 3
-rw-r--r--  lib/spack/spack/environment.py | 4
-rw-r--r--  lib/spack/spack/error.py | 3
-rw-r--r--  lib/spack/spack/fetch_strategy.py | 25
-rw-r--r--  lib/spack/spack/graph.py | 3
-rw-r--r--  lib/spack/spack/hooks/case_consistency.py | 6
-rw-r--r--  lib/spack/spack/hooks/module_file_generation.py | 5
-rw-r--r--  lib/spack/spack/modules.py | 22
-rw-r--r--  lib/spack/spack/operating_systems/cnl.py | 2
-rw-r--r--  lib/spack/spack/package.py | 34
-rw-r--r--  lib/spack/spack/package_prefs.py | 337
-rw-r--r--  lib/spack/spack/package_test.py | 18
-rw-r--r--  lib/spack/spack/parse.py | 11
-rw-r--r--  lib/spack/spack/provider_index.py | 11
-rw-r--r--  lib/spack/spack/repository.py | 3
-rw-r--r--  lib/spack/spack/spec.py | 188
-rw-r--r--  lib/spack/spack/stage.py | 20
-rw-r--r--  lib/spack/spack/test/architecture.py | 4
-rw-r--r--  lib/spack/spack/test/build_system_guess.py | 2
-rw-r--r--  lib/spack/spack/test/cmd/install.py | 6
-rw-r--r--  lib/spack/spack/test/cmd/url.py | 21
-rw-r--r--  lib/spack/spack/test/compilers.py | 9
-rw-r--r--  lib/spack/spack/test/concretize.py | 27
-rw-r--r--  lib/spack/spack/test/concretize_preferences.py | 8
-rw-r--r--  lib/spack/spack/test/conftest.py | 16
-rw-r--r--  lib/spack/spack/test/data/web/1.html | 10
-rw-r--r--  lib/spack/spack/test/data/web/2.html | 12
-rw-r--r--  lib/spack/spack/test/data/web/3.html | 11
-rw-r--r--  lib/spack/spack/test/data/web/4.html | 11
-rw-r--r--  lib/spack/spack/test/data/web/index.html | 10
-rw-r--r--  lib/spack/spack/test/directory_layout.py | 18
-rw-r--r--  lib/spack/spack/test/graph.py | 2
-rw-r--r--  lib/spack/spack/test/lock.py | 2
-rw-r--r--  lib/spack/spack/test/make_executable.py | 2
-rw-r--r--  lib/spack/spack/test/modules.py | 6
-rw-r--r--  lib/spack/spack/test/multimethod.py | 2
-rw-r--r--  lib/spack/spack/test/package_sanity.py | 67
-rw-r--r--  lib/spack/spack/test/pattern.py | 1
-rw-r--r--  lib/spack/spack/test/provider_index.py | 7
-rw-r--r--  lib/spack/spack/test/python_version.py | 130
-rw-r--r--  lib/spack/spack/test/spec_dag.py | 24
-rw-r--r--  lib/spack/spack/test/spec_semantics.py | 2
-rw-r--r--  lib/spack/spack/test/spec_syntax.py | 233
-rw-r--r--  lib/spack/spack/test/spec_yaml.py | 9
-rw-r--r--  lib/spack/spack/test/url_extrapolate.py | 101
-rw-r--r--  lib/spack/spack/test/url_parse.py | 794
-rw-r--r--  lib/spack/spack/test/url_substitution.py | 84
-rw-r--r--  lib/spack/spack/test/versions.py | 846
-rw-r--r--  lib/spack/spack/test/web.py | 162
-rw-r--r--  lib/spack/spack/url.py | 516
-rw-r--r--  lib/spack/spack/util/crypto.py | 8
-rw-r--r--  lib/spack/spack/util/executable.py | 10
-rw-r--r--  lib/spack/spack/util/multiproc.py | 3
-rw-r--r--  lib/spack/spack/util/naming.py | 47
-rw-r--r--  lib/spack/spack/util/pattern.py | 45
-rw-r--r--  lib/spack/spack/util/prefix.py | 12
-rw-r--r--  lib/spack/spack/util/spack_json.py | 38
-rw-r--r--  lib/spack/spack/util/spack_yaml.py | 16
-rw-r--r--  lib/spack/spack/version.py | 89
146 files changed, 10302 insertions, 2040 deletions
diff --git a/lib/spack/docs/conf.py b/lib/spack/docs/conf.py
index db8d3d29dc..69ec2a0b33 100644
--- a/lib/spack/docs/conf.py
+++ b/lib/spack/docs/conf.py
@@ -51,6 +51,10 @@ from sphinx.apidoc import main as sphinx_apidoc
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('exts'))
sys.path.insert(0, os.path.abspath('../external'))
+if sys.version_info[0] < 3:
+ sys.path.insert(0, os.path.abspath('../external/yaml/lib'))
+else:
+ sys.path.insert(0, os.path.abspath('../external/yaml/lib3'))
sys.path.append(os.path.abspath('..'))
# Add the Spack bin directory to the path so that we can use its output in docs.
@@ -110,13 +114,13 @@ handling_spack = False
for line in fileinput.input('spack.rst', inplace=1):
if handling_spack:
if not line.startswith(' :noindex:'):
- print ' :noindex: %s' % ' '.join(spack.__all__)
+ print(' :noindex: %s' % ' '.join(spack.__all__))
handling_spack = False
if line.startswith('.. automodule::'):
handling_spack = (line == '.. automodule:: spack\n')
- print line,
+ sys.stdout.write(line)
# Enable todo items
todo_include_todos = True
diff --git a/lib/spack/docs/contribution_guide.rst b/lib/spack/docs/contribution_guide.rst
index e9cfe1fa54..a3b3197181 100644
--- a/lib/spack/docs/contribution_guide.rst
+++ b/lib/spack/docs/contribution_guide.rst
@@ -40,7 +40,7 @@ for the results of these tests after submitting a PR, we recommend that you run
locally to speed up the review process.
If you take a look in ``$SPACK_ROOT/.travis.yml``, you'll notice that we test
-against Python 2.6 and 2.7. We currently perform 3 types of tests:
+against Python 2.6, 2.7, and 3.3-3.6. We currently perform 3 types of tests:
^^^^^^^^^^
Unit Tests
diff --git a/lib/spack/docs/developer_guide.rst b/lib/spack/docs/developer_guide.rst
index 0ce4029950..ea8d50c6ca 100644
--- a/lib/spack/docs/developer_guide.rst
+++ b/lib/spack/docs/developer_guide.rst
@@ -447,16 +447,16 @@ the string that it detected to be the name and version. The
``--incorrect-name`` and ``--incorrect-version`` flags can be used to
print URLs that were not being parsed correctly.
-""""""""""""""""""
-``spack url test``
-""""""""""""""""""
+"""""""""""""""""""""
+``spack url summary``
+"""""""""""""""""""""
This command attempts to parse every URL for every package in Spack
and prints a summary of how many of them are being correctly parsed.
It also prints a histogram showing which regular expressions are being
matched and how frequently:
-.. command-output:: spack url test
+.. command-output:: spack url summary
This command is essential for anyone adding or changing the regular
expressions that parse names and versions. By running this command
diff --git a/lib/spack/docs/getting_started.rst b/lib/spack/docs/getting_started.rst
index 3c2610beb0..971d42cea0 100644
--- a/lib/spack/docs/getting_started.rst
+++ b/lib/spack/docs/getting_started.rst
@@ -11,7 +11,7 @@ Prerequisites
Spack has the following minimum requirements, which must be installed
before Spack is run:
-1. Python 2.6 or 2.7
+1. Python 2 (2.6 or 2.7) or 3 (3.3 - 3.6)
2. A C/C++ compiler
3. The ``git`` and ``curl`` commands.
@@ -774,7 +774,7 @@ This problem is related to OpenSSL, and in some cases might be solved
by installing a new version of ``git`` and ``openssl``:
#. Run ``spack install git``
-#. Add the output of ``spack module loads git`` to your ``.bahsrc``.
+#. Add the output of ``spack module loads git`` to your ``.bashrc``.
If this doesn't work, it is also possible to disable checking of SSL
certificates by using:
diff --git a/lib/spack/docs/index.rst b/lib/spack/docs/index.rst
index 4dffe6f091..2e99e96a3e 100644
--- a/lib/spack/docs/index.rst
+++ b/lib/spack/docs/index.rst
@@ -4,9 +4,13 @@
contain the root `toctree` directive.
===================
-Spack Documentation
+Spack
===================
+.. epigraph::
+
+ `These are docs for the Spack package manager. For sphere packing, see` `pyspack <https://pyspack.readthedocs.io>`_.
+
Spack is a package management tool designed to support multiple
versions and configurations of software on a wide variety of platforms
and environments. It was designed for large supercomputing centers,
diff --git a/lib/spack/docs/packaging_guide.rst b/lib/spack/docs/packaging_guide.rst
index 729ea5d656..18541179b2 100644
--- a/lib/spack/docs/packaging_guide.rst
+++ b/lib/spack/docs/packaging_guide.rst
@@ -1560,6 +1560,28 @@ Python's ``setup_dependent_environment`` method also sets up some
other variables, creates a directory, and sets up the ``PYTHONPATH``
so that dependent packages can find their dependencies at build time.
+.. _packaging_conflicts:
+
+---------
+Conflicts
+---------
+
+Sometimes packages have known bugs or limitations that prevent them from
+building, e.g. against certain dependencies or with certain compilers. Spack
+makes it possible to express such constraints with the ``conflicts`` directive.
+
+By adding the following to a package:
+
+.. code-block:: python
+
+ conflicts('%intel', when='@1.2')
+
+we express the fact that the current package *cannot be built* with the Intel
+compiler when version "1.2" is being installed. The ``when`` argument can
+be omitted, in which case the conflict is always active.
+Conflicts are always evaluated after the concretization step has been performed,
+and if any match is found, a detailed error message is shown to the user.
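+As a sketch, assuming a hypothetical package that cannot be built with the
+PGI compiler at all, a conflict with no ``when`` argument applies
+unconditionally:
+
+.. code-block:: python
+
+   conflicts('%pgi')
+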
+
.. _packaging_extensions:
----------
@@ -2043,6 +2065,10 @@ The classes that are currently provided by Spack are:
| :py:class:`.PythonPackage` | Specialized class for |
| | :py:class:`.Python` extensions |
+------------------------------------+----------------------------------+
+ | :py:class:`.PerlPackage` | Specialized class for |
+ | | :py:class:`.Perl` extensions |
+ +------------------------------------+----------------------------------+
+
diff --git a/lib/spack/external/_pytest/pytester.py b/lib/spack/external/_pytest/pytester.py
index 17ff529a6c..d87c0a762a 100644
--- a/lib/spack/external/_pytest/pytester.py
+++ b/lib/spack/external/_pytest/pytester.py
@@ -551,7 +551,7 @@ class Testdir:
def _possibly_invalidate_import_caches(self):
# invalidate caches if we can (py33 and above)
try:
- import importlib
+ import importlib # nopyqver
except ImportError:
pass
else:
diff --git a/lib/spack/external/functools_backport.py b/lib/spack/external/functools_backport.py
index 19f0903c82..b3c913ffd7 100644
--- a/lib/spack/external/functools_backport.py
+++ b/lib/spack/external/functools_backport.py
@@ -28,3 +28,20 @@ def total_ordering(cls):
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
+
+
+@total_ordering
+class reverse_order(object):
+ """Helper for creating key functions.
+
+ This is a wrapper that inverts the sense of the natural
+ comparisons on the object.
+ """
+ def __init__(self, value):
+ self.value = value
+
+ def __eq__(self, other):
+ return other.value == self.value
+
+ def __lt__(self, other):
+ return other.value < self.value
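+
+
+# A minimal usage sketch (illustrative, not part of the backport): passing
+# ``reverse_order`` as a sort key inverts the natural ordering of the wrapped
+# values, e.g. sorted([1, 3, 2], key=reverse_order) == [3, 2, 1].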
diff --git a/lib/spack/external/ordereddict_backport.py b/lib/spack/external/ordereddict_backport.py
index 8ddad1477e..154e5d1872 100644
--- a/lib/spack/external/ordereddict_backport.py
+++ b/lib/spack/external/ordereddict_backport.py
@@ -8,7 +8,13 @@
try:
from thread import get_ident as _get_ident
except ImportError:
- from dummy_thread import get_ident as _get_ident
+ try:
+ from dummy_thread import get_ident as _get_ident
+ except ImportError:
+ try:
+ from _dummy_thread import get_ident as _get_ident
+ except ImportError:
+ from threading import get_ident as _get_ident # nopyqver
try:
from _abcoll import KeysView, ValuesView, ItemsView
diff --git a/lib/spack/external/pyqver2.py b/lib/spack/external/pyqver2.py
index 571e005524..07b191425b 100755
--- a/lib/spack/external/pyqver2.py
+++ b/lib/spack/external/pyqver2.py
@@ -57,11 +57,7 @@ StandardModules = {
"hmac": (2, 2),
"hotshot": (2, 2),
"HTMLParser": (2, 2),
-# skip importlib until we can conditionally skip for pytest.
-# pytest tries to import this and catches the exception, but
-# the test will still fail.
-# TODO: can we excelude with a comment like '# flake: noqa?'
-# "importlib": (2, 7),
+ "importlib": (2, 7),
"inspect": (2, 1),
"io": (2, 6),
"itertools": (2, 3),
@@ -262,7 +258,7 @@ class NodeChecker(object):
self.add(node, (2,2), "yield expression")
self.default(node)
-def get_versions(source):
+def get_versions(source, filename=None):
"""Return information about the Python versions required for specific features.
The return value is a dictionary with keys as a version number as a tuple
@@ -346,65 +342,3 @@ def qver(source):
#(2, 6)
"""
return max(get_versions(source).keys())
-
-
-if __name__ == '__main__':
-
- Verbose = False
- MinVersion = (2, 3)
- Lint = False
-
- files = []
- i = 1
- while i < len(sys.argv):
- a = sys.argv[i]
- if a == "--test":
- import doctest
- doctest.testmod()
- sys.exit(0)
- if a == "-v" or a == "--verbose":
- Verbose = True
- elif a == "-l" or a == "--lint":
- Lint = True
- elif a == "-m" or a == "--min-version":
- i += 1
- MinVersion = tuple(map(int, sys.argv[i].split(".")))
- else:
- files.append(a)
- i += 1
-
- if not files:
- print >>sys.stderr, """Usage: %s [options] source ...
-
- Report minimum Python version required to run given source files.
-
- -m x.y or --min-version x.y (default 2.3)
- report version triggers at or above version x.y in verbose mode
- -v or --verbose
- print more detailed report of version triggers for each version
- """ % sys.argv[0]
- sys.exit(1)
-
- for fn in files:
- try:
- f = open(fn)
- source = f.read()
- f.close()
- ver = get_versions(source)
- if Verbose:
- print fn
- for v in sorted([k for k in ver.keys() if k >= MinVersion], reverse=True):
- reasons = [x for x in uniq(ver[v]) if x]
- if reasons:
- # each reason is (lineno, message)
- print "\t%s\t%s" % (".".join(map(str, v)), ", ".join([x[1] for x in reasons]))
- elif Lint:
- for v in sorted([k for k in ver.keys() if k >= MinVersion], reverse=True):
- reasons = [x for x in uniq(ver[v]) if x]
- for r in reasons:
- # each reason is (lineno, message)
- print "%s:%s: %s %s" % (fn, r[0], ".".join(map(str, v)), r[1])
- else:
- print "%s\t%s" % (".".join(map(str, max(ver.keys()))), fn)
- except SyntaxError, x:
- print "%s: syntax error compiling with Python %s: %s" % (fn, platform.python_version(), x)
diff --git a/lib/spack/external/pyqver3.py b/lib/spack/external/pyqver3.py
new file mode 100755
index 0000000000..b63576a064
--- /dev/null
+++ b/lib/spack/external/pyqver3.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python3
+#
+# pyqver3.py
+# by Greg Hewgill
+# https://github.com/ghewgill/pyqver
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the author be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+# claim that you wrote the original software. If you use this software
+# in a product, an acknowledgment in the product documentation would be
+# appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+# misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+#
+# Copyright (c) 2009-2013 Greg Hewgill http://hewgill.com
+#
+import ast
+import platform
+import sys
+
+StandardModules = {
+# skip argparse now that it's in lib/spack/external
+# "argparse": (3, 2),
+ "faulthandler": (3, 3),
+ "importlib": (3, 1),
+ "ipaddress": (3, 3),
+ "lzma": (3, 3),
+ "tkinter.ttk": (3, 1),
+ "unittest.mock": (3, 3),
+ "venv": (3, 3),
+}
+
+Functions = {
+ "bytearray.maketrans": (3, 1),
+ "bytes.maketrans": (3, 1),
+ "bz2.open": (3, 3),
+ "collections.Counter": (3, 1),
+ "collections.OrderedDict": (3, 1),
+ "crypt.mksalt": (3, 3),
+ "email.generator.BytesGenerator": (3, 2),
+ "email.message_from_binary_file": (3, 2),
+ "email.message_from_bytes": (3, 2),
+ "functools.lru_cache": (3, 2),
+ "gzip.compress": (3, 2),
+ "gzip.decompress": (3, 2),
+ "inspect.getclosurevars": (3, 3),
+ "inspect.getgeneratorlocals": (3, 3),
+ "inspect.getgeneratorstate": (3, 2),
+ "itertools.combinations_with_replacement": (3, 1),
+ "itertools.compress": (3, 1),
+ "logging.config.dictConfig": (3, 2),
+ "logging.NullHandler": (3, 1),
+ "math.erf": (3, 2),
+ "math.erfc": (3, 2),
+ "math.expm1": (3, 2),
+ "math.gamma": (3, 2),
+ "math.isfinite": (3, 2),
+ "math.lgamma": (3, 2),
+ "math.log2": (3, 3),
+ "os.environb": (3, 2),
+ "os.fsdecode": (3, 2),
+ "os.fsencode": (3, 2),
+ "os.fwalk": (3, 3),
+ "os.getenvb": (3, 2),
+ "os.get_exec_path": (3, 2),
+ "os.getgrouplist": (3, 3),
+ "os.getpriority": (3, 3),
+ "os.getresgid": (3, 2),
+ "os.getresuid": (3, 2),
+ "os.get_terminal_size": (3, 3),
+ "os.getxattr": (3, 3),
+ "os.initgroups": (3, 2),
+ "os.listxattr": (3, 3),
+ "os.lockf": (3, 3),
+ "os.pipe2": (3, 3),
+ "os.posix_fadvise": (3, 3),
+ "os.posix_fallocate": (3, 3),
+ "os.pread": (3, 3),
+ "os.pwrite": (3, 3),
+ "os.readv": (3, 3),
+ "os.removexattr": (3, 3),
+ "os.replace": (3, 3),
+ "os.sched_get_priority_max": (3, 3),
+ "os.sched_get_priority_min": (3, 3),
+ "os.sched_getaffinity": (3, 3),
+ "os.sched_getparam": (3, 3),
+ "os.sched_getscheduler": (3, 3),
+ "os.sched_rr_get_interval": (3, 3),
+ "os.sched_setaffinity": (3, 3),
+ "os.sched_setparam": (3, 3),
+ "os.sched_setscheduler": (3, 3),
+ "os.sched_yield": (3, 3),
+ "os.sendfile": (3, 3),
+ "os.setpriority": (3, 3),
+ "os.setresgid": (3, 2),
+ "os.setresuid": (3, 2),
+ "os.setxattr": (3, 3),
+ "os.sync": (3, 3),
+ "os.truncate": (3, 3),
+ "os.waitid": (3, 3),
+ "os.writev": (3, 3),
+ "shutil.chown": (3, 3),
+ "shutil.disk_usage": (3, 3),
+ "shutil.get_archive_formats": (3, 3),
+ "shutil.get_terminal_size": (3, 3),
+ "shutil.get_unpack_formats": (3, 3),
+ "shutil.make_archive": (3, 3),
+ "shutil.register_archive_format": (3, 3),
+ "shutil.register_unpack_format": (3, 3),
+ "shutil.unpack_archive": (3, 3),
+ "shutil.unregister_archive_format": (3, 3),
+ "shutil.unregister_unpack_format": (3, 3),
+ "shutil.which": (3, 3),
+ "signal.pthread_kill": (3, 3),
+ "signal.pthread_sigmask": (3, 3),
+ "signal.sigpending": (3, 3),
+ "signal.sigtimedwait": (3, 3),
+ "signal.sigwait": (3, 3),
+ "signal.sigwaitinfo": (3, 3),
+ "socket.CMSG_LEN": (3, 3),
+ "socket.CMSG_SPACE": (3, 3),
+ "socket.fromshare": (3, 3),
+ "socket.if_indextoname": (3, 3),
+ "socket.if_nameindex": (3, 3),
+ "socket.if_nametoindex": (3, 3),
+ "socket.sethostname": (3, 3),
+ "ssl.match_hostname": (3, 2),
+ "ssl.RAND_bytes": (3, 3),
+ "ssl.RAND_pseudo_bytes": (3, 3),
+ "ssl.SSLContext": (3, 2),
+ "ssl.SSLEOFError": (3, 3),
+ "ssl.SSLSyscallError": (3, 3),
+ "ssl.SSLWantReadError": (3, 3),
+ "ssl.SSLWantWriteError": (3, 3),
+ "ssl.SSLZeroReturnError": (3, 3),
+ "stat.filemode": (3, 3),
+ "textwrap.indent": (3, 3),
+ "threading.get_ident": (3, 3),
+ "time.clock_getres": (3, 3),
+ "time.clock_gettime": (3, 3),
+ "time.clock_settime": (3, 3),
+ "time.get_clock_info": (3, 3),
+ "time.monotonic": (3, 3),
+ "time.perf_counter": (3, 3),
+ "time.process_time": (3, 3),
+ "types.new_class": (3, 3),
+ "types.prepare_class": (3, 3),
+}
+
+def uniq(a):
+ if len(a) == 0:
+ return []
+ else:
+ return [a[0]] + uniq([x for x in a if x != a[0]])
+
+class NodeChecker(ast.NodeVisitor):
+ def __init__(self):
+ self.vers = dict()
+ self.vers[(3,0)] = []
+ def add(self, node, ver, msg):
+ if ver not in self.vers:
+ self.vers[ver] = []
+ self.vers[ver].append((node.lineno, msg))
+ def visit_Call(self, node):
+ def rollup(n):
+ if isinstance(n, ast.Name):
+ return n.id
+ elif isinstance(n, ast.Attribute):
+ r = rollup(n.value)
+ if r:
+ return r + "." + n.attr
+ name = rollup(node.func)
+ if name:
+ v = Functions.get(name)
+ if v is not None:
+ self.add(node, v, name)
+ self.generic_visit(node)
+ def visit_Import(self, node):
+ for n in node.names:
+ v = StandardModules.get(n.name)
+ if v is not None:
+ self.add(node, v, n.name)
+ self.generic_visit(node)
+ def visit_ImportFrom(self, node):
+ v = StandardModules.get(node.module)
+ if v is not None:
+ self.add(node, v, node.module)
+ for n in node.names:
+ name = node.module + "." + n.name
+ v = Functions.get(name)
+ if v is not None:
+ self.add(node, v, name)
+ def visit_Raise(self, node):
+ if isinstance(node.cause, ast.Name) and node.cause.id == "None":
+ self.add(node, (3,3), "raise ... from None")
+ def visit_YieldFrom(self, node):
+ self.add(node, (3,3), "yield from")
+
+def get_versions(source, filename=None):
+ """Return information about the Python versions required for specific features.
+
+    The return value is a dictionary whose keys are version numbers as tuples
+    (for example, Python 3.1 is (3,1)) and whose values are lists of features
+    that require the indicated Python version.
+ """
+ tree = ast.parse(source, filename=filename)
+ checker = NodeChecker()
+ checker.visit(tree)
+ return checker.vers
+
+def v33(source):
+ if sys.version_info >= (3, 3):
+ return qver(source)
+ else:
+ print("Not all features tested, run --test with Python 3.3", file=sys.stderr)
+ return (3, 3)
+
+def qver(source):
+ """Return the minimum Python version required to run a particular bit of code.
+
+ >>> qver('print("hello world")')
+ (3, 0)
+ >>> qver("import importlib")
+ (3, 1)
+ >>> qver("from importlib import x")
+ (3, 1)
+ >>> qver("import tkinter.ttk")
+ (3, 1)
+ >>> qver("from collections import Counter")
+ (3, 1)
+ >>> qver("collections.OrderedDict()")
+ (3, 1)
+ >>> qver("import functools\\n@functools.lru_cache()\\ndef f(x): x*x")
+ (3, 2)
+ >>> v33("yield from x")
+ (3, 3)
+ >>> v33("raise x from None")
+ (3, 3)
+ """
+ return max(get_versions(source).keys())
diff --git a/lib/spack/external/six.py b/lib/spack/external/six.py
new file mode 100644
index 0000000000..5293325821
--- /dev/null
+++ b/lib/spack/external/six.py
@@ -0,0 +1,886 @@
+# Copyright (c) 2010-2017 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP302 finder and loader. It should be compatible
+ with Python 2.5 and all existing versions of Python3
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true, if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required, if is_package is implemented"""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getstatusoutput", "commands", "subprocess"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+ exec_("""def raise_from(value, from_value):
+ try:
+ if from_value is None:
+ raise value
+ raise value from from_value
+ finally:
+ value = None
+""")
+elif sys.version_info[:2] > (3, 2):
+ exec_("""def raise_from(value, from_value):
+ try:
+ raise value from from_value
+ finally:
+ value = None
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ def wrapper(f):
+ f = functools.wraps(wrapped, assigned, updated)(f)
+ f.__wrapped__ = wrapped
+ return f
+ return wrapper
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta):
+
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/lib/spack/external/yaml/README b/lib/spack/external/yaml/README
index c1edf13870..d186328eeb 100644
--- a/lib/spack/external/yaml/README
+++ b/lib/spack/external/yaml/README
@@ -28,7 +28,7 @@ Post your questions and opinions to the YAML-Core mailing list:
'http://lists.sourceforge.net/lists/listinfo/yaml-core'.
Submit bug reports and feature requests to the PyYAML bug tracker:
-'http://pyyaml.org/newticket?component=pyyaml'.
+'https://bitbucket.org/xi/pyyaml/issues/new'.
PyYAML is written by Kirill Simonov <xi@resolvent.net>. It is released
under the MIT license. See the file LICENSE for more details.
diff --git a/lib/spack/external/yaml/__init__.py b/lib/spack/external/yaml/lib/yaml/__init__.py
index f977f46ba7..87c15d38aa 100644
--- a/lib/spack/external/yaml/__init__.py
+++ b/lib/spack/external/yaml/lib/yaml/__init__.py
@@ -8,7 +8,7 @@ from nodes import *
from loader import *
from dumper import *
-__version__ = '3.10'
+__version__ = '3.12'
try:
from cyaml import *
diff --git a/lib/spack/external/yaml/composer.py b/lib/spack/external/yaml/lib/yaml/composer.py
index 06e5ac782f..06e5ac782f 100644
--- a/lib/spack/external/yaml/composer.py
+++ b/lib/spack/external/yaml/lib/yaml/composer.py
diff --git a/lib/spack/external/yaml/constructor.py b/lib/spack/external/yaml/lib/yaml/constructor.py
index 8c0ec181b2..635faac3e6 100644
--- a/lib/spack/external/yaml/constructor.py
+++ b/lib/spack/external/yaml/lib/yaml/constructor.py
@@ -131,9 +131,6 @@ class BaseConstructor(object):
raise ConstructorError("while constructing a mapping", node.start_mark,
"found unacceptable key (%s)" % exc, key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
- if key in mapping:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found already in-use key (%s)" % key, key_node.start_mark)
mapping[key] = value
return mapping
diff --git a/lib/spack/external/yaml/lib/yaml/cyaml.py b/lib/spack/external/yaml/lib/yaml/cyaml.py
new file mode 100644
index 0000000000..68dcd75192
--- /dev/null
+++ b/lib/spack/external/yaml/lib/yaml/cyaml.py
@@ -0,0 +1,85 @@
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from constructor import *
+
+from serializer import *
+from representer import *
+
+from resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
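These classes only exist when the `_yaml` C extension (the libyaml bindings) is importable; `__init__.py` guards `from cyaml import *` with a try/except and records the outcome in `__with_libyaml__`. A common caller-side pattern, sketched against that standard PyYAML flag:

    import yaml

    # Prefer the libyaml-backed loader when present, else pure Python.
    Loader = yaml.CSafeLoader if yaml.__with_libyaml__ else yaml.SafeLoader
    data = yaml.load("numbers: [1, 2, 3]", Loader=Loader)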
diff --git a/lib/spack/external/yaml/dumper.py b/lib/spack/external/yaml/lib/yaml/dumper.py
index f811d2c919..f811d2c919 100644
--- a/lib/spack/external/yaml/dumper.py
+++ b/lib/spack/external/yaml/lib/yaml/dumper.py
diff --git a/lib/spack/external/yaml/emitter.py b/lib/spack/external/yaml/lib/yaml/emitter.py
index e5bcdcccbb..e5bcdcccbb 100644
--- a/lib/spack/external/yaml/emitter.py
+++ b/lib/spack/external/yaml/lib/yaml/emitter.py
diff --git a/lib/spack/external/yaml/error.py b/lib/spack/external/yaml/lib/yaml/error.py
index 577686db5f..577686db5f 100644
--- a/lib/spack/external/yaml/error.py
+++ b/lib/spack/external/yaml/lib/yaml/error.py
diff --git a/lib/spack/external/yaml/events.py b/lib/spack/external/yaml/lib/yaml/events.py
index f79ad389cb..f79ad389cb 100644
--- a/lib/spack/external/yaml/events.py
+++ b/lib/spack/external/yaml/lib/yaml/events.py
diff --git a/lib/spack/external/yaml/loader.py b/lib/spack/external/yaml/lib/yaml/loader.py
index 293ff467b1..293ff467b1 100644
--- a/lib/spack/external/yaml/loader.py
+++ b/lib/spack/external/yaml/lib/yaml/loader.py
diff --git a/lib/spack/external/yaml/nodes.py b/lib/spack/external/yaml/lib/yaml/nodes.py
index c4f070c41e..c4f070c41e 100644
--- a/lib/spack/external/yaml/nodes.py
+++ b/lib/spack/external/yaml/lib/yaml/nodes.py
diff --git a/lib/spack/external/yaml/parser.py b/lib/spack/external/yaml/lib/yaml/parser.py
index f9e3057f33..f9e3057f33 100644
--- a/lib/spack/external/yaml/parser.py
+++ b/lib/spack/external/yaml/lib/yaml/parser.py
diff --git a/lib/spack/external/yaml/reader.py b/lib/spack/external/yaml/lib/yaml/reader.py
index a67af7c5da..3249e6b9f5 100644
--- a/lib/spack/external/yaml/reader.py
+++ b/lib/spack/external/yaml/lib/yaml/reader.py
@@ -56,7 +56,8 @@ class Reader(object):
# Yeah, it's ugly and slow.
- def __init__(self, stream, name=None):
+ def __init__(self, stream):
+ self.name = None
self.stream = None
self.stream_pointer = 0
self.eof = True
@@ -69,16 +70,16 @@ class Reader(object):
self.line = 0
self.column = 0
if isinstance(stream, unicode):
- self.name = "<unicode string>" if name is None else name
+ self.name = "<unicode string>"
self.check_printable(stream)
self.buffer = stream+u'\0'
elif isinstance(stream, str):
- self.name = "<string>" if name is None else name
+ self.name = "<string>"
self.raw_buffer = stream
self.determine_encoding()
else:
self.stream = stream
- self.name = getattr(stream, 'name', "<file>") if name is None else name
+ self.name = getattr(stream, 'name', "<file>")
self.eof = False
self.raw_buffer = ''
self.determine_encoding()
diff --git a/lib/spack/external/yaml/representer.py b/lib/spack/external/yaml/lib/yaml/representer.py
index 5f4fc70dbc..4ea8cb1fe1 100644
--- a/lib/spack/external/yaml/representer.py
+++ b/lib/spack/external/yaml/lib/yaml/representer.py
@@ -139,7 +139,9 @@ class BaseRepresenter(object):
class SafeRepresenter(BaseRepresenter):
def ignore_aliases(self, data):
- if data in [None, ()]:
+ if data is None:
+ return True
+ if isinstance(data, tuple) and data == ():
return True
if isinstance(data, (str, unicode, bool, int, float)):
return True
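
The split matters because `data in [None, ()]` tests membership by equality, which invokes `__eq__` on arbitrary user objects during a dump; types with unusual `__eq__` (NumPy arrays are the classic offender) can raise or match spuriously. The rewritten checks use identity and `isinstance`, so `__eq__` is only ever reached for something already known to be a tuple. A toy reproduction with a hypothetical class:

    class Weird(object):
        def __eq__(self, other):
            raise RuntimeError("no equality for you")

    w = Weird()
    # Old check: `w in [None, ()]` falls back to w.__eq__ and raises.
    # New checks never touch __eq__ for non-tuples:
    print(w is None)              # False, safely
    print(isinstance(w, tuple))   # False, safely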
diff --git a/lib/spack/external/yaml/resolver.py b/lib/spack/external/yaml/lib/yaml/resolver.py
index 6b5ab87596..528fbc0ead 100644
--- a/lib/spack/external/yaml/resolver.py
+++ b/lib/spack/external/yaml/lib/yaml/resolver.py
@@ -24,7 +24,10 @@ class BaseResolver(object):
def add_implicit_resolver(cls, tag, regexp, first):
if not 'yaml_implicit_resolvers' in cls.__dict__:
- cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ implicit_resolvers = {}
+ for key in cls.yaml_implicit_resolvers:
+ implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
+ cls.yaml_implicit_resolvers = implicit_resolvers
if first is None:
first = [None]
for ch in first:
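
The old one-line `.copy()` was shallow: the per-character lists inside `yaml_implicit_resolvers` stayed shared between a subclass and its parent, so registering a resolver on a subclass could mutate the base `Loader` too. Copying each list with `[:]` severs that link. A sketch of the isolation this buys, using a hypothetical tag:

    import re
    import yaml

    class MyLoader(yaml.Loader):
        pass

    # Must extend only MyLoader's tables, never yaml.Loader's.
    MyLoader.add_implicit_resolver(
        '!custom', re.compile(r'^custom:.*$'), first=['c'])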
diff --git a/lib/spack/external/yaml/scanner.py b/lib/spack/external/yaml/lib/yaml/scanner.py
index 5228fad65c..834f662a4c 100644
--- a/lib/spack/external/yaml/scanner.py
+++ b/lib/spack/external/yaml/lib/yaml/scanner.py
@@ -286,7 +286,7 @@ class Scanner(object):
or self.index-key.index > 1024:
if key.required:
raise ScannerError("while scanning a simple key", key.mark,
- "could not found expected ':'", self.get_mark())
+ "could not find expected ':'", self.get_mark())
del self.possible_simple_keys[level]
def save_possible_simple_key(self):
@@ -297,10 +297,6 @@ class Scanner(object):
# Check if a simple key is required at the current position.
required = not self.flow_level and self.indent == self.column
- # A simple key is required only if it is the first token in the current
- # line. Therefore it is always allowed.
- assert self.allow_simple_key or not required
-
+ # The next token might be a simple key. Let's save its number and
+ # position.
if self.allow_simple_key:
@@ -317,7 +313,7 @@ class Scanner(object):
if key.required:
raise ScannerError("while scanning a simple key", key.mark,
- "could not found expected ':'", self.get_mark())
+ "could not find expected ':'", self.get_mark())
del self.possible_simple_keys[self.flow_level]
diff --git a/lib/spack/external/yaml/serializer.py b/lib/spack/external/yaml/lib/yaml/serializer.py
index 0bf1e96dc1..0bf1e96dc1 100644
--- a/lib/spack/external/yaml/serializer.py
+++ b/lib/spack/external/yaml/lib/yaml/serializer.py
diff --git a/lib/spack/external/yaml/tokens.py b/lib/spack/external/yaml/lib/yaml/tokens.py
index 4d0b48a394..4d0b48a394 100644
--- a/lib/spack/external/yaml/tokens.py
+++ b/lib/spack/external/yaml/lib/yaml/tokens.py
diff --git a/lib/spack/external/yaml/lib3/yaml/__init__.py b/lib/spack/external/yaml/lib3/yaml/__init__.py
new file mode 100644
index 0000000000..d7d27fe63b
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/__init__.py
@@ -0,0 +1,312 @@
+
+from .error import *
+
+from .tokens import *
+from .events import *
+from .nodes import *
+
+from .loader import *
+from .dumper import *
+
+__version__ = '3.12'
+try:
+ from .cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+import io
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
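+
+# Usage sketch (assuming this package is imported as `yaml`): safe_load
+# resolves only the basic tags registered on SafeLoader, while plain
+# load can construct arbitrary Python objects, so reserve it for
+# trusted input.
+#
+# >>> yaml.safe_load("a: 1\nb: [2, 3]")
+# {'a': 1, 'b': [2, 3]}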
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ stream = io.StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
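+
+# Usage sketch: with stream=None the dumpers return the produced text
+# (str, or bytes when an encoding is given).
+#
+# >>> yaml.safe_dump({'a': 1}, default_flow_style=False)
+# 'a: 1\n'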
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a multi-representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
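+
+# Registration sketch with a hypothetical Point class: a representer
+# turns instances of one type into representation nodes on dump.
+#
+# class Point:
+#     def __init__(self, x, y):
+#         self.x, self.y = x, y
+#
+# def represent_point(dumper, p):
+#     return dumper.represent_sequence('!point', [p.x, p.y])
+#
+# yaml.add_representer(Point, represent_point)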
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(metaclass=YAMLObjectMetaclass):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+
+ @classmethod
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+
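YAMLObject ties the constructor and representer registries together through its metaclass: any subclass that sets `yaml_tag` is automatically registered on both `yaml_loader` and `yaml_dumper`. A minimal subclass with a hypothetical `!Monster` tag:

    import yaml

    class Monster(yaml.YAMLObject):
        yaml_tag = '!Monster'

        def __init__(self, name, hp):
            self.name = name
            self.hp = hp

    m = yaml.load("!Monster {name: Cave Troll, hp: 8}")
    print(m.name, m.hp)   # Cave Troll 8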
diff --git a/lib/spack/external/yaml/lib3/yaml/composer.py b/lib/spack/external/yaml/lib3/yaml/composer.py
new file mode 100644
index 0000000000..d5c6a7acd9
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/composer.py
@@ -0,0 +1,139 @@
+
+__all__ = ['Composer', 'ComposerError']
+
+from .error import MarkedYAMLError
+from .events import *
+from .nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer:
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+ # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor, event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurence"
+ % anchor, self.anchors[anchor].start_mark,
+ "second occurence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
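Note that `compose_node` resolves an `*alias` by returning the very node object stored under the corresponding `&anchor`, so aliased data stays shared through construction rather than being copied. A quick sketch:

    import yaml

    doc = "defaults: &base {retries: 3}\nprod: *base\n"
    data = yaml.load(doc)
    assert data['prod'] is data['defaults']   # shared object, not a copy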
diff --git a/lib/spack/external/yaml/lib3/yaml/constructor.py b/lib/spack/external/yaml/lib3/yaml/constructor.py
new file mode 100644
index 0000000000..981543aebb
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/constructor.py
@@ -0,0 +1,686 @@
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from .error import *
+from .nodes import *
+
+import collections, datetime, base64, binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor:
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # Are there more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = next(generator)
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ if not isinstance(key, collections.Hashable):
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unhashable key", key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ @classmethod
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+
+ @classmethod
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == 'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return super().construct_scalar(node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == 'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == 'tag:yaml.org,2002:value':
+ key_node.tag = 'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return super().construct_mapping(node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ 'yes': True,
+ 'no': False,
+ 'true': True,
+ 'false': False,
+ 'on': True,
+ 'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
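+ # Worked example for the sexagesimal branch above: "1:30:00" splits
+ # into [1, 30, 0], and 1*60**2 + 30*60 + 0 == 5400, so the scalar
+ # loads as the int 5400 (YAML 1.1 base-60 integers).
+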
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ r'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
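+ # Example: "2001-12-14 21:59:43.10 -5" matches the pattern above and,
+ # after subtracting the -05:00 offset, constructs the naive UTC value
+ # datetime.datetime(2001, 12, 15, 2, 59, 43, 100000).
+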
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag,
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_bytes(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ def construct_python_long(self, node):
+ return self.construct_yaml_int(node)
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name, exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if '.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = 'builtins'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name, exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r"
+ % (object_name, module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
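+# Tag sketch (trusted input only; needs the full Constructor below):
+# "!!python/object/apply:complex [1, 2]" loads as complex(1, 2), while
+# the .../new: variant builds the instance via cls.__new__ instead.
+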
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bytes',
+ Constructor.construct_python_bytes)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
diff --git a/lib/spack/external/yaml/lib3/yaml/cyaml.py b/lib/spack/external/yaml/lib3/yaml/cyaml.py
new file mode 100644
index 0000000000..d5cb87e994
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/cyaml.py
@@ -0,0 +1,85 @@
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from .constructor import *
+
+from .serializer import *
+from .representer import *
+
+from .resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/lib/spack/external/yaml/lib3/yaml/dumper.py b/lib/spack/external/yaml/lib3/yaml/dumper.py
new file mode 100644
index 0000000000..0b69128771
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/dumper.py
@@ -0,0 +1,62 @@
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from .emitter import *
+from .serializer import *
+from .representer import *
+from .resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/lib/spack/external/yaml/lib3/yaml/emitter.py b/lib/spack/external/yaml/lib3/yaml/emitter.py
new file mode 100644
index 0000000000..34cb145a5f
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/emitter.py
@@ -0,0 +1,1137 @@
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
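+#
+# For example, the one-line document "a: 1" arrives as the sequence
+# STREAM-START DOCUMENT-START MAPPING-START SCALAR('a') SCALAR('1')
+# MAPPING-END DOCUMENT-END STREAM-END (names abbreviated).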
+
+__all__ = ['Emitter', 'EmitterError']
+
+from .error import YAMLError
+from .events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis:
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter:
+
+ DEFAULT_TAG_PREFIXES = {
+ '!' : '!',
+ 'tag:yaml.org,2002:' : '!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = '\n'
+ if line_break in ['\r', '\n', '\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for the next few events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
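+
+ # The counts above buffer just enough lookahead (1 event after
+ # DOCUMENT-START, 2 after SEQUENCE-START, 3 after MAPPING-START) for
+ # the empty-collection and simple-key checks to peek at what follows
+ # before any layout decision is written.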
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not hasattr(self.stream, 'encoding'):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = sorted(self.event.tags.keys())
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator('---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor('&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor('*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator('[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator('{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator('-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == '')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = '!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return '%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != '!' or handle[-1] != '!':
+ raise EmitterError("tag handle must start and end with '!': %r" % handle)
+ for ch in handle[1:-1]:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch, handle))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == '!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append('%%%02X' % ch)  # iterating bytes yields ints in Python 3
+ if start < end:
+ chunks.append(prefix[start:end])
+ return ''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == '!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = sorted(self.tag_prefixes.keys())
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == '!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == '!' and handle != '!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append('%%%02X' % ch)  # iterating bytes yields ints in Python 3
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = ''.join(chunks)
+ if handle:
+ return '%s%s' % (handle, suffix_text)
+ else:
+ return '!<%s>' % suffix_text
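+
+ # For illustration (assuming the default tag prefixes are in effect):
+ # prepare_tag('tag:yaml.org,2002:str') returns '!!str', while
+ # prepare_tag('!foo bar') returns '!foo%20bar' (the space is %-escaped).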
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch, anchor))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+ unicode_characters = False  # noted by the scan below but not consulted
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith('---') or scalar.startswith('...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in '#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in '?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators cannot appear inside a scalar either.
+ if ch in ',?[]{}':
+ flow_indicators = True
+ if ch == ':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '#' and preceded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in '\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
+ if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == ' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in '\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespace is bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special characters, are only
+ # allowed for double-quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
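+ # For illustration: analyze_scalar('hello') permits every style, while
+ # analyze_scalar('a\nb') is multiline, so both plain styles are ruled out
+ # and choose_scalar_style() falls back to a quoted or block style.
+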
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write('\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = ' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = ' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = '%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = '%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator('\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != ' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == '\'':
+ data = '\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator('\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ '\0': '0',
+ '\x07': 'a',
+ '\x08': 'b',
+ '\x09': 't',
+ '\x0A': 'n',
+ '\x0B': 'v',
+ '\x0C': 'f',
+ '\x0D': 'r',
+ '\x1B': 'e',
+ '\"': '\"',
+ '\\': '\\',
+ '\x85': 'N',
+ '\xA0': '_',
+ '\u2028': 'L',
+ '\u2029': 'P',
+ }
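+
+ # For illustration: using the table above, the two-line scalar value
+ # 'a\nb' is emitted as "a\nb" when double-quoted.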
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator('"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
+ or not ('\x20' <= ch <= '\x7E'
+ or (self.allow_unicode
+ and ('\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= '\xFF':
+ data = '\\x%02X' % ord(ch)
+ elif ch <= '\uFFFF':
+ data = '\\u%04X' % ord(ch)
+ else:
+ data = '\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == ' ':
+ data = '\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator('"', False)
+
+ def determine_block_hints(self, text):
+ hints = ''
+ if text:
+ if text[0] in ' \n\x85\u2028\u2029':
+ hints += str(self.best_indent)
+ if text[-1] not in '\n\x85\u2028\u2029':
+ hints += '-'
+ elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
+ hints += '+'
+ return hints
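+
+ # For illustration: determine_block_hints('text') -> '-' (strip: no final
+ # break), 'text\n' -> '' (clip), 'text\n\n' -> '+' (keep trailing breaks),
+ # and ' text\n' -> '2' (explicit indent hint, assuming the default
+ # best_indent of 2).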
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('>'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != ' ' \
+ and text[start] == '\n':
+ self.write_line_break()
+ leading_space = (ch == ' ')
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ spaces = (ch == ' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('|'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in '\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = ' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
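+
+# A minimal usage sketch (illustrative; assumes this package is importable
+# as `yaml`):
+#
+#     import io, yaml
+#     stream = io.StringIO()
+#     emitter = yaml.emitter.Emitter(stream)
+#     for event in yaml.parse('a: 1'):
+#         emitter.emit(event)
+#     stream.getvalue()   # -> 'a: 1\n'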
+
diff --git a/lib/spack/external/yaml/lib3/yaml/error.py b/lib/spack/external/yaml/lib3/yaml/error.py
new file mode 100644
index 0000000000..b796b4dc51
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/error.py
@@ -0,0 +1,75 @@
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark:
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end]
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
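+
+ # For illustration, str(Mark('<test>', 5, 0, 5, 'key: @value', 5)) renders:
+ #
+ #     in "<test>", line 1, column 6:
+ #       key: @value
+ #            ^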
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/lib/spack/external/yaml/lib3/yaml/events.py b/lib/spack/external/yaml/lib3/yaml/events.py
new file mode 100644
index 0000000000..f79ad389cb
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/events.py
@@ -0,0 +1,86 @@
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
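+# For illustration, parsing the document "a: 1" produces the event stream:
+# StreamStartEvent, DocumentStartEvent, MappingStartEvent,
+# ScalarEvent(value='a'), ScalarEvent(value='1'), MappingEndEvent,
+# DocumentEndEvent, StreamEndEvent.
+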
diff --git a/lib/spack/external/yaml/lib3/yaml/loader.py b/lib/spack/external/yaml/lib3/yaml/loader.py
new file mode 100644
index 0000000000..08c8f01b34
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/loader.py
@@ -0,0 +1,40 @@
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from .reader import *
+from .scanner import *
+from .parser import *
+from .composer import *
+from .constructor import *
+from .resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
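+# A minimal usage sketch (illustrative only):
+#
+#     loader = SafeLoader("a: [1, 2]")
+#     loader.get_single_data()   # -> {'a': [1, 2]}
+#     loader.dispose()
+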
diff --git a/lib/spack/external/yaml/lib3/yaml/nodes.py b/lib/spack/external/yaml/lib3/yaml/nodes.py
new file mode 100644
index 0000000000..c4f070c41e
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
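+# For illustration, composing the document "a: [1, 2]" yields roughly:
+#
+#     MappingNode(tag='tag:yaml.org,2002:map', value=[
+#         (ScalarNode(tag='tag:yaml.org,2002:str', value='a'),
+#          SequenceNode(tag='tag:yaml.org,2002:seq', value=[...]))])
+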
diff --git a/lib/spack/external/yaml/lib3/yaml/parser.py b/lib/spack/external/yaml/lib3/yaml/parser.py
new file mode 100644
index 0000000000..13a5995d29
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING-START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
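+#
+# For example, the stream "- a\n- b" matches the implicit_document
+# production above (a block_node that is a block_sequence) and is parsed
+# into the events StreamStart, DocumentStart, SequenceStart, Scalar('a'),
+# Scalar('b'), SequenceEnd, DocumentEnd, StreamEnd.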
+
+__all__ = ['Parser', 'ParserError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+from .events import *
+from .scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser:
+ # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ '!': '!',
+ '!!': 'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
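+ # A typical driver loop over this three-method API (sketch):
+ #
+ #     while parser.check_event():
+ #         event = parser.get_event()
+ #         ...    # handle the event
+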
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == 'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == 'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle,
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle,
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == '!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == '!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == '!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), '',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING-START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while the production rules for flow_sequence_entry and
+ # flow_mapping_entry are identical, their interpretations differ.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generates an inline mapping (set syntax).
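+ # For example, "[ a: 1 ]" is a flow sequence whose single entry is the
+ # inline mapping {a: 1}.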
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), '', mark, mark)
+
diff --git a/lib/spack/external/yaml/lib3/yaml/reader.py b/lib/spack/external/yaml/lib3/yaml/reader.py
new file mode 100644
index 0000000000..f70e920f44
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/reader.py
@@ -0,0 +1,192 @@
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# The parser does not use it for any other purpose.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position forward by `length` characters.
+# reader.index - the index of the current character.
+# reader.line, reader.column - the line and the column of the current character.
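+#
+# A minimal usage sketch (hypothetical input, for illustration only):
+#
+#     reader = Reader("a: 1\n")
+#     reader.peek()      # -> 'a'
+#     reader.forward(3)  # consume 'a: '
+#     reader.peek()      # -> '1'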
+
+__all__ = ['Reader', 'ReaderError']
+
+from .error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, bytes):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to a unicode string,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `bytes` object,
+ # - a `str` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `bytes`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = ''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, str):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+'\0'
+ elif isinstance(stream, bytes):
+ self.name = "<byte string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = None
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in '\n\x85\u2028\u2029' \
+ or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != '\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+ self.update_raw()
+ if isinstance(self.raw_buffer, bytes):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError as exc:
+ character = self.raw_buffer[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += '\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=4096):
+ data = self.stream.read(size)
+ if self.raw_buffer is None:
+ self.raw_buffer = data
+ else:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ if not data:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
diff --git a/lib/spack/external/yaml/lib3/yaml/representer.py b/lib/spack/external/yaml/lib3/yaml/representer.py
new file mode 100644
index 0000000000..b9e65c5109
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/representer.py
@@ -0,0 +1,387 @@
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from .error import *
+from .nodes import *
+
+import datetime, sys, copyreg, types, base64, collections
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter:
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, str(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ @classmethod
+ def add_representer(cls, data_type, representer):
+ if 'yaml_representers' not in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+
+ @classmethod
+ def add_multi_representer(cls, data_type, representer):
+ if 'yaml_multi_representers' not in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
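+
+ # Illustrative sketch (not part of PyYAML): a registered representer
+ # receives (representer, data) and must return a node. For a
+ # hypothetical Point class:
+ #
+ #     def represent_point(dumper, data):
+ #         return dumper.represent_mapping(
+ #             'tag:yaml.org,2002:map', {'x': data.x, 'y': data.y})
+ #
+ #     Representer.add_representer(Point, represent_point)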
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = list(mapping.items())
+ try:
+ mapping = sorted(mapping)
+ except TypeError:
+ pass
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data is None:
+ return True
+ if isinstance(data, tuple) and data == ():
+ return True
+ if isinstance(data, (str, bytes, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+ def represent_str(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+ def represent_binary(self, data):
+ if hasattr(base64, 'encodebytes'):
+ data = base64.encodebytes(data).decode('ascii')
+ else:
+ data = base64.encodestring(data).decode('ascii')
+ return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+ def represent_bool(self, data):
+ if data:
+ value = 'true'
+ else:
+ value = 'false'
+ return self.represent_scalar('tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ # `data != data` is only true for NaN; the second clause guards
+ # against broken platforms where NaN compares equal to any float.
+ if data != data or (data == 0.0 and data == 1.0):
+ value = '.nan'
+ elif data == self.inf_value:
+ value = '.inf'
+ elif data == -self.inf_value:
+ value = '-.inf'
+ else:
+ value = repr(data).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if '.' not in value and 'e' in value:
+ value = value.replace('e', '.0e', 1)
+ return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence('tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping('tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = data.isoformat()
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = data.isoformat(' ')
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(bytes,
+ SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = '%r' % data.real
+ elif data.real == 0.0:
+ data = '%rj' % data.imag
+ elif data.imag > 0:
+ data = '%r+%rj' % (data.real, data.imag)
+ else:
+ data = '%r%rj' % (data.real, data.imag)
+ return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = '%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ 'tag:yaml.org,2002:python/module:'+data.__name__, '')
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copyreg.dispatch_table:
+ reduce = copyreg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = 'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = 'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = '%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ 'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
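+
+ # Rough sketch of the output (illustrative): an instance of a
+ # hypothetical new-style class pkg.Point with __dict__ {'x': 1, 'y': 2}
+ # reduces via __newobj__ with empty args and is emitted as
+ #
+ #     !!python/object:pkg.Point {x: 1, y: 2}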
+
+ def represent_ordered_dict(self, data):
+ # Provide uniform representation across different Python versions.
+ data_type = type(data)
+ tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
+ % (data_type.__module__, data_type.__name__)
+ items = [[key, value] for key, value in data.items()]
+ return self.represent_sequence(tag, [items])
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(collections.OrderedDict,
+ Representer.represent_ordered_dict)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/lib/spack/external/yaml/lib3/yaml/resolver.py b/lib/spack/external/yaml/lib3/yaml/resolver.py
new file mode 100644
index 0000000000..02b82e73ee
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/resolver.py
@@ -0,0 +1,227 @@
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from .error import *
+from .nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver:
+
+ DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ @classmethod
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if 'yaml_implicit_resolvers' not in cls.__dict__:
+ implicit_resolvers = {}
+ for key in cls.yaml_implicit_resolvers:
+ implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
+ cls.yaml_implicit_resolvers = implicit_resolvers
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
+ @classmethod
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API may change.
+ # `path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. `path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if 'yaml_path_resolvers' not in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, str) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (str, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
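+
+ # Illustrative sketch: force every value under a top-level 'hosts'
+ # mapping key to resolve as a plain string:
+ #
+ #     Resolver.add_path_resolver(
+ #         'tag:yaml.org,2002:str', ['hosts', None], str)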
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, str):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, str):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == '':
+ resolvers = self.yaml_implicit_resolvers.get('', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
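+
+ # For example (illustrative): with the implicit resolvers registered
+ # below, resolve(ScalarNode, '123', (True, False)) returns
+ # 'tag:yaml.org,2002:int'; a plain scalar that matches no registered
+ # resolver falls through to DEFAULT_SCALAR_TAG.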
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:bool',
+ re.compile(r'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list('yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:float',
+ re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list('-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:int',
+ re.compile(r'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list('-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:merge',
+ re.compile(r'^(?:<<)$'),
+ ['<'])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:null',
+ re.compile(r'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ ['~', 'n', 'N', ''])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:timestamp',
+ re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list('0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:value',
+ re.compile(r'^(?:=)$'),
+ ['='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:yaml',
+ re.compile(r'^(?:!|&|\*)$'),
+ list('!&*'))
+
diff --git a/lib/spack/external/yaml/lib3/yaml/scanner.py b/lib/spack/external/yaml/lib3/yaml/scanner.py
new file mode 100644
index 0000000000..c8d127b8ec
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/scanner.py
@@ -0,0 +1,1444 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
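+# For example, scanning the document "- foo" yields roughly:
+#     STREAM-START, BLOCK-SEQUENCE-START, BLOCK-ENTRY,
+#     SCALAR('foo', plain), BLOCK-END, STREAM-END
+#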
+
+__all__ = ['Scanner', 'ScannerError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey:
+ # See below simple keys treatment.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner:
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds a NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
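+
+ # Typical driver loop (illustrative): a consumer drains the scanner via
+ #
+ #     while not scanner.check_token(StreamEndToken):
+ #         token = scanner.get_token()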
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == '\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == '%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == '-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == '.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == '\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == '[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == '{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == ']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == '}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == ',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == '-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == '?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == ':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == '*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == '&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == '!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == '|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == '>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == '\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == '\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token" % ch,
+ self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not find expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not find expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there could not be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Are we completing a saved simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '---' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '...' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+ and (ch == '-' or (not self.flow_level and ch in '?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == '\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == 'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == 'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r" % self.peek(),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not ('0' <= ch <= '9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch, self.get_mark())
+ length = 0
+ while '0' <= self.peek(length) <= '9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == ' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != ' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch, self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == '<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != '>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ elif ch in '\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = '!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = '!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = ''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != '\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in ' \t'
+ length = 0
+ while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != '\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == '\n' \
+ and leading_non_space and self.peek() not in ' \t':
+ if not breaks:
+ chunks.append(' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == '\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch, self.get_mark())
+ return chomping, increment
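+
+ # For example (illustrative): after the '|' or '>' indicator, the
+ # header '+2' yields (chomping=True, increment=2), '-' yields
+ # (chomping=False, None), and an empty header yields (None, None).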
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r" % ch,
+ self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() != ' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ while self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we loosen indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation because " and ' clearly
+ # mark their beginning and end. Therefore we are less restrictive
+ # than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ '0': '\0',
+ 'a': '\x07',
+ 'b': '\x08',
+ 't': '\x09',
+ '\t': '\x09',
+ 'n': '\x0A',
+ 'v': '\x0B',
+ 'f': '\x0C',
+ 'r': '\x0D',
+ 'e': '\x1B',
+ ' ': '\x20',
+ '\"': '\"',
+ '\\': '\\',
+ 'N': '\x85',
+ '_': '\xA0',
+ 'L': '\u2028',
+ 'P': '\u2029',
+ }
+
+ ESCAPE_CODES = {
+ 'x': 2,
+ 'u': 4,
+ 'U': 8,
+ }
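+
+ # E.g. '\x41' decodes to 'A' (2 hex digits), '\u2028' to the Unicode
+ # line separator (4 digits), and '\U0001F600' to a single astral-plane
+ # character (8 digits), via chr(code) below.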
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == '\'' and self.peek(1) == '\'':
+ chunks.append('\'')
+ self.forward(2)
+ elif (double and ch == '\'') or (not double and ch in '\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == '\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k)), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(chr(code))
+ self.forward(length)
+ elif ch in '\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch, self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in ' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == '\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in ' \t':
+ self.forward()
+ if self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == '#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in '\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == ':' and
+ self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in ',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == ':'
+ and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == '#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in ' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != '!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != ' ':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != '!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == '%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch, self.get_mark())
+ return ''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ codes = []
+ mark = self.get_mark()
+ while self.peek() == '%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
+ % self.peek(k), self.get_mark())
+ codes.append(int(self.prefix(2), 16))
+ self.forward(2)
+ try:
+ value = bytes(codes).decode('utf-8')
+ except UnicodeDecodeError as exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029' : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in '\r\n\x85':
+ if self.prefix(2) == '\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.forward()
+ return ch
+ return ''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
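The scan_line_break table above collapses every ASCII line break ('\r\n',
'\r', '\n', and NEL) to a single '\n', while the Unicode separators pass
through unchanged. A minimal standalone sketch of the same mapping,
illustrative only and not part of the vendored scanner:

    def normalize_line_break(text):
        """Return the canonical form of the line break starting text."""
        ch = text[:1]
        if ch in '\r\n\x85':
            # '\r\n', '\r', '\n', and NEL all collapse to '\n'.
            return '\n'
        if ch in '\u2028\u2029':
            # Unicode line/paragraph separators are preserved as-is.
            return ch
        return ''

    assert normalize_line_break('\r\nrest') == '\n'
    assert normalize_line_break('\u2028rest') == '\u2028'
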
diff --git a/lib/spack/external/yaml/lib3/yaml/serializer.py b/lib/spack/external/yaml/lib3/yaml/serializer.py
new file mode 100644
index 0000000000..fe911e67ae
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer:
+
+ ANCHOR_TEMPLATE = 'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
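The anchoring above is a two-pass trick: anchor_node stores None the first
time a node is seen and only allocates a real anchor name on the second
visit, so anchors (and later AliasEvents) are generated solely for shared
nodes. A hedged sketch of the same idea on plain Python lists; the names
and helper are illustrative, not Spack or PyYAML code:

    def assign_anchors(root, children):
        """Map id(node) -> anchor name for nodes reached more than once."""
        seen = {}          # id(node) -> anchor name or None
        counter = [0]

        def visit(node):
            if id(node) in seen:
                if seen[id(node)] is None:
                    counter[0] += 1
                    seen[id(node)] = 'id%03d' % counter[0]
                return
            seen[id(node)] = None
            for child in children(node):
                visit(child)

        visit(root)
        return seen

    shared = ['x']
    doc = [shared, shared, ['y']]
    anchors = assign_anchors(doc, lambda n: n if isinstance(n, list) else [])
    # Only the shared sublist gets an anchor; unique nodes map to None.
    assert anchors[id(shared)] == 'id001'
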
diff --git a/lib/spack/external/yaml/lib3/yaml/tokens.py b/lib/spack/external/yaml/lib3/yaml/tokens.py
new file mode 100644
index 0000000000..4d0b48a394
--- /dev/null
+++ b/lib/spack/external/yaml/lib3/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
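Token.__repr__ reflects over the instance dictionary and hides the *_mark
bookkeeping attributes, keeping reprs readable in parser error traces. A
quick illustration (hypothetical snippet, not part of the diff):

    tok = ScalarToken('hello', True, start_mark=None, end_mark=None)
    print(tok)   # -> ScalarToken(plain=True, style=None, value='hello')
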
diff --git a/lib/spack/llnl/util/filesystem.py b/lib/spack/llnl/util/filesystem.py
index f456a5edf1..8922010e70 100644
--- a/lib/spack/llnl/util/filesystem.py
+++ b/lib/spack/llnl/util/filesystem.py
@@ -175,9 +175,9 @@ def change_sed_delimiter(old_delim, new_delim, *filenames):
def set_install_permissions(path):
"""Set appropriate permissions on the installed file."""
if os.path.isdir(path):
- os.chmod(path, 0755)
+ os.chmod(path, 0o755)
else:
- os.chmod(path, 0644)
+ os.chmod(path, 0o644)
def copy_mode(src, dest):
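Python 3 dropped the bare leading-zero octal literal (PEP 3127); the 0o
prefix is understood by both Python 2.6+ and Python 3. A quick check of
the permission bits used above:

    import stat

    # 0755 is a SyntaxError under Python 3; 0o755 is the same integer.
    assert 0o755 == 493
    assert 0o755 == (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                     stat.S_IROTH | stat.S_IXOTH)
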
diff --git a/lib/spack/llnl/util/lang.py b/lib/spack/llnl/util/lang.py
index d9fef42e53..ec4c25fead 100644
--- a/lib/spack/llnl/util/lang.py
+++ b/lib/spack/llnl/util/lang.py
@@ -27,11 +27,18 @@ import re
import functools
import collections
import inspect
+from six import string_types
# Ignore emacs backups when listing modules
ignore_modules = [r'^\.#', '~$']
+class classproperty(property):
+ """classproperty decorator: like property but for classmethods."""
+ def __get__(self, cls, owner):
+ return self.fget.__get__(None, owner)()
+
+
def index_by(objects, *funcs):
"""Create a hierarchy of dictionaries by splitting the supplied
set of objects on unique values of the supplied functions.
@@ -80,7 +87,7 @@ def index_by(objects, *funcs):
return objects
f = funcs[0]
- if isinstance(f, basestring):
+ if isinstance(f, str):
f = lambda x: getattr(x, funcs[0])
elif isinstance(f, tuple):
f = lambda x: tuple(getattr(x, p) for p in funcs[0])
@@ -326,7 +333,7 @@ def match_predicate(*args):
"""
def match(string):
for arg in args:
- if isinstance(arg, basestring):
+ if isinstance(arg, string_types):
if re.search(arg, string):
return True
elif isinstance(arg, list) or isinstance(arg, tuple):
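The new classproperty decorator invokes the wrapped classmethod whenever
the attribute is read on the class. A usage sketch; the Example class is
hypothetical:

    class Example(object):
        _registry = {'a': 1}

        @classproperty
        @classmethod
        def registry_size(cls):
            # Computed on attribute access, no instance required.
            return len(cls._registry)

    print(Example.registry_size)   # -> 1
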
diff --git a/lib/spack/llnl/util/tty/__init__.py b/lib/spack/llnl/util/tty/__init__.py
index f73d96a4e4..f78d889037 100644
--- a/lib/spack/llnl/util/tty/__init__.py
+++ b/lib/spack/llnl/util/tty/__init__.py
@@ -29,7 +29,7 @@ import fcntl
import termios
import struct
import traceback
-from StringIO import StringIO
+from six import StringIO
from llnl.util.tty.color import *
@@ -93,7 +93,7 @@ def msg(message, *args, **kwargs):
else:
cwrite("@*b{%s==>} %s" % (st_text, cescape(message)))
for arg in args:
- print indent + str(arg)
+ print(indent + str(arg))
def info(message, *args, **kwargs):
@@ -201,7 +201,7 @@ def get_yes_or_no(prompt, **kwargs):
if not ans:
result = default_value
if result is None:
- print "Please enter yes or no."
+ print("Please enter yes or no.")
else:
if ans == 'y' or ans == 'yes':
result = True
@@ -239,7 +239,7 @@ def hline(label=None, **kwargs):
out.write(label)
out.write(suffix)
- print out.getvalue()
+ print(out.getvalue())
def terminal_size():
diff --git a/lib/spack/llnl/util/tty/colify.py b/lib/spack/llnl/util/tty/colify.py
index 67acdfa517..83de530ef1 100644
--- a/lib/spack/llnl/util/tty/colify.py
+++ b/lib/spack/llnl/util/tty/colify.py
@@ -25,9 +25,11 @@
"""
Routines for printing columnar output. See colify() for more information.
"""
+from __future__ import division
+
import os
import sys
-from StringIO import StringIO
+from six import StringIO
from llnl.util.tty import terminal_size
from llnl.util.tty.color import clen, cextra
@@ -64,18 +66,18 @@ def config_variable_cols(elts, console_width, padding, cols=0):
# Get a bound on the most columns we could possibly have.
# 'clen' ignores length of ansi color sequences.
lengths = [clen(e) for e in elts]
- max_cols = max(1, console_width / (min(lengths) + padding))
+ max_cols = max(1, console_width // (min(lengths) + padding))
max_cols = min(len(elts), max_cols)
# Range of column counts to try. If forced, use the supplied value.
- col_range = [cols] if cols else xrange(1, max_cols + 1)
+ col_range = [cols] if cols else range(1, max_cols + 1)
# Determine the most columns possible for the console width.
configs = [ColumnConfig(c) for c in col_range]
for i, length in enumerate(lengths):
for conf in configs:
if conf.valid:
- col = i / ((len(elts) + conf.cols - 1) / conf.cols)
+ col = i // ((len(elts) + conf.cols - 1) // conf.cols)
p = padding if col < (conf.cols - 1) else 0
if conf.widths[col] < (length + p):
@@ -107,7 +109,7 @@ def config_uniform_cols(elts, console_width, padding, cols=0):
# 'clen' ignores length of ansi color sequences.
max_len = max(clen(e) for e in elts) + padding
if cols == 0:
- cols = max(1, console_width / max_len)
+ cols = max(1, console_width // max_len)
cols = min(len(elts), cols)
config = ColumnConfig(cols)
@@ -193,12 +195,12 @@ def colify(elts, **options):
raise ValueError("method must be one of: " + allowed_methods)
cols = config.cols
- rows = (len(elts) + cols - 1) / cols
+ rows = (len(elts) + cols - 1) // cols
rows_last_col = len(elts) % rows
- for row in xrange(rows):
+ for row in range(rows):
output.write(" " * indent)
- for col in xrange(cols):
+ for col in range(cols):
elt = col * rows + row
width = config.widths[col] + cextra(elts[elt])
if col < cols - 1:
@@ -233,7 +235,7 @@ def colify_table(table, **options):
columns = len(table[0])
def transpose():
- for i in xrange(columns):
+ for i in range(columns):
for row in table:
yield row[i]
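With 'from __future__ import division' in effect, '/' is true division
even under Python 2, so all of the column arithmetic above has to use the
floor-division operator to stay integral. A minimal illustration:

    from __future__ import division

    console_width, min_len, padding = 80, 7, 2
    # True division yields a float, which would break later indexing math:
    assert isinstance(console_width / (min_len + padding), float)
    # Floor division keeps the column count an integer:
    assert console_width // (min_len + padding) == 8
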
diff --git a/lib/spack/llnl/util/tty/log.py b/lib/spack/llnl/util/tty/log.py
index b1d45214ab..50e07c0b97 100644
--- a/lib/spack/llnl/util/tty/log.py
+++ b/lib/spack/llnl/util/tty/log.py
@@ -165,8 +165,12 @@ class log_output(object):
self.p.join(60.0) # 1 minute to join the child
def _spawn_writing_daemon(self, read, input_stream):
- # Parent: read from child, skip the with block.
- read_file = os.fdopen(read, 'r', 0)
+ # This is the parent: read from the child, skip the with block.
+
+ # Use line buffering (3rd param = 1), since Python 3 does not
+ # allow unbuffered text I/O.
+ read_file = os.fdopen(read, 'r', 1)
+
with open(self.filename, 'w') as log_file:
with keyboard_input(input_stream):
while True:
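Python 3 refuses to create an unbuffered text-mode file object
(ValueError: can't have unbuffered text I/O), which is why the daemon now
asks for line buffering. A standalone demonstration under Python 3,
assuming a POSIX pipe:

    import os

    r, w = os.pipe()
    read_file = os.fdopen(r, 'r', 1)   # third argument 1 = line buffered
    os.write(w, b'hello\n')
    os.close(w)
    print(read_file.readline())        # -> hello
    read_file.close()
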
diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py
index 6a28fbb2b0..345a804dfe 100644
--- a/lib/spack/spack/__init__.py
+++ b/lib/spack/spack/__init__.py
@@ -78,7 +78,6 @@ import spack.error
import spack.config
import spack.fetch_strategy
from spack.file_cache import FileCache
-from spack.package_prefs import PreferredPackages
from spack.abi import ABI
from spack.concretize import DefaultConcretizer
from spack.version import Version
@@ -96,7 +95,7 @@ spack_version = Version("0.10.0")
try:
repo = spack.repository.RepoPath()
sys.meta_path.append(repo)
-except spack.error.SpackError, e:
+except spack.error.SpackError as e:
tty.die('while initializing Spack RepoPath:', e.message)
@@ -162,6 +161,7 @@ from spack.build_systems.autotools import AutotoolsPackage
from spack.build_systems.cmake import CMakePackage
from spack.build_systems.python import PythonPackage
from spack.build_systems.r import RPackage
+from spack.build_systems.perl import PerlPackage
__all__ += [
'run_before',
@@ -172,7 +172,8 @@ __all__ += [
'AutotoolsPackage',
'MakefilePackage',
'PythonPackage',
- 'RPackage'
+ 'RPackage',
+ 'PerlPackage'
]
from spack.version import Version, ver
diff --git a/lib/spack/spack/architecture.py b/lib/spack/spack/architecture.py
index e44e0dc109..bace3c49f6 100644
--- a/lib/spack/spack/architecture.py
+++ b/lib/spack/spack/architecture.py
@@ -287,7 +287,7 @@ class OperatingSystem(object):
# ensure all the version calls we made are cached in the parent
# process, as well. This speeds up Spack a lot.
- clist = reduce(lambda x, y: x + y, compiler_lists)
+ clist = [comp for cl in compiler_lists for comp in cl]
return clist
def find_compiler(self, cmp_cls, *path):
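The reduce builtin moved to functools in Python 3, so the flattening is
rewritten as a nested comprehension. Both spellings, in a hedged sketch
with made-up compiler names:

    from functools import reduce   # no longer a builtin in Python 3

    compiler_lists = [['gcc@4.9', 'gcc@6.1'], [], ['clang@3.8']]
    clist = [comp for cl in compiler_lists for comp in cl]
    assert clist == ['gcc@4.9', 'gcc@6.1', 'clang@3.8']
    assert reduce(lambda x, y: x + y, compiler_lists, []) == clist
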
diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py
index 3e6dc12b35..a20a7b4db8 100644
--- a/lib/spack/spack/build_environment.py
+++ b/lib/spack/spack/build_environment.py
@@ -57,6 +57,7 @@ import os
import shutil
import sys
import traceback
+from six import iteritems
import llnl.util.lang as lang
import llnl.util.tty as tty
@@ -310,7 +311,7 @@ def set_build_environment_variables(pkg, env, dirty=False):
environment = compiler.environment
if 'set' in environment:
env_to_set = environment['set']
- for key, value in env_to_set.iteritems():
+ for key, value in iteritems(env_to_set):
env.set('SPACK_ENV_SET_%s' % key, value)
env.set('%s' % key, value)
# Let shell know which variables to set
@@ -322,8 +323,9 @@ def set_build_environment_variables(pkg, env, dirty=False):
env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
# Add bin directories from dependencies to the PATH for the build.
- bin_dirs = reversed(filter(os.path.isdir, [
- '%s/bin' % d.prefix for d in pkg.spec.dependencies(deptype='build')]))
+ bin_dirs = reversed(
+ [d.prefix.bin for d in pkg.spec.dependencies(deptype='build')
+ if os.path.isdir(d.prefix.bin)])
bin_dirs = filter_system_bin_paths(bin_dirs)
for item in bin_dirs:
env.prepend_path('PATH', item)
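six.iteritems dispatches to dict.iteritems() under Python 2 and
dict.items() under Python 3, so the loop body itself stays unchanged.
For instance:

    from six import iteritems

    env_to_set = {'CC': 'gcc', 'CXX': 'g++'}
    assert sorted(iteritems(env_to_set)) == [('CC', 'gcc'), ('CXX', 'g++')]
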
diff --git a/lib/spack/spack/build_systems/autotools.py b/lib/spack/spack/build_systems/autotools.py
index a11a84acd0..ffd00e7f69 100644
--- a/lib/spack/spack/build_systems/autotools.py
+++ b/lib/spack/spack/build_systems/autotools.py
@@ -49,7 +49,8 @@ class AutotoolsPackage(PackageBase):
4. :py:meth:`~.AutotoolsPackage.install`
They all have sensible defaults and for many packages the only thing
- necessary will be to override the helper method :py:meth:`.configure_args`.
+ necessary will be to override the helper method
+ :py:meth:`~.AutotoolsPackage.configure_args`.
For a finer tuning you may also override:
+-----------------------------------------------+--------------------+
@@ -145,7 +146,7 @@ class AutotoolsPackage(PackageBase):
if config_guess is not None:
try:
check_call([config_guess], stdout=PIPE, stderr=PIPE)
- mod = stat(my_config_guess).st_mode & 0777 | S_IWUSR
+ mod = stat(my_config_guess).st_mode & 0o777 | S_IWUSR
os.chmod(my_config_guess, mod)
shutil.copyfile(config_guess, my_config_guess)
return True
@@ -234,7 +235,7 @@ class AutotoolsPackage(PackageBase):
appropriately, otherwise raises an error.
:raises RuntimeError: if a configure script is not found in
- :py:meth:`~.configure_directory`
+ :py:meth:`~AutotoolsPackage.configure_directory`
"""
# Check if a configure script is there. If not raise a RuntimeError.
if not os.path.exists(self.configure_abs_path):
@@ -255,7 +256,8 @@ class AutotoolsPackage(PackageBase):
return []
def configure(self, spec, prefix):
- """Runs configure with the arguments specified in :py:meth:`.configure_args`
+ """Runs configure with the arguments specified in
+ :py:meth:`~.AutotoolsPackage.configure_args`
and an appropriately set prefix.
"""
options = ['--prefix={0}'.format(prefix)] + self.configure_args()
diff --git a/lib/spack/spack/build_systems/perl.py b/lib/spack/spack/build_systems/perl.py
new file mode 100644
index 0000000000..78184c85dc
--- /dev/null
+++ b/lib/spack/spack/build_systems/perl.py
@@ -0,0 +1,117 @@
+##############################################################################
+# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License (as
+# published by the Free Software Foundation) version 2.1, February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+
+import inspect
+import os
+
+from llnl.util.filesystem import join_path
+from spack.directives import extends
+from spack.package import PackageBase, run_after
+from spack.util.executable import Executable
+
+
+class PerlPackage(PackageBase):
+ """Specialized class for packages that are built using Perl.
+
+ This class provides four phases that can be overridden if required:
+
+ 1. :py:meth:`~.PerlPackage.configure`
+ 2. :py:meth:`~.PerlPackage.build`
+ 3. :py:meth:`~.PerlPackage.check`
+ 4. :py:meth:`~.PerlPackage.install`
+
+ The default methods use, in order of preference:
+ (1) Makefile.PL,
+ (2) Build.PL.
+
+ Some packages may need to override
+ :py:meth:`~.PerlPackage.configure_args`,
+ which produces a list of arguments for
+ :py:meth:`~.PerlPackage.configure`.
+ Arguments should not include the installation base directory.
+ """
+ #: Phases of a Perl package
+ phases = ['configure', 'build', 'install']
+
+ #: This attribute is used in UI queries that need to know the build
+ #: system base class
+ build_system_class = 'PerlPackage'
+
+ #: Callback names for build-time test
+ build_time_test_callbacks = ['check']
+
+ extends('perl')
+
+ def configure_args(self):
+ """Produces a list containing the arguments that must be passed to
+ :py:meth:`~.PerlPackage.configure`. Arguments should not include
+ the installation base directory, which is prepended automatically.
+
+ :return: list of arguments for Makefile.PL or Build.PL
+ """
+ return []
+
+ def configure(self, spec, prefix):
+ """Runs Makefile.PL or Build.PL with arguments consisting of
+ an appropriate installation base directory followed by the
+ list returned by :py:meth:`~.PerlPackage.configure_args`.
+
+ :raises RuntimeError: if neither Makefile.PL nor Build.PL exists
+ """
+ if os.path.isfile('Makefile.PL'):
+ self.build_method = 'Makefile.PL'
+ self.build_executable = inspect.getmodule(self).make
+ elif os.path.isfile('Build.PL'):
+ self.build_method = 'Build.PL'
+ self.build_executable = Executable(
+ join_path(self.stage.source_path, 'Build'))
+ else:
+ raise RuntimeError('Unknown build_method for perl package')
+
+ if self.build_method == 'Makefile.PL':
+ options = ['Makefile.PL', 'INSTALL_BASE={0}'.format(prefix)]
+ elif self.build_method == 'Build.PL':
+ options = ['Build.PL', '--install_base', prefix]
+ options += self.configure_args()
+
+ inspect.getmodule(self).perl(*options)
+
+ def build(self, spec, prefix):
+ """Builds a Perl package."""
+ self.build_executable()
+
+ # Ensure that tests run after build (if requested):
+ run_after('build')(PackageBase._run_default_build_time_test_callbacks)
+
+ def check(self):
+ """Runs built-in tests of a Perl package."""
+ self.build_executable('test')
+
+ def install(self, spec, prefix):
+ """Installs a Perl package."""
+ self.build_executable('install')
+
+ # Check that self.prefix is there after installation
+ run_after('install')(PackageBase.sanity_check_prefix)
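For context, a minimal recipe built on the new base class might look like
the sketch below; the package name, URL, and checksum are hypothetical:

    from spack import *


    class PerlFooBar(PerlPackage):
        """Hypothetical Perl extension, shown only to illustrate
        PerlPackage."""

        homepage = "https://example.com/perl-foo-bar"
        url = "https://example.com/Foo-Bar-1.0.tar.gz"

        version('1.0', '0123456789abcdef0123456789abcdef')

        depends_on('perl-module-build', type='build')

        def configure_args(self):
            # Extra arguments for Makefile.PL or Build.PL; the install
            # base directory is prepended automatically by configure().
            return []
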
diff --git a/lib/spack/spack/build_systems/python.py b/lib/spack/spack/build_systems/python.py
index d2ee72925d..2c8ccebae6 100644
--- a/lib/spack/spack/build_systems/python.py
+++ b/lib/spack/spack/build_systems/python.py
@@ -24,6 +24,7 @@
##############################################################################
import inspect
+import os
from spack.directives import extends
from spack.package import PackageBase, run_after
@@ -91,10 +92,26 @@ class PythonPackage(PackageBase):
# Default phases
phases = ['build', 'install']
+ # Names of the modules that the Python package provides
+ # This is used to test whether or not the installation succeeded
+ # These names generally come from running:
+ #
+ # >>> import setuptools
+ # >>> setuptools.find_packages()
+ #
+ # in the source tarball directory
+ import_modules = []
+
# To be used in UI queries that need to know which
# build-system class we are using
build_system_class = 'PythonPackage'
+ #: Callback names for build-time test
+ build_time_test_callbacks = ['test']
+
+ #: Callback names for install-time test
+ install_time_test_callbacks = ['import_module_test']
+
extends('python')
def setup_file(self):
@@ -106,19 +123,38 @@ class PythonPackage(PackageBase):
"""The directory containing the ``setup.py`` file."""
return self.stage.source_path
- def python(self, *args):
- inspect.getmodule(self).python(*args)
+ def python(self, *args, **kwargs):
+ inspect.getmodule(self).python(*args, **kwargs)
- def setup_py(self, *args):
+ def setup_py(self, *args, **kwargs):
setup = self.setup_file()
with working_dir(self.build_directory):
- self.python(setup, '--no-user-cfg', *args)
+ self.python(setup, '--no-user-cfg', *args, **kwargs)
+
+ def _setup_command_available(self, command):
+ """Determines whether or not a setup.py command exists.
+
+ :param str command: The command to look for
+ :return: True if the command is found, else False
+ :rtype: bool
+ """
+ kwargs = {
+ 'output': os.devnull,
+ 'error': os.devnull,
+ 'fail_on_error': False
+ }
+
+ python = inspect.getmodule(self).python
+ setup = self.setup_file()
+
+ python(setup, '--no-user-cfg', command, '--help', **kwargs)
+ return python.returncode == 0
# The following phases and their descriptions come from:
# $ python setup.py --help-commands
- # Only standard commands are included here, but some packages
- # define extra commands as well
+
+ # Standard commands
def build(self, spec, prefix):
"""Build everything needed to install."""
@@ -306,5 +342,37 @@ class PythonPackage(PackageBase):
"""Arguments to pass to check."""
return []
+ # Testing
+
+ def test(self):
+ """Run unit tests after in-place build.
+
+ These tests are only run if the package actually has a 'test' command.
+ """
+ if self._setup_command_available('test'):
+ args = self.test_args(self.spec, self.prefix)
+
+ self.setup_py('test', *args)
+
+ def test_args(self, spec, prefix):
+ """Arguments to pass to test."""
+ return []
+
+ run_after('build')(PackageBase._run_default_build_time_test_callbacks)
+
+ def import_module_test(self):
+ """Attempts to import the module that was just installed.
+
+ This test is only run if the package overrides
+ :py:attr:`import_modules` with a list of module names."""
+
+ # Make sure we are importing the installed modules,
+ # not the ones in the current directory
+ with working_dir('..'):
+ for module in self.import_modules:
+ self.python('-c', 'import {0}'.format(module))
+
+ run_after('install')(PackageBase._run_default_install_time_test_callbacks)
+
# Check that self.prefix is there after installation
run_after('install')(PackageBase.sanity_check_prefix)
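The probe runs "setup.py <command> --help" and treats a zero exit status
as evidence that the command exists. The same idea outside Spack's
Executable wrapper, as a standalone sketch (function name is
illustrative; subprocess.DEVNULL assumes Python 3.3+):

    import subprocess
    import sys

    def setup_command_available(setup_py, command):
        """Return True if setup.py recognizes the given command."""
        status = subprocess.call(
            [sys.executable, setup_py, '--no-user-cfg', command, '--help'],
            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        return status == 0
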
diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py
index 3a42510245..622ef4d96c 100644
--- a/lib/spack/spack/cmd/__init__.py
+++ b/lib/spack/spack/cmd/__init__.py
@@ -22,6 +22,8 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from __future__ import print_function
+
import os
import re
import sys
@@ -186,7 +188,7 @@ def display_specs(specs, **kwargs):
# Traverse the index and print out each package
for i, (architecture, compiler) in enumerate(sorted(index)):
if i > 0:
- print
+ print()
header = "%s{%s} / %s{%s}" % (spack.spec.architecture_color,
architecture, spack.spec.compiler_color,
@@ -205,7 +207,7 @@ def display_specs(specs, **kwargs):
for abbrv, spec in zip(abbreviated, specs):
prefix = gray_hash(spec, hlen) if hashes else ''
- print prefix + (format % (abbrv, spec.prefix))
+ print(prefix + (format % (abbrv, spec.prefix)))
elif mode == 'deps':
for spec in specs:
diff --git a/lib/spack/spack/cmd/arch.py b/lib/spack/spack/cmd/arch.py
index 5b9daf9dea..1079e7f215 100644
--- a/lib/spack/spack/cmd/arch.py
+++ b/lib/spack/spack/cmd/arch.py
@@ -22,6 +22,8 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from __future__ import print_function
+
import spack.architecture as architecture
description = "print architecture information about this machine"
@@ -36,6 +38,6 @@ def setup_parser(subparser):
def arch(parser, args):
if args.platform:
- print architecture.platform()
+ print(architecture.platform())
else:
- print architecture.sys_type()
+ print(architecture.sys_type())
diff --git a/lib/spack/spack/cmd/build.py b/lib/spack/spack/cmd/build.py
index 6a90af907d..90157a85af 100644
--- a/lib/spack/spack/cmd/build.py
+++ b/lib/spack/spack/cmd/build.py
@@ -31,7 +31,8 @@ description = 'stops at build stage when installing a package, if possible'
build_system_to_phase = {
CMakePackage: 'build',
AutotoolsPackage: 'build',
- PythonPackage: 'build'
+ PythonPackage: 'build',
+ PerlPackage: 'build'
}
diff --git a/lib/spack/spack/cmd/common/arguments.py b/lib/spack/spack/cmd/common/arguments.py
index a8bdcf692f..3115501439 100644
--- a/lib/spack/spack/cmd/common/arguments.py
+++ b/lib/spack/spack/cmd/common/arguments.py
@@ -81,7 +81,7 @@ _arguments['constraint'] = Args(
_arguments['module_type'] = Args(
'-m', '--module-type',
choices=spack.modules.module_types.keys(),
- default=spack.modules.module_types.keys()[0],
+ default=list(spack.modules.module_types.keys())[0],
help='type of module files [default: %(default)s]')
_arguments['yes_to_all'] = Args(
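dict.keys() returns a view object in Python 3, and views cannot be
indexed, hence the explicit list() around it. For example:

    module_types = {'tcl': object(), 'dotkit': object()}
    # module_types.keys()[0] raises TypeError under Python 3:
    #   'dict_keys' object is not subscriptable
    default = list(module_types.keys())[0]
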
diff --git a/lib/spack/spack/cmd/compiler.py b/lib/spack/spack/cmd/compiler.py
index 22f3b3f26a..6067d44c5e 100644
--- a/lib/spack/spack/cmd/compiler.py
+++ b/lib/spack/spack/cmd/compiler.py
@@ -22,8 +22,11 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from __future__ import print_function
+
import argparse
import sys
+from six import iteritems
import llnl.util.tty as tty
import spack.compilers
@@ -142,36 +145,36 @@ def compiler_info(args):
tty.error("No compilers match spec %s" % cspec)
else:
for c in compilers:
- print str(c.spec) + ":"
- print "\tpaths:"
+ print(str(c.spec) + ":")
+ print("\tpaths:")
for cpath in ['cc', 'cxx', 'f77', 'fc']:
- print "\t\t%s = %s" % (cpath, getattr(c, cpath, None))
+ print("\t\t%s = %s" % (cpath, getattr(c, cpath, None)))
if c.flags:
- print "\tflags:"
- for flag, flag_value in c.flags.iteritems():
- print "\t\t%s = %s" % (flag, flag_value)
+ print("\tflags:")
+ for flag, flag_value in iteritems(c.flags):
+ print("\t\t%s = %s" % (flag, flag_value))
if len(c.environment) != 0:
if len(c.environment['set']) != 0:
- print "\tenvironment:"
- print "\t set:"
- for key, value in c.environment['set'].iteritems():
- print "\t %s = %s" % (key, value)
+ print("\tenvironment:")
+ print("\t set:")
+ for key, value in iteritems(c.environment['set']):
+ print("\t %s = %s" % (key, value))
if c.extra_rpaths:
- print "\tExtra rpaths:"
+ print("\tExtra rpaths:")
for extra_rpath in c.extra_rpaths:
- print "\t\t%s" % extra_rpath
- print "\tmodules = %s" % c.modules
- print "\toperating system = %s" % c.operating_system
+ print("\t\t%s" % extra_rpath)
+ print("\tmodules = %s" % c.modules)
+ print("\toperating system = %s" % c.operating_system)
def compiler_list(args):
tty.msg("Available compilers")
index = index_by(spack.compilers.all_compilers(scope=args.scope),
lambda c: (c.spec.name, c.operating_system, c.target))
- ordered_sections = sorted(index.items(), key=lambda (k, v): k)
+ ordered_sections = sorted(index.items(), key=lambda item: item[0])
for i, (key, compilers) in enumerate(ordered_sections):
if i >= 1:
- print
+ print()
name, os, target = key
os_str = os
if target:
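Tuple parameters were removed by PEP 3113, so "lambda (k, v): k" is a
SyntaxError on Python 3; the sort key now indexes into the pair instead.
For instance, with made-up compiler data:

    index = {('gcc', 'linux'): ['gcc@6.1'],
             ('clang', 'darwin'): ['clang@3.8']}
    ordered_sections = sorted(index.items(), key=lambda item: item[0])
    assert [key for key, _ in ordered_sections] == \
        [('clang', 'darwin'), ('gcc', 'linux')]
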
diff --git a/lib/spack/spack/cmd/configure.py b/lib/spack/spack/cmd/configure.py
index 7b1ef04522..037705f480 100644
--- a/lib/spack/spack/cmd/configure.py
+++ b/lib/spack/spack/cmd/configure.py
@@ -22,7 +22,6 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-
import argparse
import llnl.util.tty as tty
@@ -36,7 +35,8 @@ description = 'stops at configuration stage when installing a package, if possib
build_system_to_phase = {
CMakePackage: 'cmake',
- AutotoolsPackage: 'configure'
+ AutotoolsPackage: 'configure',
+ PerlPackage: 'configure'
}
diff --git a/lib/spack/spack/cmd/create.py b/lib/spack/spack/cmd/create.py
index 14b213a756..906c7e1aec 100644
--- a/lib/spack/spack/cmd/create.py
+++ b/lib/spack/spack/cmd/create.py
@@ -31,13 +31,13 @@ import llnl.util.tty as tty
import spack
import spack.cmd
import spack.cmd.checksum
-import spack.url
import spack.util.web
from llnl.util.filesystem import mkdirp
from spack.repository import Repo
from spack.spec import Spec
from spack.util.executable import which
from spack.util.naming import *
+from spack.url import *
description = "create a new package file"
@@ -268,6 +268,45 @@ class RPackageTemplate(PackageTemplate):
super(RPackageTemplate, self).__init__(name, *args)
+class PerlmakePackageTemplate(PackageTemplate):
+ """Provides appropriate overrides for Perl extensions
+ that come with a Makefile.PL"""
+ base_class_name = 'PerlPackage'
+
+ dependencies = """\
+ # FIXME: Add dependencies if required:
+ # depends_on('perl-foo')
+ # depends_on('barbaz', type=('build', 'link', 'run'))"""
+
+ body = """\
+ # FIXME: If non-standard arguments are used for configure step:
+ # def configure_args(self):
+ # return ['my', 'configure', 'args']
+
+ # FIXME: in unusual cases, it may be necessary to override methods for
+ # configure(), build(), check() or install()."""
+
+ def __init__(self, name, *args):
+ # If the user provided `--name perl-cpp`, don't rename it perl-perl-cpp
+ if not name.startswith('perl-'):
+ # Make it more obvious that we are renaming the package
+ tty.msg("Changing package name from {0} to perl-{0}".format(name))
+ name = 'perl-{0}'.format(name)
+
+ super(PerlmakePackageTemplate, self).__init__(name, *args)
+
+
+class PerlbuildPackageTemplate(PerlmakePackageTemplate):
+ """Provides appropriate overrides for Perl extensions
+ that come with a Build.PL instead of a Makefile.PL"""
+ dependencies = """\
+ depends_on('perl-module-build', type='build')
+
+ # FIXME: Add additional dependencies if required:
+ # depends_on('perl-foo')
+ # depends_on('barbaz', type=('build', 'link', 'run'))"""
+
+
class OctavePackageTemplate(PackageTemplate):
"""Provides appropriate overrides for octave packages"""
@@ -305,6 +344,8 @@ templates = {
'bazel': BazelPackageTemplate,
'python': PythonPackageTemplate,
'r': RPackageTemplate,
+ 'perlmake': PerlmakePackageTemplate,
+ 'perlbuild': PerlbuildPackageTemplate,
'octave': OctavePackageTemplate,
'generic': PackageTemplate
}
@@ -341,6 +382,10 @@ class BuildSystemGuesser:
can take a peek at the fetched tarball and discern the build system it uses
"""
+ def __init__(self):
+ """Sets the default build system."""
+ self.build_system = 'generic'
+
def __call__(self, stage, url):
"""Try to guess the type of build system used by a project based on
the contents of its archive or the URL it was downloaded from."""
@@ -363,7 +408,9 @@ class BuildSystemGuesser:
(r'/SConstruct$', 'scons'),
(r'/setup.py$', 'python'),
(r'/NAMESPACE$', 'r'),
- (r'/WORKSPACE$', 'bazel')
+ (r'/WORKSPACE$', 'bazel'),
+ (r'/Build.PL$', 'perlbuild'),
+ (r'/Makefile.PL$', 'perlmake'),
]
# Peek inside the compressed file.
@@ -384,14 +431,11 @@ class BuildSystemGuesser:
# Determine the build system based on the files contained
# in the archive.
- build_system = 'generic'
for pattern, bs in clues:
if any(re.search(pattern, l) for l in lines):
- build_system = bs
+ self.build_system = bs
break
- self.build_system = build_system
-
def get_name(args):
"""Get the name of the package based on the supplied arguments.
@@ -415,9 +459,9 @@ def get_name(args):
elif args.url:
# Try to guess the package name based on the URL
try:
- name = spack.url.parse_name(args.url)
+ name = parse_name(args.url)
tty.msg("This looks like a URL for {0}".format(name))
- except spack.url.UndetectableNameError:
+ except UndetectableNameError:
tty.die("Couldn't guess a name for this package.",
" Please report this bug. In the meantime, try running:",
" `spack create --name <name> <url>`")
@@ -472,11 +516,16 @@ def get_versions(args, name):
if args.url:
# Find available versions
- url_dict = spack.util.web.find_versions_of_archive(args.url)
+ try:
+ url_dict = spack.util.web.find_versions_of_archive(args.url)
+ except UndetectableVersionError:
+ # Use fake versions
+ tty.warn("Couldn't detect version in: {0}".format(args.url))
+ return versions, guesser
if not url_dict:
# If no versions were found, revert to what the user provided
- version = spack.url.parse_version(args.url)
+ version = parse_version(args.url)
url_dict = {version: args.url}
versions = spack.cmd.checksum.get_checksums(
@@ -568,6 +617,7 @@ def create(parser, args):
url = get_url(args)
versions, guesser = get_versions(args, name)
build_system = get_build_system(args, guesser)
+ name = simplify_name(name)
# Create the package template object
PackageClass = templates[build_system]
diff --git a/lib/spack/spack/cmd/dependents.py b/lib/spack/spack/cmd/dependents.py
index 42181b5502..c752ffb943 100644
--- a/lib/spack/spack/cmd/dependents.py
+++ b/lib/spack/spack/cmd/dependents.py
@@ -50,4 +50,4 @@ def dependents(parser, args):
if deps:
spack.cmd.display_specs(deps)
else:
- print "No dependents"
+ print("No dependents")
diff --git a/lib/spack/spack/cmd/env.py b/lib/spack/spack/cmd/env.py
index 49fc48700c..ed18940ac0 100644
--- a/lib/spack/spack/cmd/env.py
+++ b/lib/spack/spack/cmd/env.py
@@ -22,8 +22,11 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from __future__ import print_function
+
import os
import argparse
+
import llnl.util.tty as tty
import spack.cmd
import spack.build_environment as build_env
@@ -64,7 +67,7 @@ def env(parser, args):
if not cmd:
# If no command act like the "env" command and print out env vars.
for key, val in os.environ.items():
- print "%s=%s" % (key, val)
+ print("%s=%s" % (key, val))
else:
# Otherwise execute the command with the new environment
diff --git a/lib/spack/spack/cmd/flake8.py b/lib/spack/spack/cmd/flake8.py
index d5ed9adf18..a6dc941190 100644
--- a/lib/spack/spack/cmd/flake8.py
+++ b/lib/spack/spack/cmd/flake8.py
@@ -22,6 +22,8 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from __future__ import print_function
+
import re
import os
import sys
@@ -68,7 +70,7 @@ exemptions = {
# exemptions applied to all files.
r'.py$': {
# Exempt lines with URLs from overlong line errors.
- 501: [r'(https?|file)\:']
+ 501: [r'(https?|ftp|file)\:']
},
}
@@ -127,7 +129,7 @@ def filter_file(source, dest, output=False):
for code, patterns in errors.items():
for pattern in patterns:
if pattern.search(line):
- line += (" # NOQA: ignore=%d" % code)
+ line += (" # NOQA: E%d" % code)
break
oline = line + '\n'
@@ -175,12 +177,12 @@ def flake8(parser, args):
file_list = changed_files()
shutil.copy('.flake8', os.path.join(temp, '.flake8'))
- print '======================================================='
- print 'flake8: running flake8 code checks on spack.'
- print
- print 'Modified files:'
+ print('=======================================================')
+ print('flake8: running flake8 code checks on spack.')
+ print()
+ print('Modified files:')
for filename in file_list:
- print " %s" % filename.strip()
+ print(" %s" % filename.strip())
print('=======================================================')
# filter files into a temporary directory with exemptions added.
@@ -196,7 +198,7 @@ def flake8(parser, args):
if args.root_relative:
# print results relative to repo root.
- print output
+ print(output)
else:
# print results relative to current working directory
def cwd_relative(path):
@@ -204,16 +206,16 @@ def flake8(parser, args):
os.path.join(spack.prefix, path.group(1)), os.getcwd())
for line in output.split('\n'):
- print re.sub(r'^(.*): \[', cwd_relative, line)
+ print(re.sub(r'^(.*): \[', cwd_relative, line))
if flake8.returncode != 0:
- print "Flake8 found errors."
+ print("Flake8 found errors.")
sys.exit(1)
else:
- print "Flake8 checks were clean."
+ print("Flake8 checks were clean.")
finally:
if args.keep_temp:
- print "temporary files are in ", temp
+ print("temporary files are in ", temp)
else:
shutil.rmtree(temp, ignore_errors=True)
diff --git a/lib/spack/spack/cmd/graph.py b/lib/spack/spack/cmd/graph.py
index 414b6d78ec..ee401d8fb7 100644
--- a/lib/spack/spack/cmd/graph.py
+++ b/lib/spack/spack/cmd/graph.py
@@ -22,8 +22,9 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import argparse
+from __future__ import print_function
+import argparse
import llnl.util.tty as tty
import spack
@@ -96,5 +97,5 @@ def graph(parser, args):
elif specs: # ascii is default: user doesn't need to provide it explicitly
graph_ascii(specs[0], debug=spack.debug, deptype=deptype)
for spec in specs[1:]:
- print # extra line bt/w independent graphs
+ print() # extra line bt/w independent graphs
graph_ascii(spec, debug=spack.debug)
diff --git a/lib/spack/spack/cmd/info.py b/lib/spack/spack/cmd/info.py
index 1dd0ee4e78..799471ffcc 100644
--- a/lib/spack/spack/cmd/info.py
+++ b/lib/spack/spack/cmd/info.py
@@ -22,7 +22,10 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from __future__ import print_function
+
import textwrap
+
from llnl.util.tty.colify import *
import spack
import spack.fetch_strategy as fs
@@ -50,12 +53,12 @@ def print_text_info(pkg):
"""Print out a plain text description of a package."""
header = "{0}: ".format(pkg.build_system_class)
- print header, pkg.name
+ print(header, pkg.name)
whitespaces = ''.join([' '] * (len(header) - len("Homepage: ")))
- print "Homepage:", whitespaces, pkg.homepage
+ print("Homepage:", whitespaces, pkg.homepage)
- print
- print "Safe versions: "
+ print()
+ print("Safe versions: ")
if not pkg.versions:
print(" None")
@@ -63,20 +66,20 @@ def print_text_info(pkg):
pad = padder(pkg.versions, 4)
for v in reversed(sorted(pkg.versions)):
f = fs.for_package_version(pkg, v)
- print " %s%s" % (pad(v), str(f))
+ print(" %s%s" % (pad(v), str(f)))
- print
- print "Variants:"
+ print()
+ print("Variants:")
if not pkg.variants:
- print " None"
+ print(" None")
else:
pad = padder(pkg.variants, 4)
maxv = max(len(v) for v in sorted(pkg.variants))
fmt = "%%-%ss%%-10s%%s" % (maxv + 4)
- print " " + fmt % ('Name', 'Default', 'Description')
- print
+ print(" " + fmt % ('Name', 'Default', 'Description'))
+ print()
for name in sorted(pkg.variants):
v = pkg.variants[name]
default = 'on' if v.default else 'off'
@@ -85,26 +88,26 @@ def print_text_info(pkg):
lines[1:] = [" " + (" " * maxv) + l for l in lines[1:]]
desc = "\n".join(lines)
- print " " + fmt % (name, default, desc)
+ print(" " + fmt % (name, default, desc))
- print
- print "Installation Phases:"
+ print()
+ print("Installation Phases:")
phase_str = ''
for phase in pkg.phases:
phase_str += " {0}".format(phase)
- print phase_str
+ print(phase_str)
for deptype in ('build', 'link', 'run'):
- print
- print "%s Dependencies:" % deptype.capitalize()
+ print()
+ print("%s Dependencies:" % deptype.capitalize())
deps = sorted(pkg.dependencies_of_type(deptype))
if deps:
colify(deps, indent=4)
else:
- print " None"
+ print(" None")
- print
- print "Virtual Packages: "
+ print()
+ print("Virtual Packages: ")
if pkg.provided:
inverse_map = {}
for spec, whens in pkg.provided.items():
@@ -113,17 +116,17 @@ def print_text_info(pkg):
inverse_map[when] = set()
inverse_map[when].add(spec)
for when, specs in reversed(sorted(inverse_map.items())):
- print " %s provides %s" % (
- when, ', '.join(str(s) for s in specs))
+ print(" %s provides %s" % (
+ when, ', '.join(str(s) for s in specs)))
else:
- print " None"
+ print(" None")
- print
- print "Description:"
+ print()
+ print("Description:")
if pkg.__doc__:
- print pkg.format_doc(indent=4)
+ print(pkg.format_doc(indent=4))
else:
- print " None"
+ print(" None")
def info(parser, args):
diff --git a/lib/spack/spack/cmd/list.py b/lib/spack/spack/cmd/list.py
index b5b699dccd..bcfb092945 100644
--- a/lib/spack/spack/cmd/list.py
+++ b/lib/spack/spack/cmd/list.py
@@ -22,12 +22,14 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from __future__ import print_function
+
import argparse
import cgi
import fnmatch
import re
import sys
-from StringIO import StringIO
+from six import StringIO
import llnl.util.tty as tty
import spack
@@ -123,42 +125,42 @@ def rst(pkgs):
pkgs = [spack.repo.get(name) for name in pkg_names]
print('.. _package-list:')
- print('')
+ print()
print('============')
print('Package List')
print('============')
- print('')
+ print()
print('This is a list of things you can install using Spack. It is')
print('automatically generated based on the packages in the latest Spack')
print('release.')
- print('')
+ print()
print('Spack currently has %d mainline packages:' % len(pkgs))
- print('')
+ print()
print(rst_table('`%s`_' % p for p in pkg_names))
- print('')
+ print()
# Output some text for each package.
for pkg in pkgs:
print('-----')
- print('')
+ print()
print('.. _%s:' % pkg.name)
- print('')
+ print()
# Must be at least 2 long, breaks for single letter packages like R.
print('-' * max(len(pkg.name), 2))
print(pkg.name)
print('-' * max(len(pkg.name), 2))
- print('')
+ print()
print('Homepage:')
print(' * `%s <%s>`__' % (cgi.escape(pkg.homepage), pkg.homepage))
- print('')
+ print()
print('Spack package:')
print(' * `%s/package.py <%s>`__' % (pkg.name, github_url(pkg)))
- print('')
+ print()
if pkg.versions:
print('Versions:')
print(' ' + ', '.join(str(v) for v in
reversed(sorted(pkg.versions))))
- print('')
+ print()
for deptype in spack.alldeps:
deps = pkg.dependencies_of_type(deptype)
@@ -166,11 +168,11 @@ def rst(pkgs):
print('%s Dependencies' % deptype.capitalize())
print(' ' + ', '.join('%s_' % d if d in pkg_names
else d for d in deps))
- print('')
+ print()
print('Description:')
print(pkg.format_doc(indent=2))
- print('')
+ print()
def list(parser, args):
diff --git a/lib/spack/spack/cmd/location.py b/lib/spack/spack/cmd/location.py
index c82b7072f9..d1a7825630 100644
--- a/lib/spack/spack/cmd/location.py
+++ b/lib/spack/spack/cmd/location.py
@@ -22,8 +22,9 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import argparse
+from __future__ import print_function
+import argparse
import llnl.util.tty as tty
import spack
@@ -70,16 +71,16 @@ def setup_parser(subparser):
def location(parser, args):
if args.module_dir:
- print spack.module_path
+ print(spack.module_path)
elif args.spack_root:
- print spack.prefix
+ print(spack.prefix)
elif args.packages:
- print spack.repo.first_repo().root
+ print(spack.repo.first_repo().root)
elif args.stages:
- print spack.stage_path
+ print(spack.stage_path)
else:
specs = spack.cmd.parse_specs(args.spec)
@@ -91,14 +92,14 @@ def location(parser, args):
if args.install_dir:
# install_dir command matches against installed specs.
spec = spack.cmd.disambiguate_spec(specs[0])
- print spec.prefix
+ print(spec.prefix)
else:
spec = specs[0]
if args.package_dir:
# This one just needs the spec name.
- print spack.repo.dirname_for_package_name(spec.name)
+ print(spack.repo.dirname_for_package_name(spec.name))
else:
# These versions need concretized specs.
@@ -106,11 +107,11 @@ def location(parser, args):
pkg = spack.repo.get(spec)
if args.stage_dir:
- print pkg.stage.path
+ print(pkg.stage.path)
else: # args.build_dir is the default.
if not pkg.stage.source_path:
tty.die("Build directory does not exist yet. "
"Run this to create it:",
"spack stage " + " ".join(args.spec))
- print pkg.stage.source_path
+ print(pkg.stage.source_path)
diff --git a/lib/spack/spack/cmd/md5.py b/lib/spack/spack/cmd/md5.py
index 7940d1327b..fc205cc693 100644
--- a/lib/spack/spack/cmd/md5.py
+++ b/lib/spack/spack/cmd/md5.py
@@ -25,7 +25,7 @@
import argparse
import hashlib
import os
-from urlparse import urlparse
+from six.moves.urllib.parse import urlparse
import llnl.util.tty as tty
import spack.util.crypto
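six.moves papers over the Python 3 rename of urlparse to urllib.parse
with a single import that works on both majors:

    from six.moves.urllib.parse import urlparse

    parts = urlparse('https://example.com/pkg-1.0.tar.gz')
    assert parts.scheme == 'https'
    assert parts.path.endswith('.tar.gz')
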
diff --git a/lib/spack/spack/cmd/mirror.py b/lib/spack/spack/cmd/mirror.py
index 2db75a0b1f..528fcbfc3f 100644
--- a/lib/spack/spack/cmd/mirror.py
+++ b/lib/spack/spack/cmd/mirror.py
@@ -141,7 +141,7 @@ def mirror_list(args):
fmt = "%%-%ds%%s" % (max_len + 4)
for name in mirrors:
- print fmt % (name, mirrors[name])
+ print(fmt % (name, mirrors[name]))
def _read_specs_from_file(filename):
@@ -152,7 +152,7 @@ def _read_specs_from_file(filename):
s = Spec(string)
s.package
specs.append(s)
- except SpackError, e:
+ except SpackError as e:
tty.die("Parse error in %s, line %d:" % (args.file, i + 1),
">>> " + string, str(e))
return specs
diff --git a/lib/spack/spack/cmd/pkg.py b/lib/spack/spack/cmd/pkg.py
index 45104a9ff2..12dcb81792 100644
--- a/lib/spack/spack/cmd/pkg.py
+++ b/lib/spack/spack/cmd/pkg.py
@@ -22,6 +22,8 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from __future__ import print_function
+
import os
import argparse
@@ -71,13 +73,16 @@ def setup_parser(subparser):
help="revision to compare to rev1 (default is HEAD)")
-def get_git():
+def get_git(fatal=True):
# cd to spack prefix to do git operations
os.chdir(spack.prefix)
# If this is a non-git version of spack, give up.
if not os.path.isdir('.git'):
- tty.die("No git repo in %s. Can't use 'spack pkg'" % spack.prefix)
+ if fatal:
+ tty.die("No git repo in %s. Can't use 'spack pkg'" % spack.prefix)
+ else:
+ return None
return which("git", required=True)
@@ -118,13 +123,13 @@ def pkg_diff(args):
u1, u2 = diff_packages(args.rev1, args.rev2)
if u1:
- print "%s:" % args.rev1
+ print("%s:" % args.rev1)
colify(sorted(u1), indent=4)
if u1:
- print
+ print()
if u2:
- print "%s:" % args.rev2
+ print("%s:" % args.rev2)
colify(sorted(u2), indent=4)
diff --git a/lib/spack/spack/cmd/repo.py b/lib/spack/spack/cmd/repo.py
index 1881654cac..dd75f148c2 100644
--- a/lib/spack/spack/cmd/repo.py
+++ b/lib/spack/spack/cmd/repo.py
@@ -22,6 +22,8 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from __future__ import print_function
+
import os
import llnl.util.tty as tty
@@ -161,7 +163,7 @@ def repo_list(args):
max_ns_len = max(len(r.namespace) for r in repos)
for repo in repos:
fmt = "%%-%ds%%s" % (max_ns_len + 4)
- print fmt % (repo.namespace, repo.root)
+ print(fmt % (repo.namespace, repo.root))
def repo(parser, args):
diff --git a/lib/spack/spack/cmd/spec.py b/lib/spack/spack/cmd/spec.py
index 9eea404bc7..d89707f230 100644
--- a/lib/spack/spack/cmd/spec.py
+++ b/lib/spack/spack/cmd/spec.py
@@ -22,8 +22,9 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import argparse
+from __future__ import print_function
+import argparse
import spack
import spack.cmd
import spack.cmd.common.arguments as arguments
@@ -69,20 +70,20 @@ def spec(parser, args):
# With -y, just print YAML to output.
if args.yaml:
spec.concretize()
- print spec.to_yaml()
+ print(spec.to_yaml())
continue
# Print some diagnostic info by default.
- print "Input spec"
- print "--------------------------------"
- print spec.tree(**kwargs)
+ print("Input spec")
+ print("--------------------------------")
+ print(spec.tree(**kwargs))
- print "Normalized"
- print "--------------------------------"
+ print("Normalized")
+ print("--------------------------------")
spec.normalize()
- print spec.tree(**kwargs)
+ print(spec.tree(**kwargs))
- print "Concretized"
- print "--------------------------------"
+ print("Concretized")
+ print("--------------------------------")
spec.concretize()
- print spec.tree(**kwargs)
+ print(spec.tree(**kwargs))
diff --git a/lib/spack/spack/cmd/test.py b/lib/spack/spack/cmd/test.py
index c569a1bc88..9384e3a9e6 100644
--- a/lib/spack/spack/cmd/test.py
+++ b/lib/spack/spack/cmd/test.py
@@ -22,12 +22,14 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from __future__ import print_function
+
import sys
import os
import re
import argparse
import pytest
-from StringIO import StringIO
+from six import StringIO
from llnl.util.filesystem import *
from llnl.util.tty.colify import colify
@@ -79,7 +81,7 @@ def do_list(args, unknown_args):
output_lines.append(
os.path.basename(name).replace('.py', ''))
else:
- print indent + name
+ print(indent + name)
if args.list:
colify(output_lines)
diff --git a/lib/spack/spack/cmd/url.py b/lib/spack/spack/cmd/url.py
index 6823f0febd..1128e08a43 100644
--- a/lib/spack/spack/cmd/url.py
+++ b/lib/spack/spack/cmd/url.py
@@ -31,6 +31,7 @@ import spack
from llnl.util import tty
from spack.url import *
from spack.util.web import find_versions_of_archive
+from spack.util.naming import simplify_name
description = "debugging tool for url parsing"
@@ -66,19 +67,26 @@ def setup_parser(subparser):
'-n', '--incorrect-name', action='store_true',
help='only list urls for which the name was incorrectly parsed')
excl_args.add_argument(
+ '-N', '--correct-name', action='store_true',
+ help='only list urls for which the name was correctly parsed')
+ excl_args.add_argument(
'-v', '--incorrect-version', action='store_true',
help='only list urls for which the version was incorrectly parsed')
+ excl_args.add_argument(
+ '-V', '--correct-version', action='store_true',
+ help='only list urls for which the version was correctly parsed')
- # Test
+ # Summary
sp.add_parser(
- 'test', help='print a summary of how well we are parsing package urls')
+ 'summary',
+ help='print a summary of how well we are parsing package urls')
def url(parser, args):
action = {
- 'parse': url_parse,
- 'list': url_list,
- 'test': url_test
+ 'parse': url_parse,
+ 'list': url_list,
+ 'summary': url_summary
}
action[args.subcommand](args)
@@ -116,6 +124,10 @@ def url_parse(args):
tty.msg('Spidering for versions:')
versions = find_versions_of_archive(url)
+ if not versions:
+ print(' Found no versions for {0}'.format(name))
+ return
+
max_len = max(len(str(v)) for v in versions)
for v in sorted(versions):
@@ -145,7 +157,7 @@ def url_list(args):
return len(urls)
-def url_test(args):
+def url_summary(args):
# Collect statistics on how many URLs were correctly parsed
total_urls = 0
correct_names = 0
@@ -205,19 +217,19 @@ def url_test(args):
correct_versions, total_urls, correct_versions / total_urls))
print()
- tty.msg('Statistics on name regular expresions:')
+ tty.msg('Statistics on name regular expressions:')
print()
- print(' Index Count Regular Expresion')
+ print(' Index Count Regular Expression')
for ni in name_regex_dict:
print(' {0:>3}: {1:>6} r{2!r}'.format(
ni, name_count_dict[ni], name_regex_dict[ni]))
print()
- tty.msg('Statistics on version regular expresions:')
+ tty.msg('Statistics on version regular expressions:')
print()
- print(' Index Count Regular Expresion')
+ print(' Index Count Regular Expression')
for vi in version_regex_dict:
print(' {0:>3}: {1:>6} r{2!r}'.format(
vi, version_count_dict[vi], version_regex_dict[vi]))
@@ -257,22 +269,38 @@ def url_list_parsing(args, urls, url, pkg):
:rtype: set
"""
if url:
- if args.incorrect_name:
- # Only add URLs whose name was incorrectly parsed
+ if args.correct_name or args.incorrect_name:
+ # Attempt to parse the name
try:
name = parse_name(url)
- if not name_parsed_correctly(pkg, name):
+ if (args.correct_name and
+ name_parsed_correctly(pkg, name)):
+ # Add correctly parsed URLs
+ urls.add(url)
+ elif (args.incorrect_name and
+ not name_parsed_correctly(pkg, name)):
+ # Add incorrectly parsed URLs
urls.add(url)
except UndetectableNameError:
- urls.add(url)
- elif args.incorrect_version:
- # Only add URLs whose version was incorrectly parsed
+ if args.incorrect_name:
+ # Add incorrectly parsed URLs
+ urls.add(url)
+ elif args.correct_version or args.incorrect_version:
+ # Attempt to parse the version
try:
version = parse_version(url)
- if not version_parsed_correctly(pkg, version):
+ if (args.correct_version and
+ version_parsed_correctly(pkg, version)):
+ # Add correctly parsed URLs
+ urls.add(url)
+ elif (args.incorrect_version and
+ not version_parsed_correctly(pkg, version)):
+ # Add incorrectly parsed URLs
urls.add(url)
except UndetectableVersionError:
- urls.add(url)
+ if args.incorrect_version:
+ # Add incorrectly parsed URLs
+ urls.add(url)
else:
urls.add(url)
@@ -289,6 +317,8 @@ def name_parsed_correctly(pkg, name):
"""
pkg_name = pkg.name
+ name = simplify_name(name)
+
# After determining a name, `spack create` determines a build system.
# Some build systems prepend a special string to the front of the name.
# Since this can't be guessed from the URL, it would be unfair to say
@@ -311,9 +341,33 @@ def version_parsed_correctly(pkg, version):
    :returns: True if the version was correctly parsed, else False
:rtype: bool
"""
+ version = remove_separators(version)
+
# If the version parsed from the URL is listed in a version()
# directive, we assume it was correctly parsed
for pkg_version in pkg.versions:
- if str(pkg_version) == str(version):
+ pkg_version = remove_separators(pkg_version)
+ if pkg_version == version:
return True
return False
+
+
+def remove_separators(version):
+ """Removes separator characters ('.', '_', and '-') from a version.
+
+ A version like 1.2.3 may be displayed as 1_2_3 in the URL.
+ Make sure 1.2.3, 1-2-3, 1_2_3, and 123 are considered equal.
+ Unfortunately, this also means that 1.23 and 12.3 are equal.
+
+ :param version: A version
+ :type version: str or Version
+ :returns: The version with all separator characters removed
+ :rtype: str
+ """
+ version = str(version)
+
+ version = version.replace('.', '')
+ version = version.replace('_', '')
+ version = version.replace('-', '')
+
+ return version
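
To make the docstring's equivalence concrete, here is the helper exercised standalone, including the documented collision caveat; a sketch:

    def remove_separators(version):
        # Strip '.', '_', and '-' so differently punctuated versions match.
        version = str(version)
        for sep in ('.', '_', '-'):
            version = version.replace(sep, '')
        return version

    assert remove_separators('1.2.3') == remove_separators('1_2_3') == '123'
    # The documented trade-off: distinct versions can collide.
    assert remove_separators('1.23') == remove_separators('12.3')
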
diff --git a/lib/spack/spack/cmd/versions.py b/lib/spack/spack/cmd/versions.py
index dacca2489b..a6f6805fb0 100644
--- a/lib/spack/spack/cmd/versions.py
+++ b/lib/spack/spack/cmd/versions.py
@@ -22,6 +22,8 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from __future__ import print_function
+
from llnl.util.tty.colify import colify
import llnl.util.tty as tty
import spack
@@ -47,10 +49,10 @@ def versions(parser, args):
tty.msg("Remote versions (not yet checksummed):")
if not remote_versions:
if not fetched_versions:
- print " Found no versions for %s" % pkg.name
+ print(" Found no versions for %s" % pkg.name)
tty.debug("Check the list_url and list_depth attribute on the "
"package to help Spack find versions.")
else:
- print " Found no unckecksummed versions for %s" % pkg.name
+ print(" Found no unchecksummed versions for %s" % pkg.name)
else:
colify(sorted(remote_versions, reverse=True), indent=2)
diff --git a/lib/spack/spack/compiler.py b/lib/spack/spack/compiler.py
index 9e9c7cbcb4..90af900d0d 100644
--- a/lib/spack/spack/compiler.py
+++ b/lib/spack/spack/compiler.py
@@ -265,11 +265,11 @@ class Compiler(object):
full_path, prefix, suffix = key
version = detect_version(full_path)
return (version, prefix, suffix, full_path)
- except ProcessError, e:
+ except ProcessError as e:
tty.debug(
"Couldn't get version for compiler %s" % full_path, e)
return None
- except Exception, e:
+ except Exception as e:
# Catching "Exception" here is fine because it just
# means something went wrong running a candidate executable.
tty.debug("Error while executing candidate compiler %s"
diff --git a/lib/spack/spack/compilers/__init__.py b/lib/spack/spack/compilers/__init__.py
index a16caa3a6c..585df23320 100644
--- a/lib/spack/spack/compilers/__init__.py
+++ b/lib/spack/spack/compilers/__init__.py
@@ -335,7 +335,7 @@ def get_compiler_duplicates(compiler_spec, arch_spec):
scope_to_compilers[scope] = compilers
cfg_file_to_duplicates = dict()
- for scope, compilers in scope_to_compilers.iteritems():
+ for scope, compilers in scope_to_compilers.items():
config_file = config_scopes[scope].get_section_filename('compilers')
cfg_file_to_duplicates[config_file] = compilers
@@ -401,7 +401,7 @@ class CompilerDuplicateError(spack.error.SpackError):
config_file_to_duplicates = get_compiler_duplicates(
compiler_spec, arch_spec)
duplicate_table = list(
- (x, len(y)) for x, y in config_file_to_duplicates.iteritems())
+ (x, len(y)) for x, y in config_file_to_duplicates.items())
descriptor = lambda num: 'time' if num == 1 else 'times'
duplicate_msg = (
lambda cfgfile, count: "{0}: {1} {2}".format(
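
dict.iteritems() does not exist on Python 3; six.iteritems(d) calls iteritems() on Python 2 and items() on Python 3, keeping the lazy iteration on 2 without breaking 3. An illustrative sketch:

    from six import iteritems

    cfg_file_to_duplicates = {'compilers.yaml': ['gcc@4.9', 'gcc@6.1']}
    for config_file, compilers in iteritems(cfg_file_to_duplicates):
        print('%s: %d duplicate entries' % (config_file, len(compilers)))
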
diff --git a/lib/spack/spack/concretize.py b/lib/spack/spack/concretize.py
index 126db8b780..2a5ce65fa4 100644
--- a/lib/spack/spack/concretize.py
+++ b/lib/spack/spack/concretize.py
@@ -34,86 +34,78 @@ TODO: make this customizable and allow users to configure
concretization policies.
"""
from __future__ import print_function
+from six import iteritems
+from spack.version import *
+from itertools import chain
+from ordereddict_backport import OrderedDict
+from functools_backport import reverse_order
+
import spack
import spack.spec
import spack.compilers
import spack.architecture
import spack.error
-from spack.version import *
-from functools import partial
-from itertools import chain
from spack.package_prefs import *
class DefaultConcretizer(object):
-
"""This class doesn't have any state, it just provides some methods for
concretization. You can subclass it to override just some of the
default concretization strategies, or you can override all of them.
"""
-
def _valid_virtuals_and_externals(self, spec):
"""Returns a list of candidate virtual dep providers and external
- packages that coiuld be used to concretize a spec."""
+        packages that could be used to concretize a spec.
+
+ Preferred specs come first in the list.
+ """
# First construct a list of concrete candidates to replace spec with.
candidates = [spec]
+ pref_key = lambda spec: 0 # no-op pref key
+
if spec.virtual:
- providers = spack.repo.providers_for(spec)
- if not providers:
- raise UnsatisfiableProviderSpecError(providers[0], spec)
- spec_w_preferred_providers = find_spec(
- spec,
- lambda x: pkgsort().spec_has_preferred_provider(
- x.name, spec.name))
- if not spec_w_preferred_providers:
- spec_w_preferred_providers = spec
- provider_cmp = partial(pkgsort().provider_compare,
- spec_w_preferred_providers.name,
- spec.name)
- candidates = sorted(providers, cmp=provider_cmp)
+ candidates = spack.repo.providers_for(spec)
+ if not candidates:
+ raise UnsatisfiableProviderSpecError(candidates[0], spec)
+
+ # Find nearest spec in the DAG (up then down) that has prefs.
+ spec_w_prefs = find_spec(
+ spec, lambda p: PackagePrefs.has_preferred_providers(
+ p.name, spec.name),
+ spec) # default to spec itself.
+
+ # Create a key to sort candidates by the prefs we found
+ pref_key = PackagePrefs(spec_w_prefs.name, 'providers', spec.name)
# For each candidate package, if it has externals, add those
# to the usable list. if it's not buildable, then *only* add
# the externals.
- usable = []
+ #
+ # Use an OrderedDict to avoid duplicates (use it like a set)
+ usable = OrderedDict()
for cspec in candidates:
if is_spec_buildable(cspec):
- usable.append(cspec)
+ usable[cspec] = True
+
externals = spec_externals(cspec)
for ext in externals:
if ext.satisfies(spec):
- usable.append(ext)
+ usable[ext] = True
# If nothing is in the usable list now, it's because we aren't
# allowed to build anything.
if not usable:
raise NoBuildError(spec)
- def cmp_externals(a, b):
- if a.name != b.name and (not a.external or a.external_module and
- not b.external and b.external_module):
- # We're choosing between different providers, so
- # maintain order from provider sort
- index_of_a = next(i for i in range(0, len(candidates))
- if a.satisfies(candidates[i]))
- index_of_b = next(i for i in range(0, len(candidates))
- if b.satisfies(candidates[i]))
- return index_of_a - index_of_b
-
- result = cmp_specs(a, b)
- if result != 0:
- return result
-
- # prefer external packages to internal packages.
- if a.external is None or b.external is None:
- return -cmp(a.external, b.external)
- else:
- return cmp(a.external, b.external)
-
- usable.sort(cmp=cmp_externals)
- return usable
+ # Use a sort key to order the results
+ return sorted(usable, key=lambda spec: (
+ not (spec.external or spec.external_module), # prefer externals
+ pref_key(spec), # respect prefs
+ spec.name, # group by name
+ reverse_order(spec.versions), # latest version
+ spec # natural order
+ ))
- # XXX(deptypes): Look here.
def choose_virtual_or_external(self, spec):
"""Given a list of candidate virtual and external packages, try to
find one that is most ABI compatible.
@@ -124,25 +116,16 @@ class DefaultConcretizer(object):
# Find the nearest spec in the dag that has a compiler. We'll
# use that spec to calibrate compiler compatibility.
- abi_exemplar = find_spec(spec, lambda x: x.compiler)
- if not abi_exemplar:
- abi_exemplar = spec.root
-
- # Make a list including ABI compatibility of specs with the exemplar.
- strict = [spack.abi.compatible(c, abi_exemplar) for c in candidates]
- loose = [spack.abi.compatible(c, abi_exemplar, loose=True)
- for c in candidates]
- keys = zip(strict, loose, candidates)
+ abi_exemplar = find_spec(spec, lambda x: x.compiler, spec.root)
# Sort candidates from most to least compatibility.
- # Note:
- # 1. We reverse because True > False.
- # 2. Sort is stable, so c's keep their order.
- keys.sort(key=lambda k: k[:2], reverse=True)
-
- # Pull the candidates back out and return them in order
- candidates = [c for s, l, c in keys]
- return candidates
+ # We reverse because True > False.
+ # Sort is stable, so candidates keep their order.
+ return sorted(candidates,
+ reverse=True,
+ key=lambda spec: (
+ spack.abi.compatible(spec, abi_exemplar, loose=True),
+ spack.abi.compatible(spec, abi_exemplar)))
def concretize_version(self, spec):
"""If the spec is already concrete, return. Otherwise take
@@ -162,26 +145,12 @@ class DefaultConcretizer(object):
if spec.versions.concrete:
return False
- # If there are known available versions, return the most recent
- # version that satisfies the spec
+ # List of versions we could consider, in sorted order
pkg = spec.package
+ usable = [v for v in pkg.versions
+ if any(v.satisfies(sv) for sv in spec.versions)]
- # ---------- Produce prioritized list of versions
- # Get list of preferences from packages.yaml
- preferred = pkgsort()
- # NOTE: pkgsort() == spack.package_prefs.PreferredPackages()
-
- yaml_specs = [
- x[0] for x in
- preferred._spec_for_pkgname(spec.name, 'version', None)]
- n = len(yaml_specs)
- yaml_index = dict(
- [(spc, n - index) for index, spc in enumerate(yaml_specs)])
-
- # List of versions we could consider, in sorted order
- unsorted_versions = [
- v for v in pkg.versions
- if any(v.satisfies(sv) for sv in spec.versions)]
+ yaml_prefs = PackagePrefs(spec.name, 'version')
# The keys below show the order of precedence of factors used
# to select a version when concretizing. The item with
@@ -189,12 +158,11 @@ class DefaultConcretizer(object):
#
# NOTE: When COMPARING VERSIONS, the '@develop' version is always
# larger than other versions. BUT when CONCRETIZING,
- # the largest NON-develop version is selected by
- # default.
- keys = [(
+ # the largest NON-develop version is selected by default.
+ keyfn = lambda v: (
# ------- Special direction from the user
# Respect order listed in packages.yaml
- yaml_index.get(v, -1),
+ -yaml_prefs(v),
# The preferred=True flag (packages or packages.yaml or both?)
pkg.versions.get(Version(v)).get('preferred', False),
@@ -209,15 +177,11 @@ class DefaultConcretizer(object):
# a) develop > everything (disabled by "not v.isdevelop() above)
# b) numeric > non-numeric
# c) Numeric or string comparison
- v) for v in unsorted_versions]
- keys.sort(reverse=True)
+ v)
+ usable.sort(key=keyfn, reverse=True)
- # List of versions in complete sorted order
- valid_versions = [x[-1] for x in keys]
- # --------------------------
-
- if valid_versions:
- spec.versions = ver([valid_versions[0]])
+ if usable:
+ spec.versions = ver([usable[0]])
else:
# We don't know of any SAFE versions that match the given
# spec. Grab the spec's versions and grab the highest
@@ -241,7 +205,7 @@ class DefaultConcretizer(object):
def concretize_architecture(self, spec):
"""If the spec is empty provide the defaults of the platform. If the
- architecture is not a basestring, then check if either the platform,
+ architecture is not a string type, then check if either the platform,
target or operating system are concretized. If any of the fields are
changed then return True. If everything is concretized (i.e the
architecture attribute is a namedtuple of classes) then return False.
@@ -258,16 +222,20 @@ class DefaultConcretizer(object):
spec.architecture = spack.spec.ArchSpec(sys_arch)
spec_changed = True
- default_archs = [root_arch, sys_arch]
- while not spec.architecture.concrete and default_archs:
- arch = default_archs.pop(0)
+ default_archs = list(x for x in [root_arch, sys_arch] if x)
+ for arch in default_archs:
+ if spec.architecture.concrete:
+ break
- replacement_fields = [k for k, v in arch.to_cmp_dict().iteritems()
+ replacement_fields = [k for k, v in iteritems(arch.to_cmp_dict())
if v and not getattr(spec.architecture, k)]
for field in replacement_fields:
setattr(spec.architecture, field, getattr(arch, field))
spec_changed = True
+ if not spec.architecture.concrete:
+ raise InsufficientArchitectureInfoError(spec, default_archs)
+
return spec_changed
def concretize_variants(self, spec):
@@ -276,16 +244,15 @@ class DefaultConcretizer(object):
the package specification.
"""
changed = False
- preferred_variants = pkgsort().spec_preferred_variants(
- spec.package_class.name)
+ preferred_variants = PackagePrefs.preferred_variants(spec.name)
for name, variant in spec.package_class.variants.items():
if name not in spec.variants:
changed = True
if name in preferred_variants:
spec.variants[name] = preferred_variants.get(name)
else:
- spec.variants[name] = \
- spack.spec.VariantSpec(name, variant.default)
+ spec.variants[name] = spack.spec.VariantSpec(
+ name, variant.default)
return changed
def concretize_compiler(self, spec):
@@ -327,12 +294,9 @@ class DefaultConcretizer(object):
spec.compiler, spec.architecture)
return False
- # Find the another spec that has a compiler, or the root if none do
+ # Find another spec that has a compiler, or the root if none do
other_spec = spec if spec.compiler else find_spec(
- spec, lambda x: x.compiler)
-
- if not other_spec:
- other_spec = spec.root
+ spec, lambda x: x.compiler, spec.root)
other_compiler = other_spec.compiler
assert(other_spec)
@@ -351,9 +315,9 @@ class DefaultConcretizer(object):
if not compiler_list:
# No compiler with a satisfactory spec was found
raise UnavailableCompilerVersionError(other_compiler)
- cmp_compilers = partial(
- pkgsort().compiler_compare, other_spec.name)
- matches = sorted(compiler_list, cmp=cmp_compilers)
+
+ ppk = PackagePrefs(other_spec.name, 'compiler')
+ matches = sorted(compiler_list, key=ppk)
# copy concrete version into other_compiler
try:
@@ -418,7 +382,7 @@ class DefaultConcretizer(object):
return ret
-def find_spec(spec, condition):
+def find_spec(spec, condition, default=None):
"""Searches the dag from spec in an intelligent order and looks
for a spec that matches a condition"""
# First search parents, then search children
@@ -445,7 +409,7 @@ def find_spec(spec, condition):
if condition(spec):
return spec
- return None # Nothing matched the condition.
+ return default # Nothing matched the condition; return default.
def _compiler_concretization_failure(compiler_spec, arch):
@@ -464,7 +428,7 @@ def _compiler_concretization_failure(compiler_spec, arch):
class NoCompilersForArchError(spack.error.SpackError):
def __init__(self, arch, available_os_targets):
err_msg = ("No compilers found"
- " for operating system %s and target %s."
+ " for operating system %s and target %s."
"\nIf previous installations have succeeded, the"
" operating system may have been updated." %
(arch.platform_os, arch.target))
@@ -483,7 +447,6 @@ class NoCompilersForArchError(spack.error.SpackError):
class UnavailableCompilerVersionError(spack.error.SpackError):
-
"""Raised when there is no available compiler that satisfies a
compiler spec."""
@@ -498,7 +461,6 @@ class UnavailableCompilerVersionError(spack.error.SpackError):
class NoValidVersionError(spack.error.SpackError):
-
"""Raised when there is no way to have a concrete version for a
particular spec."""
@@ -508,6 +470,17 @@ class NoValidVersionError(spack.error.SpackError):
% (spec.name, spec.versions))
+class InsufficientArchitectureInfoError(spack.error.SpackError):
+
+ """Raised when details on architecture cannot be collected from the
+ system"""
+
+ def __init__(self, spec, archs):
+ super(InsufficientArchitectureInfoError, self).__init__(
+ "Cannot determine necessary architecture information for '%s': %s"
+ % (spec.name, str(archs)))
+
+
class NoBuildError(spack.error.SpackError):
"""Raised when a package is configured with the buildable option False, but
no satisfactory external versions can be found"""
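
The dominant refactor in concretize.py: Python 3 dropped the cmp= argument to sort(), so the comparator functions above become key tuples. False sorts before True, so negating a predicate floats preferred items to the front; the real code wraps versions in reverse_order() since Version objects cannot be negated, but plain integers keep this sketch simple. Illustrative data:

    # Stand-ins for specs: (name, is_external, version)
    candidates = [('mpich', False, 3), ('openmpi', True, 2), ('mpich', True, 1)]

    ordered = sorted(candidates, key=lambda c: (
        not c[1],   # prefer externals: False sorts before True
        c[0],       # then group by name
        -c[2],      # then latest version first
    ))
    assert ordered == [('mpich', True, 1), ('openmpi', True, 2),
                       ('mpich', False, 3)]
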
diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py
index 56c6421457..7c3d614aee 100644
--- a/lib/spack/spack/config.py
+++ b/lib/spack/spack/config.py
@@ -52,6 +52,8 @@ import copy
import os
import re
import sys
+from six import string_types
+from six import iteritems
import yaml
import jsonschema
@@ -108,7 +110,7 @@ def extend_with_default(validator_class):
"patternProperties"]
def set_defaults(validator, properties, instance, schema):
- for property, subschema in properties.iteritems():
+ for property, subschema in iteritems(properties):
if "default" in subschema:
instance.setdefault(property, subschema["default"])
for err in validate_properties(
@@ -116,10 +118,10 @@ def extend_with_default(validator_class):
yield err
def set_pp_defaults(validator, properties, instance, schema):
- for property, subschema in properties.iteritems():
+ for property, subschema in iteritems(properties):
if "default" in subschema:
if isinstance(instance, dict):
- for key, val in instance.iteritems():
+ for key, val in iteritems(instance):
if re.match(property, key) and val is None:
instance[key] = subschema["default"]
@@ -306,8 +308,8 @@ def _mark_overrides(data):
elif isinstance(data, dict):
marked = {}
- for key, val in data.iteritems():
- if isinstance(key, basestring) and key.endswith(':'):
+ for key, val in iteritems(data):
+ if isinstance(key, string_types) and key.endswith(':'):
key = syaml.syaml_str(key[:-1])
key.override = True
marked[key] = _mark_overrides(val)
@@ -348,7 +350,7 @@ def _merge_yaml(dest, source):
# Source dict is merged into dest.
elif they_are(dict):
- for sk, sv in source.iteritems():
+ for sk, sv in iteritems(source):
if override(sk) or sk not in dest:
# if sk ended with ::, or if it's new, completely override
dest[sk] = copy.copy(sv)
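
basestring is gone in Python 3; six.string_types is (basestring,) on 2 and (str,) on 3, so isinstance checks stay portable. Applied to the override-key test above, a sketch:

    from six import string_types

    def is_override(key):
        # Keys ending in ':' mark full-override config entries.
        return isinstance(key, string_types) and key.endswith(':')

    assert is_override('packages:')
    assert not is_override(('packages',))
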
diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py
index d3fc03fb40..c63da4cf2e 100644
--- a/lib/spack/spack/database.py
+++ b/lib/spack/spack/database.py
@@ -41,6 +41,8 @@ filesystem.
"""
import os
import socket
+from six import string_types
+from six import iteritems
from yaml.error import MarkedYAMLError, YAMLError
@@ -260,7 +262,7 @@ class Database(object):
raise ValueError("Invalid database format: %s" % format)
try:
- if isinstance(stream, basestring):
+ if isinstance(stream, string_types):
with open(stream, 'r') as f:
fdata = load(f)
else:
@@ -511,7 +513,7 @@ class Database(object):
new_spec, path, installed, ref_count=0, explicit=explicit)
# Connect dependencies from the DB to the new copy.
- for name, dep in spec.dependencies_dict(_tracked_deps).iteritems():
+ for name, dep in iteritems(spec.dependencies_dict(_tracked_deps)):
dkey = dep.spec.dag_hash()
new_spec._add_dependency(self._data[dkey].spec, dep.deptypes)
self._data[dkey].ref_count += 1
@@ -619,13 +621,12 @@ class Database(object):
Return the specs of all packages that extend
the given spec
"""
- for s in self.query():
+ for spec in self.query():
try:
- if s.package.extends(extendee_spec):
- yield s.package
- except spack.repository.UnknownPackageError:
+ spack.store.layout.check_activated(extendee_spec, spec)
+ yield spec.package
+ except spack.directory_layout.NoSuchExtensionError:
continue
- # skips unknown packages
# TODO: conditional way to do this instead of catching exceptions
def query(self, query_spec=any, known=any, installed=True, explicit=any):
diff --git a/lib/spack/spack/directives.py b/lib/spack/spack/directives.py
index 58eabb9e3b..e2219d1f49 100644
--- a/lib/spack/spack/directives.py
+++ b/lib/spack/spack/directives.py
@@ -51,6 +51,7 @@ import functools
import inspect
import os.path
import re
+from six import string_types
import llnl.util.lang
import spack
@@ -174,7 +175,7 @@ class DirectiveMetaMixin(type):
"""
global __all__
- if isinstance(dicts, basestring):
+ if isinstance(dicts, string_types):
dicts = (dicts, )
if not isinstance(dicts, collections.Sequence):
message = "dicts arg must be list, tuple, or string. Found {0}"
@@ -262,6 +263,33 @@ def _depends_on(pkg, spec, when=None, type=None):
conditions[when_spec] = dep_spec
+@directive('conflicts')
+def conflicts(conflict_spec, when=None):
+ """Allows a package to define a conflict, i.e. a concretized configuration
+ that is known to be non-valid.
+
+    For example, a package that is known not to be buildable with Intel
+    compilers can declare:
+
+ conflicts('%intel')
+
+ To express the same constraint only when the 'foo' variant is activated:
+
+ conflicts('%intel', when='+foo')
+
+ :param conflict_spec: constraint defining the known conflict
+ :param when: optional constraint that triggers the conflict
+ """
+ def _execute(pkg):
+ # If when is not specified the conflict always holds
+ condition = pkg.name if when is None else when
+ when_spec = parse_anonymous_spec(condition, pkg.name)
+
+ when_spec_list = pkg.conflicts.setdefault(conflict_spec, [])
+ when_spec_list.append(when_spec)
+ return _execute
+
+
@directive(('dependencies', 'dependency_types'))
def depends_on(spec, when=None, type=None):
"""Creates a dict of deps with specs defining when they apply.
diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py
index b84ee3be5b..9d09875484 100644
--- a/lib/spack/spack/directory_layout.py
+++ b/lib/spack/spack/directory_layout.py
@@ -23,7 +23,6 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
-import exceptions
import shutil
import glob
import tempfile
@@ -138,7 +137,7 @@ class DirectoryLayout(object):
if os.path.exists(path):
try:
shutil.rmtree(path)
- except exceptions.OSError as e:
+ except OSError as e:
raise RemoveFailedError(spec, path, e)
path = os.path.dirname(path)
diff --git a/lib/spack/spack/environment.py b/lib/spack/spack/environment.py
index 1333054518..eadfa45efb 100644
--- a/lib/spack/spack/environment.py
+++ b/lib/spack/spack/environment.py
@@ -291,7 +291,7 @@ class EnvironmentModifications(object):
shell_options = '{shell_options}'.format(**info)
source_file = '{source_command} {file} {concatenate_on_success}'
- dump_cmd = "import os, json; print json.dumps(dict(os.environ))"
+ dump_cmd = "import os, json; print(json.dumps(dict(os.environ)))"
dump_environment = 'python -c "%s"' % dump_cmd
# Construct the command that will be executed
@@ -310,7 +310,7 @@ class EnvironmentModifications(object):
proc.wait()
if proc.returncode != 0:
raise RuntimeError('sourcing files returned a non-zero exit code')
- output = ''.join([line for line in proc.stdout])
+ output = ''.join([line.decode('utf-8') for line in proc.stdout])
# Construct a dictionaries of the environment before and after
# sourcing the files, so that we can diff them.
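
Two Python 3 details are layered here: print must be a function inside the 'python -c' dump command, and the child's stdout arrives as bytes and needs decoding. A standalone sketch of the same dump-and-diff trick:

    import json
    import subprocess

    # Ask a child interpreter to dump its environment as JSON.
    dump_cmd = "import os, json; print(json.dumps(dict(os.environ)))"
    proc = subprocess.Popen(['python', '-c', dump_cmd],
                            stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    env = json.loads(out.decode('utf-8'))  # bytes on Python 3, str on Python 2
    print('%d environment variables captured' % len(env))
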
diff --git a/lib/spack/spack/error.py b/lib/spack/spack/error.py
index b6261a05f4..cd1ae5b25c 100644
--- a/lib/spack/spack/error.py
+++ b/lib/spack/spack/error.py
@@ -22,8 +22,11 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from __future__ import print_function
+
import os
import sys
+
import llnl.util.tty as tty
import spack
import inspect
diff --git a/lib/spack/spack/fetch_strategy.py b/lib/spack/spack/fetch_strategy.py
index d510db568f..38752b3fc1 100644
--- a/lib/spack/spack/fetch_strategy.py
+++ b/lib/spack/spack/fetch_strategy.py
@@ -46,6 +46,9 @@ import re
import shutil
import copy
from functools import wraps
+from six import string_types
+from six import with_metaclass
+
import llnl.util.tty as tty
from llnl.util.filesystem import *
import spack
@@ -74,21 +77,19 @@ def _needs_stage(fun):
return wrapper
-class FetchStrategy(object):
+class FSMeta(type):
+ """This metaclass registers all fetch strategies in a list."""
+ def __init__(cls, name, bases, dict):
+ type.__init__(cls, name, bases, dict)
+ if cls.enabled:
+ all_strategies.append(cls)
+
+class FetchStrategy(with_metaclass(FSMeta, object)):
"""Superclass of all fetch strategies."""
enabled = False # Non-abstract subclasses should be enabled.
required_attributes = None # Attributes required in version() args.
- class __metaclass__(type):
-
- """This metaclass registers all fetch strategies in a list."""
-
- def __init__(cls, name, bases, dict):
- type.__init__(cls, name, bases, dict)
- if cls.enabled:
- all_strategies.append(cls)
-
def __init__(self):
# The stage is initialized late, so that fetch strategies can be
# constructed at package construction time. This is where things
@@ -319,7 +320,7 @@ class URLFetchStrategy(FetchStrategy):
            # top-level directory. We ignore hidden files to accommodate
# these "semi-exploding" tarballs.
files = os.listdir(tarball_container)
- non_hidden = filter(lambda f: not f.startswith('.'), files)
+ non_hidden = [f for f in files if not f.startswith('.')]
if len(non_hidden) == 1:
expanded_dir = os.path.join(tarball_container, non_hidden[0])
if os.path.isdir(expanded_dir):
@@ -461,7 +462,7 @@ class VCSFetchStrategy(FetchStrategy):
patterns = kwargs.get('exclude', None)
if patterns is not None:
- if isinstance(patterns, basestring):
+ if isinstance(patterns, string_types):
patterns = [patterns]
for p in patterns:
tar.add_default_arg('--exclude=%s' % p)
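
Python 3 ignores the Python-2-only __metaclass__ class attribute, which is why the registration metaclass is hoisted out and applied through six.with_metaclass: it builds a temporary base class so a single class statement works under both metaclass syntaxes. A reduced sketch of the registry pattern:

    from six import with_metaclass

    all_strategies = []

    class FSMeta(type):
        """Record every enabled subclass at class-definition time."""
        def __init__(cls, name, bases, dct):
            type.__init__(cls, name, bases, dct)
            if cls.enabled:
                all_strategies.append(cls)

    class FetchStrategy(with_metaclass(FSMeta, object)):
        enabled = False  # the abstract base stays out of the registry

    class URLFetchStrategy(FetchStrategy):
        enabled = True

    assert all_strategies == [URLFetchStrategy]
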
diff --git a/lib/spack/spack/graph.py b/lib/spack/spack/graph.py
index 91230263f1..04e6cc7fca 100644
--- a/lib/spack/spack/graph.py
+++ b/lib/spack/spack/graph.py
@@ -63,6 +63,7 @@ can take a number of specs as input.
"""
from heapq import *
+from six import iteritems
from llnl.util.lang import *
from llnl.util.tty.color import *
@@ -562,7 +563,7 @@ def graph_dot(specs, deptype=None, static=False, out=None):
continue
# Add edges for each depends_on in the package.
- for dep_name, dep in spec.package.dependencies.iteritems():
+ for dep_name, dep in iteritems(spec.package.dependencies):
deps.add((spec.name, dep_name))
# If the package provides something, add an edge for that.
diff --git a/lib/spack/spack/hooks/case_consistency.py b/lib/spack/spack/hooks/case_consistency.py
index faf38f7ae3..2b88291666 100644
--- a/lib/spack/spack/hooks/case_consistency.py
+++ b/lib/spack/spack/hooks/case_consistency.py
@@ -23,6 +23,7 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from __future__ import absolute_import
+
import os
import re
import platform
@@ -30,6 +31,7 @@ import platform
from llnl.util.filesystem import *
import spack
+from spack.cmd.pkg import get_git
from spack.util.executable import *
@@ -60,8 +62,8 @@ def git_case_consistency_check(path):
"""
with working_dir(path):
# Don't bother fixing case if Spack isn't in a git repository
- git = which('git')
- if not git:
+ git = get_git(fatal=False)
+ if git is None:
return
try:
diff --git a/lib/spack/spack/hooks/module_file_generation.py b/lib/spack/spack/hooks/module_file_generation.py
index 445cea4e91..ff9617ff1c 100644
--- a/lib/spack/spack/hooks/module_file_generation.py
+++ b/lib/spack/spack/hooks/module_file_generation.py
@@ -23,15 +23,16 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import spack.modules
+from six import iteritems
def post_install(pkg):
- for item, cls in spack.modules.module_types.iteritems():
+ for item, cls in iteritems(spack.modules.module_types):
generator = cls(pkg.spec)
generator.write()
def post_uninstall(pkg):
- for item, cls in spack.modules.module_types.iteritems():
+ for item, cls in iteritems(spack.modules.module_types):
generator = cls(pkg.spec)
generator.remove()
diff --git a/lib/spack/spack/modules.py b/lib/spack/spack/modules.py
index a6ffded935..8c702f1111 100644
--- a/lib/spack/spack/modules.py
+++ b/lib/spack/spack/modules.py
@@ -46,6 +46,8 @@ import os.path
import re
import string
import textwrap
+from six import iteritems
+from six import with_metaclass
import llnl.util.tty as tty
from llnl.util.filesystem import join_path, mkdirp
@@ -213,7 +215,7 @@ def parse_config_options(module_generator):
for x in arglist:
yield (x, )
else:
- for x in arglist.iteritems():
+ for x in iteritems(arglist):
yield x
for method, arglist in environment_actions.items():
@@ -246,17 +248,17 @@ def format_env_var_name(name):
return name.replace('-', '_').upper()
-class EnvModule(object):
- name = 'env_module'
- formats = {}
+class ModuleMeta(type):
+ """Metaclass registers modules in themodule_types dict."""
+ def __init__(cls, name, bases, dict):
+ type.__init__(cls, name, bases, dict)
+ if cls.name != 'env_module' and cls.name in _module_config['enable']:
+ module_types[cls.name] = cls
- class __metaclass__(type):
- def __init__(cls, name, bases, dict):
- type.__init__(cls, name, bases, dict)
- if cls.name != 'env_module' and cls.name in _module_config[
- 'enable']:
- module_types[cls.name] = cls
+class EnvModule(with_metaclass(ModuleMeta, object)):
+ name = 'env_module'
+ formats = {}
def __init__(self, spec=None):
self.spec = spec
diff --git a/lib/spack/spack/operating_systems/cnl.py b/lib/spack/spack/operating_systems/cnl.py
index 7acab1cbcb..b5c759bbcb 100644
--- a/lib/spack/spack/operating_systems/cnl.py
+++ b/lib/spack/spack/operating_systems/cnl.py
@@ -54,7 +54,7 @@ class Cnl(OperatingSystem):
# ensure all the version calls we made are cached in the parent
# process, as well. This speeds up Spack a lot.
- clist = reduce(lambda x, y: x + y, compiler_lists)
+ clist = [comp for cl in compiler_lists for comp in cl]
return clist
def find_compiler(self, cmp_cls, *paths):
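
The builtin reduce() moved to functools in Python 3, and repeated list concatenation is quadratic anyway; a nested comprehension flattens in one pass on either interpreter. A sketch with illustrative data:

    compiler_lists = [['gcc@4.9', 'gcc@6.1'], [], ['intel@17.0']]

    clist = [comp for cl in compiler_lists for comp in cl]
    assert clist == ['gcc@4.9', 'gcc@6.1', 'intel@17.0']
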
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 80d65bd739..177b4c908b 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -42,6 +42,9 @@ import re
import sys
import textwrap
import time
+from six import StringIO
+from six import string_types
+from six import with_metaclass
import llnl.util.lock
import llnl.util.tty as tty
@@ -56,7 +59,7 @@ import spack.mirror
import spack.repository
import spack.url
import spack.util.web
-from StringIO import StringIO
+
from llnl.util.filesystem import *
from llnl.util.lang import *
from llnl.util.link_tree import LinkTree
@@ -238,7 +241,7 @@ def on_package_attributes(**attr_dict):
return _execute_under_condition
-class PackageBase(object):
+class PackageBase(with_metaclass(PackageMeta, object)):
"""This is the superclass for all spack packages.
***The Package class***
@@ -475,7 +478,6 @@ class PackageBase(object):
Package creators override functions like install() (all of them do this),
clean() (some of them do this), and others to provide custom behavior.
"""
- __metaclass__ = PackageMeta
#
# These are default values for instance variables.
#
@@ -568,7 +570,7 @@ class PackageBase(object):
self.list_url = None
if not hasattr(self, 'list_depth'):
- self.list_depth = 1
+ self.list_depth = 0
# Set default licensing information
if not hasattr(self, 'license_required'):
@@ -964,6 +966,10 @@ class PackageBase(object):
self.stage.expand_archive()
self.stage.chdir_to_source()
+ def patch(self):
+ """Default patch implementation is a no-op."""
+ pass
+
def do_patch(self):
"""Calls do_stage(), then applied patches to the expanded tarball if they
haven't been applied already."""
@@ -1115,6 +1121,13 @@ class PackageBase(object):
finally:
self.prefix_lock.release_write()
+ @contextlib.contextmanager
+ def _stage_and_write_lock(self):
+ """Prefix lock nested in a stage."""
+ with self.stage:
+ with self._prefix_write_lock():
+ yield
+
def do_install(self,
keep_prefix=False,
keep_stage=False,
@@ -1233,7 +1246,7 @@ class PackageBase(object):
self.stage.keep = keep_stage
- with contextlib.nested(self.stage, self._prefix_write_lock()):
+ with self._stage_and_write_lock():
# Run the pre-install hook in the child process after
# the directory is created.
spack.hooks.pre_install(self)
@@ -1265,9 +1278,10 @@ class PackageBase(object):
input_stream=input_stream
)
with redirection_context as log_redirection:
- for phase_name, phase in zip(self.phases, self._InstallPhase_phases): # NOQA: ignore=E501
+ for phase_name, phase in zip(
+ self.phases, self._InstallPhase_phases):
tty.msg(
- 'Executing phase : \'{0}\''.format(phase_name) # NOQA: ignore=E501
+ 'Executing phase : \'{0}\''.format(phase_name)
)
# Redirect stdout and stderr to daemon pipe
with log_redirection:
@@ -1355,7 +1369,7 @@ class PackageBase(object):
"""This function checks whether install succeeded."""
def check_paths(path_list, filetype, predicate):
- if isinstance(path_list, basestring):
+ if isinstance(path_list, string_types):
path_list = [path_list]
for path in path_list:
@@ -1676,9 +1690,7 @@ class PackageBase(object):
try:
return spack.util.web.find_versions_of_archive(
- *self.all_urls,
- list_url=self.list_url,
- list_depth=self.list_depth)
+ self.all_urls, self.list_url, self.list_depth)
except spack.error.NoNetworkConnectionError as e:
tty.die("Package.fetch_versions couldn't connect to:", e.url,
e.message)
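
contextlib.nested() was removed in Python 3, so the two managers are nested inside one generator-based context manager instead (_stage_and_write_lock above). A self-contained sketch with dummy managers:

    import contextlib

    @contextlib.contextmanager
    def stage():
        print('stage: enter')
        yield
        print('stage: exit')

    @contextlib.contextmanager
    def prefix_write_lock():
        print('lock: acquire')
        yield
        print('lock: release')

    @contextlib.contextmanager
    def stage_and_write_lock():
        # Plain lexical nesting replaces contextlib.nested().
        with stage():
            with prefix_write_lock():
                yield

    with stage_and_write_lock():
        print('running install phases')
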
diff --git a/lib/spack/spack/package_prefs.py b/lib/spack/spack/package_prefs.py
index 63f90d9b50..f9dac2bef0 100644
--- a/lib/spack/spack/package_prefs.py
+++ b/lib/spack/spack/package_prefs.py
@@ -22,12 +22,25 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+from six import string_types
+from six import iteritems
+
+from llnl.util.lang import classproperty
import spack
import spack.error
from spack.version import *
+_lesser_spec_types = {'compiler': spack.spec.CompilerSpec,
+ 'version': VersionList}
+
+
+def _spec_type(component):
+ """Map from component name to spec type for package prefs."""
+ return _lesser_spec_types.get(component, spack.spec.Spec)
+
+
def get_packages_config():
"""Wrapper around get_packages_config() to validate semantics."""
config = spack.config.get_config('packages')
@@ -49,177 +62,141 @@ def get_packages_config():
return config
-class PreferredPackages(object):
- def __init__(self):
- self.preferred = get_packages_config()
- self._spec_for_pkgname_cache = {}
+class PackagePrefs(object):
+ """Defines the sort order for a set of specs.
+
+    Spack's package preference implementation uses PackagePrefs objects to
+ define sort order. The PackagePrefs class looks at Spack's
+ packages.yaml configuration and, when called on a spec, returns a key
+ that can be used to sort that spec in order of the user's
+ preferences.
+
+ You can use it like this:
+
+ # key function sorts CompilerSpecs for `mpich` in order of preference
+ kf = PackagePrefs('mpich', 'compiler')
+ compiler_list.sort(key=kf)
+
+ Or like this:
+
+ # key function to sort VersionLists for OpenMPI in order of preference.
+ kf = PackagePrefs('openmpi', 'version')
+ version_list.sort(key=kf)
- # Given a package name, sort component (e.g, version, compiler, ...), and
- # a second_key (used by providers), return the list
- def _order_for_package(self, pkgname, component, second_key,
- test_all=True):
+ Optionally, you can sort in order of preferred virtual dependency
+ providers. To do that, provide 'providers' and a third argument
+ denoting the virtual package (e.g., ``mpi``):
+
+ kf = PackagePrefs('trilinos', 'providers', 'mpi')
+ provider_spec_list.sort(key=kf)
+
+ """
+ _packages_config_cache = None
+ _spec_cache = {}
+
+ def __init__(self, pkgname, component, vpkg=None):
+ self.pkgname = pkgname
+ self.component = component
+ self.vpkg = vpkg
+
+ def __call__(self, spec):
+ """Return a key object (an index) that can be used to sort spec.
+
+ Sort is done in package order. We don't cache the result of
+ this function as Python's sort functions already ensure that the
+ key function is called at most once per sorted element.
+ """
+ spec_order = self._specs_for_pkg(
+ self.pkgname, self.component, self.vpkg)
+
+ # integer is the index of the first spec in order that satisfies
+ # spec, or it's a number larger than any position in the order.
+ return next(
+ (i for i, s in enumerate(spec_order) if spec.satisfies(s)),
+ len(spec_order))
+
+ @classproperty
+ @classmethod
+ def _packages_config(cls):
+ if cls._packages_config_cache is None:
+ cls._packages_config_cache = get_packages_config()
+ return cls._packages_config_cache
+
+ @classmethod
+ def _order_for_package(cls, pkgname, component, vpkg=None, all=True):
+ """Given a package name, sort component (e.g, version, compiler, ...),
+ and an optional vpkg, return the list from the packages config.
+ """
pkglist = [pkgname]
- if test_all:
+ if all:
pkglist.append('all')
+
for pkg in pkglist:
- order = self.preferred.get(pkg, {}).get(component, {})
- if isinstance(order, dict) and second_key:
- order = order.get(second_key, {})
+ pkg_entry = cls._packages_config.get(pkg)
+ if not pkg_entry:
+ continue
+
+ order = pkg_entry.get(component)
if not order:
continue
- return [str(s).strip() for s in order]
+
+ # vpkg is one more level
+ if vpkg is not None:
+ order = order.get(vpkg)
+
+ if order:
+ return [str(s).strip() for s in order]
+
return []
- # A generic sorting function. Given a package name and sort
- # component, return less-than-0, 0, or greater-than-0 if
- # a is respectively less-than, equal to, or greater than b.
- def _component_compare(self, pkgname, component, a, b,
- reverse_natural_compare, second_key):
- if a is None:
- return -1
- if b is None:
- return 1
- orderlist = self._order_for_package(pkgname, component, second_key)
- a_in_list = str(a) in orderlist
- b_in_list = str(b) in orderlist
- if a_in_list and not b_in_list:
- return -1
- elif b_in_list and not a_in_list:
- return 1
-
- cmp_a = None
- cmp_b = None
- reverse = None
- if not a_in_list and not b_in_list:
- cmp_a = a
- cmp_b = b
- reverse = -1 if reverse_natural_compare else 1
- else:
- cmp_a = orderlist.index(str(a))
- cmp_b = orderlist.index(str(b))
- reverse = 1
-
- if cmp_a < cmp_b:
- return -1 * reverse
- elif cmp_a > cmp_b:
- return 1 * reverse
- else:
- return 0
-
- # A sorting function for specs. Similar to component_compare, but
- # a and b are considered to match entries in the sorting list if they
- # satisfy the list component.
- def _spec_compare(self, pkgname, component, a, b,
- reverse_natural_compare, second_key):
- if not a or (not a.concrete and not second_key):
- return -1
- if not b or (not b.concrete and not second_key):
- return 1
- specs = self._spec_for_pkgname(pkgname, component, second_key)
- a_index = None
- b_index = None
- reverse = -1 if reverse_natural_compare else 1
- for i, cspec in enumerate(specs):
- if a_index is None and (cspec.satisfies(a) or a.satisfies(cspec)):
- a_index = i
- if b_index:
- break
- if b_index is None and (cspec.satisfies(b) or b.satisfies(cspec)):
- b_index = i
- if a_index:
- break
-
- if a_index is not None and b_index is None:
- return -1
- elif a_index is None and b_index is not None:
- return 1
- elif a_index is not None and b_index == a_index:
- return -1 * cmp(a, b)
- elif (a_index is not None and b_index is not None and
- a_index != b_index):
- return cmp(a_index, b_index)
- else:
- return cmp(a, b) * reverse
-
- # Given a sort order specified by the pkgname/component/second_key, return
- # a list of CompilerSpecs, VersionLists, or Specs for that sorting list.
- def _spec_for_pkgname(self, pkgname, component, second_key):
- key = (pkgname, component, second_key)
- if key not in self._spec_for_pkgname_cache:
- pkglist = self._order_for_package(pkgname, component, second_key)
- if component == 'compiler':
- self._spec_for_pkgname_cache[key] = \
- [spack.spec.CompilerSpec(s) for s in pkglist]
- elif component == 'version':
- self._spec_for_pkgname_cache[key] = \
- [VersionList(s) for s in pkglist]
- else:
- self._spec_for_pkgname_cache[key] = \
- [spack.spec.Spec(s) for s in pkglist]
- return self._spec_for_pkgname_cache[key]
-
- def provider_compare(self, pkgname, provider_str, a, b):
- """Return less-than-0, 0, or greater than 0 if a is respecively
- less-than, equal-to, or greater-than b. A and b are possible
- implementations of provider_str. One provider is less-than another
- if it is preferred over the other. For example,
- provider_compare('scorep', 'mpi', 'mvapich', 'openmpi') would
- return -1 if mvapich should be preferred over openmpi for scorep."""
- return self._spec_compare(pkgname, 'providers', a, b, False,
- provider_str)
-
- def spec_has_preferred_provider(self, pkgname, provider_str):
- """Return True iff the named package has a list of preferred
- providers"""
- return bool(self._order_for_package(pkgname, 'providers',
- provider_str, False))
-
- def spec_preferred_variants(self, pkgname):
- """Return a VariantMap of preferred variants and their values"""
- for pkg in (pkgname, 'all'):
- variants = self.preferred.get(pkg, {}).get('variants', '')
+ @classmethod
+ def _specs_for_pkg(cls, pkgname, component, vpkg=None):
+ """Given a sort order specified by the pkgname/component/second_key,
+ return a list of CompilerSpecs, VersionLists, or Specs for
+ that sorting list.
+ """
+ key = (pkgname, component, vpkg)
+
+ specs = cls._spec_cache.get(key)
+ if specs is None:
+ pkglist = cls._order_for_package(pkgname, component, vpkg)
+ spec_type = _spec_type(component)
+ specs = [spec_type(s) for s in pkglist]
+ cls._spec_cache[key] = specs
+
+ return specs
+
+ @classmethod
+ def clear_caches(cls):
+ cls._packages_config_cache = None
+ cls._spec_cache = {}
+
+ @classmethod
+ def has_preferred_providers(cls, pkgname, vpkg):
+ """Whether specific package has a preferred vpkg providers."""
+ return bool(cls._order_for_package(pkgname, 'providers', vpkg, False))
+
+ @classmethod
+ def preferred_variants(cls, pkg_name):
+ """Return a VariantMap of preferred variants/values for a spec."""
+ for pkg in (pkg_name, 'all'):
+ variants = cls._packages_config.get(pkg, {}).get('variants', '')
if variants:
break
- if not isinstance(variants, basestring):
+
+ # allow variants to be list or string
+ if not isinstance(variants, string_types):
variants = " ".join(variants)
- pkg = spack.repo.get(pkgname)
- spec = spack.spec.Spec("%s %s" % (pkgname, variants))
+
# Only return variants that are actually supported by the package
+ pkg = spack.repo.get(pkg_name)
+ spec = spack.spec.Spec("%s %s" % (pkg_name, variants))
return dict((name, variant) for name, variant in spec.variants.items()
if name in pkg.variants)
- def version_compare(self, pkgname, a, b):
- """Return less-than-0, 0, or greater than 0 if version a of pkgname is
- respectively less-than, equal-to, or greater-than version b of
- pkgname. One version is less-than another if it is preferred over
- the other."""
- return self._spec_compare(pkgname, 'version', a, b, True, None)
-
- def variant_compare(self, pkgname, a, b):
- """Return less-than-0, 0, or greater than 0 if variant a of pkgname is
- respectively less-than, equal-to, or greater-than variant b of
- pkgname. One variant is less-than another if it is preferred over
- the other."""
- return self._component_compare(pkgname, 'variant', a, b, False, None)
-
- def architecture_compare(self, pkgname, a, b):
- """Return less-than-0, 0, or greater than 0 if architecture a of pkgname
- is respectively less-than, equal-to, or greater-than architecture b
- of pkgname. One architecture is less-than another if it is preferred
- over the other."""
- return self._component_compare(pkgname, 'architecture', a, b,
- False, None)
-
- def compiler_compare(self, pkgname, a, b):
- """Return less-than-0, 0, or greater than 0 if compiler a of pkgname is
- respecively less-than, equal-to, or greater-than compiler b of
- pkgname. One compiler is less-than another if it is preferred over
- the other."""
- return self._spec_compare(pkgname, 'compiler', a, b, False, None)
-
def spec_externals(spec):
- """Return a list of external specs (with external directory path filled in),
+ """Return a list of external specs (w/external directory path filled in),
one for each known external installation."""
# break circular import.
from spack.build_environment import get_path_from_module
@@ -233,7 +210,7 @@ def spec_externals(spec):
if (not pkg_paths) and (not pkg_modules):
return []
- for external_spec, path in pkg_paths.iteritems():
+ for external_spec, path in iteritems(pkg_paths):
if not path:
# skip entries without paths (avoid creating extra Specs)
continue
@@ -242,7 +219,7 @@ def spec_externals(spec):
if external_spec.satisfies(spec):
external_specs.append(external_spec)
- for external_spec, module in pkg_modules.iteritems():
+ for external_spec, module in iteritems(pkg_modules):
if not module:
continue
@@ -253,7 +230,8 @@ def spec_externals(spec):
if external_spec.satisfies(spec):
external_specs.append(external_spec)
- return external_specs
+ # defensively copy returned specs
+ return [s.copy() for s in external_specs]
def is_spec_buildable(spec):
@@ -266,50 +244,5 @@ def is_spec_buildable(spec):
return allpkgs[spec.name]['buildable']
-def cmp_specs(lhs, rhs):
- # Package name sort order is not configurable, always goes alphabetical
- if lhs.name != rhs.name:
- return cmp(lhs.name, rhs.name)
-
- # Package version is second in compare order
- pkgname = lhs.name
- if lhs.versions != rhs.versions:
- return pkgsort().version_compare(
- pkgname, lhs.versions, rhs.versions)
-
- # Compiler is third
- if lhs.compiler != rhs.compiler:
- return pkgsort().compiler_compare(
- pkgname, lhs.compiler, rhs.compiler)
-
- # Variants
- if lhs.variants != rhs.variants:
- return pkgsort().variant_compare(
- pkgname, lhs.variants, rhs.variants)
-
- # Architecture
- if lhs.architecture != rhs.architecture:
- return pkgsort().architecture_compare(
- pkgname, lhs.architecture, rhs.architecture)
-
- # Dependency is not configurable
- lhash, rhash = hash(lhs), hash(rhs)
- if lhash != rhash:
- return -1 if lhash < rhash else 1
-
- # Equal specs
- return 0
-
-
-_pkgsort = None
-
-
-def pkgsort():
- global _pkgsort
- if _pkgsort is None:
- _pkgsort = PreferredPackages()
- return _pkgsort
-
-
class VirtualInPackagesYAMLError(spack.error.SpackError):
"""Raised when a disallowed virtual is found in packages.yaml"""
diff --git a/lib/spack/spack/package_test.py b/lib/spack/spack/package_test.py
index e366b5f0e5..54f424d45e 100644
--- a/lib/spack/spack/package_test.py
+++ b/lib/spack/spack/package_test.py
@@ -45,15 +45,15 @@ def compile_c_and_execute(source_file, include_flags, link_flags):
def compare_output(current_output, blessed_output):
"""Compare blessed and current output of executables."""
if not (current_output == blessed_output):
- print "Produced output does not match expected output."
- print "Expected output:"
- print '-' * 80
- print blessed_output
- print '-' * 80
- print "Produced output:"
- print '-' * 80
- print current_output
- print '-' * 80
+ print("Produced output does not match expected output.")
+ print("Expected output:")
+ print('-' * 80)
+ print(blessed_output)
+ print('-' * 80)
+ print("Produced output:")
+ print('-' * 80)
+ print(current_output)
+ print('-' * 80)
raise RuntimeError("Ouput check failed.",
"See spack_output.log for details")
diff --git a/lib/spack/spack/parse.py b/lib/spack/spack/parse.py
index e116175823..880bb09b4e 100644
--- a/lib/spack/spack/parse.py
+++ b/lib/spack/spack/parse.py
@@ -25,6 +25,8 @@
import re
import shlex
import itertools
+from six import string_types
+
import spack.error
@@ -46,9 +48,8 @@ class Token:
def is_a(self, type):
return self.type == type
- def __cmp__(self, other):
- return cmp((self.type, self.value),
- (other.type, other.value))
+ def __eq__(self, other):
+ return (self.type == other.type) and (self.value == other.value)
class Lexer(object):
@@ -118,7 +119,7 @@ class Parser(object):
def gettok(self):
"""Puts the next token in the input stream into self.next."""
try:
- self.next = self.tokens.next()
+ self.next = next(self.tokens)
except StopIteration:
self.next = None
@@ -159,7 +160,7 @@ class Parser(object):
sys.exit(1)
def setup(self, text):
- if isinstance(text, basestring):
+ if isinstance(text, string_types):
text = shlex.split(text)
self.text = text
self.push_tokens(self.lexer.lex(text))
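
Python 3 ignores __cmp__ entirely, so Token's comparison is respelled as __eq__ above. One subtlety worth noting: on Python 2, != does not automatically invert __eq__, so a fully portable class usually defines __ne__ as well; a sketch:

    class Token(object):
        def __init__(self, type, value):
            self.type = type
            self.value = value

        def __eq__(self, other):
            return (self.type == other.type) and (self.value == other.value)

        def __ne__(self, other):
            # Needed on Python 2, where != does not fall back to __eq__.
            return not self == other

    assert Token('ID', 'mpich') == Token('ID', 'mpich')
    assert Token('ID', 'mpich') != Token('ID', 'openmpi')
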
diff --git a/lib/spack/spack/provider_index.py b/lib/spack/spack/provider_index.py
index 0e771c6255..8d64d100b1 100644
--- a/lib/spack/spack/provider_index.py
+++ b/lib/spack/spack/provider_index.py
@@ -26,6 +26,7 @@
The ``virtual`` module contains utility classes for virtual dependencies.
"""
from itertools import product as iproduct
+from six import iteritems
from pprint import pformat
import spack.util.spack_yaml as syaml
@@ -97,7 +98,7 @@ class ProviderIndex(object):
assert(not spec.virtual)
pkg = spec.package
- for provided_spec, provider_specs in pkg.provided.iteritems():
+ for provided_spec, provider_specs in iteritems(pkg.provided):
for provider_spec in provider_specs:
# TODO: fix this comment.
# We want satisfaction other than flags
@@ -145,8 +146,8 @@ class ProviderIndex(object):
if p_spec.satisfies(vspec, deps=False):
providers.update(spec_set)
- # Return providers in order
- return sorted(providers)
+ # Return providers in order. Defensively copy.
+ return sorted(s.copy() for s in providers)
# TODO: this is pretty darned nasty, and inefficient, but there
# are not that many vdeps in most specs.
@@ -201,7 +202,7 @@ class ProviderIndex(object):
def from_yaml(stream):
try:
yfile = syaml.load(stream)
- except MarkedYAMLError, e:
+ except MarkedYAMLError as e:
raise spack.spec.SpackYAMLError(
"error parsing YAML ProviderIndex cache:", str(e))
@@ -288,7 +289,7 @@ def _transform(providers, transform_fun, out_mapping_type=dict):
"""
def mapiter(mappings):
if isinstance(mappings, dict):
- return mappings.iteritems()
+ return iteritems(mappings)
else:
return iter(mappings)
diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py
index 1536ecb0e6..5486f7a9a4 100644
--- a/lib/spack/spack/repository.py
+++ b/lib/spack/spack/repository.py
@@ -26,7 +26,6 @@ import os
import stat
import shutil
import errno
-import exceptions
import sys
import inspect
import imp
@@ -558,7 +557,7 @@ class Repo(object):
return yaml_data['repo']
- except exceptions.IOError:
+ except IOError:
tty.die("Error reading %s when opening %s"
% (self.config_file, self.root))
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index b2fae9fd8e..534bc6c2d3 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -96,15 +96,16 @@ specs to avoid ambiguity. Both are provided because ~ can cause shell
expansion when it is the first character in an id typed on the command line.
"""
import base64
+import sys
import collections
-import csv
import ctypes
import hashlib
import itertools
from operator import attrgetter
+from six import StringIO
+from six import string_types
+from six import iteritems
-import cStringIO
-import llnl.util.tty as tty
import spack
import spack.architecture
import spack.compilers as compilers
@@ -113,7 +114,7 @@ import spack.parse
import spack.store
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
-from cStringIO import StringIO
+
from llnl.util.filesystem import find_libraries
from llnl.util.lang import *
from llnl.util.tty.color import *
@@ -157,6 +158,7 @@ __all__ = [
'UnsatisfiableDependencySpecError',
'AmbiguousHashError',
'InvalidHashError',
+ 'NoSuchHashError',
'RedundantSpecError']
# Valid pattern for an identifier in Spack
@@ -222,7 +224,7 @@ def canonical_deptype(deptype):
if deptype is None:
return alldeps
- elif isinstance(deptype, str):
+ elif isinstance(deptype, string_types):
return special_types.get(deptype, (deptype,))
elif isinstance(deptype, (tuple, list)):
@@ -270,7 +272,7 @@ class ArchSpec(object):
spec_like = args[0]
if isinstance(spec_like, ArchSpec):
self._dup(spec_like)
- elif isinstance(spec_like, basestring):
+ elif isinstance(spec_like, string_types):
spec_fields = spec_like.split("-")
if len(spec_fields) == 3:
@@ -391,7 +393,7 @@ class ArchSpec(object):
raise UnsatisfiableArchitectureSpecError(self, other)
constrained = False
- for attr, svalue in self.to_cmp_dict().iteritems():
+ for attr, svalue in iteritems(self.to_cmp_dict()):
ovalue = getattr(other, attr)
if svalue is None and ovalue is not None:
setattr(self, attr, ovalue)
@@ -406,7 +408,7 @@ class ArchSpec(object):
@property
def concrete(self):
- return all(v for k, v in self.to_cmp_dict().iteritems())
+ return all(v for k, v in iteritems(self.to_cmp_dict()))
def to_cmp_dict(self):
"""Returns a dictionary that can be used for field comparison."""
@@ -464,7 +466,7 @@ class CompilerSpec(object):
arg = args[0]
# If there is one argument, it's either another CompilerSpec
# to copy or a string to parse
- if isinstance(arg, basestring):
+ if isinstance(arg, string_types):
c = SpecParser().parse_compiler(arg)
self.name = c.name
self.versions = c.versions
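The basestring-to-string_types substitution seen here follows one pattern everywhere; a minimal sketch with a hypothetical helper (not Spack code):

    from six import string_types

    def parse_if_string(spec_like):
        # string_types is (str, unicode) on Python 2 and (str,) on
        # Python 3, replacing isinstance(x, basestring), which
        # Python 3 removed.
        if isinstance(spec_like, string_types):
            return spec_like.strip()
        raise TypeError("expected a string, got %s" % type(spec_like))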
@@ -579,8 +581,11 @@ class DependencySpec(object):
self.deptypes = tuple(sorted(set(deptypes)))
def update_deptypes(self, deptypes):
- deptypes = tuple(sorted(set(deptypes)))
+ deptypes = set(deptypes)
+ deptypes.update(self.deptypes)
+ deptypes = tuple(sorted(deptypes))
changed = self.deptypes != deptypes
+
self.deptypes = deptypes
return changed
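The behavioral change in update_deptypes, reduced to a standalone sketch with invented deptypes: incoming types are now unioned with the existing ones instead of replacing them.

    existing = ('build', 'link')
    incoming = ('run',)

    # Union the new deptypes with the old, then canonicalize as a
    # sorted tuple so comparisons are order-independent.
    merged = set(incoming)
    merged.update(existing)
    merged = tuple(sorted(merged))

    changed = merged != existing
    print(merged, changed)   # ('build', 'link', 'run') True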
@@ -728,11 +733,10 @@ class FlagMap(HashableMap):
return clone
def _cmp_key(self):
- return tuple((k, tuple(v)) for k, v in sorted(self.iteritems()))
+ return tuple((k, tuple(v)) for k, v in sorted(iteritems(self)))
def __str__(self):
- sorted_keys = filter(
- lambda flag: self[flag] != [], sorted(self.keys()))
+ sorted_keys = [k for k in sorted(self.keys()) if self[k] != []]
cond_symbol = ' ' if len(sorted_keys) > 0 else ''
return cond_symbol + ' '.join(
str(key) + '=\"' + ' '.join(
@@ -918,7 +922,7 @@ class Spec(object):
return
# Parse if the spec_like is a string.
- if not isinstance(spec_like, basestring):
+ if not isinstance(spec_like, string_types):
raise TypeError("Can't make spec out of %s" % type(spec_like))
spec_list = SpecParser().parse(spec_like)
@@ -1018,9 +1022,9 @@ class Spec(object):
if name in self.variants:
raise DuplicateVariantError(
"Cannot specify variant '%s' twice" % name)
- if isinstance(value, basestring) and value.upper() == 'TRUE':
+ if isinstance(value, string_types) and value.upper() == 'TRUE':
value = True
- elif isinstance(value, basestring) and value.upper() == 'FALSE':
+ elif isinstance(value, string_types) and value.upper() == 'FALSE':
value = False
self.variants[name] = VariantSpec(name, value)
@@ -1056,7 +1060,7 @@ class Spec(object):
new_vals = tuple(kwargs.get(arg, None) for arg in arch_attrs)
self.architecture = ArchSpec(*new_vals)
else:
- new_attrvals = [(a, v) for a, v in kwargs.iteritems()
+ new_attrvals = [(a, v) for a, v in iteritems(kwargs)
if a in arch_attrs]
for new_attr, new_value in new_attrvals:
if getattr(self.architecture, new_attr):
@@ -1219,7 +1223,7 @@ class Spec(object):
# get initial values for kwargs
depth = kwargs.get('depth', False)
key_fun = kwargs.get('key', id)
- if isinstance(key_fun, basestring):
+ if isinstance(key_fun, string_types):
key_fun = attrgetter(key_fun)
yield_root = kwargs.get('root', True)
cover = kwargs.get('cover', 'nodes')
@@ -1314,8 +1318,12 @@ class Spec(object):
else:
yaml_text = syaml.dump(
self.to_node_dict(), default_flow_style=True, width=maxint)
- sha = hashlib.sha1(yaml_text)
+ sha = hashlib.sha1(yaml_text.encode('utf-8'))
+
b32_hash = base64.b32encode(sha.digest()).lower()
+ if sys.version_info[0] >= 3:
+ b32_hash = b32_hash.decode('utf-8')
+
if self.concrete:
self._hash = b32_hash
return b32_hash[:length]
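The hashing fix follows from two Python 3 changes: hashlib only accepts bytes, and base64 returns bytes. A minimal sketch, using a stand-in string for the spec's YAML text:

    import base64
    import hashlib
    import sys

    yaml_text = 'name: mpileaks'   # stand-in for the real node-dict YAML

    # hashlib.sha1 rejects str on Python 3, so encode to bytes first.
    sha = hashlib.sha1(yaml_text.encode('utf-8'))
    b32_hash = base64.b32encode(sha.digest()).lower()

    # b32encode returns bytes on Python 3; decode to get a str hash.
    if sys.version_info[0] >= 3:
        b32_hash = b32_hash.decode('utf-8')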
@@ -1421,7 +1429,7 @@ class Spec(object):
formats so that reindex will work on old specs/databases.
"""
for dep_name, elt in dependency_dict.items():
- if isinstance(elt, basestring):
+ if isinstance(elt, string_types):
# original format, elt is just the dependency hash.
dag_hash, deptypes = elt, ['build', 'link']
elif isinstance(elt, tuple):
@@ -1566,14 +1574,12 @@ class Spec(object):
a problem.
"""
# Make an index of stuff this spec already provides
- # XXX(deptype): 'link' and 'run'?
self_index = ProviderIndex(self.traverse(), restrict=True)
changed = False
done = False
while not done:
done = True
- # XXX(deptype): 'link' and 'run'?
for spec in list(self.traverse()):
replacement = None
if spec.virtual:
@@ -1599,7 +1605,7 @@ class Spec(object):
# Replace spec with the candidate and normalize
copy = self.copy()
- copy[spec.name]._dup(replacement.copy(deps=False))
+ copy[spec.name]._dup(replacement, deps=False)
try:
# If there are duplicate providers or duplicate
@@ -1701,6 +1707,18 @@ class Spec(object):
# Mark everything in the spec as concrete, as well.
self._mark_concrete()
+ # Now that the spec is concrete we should check if
+ # there are declared conflicts
+ matches = []
+ for x in self.traverse():
+ for conflict_spec, when_list in x.package.conflicts.items():
+ if x.satisfies(conflict_spec):
+ for when_spec in when_list:
+ if x.satisfies(when_spec):
+ matches.append((x, conflict_spec, when_spec))
+ if matches:
+ raise ConflictsInSpecError(self, matches)
+
def _mark_concrete(self, value=True):
"""Mark this spec and its dependencies as concrete.
@@ -1798,6 +1816,8 @@ class Spec(object):
dependency already in this spec.
"""
assert(vdep.virtual)
+
+ # note that this defensively copies.
providers = provider_index.providers_for(vdep)
# If there is a provider for the vpkg, then use that instead of
@@ -1827,6 +1847,10 @@ class Spec(object):
provider_index):
"""Merge the dependency into this spec.
+    Caller should assume that this routine owns the dep parameter
+ (i.e. it needs to be a copy of any internal structures like
+ dependencies on Package class objects).
+
This is the core of normalize(). There are some basic steps:
* If dep is virtual, evaluate whether it corresponds to an
@@ -1839,6 +1863,7 @@ class Spec(object):
constraints into this spec.
This method returns True if the spec was changed, False otherwise.
+
"""
changed = False
@@ -1851,7 +1876,8 @@ class Spec(object):
dep = provider
else:
index = ProviderIndex([dep], restrict=True)
- for vspec in (v for v in spec_deps.values() if v.virtual):
+ items = list(spec_deps.items())
+ for name, vspec in items:
if index.providers_for(vspec):
vspec._replace_with(dep)
del spec_deps[vspec.name]
@@ -1862,29 +1888,23 @@ class Spec(object):
raise UnsatisfiableProviderSpecError(required[0], dep)
provider_index.update(dep)
- # If the spec isn't already in the set of dependencies, clone
- # it from the package description.
+ # If the spec isn't already in the set of dependencies, add it.
+ # Note: dep is always owned by this method. If it's from the
+ # caller, it's a copy from _evaluate_dependency_conditions. If it
+ # comes from a vdep, it's a defensive copy from _find_provider.
if dep.name not in spec_deps:
- spec_deps[dep.name] = dep.copy()
+ spec_deps[dep.name] = dep
changed = True
else:
- dspec = spec_deps[dep.name]
- if self.name not in dspec._dependents:
- self._add_dependency(dspec, deptypes)
- else:
- dependent = dspec._dependents[self.name]
- changed = dependent.update_deptypes(deptypes)
-
- # Constrain package information with spec info
- try:
- changed |= spec_deps[dep.name].constrain(dep)
-
- except UnsatisfiableSpecError as e:
- e.message = "Invalid spec: '%s'. "
- e.message += "Package %s requires %s %s, but spec asked for %s"
- e.message %= (spec_deps[dep.name], dep.name,
- e.constraint_type, e.required, e.provided)
- raise e
+ # merge package/vdep information into spec
+ try:
+ changed |= spec_deps[dep.name].constrain(dep)
+ except UnsatisfiableSpecError as e:
+ e.message = "Invalid spec: '%s'. "
+ e.message += "Package %s requires %s %s, but spec asked for %s"
+ e.message %= (spec_deps[dep.name], dep.name,
+ e.constraint_type, e.required, e.provided)
+ raise e
# Add merged spec to my deps and recurse
dependency = spec_deps[dep.name]
@@ -2094,6 +2114,9 @@ class Spec(object):
changed = False
for name in self.common_dependencies(other):
changed |= self[name].constrain(other[name], deps=False)
+ if name in self._dependencies:
+ changed |= self._dependencies[name].update_deptypes(
+ other._dependencies[name].deptypes)
# Update with additional constraints from other spec
for name in other.dep_difference(self):
@@ -2166,7 +2189,13 @@ class Spec(object):
# A concrete provider can satisfy a virtual dependency.
if not self.virtual and other.virtual:
- pkg = spack.repo.get(self.fullname)
+ try:
+ pkg = spack.repo.get(self.fullname)
+ except spack.repository.PackageLoadError:
+ # If we can't get package info on this spec, don't treat
+ # it as a provider of this vdep.
+ return False
+
if pkg.provides(other.name):
for provided, when_specs in pkg.provided.items():
if any(self.satisfies(when_spec, deps=False, strict=strict)
@@ -2219,7 +2248,7 @@ class Spec(object):
# If we need to descend into dependencies, do it, otherwise we're done.
if deps:
deps_strict = strict
- if self.concrete and not other.name:
+ if self._concrete and not other.name:
# We're dealing with existing specs
deps_strict = True
return self.satisfies_dependencies(other, strict=deps_strict)
@@ -2320,9 +2349,6 @@ class Spec(object):
self.external_module = other.external_module
self.namespace = other.namespace
- self.external = other.external
- self.external_module = other.external_module
-
# If we copy dependencies, preserve DAG structure in the new spec
if deps:
deptypes = alldeps # by default copy all deptypes
@@ -2336,6 +2362,7 @@ class Spec(object):
# These fields are all cached results of expensive operations.
# If we preserved the original structure, we can copy them
# safely. If not, they need to be recomputed.
+ # TODO: dependency hashes can be copied more aggressively.
if deps is True or deps == alldeps:
self._hash = other._hash
self._cmp_key_cache = other._cmp_key_cache
@@ -2407,11 +2434,8 @@ class Spec(object):
if query_parameters:
# We have extra query parameters, which are comma separated
# values
- f = cStringIO.StringIO(query_parameters.pop())
- try:
- query_parameters = next(csv.reader(f, skipinitialspace=True))
- except StopIteration:
- query_parameters = ['']
+ csv = query_parameters.pop().strip()
+ query_parameters = re.split(r'\s*,\s*', csv)
try:
value = next(
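The replacement for the csv.reader round-trip is a plain regex split; for simple comma-separated query parameters the two behave the same, including the empty-input case, which yields ['']. The values here are invented:

    import re

    raw = ' blas, lapack ,fftw '
    params = re.split(r'\s*,\s*', raw.strip())
    print(params)   # ['blas', 'lapack', 'fftw']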
@@ -2721,41 +2745,6 @@ class Spec(object):
def dep_string(self):
return ''.join("^" + dep.format() for dep in self.sorted_deps())
- def __cmp__(self, other):
- from package_prefs import pkgsort
-
- # Package name sort order is not configurable, always goes alphabetical
- if self.name != other.name:
- return cmp(self.name, other.name)
-
- # Package version is second in compare order
- pkgname = self.name
- if self.versions != other.versions:
- return pkgsort().version_compare(
- pkgname, self.versions, other.versions)
-
- # Compiler is third
- if self.compiler != other.compiler:
- return pkgsort().compiler_compare(
- pkgname, self.compiler, other.compiler)
-
- # Variants
- if self.variants != other.variants:
- return pkgsort().variant_compare(
- pkgname, self.variants, other.variants)
-
- # Target
- if self.architecture != other.architecture:
- return pkgsort().architecture_compare(
- pkgname, self.architecture, other.architecture)
-
- # Dependency is not configurable
- if self._dependencies != other._dependencies:
- return -1 if self._dependencies < other._dependencies else 1
-
- # Equal specs
- return 0
-
def __str__(self):
ret = self.format() + self.dep_string()
return ret.strip()
@@ -2975,8 +2964,7 @@ class SpecParser(spack.parse.Parser):
spec.dag_hash()[:len(self.token.value)] == self.token.value]
if not matches:
- tty.die("%s does not match any installed packages." %
- self.token.value)
+ raise NoSuchHashError(self.token.value)
if len(matches) != 1:
raise AmbiguousHashError(
@@ -3348,9 +3336,27 @@ class InvalidHashError(SpecError):
% (hash, spec))
+class NoSuchHashError(SpecError):
+ def __init__(self, hash):
+ super(NoSuchHashError, self).__init__(
+            "No installed spec matches the hash: '%s'" % hash)
+
+
class RedundantSpecError(SpecError):
def __init__(self, spec, addition):
super(RedundantSpecError, self).__init__(
"Attempting to add %s to spec %s which is already concrete."
" This is likely the result of adding to a spec specified by hash."
% (addition, spec))
+
+
+class ConflictsInSpecError(SpecError, RuntimeError):
+ def __init__(self, spec, matches):
+ message = 'Conflicts in concretized spec "{0}"\n'.format(
+ spec.short_spec
+ )
+ long_message = 'List of matching conflicts:\n\n'
+ match_fmt = '{0}. "{1}" conflicts with "{2}" in spec "{3}"\n'
+ for idx, (s, c, w) in enumerate(matches):
+ long_message += match_fmt.format(idx + 1, c, w, s)
+ super(ConflictsInSpecError, self).__init__(message, long_message)
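A toy model of the new post-concretization conflict check: every node is tested against each declared conflict spec and, on a match, against the corresponding 'when' specs. The satisfies() stand-in below is a crude substring test, not Spack's real matcher, and the specs are invented:

    conflicts = {'%clang': ['+foo']}   # conflict spec -> list of when specs

    def satisfies(spec, constraint):
        return constraint in spec      # stand-in for Spec.satisfies()

    spec = 'conflict%clang+foo'
    matches = [(spec, c, w)
               for c, when_list in conflicts.items() if satisfies(spec, c)
               for w in when_list if satisfies(spec, w)]
    print(matches)   # [('conflict%clang+foo', '%clang', '+foo')]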
diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py
index 91f77839d8..21db3d75c2 100644
--- a/lib/spack/spack/stage.py
+++ b/lib/spack/spack/stage.py
@@ -29,18 +29,19 @@ import hashlib
import shutil
import tempfile
import getpass
-from urlparse import urljoin
+from six import string_types
+from six import iteritems
+from six.moves.urllib.parse import urljoin
import llnl.util.tty as tty
import llnl.util.lock
from llnl.util.filesystem import *
-import spack.util.pattern as pattern
-
import spack
import spack.config
-import spack.fetch_strategy as fs
import spack.error
+import spack.fetch_strategy as fs
+import spack.util.pattern as pattern
from spack.version import *
from spack.util.path import canonicalize_path
from spack.util.crypto import prefix_bits, bit_length
@@ -84,7 +85,7 @@ def get_tmp_root():
if _tmp_root is None:
config = spack.config.get_config('config')
candidates = config['build_stage']
- if isinstance(candidates, basestring):
+ if isinstance(candidates, string_types):
candidates = [candidates]
path = _first_accessible_path(candidates)
@@ -188,7 +189,7 @@ class Stage(object):
"""
# TODO: fetch/stage coupling needs to be reworked -- the logic
# TODO: here is convoluted and not modular enough.
- if isinstance(url_or_fetch_strategy, basestring):
+ if isinstance(url_or_fetch_strategy, string_types):
self.fetcher = fs.from_url(url_or_fetch_strategy)
elif isinstance(url_or_fetch_strategy, fs.FetchStrategy):
self.fetcher = url_or_fetch_strategy
@@ -225,7 +226,7 @@ class Stage(object):
self._lock = None
if lock:
if self.name not in Stage.stage_locks:
- sha1 = hashlib.sha1(self.name).digest()
+ sha1 = hashlib.sha1(self.name.encode('utf-8')).digest()
lock_id = prefix_bits(sha1, bit_length(sys.maxsize))
stage_lock_path = join_path(spack.stage_path, '.lock')
@@ -548,7 +549,7 @@ class ResourceStage(Stage):
if not isinstance(placement, dict):
placement = {'': placement}
# Make the paths in the dictionary absolute and link
- for key, value in placement.iteritems():
+ for key, value in iteritems(placement):
target_path = join_path(
root_stage.source_path, resource.destination)
destination_path = join_path(target_path, value)
@@ -661,7 +662,7 @@ class DIYStage(object):
def _get_mirrors():
"""Get mirrors from spack configuration."""
config = spack.config.get_config('mirrors')
- return [val for name, val in config.iteritems()]
+ return [val for name, val in iteritems(config)]
def ensure_access(file=spack.stage_path):
@@ -689,5 +690,6 @@ class RestageError(StageError):
class ChdirError(StageError):
"""Raised when Spack can't change directories."""
+
# Keep this in namespace for convenience
FailedDownloadError = fs.FailedDownloadError
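six.moves supplies the renamed stdlib module used above: it resolves to urlparse on Python 2 and urllib.parse on Python 3, so call sites stay identical across both.

    from six.moves.urllib.parse import urljoin

    print(urljoin('http://example.com/mirror/', 'foo-1.0.tar.gz'))
    # -> http://example.com/mirror/foo-1.0.tar.gz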
diff --git a/lib/spack/spack/test/architecture.py b/lib/spack/spack/test/architecture.py
index fb4113361c..8f257cf0dc 100644
--- a/lib/spack/spack/test/architecture.py
+++ b/lib/spack/spack/test/architecture.py
@@ -138,8 +138,8 @@ def test_user_defaults(config):
def test_user_input_combination(config):
platform = spack.architecture.platform()
- os_list = platform.operating_sys.keys()
- target_list = platform.targets.keys()
+ os_list = list(platform.operating_sys.keys())
+ target_list = list(platform.targets.keys())
additional = ["fe", "be", "frontend", "backend"]
os_list.extend(additional)
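The list() wrappers are needed because Python 3's dict.keys() returns a view object with no list methods. A minimal sketch with made-up platform data:

    platform_oses = {'centos6': None, 'sles11': None}

    os_list = list(platform_oses.keys())    # views have no .extend()
    os_list.extend(['frontend', 'backend'])
    print(sorted(os_list))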
diff --git a/lib/spack/spack/test/build_system_guess.py b/lib/spack/spack/test/build_system_guess.py
index 82bf1964b2..e6fb84b37d 100644
--- a/lib/spack/spack/test/build_system_guess.py
+++ b/lib/spack/spack/test/build_system_guess.py
@@ -38,6 +38,8 @@ import spack.stage
('setup.py', 'python'),
('NAMESPACE', 'r'),
('WORKSPACE', 'bazel'),
+ ('Makefile.PL', 'perlmake'),
+ ('Build.PL', 'perlbuild'),
('foobar', 'generic')
]
)
diff --git a/lib/spack/spack/test/cmd/install.py b/lib/spack/spack/test/cmd/install.py
index 304eb04a55..b57d39b441 100644
--- a/lib/spack/spack/test/cmd/install.py
+++ b/lib/spack/spack/test/cmd/install.py
@@ -22,19 +22,19 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-import StringIO
import argparse
import codecs
import collections
import contextlib
import unittest
+from six import StringIO
import llnl.util.filesystem
import spack
import spack.cmd
import spack.cmd.install as install
-FILE_REGISTRY = collections.defaultdict(StringIO.StringIO)
+FILE_REGISTRY = collections.defaultdict(StringIO)
# Monkey-patch open to write module files to a StringIO instance
@@ -44,7 +44,7 @@ def mock_open(filename, mode, *args):
message = 'test.test_install : unexpected opening mode for mock_open'
raise RuntimeError(message)
- FILE_REGISTRY[filename] = StringIO.StringIO()
+ FILE_REGISTRY[filename] = StringIO()
try:
yield FILE_REGISTRY[filename]
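The registry pattern used by this test reduces to the sketch below: a defaultdict keyed by filename hands out one StringIO buffer per file, acting as an in-memory filesystem. The filename and contents are invented:

    import collections
    from six import StringIO

    FILE_REGISTRY = collections.defaultdict(StringIO)

    # First access creates the buffer; later accesses reuse it.
    FILE_REGISTRY['mpileaks.lua'].write('whatis([[example]])')
    print(FILE_REGISTRY['mpileaks.lua'].getvalue())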
diff --git a/lib/spack/spack/test/cmd/url.py b/lib/spack/spack/test/cmd/url.py
index 4c60d814ce..3bc0bc7820 100644
--- a/lib/spack/spack/test/cmd/url.py
+++ b/lib/spack/spack/test/cmd/url.py
@@ -48,11 +48,12 @@ def test_name_parsed_correctly():
assert name_parsed_correctly(MyPackage('r-devtools', []), 'devtools')
assert name_parsed_correctly(MyPackage('py-numpy', []), 'numpy')
assert name_parsed_correctly(MyPackage('octave-splines', []), 'splines')
+ assert name_parsed_correctly(MyPackage('imagemagick', []), 'ImageMagick') # noqa
+ assert name_parsed_correctly(MyPackage('th-data', []), 'TH.data')
# Expected False
assert not name_parsed_correctly(MyPackage('', []), 'hdf5')
assert not name_parsed_correctly(MyPackage('hdf5', []), '')
- assert not name_parsed_correctly(MyPackage('imagemagick', []), 'ImageMagick') # noqa
assert not name_parsed_correctly(MyPackage('yaml-cpp', []), 'yamlcpp')
assert not name_parsed_correctly(MyPackage('yamlcpp', []), 'yaml-cpp')
assert not name_parsed_correctly(MyPackage('r-py-parser', []), 'parser')
@@ -64,6 +65,8 @@ def test_version_parsed_correctly():
assert version_parsed_correctly(MyPackage('', ['1.2.3']), '1.2.3')
assert version_parsed_correctly(MyPackage('', ['5.4a', '5.4b']), '5.4a')
assert version_parsed_correctly(MyPackage('', ['5.4a', '5.4b']), '5.4b')
+ assert version_parsed_correctly(MyPackage('', ['1.63.0']), '1_63_0')
+ assert version_parsed_correctly(MyPackage('', ['0.94h']), '094h')
# Expected False
assert not version_parsed_correctly(MyPackage('', []), '1.2.3')
@@ -95,7 +98,7 @@ def test_url_list(parser):
colored_urls = url_list(args)
assert colored_urls == total_urls
- # The following two options should print fewer URLs than the default.
+ # The following options should print fewer URLs than the default.
# If they print the same number of URLs, something is horribly broken.
# If they say we missed 0 URLs, something is probably broken too.
args = parser.parse_args(['list', '--incorrect-name'])
@@ -106,11 +109,19 @@ def test_url_list(parser):
incorrect_version_urls = url_list(args)
assert 0 < incorrect_version_urls < total_urls
+ args = parser.parse_args(['list', '--correct-name'])
+ correct_name_urls = url_list(args)
+ assert 0 < correct_name_urls < total_urls
-def test_url_test(parser):
- args = parser.parse_args(['test'])
+ args = parser.parse_args(['list', '--correct-version'])
+ correct_version_urls = url_list(args)
+ assert 0 < correct_version_urls < total_urls
+
+
+def test_url_summary(parser):
+ args = parser.parse_args(['summary'])
(total_urls, correct_names, correct_versions,
- name_count_dict, version_count_dict) = url_test(args)
+ name_count_dict, version_count_dict) = url_summary(args)
assert 0 < correct_names <= sum(name_count_dict.values()) <= total_urls # noqa
assert 0 < correct_versions <= sum(version_count_dict.values()) <= total_urls # noqa
diff --git a/lib/spack/spack/test/compilers.py b/lib/spack/spack/test/compilers.py
index d0fc506f40..bc21ec886e 100644
--- a/lib/spack/spack/test/compilers.py
+++ b/lib/spack/spack/test/compilers.py
@@ -23,6 +23,7 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import pytest
+from six import iteritems
import spack.spec
import spack.compilers as compilers
@@ -30,7 +31,7 @@ import spack.compilers as compilers
@pytest.mark.usefixtures('config')
class TestCompilers(object):
-
+
def test_get_compiler_duplicates(self):
# In this case there is only one instance of the specified compiler in
# the test configuration (so it is not actually a duplicate), but the
@@ -38,11 +39,11 @@ class TestCompilers(object):
cfg_file_to_duplicates = compilers.get_compiler_duplicates(
'gcc@4.5.0', spack.spec.ArchSpec('cray-CNL-xeon'))
assert len(cfg_file_to_duplicates) == 1
- cfg_file, duplicates = cfg_file_to_duplicates.iteritems().next()
+ cfg_file, duplicates = next(iteritems(cfg_file_to_duplicates))
assert len(duplicates) == 1
def test_all_compilers(self):
all_compilers = compilers.all_compilers()
- filtered = list(x for x in all_compilers if str(x.spec) == 'clang@3.3')
- filtered = list(x for x in filtered if x.operating_system == 'SuSE11')
+ filtered = [x for x in all_compilers if str(x.spec) == 'clang@3.3']
+ filtered = [x for x in filtered if x.operating_system == 'SuSE11']
assert len(filtered) == 1
diff --git a/lib/spack/spack/test/concretize.py b/lib/spack/spack/test/concretize.py
index f4021a89ee..3b383584ce 100644
--- a/lib/spack/spack/test/concretize.py
+++ b/lib/spack/spack/test/concretize.py
@@ -26,7 +26,7 @@ import pytest
import spack
import spack.architecture
from spack.concretize import find_spec
-from spack.spec import Spec, CompilerSpec
+from spack.spec import Spec, CompilerSpec, ConflictsInSpecError, SpecError
from spack.version import ver
@@ -82,6 +82,10 @@ def check_concretize(abstract_spec):
'mpileaks ^mpi', 'mpileaks ^mpi@:1.1', 'mpileaks ^mpi@2:',
'mpileaks ^mpi@2.1', 'mpileaks ^mpi@2.2', 'mpileaks ^mpi@2.2',
-        'mpileaks ^mpi@:1', 'mpileaks ^mpi@1.2:2'
+        'mpileaks ^mpi@:1', 'mpileaks ^mpi@1.2:2',
+ # conflict not triggered
+ 'conflict',
+ 'conflict%clang~foo',
+ 'conflict-parent%gcc'
]
)
def spec(request):
@@ -89,6 +93,19 @@ def spec(request):
return request.param
+@pytest.fixture(
+ params=[
+ 'conflict%clang',
+ 'conflict%clang+foo',
+ 'conflict-parent%clang',
+ 'conflict-parent@0.9^conflict~foo'
+ ]
+)
+def conflict_spec(request):
+ """Spec to be concretized"""
+ return request.param
+
+
@pytest.mark.usefixtures('config', 'builtin_mock')
class TestConcretize(object):
def test_concretize(self, spec):
@@ -372,3 +389,11 @@ class TestConcretize(object):
s.concretize()
assert s['mpileaks'].satisfies('%clang')
assert s['dyninst'].satisfies('%gcc')
+
+ def test_conflicts_in_spec(self, conflict_spec):
+        # Check that an exception is raised and caught by the appropriate
+ # exception types.
+ for exc_type in (ConflictsInSpecError, RuntimeError, SpecError):
+ s = Spec(conflict_spec)
+ with pytest.raises(exc_type):
+ s.concretize()
diff --git a/lib/spack/spack/test/concretize_preferences.py b/lib/spack/spack/test/concretize_preferences.py
index 54df4e1563..bf915064b2 100644
--- a/lib/spack/spack/test/concretize_preferences.py
+++ b/lib/spack/spack/test/concretize_preferences.py
@@ -27,7 +27,7 @@ import pytest
import spack
import spack.util.spack_yaml as syaml
from spack.spec import Spec
-from spack.package_prefs import PreferredPackages
+import spack.package_prefs
@pytest.fixture()
@@ -41,7 +41,7 @@ def concretize_scope(config, tmpdir):
# This is kind of weird, but that's how config scopes are
# set in ConfigScope.__init__
spack.config.config_scopes.pop('concretize')
- spack.package_prefs._pkgsort = PreferredPackages()
+ spack.package_prefs.PackagePrefs.clear_caches()
# reset provider index each time, too
spack.repo._provider_index = None
@@ -55,7 +55,7 @@ def update_packages(pkgname, section, value):
"""Update config and reread package list"""
conf = {pkgname: {section: value}}
spack.config.update_config('packages', conf, 'concretize')
- spack.package_prefs._pkgsort = PreferredPackages()
+ spack.package_prefs.PackagePrefs.clear_caches()
def assert_variant_values(spec, **variants):
@@ -146,7 +146,7 @@ all:
spack.config.update_config('packages', conf, 'concretize')
# should be no error for 'all':
- spack.package_prefs._pkgsort = PreferredPackages()
+ spack.package_prefs.PackagePrefs.clear_caches()
spack.package_prefs.get_packages_config()
def test_external_mpi(self):
diff --git a/lib/spack/spack/test/conftest.py b/lib/spack/spack/test/conftest.py
index f344727674..fc1d6ecec2 100644
--- a/lib/spack/spack/test/conftest.py
+++ b/lib/spack/spack/test/conftest.py
@@ -27,11 +27,12 @@ import copy
import os
import re
import shutil
+from six import StringIO
-import cStringIO
import llnl.util.filesystem
import llnl.util.lang
import ordereddict_backport
+
import py
import pytest
import spack
@@ -56,11 +57,8 @@ def no_stdin_duplication(monkeypatch):
"""Duplicating stdin (or any other stream) returns an empty
    StringIO object.
"""
- monkeypatch.setattr(
- llnl.util.lang,
- 'duplicate_stream',
- lambda x: cStringIO.StringIO()
- )
+ monkeypatch.setattr(llnl.util.lang, 'duplicate_stream',
+ lambda x: StringIO())
@pytest.fixture(autouse=True)
@@ -170,15 +168,19 @@ def configuration_dir(tmpdir_factory, linux_os):
def config(configuration_dir):
"""Hooks the mock configuration files into spack.config"""
# Set up a mock config scope
+ spack.package_prefs.PackagePrefs.clear_caches()
spack.config.clear_config_caches()
real_scope = spack.config.config_scopes
spack.config.config_scopes = ordereddict_backport.OrderedDict()
spack.config.ConfigScope('site', str(configuration_dir.join('site')))
spack.config.ConfigScope('user', str(configuration_dir.join('user')))
Config = collections.namedtuple('Config', ['real', 'mock'])
+
yield Config(real=real_scope, mock=spack.config.config_scopes)
+
spack.config.config_scopes = real_scope
spack.config.clear_config_caches()
+ spack.package_prefs.PackagePrefs.clear_caches()
@pytest.fixture(scope='module')
@@ -312,7 +314,7 @@ def mock_archive():
"\ttouch $prefix/dummy_file\n"
"EOF\n"
)
- os.chmod(configure_path, 0755)
+ os.chmod(configure_path, 0o755)
# Archive it
current = tmpdir.chdir()
archive_name = '{0}.tar.gz'.format(repo_name)
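The 0o755 spelling is required because Python 3 rejects legacy octal literals like 0755; the 0o prefix is accepted from Python 2.6 on, so it is safe everywhere Spack runs. A quick self-contained check:

    import os
    import tempfile

    assert 0o755 == 493   # same value as the old 0755 literal

    handle, path = tempfile.mkstemp()
    os.close(handle)
    os.chmod(path, 0o755)
    os.remove(path)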
diff --git a/lib/spack/spack/test/data/web/1.html b/lib/spack/spack/test/data/web/1.html
new file mode 100644
index 0000000000..ef49c38cdb
--- /dev/null
+++ b/lib/spack/spack/test/data/web/1.html
@@ -0,0 +1,10 @@
+<html>
+ <head>
+ This is page 1.
+ </head>
+ <body>
+ <a href="2.html">list_depth=2 follows this.</a>
+
+ <a href="foo-1.0.0.tar.gz">foo-1.0.0.tar.gz</a>
+ </body>
+</html>
diff --git a/lib/spack/spack/test/data/web/2.html b/lib/spack/spack/test/data/web/2.html
new file mode 100644
index 0000000000..64c843f25b
--- /dev/null
+++ b/lib/spack/spack/test/data/web/2.html
@@ -0,0 +1,12 @@
+<html>
+ <head>
+ This is page 2.
+ </head>
+ <body>
+ <a href="3.html">list_depth=3 follows this.</a>
+ <a href="4.html">list_depth=3 follows this too.</a>
+
+ <a href="foo-2.0.0.tar.gz">foo-2.0.0.tar.gz</a>
+ <a href="foo-2.0.0b2.tar.gz">foo-2.0.0b2.tar.gz</a>
+ </body>
+</html>
diff --git a/lib/spack/spack/test/data/web/3.html b/lib/spack/spack/test/data/web/3.html
new file mode 100644
index 0000000000..e530206035
--- /dev/null
+++ b/lib/spack/spack/test/data/web/3.html
@@ -0,0 +1,11 @@
+<html>
+ <head>
+ This is page 3.
+ </head>
+ <body>
+ <a href="index.html">This link is already visited.</a>
+
+ <a href="foo-3.0.tar.gz">foo-3.0.tar.gz</a>
+ <a href="foo-3.0a1.tar.gz">foo-3.0a1.tar.gz</a>
+ </body>
+</html>
diff --git a/lib/spack/spack/test/data/web/4.html b/lib/spack/spack/test/data/web/4.html
new file mode 100644
index 0000000000..b5fe850f4d
--- /dev/null
+++ b/lib/spack/spack/test/data/web/4.html
@@ -0,0 +1,11 @@
+<html>
+ <head>
+ This is page 4.
+ </head>
+ <body>
+ This page is terminal and has no links to other pages.
+
+ <a href="foo-4.5.tar.gz">foo-4.5.tar.gz.</a>
+    <a href="foo-4.5-rc5.tar.gz">foo-4.5-rc5.tar.gz.</a>
+ </body>
+</html>
diff --git a/lib/spack/spack/test/data/web/index.html b/lib/spack/spack/test/data/web/index.html
new file mode 100644
index 0000000000..3985deeb35
--- /dev/null
+++ b/lib/spack/spack/test/data/web/index.html
@@ -0,0 +1,10 @@
+<html>
+ <head>
+ This is the root page.
+ </head>
+ <body>
+ <a href="1.html">list_depth=1 follows this.</a>
+
+ <a href="foo-0.0.0.tar.gz">foo-0.0.0.tar.gz</a>
+ </body>
+</html>
diff --git a/lib/spack/spack/test/directory_layout.py b/lib/spack/spack/test/directory_layout.py
index 1987bb3a44..d1365c0e76 100644
--- a/lib/spack/spack/test/directory_layout.py
+++ b/lib/spack/spack/test/directory_layout.py
@@ -133,23 +133,25 @@ def test_read_and_write_spec(
# TODO: increase reuse of build dependencies.
stored_deptypes = ('link', 'run')
expected = spec.copy(deps=stored_deptypes)
+ assert expected.concrete
assert expected == spec_from_file
- assert expected.eq_dag # msg , spec_from_file
+ assert expected.eq_dag(spec_from_file)
assert spec_from_file.concrete
# Ensure that specs that come out "normal" are really normal.
with open(spec_path) as spec_file:
read_separately = Spec.from_yaml(spec_file.read())
- # TODO: revise this when build deps are in dag_hash
- norm = read_separately.normalized().copy(deps=stored_deptypes)
- assert norm == spec_from_file
+ # TODO: revise this when build deps are in dag_hash
+ norm = read_separately.normalized().copy(deps=stored_deptypes)
+ assert norm == spec_from_file
+ assert norm.eq_dag(spec_from_file)
- # TODO: revise this when build deps are in dag_hash
- conc = read_separately.concretized().copy(deps=stored_deptypes)
- assert conc == spec_from_file
+ # TODO: revise this when build deps are in dag_hash
+ conc = read_separately.concretized().copy(deps=stored_deptypes)
+ assert conc == spec_from_file
+ assert conc.eq_dag(spec_from_file)
- # Make sure the hash of the read-in spec is the same
assert expected.dag_hash() == spec_from_file.dag_hash()
# Ensure directories are properly removed
diff --git a/lib/spack/spack/test/graph.py b/lib/spack/spack/test/graph.py
index ce7b07ed86..46dd4f1bc6 100644
--- a/lib/spack/spack/test/graph.py
+++ b/lib/spack/spack/test/graph.py
@@ -22,7 +22,7 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-from StringIO import StringIO
+from six import StringIO
from spack.spec import Spec
from spack.graph import AsciiGraph, topological_sort, graph_dot
diff --git a/lib/spack/spack/test/lock.py b/lib/spack/spack/test/lock.py
index 4f62cd85e9..214797c1f6 100644
--- a/lib/spack/spack/test/lock.py
+++ b/lib/spack/spack/test/lock.py
@@ -283,7 +283,7 @@ class LockTest(unittest.TestCase):
# ensure lock file exists the first time, so we open it read-only
        # to begin with.
touch(self.lock_path)
- os.chmod(self.lock_path, 0444)
+ os.chmod(self.lock_path, 0o444)
lock = Lock(self.lock_path)
self.assertTrue(lock._reads == 0)
diff --git a/lib/spack/spack/test/make_executable.py b/lib/spack/spack/test/make_executable.py
index 87a43a529a..1b3f384a8b 100644
--- a/lib/spack/spack/test/make_executable.py
+++ b/lib/spack/spack/test/make_executable.py
@@ -46,7 +46,7 @@ class MakeExecutableTest(unittest.TestCase):
with open(make_exe, 'w') as f:
f.write('#!/bin/sh\n')
f.write('echo "$@"')
- os.chmod(make_exe, 0700)
+ os.chmod(make_exe, 0o700)
path_put_first('PATH', [self.tmpdir])
diff --git a/lib/spack/spack/test/modules.py b/lib/spack/spack/test/modules.py
index bb1b0006f8..0eb54cba2c 100644
--- a/lib/spack/spack/test/modules.py
+++ b/lib/spack/spack/test/modules.py
@@ -24,14 +24,14 @@
##############################################################################
import collections
import contextlib
+from six import StringIO
-import cStringIO
import pytest
import spack.modules
import spack.spec
# Our "filesystem" for the tests below
-FILE_REGISTRY = collections.defaultdict(cStringIO.StringIO)
+FILE_REGISTRY = collections.defaultdict(StringIO)
# Spec strings that will be used throughout the tests
mpich_spec_string = 'mpich@3.0.4'
mpileaks_spec_string = 'mpileaks'
@@ -48,7 +48,7 @@ def stringio_open(monkeypatch):
if not mode == 'w':
raise RuntimeError('unexpected opening mode for stringio_open')
- FILE_REGISTRY[filename] = cStringIO.StringIO()
+ FILE_REGISTRY[filename] = StringIO()
try:
yield FILE_REGISTRY[filename]
diff --git a/lib/spack/spack/test/multimethod.py b/lib/spack/spack/test/multimethod.py
index fbcc70afe8..003936a77a 100644
--- a/lib/spack/spack/test/multimethod.py
+++ b/lib/spack/spack/test/multimethod.py
@@ -86,7 +86,7 @@ def test_default_works(builtin_mock):
def test_target_match(builtin_mock):
platform = spack.architecture.platform()
- targets = platform.targets.values()
+ targets = list(platform.targets.values())
for target in targets[:-1]:
pkg = spack.repo.get('multimethod target=' + target.name)
assert pkg.different_by_target() == target.name
diff --git a/lib/spack/spack/test/package_sanity.py b/lib/spack/spack/test/package_sanity.py
index c75d7cdcc7..ac318f94dc 100644
--- a/lib/spack/spack/test/package_sanity.py
+++ b/lib/spack/spack/test/package_sanity.py
@@ -22,49 +22,48 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-"""\
-This test does sanity checks on Spack's builtin package database.
-"""
-import unittest
+"""This test does sanity checks on Spack's builtin package database."""
+
import re
import spack
from spack.repository import RepoPath
-class PackageSanityTest(unittest.TestCase):
+def check_db():
+ """Get all packages in a DB to make sure they work."""
+ for name in spack.repo.all_package_names():
+ spack.repo.get(name)
+
+
+def test_get_all_packages():
+ """Get all packages once and make sure that works."""
+ check_db()
+
- def check_db(self):
- """Get all packages in a DB to make sure they work."""
- for name in spack.repo.all_package_names():
- spack.repo.get(name)
+def test_get_all_mock_packages():
+ """Get the mock packages once each too."""
+ db = RepoPath(spack.mock_packages_path)
+ spack.repo.swap(db)
+ check_db()
+ spack.repo.swap(db)
- def test_get_all_packages(self):
- """Get all packages once and make sure that works."""
- self.check_db()
- def test_get_all_mock_packages(self):
- """Get the mock packages once each too."""
- db = RepoPath(spack.mock_packages_path)
- spack.repo.swap(db)
- self.check_db()
- spack.repo.swap(db)
+def test_url_versions():
+ """Check URLs for regular packages, if they are explicitly defined."""
+ for pkg in spack.repo.all_packages():
+ for v, vattrs in pkg.versions.items():
+ if 'url' in vattrs:
+ # If there is a url for the version check it.
+ v_url = pkg.url_for_version(v)
+ assert vattrs['url'] == v_url
- def test_url_versions(self):
- """Check URLs for regular packages, if they are explicitly defined."""
- for pkg in spack.repo.all_packages():
- for v, vattrs in pkg.versions.items():
- if 'url' in vattrs:
- # If there is a url for the version check it.
- v_url = pkg.url_for_version(v)
- self.assertEqual(vattrs['url'], v_url)
- def test_all_versions_are_lowercase(self):
- """Spack package names must be lowercase, and use `-` instead of `_`.
- """
- errors = []
- for name in spack.repo.all_package_names():
- if re.search(r'[_A-Z]', name):
- errors.append(name)
+def test_all_versions_are_lowercase():
+ """Spack package names must be lowercase, and use `-` instead of `_`."""
+ errors = []
+ for name in spack.repo.all_package_names():
+ if re.search(r'[_A-Z]', name):
+ errors.append(name)
- self.assertEqual([], errors)
+ assert len(errors) == 0
diff --git a/lib/spack/spack/test/pattern.py b/lib/spack/spack/test/pattern.py
index 0c772a0d2d..b76f88e670 100644
--- a/lib/spack/spack/test/pattern.py
+++ b/lib/spack/spack/test/pattern.py
@@ -86,6 +86,7 @@ class CompositeTest(unittest.TestCase):
composite.append(self.Two())
composite.add()
self.assertEqual(self.Base.counter, 3)
+
composite.pop()
composite.subtract()
self.assertEqual(self.Base.counter, 2)
diff --git a/lib/spack/spack/test/provider_index.py b/lib/spack/spack/test/provider_index.py
index a176d0c315..69a5c3cd40 100644
--- a/lib/spack/spack/test/provider_index.py
+++ b/lib/spack/spack/test/provider_index.py
@@ -37,7 +37,8 @@ Tests assume that mock packages provide this::
mpi@:10.0: set([zmpi])},
'stuff': {stuff: set([externalvirtual])}}
"""
-import StringIO
+from six import StringIO
+
import spack
from spack.provider_index import ProviderIndex
from spack.spec import Spec
@@ -46,10 +47,10 @@ from spack.spec import Spec
def test_yaml_round_trip(builtin_mock):
p = ProviderIndex(spack.repo.all_package_names())
- ostream = StringIO.StringIO()
+ ostream = StringIO()
p.to_yaml(ostream)
- istream = StringIO.StringIO(ostream.getvalue())
+ istream = StringIO(ostream.getvalue())
q = ProviderIndex.from_yaml(istream)
assert p == q
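The round-trip shape of this test, isolated: write into one six.StringIO buffer, then parse the same text back from a fresh one. The YAML line is invented:

    from six import StringIO

    ostream = StringIO()
    ostream.write('mpi: [mpich, zmpi]\n')

    istream = StringIO(ostream.getvalue())
    print(istream.read())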
diff --git a/lib/spack/spack/test/python_version.py b/lib/spack/spack/test/python_version.py
index 5af55bdc5f..ee0ff9d2c9 100644
--- a/lib/spack/spack/test/python_version.py
+++ b/lib/spack/spack/test/python_version.py
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
+# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
@@ -22,26 +22,54 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-"""
-This test ensures that all Spack files are Python version 2.6 or less.
+"""Check that Spack complies with minimum supported python versions.
+
+We ensure that all Spack files work with Python2 >= 2.6 and Python3 >= 3.0.
-Spack was originally 2.7, but enough systems in 2014 are still using
-2.6 on their frontend nodes that we need 2.6 to get adopted.
+We'd like to drop 2.6 support at some point, but there are still many HPC
+systems that ship with RHEL6/CentOS 6, which have Python 2.6 as the
+default version. Once those go away, we can likely drop 2.6 and increase
+the minimum supported Python 3 version, as well.
"""
+from __future__ import print_function
+
import os
+import sys
import re
import unittest
import llnl.util.tty as tty
-import pyqver2
import spack
-spack_max_version = (2, 6)
+#
+# This test uses pyqver, by Greg Hewgill, which is a dual-source module.
+# That means we need to do different checks depending on whether we're
+# running Python 2 or Python 3.
+#
+if sys.version_info[0] < 3:
+ import pyqver2 as pyqver
+ spack_min_supported = (2, 6)
+
+ # Exclude Python 3 versions of dual-source modules when using Python 2
+ exclude_paths = [
+ os.path.join(spack.lib_path, 'external', 'yaml', 'lib3'),
+ os.path.join(spack.lib_path, 'external', 'pyqver3.py')]
+
+else:
+ import pyqver3 as pyqver
+ spack_min_supported = (3, 0)
+
+ # Exclude Python 2 versions of dual-source modules when using Python 3
+ exclude_paths = [
+ os.path.join(spack.lib_path, 'external', 'yaml', 'lib'),
+ os.path.join(spack.lib_path, 'external', 'pyqver2.py')]
class PythonVersionTest(unittest.TestCase):
- def pyfiles(self, *search_paths):
+ def pyfiles(self, search_paths, exclude=()):
+        """List Python files in a set of search paths, excluding
+        any paths in the exclude list."""
# first file is the spack script.
yield spack.spack_file
@@ -49,53 +77,71 @@ class PythonVersionTest(unittest.TestCase):
for path in search_paths:
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
+ realpath = os.path.realpath(os.path.join(root, filename))
+ if any(realpath.startswith(p) for p in exclude):
+ continue
+
if re.match(r'^[^.#].*\.py$', filename):
yield os.path.join(root, filename)
- def package_py_files(self):
- for name in spack.repo.all_package_names():
- yield spack.repo.filename_for_package_name(name)
-
- def check_python_versions(self, *files):
- # dict version -> filename -> reasons
+ def check_python_versions(self, files):
+        # This is a nested dict mapping:
+ # version -> filename -> reasons
+ #
+ # Reasons are tuples of (lineno, string), where the string is the
+ # cause for a version incompatibility.
all_issues = {}
- for fn in files:
- with open(fn) as pyfile:
- versions = pyqver2.get_versions(pyfile.read())
- for ver, reasons in versions.items():
- if ver > spack_max_version:
- if ver not in all_issues:
- all_issues[ver] = {}
- all_issues[ver][fn] = reasons
+ # Parse files and run pyqver on each file.
+ for path in files:
+ with open(path) as pyfile:
+ full_text = pyfile.read()
+ versions = pyqver.get_versions(full_text, path)
+
+ for ver, reasons in versions.items():
+ if ver <= spack_min_supported:
+ continue
+
+ # Record issues. Mark exceptions with '# nopyqver' comment
+ for lineno, cause in reasons:
+ lines = full_text.split('\n')
+ if not re.search(r'#\s*nopyqver\s*$', lines[lineno - 1]):
+ all_issues.setdefault(ver, {})[path] = reasons
+        # Print a message if there are issues
if all_issues:
- tty.error("Spack must run on Python version %d.%d"
- % spack_max_version)
+ tty.msg("Spack must remain compatible with Python version %d.%d"
+ % spack_min_supported)
+ # Print out a table showing which files/linenos require which
+ # python version, and a string describing why.
for v in sorted(all_issues.keys(), reverse=True):
- msgs = []
- for fn in sorted(all_issues[v].keys()):
- short_fn = fn
- if fn.startswith(spack.prefix):
- short_fn = fn[len(spack.prefix):]
-
- reasons = [r for r in set(all_issues[v][fn]) if r]
- for r in reasons:
- msgs.append(("%s:%s" % ('spack' + short_fn, r[0]), r[1]))
-
- tty.error("These files require version %d.%d:" % v)
- maxlen = max(len(f) for f, prob in msgs)
+ messages = []
+ for path in sorted(all_issues[v].keys()):
+ short_path = path
+ if path.startswith(spack.prefix):
+ short_path = path[len(spack.prefix):]
+
+ reasons = [r for r in set(all_issues[v][path]) if r]
+ for lineno, cause in reasons:
+ file_line = "%s:%s" % (short_path.lstrip('/'), lineno)
+ messages.append((file_line, cause))
+
+ print()
+ tty.msg("These files require version %d.%d:" % v)
+ maxlen = max(len(f) for f, prob in messages)
fmt = "%%-%ds%%s" % (maxlen + 3)
- print fmt % ('File', 'Reason')
- print fmt % ('-' * (maxlen), '-' * 20)
- for msg in msgs:
- print fmt % msg
+ print(fmt % ('File', 'Reason'))
+ print(fmt % ('-' * (maxlen), '-' * 20))
+ for msg in messages:
+ print(fmt % msg)
+ # Fail this test if there were issues.
self.assertTrue(len(all_issues) == 0)
def test_core_module_compatibility(self):
- self.check_python_versions(*self.pyfiles(spack.lib_path))
+ self.check_python_versions(
+ self.pyfiles([spack.lib_path], exclude=exclude_paths))
def test_package_module_compatibility(self):
- self.check_python_versions(*self.pyfiles(spack.packages_path))
+ self.check_python_versions(self.pyfiles([spack.packages_path]))
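A sketch of the '# nopyqver' escape hatch introduced above, assuming only that suppression is keyed off a trailing comment on the flagged line:

    import re

    line = "print('py3-only construct')  # nopyqver"
    suppressed = bool(re.search(r'#\s*nopyqver\s*$', line))
    print(suppressed)   # True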
diff --git a/lib/spack/spack/test/spec_dag.py b/lib/spack/spack/test/spec_dag.py
index 2c414bd0c0..af6a4efd95 100644
--- a/lib/spack/spack/test/spec_dag.py
+++ b/lib/spack/spack/test/spec_dag.py
@@ -63,8 +63,7 @@ def set_dependency(saved_deps):
pkg = spack.repo.get(pkg_name)
if pkg_name not in saved_deps:
saved_deps[pkg_name] = (pkg, pkg.dependencies.copy())
- # Change dep spec
- # XXX(deptype): handle deptypes.
+
pkg.dependencies[spec.name] = {Spec(pkg_name): spec}
pkg.dependency_types[spec.name] = set(deptypes)
return _mock
@@ -90,7 +89,7 @@ class TestSpecDag(object):
names = ['mpileaks', 'callpath', 'dyninst', 'libdwarf', 'libelf',
'zmpi', 'fake']
- pairs = zip([0, 1, 2, 3, 4, 2, 3], names)
+ pairs = list(zip([0, 1, 2, 3, 4, 2, 3], names))
traversal = dag.traverse()
assert [x.name for x in traversal] == names
@@ -104,7 +103,7 @@ class TestSpecDag(object):
names = ['mpileaks', 'callpath', 'dyninst', 'libdwarf', 'libelf',
'libelf', 'zmpi', 'fake', 'zmpi']
- pairs = zip([0, 1, 2, 3, 4, 3, 2, 3, 1], names)
+ pairs = list(zip([0, 1, 2, 3, 4, 3, 2, 3, 1], names))
traversal = dag.traverse(cover='edges')
assert [x.name for x in traversal] == names
@@ -118,7 +117,7 @@ class TestSpecDag(object):
names = ['mpileaks', 'callpath', 'dyninst', 'libdwarf', 'libelf',
'libelf', 'zmpi', 'fake', 'zmpi', 'fake']
- pairs = zip([0, 1, 2, 3, 4, 3, 2, 3, 1, 2], names)
+ pairs = list(zip([0, 1, 2, 3, 4, 3, 2, 3, 1, 2], names))
traversal = dag.traverse(cover='paths')
assert [x.name for x in traversal] == names
@@ -132,7 +131,7 @@ class TestSpecDag(object):
names = ['libelf', 'libdwarf', 'dyninst', 'fake', 'zmpi',
'callpath', 'mpileaks']
- pairs = zip([4, 3, 2, 3, 2, 1, 0], names)
+ pairs = list(zip([4, 3, 2, 3, 2, 1, 0], names))
traversal = dag.traverse(order='post')
assert [x.name for x in traversal] == names
@@ -146,7 +145,7 @@ class TestSpecDag(object):
names = ['libelf', 'libdwarf', 'libelf', 'dyninst', 'fake', 'zmpi',
'callpath', 'zmpi', 'mpileaks']
- pairs = zip([4, 3, 3, 2, 3, 2, 1, 1, 0], names)
+ pairs = list(zip([4, 3, 3, 2, 3, 2, 1, 1, 0], names))
traversal = dag.traverse(cover='edges', order='post')
assert [x.name for x in traversal] == names
@@ -160,7 +159,7 @@ class TestSpecDag(object):
names = ['libelf', 'libdwarf', 'libelf', 'dyninst', 'fake', 'zmpi',
'callpath', 'fake', 'zmpi', 'mpileaks']
- pairs = zip([4, 3, 3, 2, 3, 2, 1, 2, 1, 0], names)
+ pairs = list(zip([4, 3, 3, 2, 3, 2, 1, 2, 1, 0], names))
traversal = dag.traverse(cover='paths', order='post')
assert [x.name for x in traversal] == names
@@ -609,6 +608,8 @@ class TestSpecDag(object):
assert '^mpich2' in s2
def test_construct_spec_with_deptypes(self):
+ """Ensure that it is possible to construct a spec with explicit
+ dependency types."""
s = Spec('a',
Spec('b',
['build'], Spec('c')),
@@ -633,7 +634,12 @@ class TestSpecDag(object):
assert s['f']._dependents['e'].deptypes == ('run',)
def check_diamond_deptypes(self, spec):
- """Validate deptypes in dt-diamond spec."""
+ """Validate deptypes in dt-diamond spec.
+
+ This ensures that concretization works properly when two packages
+ depend on the same dependency in different ways.
+
+ """
assert spec['dt-diamond']._dependencies[
'dt-diamond-left'].deptypes == ('build', 'link')
diff --git a/lib/spack/spack/test/spec_semantics.py b/lib/spack/spack/test/spec_semantics.py
index 2f3b2b1b8d..f071bcc833 100644
--- a/lib/spack/spack/test/spec_semantics.py
+++ b/lib/spack/spack/test/spec_semantics.py
@@ -293,7 +293,7 @@ class TestSpecSematics(object):
copy = spec.copy()
for s in spec.traverse():
assert s.satisfies(copy[s.name])
- assert copy[s.name].satisfies(s)
+ assert copy[s.name].satisfies(s)
def test_unsatisfiable_compiler_flag_mismatch(self):
        # No match in specs
diff --git a/lib/spack/spack/test/spec_syntax.py b/lib/spack/spack/test/spec_syntax.py
index fcb6cfa907..dfad4a019f 100644
--- a/lib/spack/spack/test/spec_syntax.py
+++ b/lib/spack/spack/test/spec_syntax.py
@@ -122,7 +122,7 @@ class TestSpecSyntax(object):
def _check_raises(self, exc_type, items):
for item in items:
with pytest.raises(exc_type):
- self.check_parse(item)
+ Spec(item)
# ========================================================================
# Parse checks
@@ -225,113 +225,174 @@ class TestSpecSyntax(object):
errors = ['x@@1.2', 'x ^y@@1.2', 'x@1.2::', 'x::']
self._check_raises(SpecParseError, errors)
+ def _check_hash_parse(self, spec):
+ """Check several ways to specify a spec by hash."""
+ # full hash
+ self.check_parse(str(spec), '/' + spec.dag_hash())
+
+ # partial hash
+ self.check_parse(str(spec), '/ ' + spec.dag_hash()[:5])
+
+ # name + hash
+ self.check_parse(str(spec), spec.name + '/' + spec.dag_hash())
+
+ # name + version + space + partial hash
+ self.check_parse(
+ str(spec), spec.name + '@' + str(spec.version) +
+ ' /' + spec.dag_hash()[:6])
+
def test_spec_by_hash(self, database):
specs = database.mock.db.query()
- hashes = [s._hash for s in specs] # Preserves order of elements
-
- # Make sure the database is still the shape we expect
- assert len(specs) > 3
+ assert len(specs) # make sure something's in the DB
- self.check_parse(str(specs[0]), '/' + hashes[0])
- self.check_parse(str(specs[1]), '/ ' + hashes[1][:5])
- self.check_parse(str(specs[2]), specs[2].name + '/' + hashes[2])
- self.check_parse(str(specs[3]),
- specs[3].name + '@' + str(specs[3].version) +
- ' /' + hashes[3][:6])
+ for spec in specs:
+ self._check_hash_parse(spec)
def test_dep_spec_by_hash(self, database):
- specs = database.mock.db.query()
- hashes = [s._hash for s in specs] # Preserves order of elements
-
- # Make sure the database is still the shape we expect
- assert len(specs) > 10
- assert specs[4].name in specs[10]
- assert specs[-1].name in specs[10]
-
- spec1 = sp.Spec(specs[10].name + '^/' + hashes[4])
- assert specs[4].name in spec1 and spec1[specs[4].name] == specs[4]
- spec2 = sp.Spec(specs[10].name + '%' + str(specs[10].compiler) +
- ' ^ / ' + hashes[-1])
- assert (specs[-1].name in spec2 and
- spec2[specs[-1].name] == specs[-1] and
- spec2.compiler == specs[10].compiler)
- spec3 = sp.Spec(specs[10].name + '^/' + hashes[4][:4] +
- '^ / ' + hashes[-1][:5])
- assert (specs[-1].name in spec3 and
- spec3[specs[-1].name] == specs[-1] and
- specs[4].name in spec3 and spec3[specs[4].name] == specs[4])
+ mpileaks_zmpi = database.mock.db.query_one('mpileaks ^zmpi')
+ zmpi = database.mock.db.query_one('zmpi')
+ fake = database.mock.db.query_one('fake')
- def test_multiple_specs_with_hash(self, database):
- specs = database.mock.db.query()
- hashes = [s._hash for s in specs] # Preserves order of elements
-
- assert len(specs) > 3
-
- output = sp.parse(specs[0].name + '/' + hashes[0] + '/' + hashes[1])
- assert len(output) == 2
- output = sp.parse('/' + hashes[0] + '/' + hashes[1])
- assert len(output) == 2
- output = sp.parse('/' + hashes[0] + '/' + hashes[1] +
- ' ' + specs[2].name)
- assert len(output) == 3
- output = sp.parse('/' + hashes[0] +
- ' ' + specs[1].name + ' ' + specs[2].name)
- assert len(output) == 3
- output = sp.parse('/' + hashes[0] + ' ' +
- specs[1].name + ' / ' + hashes[1])
- assert len(output) == 2
+ assert 'fake' in mpileaks_zmpi
+ assert 'zmpi' in mpileaks_zmpi
- def test_ambiguous_hash(self, database):
- specs = database.mock.db.query()
- hashes = [s._hash for s in specs] # Preserves order of elements
+ mpileaks_hash_fake = sp.Spec('mpileaks ^/' + fake.dag_hash())
+ assert 'fake' in mpileaks_hash_fake
+ assert mpileaks_hash_fake['fake'] == fake
+
+ mpileaks_hash_zmpi = sp.Spec(
+ 'mpileaks %' + str(mpileaks_zmpi.compiler) +
+ ' ^ / ' + zmpi.dag_hash())
+ assert 'zmpi' in mpileaks_hash_zmpi
+ assert mpileaks_hash_zmpi['zmpi'] == zmpi
+ assert mpileaks_hash_zmpi.compiler == mpileaks_zmpi.compiler
+
+ mpileaks_hash_fake_and_zmpi = sp.Spec(
+ 'mpileaks ^/' + fake.dag_hash()[:4] + '^ / ' + zmpi.dag_hash()[:5])
+ assert 'zmpi' in mpileaks_hash_fake_and_zmpi
+ assert mpileaks_hash_fake_and_zmpi['zmpi'] == zmpi
- # Make sure the database is as expected
- assert hashes[1][:1] == hashes[2][:1] == 'b'
+ assert 'fake' in mpileaks_hash_fake_and_zmpi
+ assert mpileaks_hash_fake_and_zmpi['fake'] == fake
- ambiguous_hashes = ['/b',
- specs[1].name + '/b',
- specs[0].name + '^/b',
- specs[0].name + '^' + specs[1].name + '/b']
- self._check_raises(AmbiguousHashError, ambiguous_hashes)
+ def test_multiple_specs_with_hash(self, database):
+ mpileaks_zmpi = database.mock.db.query_one('mpileaks ^zmpi')
+ callpath_mpich2 = database.mock.db.query_one('callpath ^mpich2')
+
+ # name + hash + separate hash
+ specs = sp.parse('mpileaks /' + mpileaks_zmpi.dag_hash() +
+ '/' + callpath_mpich2.dag_hash())
+ assert len(specs) == 2
+
+ # 2 separate hashes
+ specs = sp.parse('/' + mpileaks_zmpi.dag_hash() +
+ '/' + callpath_mpich2.dag_hash())
+ assert len(specs) == 2
+
+ # 2 separate hashes + name
+ specs = sp.parse('/' + mpileaks_zmpi.dag_hash() +
+ '/' + callpath_mpich2.dag_hash() +
+ ' callpath')
+ assert len(specs) == 3
+
+ # hash + 2 names
+ specs = sp.parse('/' + mpileaks_zmpi.dag_hash() +
+ ' callpath' +
+ ' callpath')
+ assert len(specs) == 3
+
+ # hash + name + hash
+ specs = sp.parse('/' + mpileaks_zmpi.dag_hash() +
+ ' callpath' +
+ ' / ' + callpath_mpich2.dag_hash())
+ assert len(specs) == 2
+
+ def test_ambiguous_hash(self, database):
+ dbspecs = database.mock.db.query()
+
+ def find_ambiguous(specs, keyfun):
+ """Return the first set of specs that's ambiguous under a
+ particular key function."""
+ key_to_spec = {}
+ for spec in specs:
+ key = keyfun(spec)
+ speclist = key_to_spec.setdefault(key, [])
+ speclist.append(spec)
+ if len(speclist) > 1:
+ return (key, speclist)
+
+ # If we fail here, we may need to guarantee that there are
+            # some ambiguous specs by adding more specs to the test DB
+ # until this succeeds.
+ raise RuntimeError("no ambiguous specs found for keyfun!")
+
+ # ambiguity in first hash character
+ char, specs = find_ambiguous(dbspecs, lambda s: s.dag_hash()[0])
+ self._check_raises(AmbiguousHashError, ['/' + char])
+
+ # ambiguity in first hash character AND spec name
+ t, specs = find_ambiguous(dbspecs,
+ lambda s: (s.name, s.dag_hash()[0]))
+ name, char = t
+ self._check_raises(AmbiguousHashError, [name + '/' + char])
def test_invalid_hash(self, database):
- specs = database.mock.db.query()
- hashes = [s._hash for s in specs] # Preserves order of elements
+ mpileaks_zmpi = database.mock.db.query_one('mpileaks ^zmpi')
+ zmpi = database.mock.db.query_one('zmpi')
- # Make sure the database is as expected
- assert (hashes[0] != hashes[3] and
- hashes[1] != hashes[4] and len(specs) > 4)
+ mpileaks_mpich = database.mock.db.query_one('mpileaks ^mpich')
+ mpich = database.mock.db.query_one('mpich')
- inputs = [specs[0].name + '/' + hashes[3],
- specs[1].name + '^' + specs[4].name + '/' + hashes[0],
- specs[1].name + '^' + specs[4].name + '/' + hashes[1]]
- self._check_raises(InvalidHashError, inputs)
+ # name + incompatible hash
+ self._check_raises(InvalidHashError, [
+ 'zmpi /' + mpich.dag_hash(),
+ 'mpich /' + zmpi.dag_hash()])
+
+ # name + dep + incompatible hash
+ self._check_raises(InvalidHashError, [
+ 'mpileaks ^mpich /' + mpileaks_zmpi.dag_hash(),
+ 'mpileaks ^zmpi /' + mpileaks_mpich.dag_hash()])
def test_nonexistent_hash(self, database):
- # This test uses database to make sure we don't accidentally access
- # real installs, however unlikely
+        """Ensure we get errors for nonexistent hashes."""
specs = database.mock.db.query()
- hashes = [s._hash for s in specs] # Preserves order of elements
- # Make sure the database is as expected
- assert 'abc123' not in [h[:6] for h in hashes]
+ # This hash shouldn't be in the test DB. What are the odds :)
+ no_such_hash = 'aaaaaaaaaaaaaaa'
+ hashes = [s._hash for s in specs]
+ assert no_such_hash not in [h[:len(no_such_hash)] for h in hashes]
- nonexistant_hashes = ['/abc123',
- specs[0].name + '/abc123']
- self._check_raises(SystemExit, nonexistant_hashes)
+ self._check_raises(NoSuchHashError, [
+ '/' + no_such_hash,
+ 'mpileaks /' + no_such_hash])
def test_redundant_spec(self, database):
- specs = database.mock.db.query()
- hashes = [s._hash for s in specs] # Preserves order of elements
+ """Check that redundant spec constraints raise errors.
+
+ TODO (TG): does this need to be an error? Or should concrete
+ specs only raise errors if constraints cause a contradiction?
+
+ """
+ mpileaks_zmpi = database.mock.db.query_one('mpileaks ^zmpi')
+ callpath_zmpi = database.mock.db.query_one('callpath ^zmpi')
+ dyninst = database.mock.db.query_one('dyninst')
+
+ mpileaks_mpich2 = database.mock.db.query_one('mpileaks ^mpich2')
+
+ redundant_specs = [
+            # redundant compiler
+ '/' + mpileaks_zmpi.dag_hash() + '%' + str(mpileaks_zmpi.compiler),
+
+            # redundant version
+ 'mpileaks/' + mpileaks_mpich2.dag_hash() +
+ '@' + str(mpileaks_mpich2.version),
+
+ # redundant dependency
+ 'callpath /' + callpath_zmpi.dag_hash() + '^ libelf',
- # Make sure the database is as expected
- assert len(specs) > 3
+ # redundant flags
+ '/' + dyninst.dag_hash() + ' cflags="-O3 -fPIC"']
- redundant_specs = ['/' + hashes[0] + '%' + str(specs[0].compiler),
- specs[1].name + '/' + hashes[1] +
- '@' + str(specs[1].version),
- specs[2].name + '/' + hashes[2] + '^ libelf',
- '/' + hashes[3] + ' cflags="-O3 -fPIC"']
self._check_raises(RedundantSpecError, redundant_specs)
def test_duplicate_variant(self):
diff --git a/lib/spack/spack/test/spec_yaml.py b/lib/spack/spack/test/spec_yaml.py
index e913dc8412..0bcd2de3cf 100644
--- a/lib/spack/spack/test/spec_yaml.py
+++ b/lib/spack/spack/test/spec_yaml.py
@@ -27,6 +27,8 @@
YAML format preserves DAG information in the spec.
"""
+from collections import Iterable, Mapping
+
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
from spack.spec import Spec
@@ -78,8 +80,6 @@ def test_using_ordered_dict(builtin_mock):
versions and processes.
"""
def descend_and_check(iterable, level=0):
- from spack.util.spack_yaml import syaml_dict
- from collections import Iterable, Mapping
if isinstance(iterable, Mapping):
assert isinstance(iterable, syaml_dict)
return descend_and_check(iterable.values(), level=level + 1)
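
The recursive check above is easy to exercise standalone. In this sketch, collections.OrderedDict stands in for syaml_dict so the example has no Spack dependency (that substitution is an assumption made only for illustration):

    from collections import OrderedDict
    try:
        from collections.abc import Iterable, Mapping  # Python 3
    except ImportError:
        from collections import Iterable, Mapping      # Python 2


    def descend_and_check(iterable, level=0):
        # Every mapping in the node dict must be an order-preserving type.
        if isinstance(iterable, Mapping):
            assert isinstance(iterable, OrderedDict)
            return descend_and_check(iterable.values(), level=level + 1)
        max_level = level
        for value in iterable:
            if isinstance(value, Iterable) and not isinstance(value, str):
                max_level = max(
                    max_level, descend_and_check(value, level=level + 1))
        return max_level


    node = OrderedDict([
        ('name', 'mpileaks'),
        ('dependencies', OrderedDict([('mpich', OrderedDict())]))])
    assert descend_and_check(node) >= 2
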
diff --git a/lib/spack/spack/test/url_extrapolate.py b/lib/spack/spack/test/url_extrapolate.py
deleted file mode 100644
index 5f5cf555ae..0000000000
--- a/lib/spack/spack/test/url_extrapolate.py
+++ /dev/null
@@ -1,101 +0,0 @@
-##############################################################################
-# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
-# Produced at the Lawrence Livermore National Laboratory.
-#
-# This file is part of Spack.
-# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
-# LLNL-CODE-647188
-#
-# For details, see https://github.com/llnl/spack
-# Please also see the LICENSE file for our notice and the LGPL.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License (as
-# published by the Free Software Foundation) version 2.1, February 1999.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
-# conditions of the GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-##############################################################################
-"""Tests ability of spack to extrapolate URL versions from
-existing versions.
-"""
-import unittest
-
-import spack.url as url
-
-
-class UrlExtrapolateTest(unittest.TestCase):
-
- def check_url(self, base, version, new_url):
- self.assertEqual(url.substitute_version(base, version), new_url)
-
- def test_libelf_version(self):
- base = "http://www.mr511.de/software/libelf-0.8.13.tar.gz"
- self.check_url(base, '0.8.13', base)
- self.check_url(
- base, '0.8.12', "http://www.mr511.de/software/libelf-0.8.12.tar.gz")
- self.check_url(
- base, '0.3.1', "http://www.mr511.de/software/libelf-0.3.1.tar.gz")
- self.check_url(
- base, '1.3.1b', "http://www.mr511.de/software/libelf-1.3.1b.tar.gz")
-
- def test_libdwarf_version(self):
- base = "http://www.prevanders.net/libdwarf-20130729.tar.gz"
- self.check_url(base, '20130729', base)
- self.check_url(
- base, '8.12', "http://www.prevanders.net/libdwarf-8.12.tar.gz")
-
- def test_dyninst_version(self):
- # Dyninst has a version twice in the URL.
- base = "http://www.dyninst.org/sites/default/files/downloads/dyninst/8.1.2/DyninstAPI-8.1.2.tgz"
- self.check_url(base, '8.1.2', base)
- self.check_url(base, '8.2',
- "http://www.dyninst.org/sites/default/files/downloads/dyninst/8.2/DyninstAPI-8.2.tgz")
- self.check_url(base, '8.3.1',
- "http://www.dyninst.org/sites/default/files/downloads/dyninst/8.3.1/DyninstAPI-8.3.1.tgz")
-
- def test_partial_version_prefix(self):
- # Test now with a partial prefix earlier in the URL -- this is
- # hard to figure out so Spack only substitutes the last
- # instance of the version.
- base = "http://www.dyninst.org/sites/default/files/downloads/dyninst/8.1/DyninstAPI-8.1.2.tgz"
- self.check_url(base, '8.1.2', base)
- self.check_url(base, '8.1.4',
- "http://www.dyninst.org/sites/default/files/downloads/dyninst/8.1/DyninstAPI-8.1.4.tgz")
- self.check_url(base, '8.2',
- "http://www.dyninst.org/sites/default/files/downloads/dyninst/8.1/DyninstAPI-8.2.tgz")
- self.check_url(base, '8.3.1',
- "http://www.dyninst.org/sites/default/files/downloads/dyninst/8.1/DyninstAPI-8.3.1.tgz")
-
- def test_scalasca_partial_version(self):
- # Note that this probably doesn't actually work, but sites are
- # inconsistent about their directory structure, so it's not
- # clear what is right. This test is for consistency and to
- # document behavior. If you figure out a good way to handle
- # this case, fix the tests too.
- self.check_url('http://apps.fz-juelich.de/scalasca/releases/cube/4.3/dist/cube-4.3-TP1.tar.gz', '8.3.1',
- 'http://apps.fz-juelich.de/scalasca/releases/cube/4.3/dist/cube-8.3.1.tar.gz')
- self.check_url('http://apps.fz-juelich.de/scalasca/releases/cube/4.3/dist/cube-4.3-TP1.tar.gz', '8.3.1',
- 'http://apps.fz-juelich.de/scalasca/releases/cube/4.3/dist/cube-8.3.1.tar.gz')
-
- def test_mpileaks_version(self):
- self.check_url('https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz', '2.1.3',
- 'https://github.com/hpc/mpileaks/releases/download/v2.1.3/mpileaks-2.1.3.tar.gz')
-
- def test_gcc(self):
- self.check_url('http://open-source-box.org/gcc/gcc-4.9.2/gcc-4.9.2.tar.bz2', '4.7',
- 'http://open-source-box.org/gcc/gcc-4.7/gcc-4.7.tar.bz2')
- self.check_url('http://open-source-box.org/gcc/gcc-4.4.7/gcc-4.4.7.tar.bz2', '4.4.7',
- 'http://open-source-box.org/gcc/gcc-4.4.7/gcc-4.4.7.tar.bz2')
-
- def test_github_raw(self):
- self.check_url('https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true', '2.0.7',
- 'https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true')
- self.check_url('https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true', '4.7',
- 'https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v4.7.tgz?raw=true')
diff --git a/lib/spack/spack/test/url_parse.py b/lib/spack/spack/test/url_parse.py
index 8913de94d0..2af7c6ae0b 100644
--- a/lib/spack/spack/test/url_parse.py
+++ b/lib/spack/spack/test/url_parse.py
@@ -22,246 +22,667 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-"""\
-This file has a bunch of versions tests taken from the excellent version
-detection in Homebrew.
-"""
+"""Tests Spack's ability to parse the name and version of a package
+based on its URL."""
+
+import os
import unittest
-import spack.url as url
+from spack.url import *
-class UrlParseTest(unittest.TestCase):
+class UrlStripVersionSuffixesTest(unittest.TestCase):
+ """Tests for spack.url.strip_version_suffixes"""
- def assert_not_detected(self, string):
- self.assertRaises(
- url.UndetectableVersionError, url.parse_name_and_version, string)
+ def check(self, before, after):
+ stripped = strip_version_suffixes(before)
+ self.assertEqual(stripped, after)
- def check(self, name, v, string, **kwargs):
- # Make sure correct name and version are extracted.
- parsed_name, parsed_v = url.parse_name_and_version(string)
- self.assertEqual(parsed_name, name)
- self.assertEqual(parsed_v, url.Version(v))
+ def test_no_suffix(self):
+ self.check('rgb-1.0.6',
+ 'rgb-1.0.6')
- # Some URLs (like boost) are special and need to override the
- # built-in functionality.
- if kwargs.get('no_check_url', False):
- return
+ def test_misleading_prefix(self):
+ self.check('jpegsrc.v9b',
+ 'jpegsrc.v9b')
+ self.check('turbolinux702',
+ 'turbolinux702')
+ self.check('converge_install_2.3.16',
+ 'converge_install_2.3.16')
- # Make sure Spack formulates the right URL when we try to
- # build one with a specific version.
- self.assertEqual(string, url.substitute_version(string, v))
+ # Download type
- def test_wwwoffle_version(self):
- self.check(
- 'wwwoffle', '2.9h',
- 'http://www.gedanken.demon.co.uk/download-wwwoffle/wwwoffle-2.9h.tgz')
+ def test_src(self):
+ self.check('apache-ant-1.9.7-src',
+ 'apache-ant-1.9.7')
+ self.check('go1.7.4.src',
+ 'go1.7.4')
+
+ def test_source(self):
+ self.check('bowtie2-2.2.5-source',
+ 'bowtie2-2.2.5')
+ self.check('grib_api-1.17.0-Source',
+ 'grib_api-1.17.0')
+
+ def test_full(self):
+ self.check('julia-0.4.3-full',
+ 'julia-0.4.3')
+
+ def test_bin(self):
+ self.check('apache-maven-3.3.9-bin',
+ 'apache-maven-3.3.9')
+
+ def test_binary(self):
+ self.check('Jmol-14.8.0-binary',
+ 'Jmol-14.8.0')
+
+ def test_gem(self):
+ self.check('rubysl-date-2.0.9.gem',
+ 'rubysl-date-2.0.9')
+
+ def test_tar(self):
+ self.check('gromacs-4.6.1-tar',
+ 'gromacs-4.6.1')
+
+ def test_sh(self):
+ self.check('Miniconda2-4.3.11-Linux-x86_64.sh',
+ 'Miniconda2-4.3.11')
+
+ # Download version
+
+ def test_stable(self):
+ self.check('libevent-2.0.21-stable',
+ 'libevent-2.0.21')
+
+ def test_final(self):
+ self.check('2.6.7-final',
+ '2.6.7')
+
+ def test_rel(self):
+ self.check('v1.9.5.1rel',
+ 'v1.9.5.1')
+
+ def test_orig(self):
+ self.check('dash_0.5.5.1.orig',
+ 'dash_0.5.5.1')
+
+ def test_plus(self):
+ self.check('ncbi-blast-2.6.0+-src',
+ 'ncbi-blast-2.6.0')
+
+ # License
+
+ def test_gpl(self):
+ self.check('cppad-20170114.gpl',
+ 'cppad-20170114')
+
+ # OS
+
+ def test_linux(self):
+ self.check('astyle_2.04_linux',
+ 'astyle_2.04')
+
+ def test_unix(self):
+ self.check('install-tl-unx',
+ 'install-tl')
+
+ def test_macos(self):
+ self.check('astyle_1.23_macosx',
+ 'astyle_1.23')
+ self.check('haxe-2.08-osx',
+ 'haxe-2.08')
+
+ # PyPI
+
+ def test_wheel(self):
+ self.check('entrypoints-0.2.2-py2.py3-none-any.whl',
+ 'entrypoints-0.2.2')
+ self.check('numpy-1.12.0-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl', # noqa
+ 'numpy-1.12.0')
+
+ def test_exe(self):
+ self.check('PyYAML-3.12.win-amd64-py3.5.exe',
+ 'PyYAML-3.12')
+
+ # Combinations of multiple patterns
+
+ def test_complex_all(self):
+ self.check('p7zip_9.04_src_all',
+ 'p7zip_9.04')
+
+ def test_complex_run(self):
+ self.check('cuda_8.0.44_linux.run',
+ 'cuda_8.0.44')
+
+ def test_complex_file(self):
+ self.check('ack-2.14-single-file',
+ 'ack-2.14')
+
+ def test_complex_jar(self):
+ self.check('antlr-3.4-complete.jar',
+ 'antlr-3.4')
+
+ def test_complex_oss(self):
+ self.check('tbb44_20160128oss_src_0',
+ 'tbb44_20160128')
+
+ def test_complex_darwin(self):
+ self.check('ghc-7.0.4-x86_64-apple-darwin',
+ 'ghc-7.0.4')
+ self.check('ghc-7.0.4-i386-apple-darwin',
+ 'ghc-7.0.4')
+
+ def test_complex_arch(self):
+ self.check('VizGlow_v2.2alpha17-R21November2016-Linux-x86_64-Install',
+ 'VizGlow_v2.2alpha17-R21November2016')
+ self.check('jdk-8u92-linux-x64',
+ 'jdk-8u92')
+ self.check('cuda_6.5.14_linux_64.run',
+ 'cuda_6.5.14')
+
+ def test_complex_with(self):
+ self.check('mafft-7.221-with-extensions-src',
+ 'mafft-7.221')
+ self.check('spark-2.0.0-bin-without-hadoop',
+ 'spark-2.0.0')
+
+ def test_complex_public(self):
+ self.check('dakota-6.3-public.src',
+ 'dakota-6.3')
+
+ def test_complex_universal(self):
+ self.check('synergy-1.3.6p2-MacOSX-Universal',
+ 'synergy-1.3.6p2')
+
+
+class UrlStripNameSuffixesTest(unittest.TestCase):
+ """Tests for spack.url.strip_name_suffixes"""
+
+ def check(self, before, version, after):
+ stripped = strip_name_suffixes(before, version)
+ self.assertEqual(stripped, after)
+
+ def test_no_suffix(self):
+ self.check('rgb-1.0.6', '1.0.6',
+ 'rgb')
+ self.check('nauty26r7', '26r7',
+ 'nauty')
+
+ # Download type
+
+ def test_install(self):
+ self.check('converge_install_2.3.16', '2.3.16',
+ 'converge')
+
+ def test_src(self):
+ self.check('jpegsrc.v9b', '9b',
+ 'jpeg')
+
+ def test_std(self):
+ self.check('ghostscript-fonts-std-8.11', '8.11',
+ 'ghostscript-fonts')
+
+ # Download version
+
+ def test_snapshot(self):
+ self.check('gts-snapshot-121130', '121130',
+ 'gts')
+
+ def test_distrib(self):
+ self.check('zoltan_distrib_v3.83', '3.83',
+ 'zoltan')
+
+ # VCS
- def test_version_sourceforge_download(self):
+ def test_bazaar(self):
+ self.check('libvterm-0+bzr681', '681',
+ 'libvterm')
+
+ # License
+
+ def test_gpl(self):
+ self.check('PyQt-x11-gpl-4.11.3', '4.11.3',
+ 'PyQt-x11')
+
+
+class UrlParseOffsetTest(unittest.TestCase):
+
+ def check(self, name, noffset, ver, voffset, path):
+        # Make sure parse_version_offset and parse_name_offset are working
+ v, vstart, vlen, vi, vre = parse_version_offset(path)
+ n, nstart, nlen, ni, nre = parse_name_offset(path, v)
+
+ self.assertEqual(n, name)
+ self.assertEqual(v, ver)
+ self.assertEqual(nstart, noffset)
+ self.assertEqual(vstart, voffset)
+
+ def test_name_in_path(self):
self.check(
- 'foo-bar', '1.21',
- 'http://sourceforge.net/foo_bar-1.21.tar.gz/download')
+ 'antlr', 25, '2.7.7', 40,
+ 'https://github.com/antlr/antlr/tarball/v2.7.7')
+
+ def test_name_in_stem(self):
self.check(
- 'foo-bar', '1.21',
- 'http://sf.net/foo_bar-1.21.tar.gz/download')
+ 'gmp', 32, '6.0.0a', 36,
+ 'https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2')
- def test_no_version(self):
- self.assert_not_detected('http://example.com/blah.tar')
- self.assert_not_detected('foo')
+ def test_name_in_suffix(self):
+ # Don't think I've ever seen one of these before
+ # We don't look for it, so it would probably fail anyway
+ pass
- def test_version_all_dots(self):
+ def test_version_in_path(self):
self.check(
- 'foo-bar-la', '1.14', 'http://example.com/foo.bar.la.1.14.zip')
+ 'nextflow', 31, '0.20.1', 59,
+ 'https://github.com/nextflow-io/nextflow/releases/download/v0.20.1/nextflow')
- def test_version_underscore_separator(self):
+ def test_version_in_stem(self):
self.check(
- 'grc', '1.1',
- 'http://example.com/grc_1.1.tar.gz')
-
- def test_boost_version_style(self):
+ 'zlib', 24, '1.2.10', 29,
+ 'http://zlib.net/fossils/zlib-1.2.10.tar.gz')
+ self.check(
+ 'slepc', 51, '3.6.2', 57,
+ 'http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz')
self.check(
- 'boost', '1.39.0',
- 'http://example.com/boost_1_39_0.tar.bz2',
- no_check_url=True)
+ 'cloog', 61, '0.18.1', 67,
+ 'http://www.bastoul.net/cloog/pages/download/count.php3?url=./cloog-0.18.1.tar.gz')
+ self.check(
+ 'libxc', 58, '2.2.2', 64,
+ 'http://www.tddft.org/programs/octopus/down.php?file=libxc/libxc-2.2.2.tar.gz')
- def test_erlang_version_style(self):
+ def test_version_in_suffix(self):
+ self.check(
+ 'swiftsim', 36, '0.3.0', 76,
+ 'http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0')
self.check(
- 'otp', 'R13B',
- 'http://erlang.org/download/otp_src_R13B.tar.gz')
+ 'sionlib', 30, '1.7.1', 59,
+ 'http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1')
- def test_another_erlang_version_style(self):
+ def test_regex_in_name(self):
self.check(
- 'otp', 'R15B01',
- 'https://github.com/erlang/otp/tarball/OTP_R15B01')
+ 'voro++', 40, '0.4.6', 47,
+ 'http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz')
+
+
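
The offsets checked here are plain character indices into the URL string. A small sketch of that contract, assuming a Spack checkout is on PYTHONPATH so spack.url is importable:

    from spack.url import parse_name_offset, parse_version_offset

    url = 'https://github.com/antlr/antlr/tarball/v2.7.7'
    v, vstart, vlen, vi, vre = parse_version_offset(url)
    n, nstart, nlen, ni, nre = parse_name_offset(url, v)

    # The version '2.7.7' begins at index 40; the name 'antlr' at index 25.
    assert str(v) == '2.7.7' and url[vstart:].startswith('2.7.7')
    assert n == 'antlr' and url[nstart:].startswith('antlr')
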
+class UrlParseNameAndVersionTest(unittest.TestCase):
+
+ def assert_not_detected(self, string):
+ self.assertRaises(
+ UndetectableVersionError, parse_name_and_version, string)
+
+ def check(self, name, v, string, **kwargs):
+ # Make sure correct name and version are extracted.
+ parsed_name, parsed_v = parse_name_and_version(string)
+ self.assertEqual(parsed_name, name)
+ self.assertEqual(parsed_v, Version(v))
+
+ # Make sure Spack formulates the right URL when we try to
+ # build one with a specific version.
+ self.assertEqual(string, substitute_version(string, v))
+
+ # Common Repositories
- def test_yet_another_erlang_version_style(self):
+ def test_github_downloads(self):
+ # name/archive/ver.ver
self.check(
- 'otp', 'R15B03-1',
- 'https://github.com/erlang/otp/tarball/OTP_R15B03-1')
+ 'nco', '4.6.2',
+ 'https://github.com/nco/nco/archive/4.6.2.tar.gz')
+ # name/archive/vver.ver
+ self.check(
+ 'vim', '8.0.0134',
+ 'https://github.com/vim/vim/archive/v8.0.0134.tar.gz')
+ # name/archive/name-ver.ver
+ self.check(
+ 'oce', '0.18',
+ 'https://github.com/tpaviot/oce/archive/OCE-0.18.tar.gz')
+ # name/releases/download/vver/name-ver.ver
+ self.check(
+ 'libmesh', '1.0.0',
+ 'https://github.com/libMesh/libmesh/releases/download/v1.0.0/libmesh-1.0.0.tar.bz2')
+ # name/tarball/vver.ver
+ self.check(
+ 'git', '2.7.1',
+ 'https://github.com/git/git/tarball/v2.7.1')
+ # name/zipball/vver.ver
+ self.check(
+ 'git', '2.7.1',
+ 'https://github.com/git/git/zipball/v2.7.1')
- def test_p7zip_version_style(self):
+ def test_gitlab_downloads(self):
+ # name/repository/archive.ext?ref=vver.ver
+ self.check(
+ 'swiftsim', '0.3.0',
+ 'http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0')
+ # name/repository/archive.ext?ref=name-ver.ver
self.check(
- 'p7zip', '9.04',
- 'http://kent.dl.sourceforge.net/sourceforge/p7zip/p7zip_9.04_src_all.tar.bz2')
+ 'icet', '1.2.3',
+ 'https://gitlab.kitware.com/icet/icet/repository/archive.tar.gz?ref=IceT-1.2.3')
- def test_new_github_style(self):
+ def test_bitbucket_downloads(self):
+ # name/get/ver.ver
self.check(
- 'libnet', '1.1.4',
- 'https://github.com/sam-github/libnet/tarball/libnet-1.1.4')
+ 'eigen', '3.2.7',
+ 'https://bitbucket.org/eigen/eigen/get/3.2.7.tar.bz2')
+ # name/get/vver.ver
+ self.check(
+ 'hoomd-blue', '1.3.3',
+ 'https://bitbucket.org/glotzer/hoomd-blue/get/v1.3.3.tar.bz2')
+ # name/downloads/name-ver.ver
+ self.check(
+ 'dolfin', '2016.1.0',
+ 'https://bitbucket.org/fenics-project/dolfin/downloads/dolfin-2016.1.0.tar.gz')
- def test_gloox_beta_style(self):
+ def test_sourceforge_downloads(self):
+ # name-ver.ver
self.check(
- 'gloox', '1.0-beta7',
- 'http://camaya.net/download/gloox-1.0-beta7.tar.bz2')
+ 'libpng', '1.6.27',
+ 'http://download.sourceforge.net/libpng/libpng-1.6.27.tar.gz')
+ self.check(
+ 'lcms2', '2.6',
+ 'http://downloads.sourceforge.net/project/lcms/lcms/2.6/lcms2-2.6.tar.gz')
+ self.check(
+ 'modules', '3.2.10',
+ 'http://prdownloads.sourceforge.net/modules/modules-3.2.10.tar.gz')
+ # name-ver.ver.ext/download
+ self.check(
+ 'glew', '2.0.0',
+ 'https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download')
- def test_sphinx_beta_style(self):
+ def test_cran_downloads(self):
+ # name.name_ver.ver-ver.ver
self.check(
- 'sphinx', '1.10-beta',
- 'http://sphinxsearch.com/downloads/sphinx-1.10-beta.tar.gz')
+ 'TH.data', '1.0-8',
+ 'https://cran.r-project.org/src/contrib/TH.data_1.0-8.tar.gz')
+ self.check(
+ 'knitr', '1.14',
+ 'https://cran.rstudio.com/src/contrib/knitr_1.14.tar.gz')
+ self.check(
+ 'devtools', '1.12.0',
+ 'https://cloud.r-project.org/src/contrib/devtools_1.12.0.tar.gz')
- def test_astyle_verson_style(self):
+ def test_pypi_downloads(self):
+ # name.name_name-ver.ver
+ self.check(
+ '3to2', '1.1.1',
+ 'https://pypi.python.org/packages/source/3/3to2/3to2-1.1.1.zip')
self.check(
- 'astyle', '1.23',
- 'http://kent.dl.sourceforge.net/sourceforge/astyle/astyle_1.23_macosx.tar.gz')
+ 'mpmath', '0.19',
+ 'https://pypi.python.org/packages/source/m/mpmath/mpmath-all-0.19.tar.gz')
+ self.check(
+ 'pandas', '0.16.0',
+ 'https://pypi.python.org/packages/source/p/pandas/pandas-0.16.0.tar.gz#md5=bfe311f05dc0c351f8955fbd1e296e73')
+ self.check(
+ 'sphinx_rtd_theme', '0.1.10a0',
+ 'https://pypi.python.org/packages/da/6b/1b75f13d8aa3333f19c6cdf1f0bc9f52ea739cae464fbee050307c121857/sphinx_rtd_theme-0.1.10a0.tar.gz')
+ self.check(
+ 'backports.ssl_match_hostname', '3.5.0.1',
+ 'https://pypi.io/packages/source/b/backports.ssl_match_hostname/backports.ssl_match_hostname-3.5.0.1.tar.gz')
- def test_version_dos2unix(self):
+ def test_bazaar_downloads(self):
self.check(
- 'dos2unix', '3.1',
- 'http://www.sfr-fresh.com/linux/misc/dos2unix-3.1.tar.gz')
+ 'libvterm', '681',
+ 'http://www.leonerd.org.uk/code/libvterm/libvterm-0+bzr681.tar.gz')
- def test_version_internal_dash(self):
+ # Common Tarball Formats
+
+ def test_version_only(self):
+ # ver.ver
+ self.check(
+ 'eigen', '3.2.7',
+ 'https://bitbucket.org/eigen/eigen/get/3.2.7.tar.bz2')
+ # ver.ver-ver
+ self.check(
+ 'ImageMagick', '7.0.2-7',
+ 'https://github.com/ImageMagick/ImageMagick/archive/7.0.2-7.tar.gz')
+ # vver.ver
self.check(
- 'foo-arse', '1.1-2',
- 'http://example.com/foo-arse-1.1-2.tar.gz')
+ 'CGNS', '3.3.0',
+ 'https://github.com/CGNS/CGNS/archive/v3.3.0.tar.gz')
+ # vver_ver
+ self.check(
+ 'luafilesystem', '1_6_3',
+ 'https://github.com/keplerproject/luafilesystem/archive/v1_6_3.tar.gz')
- def test_version_single_digit(self):
+ def test_no_separators(self):
+ # namever
+ self.check(
+ 'turbolinux', '702',
+ 'file://{0}/turbolinux702.tar.gz'.format(os.getcwd()))
self.check(
- 'foo-bar', '45',
- 'http://example.com/foo_bar.45.tar.gz')
+ 'nauty', '26r7',
+ 'http://pallini.di.uniroma1.it/nauty26r7.tar.gz')
- def test_noseparator_single_digit(self):
+ def test_dashes_only(self):
+ # name-name-ver-ver
+ self.check(
+ 'Trilinos', '12-10-1',
+ 'https://github.com/trilinos/Trilinos/archive/trilinos-release-12-10-1.tar.gz')
+ self.check(
+ 'panda', '2016-03-07',
+ 'http://comopt.ifi.uni-heidelberg.de/software/PANDA/downloads/panda-2016-03-07.tar')
self.check(
- 'foo-bar', '45',
- 'http://example.com/foo_bar45.tar.gz')
+ 'gts', '121130',
+ 'http://gts.sourceforge.net/tarballs/gts-snapshot-121130.tar.gz')
+ self.check(
+ 'cdd', '061a',
+ 'http://www.cs.mcgill.ca/~fukuda/download/cdd/cdd-061a.tar.gz')
- def test_version_developer_that_hates_us_format(self):
+ def test_underscores_only(self):
+ # name_name_ver_ver
+ self.check(
+ 'tinyxml', '2_6_2',
+ 'https://sourceforge.net/projects/tinyxml/files/tinyxml/2.6.2/tinyxml_2_6_2.tar.gz')
+ self.check(
+ 'boost', '1_55_0',
+ 'http://downloads.sourceforge.net/project/boost/boost/1.55.0/boost_1_55_0.tar.bz2')
self.check(
- 'foo-bar-la', '1.2.3',
- 'http://example.com/foo-bar-la.1.2.3.tar.gz')
+ 'yorick', '2_2_04',
+ 'https://github.com/dhmunro/yorick/archive/y_2_2_04.tar.gz')
+ # name_namever_ver
+ self.check(
+ 'tbb', '44_20160413',
+ 'https://www.threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb44_20160413oss_src.tgz')
- def test_version_regular(self):
+ def test_dots_only(self):
+ # name.name.ver.ver
+ self.check(
+ 'prank', '150803',
+ 'http://wasabiapp.org/download/prank/prank.source.150803.tgz')
+ self.check(
+ 'jpeg', '9b',
+ 'http://www.ijg.org/files/jpegsrc.v9b.tar.gz')
+ self.check(
+ 'openjpeg', '2.1',
+ 'https://github.com/uclouvain/openjpeg/archive/version.2.1.tar.gz')
+ # name.namever.ver
+ self.check(
+ 'atlas', '3.11.34',
+ 'http://sourceforge.net/projects/math-atlas/files/Developer%20%28unstable%29/3.11.34/atlas3.11.34.tar.bz2')
self.check(
- 'foo-bar', '1.21',
- 'http://example.com/foo_bar-1.21.tar.gz')
+ 'visit', '2.10.1',
+ 'http://portal.nersc.gov/project/visit/releases/2.10.1/visit2.10.1.tar.gz')
+ self.check(
+ 'geant', '4.10.01.p03',
+ 'http://geant4.cern.ch/support/source/geant4.10.01.p03.tar.gz')
+ self.check(
+ 'tcl', '8.6.5',
+ 'http://prdownloads.sourceforge.net/tcl/tcl8.6.5-src.tar.gz')
- def test_version_gitlab(self):
+ def test_dash_dot(self):
+ # name-name-ver.ver
+ # digit in name
self.check(
- 'vtk', '7.0.0',
- 'https://gitlab.kitware.com/vtk/vtk/repository/'
- 'archive.tar.bz2?ref=v7.0.0')
+ 'm4', '1.4.17',
+ 'https://ftp.gnu.org/gnu/m4/m4-1.4.17.tar.gz')
+ # letter in version
self.check(
- 'icet', '1.2.3',
- 'https://gitlab.kitware.com/icet/icet/repository/'
- 'archive.tar.gz?ref=IceT-1.2.3')
+ 'gmp', '6.0.0a',
+ 'https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2')
+ # version starts with 'v'
+ self.check(
+ 'LaunchMON', '1.0.2',
+ 'https://github.com/LLNL/LaunchMON/releases/download/v1.0.2/launchmon-v1.0.2.tar.gz')
+ # name-ver-ver.ver
self.check(
- 'foo', '42.1337',
- 'http://example.com/org/foo/repository/'
- 'archive.zip?ref=42.1337bar')
+ 'libedit', '20150325-3.1',
+ 'http://thrysoee.dk/editline/libedit-20150325-3.1.tar.gz')
- def test_version_github(self):
+ def test_dash_underscore(self):
+ # name-name-ver_ver
self.check(
- 'yajl', '1.0.5',
- 'http://github.com/lloyd/yajl/tarball/1.0.5')
+ 'icu4c', '57_1',
+ 'http://download.icu-project.org/files/icu4c/57.1/icu4c-57_1-src.tgz')
- def test_version_github_with_high_patch_number(self):
+ def test_underscore_dot(self):
+ # name_name_ver.ver
self.check(
- 'yajl', '1.2.34',
- 'http://github.com/lloyd/yajl/tarball/v1.2.34')
+ 'superlu_dist', '4.1',
+ 'http://crd-legacy.lbl.gov/~xiaoye/SuperLU/superlu_dist_4.1.tar.gz')
+ self.check(
+ 'pexsi', '0.9.0',
+ 'https://math.berkeley.edu/~linlin/pexsi/download/pexsi_v0.9.0.tar.gz')
+ # name_name.ver.ver
+ self.check(
+ 'fer', '696',
+ 'ftp://ftp.pmel.noaa.gov/ferret/pub/source/fer_source.v696.tar.gz')
- def test_yet_another_version(self):
+ def test_dash_dot_dash_dot(self):
+ # name-name-ver.ver-ver.ver
+ self.check(
+ 'sowing', '1.1.23-p1',
+ 'http://ftp.mcs.anl.gov/pub/petsc/externalpackages/sowing-1.1.23-p1.tar.gz')
self.check(
- 'mad', '0.15.1b',
- 'http://example.com/mad-0.15.1b.tar.gz')
+ 'bib2xhtml', '3.0-15-gf506',
+ 'http://www.spinellis.gr/sw/textproc/bib2xhtml/bib2xhtml-v3.0-15-gf506.tar.gz')
+ # namever.ver-ver.ver
+ self.check(
+ 'go', '1.4-bootstrap-20161024',
+ 'https://storage.googleapis.com/golang/go1.4-bootstrap-20161024.tar.gz')
- def test_lame_version_style(self):
+ def test_underscore_dash_dot(self):
+ # name_name-ver.ver
+ self.check(
+ 'the_silver_searcher', '0.32.0',
+ 'http://geoff.greer.fm/ag/releases/the_silver_searcher-0.32.0.tar.gz')
self.check(
- 'lame', '398-2',
- 'http://kent.dl.sourceforge.net/sourceforge/lame/lame-398-2.tar.gz')
+ 'sphinx_rtd_theme', '0.1.10a0',
+ 'https://pypi.python.org/packages/source/s/sphinx_rtd_theme/sphinx_rtd_theme-0.1.10a0.tar.gz')
- def test_ruby_version_style(self):
+ def test_dot_underscore_dot_dash_dot(self):
+ # name.name_ver.ver-ver.ver
self.check(
- 'ruby', '1.9.1-p243',
- 'ftp://ftp.ruby-lang.org/pub/ruby/1.9/ruby-1.9.1-p243.tar.gz')
+ 'TH.data', '1.0-8',
+ 'https://cran.r-project.org/src/contrib/TH.data_1.0-8.tar.gz')
+ self.check(
+ 'XML', '3.98-1.4',
+ 'https://cran.r-project.org/src/contrib/XML_3.98-1.4.tar.gz')
- def test_omega_version_style(self):
+ def test_dash_dot_underscore_dot(self):
+ # name-name-ver.ver_ver.ver
+ self.check(
+ 'pypar', '2.1.5_108',
+ 'https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/pypar/pypar-2.1.5_108.tgz')
+ # name-namever.ver_ver.ver
self.check(
- 'omega', '0.80.2',
- 'http://www.alcyone.com/binaries/omega/omega-0.80.2-src.tar.gz')
+ 'STAR-CCM+', '11.06.010_02',
+ 'file://{0}/STAR-CCM+11.06.010_02_linux-x86_64.tar.gz'.format(os.getcwd()))
- def test_rc_style(self):
+    # Weird URLs
+
+ def test_version_in_path(self):
+ # github.com/repo/name/releases/download/name-vver/name
self.check(
- 'libvorbis', '1.2.2rc1',
- 'http://downloads.xiph.org/releases/vorbis/libvorbis-1.2.2rc1.tar.bz2')
+ 'nextflow', '0.20.1',
+ 'https://github.com/nextflow-io/nextflow/releases/download/v0.20.1/nextflow')
- def test_dash_rc_style(self):
+ def test_suffix_queries(self):
self.check(
- 'js', '1.8.0-rc1',
- 'http://ftp.mozilla.org/pub/mozilla.org/js/js-1.8.0-rc1.tar.gz')
+ 'swiftsim', '0.3.0',
+ 'http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0')
+ self.check(
+ 'sionlib', '1.7.1',
+ 'http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1')
- def test_angband_version_style(self):
+ def test_stem_queries(self):
+ self.check(
+ 'slepc', '3.6.2',
+ 'http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz')
self.check(
- 'angband', '3.0.9b',
- 'http://rephial.org/downloads/3.0/angband-3.0.9b-src.tar.gz')
+ 'otf', '1.12.5salmon',
+ 'http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz')
- def test_stable_suffix(self):
+ def test_single_character_name(self):
self.check(
- 'libevent', '1.4.14b',
- 'http://www.monkey.org/~provos/libevent-1.4.14b-stable.tar.gz')
+ 'R', '3.3.2',
+ 'https://cloud.r-project.org/src/base/R-3/R-3.3.2.tar.gz')
+
+ def test_single_digit_version(self):
+ pass
- def test_debian_style_1(self):
+ def test_name_starts_with_digit(self):
self.check(
- 'sl', '3.03',
- 'http://ftp.de.debian.org/debian/pool/main/s/sl/sl_3.03.orig.tar.gz')
+ '3to2', '1.1.1',
+ 'https://pypi.python.org/packages/source/3/3to2/3to2-1.1.1.zip')
- def test_debian_style_2(self):
+    def test_plus_in_name(self):
self.check(
- 'mmv', '1.01b',
- 'http://ftp.de.debian.org/debian/pool/main/m/mmv/mmv_1.01b.orig.tar.gz')
+ 'gtk+', '2.24.31',
+ 'http://ftp.gnome.org/pub/gnome/sources/gtk+/2.24/gtk+-2.24.31.tar.xz')
+ self.check(
+ 'voro++', '0.4.6',
+ 'http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz')
+
+ def test_no_version(self):
+ self.assert_not_detected('http://www.netlib.org/blas/blast-forum/cblas.tgz')
+ self.assert_not_detected('http://www.netlib.org/voronoi/triangle.zip')
- def test_imagemagick_style(self):
+ def test_download_php(self):
+ # Name comes before download.php
+ self.check(
+ 'sionlib', '1.7.1',
+ 'http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1')
+ # Ignore download.php
+ self.check(
+ 'slepc', '3.6.2',
+ 'http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz')
        self.check(
-            'imagemagick', '6.7.5-7',
-            'http://downloads.sf.net/project/machomebrew/mirror/ImageMagick-6.7.5-7.tar.bz2')
+            'ScientificPython', '2.8.1',
+            'https://sourcesup.renater.fr/frs/download.php/file/4411/ScientificPython-2.8.1.tar.gz')
+ def test_gloox_beta_style(self):
+ self.check(
+ 'gloox', '1.0-beta7',
+ 'http://camaya.net/download/gloox-1.0-beta7.tar.bz2')
- def test_dash_version_dash_style(self):
+ def test_sphinx_beta_style(self):
self.check(
- 'antlr', '3.4',
- 'http://www.antlr.org/download/antlr-3.4-complete.jar')
+ 'sphinx', '1.10-beta',
+ 'http://sphinxsearch.com/downloads/sphinx-1.10-beta.tar.gz')
- def test_apache_version_style(self):
+ def test_ruby_version_style(self):
self.check(
- 'apache-cassandra', '1.2.0-rc2',
- 'http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin.tar.gz')
+ 'ruby', '1.9.1-p243',
+ 'ftp://ftp.ruby-lang.org/pub/ruby/1.9/ruby-1.9.1-p243.tar.gz')
- def test_jpeg_style(self):
+ def test_rc_style(self):
self.check(
- 'jpegsrc', '8d',
- 'http://www.ijg.org/files/jpegsrc.v8d.tar.gz')
+ 'libvorbis', '1.2.2rc1',
+ 'http://downloads.xiph.org/releases/vorbis/libvorbis-1.2.2rc1.tar.bz2')
- def test_pypy_version(self):
+ def test_dash_rc_style(self):
self.check(
- 'pypy', '1.4.1',
- 'http://pypy.org/download/pypy-1.4.1-osx.tar.bz2')
+ 'js', '1.8.0-rc1',
+ 'http://ftp.mozilla.org/pub/mozilla.org/js/js-1.8.0-rc1.tar.gz')
- def test_openssl_version(self):
+ def test_apache_version_style(self):
self.check(
- 'openssl', '0.9.8s',
- 'http://www.openssl.org/source/openssl-0.9.8s.tar.gz')
+ 'apache-cassandra', '1.2.0-rc2',
+ 'http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin.tar.gz')
def test_xaw3d_version(self):
self.check(
- 'xaw3d', '1.5E',
+ 'Xaw3d', '1.5E',
'ftp://ftp.visi.com/users/hawkeyd/X/Xaw3d-1.5E.tar.gz')
def test_fann_version(self):
@@ -269,16 +690,6 @@ class UrlParseTest(unittest.TestCase):
'fann', '2.1.0beta',
'http://downloads.sourceforge.net/project/fann/fann/2.1.0beta/fann-2.1.0beta.zip')
- def test_iges_version(self):
- self.check(
- 'grads', '2.0.1',
- 'ftp://iges.org/grads/2.0/grads-2.0.1-bin-darwin9.8-intel.tar.gz')
-
- def test_haxe_version(self):
- self.check(
- 'haxe', '2.08',
- 'http://haxe.org/file/haxe-2.08-osx.tar.gz')
-
def test_imap_version(self):
self.check(
'imap', '2007f',
@@ -289,26 +700,6 @@ class UrlParseTest(unittest.TestCase):
'suite3270', '3.3.12ga7',
'http://sourceforge.net/projects/x3270/files/x3270/3.3.12ga7/suite3270-3.3.12ga7-src.tgz')
- def test_synergy_version(self):
- self.check(
- 'synergy', '1.3.6p2',
- 'http://synergy.googlecode.com/files/synergy-1.3.6p2-MacOSX-Universal.zip')
-
- def test_mvapich2_19_version(self):
- self.check(
- 'mvapich2', '1.9',
- 'http://mvapich.cse.ohio-state.edu/download/mvapich2/mv2/mvapich2-1.9.tgz')
-
- def test_mvapich2_20_version(self):
- self.check(
- 'mvapich2', '2.0',
- 'http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-2.0.tar.gz')
-
- def test_hdf5_version(self):
- self.check(
- 'hdf5', '1.8.13',
- 'http://www.hdfgroup.org/ftp/HDF5/current/src/hdf5-1.8.13.tar.bz2')
-
def test_scalasca_version(self):
self.check(
'cube', '4.2.3',
@@ -317,55 +708,20 @@ class UrlParseTest(unittest.TestCase):
'cube', '4.3-TP1',
'http://apps.fz-juelich.de/scalasca/releases/cube/4.3/dist/cube-4.3-TP1.tar.gz')
- def test_mpileaks_version(self):
- self.check(
- 'mpileaks', '1.0',
- 'https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz')
- self.check(
- 'mpileaks', '1.0',
- 'https://github.com/hpc/mpileaks/releases/download/1.0/mpileaks-1.0.tar.gz')
-
- def test_gcc_version(self):
- self.check(
- 'gcc', '4.4.7',
- 'http://open-source-box.org/gcc/gcc-4.4.7/gcc-4.4.7.tar.bz2')
-
- def test_gcc_version_precedence(self):
- # prefer the version in the tarball, not in the url prefix.
- self.check(
- 'gcc', '4.4.7',
- 'http://open-source-box.org/gcc/gcc-4.9.2/gcc-4.4.7.tar.bz2')
-
def test_github_raw_url(self):
self.check(
- 'powerparser', '2.0.7',
+ 'CLAMR', '2.0.7',
'https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true')
- def test_r_xml_version(self):
+ def test_luaposix_version(self):
self.check(
- 'xml', '3.98-1.4',
- 'https://cran.r-project.org/src/contrib/XML_3.98-1.4.tar.gz')
+ 'luaposix', '33.4.0',
+ 'https://github.com/luaposix/luaposix/archive/release-v33.4.0.tar.gz')
def test_nco_version(self):
self.check(
'nco', '4.6.2-beta03',
'https://github.com/nco/nco/archive/4.6.2-beta03.tar.gz')
-
self.check(
'nco', '4.6.3-alpha04',
'https://github.com/nco/nco/archive/4.6.3-alpha04.tar.gz')
-
- def test_yorick_version(self):
- self.check(
- 'yorick', '2_2_04',
- 'https://github.com/dhmunro/yorick/archive/y_2_2_04.tar.gz')
-
- def test_luaposix_version(self):
- self.check(
- 'luaposix', '33.4.0',
- 'https://github.com/luaposix/luaposix/archive/release-v33.4.0.tar.gz')
-
- def test_sionlib_version(self):
- self.check(
- 'sionlib', '1.7.1',
- 'http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1')
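
The check() helper in UrlParseNameAndVersionTest boils down to the following round trip, sketched standalone here (same assumption as above: spack.url importable from a Spack checkout):

    from spack.url import parse_name_and_version, substitute_version

    url = 'https://github.com/nco/nco/archive/4.6.2.tar.gz'
    name, version = parse_name_and_version(url)
    assert name == 'nco' and str(version) == '4.6.2'

    # Substituting the parsed version back in must reproduce the URL.
    assert substitute_version(url, version) == url
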
diff --git a/lib/spack/spack/test/url_substitution.py b/lib/spack/spack/test/url_substitution.py
index ea6374e3d2..449a3b29bf 100644
--- a/lib/spack/spack/test/url_substitution.py
+++ b/lib/spack/spack/test/url_substitution.py
@@ -22,44 +22,64 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-"""\
-This test does sanity checks on substituting new versions into URLs
-"""
+"""Tests Spack's ability to substitute a different version into a URL."""
+
+import os
import unittest
-import spack.url as url
+from spack.url import substitute_version
+
+
+class UrlSubstitutionTest(unittest.TestCase):
+ def check(self, base, version, new_url):
+ self.assertEqual(substitute_version(base, version), new_url)
-base = "https://comp.llnl.gov/linear_solvers/download/hypre-2.9.0b.tar.gz"
-stem = "https://comp.llnl.gov/linear_solvers/download/hypre-"
+ def test_same_version(self):
+ # Ensures that substituting the same version results in the same URL
+ self.check(
+ 'http://www.mr511.de/software/libelf-0.8.13.tar.gz', '0.8.13',
+ 'http://www.mr511.de/software/libelf-0.8.13.tar.gz')
+ def test_different_version(self):
+ # Test a completely different version syntax
+ self.check(
+ 'http://www.prevanders.net/libdwarf-20130729.tar.gz', '8.12',
+ 'http://www.prevanders.net/libdwarf-8.12.tar.gz')
-class PackageSanityTest(unittest.TestCase):
+ def test_double_version(self):
+ # Test a URL where the version appears twice
+ # It should get substituted both times
+ self.check(
+ 'https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz', '2.1.3',
+ 'https://github.com/hpc/mpileaks/releases/download/v2.1.3/mpileaks-2.1.3.tar.gz')
- def test_hypre_url_substitution(self):
- self.assertEqual(url.substitute_version(base, '2.9.0b'), base)
- self.assertEqual(
- url.substitute_version(base, '2.8.0b'), stem + "2.8.0b.tar.gz")
- self.assertEqual(
- url.substitute_version(base, '2.7.0b'), stem + "2.7.0b.tar.gz")
- self.assertEqual(
- url.substitute_version(base, '2.6.0b'), stem + "2.6.0b.tar.gz")
- self.assertEqual(
- url.substitute_version(base, '1.14.0b'), stem + "1.14.0b.tar.gz")
- self.assertEqual(
- url.substitute_version(base, '1.13.0b'), stem + "1.13.0b.tar.gz")
- self.assertEqual(
- url.substitute_version(base, '2.0.0'), stem + "2.0.0.tar.gz")
- self.assertEqual(
- url.substitute_version(base, '1.6.0'), stem + "1.6.0.tar.gz")
+ def test_partial_version_prefix(self):
+ # Test now with a partial prefix earlier in the URL
+ # This is hard to figure out so Spack only substitutes
+ # the last instance of the version
+ self.check(
+ 'https://www.open-mpi.org/software/ompi/v2.1/downloads/openmpi-2.1.0.tar.bz2', '2.2.0',
+ 'https://www.open-mpi.org/software/ompi/v2.1/downloads/openmpi-2.2.0.tar.bz2')
+ self.check(
+ 'https://www.open-mpi.org/software/ompi/v2.1/downloads/openmpi-2.1.0.tar.bz2', '2.2',
+ 'https://www.open-mpi.org/software/ompi/v2.1/downloads/openmpi-2.2.tar.bz2')
- def test_otf2_url_substitution(self):
- base = "http://www.vi-hps.org/upload/packages/otf2/otf2-1.4.tar.gz"
+ def test_no_separator(self):
+ # No separator between the name and version of the package
+ self.check(
+ 'file://{0}/turbolinux702.tar.gz'.format(os.getcwd()), '703',
+ 'file://{0}/turbolinux703.tar.gz'.format(os.getcwd()))
- self.assertEqual(url.substitute_version(base, '1.4'), base)
+ def test_github_raw(self):
+ self.check(
+ 'https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true', '2.0.7',
+ 'https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true')
+ self.check(
+ 'https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true', '4.7',
+ 'https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v4.7.tgz?raw=true')
- self.assertEqual(
- url.substitute_version(base, '1.3.1'),
- "http://www.vi-hps.org/upload/packages/otf2/otf2-1.3.1.tar.gz")
- self.assertEqual(
- url.substitute_version(base, '1.2.1'),
- "http://www.vi-hps.org/upload/packages/otf2/otf2-1.2.1.tar.gz")
+ def test_regex(self):
+ # Package name contains regex characters
+ self.check(
+ 'http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz', '1.2.3',
+ 'http://math.lbl.gov/voro++/download/dir/voro++-1.2.3.tar.gz')
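
A compact sketch of the "last instance only" rule that test_partial_version_prefix documents (again assuming spack.url is importable):

    from spack.url import substitute_version

    old = ('https://www.open-mpi.org/software/ompi/'
           'v2.1/downloads/openmpi-2.1.0.tar.bz2')
    # Only the trailing '2.1.0' changes; the 'v2.1' directory stays put.
    assert substitute_version(old, '2.2.0') == (
        'https://www.open-mpi.org/software/ompi/'
        'v2.1/downloads/openmpi-2.2.0.tar.bz2')
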
diff --git a/lib/spack/spack/test/versions.py b/lib/spack/spack/test/versions.py
index c1d427783c..71ea3af9e9 100644
--- a/lib/spack/spack/test/versions.py
+++ b/lib/spack/spack/test/versions.py
@@ -22,413 +22,453 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-"""
-These version tests were taken from the RPM source code.
+"""These version tests were taken from the RPM source code.
We try to maintain compatibility with RPM's version semantics
where it makes sense.
"""
-import unittest
+import pytest
from spack.version import *
-class VersionsTest(unittest.TestCase):
-
- def assert_ver_lt(self, a, b):
- a, b = ver(a), ver(b)
- self.assertTrue(a < b)
- self.assertTrue(a <= b)
- self.assertTrue(a != b)
- self.assertFalse(a == b)
- self.assertFalse(a > b)
- self.assertFalse(a >= b)
-
- def assert_ver_gt(self, a, b):
- a, b = ver(a), ver(b)
- self.assertTrue(a > b)
- self.assertTrue(a >= b)
- self.assertTrue(a != b)
- self.assertFalse(a == b)
- self.assertFalse(a < b)
- self.assertFalse(a <= b)
-
- def assert_ver_eq(self, a, b):
- a, b = ver(a), ver(b)
- self.assertFalse(a > b)
- self.assertTrue(a >= b)
- self.assertFalse(a != b)
- self.assertTrue(a == b)
- self.assertFalse(a < b)
- self.assertTrue(a <= b)
-
- def assert_in(self, needle, haystack):
- self.assertTrue(ver(needle) in ver(haystack))
-
- def assert_not_in(self, needle, haystack):
- self.assertFalse(ver(needle) in ver(haystack))
-
- def assert_canonical(self, canonical_list, version_list):
- self.assertEqual(ver(canonical_list), ver(version_list))
-
- def assert_overlaps(self, v1, v2):
- self.assertTrue(ver(v1).overlaps(ver(v2)))
-
- def assert_no_overlap(self, v1, v2):
- self.assertFalse(ver(v1).overlaps(ver(v2)))
-
- def assert_satisfies(self, v1, v2):
- self.assertTrue(ver(v1).satisfies(ver(v2)))
-
- def assert_does_not_satisfy(self, v1, v2):
- self.assertFalse(ver(v1).satisfies(ver(v2)))
-
- def check_intersection(self, expected, a, b):
- self.assertEqual(ver(expected), ver(a).intersection(ver(b)))
-
- def check_union(self, expected, a, b):
- self.assertEqual(ver(expected), ver(a).union(ver(b)))
-
- def test_two_segments(self):
- self.assert_ver_eq('1.0', '1.0')
- self.assert_ver_lt('1.0', '2.0')
- self.assert_ver_gt('2.0', '1.0')
- self.assert_ver_eq('develop', 'develop')
- self.assert_ver_lt('1.0', 'develop')
- self.assert_ver_gt('develop', '1.0')
-
- def test_three_segments(self):
- self.assert_ver_eq('2.0.1', '2.0.1')
- self.assert_ver_lt('2.0', '2.0.1')
- self.assert_ver_gt('2.0.1', '2.0')
-
- def test_alpha(self):
- # TODO: not sure whether I like this. 2.0.1a is *usually*
- # TODO: less than 2.0.1, but special-casing it makes version
- # TODO: comparison complicated. See version.py
- self.assert_ver_eq('2.0.1a', '2.0.1a')
- self.assert_ver_gt('2.0.1a', '2.0.1')
- self.assert_ver_lt('2.0.1', '2.0.1a')
-
- def test_patch(self):
- self.assert_ver_eq('5.5p1', '5.5p1')
- self.assert_ver_lt('5.5p1', '5.5p2')
- self.assert_ver_gt('5.5p2', '5.5p1')
- self.assert_ver_eq('5.5p10', '5.5p10')
- self.assert_ver_lt('5.5p1', '5.5p10')
- self.assert_ver_gt('5.5p10', '5.5p1')
-
- def test_num_alpha_with_no_separator(self):
- self.assert_ver_lt('10xyz', '10.1xyz')
- self.assert_ver_gt('10.1xyz', '10xyz')
- self.assert_ver_eq('xyz10', 'xyz10')
- self.assert_ver_lt('xyz10', 'xyz10.1')
- self.assert_ver_gt('xyz10.1', 'xyz10')
-
- def test_alpha_with_dots(self):
- self.assert_ver_eq('xyz.4', 'xyz.4')
- self.assert_ver_lt('xyz.4', '8')
- self.assert_ver_gt('8', 'xyz.4')
- self.assert_ver_lt('xyz.4', '2')
- self.assert_ver_gt('2', 'xyz.4')
-
- def test_nums_and_patch(self):
- self.assert_ver_lt('5.5p2', '5.6p1')
- self.assert_ver_gt('5.6p1', '5.5p2')
- self.assert_ver_lt('5.6p1', '6.5p1')
- self.assert_ver_gt('6.5p1', '5.6p1')
-
- def test_rc_versions(self):
- self.assert_ver_gt('6.0.rc1', '6.0')
- self.assert_ver_lt('6.0', '6.0.rc1')
-
- def test_alpha_beta(self):
- self.assert_ver_gt('10b2', '10a1')
- self.assert_ver_lt('10a2', '10b2')
-
- def test_double_alpha(self):
- self.assert_ver_eq('1.0aa', '1.0aa')
- self.assert_ver_lt('1.0a', '1.0aa')
- self.assert_ver_gt('1.0aa', '1.0a')
-
- def test_padded_numbers(self):
- self.assert_ver_eq('10.0001', '10.0001')
- self.assert_ver_eq('10.0001', '10.1')
- self.assert_ver_eq('10.1', '10.0001')
- self.assert_ver_lt('10.0001', '10.0039')
- self.assert_ver_gt('10.0039', '10.0001')
-
- def test_close_numbers(self):
- self.assert_ver_lt('4.999.9', '5.0')
- self.assert_ver_gt('5.0', '4.999.9')
-
- def test_date_stamps(self):
- self.assert_ver_eq('20101121', '20101121')
- self.assert_ver_lt('20101121', '20101122')
- self.assert_ver_gt('20101122', '20101121')
-
- def test_underscores(self):
- self.assert_ver_eq('2_0', '2_0')
- self.assert_ver_eq('2.0', '2_0')
- self.assert_ver_eq('2_0', '2.0')
-
- def test_rpm_oddities(self):
- self.assert_ver_eq('1b.fc17', '1b.fc17')
- self.assert_ver_lt('1b.fc17', '1.fc17')
- self.assert_ver_gt('1.fc17', '1b.fc17')
- self.assert_ver_eq('1g.fc17', '1g.fc17')
- self.assert_ver_gt('1g.fc17', '1.fc17')
- self.assert_ver_lt('1.fc17', '1g.fc17')
-
- # Stuff below here is not taken from RPM's tests and is
- # unique to spack
- def test_version_ranges(self):
- self.assert_ver_lt('1.2:1.4', '1.6')
- self.assert_ver_gt('1.6', '1.2:1.4')
- self.assert_ver_eq('1.2:1.4', '1.2:1.4')
- self.assertNotEqual(ver('1.2:1.4'), ver('1.2:1.6'))
-
- self.assert_ver_lt('1.2:1.4', '1.5:1.6')
- self.assert_ver_gt('1.5:1.6', '1.2:1.4')
-
- def test_contains(self):
- self.assert_in('1.3', '1.2:1.4')
- self.assert_in('1.2.5', '1.2:1.4')
- self.assert_in('1.3.5', '1.2:1.4')
- self.assert_in('1.3.5-7', '1.2:1.4')
- self.assert_not_in('1.1', '1.2:1.4')
- self.assert_not_in('1.5', '1.2:1.4')
-
- self.assert_in('1.4.2', '1.2:1.4')
- self.assert_not_in('1.4.2', '1.2:1.4.0')
-
- self.assert_in('1.2.8', '1.2.7:1.4')
- self.assert_in('1.2.7:1.4', ':')
- self.assert_not_in('1.2.5', '1.2.7:1.4')
-
- self.assert_in('1.4.1', '1.2.7:1.4')
- self.assert_not_in('1.4.1', '1.2.7:1.4.0')
-
- def test_in_list(self):
- self.assert_in('1.2', ['1.5', '1.2', '1.3'])
- self.assert_in('1.2.5', ['1.5', '1.2:1.3'])
- self.assert_in('1.5', ['1.5', '1.2:1.3'])
- self.assert_not_in('1.4', ['1.5', '1.2:1.3'])
-
- self.assert_in('1.2.5:1.2.7', [':'])
- self.assert_in('1.2.5:1.2.7', ['1.5', '1.2:1.3'])
- self.assert_not_in('1.2.5:1.5', ['1.5', '1.2:1.3'])
- self.assert_not_in('1.1:1.2.5', ['1.5', '1.2:1.3'])
-
- def test_ranges_overlap(self):
- self.assert_overlaps('1.2', '1.2')
- self.assert_overlaps('1.2.1', '1.2.1')
- self.assert_overlaps('1.2.1b', '1.2.1b')
-
- self.assert_overlaps('1.2:1.7', '1.6:1.9')
- self.assert_overlaps(':1.7', '1.6:1.9')
- self.assert_overlaps(':1.7', ':1.9')
- self.assert_overlaps(':1.7', '1.6:')
- self.assert_overlaps('1.2:', '1.6:1.9')
- self.assert_overlaps('1.2:', ':1.9')
- self.assert_overlaps('1.2:', '1.6:')
- self.assert_overlaps(':', ':')
- self.assert_overlaps(':', '1.6:1.9')
- self.assert_overlaps('1.6:1.9', ':')
-
- def test_overlap_with_containment(self):
- self.assert_in('1.6.5', '1.6')
- self.assert_in('1.6.5', ':1.6')
-
- self.assert_overlaps('1.6.5', ':1.6')
- self.assert_overlaps(':1.6', '1.6.5')
-
- self.assert_not_in(':1.6', '1.6.5')
- self.assert_in('1.6.5', ':1.6')
-
- def test_lists_overlap(self):
- self.assert_overlaps('1.2b:1.7,5', '1.6:1.9,1')
- self.assert_overlaps('1,2,3,4,5', '3,4,5,6,7')
- self.assert_overlaps('1,2,3,4,5', '5,6,7')
- self.assert_overlaps('1,2,3,4,5', '5:7')
- self.assert_overlaps('1,2,3,4,5', '3, 6:7')
- self.assert_overlaps('1, 2, 4, 6.5', '3, 6:7')
- self.assert_overlaps('1, 2, 4, 6.5', ':, 5, 8')
- self.assert_overlaps('1, 2, 4, 6.5', ':')
- self.assert_no_overlap('1, 2, 4', '3, 6:7')
- self.assert_no_overlap('1,2,3,4,5', '6,7')
- self.assert_no_overlap('1,2,3,4,5', '6:7')
-
- def test_canonicalize_list(self):
- self.assert_canonical(['1.2', '1.3', '1.4'],
- ['1.2', '1.3', '1.3', '1.4'])
-
- self.assert_canonical(['1.2', '1.3:1.4'],
- ['1.2', '1.3', '1.3:1.4'])
-
- self.assert_canonical(['1.2', '1.3:1.4'],
- ['1.2', '1.3:1.4', '1.4'])
-
- self.assert_canonical(['1.3:1.4'],
- ['1.3:1.4', '1.3', '1.3.1', '1.3.9', '1.4'])
-
- self.assert_canonical(['1.3:1.4'],
- ['1.3', '1.3.1', '1.3.9', '1.4', '1.3:1.4'])
-
- self.assert_canonical(['1.3:1.5'],
- ['1.3', '1.3.1', '1.3.9', '1.4:1.5', '1.3:1.4'])
-
- self.assert_canonical(['1.3:1.5'],
- ['1.3, 1.3.1,1.3.9,1.4:1.5,1.3:1.4'])
-
- self.assert_canonical(['1.3:1.5'],
- ['1.3, 1.3.1,1.3.9,1.4 : 1.5 , 1.3 : 1.4'])
-
- self.assert_canonical([':'],
- [':,1.3, 1.3.1,1.3.9,1.4 : 1.5 , 1.3 : 1.4'])
-
- def test_intersection(self):
- self.check_intersection('2.5',
- '1.0:2.5', '2.5:3.0')
- self.check_intersection('2.5:2.7',
- '1.0:2.7', '2.5:3.0')
- self.check_intersection('0:1', ':', '0:1')
-
- self.check_intersection(['1.0', '2.5:2.7'],
- ['1.0:2.7'], ['2.5:3.0', '1.0'])
- self.check_intersection(['2.5:2.7'],
- ['1.1:2.7'], ['2.5:3.0', '1.0'])
- self.check_intersection(['0:1'], [':'], ['0:1'])
-
- def test_intersect_with_containment(self):
- self.check_intersection('1.6.5', '1.6.5', ':1.6')
- self.check_intersection('1.6.5', ':1.6', '1.6.5')
-
- self.check_intersection('1.6:1.6.5', ':1.6.5', '1.6')
- self.check_intersection('1.6:1.6.5', '1.6', ':1.6.5')
-
- def test_union_with_containment(self):
- self.check_union(':1.6', '1.6.5', ':1.6')
- self.check_union(':1.6', ':1.6', '1.6.5')
-
- self.check_union(':1.6', ':1.6.5', '1.6')
- self.check_union(':1.6', '1.6', ':1.6.5')
-
- self.check_union(':', '1.0:', ':2.0')
-
- self.check_union('1:4', '1:3', '2:4')
- self.check_union('1:4', '2:4', '1:3')
-
- # Tests successor/predecessor case.
- self.check_union('1:4', '1:2', '3:4')
-
- def test_basic_version_satisfaction(self):
- self.assert_satisfies('4.7.3', '4.7.3')
-
- self.assert_satisfies('4.7.3', '4.7')
- self.assert_satisfies('4.7.3b2', '4.7')
- self.assert_satisfies('4.7b6', '4.7')
-
- self.assert_satisfies('4.7.3', '4')
- self.assert_satisfies('4.7.3b2', '4')
- self.assert_satisfies('4.7b6', '4')
-
- self.assert_does_not_satisfy('4.8.0', '4.9')
- self.assert_does_not_satisfy('4.8', '4.9')
- self.assert_does_not_satisfy('4', '4.9')
-
- def test_basic_version_satisfaction_in_lists(self):
- self.assert_satisfies(['4.7.3'], ['4.7.3'])
-
- self.assert_satisfies(['4.7.3'], ['4.7'])
- self.assert_satisfies(['4.7.3b2'], ['4.7'])
- self.assert_satisfies(['4.7b6'], ['4.7'])
-
- self.assert_satisfies(['4.7.3'], ['4'])
- self.assert_satisfies(['4.7.3b2'], ['4'])
- self.assert_satisfies(['4.7b6'], ['4'])
-
- self.assert_does_not_satisfy(['4.8.0'], ['4.9'])
- self.assert_does_not_satisfy(['4.8'], ['4.9'])
- self.assert_does_not_satisfy(['4'], ['4.9'])
-
- def test_version_range_satisfaction(self):
- self.assert_satisfies('4.7b6', '4.3:4.7')
- self.assert_satisfies('4.3.0', '4.3:4.7')
- self.assert_satisfies('4.3.2', '4.3:4.7')
-
- self.assert_does_not_satisfy('4.8.0', '4.3:4.7')
- self.assert_does_not_satisfy('4.3', '4.4:4.7')
-
- self.assert_satisfies('4.7b6', '4.3:4.7')
- self.assert_does_not_satisfy('4.8.0', '4.3:4.7')
-
- def test_version_range_satisfaction_in_lists(self):
- self.assert_satisfies(['4.7b6'], ['4.3:4.7'])
- self.assert_satisfies(['4.3.0'], ['4.3:4.7'])
- self.assert_satisfies(['4.3.2'], ['4.3:4.7'])
-
- self.assert_does_not_satisfy(['4.8.0'], ['4.3:4.7'])
- self.assert_does_not_satisfy(['4.3'], ['4.4:4.7'])
-
- self.assert_satisfies(['4.7b6'], ['4.3:4.7'])
- self.assert_does_not_satisfy(['4.8.0'], ['4.3:4.7'])
-
- def test_satisfaction_with_lists(self):
- self.assert_satisfies('4.7', '4.3, 4.6, 4.7')
- self.assert_satisfies('4.7.3', '4.3, 4.6, 4.7')
- self.assert_satisfies('4.6.5', '4.3, 4.6, 4.7')
- self.assert_satisfies('4.6.5.2', '4.3, 4.6, 4.7')
-
- self.assert_does_not_satisfy('4', '4.3, 4.6, 4.7')
- self.assert_does_not_satisfy('4.8.0', '4.2, 4.3:4.7')
-
- self.assert_satisfies('4.8.0', '4.2, 4.3:4.8')
- self.assert_satisfies('4.8.2', '4.2, 4.3:4.8')
-
- def test_formatted_strings(self):
- versions = '1.2.3', '1_2_3', '1-2-3'
- for item in versions:
- v = Version(item)
- self.assertEqual(v.dotted, '1.2.3')
- self.assertEqual(v.dashed, '1-2-3')
- self.assertEqual(v.underscored, '1_2_3')
- self.assertEqual(v.joined, '123')
-
- def test_repr_and_str(self):
-
- def check_repr_and_str(vrs):
- a = Version(vrs)
- self.assertEqual(repr(a), 'Version(\'' + vrs + '\')')
- b = eval(repr(a))
- self.assertEqual(a, b)
- self.assertEqual(str(a), vrs)
- self.assertEqual(str(a), str(b))
-
- check_repr_and_str('1.2.3')
- check_repr_and_str('R2016a')
- check_repr_and_str('R2016a.2-3_4')
-
- def test_get_item(self):
- a = Version('0.1_2-3')
- self.assertTrue(isinstance(a[1], int))
- # Test slicing
- b = a[0:2]
- self.assertTrue(isinstance(b, Version))
- self.assertEqual(b, Version('0.1'))
- self.assertEqual(repr(b), 'Version(\'0.1\')')
- self.assertEqual(str(b), '0.1')
- b = a[0:3]
- self.assertTrue(isinstance(b, Version))
- self.assertEqual(b, Version('0.1_2'))
- self.assertEqual(repr(b), 'Version(\'0.1_2\')')
- self.assertEqual(str(b), '0.1_2')
- b = a[1:]
- self.assertTrue(isinstance(b, Version))
- self.assertEqual(b, Version('1_2-3'))
- self.assertEqual(repr(b), 'Version(\'1_2-3\')')
- self.assertEqual(str(b), '1_2-3')
- # Raise TypeError on tuples
- self.assertRaises(TypeError, b.__getitem__, 1, 2)
-
-if __name__ == '__main__':
- unittest.main()
+def assert_ver_lt(a, b):
+ """Asserts the results of comparisons when 'a' is less than 'b'."""
+ a, b = ver(a), ver(b)
+ assert a < b
+ assert a <= b
+ assert a != b
+ assert not a == b
+ assert not a > b
+ assert not a >= b
+
+
+def assert_ver_gt(a, b):
+ """Asserts the results of comparisons when 'a' is greater than 'b'."""
+ a, b = ver(a), ver(b)
+ assert a > b
+ assert a >= b
+ assert a != b
+ assert not a == b
+ assert not a < b
+ assert not a <= b
+
+
+def assert_ver_eq(a, b):
+ """Asserts the results of comparisons when 'a' is equal to 'b'."""
+ a, b = ver(a), ver(b)
+ assert not a > b
+ assert a >= b
+ assert not a != b
+ assert a == b
+ assert not a < b
+ assert a <= b
+
+
+def assert_in(needle, haystack):
+ """Asserts that 'needle' is in 'haystack'."""
+ assert ver(needle) in ver(haystack)
+
+
+def assert_not_in(needle, haystack):
+ """Asserts that 'needle' is not in 'haystack'."""
+ assert ver(needle) not in ver(haystack)
+
+
+def assert_canonical(canonical_list, version_list):
+ """Asserts that a redundant list is reduced to canonical form."""
+ assert ver(canonical_list) == ver(version_list)
+
+
+def assert_overlaps(v1, v2):
+    """Asserts that two version ranges overlap."""
+ assert ver(v1).overlaps(ver(v2))
+
+
+def assert_no_overlap(v1, v2):
+ """Asserts that two version ranges do not overlap."""
+ assert not ver(v1).overlaps(ver(v2))
+
+
+def assert_satisfies(v1, v2):
+ """Asserts that 'v1' satisfies 'v2'."""
+ assert ver(v1).satisfies(ver(v2))
+
+
+def assert_does_not_satisfy(v1, v2):
+ """Asserts that 'v1' does not satisfy 'v2'."""
+ assert not ver(v1).satisfies(ver(v2))
+
+
+def check_intersection(expected, a, b):
+    """Asserts that the intersection of 'a' and 'b' equals 'expected'."""
+ assert ver(expected) == ver(a).intersection(ver(b))
+
+
+def check_union(expected, a, b):
+    """Asserts that the union of 'a' and 'b' equals 'expected'."""
+ assert ver(expected) == ver(a).union(ver(b))
+
+
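
A quick usage sketch of ver() before the tests proper: it builds a point version, a range, or a list from a string (assuming spack.version is importable):

    from spack.version import ver

    assert ver('1.3') in ver('1.2:1.4')              # ranges contain versions
    assert ver('1.2:1.4').overlaps(ver('1.4:1.6'))   # shared endpoint
    assert ver('1.0') < ver('2.0') < ver('develop')  # 'develop' sorts last
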
+def test_two_segments():
+ assert_ver_eq('1.0', '1.0')
+ assert_ver_lt('1.0', '2.0')
+ assert_ver_gt('2.0', '1.0')
+ assert_ver_eq('develop', 'develop')
+ assert_ver_lt('1.0', 'develop')
+ assert_ver_gt('develop', '1.0')
+
+
+def test_three_segments():
+ assert_ver_eq('2.0.1', '2.0.1')
+ assert_ver_lt('2.0', '2.0.1')
+ assert_ver_gt('2.0.1', '2.0')
+
+
+def test_alpha():
+ # TODO: not sure whether I like this. 2.0.1a is *usually*
+ # TODO: less than 2.0.1, but special-casing it makes version
+ # TODO: comparison complicated. See version.py
+ assert_ver_eq('2.0.1a', '2.0.1a')
+ assert_ver_gt('2.0.1a', '2.0.1')
+ assert_ver_lt('2.0.1', '2.0.1a')
+
+
+def test_patch():
+ assert_ver_eq('5.5p1', '5.5p1')
+ assert_ver_lt('5.5p1', '5.5p2')
+ assert_ver_gt('5.5p2', '5.5p1')
+ assert_ver_eq('5.5p10', '5.5p10')
+ assert_ver_lt('5.5p1', '5.5p10')
+ assert_ver_gt('5.5p10', '5.5p1')
+
+
+def test_num_alpha_with_no_separator():
+ assert_ver_lt('10xyz', '10.1xyz')
+ assert_ver_gt('10.1xyz', '10xyz')
+ assert_ver_eq('xyz10', 'xyz10')
+ assert_ver_lt('xyz10', 'xyz10.1')
+ assert_ver_gt('xyz10.1', 'xyz10')
+
+
+def test_alpha_with_dots():
+ assert_ver_eq('xyz.4', 'xyz.4')
+ assert_ver_lt('xyz.4', '8')
+ assert_ver_gt('8', 'xyz.4')
+ assert_ver_lt('xyz.4', '2')
+ assert_ver_gt('2', 'xyz.4')
+
+
+def test_nums_and_patch():
+ assert_ver_lt('5.5p2', '5.6p1')
+ assert_ver_gt('5.6p1', '5.5p2')
+ assert_ver_lt('5.6p1', '6.5p1')
+ assert_ver_gt('6.5p1', '5.6p1')
+
+
+def test_rc_versions():
+ assert_ver_gt('6.0.rc1', '6.0')
+ assert_ver_lt('6.0', '6.0.rc1')
+
+
+def test_alpha_beta():
+ assert_ver_gt('10b2', '10a1')
+ assert_ver_lt('10a2', '10b2')
+
+
+def test_double_alpha():
+ assert_ver_eq('1.0aa', '1.0aa')
+ assert_ver_lt('1.0a', '1.0aa')
+ assert_ver_gt('1.0aa', '1.0a')
+
+
+def test_padded_numbers():
+ assert_ver_eq('10.0001', '10.0001')
+ assert_ver_eq('10.0001', '10.1')
+ assert_ver_eq('10.1', '10.0001')
+ assert_ver_lt('10.0001', '10.0039')
+ assert_ver_gt('10.0039', '10.0001')
+
+
+def test_close_numbers():
+ assert_ver_lt('4.999.9', '5.0')
+ assert_ver_gt('5.0', '4.999.9')
+
+
+def test_date_stamps():
+ assert_ver_eq('20101121', '20101121')
+ assert_ver_lt('20101121', '20101122')
+ assert_ver_gt('20101122', '20101121')
+
+
+def test_underscores():
+ assert_ver_eq('2_0', '2_0')
+ assert_ver_eq('2.0', '2_0')
+ assert_ver_eq('2_0', '2.0')
+
+
+def test_rpm_oddities():
+ assert_ver_eq('1b.fc17', '1b.fc17')
+ assert_ver_lt('1b.fc17', '1.fc17')
+ assert_ver_gt('1.fc17', '1b.fc17')
+ assert_ver_eq('1g.fc17', '1g.fc17')
+ assert_ver_gt('1g.fc17', '1.fc17')
+ assert_ver_lt('1.fc17', '1g.fc17')
+
+
+# Stuff below here is not taken from RPM's tests and is
+# unique to Spack
+def test_version_ranges():
+ assert_ver_lt('1.2:1.4', '1.6')
+ assert_ver_gt('1.6', '1.2:1.4')
+ assert_ver_eq('1.2:1.4', '1.2:1.4')
+ assert ver('1.2:1.4') != ver('1.2:1.6')
+
+ assert_ver_lt('1.2:1.4', '1.5:1.6')
+ assert_ver_gt('1.5:1.6', '1.2:1.4')
+
+
+def test_contains():
+ assert_in('1.3', '1.2:1.4')
+ assert_in('1.2.5', '1.2:1.4')
+ assert_in('1.3.5', '1.2:1.4')
+ assert_in('1.3.5-7', '1.2:1.4')
+ assert_not_in('1.1', '1.2:1.4')
+ assert_not_in('1.5', '1.2:1.4')
+
+ assert_in('1.4.2', '1.2:1.4')
+ assert_not_in('1.4.2', '1.2:1.4.0')
+
+ assert_in('1.2.8', '1.2.7:1.4')
+ assert_in('1.2.7:1.4', ':')
+ assert_not_in('1.2.5', '1.2.7:1.4')
+
+ assert_in('1.4.1', '1.2.7:1.4')
+ assert_not_in('1.4.1', '1.2.7:1.4.0')
+
+
+def test_in_list():
+ assert_in('1.2', ['1.5', '1.2', '1.3'])
+ assert_in('1.2.5', ['1.5', '1.2:1.3'])
+ assert_in('1.5', ['1.5', '1.2:1.3'])
+ assert_not_in('1.4', ['1.5', '1.2:1.3'])
+
+ assert_in('1.2.5:1.2.7', [':'])
+ assert_in('1.2.5:1.2.7', ['1.5', '1.2:1.3'])
+ assert_not_in('1.2.5:1.5', ['1.5', '1.2:1.3'])
+ assert_not_in('1.1:1.2.5', ['1.5', '1.2:1.3'])
+
+
+def test_ranges_overlap():
+ assert_overlaps('1.2', '1.2')
+ assert_overlaps('1.2.1', '1.2.1')
+ assert_overlaps('1.2.1b', '1.2.1b')
+
+ assert_overlaps('1.2:1.7', '1.6:1.9')
+ assert_overlaps(':1.7', '1.6:1.9')
+ assert_overlaps(':1.7', ':1.9')
+ assert_overlaps(':1.7', '1.6:')
+ assert_overlaps('1.2:', '1.6:1.9')
+ assert_overlaps('1.2:', ':1.9')
+ assert_overlaps('1.2:', '1.6:')
+ assert_overlaps(':', ':')
+ assert_overlaps(':', '1.6:1.9')
+ assert_overlaps('1.6:1.9', ':')
+
+
+def test_overlap_with_containment():
+ assert_in('1.6.5', '1.6')
+ assert_in('1.6.5', ':1.6')
+
+ assert_overlaps('1.6.5', ':1.6')
+ assert_overlaps(':1.6', '1.6.5')
+
+ assert_not_in(':1.6', '1.6.5')
+ assert_in('1.6.5', ':1.6')
+
+
+def test_lists_overlap():
+ assert_overlaps('1.2b:1.7,5', '1.6:1.9,1')
+ assert_overlaps('1,2,3,4,5', '3,4,5,6,7')
+ assert_overlaps('1,2,3,4,5', '5,6,7')
+ assert_overlaps('1,2,3,4,5', '5:7')
+ assert_overlaps('1,2,3,4,5', '3, 6:7')
+ assert_overlaps('1, 2, 4, 6.5', '3, 6:7')
+ assert_overlaps('1, 2, 4, 6.5', ':, 5, 8')
+ assert_overlaps('1, 2, 4, 6.5', ':')
+ assert_no_overlap('1, 2, 4', '3, 6:7')
+ assert_no_overlap('1,2,3,4,5', '6,7')
+ assert_no_overlap('1,2,3,4,5', '6:7')
+
+
+def test_canonicalize_list():
+ assert_canonical(['1.2', '1.3', '1.4'], ['1.2', '1.3', '1.3', '1.4'])
+
+ assert_canonical(['1.2', '1.3:1.4'], ['1.2', '1.3', '1.3:1.4'])
+
+ assert_canonical(['1.2', '1.3:1.4'], ['1.2', '1.3:1.4', '1.4'])
+
+ assert_canonical(['1.3:1.4'], ['1.3:1.4', '1.3', '1.3.1', '1.3.9', '1.4'])
+
+ assert_canonical(['1.3:1.4'], ['1.3', '1.3.1', '1.3.9', '1.4', '1.3:1.4'])
+
+ assert_canonical(
+ ['1.3:1.5'], ['1.3', '1.3.1', '1.3.9', '1.4:1.5', '1.3:1.4']
+ )
+
+ assert_canonical(['1.3:1.5'], ['1.3, 1.3.1,1.3.9,1.4:1.5,1.3:1.4'])
+
+ assert_canonical(['1.3:1.5'], ['1.3, 1.3.1,1.3.9,1.4 : 1.5 , 1.3 : 1.4'])
+
+ assert_canonical([':'], [':,1.3, 1.3.1,1.3.9,1.4 : 1.5 , 1.3 : 1.4'])
+
+
+def test_intersection():
+ check_intersection('2.5', '1.0:2.5', '2.5:3.0')
+ check_intersection('2.5:2.7', '1.0:2.7', '2.5:3.0')
+ check_intersection('0:1', ':', '0:1')
+
+ check_intersection(['1.0', '2.5:2.7'], ['1.0:2.7'], ['2.5:3.0', '1.0'])
+ check_intersection(['2.5:2.7'], ['1.1:2.7'], ['2.5:3.0', '1.0'])
+ check_intersection(['0:1'], [':'], ['0:1'])
+
+
+def test_intersect_with_containment():
+ check_intersection('1.6.5', '1.6.5', ':1.6')
+ check_intersection('1.6.5', ':1.6', '1.6.5')
+
+ check_intersection('1.6:1.6.5', ':1.6.5', '1.6')
+ check_intersection('1.6:1.6.5', '1.6', ':1.6.5')
+
+
+def test_union_with_containment():
+ check_union(':1.6', '1.6.5', ':1.6')
+ check_union(':1.6', ':1.6', '1.6.5')
+
+ check_union(':1.6', ':1.6.5', '1.6')
+ check_union(':1.6', '1.6', ':1.6.5')
+
+ check_union(':', '1.0:', ':2.0')
+
+ check_union('1:4', '1:3', '2:4')
+ check_union('1:4', '2:4', '1:3')
+
+ # Tests successor/predecessor case.
+ check_union('1:4', '1:2', '3:4')
+
+
+def test_basic_version_satisfaction():
+ assert_satisfies('4.7.3', '4.7.3')
+
+ assert_satisfies('4.7.3', '4.7')
+ assert_satisfies('4.7.3b2', '4.7')
+ assert_satisfies('4.7b6', '4.7')
+
+ assert_satisfies('4.7.3', '4')
+ assert_satisfies('4.7.3b2', '4')
+ assert_satisfies('4.7b6', '4')
+
+ assert_does_not_satisfy('4.8.0', '4.9')
+ assert_does_not_satisfy('4.8', '4.9')
+ assert_does_not_satisfy('4', '4.9')
+
+
+def test_basic_version_satisfaction_in_lists():
+ assert_satisfies(['4.7.3'], ['4.7.3'])
+
+ assert_satisfies(['4.7.3'], ['4.7'])
+ assert_satisfies(['4.7.3b2'], ['4.7'])
+ assert_satisfies(['4.7b6'], ['4.7'])
+
+ assert_satisfies(['4.7.3'], ['4'])
+ assert_satisfies(['4.7.3b2'], ['4'])
+ assert_satisfies(['4.7b6'], ['4'])
+
+ assert_does_not_satisfy(['4.8.0'], ['4.9'])
+ assert_does_not_satisfy(['4.8'], ['4.9'])
+ assert_does_not_satisfy(['4'], ['4.9'])
+
+
+def test_version_range_satisfaction():
+ assert_satisfies('4.7b6', '4.3:4.7')
+ assert_satisfies('4.3.0', '4.3:4.7')
+ assert_satisfies('4.3.2', '4.3:4.7')
+
+ assert_does_not_satisfy('4.8.0', '4.3:4.7')
+ assert_does_not_satisfy('4.3', '4.4:4.7')
+
+ assert_satisfies('4.7b6', '4.3:4.7')
+ assert_does_not_satisfy('4.8.0', '4.3:4.7')
+
+
+def test_version_range_satisfaction_in_lists():
+ assert_satisfies(['4.7b6'], ['4.3:4.7'])
+ assert_satisfies(['4.3.0'], ['4.3:4.7'])
+ assert_satisfies(['4.3.2'], ['4.3:4.7'])
+
+ assert_does_not_satisfy(['4.8.0'], ['4.3:4.7'])
+ assert_does_not_satisfy(['4.3'], ['4.4:4.7'])
+
+ assert_satisfies(['4.7b6'], ['4.3:4.7'])
+ assert_does_not_satisfy(['4.8.0'], ['4.3:4.7'])
+
+
+def test_satisfaction_with_lists():
+ assert_satisfies('4.7', '4.3, 4.6, 4.7')
+ assert_satisfies('4.7.3', '4.3, 4.6, 4.7')
+ assert_satisfies('4.6.5', '4.3, 4.6, 4.7')
+ assert_satisfies('4.6.5.2', '4.3, 4.6, 4.7')
+
+ assert_does_not_satisfy('4', '4.3, 4.6, 4.7')
+ assert_does_not_satisfy('4.8.0', '4.2, 4.3:4.7')
+
+ assert_satisfies('4.8.0', '4.2, 4.3:4.8')
+ assert_satisfies('4.8.2', '4.2, 4.3:4.8')
+
+
+def test_formatted_strings():
+ versions = '1.2.3', '1_2_3', '1-2-3'
+ for item in versions:
+ v = Version(item)
+ assert v.dotted == '1.2.3'
+ assert v.dashed == '1-2-3'
+ assert v.underscored == '1_2_3'
+ assert v.joined == '123'
+
+
+def test_repr_and_str():
+
+ def check_repr_and_str(vrs):
+ a = Version(vrs)
+ assert repr(a) == 'Version(\'' + vrs + '\')'
+ b = eval(repr(a))
+ assert a == b
+ assert str(a) == vrs
+ assert str(a) == str(b)
+
+ check_repr_and_str('1.2.3')
+ check_repr_and_str('R2016a')
+ check_repr_and_str('R2016a.2-3_4')
+
+
+def test_get_item():
+ a = Version('0.1_2-3')
+ assert isinstance(a[1], int)
+ # Test slicing
+ b = a[0:2]
+ assert isinstance(b, Version)
+ assert b == Version('0.1')
+ assert repr(b) == 'Version(\'0.1\')'
+ assert str(b) == '0.1'
+ b = a[0:3]
+ assert isinstance(b, Version)
+ assert b == Version('0.1_2')
+ assert repr(b) == 'Version(\'0.1_2\')'
+ assert str(b) == '0.1_2'
+ b = a[1:]
+ assert isinstance(b, Version)
+ assert b == Version('1_2-3')
+ assert repr(b) == 'Version(\'1_2-3\')'
+ assert str(b) == '1_2-3'
+ # Raise TypeError on tuples
+ with pytest.raises(TypeError):
+ b.__getitem__(1, 2)
diff --git a/lib/spack/spack/test/web.py b/lib/spack/spack/test/web.py
new file mode 100644
index 0000000000..9fa95a8d18
--- /dev/null
+++ b/lib/spack/spack/test/web.py
@@ -0,0 +1,162 @@
+##############################################################################
+# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License (as
+# published by the Free Software Foundation) version 2.1, February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""Tests for web.py."""
+import os
+
+import spack
+from spack.util.web import spider, find_versions_of_archive
+from spack.version import *
+
+
+web_data_path = os.path.join(spack.test_path, 'data', 'web')
+
+root = 'file://' + web_data_path + '/index.html'
+root_tarball = 'file://' + web_data_path + '/foo-0.0.0.tar.gz'
+
+page_1 = 'file://' + os.path.join(web_data_path, '1.html')
+page_2 = 'file://' + os.path.join(web_data_path, '2.html')
+page_3 = 'file://' + os.path.join(web_data_path, '3.html')
+page_4 = 'file://' + os.path.join(web_data_path, '4.html')
+
+
+def test_spider_0():
+ pages, links = spider(root, depth=0)
+
+ assert root in pages
+ assert page_1 not in pages
+ assert page_2 not in pages
+ assert page_3 not in pages
+ assert page_4 not in pages
+
+ assert "This is the root page." in pages[root]
+
+ assert root not in links
+ assert page_1 in links
+ assert page_2 not in links
+ assert page_3 not in links
+ assert page_4 not in links
+
+
+def test_spider_1():
+ pages, links = spider(root, depth=1)
+
+ assert root in pages
+ assert page_1 in pages
+ assert page_2 not in pages
+ assert page_3 not in pages
+ assert page_4 not in pages
+
+ assert "This is the root page." in pages[root]
+ assert "This is page 1." in pages[page_1]
+
+ assert root not in links
+ assert page_1 in links
+ assert page_2 in links
+ assert page_3 not in links
+ assert page_4 not in links
+
+
+def test_spider_2():
+ pages, links = spider(root, depth=2)
+
+ assert root in pages
+ assert page_1 in pages
+ assert page_2 in pages
+ assert page_3 not in pages
+ assert page_4 not in pages
+
+ assert "This is the root page." in pages[root]
+ assert "This is page 1." in pages[page_1]
+ assert "This is page 2." in pages[page_2]
+
+ assert root not in links
+ assert page_1 in links
+ assert page_2 in links
+ assert page_3 in links
+ assert page_4 in links
+
+
+def test_spider_3():
+ pages, links = spider(root, depth=3)
+
+ assert root in pages
+ assert page_1 in pages
+ assert page_2 in pages
+ assert page_3 in pages
+ assert page_4 in pages
+
+ assert "This is the root page." in pages[root]
+ assert "This is page 1." in pages[page_1]
+ assert "This is page 2." in pages[page_2]
+ assert "This is page 3." in pages[page_3]
+ assert "This is page 4." in pages[page_4]
+
+ assert root in links # circular link on page 3
+ assert page_1 in links
+ assert page_2 in links
+ assert page_3 in links
+ assert page_4 in links
+
+
+def test_find_versions_of_archive_0():
+ versions = find_versions_of_archive(root_tarball, root, list_depth=0)
+ assert ver('0.0.0') in versions
+
+
+def test_find_versions_of_archive_1():
+ versions = find_versions_of_archive(root_tarball, root, list_depth=1)
+ assert ver('0.0.0') in versions
+ assert ver('1.0.0') in versions
+
+
+def test_find_versions_of_archive_2():
+ versions = find_versions_of_archive(root_tarball, root, list_depth=2)
+ assert ver('0.0.0') in versions
+ assert ver('1.0.0') in versions
+ assert ver('2.0.0') in versions
+
+
+def test_find_exotic_versions_of_archive_2():
+ versions = find_versions_of_archive(root_tarball, root, list_depth=2)
+ # Detecting these exotic versions is imperfect; improvements are welcome.
+ assert ver('2.0.0b2') in versions
+
+
+def test_find_versions_of_archive_3():
+ versions = find_versions_of_archive(root_tarball, root, list_depth=3)
+ assert ver('0.0.0') in versions
+ assert ver('1.0.0') in versions
+ assert ver('2.0.0') in versions
+ assert ver('3.0') in versions
+ assert ver('4.5') in versions
+
+
+def test_find_exotic_versions_of_archive_3():
+ versions = find_versions_of_archive(root_tarball, root, list_depth=3)
+ assert ver('2.0.0b2') in versions
+ assert ver('3.0a1') in versions
+ assert ver('4.5-rc5') in versions
diff --git a/lib/spack/spack/url.py b/lib/spack/spack/url.py
index 65f8e12e58..174f7d0b3c 100644
--- a/lib/spack/spack/url.py
+++ b/lib/spack/spack/url.py
@@ -46,8 +46,8 @@ it's never been told about that version before.
"""
import os
import re
-from StringIO import StringIO
-from urlparse import urlsplit, urlunsplit
+from six import StringIO
+from six.moves.urllib.parse import urlsplit, urlunsplit
import llnl.util.tty as tty
from llnl.util.tty.color import *
@@ -71,7 +71,7 @@ def find_list_url(url):
url_types = [
# e.g. https://github.com/llnl/callpath/archive/v1.0.1.tar.gz
- (r'^(https://github.com/[^/]+/[^/]+)/archive/',
+ (r'(.*github\.com/[^/]+/[^/]+)/archive/',
lambda m: m.group(1) + '/releases')]
for pattern, fun in url_types:
@@ -101,6 +101,177 @@ def strip_query_and_fragment(path):
return (path, '') # Ignore URL parse errors here
+def strip_version_suffixes(path):
+ """Some tarballs contain extraneous information after the version:
+
+ * ``bowtie2-2.2.5-source``
+ * ``libevent-2.0.21-stable``
+ * ``cuda_8.0.44_linux.run``
+
+ These strings are not part of the version number and should be ignored.
+ This function strips those suffixes off and returns the remaining string.
+ The goal is that the version is always the last thing in ``path``:
+
+ * ``bowtie2-2.2.5``
+ * ``libevent-2.0.21``
+ * ``cuda_8.0.44``
+
+ :param str path: The filename or URL for the package
+ :return: The ``path`` with any extraneous suffixes removed
+ :rtype: str
+ """
+ # NOTE: This could be done with complicated regexes in parse_version_offset
+ # NOTE: The problem is that we would have to add these regexes to the end
+ # NOTE: of every single version regex. Easier to just strip them off
+ # NOTE: permanently
+
+ suffix_regexes = [
+ # Download type
+ '[Ii]nstall',
+ 'all',
+ 'src(_0)?',
+ '[Ss]ources?',
+ 'file',
+ 'full',
+ 'single',
+ 'public',
+ 'with[a-zA-Z_-]+',
+ 'bin',
+ 'binary',
+ 'run',
+ '[Uu]niversal',
+ 'jar',
+ 'complete',
+ 'oss',
+ 'gem',
+ 'tar',
+ 'sh',
+
+ # Download version
+ 'stable',
+ '[Ff]inal',
+ 'rel',
+ 'orig',
+ 'dist',
+ '\+',
+
+ # License
+ 'gpl',
+
+ # Arch
+ # Needs to come before and after OS, appears in both orders
+ 'ia32',
+ 'intel',
+ 'amd64',
+ 'x64',
+ 'x86_64',
+ 'x86',
+ 'i[36]86',
+ 'ppc64(le)?',
+ 'armv?(7l|6l|64)',
+
+ # OS
+ '[Ll]inux(_64)?',
+ '[Uu]ni?x',
+ '[Ss]un[Oo][Ss]',
+ '[Mm]ac[Oo][Ss][Xx]?',
+ '[Oo][Ss][Xx]',
+ '[Dd]arwin(64)?',
+ '[Aa]pple',
+ '[Ww]indows',
+ '[Ww]in(64|32)?',
+ '[Cc]ygwin(64|32)?',
+ '[Mm]ingw',
+
+ # Arch
+ # Needs to come before and after OS, appears in both orders
+ 'ia32',
+ 'intel',
+ 'amd64',
+ 'x64',
+ 'x86_64',
+ 'x86',
+ 'i[36]86',
+ 'ppc64(le)?',
+ 'armv?(7l|6l|64)?',
+
+ # PyPI
+ '[._-]py[23].*\.whl',
+ '[._-]cp[23].*\.whl',
+ '[._-]win.*\.exe',
+ ]
+
+ for regex in suffix_regexes:
+ # Remove the suffix from the end of the path
+ # This may be done multiple times
+ path = re.sub(r'[._-]?' + regex + '$', '', path)
+
+ return path
+
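As a sanity check, the helper can be exercised on the suffixes documented in its own docstring; this is a hypothetical interactive session, not part of the patch:

    >>> strip_version_suffixes('cuda_8.0.44_linux.run')   # '.run', then '_linux', is stripped
    'cuda_8.0.44'
    >>> strip_version_suffixes('libevent-2.0.21-stable')  # '-stable' is stripped
    'libevent-2.0.21'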
+
+def strip_name_suffixes(path, version):
+ """Most tarballs contain a package name followed by a version number.
+ However, some also contain extraneous information in-between the name
+ and version:
+
+ * ``rgb-1.0.6``
+ * ``converge_install_2.3.16``
+ * ``jpegsrc.v9b``
+
+ These strings are not part of the package name and should be ignored.
+ This function strips the version number and any extraneous suffixes
+ off and returns the remaining string. The goal is that the name is
+ always the last thing in ``path``:
+
+ * ``rgb``
+ * ``converge``
+ * ``jpeg``
+
+ :param str path: The filename or URL for the package
+ :param str version: The version detected for this URL
+ :return: The ``path`` with any extraneous suffixes removed
+ :rtype: str
+ """
+ # NOTE: This could be done with complicated regexes in parse_name_offset
+ # NOTE: The problem is that we would have to add these regexes to every
+ # NOTE: single name regex. Easier to just strip them off permanently
+
+ suffix_regexes = [
+ # Strip off the version and anything after it
+
+ # name-ver
+ # name_ver
+ # name.ver
+ r'[._-]v?' + str(version) + '.*',
+
+ # namever
+ str(version) + '.*',
+
+ # Download type
+ 'install',
+ 'src',
+ '(open)?[Ss]ources?',
+ '[._-]std',
+
+ # Download version
+ 'snapshot',
+ 'distrib',
+
+ # VCS
+ '0\+bzr',
+
+ # License
+ 'gpl',
+ ]
+
+ for regex in suffix_regexes:
+ # Remove the suffix from the end of the path
+ # This may be done multiple times
+ path = re.sub('[._-]?' + regex + '$', '', path)
+
+ return path
+
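Likewise for the name side; a hypothetical session using the docstring's own examples:

    >>> strip_name_suffixes('converge_install_2.3.16', '2.3.16')  # version, then '_install'
    'converge'
    >>> strip_name_suffixes('jpegsrc.v9b', '9b')                  # version, then 'src'
    'jpeg'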
+
def split_url_extension(path):
"""Some URLs have a query string, e.g.:
@@ -125,7 +296,7 @@ def split_url_extension(path):
prefix, ext, suffix = path, '', ''
# Strip off sourceforge download suffix.
- match = re.search(r'((?:sourceforge.net|sf.net)/.*)(/download)$', path)
+ match = re.search(r'((?:sourceforge\.net|sf\.net)/.*)(/download)$', path)
if match:
prefix, suffix = match.groups()
@@ -189,8 +360,20 @@ def parse_version_offset(path):
path, ext, suffix = split_url_extension(path)
# stem: Everything from path after the final '/'
- stem = os.path.basename(path)
- offset = len(path) - len(stem)
+ original_stem = os.path.basename(path)
+
+ # Try to strip off anything after the version number
+ stem = strip_version_suffixes(original_stem)
+
+ # Assumptions:
+ #
+ # 1. version always comes after the name
+ # 2. separators include '-', '_', and '.'
+ # 3. names can contain A-Z, a-z, 0-9, '+', separators
+ # 4. versions can contain A-Z, a-z, 0-9, separators
+ # 5. versions always start with a digit
+ # 6. versions are often prefixed by a 'v' character
+ # 7. separators are most reliable to determine name/version boundaries
# List of the following format:
#
@@ -202,87 +385,118 @@ def parse_version_offset(path):
# The first regex that matches string will be used to determine
# the version of the package. Therefore, hyperspecific regexes should
# come first while generic, catch-all regexes should come last.
+ # With that said, regular expressions are slow, so if possible, put
+ # ones that only catch one or two URLs at the bottom.
version_regexes = [
- # GitHub tarballs, e.g. v1.2.3
- (r'github.com/.+/(?:zip|tar)ball/v?((\d+\.)+\d+)$', path),
+ # 1st Pass: Simplest case
+ # Assume name contains no digits and version contains no letters
+ # e.g. libpng-1.6.27
+ (r'^[a-zA-Z+._-]+[._-]v?(\d[\d._-]*)$', stem),
- # e.g. https://github.com/sam-github/libnet/tarball/libnet-1.1.4
- (r'github.com/.+/(?:zip|tar)ball/.*-((\d+\.)+\d+)$', path),
+ # 2nd Pass: Version only
+ # Assume version contains no letters
- # e.g. https://github.com/isaacs/npm/tarball/v0.2.5-1
- (r'github.com/.+/(?:zip|tar)ball/v?((\d+\.)+\d+-(\d+))$', path),
+ # ver
+ # e.g. 3.2.7, 7.0.2-7, v3.3.0, v1_6_3
+ (r'^v?(\d[\d._-]*)$', stem),
- # e.g. https://github.com/petdance/ack/tarball/1.93_02
- (r'github.com/.+/(?:zip|tar)ball/v?((\d+\.)+\d+_(\d+))$', path),
+ # 3rd Pass: No separator characters are used
+ # Assume name contains no digits
- # Yorick is very special.
- # e.g. https://github.com/dhmunro/yorick/archive/y_2_2_04.tar.gz
- (r'github.com/[^/]+/yorick/archive/y_(\d+(?:_\d+)*)$', path),
+ # namever
+ # e.g. turbolinux702, nauty26r7
+ (r'^[a-zA-Z+]*(\d[\da-zA-Z]*)$', stem),
- # e.g. https://github.com/hpc/lwgrp/archive/v1.0.1.tar.gz
- (r'github.com/[^/]+/[^/]+/archive/(?:release-)?v?(\w+(?:[.-]\w+)*)$', path), # noqa
+ # 4th Pass: A single separator character is used
+ # Assume name contains no digits
- # e.g. https://github.com/erlang/otp/tarball/OTP_R15B01 (erlang style)
- (r'[-_](R\d+[AB]\d*(-\d+)?)', path),
+ # name-name-ver-ver
+ # e.g. panda-2016-03-07, gts-snapshot-121130, cdd-061a
+ (r'^[a-zA-Z+-]*(\d[\da-zA-Z-]*)$', stem),
- # e.g., https://github.com/hpc/libcircle/releases/download/0.2.1-rc.1/libcircle-0.2.1-rc.1.tar.gz
- # e.g.,
- # https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz
- (r'github.com/[^/]+/[^/]+/releases/download/v?([^/]+)/.*$', path),
+ # name_name_ver_ver
+ # e.g. tinyxml_2_6_2, boost_1_55_0, tbb2017_20161128, v1_6_3
+ (r'^[a-zA-Z+_]*(\d[\da-zA-Z_]*)$', stem),
- # GitLab syntax:
- # {baseUrl}{/organization}{/projectName}/repository/archive.{fileEnding}?ref={gitTag}
- # as with github releases, we hope a version can be found in the
- # git tag
- # Search dotted versions:
- # e.g., https://gitlab.kitware.com/vtk/vtk/repository/archive.tar.bz2?ref=v7.0.0
- # e.g., https://example.com/org/repo/repository/archive.tar.bz2?ref=SomePrefix-2.1.1
- # e.g., http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1
- (r'\?ref=(?:.*-|v)*((\d+\.)+\d+).*$', suffix),
- (r'\?version=((\d+\.)+\d+)', suffix),
+ # name.name.ver.ver
+ # e.g. prank.source.150803, jpegsrc.v9b, atlas3.11.34, geant4.10.01.p03
+ (r'^[a-zA-Z+.]*(\d[\da-zA-Z.]*)$', stem),
- # e.g. boost_1_39_0
- (r'((\d+_)+\d+)$', stem),
+ # 5th Pass: Two separator characters are used
+ # Name may contain digits, version may contain letters
- # e.g. foobar-4.5.1-1
- # e.g. ruby-1.9.1-p243
- (r'-((\d+\.)*\d\.\d+-(p|rc|RC)?\d+)(?:[-._](?:bin|dist|stable|src|sources))?$', stem), # noqa
+ # name-name-ver.ver
+ # e.g. m4-1.4.17, gmp-6.0.0a, launchmon-v1.0.2
+ (r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z.]*)$', stem),
- # e.g. lame-398-1
- (r'-((\d)+-\d)', stem),
+ # name-name-ver_ver
+ # e.g. icu4c-57_1
+ (r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z_]*)$', stem),
- # e.g. foobar_1.2-3 or 3.98-1.4
- (r'_((\d+\.)+\d+(-(\d+(\.\d+)?))?[a-z]?)', stem),
+ # name_name_ver.ver
+ # e.g. superlu_dist_4.1, pexsi_v0.9.0
+ (r'^[a-zA-Z\d+_]+_v?(\d[\da-zA-Z.]*)$', stem),
- # e.g. foobar-4.5.1
- (r'-((\d+\.)*\d+)$', stem),
+ # name_name.ver.ver
+ # e.g. fer_source.v696
+ (r'^[a-zA-Z\d+_]+\.v?(\d[\da-zA-Z.]*)$', stem),
- # e.g. foobar-4.5.1b, foobar4.5RC, foobar.v4.5.1b
- (r'[-._]?v?((\d+\.)*\d+[-._]?([a-z]|rc|RC|tp|TP?)\d*)$', stem),
+ # name-name-ver.ver-ver.ver
+ # e.g. sowing-1.1.23-p1, bib2xhtml-v3.0-15-gf506, 4.6.3-alpha04
+ (r'^(?:[a-zA-Z\d+-]+-)?v?(\d[\da-zA-Z.-]*)$', stem),
- # e.g. foobar-4.5.0-beta1, or foobar-4.50-beta
- (r'-((\d+\.)*\d+-beta(\d+)?)$', stem),
+ # namever.ver-ver.ver
+ # e.g. go1.4-bootstrap-20161024
+ (r'^[a-zA-Z+]+v?(\d[\da-zA-Z.-]*)$', stem),
- # e.g. foobar4.5.1
- (r'((\d+\.)*\d+)$', stem),
+ # 6th Pass: All three separator characters are used
+ # Name may contain digits, version may contain letters
- # e.g. foobar-4.5.0-bin
- (r'-((\d+\.)+\d+[a-z]?)[-._](bin|dist|stable|src|sources?)$', stem),
+ # name_name-ver.ver
+ # e.g. the_silver_searcher-0.32.0, sphinx_rtd_theme-0.1.10a0
+ (r'^[a-zA-Z\d+_]+-v?(\d[\da-zA-Z.]*)$', stem),
- # e.g. dash_0.5.5.1.orig.tar.gz (Debian style)
- (r'_((\d+\.)+\d+[a-z]?)[.]orig$', stem),
+ # name.name_ver.ver-ver.ver
+ # e.g. TH.data_1.0-8, XML_3.98-1.4
+ (r'^[a-zA-Z\d+.]+_v?(\d[\da-zA-Z.-]*)$', stem),
- # e.g. http://www.openssl.org/source/openssl-0.9.8s.tar.gz
- (r'-v?([^-]+(-alpha|-beta)?)', stem),
+ # name-name-ver.ver_ver.ver
+ # e.g. pypar-2.1.5_108
+ (r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z._]*)$', stem),
- # e.g. astyle_1.23_macosx.tar.gz
- (r'_([^_]+(_alpha|_beta)?)', stem),
+ # name.name_name-ver.ver
+ # e.g. tap.py-1.6, backports.ssl_match_hostname-3.5.0.1
+ (r'^[a-zA-Z\d+._]+-v?(\d[\da-zA-Z.]*)$', stem),
- # e.g. http://mirrors.jenkins-ci.org/war/1.486/jenkins.war
- (r'\/(\d\.\d+)\/', path),
+ # name-namever.ver_ver.ver
+ # e.g. STAR-CCM+11.06.010_02
+ (r'^[a-zA-Z+-]+(\d[\da-zA-Z._]*)$', stem),
- # e.g. http://www.ijg.org/files/jpegsrc.v8d.tar.gz
- (r'\.v(\d+[a-z]?)', stem)
+ # 7th Pass: Specific VCS
+
+ # bazaar
+ # e.g. libvterm-0+bzr681
+ (r'bzr(\d[\da-zA-Z._-]*)$', stem),
+
+ # 8th Pass: Version in path
+
+ # github.com/repo/name/releases/download/vver/name
+ # e.g. https://github.com/nextflow-io/nextflow/releases/download/v0.20.1/nextflow
+ (r'github\.com/[^/]+/[^/]+/releases/download/[a-zA-Z+._-]*v?(\d[\da-zA-Z._-]*)/', path), # noqa
+
+ # 9th Pass: Query strings
+
+ # e.g. http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0
+ (r'\?ref=[a-zA-Z+._-]*v?(\d[\da-zA-Z._-]*)$', suffix),
+
+ # e.g. http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1
+ (r'\?version=v?(\d[\da-zA-Z._-]*)$', suffix),
+
+ # e.g. http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz
+ (r'\?filename=[a-zA-Z\d+-]+-v?(\d[\da-zA-Z.]*)$', stem),
+
+ # e.g. http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz
+ (r'\?package=[a-zA-Z\d+-]+&get=[a-zA-Z\d+-]+-v?(\d[\da-zA-Z.]*)$', stem), # noqa
]
for i, version_regex in enumerate(version_regexes):
@@ -292,9 +506,15 @@ def parse_version_offset(path):
version = match.group(1)
start = match.start(1)
- # if we matched from the basename, then add offset in.
+ # If we matched from the stem or suffix, we need to add offset
+ offset = 0
if match_string is stem:
- start += offset
+ offset = len(path) - len(original_stem)
+ elif match_string is suffix:
+ offset = len(path)
+ if ext:
+ offset += len(ext) + 1 # .tar.gz is converted to tar.gz
+ start += offset
return version, start, len(version), i, regex
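To make the offset bookkeeping concrete, here is a hypothetical trace (the URL is illustrative only):

    # path          = 'https://example.com/libpng-1.6.27'  (33 characters)
    # original_stem = 'libpng-1.6.27'                      (13 characters)
    # The 1st-pass regex matches '1.6.27' at start=7 within the stem, so
    # offset = 33 - 13 = 20 and the reported start is 7 + 20 = 27,
    # i.e. path[27:27 + 6] == '1.6.27'.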
@@ -342,7 +562,7 @@ def parse_name_offset(path, v=None):
except UndetectableVersionError:
# Not all URLs contain a version. We still want to be able
# to determine a name if possible.
- v = ''
+ v = 'unknown'
# path: The prefix of the URL, everything before the ext and suffix
# ext: The file extension
@@ -350,8 +570,10 @@ def parse_name_offset(path, v=None):
path, ext, suffix = split_url_extension(path)
# stem: Everything from path after the final '/'
- stem = os.path.basename(path)
- offset = len(path) - len(stem)
+ original_stem = os.path.basename(path)
+
+ # Try to strip off anything after the package name
+ stem = strip_name_suffixes(original_stem, v)
# List of the following format:
#
@@ -363,25 +585,45 @@ def parse_name_offset(path, v=None):
# The first regex that matches string will be used to determine
# the name of the package. Therefore, hyperspecific regexes should
# come first while generic, catch-all regexes should come last.
+ # With that said, regular expressions are slow, so if possible, put
+ # ones that only catch one or two URLs at the bottom.
name_regexes = [
- (r'/sourceforge/([^/]+)/', path),
- (r'github.com/[^/]+/[^/]+/releases/download/%s/(.*)-%s$' %
- (v, v), path),
- (r'/([^/]+)/(tarball|zipball)/', path),
- (r'/([^/]+)[_.-](bin|dist|stable|src|sources)[_.-]%s' % v, path),
- (r'github.com/[^/]+/([^/]+)/archive', path),
- (r'[^/]+/([^/]+)/repository/archive', path), # gitlab
- (r'([^/]+)/download.php', path),
-
- (r'([^/]+)[_.-]v?%s' % v, stem), # prefer the stem
- (r'([^/]+)%s' % v, stem),
-
- # accept the path if name is not in stem.
- (r'/([^/]+)[_.-]v?%s' % v, path),
- (r'/([^/]+)%s' % v, path),
-
- (r'^([^/]+)[_.-]v?%s' % v, path),
- (r'^([^/]+)%s' % v, path)
+ # 1st Pass: Common repositories
+
+ # GitHub: github.com/repo/name/
+ # e.g. https://github.com/nco/nco/archive/4.6.2.tar.gz
+ (r'github\.com/[^/]+/([^/]+)', path),
+
+ # GitLab: gitlab.*/repo/name/
+ # e.g. http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0
+ (r'gitlab[^/]+/[^/]+/([^/]+)', path),
+
+ # Bitbucket: bitbucket.org/repo/name/
+ # e.g. https://bitbucket.org/glotzer/hoomd-blue/get/v1.3.3.tar.bz2
+ (r'bitbucket\.org/[^/]+/([^/]+)', path),
+
+ # PyPI: pypi.(python.org|io)/packages/source/first-letter/name/
+ # e.g. https://pypi.python.org/packages/source/m/mpmath/mpmath-all-0.19.tar.gz
+ # e.g. https://pypi.io/packages/source/b/backports.ssl_match_hostname/backports.ssl_match_hostname-3.5.0.1.tar.gz
+ (r'pypi\.(?:python\.org|io)/packages/source/[A-Za-z\d]/([^/]+)', path),
+
+ # 2nd Pass: Query strings
+
+ # ?filename=name-ver.ver
+ # e.g. http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz
+ (r'\?filename=([A-Za-z\d+-]+)$', stem),
+
+ # ?package=name
+ # e.g. http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz
+ (r'\?package=([A-Za-z\d+-]+)', stem),
+
+ # download.php
+ # e.g. http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1
+ (r'([^/]+)/download.php$', path),
+
+ # 3rd Pass: Name followed by version in archive
+
+ (r'^([A-Za-z\d+\._-]+)$', stem),
]
for i, name_regex in enumerate(name_regexes):
@@ -391,13 +633,15 @@ def parse_name_offset(path, v=None):
name = match.group(1)
start = match.start(1)
- # if we matched from the basename, then add offset in.
+ # If we matched from the stem or suffix, we need to add offset
+ offset = 0
if match_string is stem:
- start += offset
-
- # package names should be lowercase and separated by dashes.
- name = name.lower()
- name = re.sub('[_.]', '-', name)
+ offset = len(path) - len(original_stem)
+ elif match_string is suffix:
+ offset = len(path)
+ if ext:
+ offset += len(ext) + 1 # .tar.gz is converted to tar.gz
+ start += offset
return name, start, len(name), i, regex
@@ -430,6 +674,9 @@ def parse_name_and_version(path):
The version of the package
:rtype: tuple
+
+ :raises UndetectableVersionError: If the URL does not match any regexes
+ :raises UndetectableNameError: If the URL does not match any regexes
"""
ver = parse_version(path)
name = parse_name(path, ver)
@@ -456,6 +703,22 @@ def cumsum(elts, init=0, fn=lambda x: x):
return sums
+def find_all(substring, string):
+ """Returns a list containing the indices of
+ every occurrence of substring in string."""
+
+ occurrences = []
+ index = 0
+ while index < len(string):
+ index = string.find(substring, index)
+ if index == -1:
+ break
+ occurrences.append(index)
+ index += len(substring)
+
+ return occurrences
+
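For example (hypothetical call):

    >>> find_all('ab', 'abcab')
    [0, 3]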
+
def substitution_offsets(path):
"""This returns offsets for substituting versions and names in the
provided path. It is a helper for :func:`substitute_version`.
@@ -467,65 +730,34 @@ def substitution_offsets(path):
except UndetectableNameError:
return (None, -1, -1, (), ver, vs, vl, (vs,))
except UndetectableVersionError:
- return (None, -1, -1, (), None, -1, -1, ())
-
- # protect extensions like bz2 from getting inadvertently
- # considered versions.
- path = comp.strip_extension(path)
-
- # Construct a case-insensitive regular expression for the package name.
- name_re = '(%s)' % insensitize(name)
-
- # Split the string apart by things that match the name so that if the
- # name contains numbers or things that look like versions, we don't
- # accidentally substitute them with a version.
- name_parts = re.split(name_re, path)
-
- offsets = cumsum(name_parts, 0, len)
- name_offsets = offsets[1::2]
+ try:
+ name, ns, nl, ni, nregex = parse_name_offset(path)
+ return (name, ns, nl, (ns,), None, -1, -1, ())
+ except UndetectableNameError:
+ return (None, -1, -1, (), None, -1, -1, ())
- ver_offsets = []
- for i in xrange(0, len(name_parts), 2):
- vparts = re.split(ver, name_parts[i])
- voffsets = cumsum(vparts, offsets[i], len)
- ver_offsets.extend(voffsets[1::2])
+ # Find the index of every occurrence of name and ver in path
+ name_offsets = find_all(name, path)
+ ver_offsets = find_all(ver, path)
- return (name, ns, nl, tuple(name_offsets),
- ver, vs, vl, tuple(ver_offsets))
+ return (name, ns, nl, name_offsets,
+ ver, vs, vl, ver_offsets)
def wildcard_version(path):
"""Find the version in the supplied path, and return a regular expression
that will match this path with any version in its place.
"""
- # Get name and version, so we can treat them specially
- name, v = parse_name_and_version(path)
+ # Get version so we can replace it with a wildcard
+ version = parse_version(path)
- path, ext, suffix = split_url_extension(path)
+ # Split path by versions
+ vparts = path.split(str(version))
+
+ # Replace each version with a generic capture group to find versions
+ # and escape everything else so it's not interpreted as a regex
+ result = '(\d.*)'.join(re.escape(vp) for vp in vparts)
- # Construct a case-insensitive regular expression for the package name.
- name_re = '(%s)' % insensitize(name)
-
- # Split the string apart by things that match the name so that if the
- # name contains numbers or things that look like versions, we don't
- # catch them with the version wildcard.
- name_parts = re.split(name_re, path)
-
- # Even elements in the array did *not* match the name
- for i in xrange(0, len(name_parts), 2):
- # Split each part by things that look like versions.
- vparts = re.split(v.wildcard(), name_parts[i])
-
- # Replace each version with a generic capture group to find versions.
- # And escape everything else so it's not interpreted as a regex
- vgroup = '(%s)' % v.wildcard()
- name_parts[i] = vgroup.join(re.escape(vp) for vp in vparts)
-
- # Put it all back together with original name matches intact.
- result = ''.join(name_parts)
- if ext:
- result += '.' + ext
- result += suffix
return result
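A sketch of what the rewritten function produces, assuming a made-up URL; the exact output depends on re.escape():

    # parse_version('http://example.com/foo-1.6.27.tar.gz') -> '1.6.27'
    # path.split('1.6.27') -> ['http://example.com/foo-', '.tar.gz']
    # result ~= re.escape('http://example.com/foo-') + '(\d.*)' + re.escape('.tar.gz')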
diff --git a/lib/spack/spack/util/crypto.py b/lib/spack/spack/util/crypto.py
index d074716022..2965168056 100644
--- a/lib/spack/spack/util/crypto.py
+++ b/lib/spack/spack/util/crypto.py
@@ -22,6 +22,7 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+import sys
import hashlib
"""Set of acceptable hashes that Spack will use."""
@@ -104,11 +105,16 @@ class Checker(object):
def prefix_bits(byte_array, bits):
"""Return the first <bits> bits of a byte array as an integer."""
+ if sys.version_info < (3,):
+ b2i = ord # In Python 2, indexing byte_array gives str
+ else:
+ b2i = lambda b: b # In Python 3, indexing byte_array gives int
+
result = 0
n = 0
for i, b in enumerate(byte_array):
n += 8
- result = (result << 8) | ord(b)
+ result = (result << 8) | b2i(b)
if n >= bits:
break
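The shim is needed because indexing a byte string differs between interpreters; a hypothetical illustration:

    # Python 2: b'\xff\x00'[0] == '\xff' (a str), so ord() is required
    # Python 3: b'\xff\x00'[0] == 255   (an int), so the identity lambda suffices
    # Either way the loop folds 0xff into result before checking the bit count.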
diff --git a/lib/spack/spack/util/executable.py b/lib/spack/spack/util/executable.py
index 63bbbb7c92..1d7f019fdf 100644
--- a/lib/spack/spack/util/executable.py
+++ b/lib/spack/spack/util/executable.py
@@ -22,10 +22,10 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-
import os
import re
import subprocess
+from six import string_types
import llnl.util.tty as tty
import spack
@@ -68,7 +68,7 @@ class Executable(object):
Raise an exception if the subprocess returns an
error. Default is True. When not set, the return code is
- avaiale as `exe.returncode`.
+ available as `exe.returncode`.
ignore_errors
@@ -129,7 +129,7 @@ class Executable(object):
raise ValueError("Cannot use `str` as input stream.")
def streamify(arg, mode):
- if isinstance(arg, basestring):
+ if isinstance(arg, string_types):
return open(arg, mode), True
elif arg is str:
return subprocess.PIPE, False
@@ -178,9 +178,9 @@ class Executable(object):
if output is str or error is str:
result = ''
if output is str:
- result += out
+ result += out.decode('utf-8')
if error is str:
- result += err
+ result += err.decode('utf-8')
return result
except OSError as e:
diff --git a/lib/spack/spack/util/multiproc.py b/lib/spack/spack/util/multiproc.py
index 6a25c45713..91bac57c26 100644
--- a/lib/spack/spack/util/multiproc.py
+++ b/lib/spack/spack/util/multiproc.py
@@ -28,7 +28,6 @@ than multiprocessing.Pool.apply() can. For example, apply() will fail
to pickle functions if they're passed indirectly as parameters.
"""
from multiprocessing import Process, Pipe, Semaphore, Value
-from itertools import izip
__all__ = ['spawn', 'parmap', 'Barrier']
@@ -43,7 +42,7 @@ def spawn(f):
def parmap(f, X):
pipe = [Pipe() for x in X]
proc = [Process(target=spawn(f), args=(c, x))
- for x, (p, c) in izip(X, pipe)]
+ for x, (p, c) in zip(X, pipe)]
[p.start() for p in proc]
[p.join() for p in proc]
return [p.recv() for (p, c) in pipe]
diff --git a/lib/spack/spack/util/naming.py b/lib/spack/spack/util/naming.py
index 9a5cdee411..cd35008aed 100644
--- a/lib/spack/spack/util/naming.py
+++ b/lib/spack/spack/util/naming.py
@@ -27,7 +27,7 @@ from __future__ import absolute_import
import string
import itertools
import re
-from StringIO import StringIO
+from six import StringIO
import spack
@@ -39,6 +39,7 @@ __all__ = [
'validate_fully_qualified_module_name',
'validate_module_name',
'possible_spack_module_names',
+ 'simplify_name',
'NamespaceTrie']
# Valid module names can contain '-' but can't start with it.
@@ -108,6 +109,50 @@ def possible_spack_module_names(python_mod_name):
return results
+def simplify_name(name):
+ """Simplifies a name which may include uppercase letters, periods,
+ underscores, and pluses. In general, we want our package names to
+ only contain lowercase letters, digits, and dashes.
+
+ :param str name: The original name of the package
+ :return: The new name of the package
+ :rtype: str
+ """
+ # Convert CamelCase to Dashed-Names
+ # e.g. ImageMagick -> Image-Magick
+ # e.g. SuiteSparse -> Suite-Sparse
+ # name = re.sub('([a-z])([A-Z])', r'\1-\2', name)
+
+ # Rename Intel downloads
+ # e.g. l_daal, l_ipp, l_mkl -> daal, ipp, mkl
+ if name.startswith('l_'):
+ name = name[2:]
+
+ # Convert UPPERCASE to lowercase
+ # e.g. SAMRAI -> samrai
+ name = name.lower()
+
+ # Replace '_' and '.' with '-'
+ # e.g. backports.ssl_match_hostname -> backports-ssl-match-hostname
+ name = name.replace('_', '-')
+ name = name.replace('.', '-')
+
+ # Replace "++" with "pp" and "+" with "-plus"
+ # e.g. gtk+ -> gtk-plus
+ # e.g. voro++ -> voropp
+ name = name.replace('++', 'pp')
+ name = name.replace('+', '-plus')
+
+ # Simplify Lua package names
+ # We don't want "lua" to occur multiple times in the name
+ name = re.sub('^(lua)([^-])', r'\1-\2', name)
+
+ # Simplify Bio++ package names
+ name = re.sub('^(bpp)([^-])', r'\1-\2', name)
+
+ return name
+
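The transformations compose; for instance (hypothetical calls, mirroring the comments above):

    >>> simplify_name('backports.ssl_match_hostname')
    'backports-ssl-match-hostname'
    >>> simplify_name('gtk+')
    'gtk-plus'
    >>> simplify_name('l_mkl')
    'mkl'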
+
def valid_module_name(mod_name):
"""Return whether mod_name is valid for use in Spack."""
return bool(re.match(_valid_module_re, mod_name))
diff --git a/lib/spack/spack/util/pattern.py b/lib/spack/spack/util/pattern.py
index b5731ccf08..7a1109f2d2 100644
--- a/lib/spack/spack/util/pattern.py
+++ b/lib/spack/spack/util/pattern.py
@@ -61,7 +61,7 @@ def composite(interface=None, method_list=None, container=list):
# Retrieve the base class of the composite. Inspect its methods and
# decide which ones will be overridden
def no_special_no_private(x):
- return inspect.ismethod(x) and not x.__name__.startswith('_')
+ return callable(x) and not x.__name__.startswith('_')
# Patch the behavior of each of the methods in the previous list.
# This is done associating an instance of the descriptor below to
@@ -90,42 +90,25 @@ def composite(interface=None, method_list=None, container=list):
return getter
dictionary_for_type_call = {}
+
# Construct a dictionary with the methods explicitly passed as name
if method_list is not None:
- # python@2.7: method_list_dict = {name: IterateOver(name) for name
- # in method_list}
- method_list_dict = {}
- for name in method_list:
- method_list_dict[name] = IterateOver(name)
- dictionary_for_type_call.update(method_list_dict)
+ dictionary_for_type_call.update(
+ (name, IterateOver(name)) for name in method_list)
+
# Construct a dictionary with the methods inspected from the interface
if interface is not None:
- ##########
- # python@2.7: interface_methods = {name: method for name, method in
- # inspect.getmembers(interface, predicate=no_special_no_private)}
- interface_methods = {}
- for name, method in inspect.getmembers(
- interface, predicate=no_special_no_private):
- interface_methods[name] = method
- ##########
- # python@2.7: interface_methods_dict = {name: IterateOver(name,
- # method) for name, method in interface_methods.iteritems()}
- interface_methods_dict = {}
- for name, method in interface_methods.iteritems():
- interface_methods_dict[name] = IterateOver(name, method)
- ##########
- dictionary_for_type_call.update(interface_methods_dict)
+ dictionary_for_type_call.update(
+ (name, IterateOver(name, method))
+ for name, method in inspect.getmembers(
+ interface, predicate=no_special_no_private))
+
# Get the methods that are defined in the scope of the composite
# class and override any previous definition
- ##########
- # python@2.7: cls_method = {name: method for name, method in
- # inspect.getmembers(cls, predicate=inspect.ismethod)}
- cls_method = {}
- for name, method in inspect.getmembers(
- cls, predicate=inspect.ismethod):
- cls_method[name] = method
- ##########
- dictionary_for_type_call.update(cls_method)
+ dictionary_for_type_call.update(
+ (name, method) for name, method in inspect.getmembers(
+ cls, predicate=inspect.ismethod))
+
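These one-liners rely on dict.update() accepting an iterable of key/value pairs, which works on Python 2.6 where dict comprehensions do not; e.g.:

    >>> d = {}
    >>> d.update((name, name.upper()) for name in ('a', 'b'))
    >>> sorted(d.items())
    [('a', 'A'), ('b', 'B')]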
# Generate the new class on the fly and return it
# FIXME : inherit from interface if we start to use ABC classes?
wrapper_class = type(cls.__name__, (cls, container),
diff --git a/lib/spack/spack/util/prefix.py b/lib/spack/spack/util/prefix.py
index 985d862269..bc6808f350 100644
--- a/lib/spack/spack/util/prefix.py
+++ b/lib/spack/spack/util/prefix.py
@@ -35,11 +35,11 @@ class Prefix(str):
For example, you can do something like this::
prefix = Prefix('/usr')
- print prefix.lib
- print prefix.lib64
- print prefix.bin
- print prefix.share
- print prefix.man4
+ print(prefix.lib)
+ print(prefix.lib64)
+ print(prefix.bin)
+ print(prefix.share)
+ print(prefix.man4)
This program would print:
@@ -52,7 +52,7 @@ class Prefix(str):
Prefix objects behave identically to strings. In fact, they
subclass str. So operators like + are legal:
- print "foobar " + prefix
+ print("foobar " + prefix)
This prints 'foobar /usr'. All of this is meant to make custom
installs easy.
diff --git a/lib/spack/spack/util/spack_json.py b/lib/spack/spack/util/spack_json.py
index 236eef8983..82fa700821 100644
--- a/lib/spack/spack/util/spack_json.py
+++ b/lib/spack/spack/util/spack_json.py
@@ -23,7 +23,11 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""Simple wrapper around JSON to guarantee consistent use of load/dump. """
+import sys
import json
+from six import string_types
+from six import iteritems
+
import spack.error
__all__ = ['load', 'dump', 'SpackJSONError']
@@ -36,12 +40,12 @@ _json_dump_args = {
def load(stream):
"""Spack JSON needs to be ordered to support specs."""
- if isinstance(stream, basestring):
- return _byteify(json.loads(stream, object_hook=_byteify),
- ignore_dicts=True)
+ if isinstance(stream, string_types):
+ load = json.loads
else:
- return _byteify(json.load(stream, object_hook=_byteify),
- ignore_dicts=True)
+ load = json.load
+
+ return _strify(load(stream, object_hook=_strify), ignore_dicts=True)
def dump(data, stream=None):
@@ -52,19 +56,23 @@ def dump(data, stream=None):
return json.dump(data, stream, **_json_dump_args)
-def _byteify(data, ignore_dicts=False):
- # if this is a unicode string, return its string representation
- if isinstance(data, unicode):
- return data.encode('utf-8')
+def _strify(data, ignore_dicts=False):
+ # if this is a unicode string in python 2, return its string representation
+ if sys.version_info[0] < 3:
+ if isinstance(data, unicode):
+ return data.encode('utf-8')
+
# if this is a list of values, return list of byteified values
if isinstance(data, list):
- return [_byteify(item, ignore_dicts=True) for item in data]
+ return [_strify(item, ignore_dicts=True) for item in data]
+
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
if isinstance(data, dict) and not ignore_dicts:
- return dict((_byteify(key, ignore_dicts=True),
- _byteify(value, ignore_dicts=True)) for key, value in
- data.iteritems())
+ return dict((_strify(key, ignore_dicts=True),
+ _strify(value, ignore_dicts=True)) for key, value in
+ iteritems(data))
+
# if it's anything else, return it in its original form
return data
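Under Python 2 the effect is, hypothetically:

    # _strify({u'compilers': [u'gcc']}) == {'compilers': ['gcc']}
    # Under Python 3, str is already unicode, so values pass through unchanged.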
@@ -72,5 +80,5 @@ def _byteify(data, ignore_dicts=False):
class SpackJSONError(spack.error.SpackError):
"""Raised when there are issues with JSON parsing."""
- def __init__(self, msg, yaml_error):
- super(SpackJSONError, self).__init__(msg, str(yaml_error))
+ def __init__(self, msg, json_error):
+ super(SpackJSONError, self).__init__(msg, str(json_error))
diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py
index 9d4c607908..6533004392 100644
--- a/lib/spack/spack/util/spack_yaml.py
+++ b/lib/spack/spack/util/spack_yaml.py
@@ -85,13 +85,7 @@ class OrderedLineLoader(Loader):
def construct_yaml_str(self, node):
value = self.construct_scalar(node)
- try:
- value = value.encode('ascii')
- except UnicodeEncodeError:
- pass
-
value = syaml_str(value)
-
mark(value, node)
return value
@@ -137,7 +131,7 @@ class OrderedLineLoader(Loader):
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
- except TypeError, exc:
+ except TypeError as exc:
raise ConstructorError(
"while constructing a mapping", node.start_mark,
"found unacceptable key (%s)" % exc, key_node.start_mark)
@@ -154,11 +148,11 @@ class OrderedLineLoader(Loader):
# register above new constructors
OrderedLineLoader.add_constructor(
- u'tag:yaml.org,2002:map', OrderedLineLoader.construct_yaml_map)
+ 'tag:yaml.org,2002:map', OrderedLineLoader.construct_yaml_map)
OrderedLineLoader.add_constructor(
- u'tag:yaml.org,2002:seq', OrderedLineLoader.construct_yaml_seq)
+ 'tag:yaml.org,2002:seq', OrderedLineLoader.construct_yaml_seq)
OrderedLineLoader.add_constructor(
- u'tag:yaml.org,2002:str', OrderedLineLoader.construct_yaml_str)
+ 'tag:yaml.org,2002:str', OrderedLineLoader.construct_yaml_str)
class OrderedLineDumper(Dumper):
@@ -181,7 +175,7 @@ class OrderedLineDumper(Dumper):
# if it's a syaml_dict, preserve OrderedDict order.
# Otherwise do the default thing.
sort = not isinstance(mapping, syaml_dict)
- mapping = mapping.items()
+ mapping = list(mapping.items())
if sort:
mapping.sort()
diff --git a/lib/spack/spack/util/web.py b/lib/spack/spack/util/web.py
index 935532266f..f803c6cea3 100644
--- a/lib/spack/spack/util/web.py
+++ b/lib/spack/spack/util/web.py
@@ -25,10 +25,23 @@
import re
import os
import sys
-import urllib2
-import urlparse
-from multiprocessing import Pool
-from HTMLParser import HTMLParser, HTMLParseError
+import traceback
+
+from six.moves.urllib.request import urlopen, Request
+from six.moves.urllib.error import URLError
+from six.moves.urllib.parse import urljoin
+import multiprocessing.pool
+
+try:
+ # Python 2 had these in the HTMLParser package.
+ from HTMLParser import HTMLParser, HTMLParseError
+except ImportError:
+ # In Python 3, things moved to html.parser
+ from html.parser import HTMLParser
+
+ # Also, HTMLParseError is deprecated and never raised.
+ class HTMLParseError(Exception):
+ pass
import llnl.util.tty as tty
@@ -55,34 +68,51 @@ class LinkParser(HTMLParser):
self.links.append(val)
-def _spider(args):
- """_spider(url, depth, max_depth)
+class NonDaemonProcess(multiprocessing.Process):
+ """Process tha allows sub-processes, so pools can have sub-pools."""
+ def _get_daemon(self):
+ return False
- Fetches URL and any pages it links to up to max_depth. depth should
- initially be 1, and max_depth includes the root. This function will
- print out a warning only if the root can't be fetched; it ignores
- errors with pages that the root links to.
+ def _set_daemon(self, value):
+ pass
- This will return a list of the pages fetched, in no particular order.
+ daemon = property(_get_daemon, _set_daemon)
- Takes args as a tuple b/c it's intended to be used by a multiprocessing
- pool. Firing off all the child links at once makes the fetch MUCH
- faster for pages with lots of children.
- """
- url, visited, root, opener, depth, max_depth, raise_on_error = args
+class NonDaemonPool(multiprocessing.pool.Pool):
+ """Pool that uses non-daemon processes"""
+ Process = NonDaemonProcess
+
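This override matters because daemonic workers cannot spawn children; a note on the failure mode it avoids:

    # With a plain multiprocessing.Pool, a worker that tries to start its own
    # pool raises AssertionError: daemonic processes are not allowed to have
    # children. Forcing daemon=False lifts that restriction for the recursive
    # fetches below.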
+
+def _spider(url, visited, root, depth, max_depth, raise_on_error):
+ """Fetches URL and any pages it links to up to max_depth.
+
+ depth should initially be zero, and max_depth is the max depth of
+ links to follow from the root.
+
+ Prints out a warning only if the root can't be fetched; it ignores
+ errors with pages that the root links to.
+
+ Returns a tuple of:
+ - pages: dict of pages visited (URL) mapped to their full text.
+ - links: set of links encountered while visiting the pages.
+ """
pages = {} # dict from page URL -> text content.
links = set() # set of all links seen on visited pages.
+ # root may end with index.html -- chop that off.
+ if root.endswith('/index.html'):
+ root = re.sub('/index.html$', '', root)
+
try:
# Make a HEAD request first to check the content type. This lets
# us ignore tarballs and gigantic files.
# It would be nice to do this with the HTTP Accept header to avoid
# one round-trip. However, most servers seem to ignore the header
# if you ask for a tarball with Accept: text/html.
- req = urllib2.Request(url)
+ req = Request(url)
req.get_method = lambda: "HEAD"
- resp = urllib2.urlopen(req, timeout=TIMEOUT)
+ resp = urlopen(req, timeout=TIMEOUT)
if "Content-type" not in resp.headers:
tty.debug("ignoring page " + url)
@@ -95,11 +125,11 @@ def _spider(args):
# Do the real GET request when we know it's just HTML.
req.get_method = lambda: "GET"
- response = urllib2.urlopen(req, timeout=TIMEOUT)
+ response = urlopen(req, timeout=TIMEOUT)
response_url = response.geturl()
# Read the page and stick it in the map we'll return
- page = response.read()
+ page = response.read().decode('utf-8')
pages[response_url] = page
# Parse out the links in the page
@@ -109,7 +139,7 @@ def _spider(args):
while link_parser.links:
raw_link = link_parser.links.pop()
- abs_link = urlparse.urljoin(response_url, raw_link.strip())
+ abs_link = urljoin(response_url, raw_link.strip())
links.add(abs_link)
@@ -127,22 +157,24 @@ def _spider(args):
# If we're not at max depth, follow links.
if depth < max_depth:
- subcalls.append((abs_link, visited, root, None,
+ subcalls.append((abs_link, visited, root,
depth + 1, max_depth, raise_on_error))
visited.add(abs_link)
if subcalls:
+ pool = NonDaemonPool(processes=len(subcalls))
try:
- pool = Pool(processes=len(subcalls))
- results = pool.map(_spider, subcalls)
+ results = pool.map(_spider_wrapper, subcalls)
+
for sub_pages, sub_links in results:
pages.update(sub_pages)
links.update(sub_links)
+
finally:
pool.terminate()
pool.join()
- except urllib2.URLError as e:
+ except URLError as e:
tty.debug(e)
if raise_on_error:
raise spack.error.NoNetworkConnectionError(str(e), url)
@@ -159,46 +191,53 @@ def _spider(args):
except Exception as e:
# Other types of errors are completely ignored, except in debug mode.
- tty.debug("Error in _spider: %s" % e)
+ tty.debug("Error in _spider: %s:%s" % (type(e), e),
+ traceback.format_exc())
return pages, links
-def spider(root_url, **kwargs):
+def _spider_wrapper(args):
+ """Wrapper for using spider with multiprocessing."""
+ return _spider(*args)
+
+
+def spider(root_url, depth=0):
+
"""Gets web pages from a root URL.
- If depth is specified (e.g., depth=2), then this will also fetches pages
- linked from the root and its children up to depth.
+
+ If depth is specified (e.g., depth=2), then this will also follow
+ up to <depth> levels of links from the root.
This will spawn processes to fetch the children, for much improved
performance over a sequential fetch.
+
"""
- max_depth = kwargs.setdefault('depth', 1)
- pages, links = _spider((root_url, set(), root_url, None,
- 1, max_depth, False))
+ pages, links = _spider(root_url, set(), root_url, 0, depth, False)
return pages, links
-def find_versions_of_archive(*archive_urls, **kwargs):
+def find_versions_of_archive(archive_urls, list_url=None, list_depth=0):
"""Scrape web pages for new versions of a tarball.
Arguments:
archive_urls:
- URLs for different versions of a package. Typically these
- are just the tarballs from the package file itself. By
- default, this searches the parent directories of archives.
+ URL or sequence of URLs for different versions of a
+ package. Typically these are just the tarballs from the package
+ file itself. By default, this searches the parent directories
+ of archives.
Keyword Arguments:
list_url:
-
URL for a listing of archives. Spack will scrape these
pages for download links that look like the archive URL.
list_depth:
- Max depth to follow links on list_url pages.
+ Max depth to follow links on list_url pages. Default 0.
"""
- list_url = kwargs.get('list_url', None)
- list_depth = kwargs.get('list_depth', 1)
+ if not isinstance(archive_urls, (list, tuple)):
+ archive_urls = [archive_urls]
# Generate a list of list_urls based on archive urls and any
# explicitly listed list_url in the package
@@ -229,6 +268,14 @@ def find_versions_of_archive(*archive_urls, **kwargs):
# part, not the full path.
url_regex = os.path.basename(url_regex)
+ # We need to add a / to the beginning of the regex to prevent
+ # Spack from picking up similarly named packages like:
+ # https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz
+ # https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz
+ # https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz
+ # https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz
+ url_regex = '/' + url_regex
+
# We need to add a $ anchor to the end of the regex to prevent
# Spack from picking up signature files like:
# .asc
@@ -236,7 +283,9 @@ def find_versions_of_archive(*archive_urls, **kwargs):
# .sha256
# .sig
# However, SourceForge downloads still need to end in '/download'.
- regexes.append(url_regex + '(\/download)?$')
+ url_regex += '(\/download)?$'
+
+ regexes.append(url_regex)
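Concretely, for the CRAN example above the final pattern ends up something like this (modulo extra escaping from re.escape()):

    # '/pls_(\d.*)\.tar\.gz(\/download)?$'
    # This still matches .../src/contrib/pls_2.6-0.tar.gz but no longer
    # matches autopls_1.3.tar.gz or matrixpls_1.0.4.tar.gz.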
# Build a dict version -> URL from any links that match the wildcards.
versions = {}
diff --git a/lib/spack/spack/version.py b/lib/spack/spack/version.py
index 0d68a709e8..89fcc9aaa7 100644
--- a/lib/spack/spack/version.py
+++ b/lib/spack/spack/version.py
@@ -47,8 +47,8 @@ import re
import numbers
from bisect import bisect_left
from functools import wraps
+from six import string_types
-from functools_backport import total_ordering
from spack.util.spack_yaml import syaml_dict
__all__ = ['Version', 'VersionRange', 'VersionList', 'ver']
@@ -111,7 +111,6 @@ def _numeric_lt(self0, other):
"""Compares two versions, knowing they're both numeric"""
-@total_ordering
class Version(object):
"""Class to represent versions"""
@@ -195,52 +194,29 @@ class Version(object):
nother = len(other.version)
return nother <= nself and self.version[:nother] == other.version
- def wildcard(self):
- """Create a regex that will match variants of this version string."""
- def a_or_n(seg):
- if type(seg) == int:
- return r'[0-9]+'
- else:
- return r'[a-zA-Z]+'
-
- version = self.version
-
- # Use a wildcard for separators, in case a version is written
- # two different ways (e.g., boost writes 1_55_0 and 1.55.0)
- sep_re = '[_.-]'
- separators = ('',) + (sep_re,) * len(self.separators)
-
- version += (version[-1],) * 2
- separators += (sep_re,) * 2
-
- segments = [a_or_n(seg) for seg in version]
-
- wc = segments[0]
- for i in xrange(1, len(separators)):
- wc += '(?:' + separators[i] + segments[i]
-
- # Add possible alpha or beta indicator at the end of each segemnt
- # We treat these specially b/c they're so common.
- wc += '(?:[a-z]|alpha|beta)?)?' * (len(segments) - 1)
- return wc
-
def __iter__(self):
return iter(self.version)
def __getitem__(self, idx):
cls = type(self)
+
if isinstance(idx, numbers.Integral):
return self.version[idx]
+
elif isinstance(idx, slice):
# Currently len(self.separators) == len(self.version) - 1
extendend_separators = self.separators + ('',)
string_arg = []
- for token, sep in zip(self.version, extendend_separators)[idx]:
+
+ pairs = zip(self.version[idx], extendend_separators[idx])
+ for token, sep in pairs:
string_arg.append(str(token))
string_arg.append(str(sep))
+
string_arg.pop() # We don't need the last separator
string_arg = ''.join(string_arg)
return cls(string_arg)
+
message = '{cls.__name__} indices must be integers'
raise TypeError(message.format(cls=cls))
@@ -323,9 +299,22 @@ class Version(object):
return (other is not None and
type(other) == Version and self.version == other.version)
+ @coerced
def __ne__(self, other):
return not (self == other)
+ @coerced
+ def __le__(self, other):
+ return self == other or self < other
+
+ @coerced
+ def __ge__(self, other):
+ return not (self < other)
+
+ @coerced
+ def __gt__(self, other):
+ return not (self == other) and not (self < other)
+
def __hash__(self):
return hash(self.version)
@@ -371,13 +360,12 @@ class Version(object):
return VersionList()
-@total_ordering
class VersionRange(object):
def __init__(self, start, end):
- if isinstance(start, basestring):
+ if isinstance(start, string_types):
start = Version(start)
- if isinstance(end, basestring):
+ if isinstance(end, string_types):
end = Version(end)
self.start = start
@@ -414,9 +402,22 @@ class VersionRange(object):
type(other) == VersionRange and
self.start == other.start and self.end == other.end)
+ @coerced
def __ne__(self, other):
return not (self == other)
+ @coerced
+ def __le__(self, other):
+ return self == other or self < other
+
+ @coerced
+ def __ge__(self, other):
+ return not (self < other)
+
+ @coerced
+ def __gt__(self, other):
+ return not (self == other) and not (self < other)
+
@property
def concrete(self):
return self.start if self.start == self.end else None
@@ -561,14 +562,13 @@ class VersionRange(object):
return out
-@total_ordering
class VersionList(object):
"""Sorted, non-redundant list of Versions and VersionRanges."""
def __init__(self, vlist=None):
self.versions = []
if vlist is not None:
- if isinstance(vlist, basestring):
+ if isinstance(vlist, string_types):
vlist = _string_to_version(vlist)
if type(vlist) == VersionList:
self.versions = vlist.versions
@@ -754,6 +754,7 @@ class VersionList(object):
def __eq__(self, other):
return other is not None and self.versions == other.versions
+ @coerced
def __ne__(self, other):
return not (self == other)
@@ -761,6 +762,18 @@ class VersionList(object):
def __lt__(self, other):
return other is not None and self.versions < other.versions
+ @coerced
+ def __le__(self, other):
+ return self == other or self < other
+
+ @coerced
+ def __ge__(self, other):
+ return not (self < other)
+
+ @coerced
+ def __gt__(self, other):
+ return not (self == other) and not (self < other)
+
def __hash__(self):
return hash(tuple(self.versions))
@@ -796,7 +809,7 @@ def ver(obj):
"""
if isinstance(obj, (list, tuple)):
return VersionList(obj)
- elif isinstance(obj, basestring):
+ elif isinstance(obj, string_types):
return _string_to_version(obj)
elif isinstance(obj, (int, float)):
return _string_to_version(str(obj))