From a2f90453f4d1ac1372f717911bd08e33d801ff31 Mon Sep 17 00:00:00 2001
From: Gregory Becker
Date: Thu, 26 May 2016 20:30:05 -0700
Subject: Fixed backwards compatibility for compilers.yaml config file

---
 bin/spack | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'bin')

diff --git a/bin/spack b/bin/spack
index 3544feb10a..9b1276a866 100755
--- a/bin/spack
+++ b/bin/spack
@@ -138,6 +138,9 @@ def main():
         import spack.util.debug as debug
         debug.register_interrupt_handler()
 
+    from spack.yaml_version_check import check_yaml_versions
+    check_yaml_versions()
+
     spack.spack_working_dir = working_dir
     if args.mock:
         from spack.repository import RepoPath
--
cgit v1.2.3-60-g2f50

From 4952c4c7def96caef2b47160f8b9e08392ae348d Mon Sep 17 00:00:00 2001
From: Orion Poplawski
Date: Wed, 8 Jun 2016 14:22:26 -0600
Subject: Also remove from external from bin/spack

---
 bin/spack | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'bin')

diff --git a/bin/spack b/bin/spack
index 3544feb10a..dac48437a6 100755
--- a/bin/spack
+++ b/bin/spack
@@ -77,7 +77,7 @@ import llnl.util.tty as tty
 from llnl.util.tty.color import *
 import spack
 from spack.error import SpackError
-from external import argparse
+import argparse
 
 # Command parsing
 parser = argparse.ArgumentParser(
--
cgit v1.2.3-60-g2f50

From 27e9bc6d028e3ee8b4890c226594e25bcfc370eb Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Thu, 23 Jun 2016 00:03:23 -0700
Subject: Make sbang handle lua - use --! instead of #! for patched lua scripts.

---
 bin/sbang                      | 11 +++++++++++
 lib/spack/spack/hooks/sbang.py |  7 ++++++-
 lib/spack/spack/test/sbang.py  | 23 ++++++++++++++++++-----
 3 files changed, 35 insertions(+), 6 deletions(-)

(limited to 'bin')

diff --git a/bin/sbang b/bin/sbang
index f6b6d35e8a..1ea5f06592 100755
--- a/bin/sbang
+++ b/bin/sbang
@@ -79,6 +79,15 @@
 # Obviously, for this to work, `sbang` needs to have a short enough
 # path that *it* will run without hitting OS limits.
 #
+# For Lua, scripts the second line can't start with #!, as # is not
+# the comment character in lua (even though lua ignores #! on the
+# *first* line of a script). So, instrument a lua script like this,
+# using -- instead of # on the second line:
+#
+#    1    #!/bin/bash /path/to/sbang
+#    2    --!/long/path/to/lua with arguments
+#    3
+#    4    print "success!"
 #
 # How it works
 # -----------------------------
@@ -95,6 +104,8 @@ lines=0
 while read line && ((lines < 2)) ; do
     if [[ "$line" = '#!'* ]]; then
         interpreter="${line#\#!}"
+    elif [[ "$line" = '--!'*lua* ]]; then
+        interpreter="${line#--!}"
     fi
     lines=$((lines+1))
 done < "$script"
diff --git a/lib/spack/spack/hooks/sbang.py b/lib/spack/spack/hooks/sbang.py
index cb0ad42b14..3a957c6e0e 100644
--- a/lib/spack/spack/hooks/sbang.py
+++ b/lib/spack/spack/hooks/sbang.py
@@ -23,6 +23,7 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
 import os
+import re
 
 import llnl.util.tty as tty
 
@@ -57,11 +58,15 @@ def filter_shebang(path):
     if original.startswith(new_sbang_line):
         return
 
+    # Use --! instead of #! on second line for lua.
+    if re.search(r'^#!(/[^/]*)*lua\b', original):
+        original = re.sub(r'^#', '--', original)
+
     with open(path, 'w') as new_file:
         new_file.write(new_sbang_line)
         new_file.write(original)
 
-    tty.warn("Patched overly long shebang in %s" % path)
+    tty.warn("Patched overlong shebang in %s" % path)
 
 
 def filter_shebangs_in_directory(directory):
diff --git a/lib/spack/spack/test/sbang.py b/lib/spack/spack/test/sbang.py
index 6aea1a68c7..ed54ff90b0 100644
--- a/lib/spack/spack/test/sbang.py
+++ b/lib/spack/spack/test/sbang.py
@@ -34,10 +34,12 @@ from llnl.util.filesystem import *
 from spack.hooks.sbang import filter_shebangs_in_directory
 import spack
 
-short_line = "#!/this/is/short/bin/bash\n"
-long_line = "#!/this/" + ('x' * 200) + "/is/long\n"
-sbang_line = '#!/bin/bash %s/bin/sbang\n' % spack.spack_root
-last_line = "last!\n"
+short_line = "#!/this/is/short/bin/bash\n"
+long_line = "#!/this/" + ('x' * 200) + "/is/long\n"
+lua_line = "#!/this/" + ('x' * 200) + "/is/lua\n"
+lua_line_patched = "--!/this/" + ('x' * 200) + "/is/lua\n"
+sbang_line = '#!/bin/bash %s/bin/sbang\n' % spack.spack_root
+last_line = "last!\n"
 
 class SbangTest(unittest.TestCase):
     def setUp(self):
@@ -59,6 +61,12 @@ class SbangTest(unittest.TestCase):
             f.write(long_line)
             f.write(last_line)
 
+        # Lua script with long shebang
+        self.lua_shebang = os.path.join(self.tempdir, 'lua')
+        with open(self.lua_shebang, 'w') as f:
+            f.write(lua_line)
+            f.write(last_line)
+
         # Script already using sbang.
         self.has_shebang = os.path.join(self.tempdir, 'shebang')
         with open(self.has_shebang, 'w') as f:
@@ -71,7 +79,6 @@ class SbangTest(unittest.TestCase):
         shutil.rmtree(self.tempdir, ignore_errors=True)
 
 
-
     def test_shebang_handling(self):
         filter_shebangs_in_directory(self.tempdir)
 
@@ -86,6 +93,12 @@ class SbangTest(unittest.TestCase):
             self.assertEqual(f.readline(), long_line)
             self.assertEqual(f.readline(), last_line)
 
+        # Make sure this got patched.
+        with open(self.lua_shebang, 'r') as f:
+            self.assertEqual(f.readline(), sbang_line)
+            self.assertEqual(f.readline(), lua_line_patched)
+            self.assertEqual(f.readline(), last_line)
+
         # Make sure this is untouched
         with open(self.has_shebang, 'r') as f:
             self.assertEqual(f.readline(), sbang_line)
--
cgit v1.2.3-60-g2f50

From bf1072c9022cd161b9cc4860e5403a463bc0e05b Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 9 Aug 2016 13:23:53 -0700
Subject: Make Spack core PEP8 compliant.

--- .flake8 | 2 +- bin/spack | 15 +- lib/spack/llnl/util/filesystem.py | 16 ++- lib/spack/llnl/util/lang.py | 79 +++++++---- lib/spack/llnl/util/link_tree.py | 10 +- lib/spack/llnl/util/lock.py | 3 + lib/spack/llnl/util/tty/__init__.py | 12 +- lib/spack/llnl/util/tty/colify.py | 32 +++-- lib/spack/llnl/util/tty/color.py | 29 ++-- lib/spack/llnl/util/tty/log.py | 16 +-- lib/spack/spack/abi.py | 25 ++-- lib/spack/spack/architecture.py | 1 + lib/spack/spack/cmd/__init__.py | 2 +- lib/spack/spack/cmd/activate.py | 4 +- lib/spack/spack/cmd/arch.py | 2 +- lib/spack/spack/cmd/cd.py | 3 +- lib/spack/spack/cmd/clean.py | 1 + lib/spack/spack/cmd/common/arguments.py | 4 +- lib/spack/spack/cmd/compiler.py | 65 +++++---- lib/spack/spack/cmd/compilers.py | 6 +- lib/spack/spack/cmd/config.py | 10 +- lib/spack/spack/cmd/create.py | 1 + lib/spack/spack/cmd/deactivate.py | 11 +- lib/spack/spack/cmd/dependents.py | 7 +- lib/spack/spack/cmd/diy.py | 8 +- lib/spack/spack/cmd/doc.py | 1 + lib/spack/spack/cmd/edit.py | 14 +- lib/spack/spack/cmd/env.py | 8 +- lib/spack/spack/cmd/extensions.py | 11 +- lib/spack/spack/cmd/fetch.py | 12 +- lib/spack/spack/cmd/graph.py | 13 +- lib/spack/spack/cmd/help.py | 4 +- lib/spack/spack/cmd/install.py | 7 +- lib/spack/spack/cmd/load.py | 7 +- lib/spack/spack/cmd/location.py | 29 ++-- lib/spack/spack/cmd/mirror.py | 25 ++-- lib/spack/spack/cmd/module.py | 17 ++- lib/spack/spack/cmd/package-list.py | 2 +- lib/spack/spack/cmd/patch.py | 6 +- lib/spack/spack/cmd/pkg.py | 53 ++++--- lib/spack/spack/cmd/providers.py | 9 +- lib/spack/spack/cmd/python.py | 10 +- lib/spack/spack/cmd/reindex.py | 2 +- lib/spack/spack/cmd/repo.py | 29 ++-- lib/spack/spack/cmd/restage.py | 1 + lib/spack/spack/cmd/setup.py | 9 +- lib/spack/spack/cmd/spec.py | 13 +- lib/spack/spack/cmd/stage.py | 4 +- lib/spack/spack/cmd/test-install.py | 61 ++++---- lib/spack/spack/cmd/test.py | 2 + lib/spack/spack/cmd/uninstall.py | 20 +-- lib/spack/spack/cmd/unload.py | 6 +- lib/spack/spack/cmd/unuse.py | 6 +- lib/spack/spack/cmd/url-parse.py | 14 +- lib/spack/spack/cmd/urls.py | 5 +- lib/spack/spack/cmd/use.py | 6 +- lib/spack/spack/cmd/versions.py | 7 +- lib/spack/spack/compiler.py | 58 ++++---- lib/spack/spack/compilers/__init__.py | 68 ++++----- lib/spack/spack/compilers/clang.py | 12 +- lib/spack/spack/compilers/craype.py | 38 +++-- lib/spack/spack/compilers/gcc.py | 10 +- lib/spack/spack/compilers/intel.py | 10 +- lib/spack/spack/compilers/nag.py | 18 +-- lib/spack/spack/compilers/pgi.py | 13 +- lib/spack/spack/compilers/xl.py | 50 +++---- lib/spack/spack/concretize.py | 16 ++- lib/spack/spack/config.py | 130 ++++++++--------- lib/spack/spack/database.py | 3 + lib/spack/spack/directives.py | 5 +- lib/spack/spack/directory_layout.py | 85 +++++------- lib/spack/spack/environment.py | 17 ++- lib/spack/spack/error.py | 8 +- lib/spack/spack/fetch_strategy.py | 2 + lib/spack/spack/file_cache.py | 2 + lib/spack/spack/graph.py | 21 +-- lib/spack/spack/hooks/__init__.py | 2 + lib/spack/spack/hooks/extensions.py | 2 - lib/spack/spack/mirror.py | 27 ++-- lib/spack/spack/modules.py | 17 ++- lib/spack/spack/multimethod.py | 15 +- lib/spack/spack/operating_systems/cnl.py | 1 + lib/spack/spack/operating_systems/linux_distro.py | 2 + lib/spack/spack/operating_systems/mac_os.py | 1 + lib/spack/spack/package.py | 15 +- lib/spack/spack/parse.py | 20 ++- lib/spack/spack/patch.py | 4 +- lib/spack/spack/platforms/bgq.py | 2 +- lib/spack/spack/platforms/darwin.py | 3 +- lib/spack/spack/platforms/linux.py | 3 +- 
lib/spack/spack/platforms/test.py | 27 +++- lib/spack/spack/preferred_packages.py | 2 +- lib/spack/spack/provider_index.py | 1 + lib/spack/spack/repository.py | 7 + lib/spack/spack/resource.py | 6 +- lib/spack/spack/spec.py | 23 ++-- lib/spack/spack/test/architecture.py | 1 + lib/spack/spack/test/cc.py | 64 ++++----- lib/spack/spack/test/cmd/module.py | 14 +- lib/spack/spack/test/cmd/test_compiler_cmd.py | 7 +- lib/spack/spack/test/cmd/uninstall.py | 2 + lib/spack/spack/test/config.py | 61 ++++---- lib/spack/spack/test/database.py | 1 + lib/spack/spack/test/directory_layout.py | 13 +- lib/spack/spack/test/environment.py | 3 +- lib/spack/spack/test/git_fetch.py | 18 +-- lib/spack/spack/test/hg_fetch.py | 8 +- lib/spack/spack/test/install.py | 8 +- lib/spack/spack/test/link_tree.py | 6 - lib/spack/spack/test/lock.py | 2 + lib/spack/spack/test/make_executable.py | 26 ++-- lib/spack/spack/test/mirror.py | 17 ++- lib/spack/spack/test/mock_database.py | 1 + lib/spack/spack/test/mock_packages_test.py | 8 +- lib/spack/spack/test/mock_repo.py | 6 +- lib/spack/spack/test/multimethod.py | 17 +-- lib/spack/spack/test/namespace_trie.py | 6 - lib/spack/spack/test/operating_system.py | 32 ++++- lib/spack/spack/test/optional_deps.py | 36 ++--- lib/spack/spack/test/package_sanity.py | 3 - lib/spack/spack/test/packages.py | 49 +++---- lib/spack/spack/test/pattern.py | 2 + lib/spack/spack/test/python_version.py | 11 +- lib/spack/spack/test/sbang.py | 1 + lib/spack/spack/test/spec_dag.py | 1 + lib/spack/spack/test/spec_semantics.py | 161 +++++++++++----------- lib/spack/spack/test/spec_syntax.py | 132 ++++++++++++------ lib/spack/spack/test/stage.py | 28 +--- lib/spack/spack/test/svn_fetch.py | 8 +- lib/spack/spack/test/tally_plugin.py | 1 + lib/spack/spack/test/url_extrapolate.py | 20 ++- lib/spack/spack/test/url_parse.py | 7 +- lib/spack/spack/test/url_substitution.py | 30 ++-- lib/spack/spack/test/yaml.py | 12 +- lib/spack/spack/url.py | 79 +++++++---- lib/spack/spack/util/compression.py | 4 +- lib/spack/spack/util/crypto.py | 11 +- lib/spack/spack/util/debug.py | 5 +- lib/spack/spack/util/executable.py | 2 +- lib/spack/spack/util/multiproc.py | 18 ++- lib/spack/spack/util/naming.py | 35 ++--- lib/spack/spack/util/pattern.py | 10 +- lib/spack/spack/util/prefix.py | 1 + lib/spack/spack/util/spack_yaml.py | 57 +++++--- lib/spack/spack/util/string.py | 4 +- lib/spack/spack/util/web.py | 1 + lib/spack/spack/variant.py | 2 + lib/spack/spack/version.py | 6 +- lib/spack/spack/yaml_version_check.py | 4 +- share/spack/qa/run-flake8 | 17 ++- 150 files changed, 1439 insertions(+), 1163 deletions(-) (limited to 'bin') diff --git a/.flake8 b/.flake8 index 286522bc48..b178a2da57 100644 --- a/.flake8 +++ b/.flake8 @@ -19,5 +19,5 @@ # - F999: name name be undefined or undefined from star imports. # [flake8] -ignore = E221,E241,E731,F403,F821,F999,F405 +ignore = E129,E221,E241,E272,E731,F403,F821,F999,F405 max-line-length = 79 diff --git a/bin/spack b/bin/spack index e9307d1485..9fed11f33b 100755 --- a/bin/spack +++ b/bin/spack @@ -1,4 +1,5 @@ #!/usr/bin/env python +# flake8: noqa ############################################################################## # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. 
@@ -24,9 +25,10 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import sys -if not sys.version_info[:2] >= (2,6): +if not sys.version_info[:2] >= (2, 6): v_info = sys.version_info[:3] - sys.exit("Spack requires Python 2.6 or higher. This is Python %d.%d.%d." % v_info) + sys.exit("Spack requires Python 2.6 or higher. " + "This is Python %d.%d.%d." % v_info) import os @@ -62,7 +64,8 @@ for pyc_file in orphaned_pyc_files: try: os.remove(pyc_file) except OSError as e: - print "WARNING: Spack may fail mysteriously. Couldn't remove orphaned .pyc file: %s" % pyc_file + print ("WARNING: Spack may fail mysteriously. " + "Couldn't remove orphaned .pyc file: %s" % pyc_file) # If there is no working directory, use the spack prefix. try: @@ -128,6 +131,7 @@ if len(sys.argv) == 1: # actually parse the args. args = parser.parse_args() + def main(): # Set up environment based on args. tty.set_verbose(args.verbose) @@ -148,7 +152,7 @@ def main(): # If the user asked for it, don't check ssl certs. if args.insecure: - tty.warn("You asked for --insecure, which does not check SSL certificates.") + tty.warn("You asked for --insecure. Will NOT check SSL certificates.") spack.curl.add_default_arg('-k') # Try to load the particular command asked for and run it @@ -167,7 +171,8 @@ def main(): elif isinstance(return_val, int): sys.exit(return_val) else: - tty.die("Bad return value from command %s: %s" % (args.command, return_val)) + tty.die("Bad return value from command %s: %s" + % (args.command, return_val)) if args.profile: import cProfile diff --git a/lib/spack/llnl/util/filesystem.py b/lib/spack/llnl/util/filesystem.py index 4cf99163e0..22ca85abf9 100644 --- a/lib/spack/llnl/util/filesystem.py +++ b/lib/spack/llnl/util/filesystem.py @@ -106,6 +106,7 @@ def filter_file(regex, repl, *filenames, **kwargs): class FileFilter(object): """Convenience class for calling filter_file a lot.""" + def __init__(self, *filenames): self.filenames = filenames @@ -355,7 +356,8 @@ def traverse_tree(source_root, dest_root, rel_path='', **kwargs): # When follow_nonexisting isn't set, don't descend into dirs # in source that do not exist in dest if follow_nonexisting or os.path.exists(dest_child): - tuples = traverse_tree(source_root, dest_root, rel_child, **kwargs) # NOQA: ignore=E501 + tuples = traverse_tree( + source_root, dest_root, rel_child, **kwargs) for t in tuples: yield t @@ -422,14 +424,20 @@ def fix_darwin_install_name(path): libs = glob.glob(join_path(path, "*.dylib")) for lib in libs: # fix install name first: - subprocess.Popen(["install_name_tool", "-id", lib, lib], stdout=subprocess.PIPE).communicate()[0] # NOQA: ignore=E501 - long_deps = subprocess.Popen(["otool", "-L", lib], stdout=subprocess.PIPE).communicate()[0].split('\n') # NOQA: ignore=E501 + subprocess.Popen( + ["install_name_tool", "-id", lib, lib], + stdout=subprocess.PIPE).communicate()[0] + long_deps = subprocess.Popen( + ["otool", "-L", lib], + stdout=subprocess.PIPE).communicate()[0].split('\n') deps = [dep.partition(' ')[0][1::] for dep in long_deps[2:-1]] # fix all dependencies: for dep in deps: for loc in libs: if dep == os.path.basename(loc): - subprocess.Popen(["install_name_tool", "-change", dep, loc, lib], stdout=subprocess.PIPE).communicate()[0] # NOQA: ignore=E501 + subprocess.Popen( + ["install_name_tool", "-change", dep, loc, lib], + stdout=subprocess.PIPE).communicate()[0] break diff --git a/lib/spack/llnl/util/lang.py 
b/lib/spack/llnl/util/lang.py index 63eb08d803..df32012e2d 100644 --- a/lib/spack/llnl/util/lang.py +++ b/lib/spack/llnl/util/lang.py @@ -24,7 +24,6 @@ ############################################################################## import os import re -import sys import functools import collections import inspect @@ -39,14 +38,15 @@ def index_by(objects, *funcs): Values are used as keys. For example, suppose you have four objects with attributes that look like this: - a = Spec(name="boost", compiler="gcc", arch="bgqos_0") - b = Spec(name="mrnet", compiler="intel", arch="chaos_5_x86_64_ib") - c = Spec(name="libelf", compiler="xlc", arch="bgqos_0") - d = Spec(name="libdwarf", compiler="intel", arch="chaos_5_x86_64_ib") + a = Spec(name="boost", compiler="gcc", arch="bgqos_0") + b = Spec(name="mrnet", compiler="intel", arch="chaos_5_x86_64_ib") + c = Spec(name="libelf", compiler="xlc", arch="bgqos_0") + d = Spec(name="libdwarf", compiler="intel", arch="chaos_5_x86_64_ib") - list_of_specs = [a,b,c,d] - index1 = index_by(list_of_specs, lambda s: s.arch, lambda s: s.compiler) - index2 = index_by(list_of_specs, lambda s: s.compiler) + list_of_specs = [a,b,c,d] + index1 = index_by(list_of_specs, lambda s: s.arch, + lambda s: s.compiler) + index2 = index_by(list_of_specs, lambda s: s.compiler) ``index1'' now has two levels of dicts, with lists at the leaves, like this: @@ -137,7 +137,7 @@ def get_calling_module_name(): finally: del stack - if not '__module__' in caller_locals: + if '__module__' not in caller_locals: raise RuntimeError("Must invoke get_calling_module_name() " "from inside a class definition!") @@ -173,11 +173,11 @@ def has_method(cls, name): class memoized(object): """Decorator that caches the results of a function, storing them in an attribute of that function.""" + def __init__(self, func): self.func = func self.cache = {} - def __call__(self, *args): if not isinstance(args, collections.Hashable): # Not hashable, so just call the function. @@ -187,12 +187,10 @@ class memoized(object): self.cache[args] = self.func(*args) return self.cache[args] - def __get__(self, obj, objtype): """Support instance methods.""" return functools.partial(self.__call__, obj) - def clear(self): """Expunge cache so that self.func will be called again.""" self.cache.clear() @@ -237,13 +235,21 @@ def key_ordering(cls): if not has_method(cls, '_cmp_key'): raise TypeError("'%s' doesn't define _cmp_key()." 
% cls.__name__) - setter('__eq__', lambda s,o: (s is o) or (o is not None and s._cmp_key() == o._cmp_key())) - setter('__lt__', lambda s,o: o is not None and s._cmp_key() < o._cmp_key()) - setter('__le__', lambda s,o: o is not None and s._cmp_key() <= o._cmp_key()) - - setter('__ne__', lambda s,o: (s is not o) and (o is None or s._cmp_key() != o._cmp_key())) - setter('__gt__', lambda s,o: o is None or s._cmp_key() > o._cmp_key()) - setter('__ge__', lambda s,o: o is None or s._cmp_key() >= o._cmp_key()) + setter('__eq__', + lambda s, o: + (s is o) or (o is not None and s._cmp_key() == o._cmp_key())) + setter('__lt__', + lambda s, o: o is not None and s._cmp_key() < o._cmp_key()) + setter('__le__', + lambda s, o: o is not None and s._cmp_key() <= o._cmp_key()) + + setter('__ne__', + lambda s, o: + (s is not o) and (o is None or s._cmp_key() != o._cmp_key())) + setter('__gt__', + lambda s, o: o is None or s._cmp_key() > o._cmp_key()) + setter('__ge__', + lambda s, o: o is None or s._cmp_key() >= o._cmp_key()) setter('__hash__', lambda self: hash(self._cmp_key())) @@ -254,10 +260,10 @@ def key_ordering(cls): class HashableMap(dict): """This is a hashable, comparable dictionary. Hash is performed on a tuple of the values in the dictionary.""" + def _cmp_key(self): return tuple(sorted(self.values())) - def copy(self): """Type-agnostic clone method. Preserves subclass type.""" # Construct a new dict of my type @@ -336,24 +342,39 @@ def match_predicate(*args): return match - def DictWrapper(dictionary): """Returns a class that wraps a dictionary and enables it to be used like an object.""" class wrapper(object): - def __getattr__(self, name): return dictionary[name] - def __setattr__(self, name, value): dictionary[name] = value - def setdefault(self, *args): return dictionary.setdefault(*args) - def get(self, *args): return dictionary.get(*args) - def keys(self): return dictionary.keys() - def values(self): return dictionary.values() - def items(self): return dictionary.items() - def __iter__(self): return iter(dictionary) + def __getattr__(self, name): + return dictionary[name] + + def __setattr__(self, name, value): + dictionary[name] = value + + def setdefault(self, *args): + return dictionary.setdefault(*args) + + def get(self, *args): + return dictionary.get(*args) + + def keys(self): + return dictionary.keys() + + def values(self): + return dictionary.values() + + def items(self): + return dictionary.items() + + def __iter__(self): + return iter(dictionary) return wrapper() class RequiredAttributeError(ValueError): + def __init__(self, message): super(RequiredAttributeError, self).__init__(message) diff --git a/lib/spack/llnl/util/link_tree.py b/lib/spack/llnl/util/link_tree.py index b6d8796084..d6547e933a 100644 --- a/lib/spack/llnl/util/link_tree.py +++ b/lib/spack/llnl/util/link_tree.py @@ -23,12 +23,13 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## """LinkTree class for setting up trees of symbolic links.""" -__all__ = ['LinkTree'] import os import shutil from llnl.util.filesystem import * +__all__ = ['LinkTree'] + empty_file_name = '.spack-empty' @@ -43,13 +44,13 @@ class LinkTree(object): modified. 
""" + def __init__(self, source_root): if not os.path.exists(source_root): raise IOError("No such file or directory: '%s'", source_root) self._root = source_root - def find_conflict(self, dest_root, **kwargs): """Returns the first file in dest that conflicts with src""" kwargs['follow_nonexisting'] = False @@ -61,9 +62,9 @@ class LinkTree(object): return dest return None - def merge(self, dest_root, **kwargs): - """Link all files in src into dest, creating directories if necessary.""" + """Link all files in src into dest, creating directories + if necessary.""" kwargs['order'] = 'pre' for src, dest in traverse_tree(self._root, dest_root, **kwargs): if os.path.isdir(src): @@ -83,7 +84,6 @@ class LinkTree(object): assert(not os.path.exists(dest)) os.symlink(src, dest) - def unmerge(self, dest_root, **kwargs): """Unlink all files in dest that exist in src. diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py index 4a4aec2385..aa8272d5fe 100644 --- a/lib/spack/llnl/util/lock.py +++ b/lib/spack/llnl/util/lock.py @@ -47,6 +47,7 @@ class Lock(object): and recent NFS versions. """ + def __init__(self, file_path): self._file_path = file_path self._fd = None @@ -225,6 +226,7 @@ class LockTransaction(object): class ReadTransaction(LockTransaction): + def _enter(self): return self._lock.acquire_read(self._timeout) @@ -233,6 +235,7 @@ class ReadTransaction(LockTransaction): class WriteTransaction(LockTransaction): + def _enter(self): return self._lock.acquire_write(self._timeout) diff --git a/lib/spack/llnl/util/tty/__init__.py b/lib/spack/llnl/util/tty/__init__.py index ee81e11a20..db74aaba6b 100644 --- a/lib/spack/llnl/util/tty/__init__.py +++ b/lib/spack/llnl/util/tty/__init__.py @@ -36,6 +36,7 @@ _debug = False _verbose = False indent = " " + def is_verbose(): return _verbose @@ -148,7 +149,8 @@ def get_yes_or_no(prompt, **kwargs): elif default_value is False: prompt += ' [y/N] ' else: - raise ValueError("default for get_yes_no() must be True, False, or None.") + raise ValueError( + "default for get_yes_no() must be True, False, or None.") result = None while result is None: @@ -174,8 +176,9 @@ def hline(label=None, **kwargs): char = kwargs.pop('char', '-') max_width = kwargs.pop('max_width', 64) if kwargs: - raise TypeError("'%s' is an invalid keyword argument for this function." - % next(kwargs.iterkeys())) + raise TypeError( + "'%s' is an invalid keyword argument for this function." + % next(kwargs.iterkeys())) rows, cols = terminal_size() if not cols: @@ -200,7 +203,8 @@ def terminal_size(): """Gets the dimensions of the console: (rows, cols).""" def ioctl_GWINSZ(fd): try: - rc = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) + rc = struct.unpack('hh', fcntl.ioctl( + fd, termios.TIOCGWINSZ, '1234')) except: return return rc diff --git a/lib/spack/llnl/util/tty/colify.py b/lib/spack/llnl/util/tty/colify.py index 81a83691d7..67acdfa517 100644 --- a/lib/spack/llnl/util/tty/colify.py +++ b/lib/spack/llnl/util/tty/colify.py @@ -27,15 +27,14 @@ Routines for printing columnar output. See colify() for more information. 
""" import os import sys -import fcntl -import termios -import struct from StringIO import StringIO from llnl.util.tty import terminal_size from llnl.util.tty.color import clen, cextra + class ColumnConfig: + def __init__(self, cols): self.cols = cols self.line_length = 0 @@ -43,7 +42,8 @@ class ColumnConfig: self.widths = [0] * cols # does not include ansi colors def __repr__(self): - attrs = [(a,getattr(self, a)) for a in dir(self) if not a.startswith("__")] + attrs = [(a, getattr(self, a)) + for a in dir(self) if not a.startswith("__")] return "" % ", ".join("%s: %r" % a for a in attrs) @@ -68,7 +68,7 @@ def config_variable_cols(elts, console_width, padding, cols=0): max_cols = min(len(elts), max_cols) # Range of column counts to try. If forced, use the supplied value. - col_range = [cols] if cols else xrange(1, max_cols+1) + col_range = [cols] if cols else xrange(1, max_cols + 1) # Determine the most columns possible for the console width. configs = [ColumnConfig(c) for c in col_range] @@ -106,7 +106,6 @@ def config_uniform_cols(elts, console_width, padding, cols=0): # 'clen' ignores length of ansi color sequences. max_len = max(clen(e) for e in elts) + padding - max_clen = max(len(e) for e in elts) + padding if cols == 0: cols = max(1, console_width / max_len) cols = min(len(elts), cols) @@ -130,17 +129,19 @@ def colify(elts, **options): output= A file object to write to. Default is sys.stdout. indent= Optionally indent all columns by some number of spaces. padding= Spaces between columns. Default is 2. - width= Width of the output. Default is 80 if tty is not detected. + width= Width of the output. Default is 80 if tty not detected. cols= Force number of columns. Default is to size to terminal, or single-column if no tty tty= Whether to attempt to write to a tty. Default is to - autodetect a tty. Set to False to force single-column output. + autodetect a tty. Set to False to force + single-column output. - method= Method to use to fit columns. Options are variable or uniform. - Variable-width columns are tighter, uniform columns are all the - same width and fit less data on the screen. + method= Method to use to fit columns. Options are variable or + uniform. Variable-width columns are tighter, uniform + columns are all the same width and fit less data on + the screen. """ # Get keyword arguments or set defaults cols = options.pop("cols", 0) @@ -152,8 +153,9 @@ def colify(elts, **options): console_cols = options.pop("width", None) if options: - raise TypeError("'%s' is an invalid keyword argument for this function." - % next(options.iterkeys())) + raise TypeError( + "'%s' is an invalid keyword argument for this function." + % next(options.iterkeys())) # elts needs to be an array of strings so we can count the elements elts = [str(elt) for elt in elts] @@ -167,7 +169,8 @@ def colify(elts, **options): r, c = env_size.split('x') console_rows, console_cols = int(r), int(c) tty = True - except: pass + except: + pass # Use only one column if not a tty. if not tty: @@ -228,6 +231,7 @@ def colify_table(table, **options): raise ValueError("Table is empty in colify_table!") columns = len(table[0]) + def transpose(): for i in xrange(columns): for row in table: diff --git a/lib/spack/llnl/util/tty/color.py b/lib/spack/llnl/util/tty/color.py index 0abcb09b97..b0c00f1502 100644 --- a/lib/spack/llnl/util/tty/color.py +++ b/lib/spack/llnl/util/tty/color.py @@ -75,25 +75,27 @@ To output an @, use '@@'. To output a } inside braces, use '}}'. 
import re import sys + class ColorParseError(Exception): """Raised when a color format fails to parse.""" + def __init__(self, message): super(ColorParseError, self).__init__(message) # Text styles for ansi codes -styles = {'*' : '1', # bold - '_' : '4', # underline - None : '0' } # plain +styles = {'*': '1', # bold + '_': '4', # underline + None: '0'} # plain # Dim and bright ansi colors -colors = {'k' : 30, 'K' : 90, # black - 'r' : 31, 'R' : 91, # red - 'g' : 32, 'G' : 92, # green - 'y' : 33, 'Y' : 93, # yellow - 'b' : 34, 'B' : 94, # blue - 'm' : 35, 'M' : 95, # magenta - 'c' : 36, 'C' : 96, # cyan - 'w' : 37, 'W' : 97 } # white +colors = {'k': 30, 'K': 90, # black + 'r': 31, 'R': 91, # red + 'g': 32, 'G': 92, # green + 'y': 33, 'Y': 93, # yellow + 'b': 34, 'B': 94, # blue + 'm': 35, 'M': 95, # magenta + 'c': 36, 'C': 96, # cyan + 'w': 37, 'W': 97} # white # Regex to be used for color formatting color_re = r'@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)' @@ -104,6 +106,7 @@ _force_color = False class match_to_ansi(object): + def __init__(self, color=True): self.color = color @@ -179,12 +182,14 @@ def cprint(string, stream=sys.stdout, color=None): """Same as cwrite, but writes a trailing newline to the stream.""" cwrite(string + "\n", stream, color) + def cescape(string): """Replace all @ with @@ in the string provided.""" return str(string).replace('@', '@@') class ColorStream(object): + def __init__(self, stream, color=None): self._stream = stream self._color = color @@ -196,7 +201,7 @@ class ColorStream(object): color = self._color if self._color is None: if raw: - color=True + color = True else: color = self._stream.isatty() or _force_color raw_write(colorize(string, color=color)) diff --git a/lib/spack/llnl/util/tty/log.py b/lib/spack/llnl/util/tty/log.py index ca82da7b17..b67edcf9cc 100644 --- a/lib/spack/llnl/util/tty/log.py +++ b/lib/spack/llnl/util/tty/log.py @@ -36,6 +36,7 @@ import llnl.util.tty.color as color # Use this to strip escape sequences _escape = re.compile(r'\x1b[^m]*m|\x1b\[?1034h') + def _strip(line): """Strip color and control characters from a line.""" return _escape.sub('', line) @@ -58,10 +59,10 @@ class keyboard_input(object): When the with block completes, this will restore settings before canonical and echo were disabled. """ + def __init__(self, stream): self.stream = stream - def __enter__(self): self.old_cfg = None @@ -86,10 +87,9 @@ class keyboard_input(object): # Apply new settings for terminal termios.tcsetattr(fd, termios.TCSADRAIN, self.new_cfg) - except Exception, e: + except Exception: pass # Some OS's do not support termios, so ignore. - def __exit__(self, exc_type, exception, traceback): # If termios was avaialble, restore old settings after the # with block @@ -114,6 +114,7 @@ class log_output(object): Closes the provided stream when done with the block. If echo is True, also prints the output to stdout. """ + def __init__(self, stream, echo=False, force_color=False, debug=False): self.stream = stream @@ -122,7 +123,7 @@ class log_output(object): self.force_color = force_color self.debug = debug - # Default is to try file-descriptor reassignment unless the system + # Default is to try file-descriptor reassignment unless the system # out/err streams do not have an associated file descriptor self.directAssignment = False @@ -130,7 +131,6 @@ class log_output(object): """Jumps to __exit__ on the child process.""" raise _SkipWithBlock() - def __enter__(self): """Redirect output from the with block to a file. 
@@ -154,7 +154,8 @@ class log_output(object): with self.stream as log_file: with keyboard_input(sys.stdin): while True: - rlist, w, x = select.select([read_file, sys.stdin], [], []) + rlist, w, x = select.select( + [read_file, sys.stdin], [], []) if not rlist: break @@ -211,7 +212,6 @@ class log_output(object): if self.debug: tty._debug = True - def __exit__(self, exc_type, exception, traceback): """Exits on child, handles skipping the with block on parent.""" # Child should just exit here. @@ -235,7 +235,7 @@ class log_output(object): sys.stderr = self._stderr else: os.dup2(self._stdout, sys.stdout.fileno()) - os.dup2(self._stderr, sys.stderr.fileno()) + os.dup2(self._stderr, sys.stderr.fileno()) return False diff --git a/lib/spack/spack/abi.py b/lib/spack/spack/abi.py index 38cff62af4..064abb9782 100644 --- a/lib/spack/spack/abi.py +++ b/lib/spack/spack/abi.py @@ -30,15 +30,15 @@ from spack.spec import CompilerSpec from spack.util.executable import Executable, ProcessError from llnl.util.lang import memoized + class ABI(object): """This class provides methods to test ABI compatibility between specs. The current implementation is rather rough and could be improved.""" def architecture_compatible(self, parent, child): - """Returns true iff the parent and child specs have ABI compatible targets.""" - return not parent.architecture or not child.architecture \ - or parent.architecture == child.architecture - + """Return true if parent and child have ABI compatible targets.""" + return not parent.architecture or not child.architecture or \ + parent.architecture == child.architecture @memoized def _gcc_get_libstdcxx_version(self, version): @@ -61,8 +61,9 @@ class ABI(object): else: return None try: - output = rungcc("--print-file-name=%s" % libname, return_output=True) - except ProcessError, e: + output = rungcc("--print-file-name=%s" % libname, + return_output=True) + except ProcessError: return None if not output: return None @@ -71,7 +72,6 @@ class ABI(object): return None return os.path.basename(libpath) - @memoized def _gcc_compiler_compare(self, pversion, cversion): """Returns true iff the gcc version pversion and cversion @@ -82,7 +82,6 @@ class ABI(object): return False return plib == clib - def _intel_compiler_compare(self, pversion, cversion): """Returns true iff the intel version pversion and cversion are ABI compatible""" @@ -92,9 +91,8 @@ class ABI(object): return False return pversion.version[:2] == cversion.version[:2] - def compiler_compatible(self, parent, child, **kwargs): - """Returns true iff the compilers for parent and child specs are ABI compatible""" + """Return true if compilers for parent and child are ABI compatible.""" if not parent.compiler or not child.compiler: return True @@ -109,8 +107,8 @@ class ABI(object): # TODO: into compiler classes? for pversion in parent.compiler.versions: for cversion in child.compiler.versions: - # For a few compilers use specialized comparisons. Otherwise - # match on version match. + # For a few compilers use specialized comparisons. + # Otherwise match on version match. 
if pversion.satisfies(cversion): return True elif (parent.compiler.name == "gcc" and @@ -121,9 +119,8 @@ class ABI(object): return True return False - def compatible(self, parent, child, **kwargs): """Returns true iff a parent and child spec are ABI compatible""" loosematch = kwargs.get('loose', False) return self.architecture_compatible(parent, child) and \ - self.compiler_compatible(parent, child, loose=loosematch) + self.compiler_compatible(parent, child, loose=loosematch) diff --git a/lib/spack/spack/architecture.py b/lib/spack/spack/architecture.py index 886e170b1a..0d210f9741 100644 --- a/lib/spack/spack/architecture.py +++ b/lib/spack/spack/architecture.py @@ -91,6 +91,7 @@ import spack.error as serr class NoPlatformError(serr.SpackError): + def __init__(self): super(NoPlatformError, self).__init__( "Could not determine a platform for this machine.") diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py index 230115df50..f69f434afd 100644 --- a/lib/spack/spack/cmd/__init__.py +++ b/lib/spack/spack/cmd/__init__.py @@ -240,4 +240,4 @@ def display_specs(specs, **kwargs): else: raise ValueError( "Invalid mode for display_specs: %s. Must be one of (paths," - "deps, short)." % mode) # NOQA: ignore=E501 + "deps, short)." % mode) diff --git a/lib/spack/spack/cmd/activate.py b/lib/spack/spack/cmd/activate.py index 9867fa8835..797cdcb136 100644 --- a/lib/spack/spack/cmd/activate.py +++ b/lib/spack/spack/cmd/activate.py @@ -29,12 +29,14 @@ import spack.cmd description = "Activate a package extension." + def setup_parser(subparser): subparser.add_argument( '-f', '--force', action='store_true', help="Activate without first activating dependencies.") subparser.add_argument( - 'spec', nargs=argparse.REMAINDER, help="spec of package extension to activate.") + 'spec', nargs=argparse.REMAINDER, + help="spec of package extension to activate.") def activate(parser, args): diff --git a/lib/spack/spack/cmd/arch.py b/lib/spack/spack/cmd/arch.py index cf2f96fd21..1badd40f7f 100644 --- a/lib/spack/spack/cmd/arch.py +++ b/lib/spack/spack/cmd/arch.py @@ -22,10 +22,10 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import spack import spack.architecture as architecture description = "Print the architecture for this machine" + def arch(parser, args): print architecture.sys_type() diff --git a/lib/spack/spack/cmd/cd.py b/lib/spack/spack/cmd/cd.py index aa45f67ae1..cf7232258c 100644 --- a/lib/spack/spack/cmd/cd.py +++ b/lib/spack/spack/cmd/cd.py @@ -25,7 +25,8 @@ import spack.cmd.location import spack.modules -description="cd to spack directories in the shell." +description = "cd to spack directories in the shell." + def setup_parser(subparser): """This is for decoration -- spack cd is used through spack's diff --git a/lib/spack/spack/cmd/clean.py b/lib/spack/spack/cmd/clean.py index 514c5874ef..dc62fbcaf6 100644 --- a/lib/spack/spack/cmd/clean.py +++ b/lib/spack/spack/cmd/clean.py @@ -31,6 +31,7 @@ import spack.cmd description = "Remove build stage and source tarball for packages." 
+ def setup_parser(subparser): subparser.add_argument('packages', nargs=argparse.REMAINDER, help="specs of packages to clean") diff --git a/lib/spack/spack/cmd/common/arguments.py b/lib/spack/spack/cmd/common/arguments.py index af04170824..afcba33714 100644 --- a/lib/spack/spack/cmd/common/arguments.py +++ b/lib/spack/spack/cmd/common/arguments.py @@ -35,7 +35,7 @@ _arguments = {} def add_common_arguments(parser, list_of_arguments): for argument in list_of_arguments: if argument not in _arguments: - message = 'Trying to add the non existing argument "{0}" to a command' # NOQA: ignore=E501 + message = 'Trying to add non existing argument "{0}" to a command' raise KeyError(message.format(argument)) x = _arguments[argument] parser.add_argument(*x.flags, **x.kwargs) @@ -82,7 +82,7 @@ parms = Bunch( kwargs={ 'action': 'store_true', 'dest': 'yes_to_all', - 'help': 'Assume "yes" is the answer to every confirmation asked to the user.' # NOQA: ignore=E501 + 'help': 'Assume "yes" is the answer to every confirmation request.' }) _arguments['yes_to_all'] = parms diff --git a/lib/spack/spack/cmd/compiler.py b/lib/spack/spack/cmd/compiler.py index c325628ebf..ea91c71479 100644 --- a/lib/spack/spack/cmd/compiler.py +++ b/lib/spack/spack/cmd/compiler.py @@ -37,6 +37,7 @@ from spack.util.environment import get_path description = "Manage compilers" + def setup_parser(subparser): sp = subparser.add_subparsers( metavar='SUBCOMMAND', dest='compiler_command') @@ -44,48 +45,58 @@ def setup_parser(subparser): scopes = spack.config.config_scopes # Find - find_parser = sp.add_parser('find', aliases=['add'], help='Search the system for compilers to add to the Spack configuration.') + find_parser = sp.add_parser( + 'find', aliases=['add'], + help='Search the system for compilers to add to Spack configuration.') find_parser.add_argument('add_paths', nargs=argparse.REMAINDER) - find_parser.add_argument('--scope', choices=scopes, default=spack.cmd.default_modify_scope, - help="Configuration scope to modify.") + find_parser.add_argument( + '--scope', choices=scopes, default=spack.cmd.default_modify_scope, + help="Configuration scope to modify.") # Remove - remove_parser = sp.add_parser('remove', aliases=['rm'], help='Remove compiler by spec.') + remove_parser = sp.add_parser( + 'remove', aliases=['rm'], help='Remove compiler by spec.') remove_parser.add_argument( - '-a', '--all', action='store_true', help='Remove ALL compilers that match spec.') + '-a', '--all', action='store_true', + help='Remove ALL compilers that match spec.') remove_parser.add_argument('compiler_spec') - remove_parser.add_argument('--scope', choices=scopes, default=spack.cmd.default_modify_scope, - help="Configuration scope to modify.") + remove_parser.add_argument( + '--scope', choices=scopes, default=spack.cmd.default_modify_scope, + help="Configuration scope to modify.") # List list_parser = sp.add_parser('list', help='list available compilers') - list_parser.add_argument('--scope', choices=scopes, default=spack.cmd.default_list_scope, - help="Configuration scope to read from.") + list_parser.add_argument( + '--scope', choices=scopes, default=spack.cmd.default_list_scope, + help="Configuration scope to read from.") # Info info_parser = sp.add_parser('info', help='Show compiler paths.') info_parser.add_argument('compiler_spec') - info_parser.add_argument('--scope', choices=scopes, default=spack.cmd.default_list_scope, - help="Configuration scope to read from.") + info_parser.add_argument( + '--scope', choices=scopes, 
default=spack.cmd.default_list_scope, + help="Configuration scope to read from.") def compiler_find(args): - """Search either $PATH or a list of paths OR MODULES for compilers and add them - to Spack's configuration.""" + """Search either $PATH or a list of paths OR MODULES for compilers and + add them to Spack's configuration. + + """ paths = args.add_paths if not paths: paths = get_path('PATH') - # Don't initialize compilers config via compilers.get_compiler_config. - # Just let compiler_find do the + # Don't initialize compilers config via compilers.get_compiler_config. + # Just let compiler_find do the # entire process and return an empty config from all_compilers # Default for any other process is init_config=True compilers = [c for c in spack.compilers.find_compilers(*paths) if c.spec not in spack.compilers.all_compilers( - scope=args.scope, init_config=False)] + scope=args.scope, init_config=False)] if compilers: spack.compilers.add_compilers_to_config(compilers, scope=args.scope, - init_config=False) + init_config=False) n = len(compilers) s = 's' if n > 1 else '' filename = spack.config.get_config_filename(args.scope, 'compilers') @@ -103,11 +114,12 @@ def compiler_remove(args): elif not args.all and len(compilers) > 1: tty.error("Multiple compilers match spec %s. Choose one:" % cspec) colify(reversed(sorted([c.spec for c in compilers])), indent=4) - tty.msg("Or, you can use `spack compiler remove -a` to remove all of them.") + tty.msg("Or, use `spack compiler remove -a` to remove all of them.") sys.exit(1) for compiler in compilers: - spack.compilers.remove_compiler_from_config(compiler.spec, scope=args.scope) + spack.compilers.remove_compiler_from_config( + compiler.spec, scope=args.scope) tty.msg("Removed compiler %s" % compiler.spec) @@ -133,7 +145,8 @@ def compiler_list(args): tty.msg("Available compilers") index = index_by(spack.compilers.all_compilers(scope=args.scope), 'name') for i, (name, compilers) in enumerate(index.items()): - if i >= 1: print + if i >= 1: + print cname = "%s{%s}" % (spack.spec.compiler_color, name) tty.hline(colorize(cname), char='-') @@ -141,10 +154,10 @@ def compiler_list(args): def compiler(parser, args): - action = {'add' : compiler_find, - 'find' : compiler_find, - 'remove' : compiler_remove, - 'rm' : compiler_remove, - 'info' : compiler_info, - 'list' : compiler_list } + action = {'add': compiler_find, + 'find': compiler_find, + 'remove': compiler_remove, + 'rm': compiler_remove, + 'info': compiler_info, + 'list': compiler_list} action[args.compiler_command](args) diff --git a/lib/spack/spack/cmd/compilers.py b/lib/spack/spack/cmd/compilers.py index 9fbc2bb952..b87f977e5a 100644 --- a/lib/spack/spack/cmd/compilers.py +++ b/lib/spack/spack/cmd/compilers.py @@ -22,18 +22,16 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import llnl.util.tty as tty -from llnl.util.tty.colify import colify -from llnl.util.lang import index_by - import spack from spack.cmd.compiler import compiler_list description = "List available compilers. Same as 'spack compiler list'." 
+ def setup_parser(subparser): subparser.add_argument('--scope', choices=spack.config.config_scopes, help="Configuration scope to read/modify.") + def compilers(parser, args): compiler_list(args) diff --git a/lib/spack/spack/cmd/config.py b/lib/spack/spack/cmd/config.py index d6f56c270d..c189e37036 100644 --- a/lib/spack/spack/cmd/config.py +++ b/lib/spack/spack/cmd/config.py @@ -22,15 +22,11 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import sys -import argparse - -import llnl.util.tty as tty - import spack.config description = "Get and set configuration options." + def setup_parser(subparser): # User can only choose one scope_group = subparser.add_mutually_exclusive_group() @@ -64,6 +60,6 @@ def config_edit(args): def config(parser, args): - action = { 'get' : config_get, - 'edit' : config_edit } + action = {'get': config_get, + 'edit': config_edit} action[args.config_command](args) diff --git a/lib/spack/spack/cmd/create.py b/lib/spack/spack/cmd/create.py index 51bf17a44b..52a82eb38f 100644 --- a/lib/spack/spack/cmd/create.py +++ b/lib/spack/spack/cmd/create.py @@ -217,6 +217,7 @@ def setup_parser(subparser): class BuildSystemGuesser(object): + def __call__(self, stage, url): """Try to guess the type of build system used by a project based on the contents of its archive or the URL it was downloaded from.""" diff --git a/lib/spack/spack/cmd/deactivate.py b/lib/spack/spack/cmd/deactivate.py index 990309ee48..2b15a0331e 100644 --- a/lib/spack/spack/cmd/deactivate.py +++ b/lib/spack/spack/cmd/deactivate.py @@ -31,6 +31,7 @@ from spack.graph import topological_sort description = "Deactivate a package extension." + def setup_parser(subparser): subparser.add_argument( '-f', '--force', action='store_true', @@ -40,7 +41,8 @@ def setup_parser(subparser): help="Deactivate all extensions of an extendable package, or " "deactivate an extension AND its dependencies.") subparser.add_argument( - 'spec', nargs=argparse.REMAINDER, help="spec of package extension to deactivate.") + 'spec', nargs=argparse.REMAINDER, + help="spec of package extension to deactivate.") def deactivate(parser, args): @@ -65,7 +67,8 @@ def deactivate(parser, args): if not args.force and not spec.package.activated: tty.die("%s is not activated." % pkg.spec.short_spec) - tty.msg("Deactivating %s and all dependencies." % pkg.spec.short_spec) + tty.msg("Deactivating %s and all dependencies." % + pkg.spec.short_spec) topo_order = topological_sort(spec) index = spec.index() @@ -79,7 +82,9 @@ def deactivate(parser, args): epkg.do_deactivate(force=args.force) else: - tty.die("spack deactivate --all requires an extendable package or an extension.") + tty.die( + "spack deactivate --all requires an extendable package " + "or an extension.") else: if not pkg.is_extension: diff --git a/lib/spack/spack/cmd/dependents.py b/lib/spack/spack/cmd/dependents.py index 78eb6847b8..7729105e62 100644 --- a/lib/spack/spack/cmd/dependents.py +++ b/lib/spack/spack/cmd/dependents.py @@ -31,9 +31,11 @@ import spack.cmd description = "Show installed packages that depend on another." 
+ def setup_parser(subparser): subparser.add_argument( - 'spec', nargs=argparse.REMAINDER, help="specs to list dependencies of.") + 'spec', nargs=argparse.REMAINDER, + help="specs to list dependencies of.") def dependents(parser, args): @@ -42,5 +44,6 @@ def dependents(parser, args): tty.die("spack dependents takes only one spec.") fmt = '$_$@$%@$+$=$#' - deps = [d.format(fmt, color=True) for d in specs[0].package.installed_dependents] + deps = [d.format(fmt, color=True) + for d in specs[0].package.installed_dependents] tty.msg("Dependents of %s" % specs[0].format(fmt, color=True), *deps) diff --git a/lib/spack/spack/cmd/diy.py b/lib/spack/spack/cmd/diy.py index 643e6374b2..487654d261 100644 --- a/lib/spack/spack/cmd/diy.py +++ b/lib/spack/spack/cmd/diy.py @@ -35,6 +35,7 @@ from spack.stage import DIYStage description = "Do-It-Yourself: build from an existing source directory." + def setup_parser(subparser): subparser.add_argument( '-i', '--ignore-dependencies', action='store_true', dest='ignore_deps', @@ -76,14 +77,17 @@ def diy(self, args): return if not spec.versions.concrete: - tty.die("spack diy spec must have a single, concrete version. Did you forget a package version number?") + tty.die( + "spack diy spec must have a single, concrete version. " + "Did you forget a package version number?") spec.concretize() package = spack.repo.get(spec) if package.installed: tty.error("Already installed in %s" % package.prefix) - tty.msg("Uninstall or try adding a version suffix for this DIY build.") + tty.msg("Uninstall or try adding a version suffix for this " + "DIY build.") sys.exit(1) # Forces the build to run out of the current directory. diff --git a/lib/spack/spack/cmd/doc.py b/lib/spack/spack/cmd/doc.py index b3d0737d13..291b17216f 100644 --- a/lib/spack/spack/cmd/doc.py +++ b/lib/spack/spack/cmd/doc.py @@ -25,6 +25,7 @@ description = "Run pydoc from within spack." + def setup_parser(subparser): subparser.add_argument('entity', help="Run pydoc help on entity") diff --git a/lib/spack/spack/cmd/edit.py b/lib/spack/spack/cmd/edit.py index 49ab83867a..286136dd67 100644 --- a/lib/spack/spack/cmd/edit.py +++ b/lib/spack/spack/cmd/edit.py @@ -68,7 +68,7 @@ def edit_package(name, repo_path, namespace, force=False): if os.path.exists(path): if not os.path.isfile(path): tty.die("Something's wrong. '%s' is not a file!" % path) - if not os.access(path, os.R_OK|os.W_OK): + if not os.access(path, os.R_OK | os.W_OK): tty.die("Insufficient permissions on '%s'!" % path) elif not force: tty.die("No package '%s'. Use spack create, or supply -f/--force " @@ -93,19 +93,23 @@ def setup_parser(subparser): # Various filetypes you can edit directly from the cmd line. 
excl_args.add_argument( '-c', '--command', dest='path', action='store_const', - const=spack.cmd.command_path, help="Edit the command with the supplied name.") + const=spack.cmd.command_path, + help="Edit the command with the supplied name.") excl_args.add_argument( '-t', '--test', dest='path', action='store_const', const=spack.test_path, help="Edit the test with the supplied name.") excl_args.add_argument( '-m', '--module', dest='path', action='store_const', - const=spack.module_path, help="Edit the main spack module with the supplied name.") + const=spack.module_path, + help="Edit the main spack module with the supplied name.") # Options for editing packages excl_args.add_argument( - '-r', '--repo', default=None, help="Path to repo to edit package in.") + '-r', '--repo', default=None, + help="Path to repo to edit package in.") excl_args.add_argument( - '-N', '--namespace', default=None, help="Namespace of package to edit.") + '-N', '--namespace', default=None, + help="Namespace of package to edit.") subparser.add_argument( 'name', nargs='?', default=None, help="name of package to edit") diff --git a/lib/spack/spack/cmd/env.py b/lib/spack/spack/cmd/env.py index 85d111e91e..f3bad039d4 100644 --- a/lib/spack/spack/cmd/env.py +++ b/lib/spack/spack/cmd/env.py @@ -28,11 +28,13 @@ import llnl.util.tty as tty import spack.cmd import spack.build_environment as build_env -description = "Run a command with the environment for a particular spec's install." +description = "Run a command with the install environment for a spec." + def setup_parser(subparser): subparser.add_argument( - 'spec', nargs=argparse.REMAINDER, help="specs of package environment to emulate.") + 'spec', nargs=argparse.REMAINDER, + help="specs of package environment to emulate.") def env(parser, args): @@ -47,7 +49,7 @@ def env(parser, args): if sep in args.spec: s = args.spec.index(sep) spec = args.spec[:s] - cmd = args.spec[s+1:] + cmd = args.spec[s + 1:] else: spec = args.spec[0] cmd = args.spec[1:] diff --git a/lib/spack/spack/cmd/extensions.py b/lib/spack/spack/cmd/extensions.py index 11659e0c96..b5c484305f 100644 --- a/lib/spack/spack/cmd/extensions.py +++ b/lib/spack/spack/cmd/extensions.py @@ -22,7 +22,6 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import sys import argparse import llnl.util.tty as tty @@ -34,6 +33,7 @@ import spack.cmd.find description = "List extensions for package." + def setup_parser(subparser): format_group = subparser.add_mutually_exclusive_group() format_group.add_argument( @@ -47,7 +47,8 @@ def setup_parser(subparser): help='Show full dependency DAG of extensions') subparser.add_argument( - 'spec', nargs=argparse.REMAINDER, help='Spec of package to list extensions for') + 'spec', nargs=argparse.REMAINDER, + help='Spec of package to list extensions for') def extensions(parser, args): @@ -85,7 +86,8 @@ def extensions(parser, args): # # List specs of installed extensions. 
# - installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)] + installed = [ + s.spec for s in spack.installed_db.installed_extensions_for(spec)] print if not installed: tty.msg("None installed.") @@ -102,4 +104,5 @@ def extensions(parser, args): tty.msg("None activated.") return tty.msg("%d currently activated:" % len(activated)) - spack.cmd.find.display_specs(activated.values(), mode=args.mode, long=args.long) + spack.cmd.find.display_specs( + activated.values(), mode=args.mode, long=args.long) diff --git a/lib/spack/spack/cmd/fetch.py b/lib/spack/spack/cmd/fetch.py index 1afc51d9fa..c1ac2ed48d 100644 --- a/lib/spack/spack/cmd/fetch.py +++ b/lib/spack/spack/cmd/fetch.py @@ -29,16 +29,21 @@ import spack.cmd description = "Fetch archives for packages" + def setup_parser(subparser): subparser.add_argument( '-n', '--no-checksum', action='store_true', dest='no_checksum', help="Do not check packages against checksum") subparser.add_argument( - '-m', '--missing', action='store_true', help="Also fetch all missing dependencies") + '-m', '--missing', action='store_true', + help="Also fetch all missing dependencies") subparser.add_argument( - '-D', '--dependencies', action='store_true', help="Also fetch all dependencies") + '-D', '--dependencies', action='store_true', + help="Also fetch all dependencies") subparser.add_argument( - 'packages', nargs=argparse.REMAINDER, help="specs of packages to fetch") + 'packages', nargs=argparse.REMAINDER, + help="specs of packages to fetch") + def fetch(parser, args): if not args.packages: @@ -50,7 +55,6 @@ def fetch(parser, args): specs = spack.cmd.parse_specs(args.packages, concretize=True) for spec in specs: if args.missing or args.dependencies: - to_fetch = set() for s in spec.traverse(deptype_query=spack.alldeps): package = spack.repo.get(s) if args.missing and package.installed: diff --git a/lib/spack/spack/cmd/graph.py b/lib/spack/spack/cmd/graph.py index da65121836..8faabfbb7b 100644 --- a/lib/spack/spack/cmd/graph.py +++ b/lib/spack/spack/cmd/graph.py @@ -30,6 +30,7 @@ from spack.graph import * description = "Generate graphs of package dependency relationships." + def setup_parser(subparser): setup_parser.parser = subparser @@ -42,10 +43,12 @@ def setup_parser(subparser): help="Generate graph in dot format and print to stdout.") subparser.add_argument( - '--concretize', action='store_true', help="Concretize specs before graphing.") + '--concretize', action='store_true', + help="Concretize specs before graphing.") subparser.add_argument( - 'specs', nargs=argparse.REMAINDER, help="specs of packages to graph.") + 'specs', nargs=argparse.REMAINDER, + help="specs of packages to graph.") def graph(parser, args): @@ -56,11 +59,11 @@ def graph(parser, args): setup_parser.parser.print_help() return 1 - if args.dot: # Dot graph only if asked for. + if args.dot: # Dot graph only if asked for. 
graph_dot(*specs) - elif specs: # ascii is default: user doesn't need to provide it explicitly + elif specs: # ascii is default: user doesn't need to provide it explicitly graph_ascii(specs[0], debug=spack.debug) for spec in specs[1:]: - print # extra line bt/w independent graphs + print # extra line bt/w independent graphs graph_ascii(spec, debug=spack.debug) diff --git a/lib/spack/spack/cmd/help.py b/lib/spack/spack/cmd/help.py index 1d23161839..5bc8fc3e74 100644 --- a/lib/spack/spack/cmd/help.py +++ b/lib/spack/spack/cmd/help.py @@ -22,14 +22,14 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import sys - description = "Get help on spack and its commands" + def setup_parser(subparser): subparser.add_argument('help_command', nargs='?', default=None, help='command to get help on') + def help(parser, args): if args.help_command: parser.parse_args([args.help_command, '-h']) diff --git a/lib/spack/spack/cmd/install.py b/lib/spack/spack/cmd/install.py index 4c076322a9..7663a97a28 100644 --- a/lib/spack/spack/cmd/install.py +++ b/lib/spack/spack/cmd/install.py @@ -31,6 +31,7 @@ import spack.cmd description = "Build and install packages" + def setup_parser(subparser): subparser.add_argument( '-i', '--ignore-dependencies', action='store_true', dest='ignore_deps', @@ -52,18 +53,18 @@ def setup_parser(subparser): help="Display verbose build output while installing.") subparser.add_argument( '--fake', action='store_true', dest='fake', - help="Fake install. Just remove the prefix and touch a fake file in it.") + help="Fake install. Just remove prefix and create a fake file.") subparser.add_argument( '--dirty', action='store_true', dest='dirty', help="Install a package *without* cleaning the environment.") subparser.add_argument( - 'packages', nargs=argparse.REMAINDER, help="specs of packages to install") + 'packages', nargs=argparse.REMAINDER, + help="specs of packages to install") subparser.add_argument( '--run-tests', action='store_true', dest='run_tests', help="Run tests during installation of a package.") - def install(parser, args): if not args.packages: tty.die("install requires at least one package argument") diff --git a/lib/spack/spack/cmd/load.py b/lib/spack/spack/cmd/load.py index 205abbb6b3..85190a5d0b 100644 --- a/lib/spack/spack/cmd/load.py +++ b/lib/spack/spack/cmd/load.py @@ -25,13 +25,16 @@ import argparse import spack.modules -description ="Add package to environment using modules." +description = "Add package to environment using modules." + def setup_parser(subparser): """Parser is only constructed so that this prints a nice help message with -h. """ subparser.add_argument( - 'spec', nargs=argparse.REMAINDER, help="Spec of package to load with modules. (If -, read specs from STDIN)") + 'spec', nargs=argparse.REMAINDER, + help="Spec of package to load with modules. 
" + "(If -, read specs from STDIN)") def load(parser, args): diff --git a/lib/spack/spack/cmd/location.py b/lib/spack/spack/cmd/location.py index b0dbb1a550..b9c8b5c330 100644 --- a/lib/spack/spack/cmd/location.py +++ b/lib/spack/spack/cmd/location.py @@ -22,8 +22,6 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import os -import sys import argparse import llnl.util.tty as tty @@ -32,16 +30,19 @@ from llnl.util.filesystem import join_path import spack import spack.cmd -description="Print out locations of various directories used by Spack" +description = "Print out locations of various directories used by Spack" + def setup_parser(subparser): global directories directories = subparser.add_mutually_exclusive_group() directories.add_argument( - '-m', '--module-dir', action='store_true', help="Spack python module directory.") + '-m', '--module-dir', action='store_true', + help="Spack python module directory.") directories.add_argument( - '-r', '--spack-root', action='store_true', help="Spack installation root.") + '-r', '--spack-root', action='store_true', + help="Spack installation root.") directories.add_argument( '-i', '--install-dir', action='store_true', @@ -53,15 +54,19 @@ def setup_parser(subparser): '-P', '--packages', action='store_true', help="Top-level packages directory for Spack.") directories.add_argument( - '-s', '--stage-dir', action='store_true', help="Stage directory for a spec.") + '-s', '--stage-dir', action='store_true', + help="Stage directory for a spec.") directories.add_argument( - '-S', '--stages', action='store_true', help="Top level Stage directory.") + '-S', '--stages', action='store_true', + help="Top level Stage directory.") directories.add_argument( '-b', '--build-dir', action='store_true', - help="Checked out or expanded source directory for a spec (requires it to be staged first).") + help="Checked out or expanded source directory for a spec " + "(requires it to be staged first).") subparser.add_argument( - 'spec', nargs=argparse.REMAINDER, help="spec of package to fetch directory for.") + 'spec', nargs=argparse.REMAINDER, + help="spec of package to fetch directory for.") def location(parser, args): @@ -104,9 +109,9 @@ def location(parser, args): if args.stage_dir: print pkg.stage.path - else: # args.build_dir is the default. + else: # args.build_dir is the default. if not pkg.stage.source_path: - tty.die("Build directory does not exist yet. Run this to create it:", + tty.die("Build directory does not exist yet. " + "Run this to create it:", "spack stage " + " ".join(args.spec)) print pkg.stage.source_path - diff --git a/lib/spack/spack/cmd/mirror.py b/lib/spack/spack/cmd/mirror.py index 0cf682fc4f..585faaf524 100644 --- a/lib/spack/spack/cmd/mirror.py +++ b/lib/spack/spack/cmd/mirror.py @@ -23,7 +23,6 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import os -import sys from datetime import datetime import argparse @@ -40,6 +39,7 @@ from spack.util.spack_yaml import syaml_dict description = "Manage mirrors." 
+ def setup_parser(subparser): subparser.add_argument( '-n', '--no-checksum', action='store_true', dest='no_checksum', @@ -61,8 +61,9 @@ def setup_parser(subparser): '-D', '--dependencies', action='store_true', help="Also fetch all dependencies") create_parser.add_argument( - '-o', '--one-version-per-spec', action='store_const', const=1, default=0, - help="Only fetch one 'preferred' version per spec, not all known versions.") + '-o', '--one-version-per-spec', action='store_const', + const=1, default=0, + help="Only fetch one 'preferred' version per spec, not all known.") scopes = spack.config.config_scopes @@ -70,7 +71,7 @@ def setup_parser(subparser): add_parser = sp.add_parser('add', help=mirror_add.__doc__) add_parser.add_argument('name', help="Mnemonic name for mirror.") add_parser.add_argument( - 'url', help="URL of mirror directory created by 'spack mirror create'.") + 'url', help="URL of mirror directory from 'spack mirror create'.") add_parser.add_argument( '--scope', choices=scopes, default=spack.cmd.default_modify_scope, help="Configuration scope to modify.") @@ -107,7 +108,7 @@ def mirror_add(args): tty.die("Mirror with url %s already exists." % url) # should only be one item per mirror dict. - items = [(n,u) for n,u in mirrors.items()] + items = [(n, u) for n, u in mirrors.items()] items.insert(0, (args.name, url)) mirrors = syaml_dict(items) spack.config.update_config('mirrors', mirrors, scope=args.scope) @@ -121,7 +122,7 @@ def mirror_remove(args): if not mirrors: mirrors = syaml_dict() - if not name in mirrors: + if name not in mirrors: tty.die("No mirror with name %s" % name) old_value = mirrors.pop(name) @@ -152,7 +153,7 @@ def _read_specs_from_file(filename): s.package specs.append(s) except SpackError, e: - tty.die("Parse error in %s, line %d:" % (args.file, i+1), + tty.die("Parse error in %s, line %d:" % (args.file, i + 1), ">>> " + string, str(e)) return specs @@ -214,10 +215,10 @@ def mirror_create(args): def mirror(parser, args): - action = { 'create' : mirror_create, - 'add' : mirror_add, - 'remove' : mirror_remove, - 'rm' : mirror_remove, - 'list' : mirror_list } + action = {'create': mirror_create, + 'add': mirror_add, + 'remove': mirror_remove, + 'rm': mirror_remove, + 'list': mirror_list} action[args.mirror_command](args) diff --git a/lib/spack/spack/cmd/module.py b/lib/spack/spack/cmd/module.py index a10e36e077..2d0b83fe00 100644 --- a/lib/spack/spack/cmd/module.py +++ b/lib/spack/spack/cmd/module.py @@ -118,7 +118,8 @@ def loads(mtype, specs, args): seen_add = seen.add for spec in specs_from_user_constraint: specs.extend( - [item for item in spec.traverse(order='post', cover='nodes') if not (item in seen or seen_add(item))] # NOQA: ignore=E501 + [item for item in spec.traverse(order='post', cover='nodes') + if not (item in seen or seen_add(item))] ) module_cls = module_types[mtype] @@ -178,7 +179,9 @@ def rm(mtype, specs, args): # Ask for confirmation if not args.yes_to_all: - tty.msg('You are about to remove {0} module files the following specs:\n'.format(mtype)) # NOQA: ignore=E501 + tty.msg( + 'You are about to remove {0} module files the following specs:\n' + .format(mtype)) spack.cmd.display_specs(specs_with_modules, long=True) print('') spack.cmd.ask_for_confirmation('Do you want to proceed ? 
') @@ -197,7 +200,9 @@ def refresh(mtype, specs, args): return if not args.yes_to_all: - tty.msg('You are about to regenerate {name} module files for the following specs:\n'.format(name=mtype)) # NOQA: ignore=E501 + tty.msg( + 'You are about to regenerate {name} module files for:\n' + .format(name=mtype)) spack.cmd.display_specs(specs, long=True) print('') spack.cmd.ask_for_confirmation('Do you want to proceed ? ') @@ -245,11 +250,13 @@ def module(parser, args): try: callbacks[args.subparser_name](module_type, args.specs, args) except MultipleMatches: - message = 'the constraint \'{query}\' matches multiple packages, and this is not allowed in this context' # NOQA: ignore=E501 + message = ('the constraint \'{query}\' matches multiple packages, ' + 'and this is not allowed in this context') tty.error(message.format(query=constraint)) for s in args.specs: sys.stderr.write(s.format(color=True) + '\n') raise SystemExit(1) except NoMatch: - message = 'the constraint \'{query}\' match no package, and this is not allowed in this context' # NOQA: ignore=E501 + message = ('the constraint \'{query}\' match no package, ' + 'and this is not allowed in this context') tty.die(message.format(query=constraint)) diff --git a/lib/spack/spack/cmd/package-list.py b/lib/spack/spack/cmd/package-list.py index a27502d30e..9ed42de823 100644 --- a/lib/spack/spack/cmd/package-list.py +++ b/lib/spack/spack/cmd/package-list.py @@ -32,7 +32,7 @@ description = "Print a list of all packages in reStructuredText." def github_url(pkg): """Link to a package file on github.""" - url = "https://github.com/llnl/spack/blob/master/var/spack/packages/%s/package.py" # NOQA: ignore=E501 + url = "https://github.com/llnl/spack/blob/master/var/spack/packages/%s/package.py" return (url % pkg.name) diff --git a/lib/spack/spack/cmd/patch.py b/lib/spack/spack/cmd/patch.py index a5507e42cf..9c72da40b5 100644 --- a/lib/spack/spack/cmd/patch.py +++ b/lib/spack/spack/cmd/patch.py @@ -29,14 +29,16 @@ import spack.cmd import spack -description="Patch expanded archive sources in preparation for install" +description = "Patch expanded archive sources in preparation for install" + def setup_parser(subparser): subparser.add_argument( '-n', '--no-checksum', action='store_true', dest='no_checksum', help="Do not check downloaded packages against checksum") subparser.add_argument( - 'packages', nargs=argparse.REMAINDER, help="specs of packages to stage") + 'packages', nargs=argparse.REMAINDER, + help="specs of packages to stage") def patch(parser, args): diff --git a/lib/spack/spack/cmd/pkg.py b/lib/spack/spack/cmd/pkg.py index a24c2759fe..7791b93cf5 100644 --- a/lib/spack/spack/cmd/pkg.py +++ b/lib/spack/spack/cmd/pkg.py @@ -33,6 +33,7 @@ from spack.util.executable import * description = "Query packages associated with particular git revisions." 
+ def setup_parser(subparser): sp = subparser.add_subparsers( metavar='SUBCOMMAND', dest='pkg_command') @@ -46,22 +47,28 @@ def setup_parser(subparser): help="Revision to list packages for.") diff_parser = sp.add_parser('diff', help=pkg_diff.__doc__) - diff_parser.add_argument('rev1', nargs='?', default='HEAD^', - help="Revision to compare against.") - diff_parser.add_argument('rev2', nargs='?', default='HEAD', - help="Revision to compare to rev1 (default is HEAD).") + diff_parser.add_argument( + 'rev1', nargs='?', default='HEAD^', + help="Revision to compare against.") + diff_parser.add_argument( + 'rev2', nargs='?', default='HEAD', + help="Revision to compare to rev1 (default is HEAD).") add_parser = sp.add_parser('added', help=pkg_added.__doc__) - add_parser.add_argument('rev1', nargs='?', default='HEAD^', - help="Revision to compare against.") - add_parser.add_argument('rev2', nargs='?', default='HEAD', - help="Revision to compare to rev1 (default is HEAD).") + add_parser.add_argument( + 'rev1', nargs='?', default='HEAD^', + help="Revision to compare against.") + add_parser.add_argument( + 'rev2', nargs='?', default='HEAD', + help="Revision to compare to rev1 (default is HEAD).") rm_parser = sp.add_parser('removed', help=pkg_removed.__doc__) - rm_parser.add_argument('rev1', nargs='?', default='HEAD^', - help="Revision to compare against.") - rm_parser.add_argument('rev2', nargs='?', default='HEAD', - help="Revision to compare to rev1 (default is HEAD).") + rm_parser.add_argument( + 'rev1', nargs='?', default='HEAD^', + help="Revision to compare against.") + rm_parser.add_argument( + 'rev2', nargs='?', default='HEAD', + help="Revision to compare to rev1 (default is HEAD).") def get_git(): @@ -88,7 +95,8 @@ def pkg_add(args): for pkg_name in args.packages: filename = spack.repo.filename_for_package_name(pkg_name) if not os.path.isfile(filename): - tty.die("No such package: %s. Path does not exist:" % pkg_name, filename) + tty.die("No such package: %s. 
Path does not exist:" % + pkg_name, filename) git = get_git() git('-C', spack.packages_path, 'add', filename) @@ -112,7 +120,8 @@ def pkg_diff(args): if u1: print "%s:" % args.rev1 colify(sorted(u1), indent=4) - if u1: print + if u1: + print if u2: print "%s:" % args.rev2 @@ -122,19 +131,21 @@ def pkg_diff(args): def pkg_removed(args): """Show packages removed since a commit.""" u1, u2 = diff_packages(args.rev1, args.rev2) - if u1: colify(sorted(u1)) + if u1: + colify(sorted(u1)) def pkg_added(args): """Show packages added since a commit.""" u1, u2 = diff_packages(args.rev1, args.rev2) - if u2: colify(sorted(u2)) + if u2: + colify(sorted(u2)) def pkg(parser, args): - action = { 'add' : pkg_add, - 'diff' : pkg_diff, - 'list' : pkg_list, - 'removed' : pkg_removed, - 'added' : pkg_added } + action = {'add': pkg_add, + 'diff': pkg_diff, + 'list': pkg_list, + 'removed': pkg_removed, + 'added': pkg_added} action[args.pkg_command](args) diff --git a/lib/spack/spack/cmd/providers.py b/lib/spack/spack/cmd/providers.py index e9007486d2..0f4a97cc4a 100644 --- a/lib/spack/spack/cmd/providers.py +++ b/lib/spack/spack/cmd/providers.py @@ -22,7 +22,6 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import os import argparse from llnl.util.tty.colify import colify @@ -30,11 +29,13 @@ from llnl.util.tty.colify import colify import spack import spack.cmd -description ="List packages that provide a particular virtual package" +description = "List packages that provide a particular virtual package" + def setup_parser(subparser): - subparser.add_argument('vpkg_spec', metavar='VPACKAGE_SPEC', nargs=argparse.REMAINDER, - help='Find packages that provide this virtual package') + subparser.add_argument( + 'vpkg_spec', metavar='VPACKAGE_SPEC', nargs=argparse.REMAINDER, + help='Find packages that provide this virtual package') def providers(parser, args): diff --git a/lib/spack/spack/cmd/python.py b/lib/spack/spack/cmd/python.py index 59423271b9..12727cb599 100644 --- a/lib/spack/spack/cmd/python.py +++ b/lib/spack/spack/cmd/python.py @@ -30,18 +30,22 @@ import platform import spack + def setup_parser(subparser): subparser.add_argument( '-c', dest='python_command', help='Command to execute.') subparser.add_argument( - 'python_args', nargs=argparse.REMAINDER, help="File to run plus arguments.") + 'python_args', nargs=argparse.REMAINDER, + help="File to run plus arguments.") + description = "Launch an interpreter as spack would launch a command" + def python(parser, args): # Fake a main python shell by setting __name__ to __main__. - console = code.InteractiveConsole({'__name__' : '__main__', - 'spack' : spack}) + console = code.InteractiveConsole({'__name__': '__main__', + 'spack': spack}) if "PYTHONSTARTUP" in os.environ: startup_file = os.environ["PYTHONSTARTUP"] diff --git a/lib/spack/spack/cmd/reindex.py b/lib/spack/spack/cmd/reindex.py index 93eba7a0f1..e37eebbd92 100644 --- a/lib/spack/spack/cmd/reindex.py +++ b/lib/spack/spack/cmd/reindex.py @@ -22,10 +22,10 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import argparse import spack description = "Rebuild Spack's package database." 
+ def reindex(parser, args): spack.installed_db.reindex(spack.install_layout) diff --git a/lib/spack/spack/cmd/repo.py b/lib/spack/spack/cmd/repo.py index cbd8f4784e..5ab2ac0833 100644 --- a/lib/spack/spack/cmd/repo.py +++ b/lib/spack/spack/cmd/repo.py @@ -23,20 +23,16 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import os -import re -import shutil -import argparse import llnl.util.tty as tty -from llnl.util.filesystem import join_path, mkdirp import spack.spec import spack.config -from spack.util.environment import get_path from spack.repository import * description = "Manage package source repositories." + def setup_parser(subparser): sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='repo_command') scopes = spack.config.config_scopes @@ -57,13 +53,15 @@ def setup_parser(subparser): # Add add_parser = sp.add_parser('add', help=repo_add.__doc__) - add_parser.add_argument('path', help="Path to a Spack package repository directory.") + add_parser.add_argument( + 'path', help="Path to a Spack package repository directory.") add_parser.add_argument( '--scope', choices=scopes, default=spack.cmd.default_modify_scope, help="Configuration scope to modify.") # Remove - remove_parser = sp.add_parser('remove', help=repo_remove.__doc__, aliases=['rm']) + remove_parser = sp.add_parser( + 'remove', help=repo_remove.__doc__, aliases=['rm']) remove_parser.add_argument( 'path_or_namespace', help="Path or namespace of a Spack package repository.") @@ -100,7 +98,8 @@ def repo_add(args): # If that succeeds, finally add it to the configuration. repos = spack.config.get_config('repos', args.scope) - if not repos: repos = [] + if not repos: + repos = [] if repo.root in repos or path in repos: tty.die("Repository is already registered with Spack: %s" % path) @@ -135,7 +134,7 @@ def repo_remove(args): tty.msg("Removed repository %s with namespace '%s'." % (repo.root, repo.namespace)) return - except RepoError as e: + except RepoError: continue tty.die("No repository with path or namespace: %s" @@ -149,7 +148,7 @@ def repo_list(args): for r in roots: try: repos.append(Repo(r)) - except RepoError as e: + except RepoError: continue msg = "%d package repositor" % len(repos) @@ -166,9 +165,9 @@ def repo_list(args): def repo(parser, args): - action = { 'create' : repo_create, - 'list' : repo_list, - 'add' : repo_add, - 'remove' : repo_remove, - 'rm' : repo_remove} + action = {'create': repo_create, + 'list': repo_list, + 'add': repo_add, + 'remove': repo_remove, + 'rm': repo_remove} action[args.repo_command](args) diff --git a/lib/spack/spack/cmd/restage.py b/lib/spack/spack/cmd/restage.py index 325d30662f..969afe09bd 100644 --- a/lib/spack/spack/cmd/restage.py +++ b/lib/spack/spack/cmd/restage.py @@ -31,6 +31,7 @@ import spack.cmd description = "Revert checked out package source code." + def setup_parser(subparser): subparser.add_argument('packages', nargs=argparse.REMAINDER, help="specs of packages to restage") diff --git a/lib/spack/spack/cmd/setup.py b/lib/spack/spack/cmd/setup.py index 04f3d663df..b55e102c0e 100644 --- a/lib/spack/spack/cmd/setup.py +++ b/lib/spack/spack/cmd/setup.py @@ -35,6 +35,7 @@ from spack.stage import DIYStage description = "Create a configuration script and module, but don't build." 
+ def setup_parser(subparser): subparser.add_argument( '-i', '--ignore-dependencies', action='store_true', dest='ignore_deps', @@ -70,7 +71,9 @@ def setup(self, args): return if not spec.versions.concrete: - tty.die("spack setup spec must have a single, concrete version. Did you forget a package version number?") + tty.die( + "spack setup spec must have a single, concrete version. " + "Did you forget a package version number?") spec.concretize() package = spack.repo.get(spec) @@ -84,8 +87,8 @@ def setup(self, args): spack.do_checksum = False package.do_install( - keep_prefix=True, # Don't remove install directory, even if you think you should + keep_prefix=True, # Don't remove install directory ignore_deps=args.ignore_deps, verbose=args.verbose, keep_stage=True, # don't remove source dir for SETUP. - install_phases = set(['setup', 'provenance'])) + install_phases=set(['setup', 'provenance'])) diff --git a/lib/spack/spack/cmd/spec.py b/lib/spack/spack/cmd/spec.py index 321e3e429b..6e6d1c1277 100644 --- a/lib/spack/spack/cmd/spec.py +++ b/lib/spack/spack/cmd/spec.py @@ -25,23 +25,22 @@ import argparse import spack.cmd -import llnl.util.tty as tty - import spack -import spack.url as url description = "print out abstract and concrete versions of a spec." + def setup_parser(subparser): subparser.add_argument('-i', '--ids', action='store_true', help="show numerical ids for dependencies.") - subparser.add_argument('specs', nargs=argparse.REMAINDER, help="specs of packages") + subparser.add_argument( + 'specs', nargs=argparse.REMAINDER, help="specs of packages") def spec(parser, args): - kwargs = { 'ids' : args.ids, - 'indent' : 2, - 'color' : True } + kwargs = {'ids': args.ids, + 'indent': 2, + 'color': True} for spec in spack.cmd.parse_specs(args.specs): print "Input spec" diff --git a/lib/spack/spack/cmd/stage.py b/lib/spack/spack/cmd/stage.py index 61e9c6d9ff..bfc2e5f456 100644 --- a/lib/spack/spack/cmd/stage.py +++ b/lib/spack/spack/cmd/stage.py @@ -22,14 +22,14 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import os import argparse import llnl.util.tty as tty import spack import spack.cmd -description="Expand downloaded archive in preparation for install" +description = "Expand downloaded archive in preparation for install" + def setup_parser(subparser): subparser.add_argument( diff --git a/lib/spack/spack/cmd/test-install.py b/lib/spack/spack/cmd/test-install.py index 14c06d136d..8e7173e9a2 100644 --- a/lib/spack/spack/cmd/test-install.py +++ b/lib/spack/spack/cmd/test-install.py @@ -36,25 +36,25 @@ from llnl.util.filesystem import * from spack.build_environment import InstallError from spack.fetch_strategy import FetchError -description = "Run package installation as a unit test, output formatted results." +description = "Run package install as a unit test, output formatted results." def setup_parser(subparser): - subparser.add_argument('-j', - '--jobs', - action='store', - type=int, - help="Explicitly set number of make jobs. Default is #cpus.") + subparser.add_argument( + '-j', '--jobs', action='store', type=int, + help="Explicitly set number of make jobs. 
Default is #cpus.") - subparser.add_argument('-n', - '--no-checksum', - action='store_true', - dest='no_checksum', - help="Do not check packages against checksum") + subparser.add_argument( + '-n', '--no-checksum', action='store_true', dest='no_checksum', + help="Do not check packages against checksum") - subparser.add_argument('-o', '--output', action='store', help="test output goes in this file") + subparser.add_argument( + '-o', '--output', action='store', + help="test output goes in this file") - subparser.add_argument('package', nargs=argparse.REMAINDER, help="spec of package to install") + subparser.add_argument( + 'package', nargs=argparse.REMAINDER, + help="spec of package to install") class TestResult(object): @@ -65,6 +65,7 @@ class TestResult(object): class TestSuite(object): + def __init__(self, filename): self.filename = filename self.root = ET.Element('testsuite') @@ -75,14 +76,17 @@ class TestSuite(object): def append(self, item): if not isinstance(item, TestCase): - raise TypeError('only TestCase instances may be appended to a TestSuite instance') + raise TypeError( + 'only TestCase instances may be appended to TestSuite') self.tests.append(item) # Append the item to the list of tests def __exit__(self, exc_type, exc_val, exc_tb): # Prepare the header for the entire test suite - number_of_errors = sum(x.result_type == TestResult.ERRORED for x in self.tests) + number_of_errors = sum( + x.result_type == TestResult.ERRORED for x in self.tests) self.root.set('errors', str(number_of_errors)) - number_of_failures = sum(x.result_type == TestResult.FAILED for x in self.tests) + number_of_failures = sum( + x.result_type == TestResult.FAILED for x in self.tests) self.root.set('failures', str(number_of_failures)) self.root.set('tests', str(len(self.tests))) @@ -112,7 +116,8 @@ class TestCase(object): self.element.set('time', str(time)) self.result_type = None - def set_result(self, result_type, message=None, error_type=None, text=None): + def set_result(self, result_type, + message=None, error_type=None, text=None): self.result_type = result_type result = TestCase.results[self.result_type] if result is not None and result is not TestResult.PASSED: @@ -155,13 +160,19 @@ def install_single_spec(spec, number_of_jobs): # If it is already installed, skip the test if spack.repo.get(spec).installed: testcase = TestCase(package.name, package.spec.short_spec, time=0.0) - testcase.set_result(TestResult.SKIPPED, message='Skipped [already installed]', error_type='already_installed') + testcase.set_result( + TestResult.SKIPPED, + message='Skipped [already installed]', + error_type='already_installed') return testcase # If it relies on dependencies that did not install, skip if failed_dependencies(spec): testcase = TestCase(package.name, package.spec.short_spec, time=0.0) - testcase.set_result(TestResult.SKIPPED, message='Skipped [failed dependencies]', error_type='dep_failed') + testcase.set_result( + TestResult.SKIPPED, + message='Skipped [failed dependencies]', + error_type='dep_failed') return testcase # Otherwise try to install the spec @@ -177,26 +188,30 @@ def install_single_spec(spec, number_of_jobs): testcase = TestCase(package.name, package.spec.short_spec, duration) testcase.set_result(TestResult.PASSED) except InstallError: - # An InstallError is considered a failure (the recipe didn't work correctly) + # An InstallError is considered a failure (the recipe didn't work + # correctly) duration = time.time() - start_time # Try to get the log lines = fetch_log(package.build_log_path) text = 
'\n'.join(lines) testcase = TestCase(package.name, package.spec.short_spec, duration) - testcase.set_result(TestResult.FAILED, message='Installation failure', text=text) + testcase.set_result(TestResult.FAILED, + message='Installation failure', text=text) except FetchError: # A FetchError is considered an error (we didn't even start building) duration = time.time() - start_time testcase = TestCase(package.name, package.spec.short_spec, duration) - testcase.set_result(TestResult.ERRORED, message='Unable to fetch package') + testcase.set_result(TestResult.ERRORED, + message='Unable to fetch package') return testcase def get_filename(args, top_spec): if not args.output: - fname = 'test-{x.name}-{x.version}-{hash}.xml'.format(x=top_spec, hash=top_spec.dag_hash()) + fname = 'test-{x.name}-{x.version}-{hash}.xml'.format( + x=top_spec, hash=top_spec.dag_hash()) output_directory = join_path(os.getcwd(), 'test-output') if not os.path.exists(output_directory): os.mkdir(output_directory) diff --git a/lib/spack/spack/cmd/test.py b/lib/spack/spack/cmd/test.py index b9f2a449ae..bf7342f606 100644 --- a/lib/spack/spack/cmd/test.py +++ b/lib/spack/spack/cmd/test.py @@ -52,6 +52,7 @@ def setup_parser(subparser): class MockCache(object): + def store(self, copyCmd, relativeDst): pass @@ -60,6 +61,7 @@ class MockCache(object): class MockCacheFetcher(object): + def set_stage(self, stage): pass diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py index dbe6cd6584..8957d1c908 100644 --- a/lib/spack/spack/cmd/uninstall.py +++ b/lib/spack/spack/cmd/uninstall.py @@ -50,25 +50,27 @@ def setup_parser(subparser): subparser.add_argument( '-f', '--force', action='store_true', dest='force', help="Remove regardless of whether other packages depend on this one.") + subparser.add_argument( '-a', '--all', action='store_true', dest='all', - help="USE CAREFULLY. Remove ALL installed packages that match each " + - "supplied spec. i.e., if you say uninstall libelf, ALL versions of " + # NOQA: ignore=E501 - "libelf are uninstalled. This is both useful and dangerous, like rm -r.") # NOQA: ignore=E501 + help="USE CAREFULLY. Remove ALL installed packages that match each " + "supplied spec. i.e., if you say uninstall libelf, ALL versions " + "of libelf are uninstalled. This is both useful and dangerous, " + "like rm -r.") + subparser.add_argument( '-d', '--dependents', action='store_true', dest='dependents', - help='Also uninstall any packages that depend on the ones given via command line.' # NOQA: ignore=E501 - ) + help='Also uninstall any packages that depend on the ones given ' + 'via command line.') + subparser.add_argument( '-y', '--yes-to-all', action='store_true', dest='yes_to_all', - help='Assume "yes" is the answer to every confirmation asked to the user.' # NOQA: ignore=E501 + help='Assume "yes" is the answer to every confirmation requested') - ) subparser.add_argument( 'packages', nargs=argparse.REMAINDER, - help="specs of packages to uninstall" - ) + help="specs of packages to uninstall") def concretize_specs(specs, allow_multiple_matches=False, force=False): diff --git a/lib/spack/spack/cmd/unload.py b/lib/spack/spack/cmd/unload.py index 7bd15750ed..b52bedb7b4 100644 --- a/lib/spack/spack/cmd/unload.py +++ b/lib/spack/spack/cmd/unload.py @@ -25,13 +25,15 @@ import argparse import spack.modules -description ="Remove package from environment using module." +description = "Remove package from environment using module." 
+ def setup_parser(subparser): """Parser is only constructed so that this prints a nice help message with -h. """ subparser.add_argument( - 'spec', nargs=argparse.REMAINDER, help='Spec of package to unload with modules.') + 'spec', nargs=argparse.REMAINDER, + help='Spec of package to unload with modules.') def unload(parser, args): diff --git a/lib/spack/spack/cmd/unuse.py b/lib/spack/spack/cmd/unuse.py index 789a690e9c..6403cf6162 100644 --- a/lib/spack/spack/cmd/unuse.py +++ b/lib/spack/spack/cmd/unuse.py @@ -25,13 +25,15 @@ import argparse import spack.modules -description ="Remove package from environment using dotkit." +description = "Remove package from environment using dotkit." + def setup_parser(subparser): """Parser is only constructed so that this prints a nice help message with -h. """ subparser.add_argument( - 'spec', nargs=argparse.REMAINDER, help='Spec of package to unuse with dotkit.') + 'spec', nargs=argparse.REMAINDER, + help='Spec of package to unuse with dotkit.') def unuse(parser, args): diff --git a/lib/spack/spack/cmd/url-parse.py b/lib/spack/spack/cmd/url-parse.py index ce12a17d13..b8c7c95040 100644 --- a/lib/spack/spack/cmd/url-parse.py +++ b/lib/spack/spack/cmd/url-parse.py @@ -22,28 +22,28 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import sys - import llnl.util.tty as tty import spack import spack.url from spack.util.web import find_versions_of_archive -description = "Show parsing of a URL, optionally spider web for other versions." +description = "Show parsing of a URL, optionally spider web for versions." + def setup_parser(subparser): subparser.add_argument('url', help="url of a package archive") subparser.add_argument( - '-s', '--spider', action='store_true', help="Spider the source page for versions.") + '-s', '--spider', action='store_true', + help="Spider the source page for versions.") def print_name_and_version(url): name, ns, nl, ntup, ver, vs, vl, vtup = spack.url.substitution_offsets(url) - underlines = [" "] * max(ns+nl, vs+vl) - for i in range(ns, ns+nl): + underlines = [" "] * max(ns + nl, vs + vl) + for i in range(ns, ns + nl): underlines[i] = '-' - for i in range(vs, vs+vl): + for i in range(vs, vs + vl): underlines[i] = '~' print " %s" % url diff --git a/lib/spack/spack/cmd/urls.py b/lib/spack/spack/cmd/urls.py index 2fe2019a22..f151581d7d 100644 --- a/lib/spack/spack/cmd/urls.py +++ b/lib/spack/spack/cmd/urls.py @@ -22,12 +22,12 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import sys import spack import spack.url description = "Inspect urls used by packages in spack." 
+ def setup_parser(subparser): subparser.add_argument( '-c', '--color', action='store_true', @@ -53,6 +53,7 @@ def urls(parser, args): for url in sorted(urls): if args.color or args.extrapolation: - print spack.url.color_url(url, subs=args.extrapolation, errors=True) + print spack.url.color_url( + url, subs=args.extrapolation, errors=True) else: print url diff --git a/lib/spack/spack/cmd/use.py b/lib/spack/spack/cmd/use.py index bbb90fde1b..e3612ace48 100644 --- a/lib/spack/spack/cmd/use.py +++ b/lib/spack/spack/cmd/use.py @@ -25,13 +25,15 @@ import argparse import spack.modules -description ="Add package to environment using dotkit." +description = "Add package to environment using dotkit." + def setup_parser(subparser): """Parser is only constructed so that this prints a nice help message with -h. """ subparser.add_argument( - 'spec', nargs=argparse.REMAINDER, help='Spec of package to use with dotkit.') + 'spec', nargs=argparse.REMAINDER, + help='Spec of package to use with dotkit.') def use(parser, args): diff --git a/lib/spack/spack/cmd/versions.py b/lib/spack/spack/cmd/versions.py index ec3a4b2e34..1e95225ab8 100644 --- a/lib/spack/spack/cmd/versions.py +++ b/lib/spack/spack/cmd/versions.py @@ -22,15 +22,16 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import os from llnl.util.tty.colify import colify import llnl.util.tty as tty import spack -description ="List available versions of a package" +description = "List available versions of a package" + def setup_parser(subparser): - subparser.add_argument('package', metavar='PACKAGE', help='Package to list versions for') + subparser.add_argument('package', metavar='PACKAGE', + help='Package to list versions for') def versions(parser, args): diff --git a/lib/spack/spack/compiler.py b/lib/spack/spack/compiler.py index ce4555bc56..a77991e4dc 100644 --- a/lib/spack/spack/compiler.py +++ b/lib/spack/spack/compiler.py @@ -25,10 +25,8 @@ import os import re import itertools -from datetime import datetime import llnl.util.tty as tty -from llnl.util.lang import memoized from llnl.util.filesystem import join_path import spack.error @@ -37,10 +35,10 @@ import spack.architecture from spack.util.multiproc import parmap from spack.util.executable import * from spack.util.environment import get_path -from spack.version import Version __all__ = ['Compiler', 'get_compiler_version'] + def _verify_executables(*paths): for path in paths: if not os.path.isfile(path) and os.access(path, os.X_OK): @@ -49,8 +47,9 @@ def _verify_executables(*paths): _version_cache = {} + def get_compiler_version(compiler_path, version_arg, regex='(.*)'): - if not compiler_path in _version_cache: + if compiler_path not in _version_cache: compiler = Executable(compiler_path) output = compiler(version_arg, output=str, error=str) @@ -113,7 +112,7 @@ class Compiler(object): # Name of module used to switch versions of this compiler PrgEnv_compiler = None - def __init__(self, cspec, operating_system, + def __init__(self, cspec, operating_system, paths, modules=[], alias=None, **kwargs): def check(exe): if exe is None: @@ -130,11 +129,6 @@ class Compiler(object): else: self.fc = check(paths[3]) - #self.cc = check(cc) - #self.cxx = check(cxx) - #self.f77 = check(f77) - #self.fc = check(fc) - # Unfortunately have to make sure these params are accepted # in the same order they are returned by sorted(flags) # in 
compilers/__init__.py @@ -158,31 +152,30 @@ class Compiler(object): @property def openmp_flag(self): # If it is not overridden, assume it is not supported and warn the user - tty.die("The compiler you have chosen does not currently support OpenMP.", - "If you think it should, please edit the compiler subclass and", - "submit a pull request or issue.") - + tty.die( + "The compiler you have chosen does not currently support OpenMP.", + "If you think it should, please edit the compiler subclass and", + "submit a pull request or issue.") # This property should be overridden in the compiler subclass if # C++11 is supported by that compiler @property def cxx11_flag(self): # If it is not overridden, assume it is not supported and warn the user - tty.die("The compiler you have chosen does not currently support C++11.", - "If you think it should, please edit the compiler subclass and", - "submit a pull request or issue.") - + tty.die( + "The compiler you have chosen does not currently support C++11.", + "If you think it should, please edit the compiler subclass and", + "submit a pull request or issue.") # This property should be overridden in the compiler subclass if # C++14 is supported by that compiler @property def cxx14_flag(self): # If it is not overridden, assume it is not supported and warn the user - tty.die("The compiler you have chosen does not currently support C++14.", - "If you think it should, please edit the compiler subclass and", - "submit a pull request or issue.") - - + tty.die( + "The compiler you have chosen does not currently support C++14.", + "If you think it should, please edit the compiler subclass and", + "submit a pull request or issue.") # # Compiler classes have methods for querying the version of @@ -191,7 +184,6 @@ class Compiler(object): # Compiler *instances* are just data objects, and can only be # constructed from an actual set of executables. # - @classmethod def default_version(cls, cc): """Override just this to override all compiler version functions.""" @@ -258,16 +250,19 @@ class Compiler(object): version = detect_version(full_path) return (version, prefix, suffix, full_path) except ProcessError, e: - tty.debug("Couldn't get version for compiler %s" % full_path, e) + tty.debug( + "Couldn't get version for compiler %s" % full_path, e) return None except Exception, e: # Catching "Exception" here is fine because it just # means something went wrong running a candidate executable. - tty.debug("Error while executing candidate compiler %s" % full_path, - "%s: %s" %(e.__class__.__name__, e)) + tty.debug("Error while executing candidate compiler %s" + % full_path, + "%s: %s" % (e.__class__.__name__, e)) return None - successful = [key for key in parmap(check, checks) if key is not None] + successful = [k for k in parmap(check, checks) if k is not None] + # The 'successful' list is ordered like the input paths. # Reverse it here so that the dict creation (last insert wins) # does not spoil the intented precedence. 
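The reversal described in the comment above relies on the rule that, when a dict is built from a sequence of key/value pairs, the last pair with a given key wins. A minimal sketch of why reversing an ordered candidate list keeps the first-found entry; the version string and paths below are hypothetical, not Spack's real detection data:

    # Two candidate executables report the same compiler version; the one
    # found first on the search path should take precedence.
    found = [('4.9.3', '/usr/bin/gcc'), ('4.9.3', '/opt/contrib/bin/gcc')]

    # Built directly, the last pair wins and the later path shadows the first.
    assert dict(found)['4.9.3'] == '/opt/contrib/bin/gcc'

    # Built from the reversed list, the first-found path wins instead.
    assert dict(reversed(found))['4.9.3'] == '/usr/bin/gcc'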
@@ -278,20 +273,23 @@ class Compiler(object): """Return a string representation of the compiler toolchain.""" return self.__str__() - def __str__(self): """Return a string representation of the compiler toolchain.""" return "%s(%s)" % ( - self.name, '\n '.join((str(s) for s in (self.cc, self.cxx, self.f77, self.fc, self.modules, str(self.operating_system))))) + self.name, '\n '.join((str(s) for s in ( + self.cc, self.cxx, self.f77, self.fc, self.modules, + str(self.operating_system))))) class CompilerAccessError(spack.error.SpackError): + def __init__(self, path): super(CompilerAccessError, self).__init__( "'%s' is not a valid compiler." % path) class InvalidCompilerError(spack.error.SpackError): + def __init__(self): super(InvalidCompilerError, self).__init__( "Compiler has no executables.") diff --git a/lib/spack/spack/compilers/__init__.py b/lib/spack/spack/compilers/__init__.py index 0ba94741da..eb866c8bbb 100644 --- a/lib/spack/spack/compilers/__init__.py +++ b/lib/spack/spack/compilers/__init__.py @@ -26,15 +26,9 @@ system and configuring Spack to use multiple compilers. """ import imp -import os import platform -import copy -import hashlib -import base64 -import yaml -import sys -from llnl.util.lang import memoized, list_modules +from llnl.util.lang import list_modules from llnl.util.filesystem import join_path import spack @@ -43,11 +37,7 @@ import spack.spec import spack.config import spack.architecture -from spack.util.multiproc import parmap -from spack.compiler import Compiler -from spack.util.executable import which from spack.util.naming import mod_to_class -from spack.util.environment import get_path _imported_compilers_module = 'spack.compilers' _path_instance_vars = ['cc', 'cxx', 'f77', 'fc'] @@ -73,7 +63,8 @@ def _to_dict(compiler): """Return a dict version of compiler suitable to insert in YAML.""" d = {} d['spec'] = str(compiler.spec) - d['paths'] = dict( (attr, getattr(compiler, attr, None)) for attr in _path_instance_vars ) + d['paths'] = dict((attr, getattr(compiler, attr, None)) + for attr in _path_instance_vars) d['operating_system'] = str(compiler.operating_system) d['modules'] = compiler.modules if compiler.modules else [] @@ -140,15 +131,19 @@ def remove_compiler_from_config(compiler_spec, scope=None): - compiler_specs: a list of CompilerSpec objects. - scope: configuration scope to modify. """ + # Need a better way for this + global _cache_config_file + compiler_config = get_compiler_config(scope) config_length = len(compiler_config) - filtered_compiler_config = [comp for comp in compiler_config - if spack.spec.CompilerSpec(comp['compiler']['spec']) != compiler_spec] - # Need a better way for this - global _cache_config_file - _cache_config_file = filtered_compiler_config # Update the cache for changes - if len(filtered_compiler_config) == config_length: # No items removed + filtered_compiler_config = [ + comp for comp in compiler_config + if spack.spec.CompilerSpec(comp['compiler']['spec']) != compiler_spec] + + # Update the cache for changes + _cache_config_file = filtered_compiler_config + if len(filtered_compiler_config) == config_length: # No items removed CompilerSpecInsufficientlySpecificError(compiler_spec) spack.config.update_config('compilers', filtered_compiler_config, scope) @@ -158,7 +153,8 @@ def all_compilers_config(scope=None, init_config=True): available to build with. These are instances of CompilerSpec. """ # Get compilers for this architecture. - global _cache_config_file #Create a cache of the config file so we don't load all the time. 
+ # Create a cache of the config file so we don't load all the time. + global _cache_config_file if not _cache_config_file: _cache_config_file = get_compiler_config(scope, init_config) return _cache_config_file @@ -236,7 +232,8 @@ def compilers_for_spec(compiler_spec, scope=None, **kwargs): continue items = items['compiler'] - if not ('paths' in items and all(n in items['paths'] for n in _path_instance_vars)): + if not ('paths' in items and + all(n in items['paths'] for n in _path_instance_vars)): raise InvalidCompilerConfigurationError(cspec) cls = class_for_compiler_name(cspec.name) @@ -254,10 +251,10 @@ def compilers_for_spec(compiler_spec, scope=None, **kwargs): mods = [] if 'operating_system' in items: - operating_system = spack.architecture._operating_system_from_dict(items['operating_system'], platform) + os = spack.architecture._operating_system_from_dict( + items['operating_system'], platform) else: - operating_system = None - + os = None alias = items['alias'] if 'alias' in items else None @@ -266,7 +263,8 @@ def compilers_for_spec(compiler_spec, scope=None, **kwargs): if f in items: flags[f] = items[f] - compilers.append(cls(cspec, operating_system, compiler_paths, mods, alias, **flags)) + compilers.append( + cls(cspec, os, compiler_paths, mods, alias, **flags)) return compilers @@ -275,7 +273,6 @@ def compilers_for_spec(compiler_spec, scope=None, **kwargs): for cspec in matches: compilers.extend(get_compilers(cspec)) return compilers -# return [get_compilers(cspec) for cspec in matches] @_auto_compiler_spec @@ -285,8 +282,9 @@ def compiler_for_spec(compiler_spec, arch): operating_system = arch.platform_os assert(compiler_spec.concrete) - compilers = [c for c in compilers_for_spec(compiler_spec, platform=arch.platform) - if c.operating_system == operating_system] + compilers = [ + c for c in compilers_for_spec(compiler_spec, platform=arch.platform) + if c.operating_system == operating_system] if len(compilers) < 1: raise NoCompilerForSpecError(compiler_spec, operating_system) if len(compilers) > 1: @@ -321,11 +319,13 @@ def all_os_classes(): return classes + def all_compiler_types(): return [class_for_compiler_name(c) for c in supported_compilers()] class InvalidCompilerConfigurationError(spack.error.SpackError): + def __init__(self, compiler_spec): super(InvalidCompilerConfigurationError, self).__init__( "Invalid configuration for [compiler \"%s\"]: " % compiler_spec, @@ -335,14 +335,18 @@ class InvalidCompilerConfigurationError(spack.error.SpackError): class NoCompilersError(spack.error.SpackError): def __init__(self): - super(NoCompilersError, self).__init__("Spack could not find any compilers!") + super(NoCompilersError, self).__init__( + "Spack could not find any compilers!") + class NoCompilerForSpecError(spack.error.SpackError): def __init__(self, compiler_spec, target): - super(NoCompilerForSpecError, self).__init__("No compilers for operating system %s satisfy spec %s" % ( - target, compiler_spec)) + super(NoCompilerForSpecError, self).__init__( + "No compilers for operating system %s satisfy spec %s" + % (target, compiler_spec)) + class CompilerSpecInsufficientlySpecificError(spack.error.SpackError): def __init__(self, compiler_spec): - super(CompilerSpecInsufficientlySpecificError, self).__init__("Multiple compilers satisfy spec %s", - compiler_spec) + super(CompilerSpecInsufficientlySpecificError, self).__init__( + "Multiple compilers satisfy spec %s" % compiler_spec) diff --git a/lib/spack/spack/compilers/clang.py b/lib/spack/spack/compilers/clang.py index 
00b406d820..4cf65222ae 100644 --- a/lib/spack/spack/compilers/clang.py +++ b/lib/spack/spack/compilers/clang.py @@ -29,6 +29,7 @@ from spack.util.executable import * import llnl.util.tty as tty from spack.version import ver + class Clang(Compiler): # Subclasses use possible names of C compiler cc_names = ['clang'] @@ -43,11 +44,12 @@ class Clang(Compiler): fc_names = [] # Named wrapper links within spack.build_env_path - link_paths = { 'cc' : 'clang/clang', - 'cxx' : 'clang/clang++', - # Use default wrappers for fortran, in case provided in compilers.yaml - 'f77' : 'f77', - 'fc' : 'f90' } + link_paths = {'cc': 'clang/clang', + 'cxx': 'clang/clang++', + # Use default wrappers for fortran, in case provided in + # compilers.yaml + 'f77': 'f77', + 'fc': 'f90'} @property def is_apple(self): diff --git a/lib/spack/spack/compilers/craype.py b/lib/spack/spack/compilers/craype.py index 4ba8b110ec..c92e5c131a 100644 --- a/lib/spack/spack/compilers/craype.py +++ b/lib/spack/spack/compilers/craype.py @@ -1,34 +1,33 @@ -##############################################################################} -# Copyright (c) 2013, Lawrence Livermore National Security, LLC. +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. -# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # -# For details, see https://scalability-llnl.github.io/spack +# For details, see https://github.com/llnl/spack # Please also see the LICENSE file for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License (as published by -# the Free Software Foundation) version 2.1 dated February 1999. +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and -# conditions of the GNU General Public License for more details. +# conditions of the GNU Lesser General Public License for more details. 
# -# You should have received a copy of the GNU Lesser General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import llnl.util.tty as tty - -#from spack.build_environment import load_module from spack.compiler import * -#from spack.version import ver + class Craype(Compiler): + """Cray programming environment compiler.""" + # Subclasses use possible names of C compiler cc_names = ['cc'] @@ -47,12 +46,11 @@ class Craype(Compiler): PrgEnv = 'PrgEnv-cray' PrgEnv_compiler = 'craype' - link_paths = { 'cc' : 'cc', - 'cxx' : 'c++', - 'f77' : 'f77', - 'fc' : 'fc'} - + link_paths = {'cc': 'cc', + 'cxx': 'c++', + 'f77': 'f77', + 'fc': 'fc'} + @classmethod def default_version(cls, comp): return get_compiler_version(comp, r'([Vv]ersion).*(\d+(\.\d+)+)') - diff --git a/lib/spack/spack/compilers/gcc.py b/lib/spack/spack/compilers/gcc.py index 2fae6688db..a556f346d7 100644 --- a/lib/spack/spack/compilers/gcc.py +++ b/lib/spack/spack/compilers/gcc.py @@ -26,6 +26,7 @@ import llnl.util.tty as tty from spack.compiler import * from spack.version import ver + class Gcc(Compiler): # Subclasses use possible names of C compiler cc_names = ['gcc'] @@ -44,10 +45,10 @@ class Gcc(Compiler): suffixes = [r'-mp-\d\.\d', r'-\d\.\d', r'-\d'] # Named wrapper links within spack.build_env_path - link_paths = {'cc' : 'gcc/gcc', - 'cxx' : 'gcc/g++', - 'f77' : 'gcc/gfortran', - 'fc' : 'gcc/gfortran' } + link_paths = {'cc': 'gcc/gcc', + 'cxx': 'gcc/g++', + 'f77': 'gcc/gfortran', + 'fc': 'gcc/gfortran'} PrgEnv = 'PrgEnv-gnu' PrgEnv_compiler = 'gcc' @@ -79,7 +80,6 @@ class Gcc(Compiler): # older gfortran versions don't have simple dumpversion output. 
r'(?:GNU Fortran \(GCC\))?(\d+\.\d+(?:\.\d+)?)') - @classmethod def f77_version(cls, f77): return cls.fc_version(f77) diff --git a/lib/spack/spack/compilers/intel.py b/lib/spack/spack/compilers/intel.py index 6cad03ff47..8531ecd19a 100644 --- a/lib/spack/spack/compilers/intel.py +++ b/lib/spack/spack/compilers/intel.py @@ -26,6 +26,7 @@ from spack.compiler import * import llnl.util.tty as tty from spack.version import ver + class Intel(Compiler): # Subclasses use possible names of C compiler cc_names = ['icc'] @@ -40,10 +41,10 @@ class Intel(Compiler): fc_names = ['ifort'] # Named wrapper links within spack.build_env_path - link_paths = { 'cc' : 'intel/icc', - 'cxx' : 'intel/icpc', - 'f77' : 'intel/ifort', - 'fc' : 'intel/ifort' } + link_paths = {'cc': 'intel/icc', + 'cxx': 'intel/icpc', + 'f77': 'intel/ifort', + 'fc': 'intel/ifort'} PrgEnv = 'PrgEnv-intel' PrgEnv_compiler = 'intel' @@ -64,7 +65,6 @@ class Intel(Compiler): else: return "-std=c++11" - @classmethod def default_version(cls, comp): """The '--version' option seems to be the most consistent one diff --git a/lib/spack/spack/compilers/nag.py b/lib/spack/spack/compilers/nag.py index cee11bc97a..fdfc078b5e 100644 --- a/lib/spack/spack/compilers/nag.py +++ b/lib/spack/spack/compilers/nag.py @@ -23,7 +23,7 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack.compiler import * -import llnl.util.tty as tty + class Nag(Compiler): # Subclasses use possible names of C compiler @@ -39,11 +39,12 @@ class Nag(Compiler): fc_names = ['nagfor'] # Named wrapper links within spack.build_env_path - link_paths = { # Use default wrappers for C and C++, in case provided in compilers.yaml - 'cc' : 'cc', - 'cxx' : 'c++', - 'f77' : 'nag/nagfor', - 'fc' : 'nag/nagfor' } + # Use default wrappers for C and C++, in case provided in compilers.yaml + link_paths = { + 'cc': 'cc', + 'cxx': 'c++', + 'f77': 'nag/nagfor', + 'fc': 'nag/nagfor'} @property def openmp_flag(self): @@ -71,9 +72,8 @@ class Nag(Compiler): """The '-V' option works for nag compilers. Output looks like this:: - NAG Fortran Compiler Release 6.0(Hibiya) Build 1037 - Product NPL6A60NA for x86-64 Linux - Copyright 1990-2015 The Numerical Algorithms Group Ltd., Oxford, U.K. 
+ NAG Fortran Compiler Release 6.0(Hibiya) Build 1037 + Product NPL6A60NA for x86-64 Linux """ return get_compiler_version( comp, '-V', r'NAG Fortran Compiler Release ([0-9.]+)') diff --git a/lib/spack/spack/compilers/pgi.py b/lib/spack/spack/compilers/pgi.py index 6d36d8bfa6..0e4be6e9ba 100644 --- a/lib/spack/spack/compilers/pgi.py +++ b/lib/spack/spack/compilers/pgi.py @@ -23,7 +23,7 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack.compiler import * -import llnl.util.tty as tty + class Pgi(Compiler): # Subclasses use possible names of C compiler @@ -39,17 +39,14 @@ class Pgi(Compiler): fc_names = ['pgfortran', 'pgf95', 'pgf90'] # Named wrapper links within spack.build_env_path - link_paths = { 'cc' : 'pgi/pgcc', - 'cxx' : 'pgi/pgc++', - 'f77' : 'pgi/pgfortran', - 'fc' : 'pgi/pgfortran' } - - + link_paths = {'cc': 'pgi/pgcc', + 'cxx': 'pgi/pgc++', + 'f77': 'pgi/pgfortran', + 'fc': 'pgi/pgfortran'} PrgEnv = 'PrgEnv-pgi' PrgEnv_compiler = 'pgi' - @property def openmp_flag(self): return "-mp" diff --git a/lib/spack/spack/compilers/xl.py b/lib/spack/spack/compilers/xl.py index b1431436ad..5c83209781 100644 --- a/lib/spack/spack/compilers/xl.py +++ b/lib/spack/spack/compilers/xl.py @@ -26,24 +26,26 @@ from spack.compiler import * import llnl.util.tty as tty from spack.version import ver + class Xl(Compiler): # Subclasses use possible names of C compiler - cc_names = ['xlc','xlc_r'] + cc_names = ['xlc', 'xlc_r'] # Subclasses use possible names of C++ compiler - cxx_names = ['xlC','xlC_r','xlc++','xlc++_r'] + cxx_names = ['xlC', 'xlC_r', 'xlc++', 'xlc++_r'] # Subclasses use possible names of Fortran 77 compiler - f77_names = ['xlf','xlf_r'] + f77_names = ['xlf', 'xlf_r'] # Subclasses use possible names of Fortran 90 compiler - fc_names = ['xlf90','xlf90_r','xlf95','xlf95_r','xlf2003','xlf2003_r','xlf2008','xlf2008_r'] + fc_names = ['xlf90', 'xlf90_r', 'xlf95', 'xlf95_r', + 'xlf2003', 'xlf2003_r', 'xlf2008', 'xlf2008_r'] # Named wrapper links within spack.build_env_path - link_paths = { 'cc' : 'xl/xlc', - 'cxx' : 'xl/xlc++', - 'f77' : 'xl/xlf', - 'fc' : 'xl/xlf90' } + link_paths = {'cc': 'xl/xlc', + 'cxx': 'xl/xlc++', + 'f77': 'xl/xlf', + 'fc': 'xl/xlf90'} @property def openmp_flag(self): @@ -56,7 +58,6 @@ class Xl(Compiler): else: return "-qlanglvl=extended0x" - @classmethod def default_version(cls, comp): """The '-qversion' is the standard option fo XL compilers. @@ -82,29 +83,28 @@ class Xl(Compiler): """ return get_compiler_version( - comp, '-qversion',r'([0-9]?[0-9]\.[0-9])') - + comp, '-qversion', r'([0-9]?[0-9]\.[0-9])') @classmethod def fc_version(cls, fc): - """The fortran and C/C++ versions of the XL compiler are always two units apart. - By this we mean that the fortran release that goes with XL C/C++ 11.1 is 13.1. - Having such a difference in version number is confusing spack quite a lot. - Most notably if you keep the versions as is the default xl compiler will only - have fortran and no C/C++. - So we associate the Fortran compiler with the version associated to the C/C++ - compiler. - One last stumble. Version numbers over 10 have at least a .1 those under 10 - a .0. There is no xlf 9.x or under currently available. BG/P and BG/L can - such a compiler mix and possibly older version of AIX and linux on power. + """The fortran and C/C++ versions of the XL compiler are always + two units apart. 
By this we mean that the fortran release that + goes with XL C/C++ 11.1 is 13.1. Having such a difference in + version number is confusing spack quite a lot. Most notably + if you keep the versions as is the default xl compiler will + only have fortran and no C/C++. So we associate the Fortran + compiler with the version associated to the C/C++ compiler. + One last stumble. Version numbers over 10 have at least a .1 + those under 10 a .0. There is no xlf 9.x or under currently + available. BG/P and BG/L can such a compiler mix and possibly + older version of AIX and linux on power. """ - fver = get_compiler_version(fc, '-qversion',r'([0-9]?[0-9]\.[0-9])') + fver = get_compiler_version(fc, '-qversion', r'([0-9]?[0-9]\.[0-9])') cver = float(fver) - 2 - if cver < 10 : - cver = cver - 0.1 + if cver < 10: + cver = cver - 0.1 return str(cver) - @classmethod def f77_version(cls, f77): return cls.fc_version(f77) diff --git a/lib/spack/spack/concretize.py b/lib/spack/spack/concretize.py index 6f11c86ce8..726dee62e3 100644 --- a/lib/spack/spack/concretize.py +++ b/lib/spack/spack/concretize.py @@ -61,7 +61,9 @@ class DefaultConcretizer(object): if not providers: raise UnsatisfiableProviderSpecError(providers[0], spec) spec_w_preferred_providers = find_spec( - spec, lambda x: spack.pkgsort.spec_has_preferred_provider(x.name, spec.name)) # NOQA: ignore=E501 + spec, + lambda x: spack.pkgsort.spec_has_preferred_provider( + x.name, spec.name)) if not spec_w_preferred_providers: spec_w_preferred_providers = spec provider_cmp = partial(spack.pkgsort.provider_compare, @@ -495,7 +497,8 @@ class UnavailableCompilerVersionError(spack.error.SpackError): def __init__(self, compiler_spec, operating_system): super(UnavailableCompilerVersionError, self).__init__( - "No available compiler version matches '%s' on operating_system %s" % (compiler_spec, operating_system), # NOQA: ignore=E501 + "No available compiler version matches '%s' on operating_system %s" + % (compiler_spec, operating_system), "Run 'spack compilers' to see available compiler Options.") @@ -506,14 +509,15 @@ class NoValidVersionError(spack.error.SpackError): def __init__(self, spec): super(NoValidVersionError, self).__init__( - "There are no valid versions for %s that match '%s'" % (spec.name, spec.versions)) # NOQA: ignore=E501 + "There are no valid versions for %s that match '%s'" + % (spec.name, spec.versions)) class NoBuildError(spack.error.SpackError): - """Raised when a package is configured with the buildable option False, but no satisfactory external versions can be found""" def __init__(self, spec): - super(NoBuildError, self).__init__( - "The spec '%s' is configured as not buildable,and no matching external installs were found" % spec.name) # NOQA: ignore=E501 + msg = ("The spec '%s' is configured as not buildable, " + "and no matching external installs were found") + super(NoBuildError, self).__init__(msg % spec.name) diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py index a4e274893c..a4a4f5411e 100644 --- a/lib/spack/spack/config.py +++ b/lib/spack/spack/config.py @@ -158,35 +158,35 @@ section_schemas = { 'required': ['cc', 'cxx', 'f77', 'fc'], 'additionalProperties': False, 'properties': { - 'cc': { 'anyOf': [ {'type' : 'string' }, - {'type' : 'null' }]}, - 'cxx': { 'anyOf': [ {'type' : 'string' }, - {'type' : 'null' }]}, - 'f77': { 'anyOf': [ {'type' : 'string' }, - {'type' : 'null' }]}, - 'fc': { 'anyOf': [ {'type' : 'string' }, - {'type' : 'null' }]}, - 'cflags': { 'anyOf': [ {'type' : 'string' }, - {'type' : 'null' 
}]}, - 'cxxflags': { 'anyOf': [ {'type' : 'string' }, - {'type' : 'null' }]}, - 'fflags': { 'anyOf': [ {'type' : 'string' }, - {'type' : 'null' }]}, - 'cppflags': { 'anyOf': [ {'type' : 'string' }, - {'type' : 'null' }]}, - 'ldflags': { 'anyOf': [ {'type' : 'string' }, - {'type' : 'null' }]}, - 'ldlibs': { 'anyOf': [ {'type' : 'string' }, - {'type' : 'null' }]}}}, - 'spec': { 'type': 'string'}, - 'operating_system': { 'type': 'string'}, - 'alias': { 'anyOf': [ {'type' : 'string'}, - {'type' : 'null' }]}, - 'modules': { 'anyOf': [ {'type' : 'string'}, - {'type' : 'null' }, - {'type': 'array'}, - ]} - },},},},},}, + 'cc': {'anyOf': [{'type': 'string'}, + {'type': 'null'}]}, + 'cxx': {'anyOf': [{'type': 'string'}, + {'type': 'null'}]}, + 'f77': {'anyOf': [{'type': 'string'}, + {'type': 'null'}]}, + 'fc': {'anyOf': [{'type': 'string'}, + {'type': 'null'}]}, + 'cflags': {'anyOf': [{'type': 'string'}, + {'type': 'null'}]}, + 'cxxflags': {'anyOf': [{'type': 'string'}, + {'type': 'null'}]}, + 'fflags': {'anyOf': [{'type': 'string'}, + {'type': 'null'}]}, + 'cppflags': {'anyOf': [{'type': 'string'}, + {'type': 'null'}]}, + 'ldflags': {'anyOf': [{'type': 'string'}, + {'type': 'null'}]}, + 'ldlibs': {'anyOf': [{'type': 'string'}, + {'type': 'null'}]}}}, + 'spec': {'type': 'string'}, + 'operating_system': {'type': 'string'}, + 'alias': {'anyOf': [{'type': 'string'}, + {'type': 'null'}]}, + 'modules': {'anyOf': [{'type': 'string'}, + {'type': 'null'}, + {'type': 'array'}, + ]} + }, }, }, }, }, }, 'mirrors': { '$schema': 'http://json-schema.org/schema#', 'title': 'Spack mirror configuration file schema', @@ -199,7 +199,7 @@ section_schemas = { 'additionalProperties': False, 'patternProperties': { r'\w[\w-]*': { - 'type': 'string'},},},},}, + 'type': 'string'}, }, }, }, }, 'repos': { '$schema': 'http://json-schema.org/schema#', @@ -211,7 +211,7 @@ section_schemas = { 'type': 'array', 'default': [], 'items': { - 'type': 'string'},},},}, + 'type': 'string'}, }, }, }, 'packages': { '$schema': 'http://json-schema.org/schema#', 'title': 'Spack package configuration file schema', @@ -223,48 +223,48 @@ section_schemas = { 'default': {}, 'additionalProperties': False, 'patternProperties': { - r'\w[\w-]*': { # package name + r'\w[\w-]*': { # package name 'type': 'object', 'default': {}, 'additionalProperties': False, 'properties': { 'version': { - 'type' : 'array', - 'default' : [], - 'items' : { 'anyOf' : [ { 'type' : 'string' }, - { 'type' : 'number'}]}}, #version strings + 'type': 'array', + 'default': [], + 'items': {'anyOf': [{'type': 'string'}, + {'type': 'number'}]}}, # version strings 'compiler': { - 'type' : 'array', - 'default' : [], - 'items' : { 'type' : 'string' } }, #compiler specs + 'type': 'array', + 'default': [], + 'items': {'type': 'string'}}, # compiler specs 'buildable': { 'type': 'boolean', 'default': True, - }, + }, 'modules': { - 'type' : 'object', - 'default' : {}, - }, + 'type': 'object', + 'default': {}, + }, 'providers': { 'type': 'object', 'default': {}, 'additionalProperties': False, 'patternProperties': { r'\w[\w-]*': { - 'type' : 'array', - 'default' : [], - 'items' : { 'type' : 'string' },},},}, + 'type': 'array', + 'default': [], + 'items': {'type': 'string'}, }, }, }, 'paths': { - 'type' : 'object', - 'default' : {}, - }, + 'type': 'object', + 'default': {}, + }, 'variants': { - 'oneOf' : [ - { 'type' : 'string' }, - { 'type' : 'array', - 'items' : { 'type' : 'string' } }, - ], }, - },},},},},}, + 'oneOf': [ + {'type': 'string'}, + {'type': 'array', + 'items': {'type': 
'string'}}, + ], }, + }, }, }, }, }, }, 'targets': { '$schema': 'http://json-schema.org/schema#', @@ -277,8 +277,8 @@ section_schemas = { 'default': {}, 'additionalProperties': False, 'patternProperties': { - r'\w[\w-]*': { # target name - 'type': 'string' ,},},},},}, + r'\w[\w-]*': { # target name + 'type': 'string', }, }, }, }, }, 'modules': { '$schema': 'http://json-schema.org/schema#', 'title': 'Spack module file configuration file schema', @@ -389,13 +389,15 @@ section_schemas = { }, 'tcl': { 'allOf': [ - {'$ref': '#/definitions/module_type_configuration'}, # Base configuration + # Base configuration + {'$ref': '#/definitions/module_type_configuration'}, {} # Specific tcl extensions ] }, 'dotkit': { 'allOf': [ - {'$ref': '#/definitions/module_type_configuration'}, # Base configuration + # Base configuration + {'$ref': '#/definitions/module_type_configuration'}, {} # Specific dotkit extensions ] }, @@ -428,7 +430,8 @@ def extend_with_default(validator_class): """ validate_properties = validator_class.VALIDATORS["properties"] - validate_pattern_properties = validator_class.VALIDATORS["patternProperties"] + validate_pattern_properties = validator_class.VALIDATORS[ + "patternProperties"] def set_defaults(validator, properties, instance, schema): for property, subschema in properties.iteritems(): @@ -510,7 +513,8 @@ class ConfigScope(object): except jsonschema.ValidationError as e: raise ConfigSanityError(e, data) except (yaml.YAMLError, IOError) as e: - raise ConfigFileError("Error writing to config file: '%s'" % str(e)) + raise ConfigFileError( + "Error writing to config file: '%s'" % str(e)) def clear(self): """Empty cached config information.""" @@ -739,7 +743,8 @@ def spec_externals(spec): path = get_path_from_module(module) - external_spec = spack.spec.Spec(external_spec, external=path, external_module=module) + external_spec = spack.spec.Spec( + external_spec, external=path, external_module=module) if external_spec.satisfies(spec): external_specs.append(external_spec) @@ -773,6 +778,7 @@ def get_path(path, data): class ConfigFormatError(ConfigError): """Raised when a configuration format does not match its schema.""" + def __init__(self, validation_error, data): # Try to get line number from erroneous instance and its parent instance_mark = getattr(validation_error.instance, '_start_mark', None) diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 16814429dc..f3dcdef0a9 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -119,6 +119,7 @@ class InstallRecord(object): class Database(object): + def __init__(self, root, db_dir=None): """Create a Database for Spack installations under ``root``. @@ -600,6 +601,7 @@ class Database(object): class CorruptDatabaseError(SpackError): + def __init__(self, path, msg=''): super(CorruptDatabaseError, self).__init__( "Spack database is corrupt: %s. %s." % (path, msg), @@ -607,6 +609,7 @@ class CorruptDatabaseError(SpackError): class InvalidDatabaseVersionError(SpackError): + def __init__(self, expected, found): super(InvalidDatabaseVersionError, self).__init__( "Expected database version %s but found version %s." 
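The `extend_with_default` helper being reformatted here follows the usual jsonschema recipe for injecting schema defaults while validating. A minimal, self-contained sketch of that recipe, using plain `jsonschema` and a toy schema rather than Spack's `section_schemas` or its wrapper code (assuming a jsonschema release that provides `Draft4Validator`):

    import jsonschema
    from jsonschema import validators

    def extend_with_default(validator_class):
        """Return a validator class that also fills in declared defaults."""
        validate_properties = validator_class.VALIDATORS["properties"]

        def set_defaults(validator, properties, instance, schema):
            # First inject any missing keys that declare a 'default' ...
            for prop, subschema in properties.items():
                if isinstance(instance, dict) and "default" in subschema:
                    instance.setdefault(prop, subschema["default"])
            # ... then defer to the normal 'properties' validation.
            for error in validate_properties(validator, properties,
                                             instance, schema):
                yield error

        return validators.extend(validator_class, {"properties": set_defaults})

    DefaultingValidator = extend_with_default(jsonschema.Draft4Validator)

    schema = {'type': 'object',
              'properties': {'buildable': {'type': 'boolean', 'default': True}}}
    data = {}
    DefaultingValidator(schema).validate(data)
    print(data)   # {'buildable': True} -- the default was filled in

This is the same idea that lets the config scopes above validate compilers.yaml, packages.yaml and the other sections while still picking up the 'default' values declared in their schemas.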
diff --git a/lib/spack/spack/directives.py b/lib/spack/spack/directives.py index e92dd6fb67..313bf48f0d 100644 --- a/lib/spack/spack/directives.py +++ b/lib/spack/spack/directives.py @@ -349,9 +349,10 @@ class CircularReferenceError(DirectiveError): class UnknownDependencyTypeError(DirectiveError): """This is raised when a dependency is of an unknown type.""" + def __init__(self, directive, package, deptype): super(UnknownDependencyTypeError, self).__init__( directive, - "Package '%s' cannot depend on a package via %s." % - (package, deptype)) + "Package '%s' cannot depend on a package via %s." + % (package, deptype)) self.package = package diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py index 8150a6da2b..0ae6f765f4 100644 --- a/lib/spack/spack/directory_layout.py +++ b/lib/spack/spack/directory_layout.py @@ -22,16 +22,13 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import re import os import exceptions -import hashlib import shutil import glob import tempfile import yaml -import llnl.util.tty as tty from llnl.util.filesystem import join_path, mkdirp import spack @@ -51,10 +48,10 @@ class DirectoryLayout(object): install, and they can use this to customize the nesting structure of spack installs. """ + def __init__(self, root): self.root = root - @property def hidden_file_paths(self): """Return a list of hidden files used by the directory layout. @@ -67,25 +64,21 @@ class DirectoryLayout(object): """ raise NotImplementedError() - def all_specs(self): """To be implemented by subclasses to traverse all specs for which there is a directory within the root. """ raise NotImplementedError() - def relative_path_for_spec(self, spec): """Implemented by subclasses to return a relative path from the install root to a unique location for the provided spec.""" raise NotImplementedError() - def create_install_directory(self, spec): """Creates the installation directory for a spec.""" raise NotImplementedError() - def check_installed(self, spec): """Checks whether a spec is installed. @@ -95,7 +88,6 @@ class DirectoryLayout(object): """ raise NotImplementedError() - def extension_map(self, spec): """Get a dict of currently installed extension packages for a spec. @@ -104,7 +96,6 @@ class DirectoryLayout(object): """ raise NotImplementedError() - def check_extension_conflict(self, spec, ext_spec): """Ensure that ext_spec can be activated in spec. @@ -113,7 +104,6 @@ class DirectoryLayout(object): """ raise NotImplementedError() - def check_activated(self, spec, ext_spec): """Ensure that ext_spec can be removed from spec. @@ -121,26 +111,22 @@ class DirectoryLayout(object): """ raise NotImplementedError() - def add_extension(self, spec, ext_spec): """Add to the list of currently installed extensions.""" raise NotImplementedError() - def remove_extension(self, spec, ext_spec): """Remove from the list of currently installed extensions.""" raise NotImplementedError() - def path_for_spec(self, spec): - """Return an absolute path from the root to a directory for the spec.""" + """Return absolute path from the root to a directory for the spec.""" _check_concrete(spec) path = self.relative_path_for_spec(spec) assert(not path.startswith(self.root)) return os.path.join(self.root, path) - def remove_install_directory(self, spec): """Removes a prefix and any empty parent directories from the root. 
Raised RemoveFailedError if something goes wrong. @@ -177,6 +163,7 @@ class YamlDirectoryLayout(DirectoryLayout): only enabled variants are included in the install path. Disabled variants are omitted. """ + def __init__(self, root, **kwargs): super(YamlDirectoryLayout, self).__init__(root) self.metadata_dir = kwargs.get('metadata_dir', '.spack') @@ -191,12 +178,10 @@ class YamlDirectoryLayout(DirectoryLayout): # Cache of already written/read extension maps. self._extension_maps = {} - @property def hidden_file_paths(self): return (self.metadata_dir,) - def relative_path_for_spec(self, spec): _check_concrete(spec) @@ -208,20 +193,19 @@ class YamlDirectoryLayout(DirectoryLayout): spec.version, spec.dag_hash(self.hash_len)) - path = join_path(spec.architecture, + path = join_path( + spec.architecture, "%s-%s" % (spec.compiler.name, spec.compiler.version), dir_name) return path - def write_spec(self, spec, path): """Write a spec out to a file.""" _check_concrete(spec) with open(path, 'w') as f: spec.to_yaml(f) - def read_spec(self, path): """Read the contents of a file and parse them as a spec""" try: @@ -237,32 +221,26 @@ class YamlDirectoryLayout(DirectoryLayout): spec._mark_concrete() return spec - def spec_file_path(self, spec): """Gets full path to spec file""" _check_concrete(spec) return join_path(self.metadata_path(spec), self.spec_file_name) - def metadata_path(self, spec): return join_path(self.path_for_spec(spec), self.metadata_dir) - def build_log_path(self, spec): return join_path(self.path_for_spec(spec), self.metadata_dir, self.build_log_name) - def build_env_path(self, spec): return join_path(self.path_for_spec(spec), self.metadata_dir, self.build_env_name) - def build_packages_path(self, spec): return join_path(self.path_for_spec(spec), self.metadata_dir, self.packages_dir) - def create_install_directory(self, spec): _check_concrete(spec) @@ -273,7 +251,6 @@ class YamlDirectoryLayout(DirectoryLayout): mkdirp(self.metadata_path(spec)) self.write_spec(spec, self.spec_file_path(spec)) - def check_installed(self, spec): _check_concrete(spec) path = self.path_for_spec(spec) @@ -284,7 +261,7 @@ class YamlDirectoryLayout(DirectoryLayout): if not os.path.isfile(spec_file_path): raise InconsistentInstallDirectoryError( - 'Inconsistent state: install prefix exists but contains no spec.yaml:', + 'Install prefix exists but contains no spec.yaml:', " " + path) installed_spec = self.read_spec(spec_file_path) @@ -297,7 +274,6 @@ class YamlDirectoryLayout(DirectoryLayout): raise InconsistentInstallDirectoryError( 'Spec file in %s does not match hash!' 
% spec_file_path) - def all_specs(self): if not os.path.isdir(self.root): return [] @@ -307,20 +283,17 @@ class YamlDirectoryLayout(DirectoryLayout): spec_files = glob.glob(pattern) return [self.read_spec(s) for s in spec_files] - def specs_by_hash(self): by_hash = {} for spec in self.all_specs(): by_hash[spec.dag_hash()] = spec return by_hash - def extension_file_path(self, spec): """Gets full path to an installed package's extension file""" _check_concrete(spec) return join_path(self.metadata_path(spec), self.extension_file_name) - def _write_extensions(self, spec, extensions): path = self.extension_file_path(spec) @@ -332,23 +305,22 @@ class YamlDirectoryLayout(DirectoryLayout): # write tmp file with tmp: yaml.dump({ - 'extensions' : [ - { ext.name : { - 'hash' : ext.dag_hash(), - 'path' : str(ext.prefix) + 'extensions': [ + {ext.name: { + 'hash': ext.dag_hash(), + 'path': str(ext.prefix) }} for ext in sorted(extensions.values())] }, tmp, default_flow_style=False) # Atomic update by moving tmpfile on top of old one. os.rename(tmp.name, path) - def _extension_map(self, spec): """Get a dict spec> for all extensions currently installed for this package.""" _check_concrete(spec) - if not spec in self._extension_maps: + if spec not in self._extension_maps: path = self.extension_file_path(spec) if not os.path.exists(path): self._extension_maps[spec] = {} @@ -363,14 +335,14 @@ class YamlDirectoryLayout(DirectoryLayout): dag_hash = entry[name]['hash'] prefix = entry[name]['path'] - if not dag_hash in by_hash: + if dag_hash not in by_hash: raise InvalidExtensionSpecError( "Spec %s not found in %s" % (dag_hash, prefix)) ext_spec = by_hash[dag_hash] - if not prefix == ext_spec.prefix: + if prefix != ext_spec.prefix: raise InvalidExtensionSpecError( - "Prefix %s does not match spec with hash %s: %s" + "Prefix %s does not match spec hash %s: %s" % (prefix, dag_hash, ext_spec)) exts[ext_spec.name] = ext_spec @@ -378,13 +350,11 @@ class YamlDirectoryLayout(DirectoryLayout): return self._extension_maps[spec] - def extension_map(self, spec): """Defensive copying version of _extension_map() for external API.""" _check_concrete(spec) return self._extension_map(spec).copy() - def check_extension_conflict(self, spec, ext_spec): exts = self._extension_map(spec) if ext_spec.name in exts: @@ -394,13 +364,11 @@ class YamlDirectoryLayout(DirectoryLayout): else: raise ExtensionConflictError(spec, ext_spec, installed_spec) - def check_activated(self, spec, ext_spec): exts = self._extension_map(spec) - if (not ext_spec.name in exts) or (ext_spec != exts[ext_spec.name]): + if (ext_spec.name not in exts) or (ext_spec != exts[ext_spec.name]): raise NoSuchExtensionError(spec, ext_spec) - def add_extension(self, spec, ext_spec): _check_concrete(spec) _check_concrete(ext_spec) @@ -413,7 +381,6 @@ class YamlDirectoryLayout(DirectoryLayout): exts[ext_spec.name] = ext_spec self._write_extensions(spec, exts) - def remove_extension(self, spec, ext_spec): _check_concrete(spec) _check_concrete(ext_spec) @@ -429,12 +396,14 @@ class YamlDirectoryLayout(DirectoryLayout): class DirectoryLayoutError(SpackError): """Superclass for directory layout errors.""" + def __init__(self, message, long_msg=None): super(DirectoryLayoutError, self).__init__(message, long_msg) class SpecHashCollisionError(DirectoryLayoutError): """Raised when there is a hash collision in an install layout.""" + def __init__(self, installed_spec, new_spec): super(SpecHashCollisionError, self).__init__( 'Specs %s and %s have the same SHA-1 prefix!' 
@@ -443,6 +412,7 @@ class SpecHashCollisionError(DirectoryLayoutError): class RemoveFailedError(DirectoryLayoutError): """Raised when a DirectoryLayout cannot remove an install prefix.""" + def __init__(self, installed_spec, prefix, error): super(RemoveFailedError, self).__init__( 'Could not remove prefix %s for %s : %s' @@ -452,12 +422,15 @@ class RemoveFailedError(DirectoryLayoutError): class InconsistentInstallDirectoryError(DirectoryLayoutError): """Raised when a package seems to be installed to the wrong place.""" + def __init__(self, message, long_msg=None): - super(InconsistentInstallDirectoryError, self).__init__(message, long_msg) + super(InconsistentInstallDirectoryError, self).__init__( + message, long_msg) class InstallDirectoryAlreadyExistsError(DirectoryLayoutError): """Raised when create_install_directory is called unnecessarily.""" + def __init__(self, path): super(InstallDirectoryAlreadyExistsError, self).__init__( "Install path %s already exists!") @@ -473,22 +446,26 @@ class InvalidExtensionSpecError(DirectoryLayoutError): class ExtensionAlreadyInstalledError(DirectoryLayoutError): """Raised when an extension is added to a package that already has it.""" + def __init__(self, spec, ext_spec): super(ExtensionAlreadyInstalledError, self).__init__( - "%s is already installed in %s" % (ext_spec.short_spec, spec.short_spec)) + "%s is already installed in %s" + % (ext_spec.short_spec, spec.short_spec)) class ExtensionConflictError(DirectoryLayoutError): """Raised when an extension is added to a package that already has it.""" + def __init__(self, spec, ext_spec, conflict): super(ExtensionConflictError, self).__init__( - "%s cannot be installed in %s because it conflicts with %s"% ( - ext_spec.short_spec, spec.short_spec, conflict.short_spec)) + "%s cannot be installed in %s because it conflicts with %s" + % (ext_spec.short_spec, spec.short_spec, conflict.short_spec)) class NoSuchExtensionError(DirectoryLayoutError): """Raised when an extension isn't there on deactivate.""" + def __init__(self, spec, ext_spec): super(NoSuchExtensionError, self).__init__( - "%s cannot be removed from %s because it's not activated."% ( - ext_spec.short_spec, spec.short_spec)) + "%s cannot be removed from %s because it's not activated." + % (ext_spec.short_spec, spec.short_spec)) diff --git a/lib/spack/spack/environment.py b/lib/spack/spack/environment.py index 41136ab2eb..613ece2f45 100644 --- a/lib/spack/spack/environment.py +++ b/lib/spack/spack/environment.py @@ -1,4 +1,4 @@ -# +############################################################################## # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. 
# @@ -21,7 +21,7 @@ # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -# +############################################################################## import collections import inspect import json @@ -287,7 +287,10 @@ class EnvironmentModifications(object): shell = '{shell}'.format(**info) shell_options = '{shell_options}'.format(**info) source_file = '{source_command} {file} {concatenate_on_success}' - dump_environment = 'python -c "import os, json; print json.dumps(dict(os.environ))"' # NOQA: ignore=E501 + + dump_cmd = "import os, json; print json.dumps(dict(os.environ))" + dump_environment = 'python -c "%s"' % dump_cmd + # Construct the command that will be executed command = [source_file.format(file=file, **info) for file in args] command.append(dump_environment) @@ -326,8 +329,10 @@ class EnvironmentModifications(object): for x in unset_variables: env.unset(x) # Variables that have been modified - common_variables = set(this_environment).intersection(set(after_source_env)) # NOQA: ignore=E501 - modified_variables = [x for x in common_variables if this_environment[x] != after_source_env[x]] # NOQA: ignore=E501 + common_variables = set( + this_environment).intersection(set(after_source_env)) + modified_variables = [x for x in common_variables + if this_environment[x] != after_source_env[x]] def return_separator_if_any(first_value, second_value): separators = ':', ';' @@ -405,7 +410,7 @@ def set_or_unset_not_first(variable, changes, errstream): if indexes: good = '\t \t{context} at {filename}:{lineno}' nogood = '\t--->\t{context} at {filename}:{lineno}' - message = 'Suspicious requests to set or unset the variable \'{var}\' found' # NOQA: ignore=E501 + message = "Suspicious requests to set or unset '{var}' found" errstream(message.format(var=variable)) for ii, item in enumerate(changes): print_format = nogood if ii in indexes else good diff --git a/lib/spack/spack/error.py b/lib/spack/spack/error.py index 85ad2fe249..c94875e91a 100644 --- a/lib/spack/spack/error.py +++ b/lib/spack/spack/error.py @@ -27,21 +27,21 @@ import sys import llnl.util.tty as tty import spack + class SpackError(Exception): """This is the superclass for all Spack errors. Subclasses can be found in the modules they have to do with. 
""" + def __init__(self, message, long_message=None): super(SpackError, self).__init__() self.message = message self._long_message = long_message - @property def long_message(self): return self._long_message - def die(self): if spack.debug: sys.excepthook(*sys.exc_info()) @@ -52,21 +52,23 @@ class SpackError(Exception): print self.long_message os._exit(1) - def __str__(self): msg = self.message if self._long_message: msg += "\n %s" % self._long_message return msg + class UnsupportedPlatformError(SpackError): """Raised by packages when a platform is not supported""" + def __init__(self, message): super(UnsupportedPlatformError, self).__init__(message) class NoNetworkConnectionError(SpackError): """Raised when an operation needs an internet connection.""" + def __init__(self, message, url): super(NoNetworkConnectionError, self).__init__( "No network connection: " + str(message), diff --git a/lib/spack/spack/fetch_strategy.py b/lib/spack/spack/fetch_strategy.py index bcb33bd0e6..c69a23033c 100644 --- a/lib/spack/spack/fetch_strategy.py +++ b/lib/spack/spack/fetch_strategy.py @@ -356,6 +356,7 @@ class URLFetchStrategy(FetchStrategy): class CacheURLFetchStrategy(URLFetchStrategy): """The resource associated with a cache URL may be out of date.""" + def __init__(self, *args, **kwargs): super(CacheURLFetchStrategy, self).__init__(*args, **kwargs) @@ -836,6 +837,7 @@ def for_package_version(pkg, version): class FsCache(object): + def __init__(self, root): self.root = os.path.abspath(root) diff --git a/lib/spack/spack/file_cache.py b/lib/spack/spack/file_cache.py index fb9ccf46b8..0a66166fd8 100644 --- a/lib/spack/spack/file_cache.py +++ b/lib/spack/spack/file_cache.py @@ -41,6 +41,7 @@ class FileCache(object): client code need not manage locks for cache entries. """ + def __init__(self, root): """Create a file cache object. @@ -131,6 +132,7 @@ class FileCache(object): """ class WriteContextManager(object): + def __enter__(cm): cm.orig_filename = self.cache_path(key) cm.orig_file = None diff --git a/lib/spack/spack/graph.py b/lib/spack/spack/graph.py index 80d1199ef5..b875e9da99 100644 --- a/lib/spack/spack/graph.py +++ b/lib/spack/spack/graph.py @@ -136,6 +136,7 @@ NODE, COLLAPSE, MERGE_RIGHT, EXPAND_RIGHT, BACK_EDGE = states class AsciiGraph(object): + def __init__(self): # These can be set after initialization or after a call to # graph() to change behavior. 
@@ -288,22 +289,22 @@ class AsciiGraph(object): self._indent() for p in prev_ends: - advance(p, lambda: [("| ", self._pos)]) # NOQA: ignore=E272 - advance(p + 1, lambda: [("|/", self._pos)]) # NOQA: ignore=E272 + advance(p, lambda: [("| ", self._pos)]) + advance(p + 1, lambda: [("|/", self._pos)]) if end >= 0: - advance(end + 1, lambda: [("| ", self._pos)]) # NOQA: ignore=E272 - advance(start - 1, lambda: [("|", self._pos), ("_", end)]) # NOQA: ignore=E272 + advance(end + 1, lambda: [("| ", self._pos)]) + advance(start - 1, lambda: [("|", self._pos), ("_", end)]) else: - advance(start - 1, lambda: [("| ", self._pos)]) # NOQA: ignore=E272 + advance(start - 1, lambda: [("| ", self._pos)]) if start >= 0: - advance(start, lambda: [("|", self._pos), ("/", end)]) # NOQA: ignore=E272 + advance(start, lambda: [("|", self._pos), ("/", end)]) if collapse: - advance(flen, lambda: [(" /", self._pos)]) # NOQA: ignore=E272 + advance(flen, lambda: [(" /", self._pos)]) else: - advance(flen, lambda: [("| ", self._pos)]) # NOQA: ignore=E272 + advance(flen, lambda: [("| ", self._pos)]) self._set_state(BACK_EDGE, end, label) self._out.write("\n") @@ -438,8 +439,8 @@ class AsciiGraph(object): # Expand forward after doing all back connections if (i + 1 < len(self._frontier) and - len(self._frontier[i + 1]) == 1 and - self._frontier[i + 1][0] in self._frontier[i]): + len(self._frontier[i + 1]) == 1 and + self._frontier[i + 1][0] in self._frontier[i]): # We need to connect to the element to the right. # Keep lines straight by connecting directly and # avoiding unnecessary expand/contract. diff --git a/lib/spack/spack/hooks/__init__.py b/lib/spack/spack/hooks/__init__.py index 902e488eca..c7c84defa0 100644 --- a/lib/spack/spack/hooks/__init__.py +++ b/lib/spack/spack/hooks/__init__.py @@ -45,6 +45,7 @@ from llnl.util.lang import memoized, list_modules from llnl.util.filesystem import join_path import spack + @memoized def all_hook_modules(): modules = [] @@ -58,6 +59,7 @@ def all_hook_modules(): class HookRunner(object): + def __init__(self, hook_name): self.hook_name = hook_name diff --git a/lib/spack/spack/hooks/extensions.py b/lib/spack/spack/hooks/extensions.py index bcbd68dfa0..070b309a43 100644 --- a/lib/spack/spack/hooks/extensions.py +++ b/lib/spack/spack/hooks/extensions.py @@ -23,8 +23,6 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import spack - def pre_uninstall(pkg): assert(pkg.spec.concrete) diff --git a/lib/spack/spack/mirror.py b/lib/spack/spack/mirror.py index 0bbcfba6b4..f053e4405f 100644 --- a/lib/spack/spack/mirror.py +++ b/lib/spack/spack/mirror.py @@ -40,9 +40,8 @@ import spack.error import spack.url as url import spack.fetch_strategy as fs from spack.spec import Spec -from spack.stage import Stage from spack.version import * -from spack.util.compression import extension, allowed_archive +from spack.util.compression import allowed_archive def mirror_archive_filename(spec, fetcher): @@ -52,10 +51,10 @@ def mirror_archive_filename(spec, fetcher): if isinstance(fetcher, fs.URLFetchStrategy): if fetcher.expand_archive: - # If we fetch this version with a URLFetchStrategy, use URL's archive type + # If we fetch with a URLFetchStrategy, use URL's archive type ext = url.downloaded_file_extension(fetcher.url) else: - # If the archive shouldn't be expanded, don't check for its extension. + # If the archive shouldn't be expanded, don't check extension. 
ext = None else: # Otherwise we'll make a .tar.gz ourselves @@ -106,7 +105,9 @@ def get_matching_versions(specs, **kwargs): def suggest_archive_basename(resource): """ - Return a tentative basename for an archive. Raise an exception if the name is among the allowed archive types. + Return a tentative basename for an archive. + + Raises an exception if the name is not an allowed archive type. :param fetcher: :return: @@ -170,7 +171,7 @@ def create(path, specs, **kwargs): 'error': [] } - # Iterate through packages and download all the safe tarballs for each of them + # Iterate through packages and download all safe tarballs for each for spec in version_specs: add_single_spec(spec, mirror_root, categories, **kwargs) @@ -190,12 +191,15 @@ def add_single_spec(spec, mirror_root, categories, **kwargs): fetcher = stage.fetcher if ii == 0: # create a subdirectory for the current package@version - archive_path = os.path.abspath(join_path(mirror_root, mirror_archive_path(spec, fetcher))) + archive_path = os.path.abspath(join_path( + mirror_root, mirror_archive_path(spec, fetcher))) name = spec.format("$_$@") else: resource = stage.resource - archive_path = join_path(subdir, suggest_archive_basename(resource)) - name = "{resource} ({pkg}).".format(resource=resource.name, pkg=spec.format("$_$@")) + archive_path = join_path( + subdir, suggest_archive_basename(resource)) + name = "{resource} ({pkg}).".format( + resource=resource.name, pkg=spec.format("$_$@")) subdir = os.path.dirname(archive_path) mkdirp(subdir) @@ -217,15 +221,18 @@ def add_single_spec(spec, mirror_root, categories, **kwargs): categories['present'].append(spec) else: categories['mirrored'].append(spec) + except Exception as e: if spack.debug: sys.excepthook(*sys.exc_info()) else: - tty.warn("Error while fetching %s" % spec.format('$_$@'), e.message) + tty.warn("Error while fetching %s" + % spec.format('$_$@'), e.message) categories['error'].append(spec) class MirrorError(spack.error.SpackError): """Superclass of all mirror-creation related errors.""" + def __init__(self, msg, long_msg=None): super(MirrorError, self).__init__(msg, long_msg) diff --git a/lib/spack/spack/modules.py b/lib/spack/spack/modules.py index 8ac6a77d13..debc6752b4 100644 --- a/lib/spack/spack/modules.py +++ b/lib/spack/spack/modules.py @@ -459,7 +459,8 @@ class EnvModule(object): yield self.environment_modifications_formats[type( command)].format(**command.args) except KeyError: - message = 'Cannot handle command of type {command} : skipping request' # NOQA: ignore=E501 + message = ('Cannot handle command of type {command}: ' + 'skipping request') details = '{context} at {filename}:{lineno}' tty.warn(message.format(command=type(command))) tty.warn(details.format(**command.args)) @@ -494,7 +495,8 @@ class Dotkit(EnvModule): autoload_format = 'dk_op {module_file}\n' - default_naming_format = '{name}-{version}-{compiler.name}-{compiler.version}' # NOQA: ignore=E501 + default_naming_format = \ + '{name}-{version}-{compiler.name}-{compiler.version}' @property def file_name(self): @@ -543,7 +545,8 @@ class TclModule(EnvModule): prerequisite_format = 'prereq {module_file}\n' - default_naming_format = '{name}-{version}-{compiler.name}-{compiler.version}' # NOQA: ignore=E501 + default_naming_format = \ + '{name}-{version}-{compiler.name}-{compiler.version}' @property def file_name(self): @@ -554,7 +557,7 @@ class TclModule(EnvModule): timestamp = datetime.datetime.now() # TCL Modulefile header header = '#%Module1.0\n' - header += '## Module file created by spack 
(https://github.com/LLNL/spack) on %s\n' % timestamp # NOQA: ignore=E501 + header += '## Module file created by spack (https://github.com/LLNL/spack) on %s\n' % timestamp header += '##\n' header += '## %s\n' % self.spec.short_spec header += '##\n' @@ -584,10 +587,12 @@ class TclModule(EnvModule): for naming_dir, conflict_dir in zip( self.naming_scheme.split('/'), item.split('/')): if naming_dir != conflict_dir: - message = 'conflict scheme does not match naming scheme [{spec}]\n\n' # NOQA: ignore=E501 + message = 'conflict scheme does not match naming ' + message += 'scheme [{spec}]\n\n' message += 'naming scheme : "{nformat}"\n' message += 'conflict scheme : "{cformat}"\n\n' - message += '** You may want to check your `modules.yaml` configuration file **\n' # NOQA: ignore=E501 + message += '** You may want to check your ' + message += '`modules.yaml` configuration file **\n' tty.error(message.format(spec=self.spec, nformat=self.naming_scheme, cformat=item)) diff --git a/lib/spack/spack/multimethod.py b/lib/spack/spack/multimethod.py index 0818f9092f..d1d1f32445 100644 --- a/lib/spack/spack/multimethod.py +++ b/lib/spack/spack/multimethod.py @@ -43,15 +43,13 @@ avoids overly complicated rat nests of if statements. Obviously, depending on the scenario, regular old conditionals might be clearer, so package authors should use their judgement. """ -import sys import functools -import collections from llnl.util.lang import * import spack.architecture import spack.error -from spack.spec import parse_anonymous_spec, Spec +from spack.spec import parse_anonymous_spec class SpecMultiMethod(object): @@ -89,13 +87,13 @@ class SpecMultiMethod(object): See the docs for decorators below for more details. """ + def __init__(self, default=None): self.method_list = [] self.default = default if default: functools.update_wrapper(self, default) - def register(self, spec, method): """Register a version of a method for a particular sys_type.""" self.method_list.append((spec, method)) @@ -105,12 +103,10 @@ class SpecMultiMethod(object): else: assert(self.__name__ == method.__name__) - def __get__(self, obj, objtype): """This makes __call__ support instance methods.""" return functools.partial(self.__call__, obj) - def __call__(self, package_self, *args, **kwargs): """Find the first method with a spec that matches the package's spec. If none is found, call the default @@ -127,7 +123,6 @@ class SpecMultiMethod(object): type(package_self), self.__name__, spec, [m[0] for m in self.method_list]) - def __str__(self): return "SpecMultiMethod {\n\tdefault: %s,\n\tspecs: %s\n}" % ( self.default, self.method_list) @@ -195,11 +190,13 @@ class when(object): platform-specific versions. There's not much we can do to get around this because of the way decorators work. 
""" + def __init__(self, spec): pkg = get_calling_module_name() if spec is True: spec = pkg - self.spec = parse_anonymous_spec(spec, pkg) if spec is not False else None + self.spec = (parse_anonymous_spec(spec, pkg) + if spec is not False else None) def __call__(self, method): # Get the first definition of the method in the calling scope @@ -218,12 +215,14 @@ class when(object): class MultiMethodError(spack.error.SpackError): """Superclass for multimethod dispatch errors""" + def __init__(self, message): super(MultiMethodError, self).__init__(message) class NoSuchMethodError(spack.error.SpackError): """Raised when we can't find a version of a multi-method.""" + def __init__(self, cls, method_name, spec, possible_specs): super(NoSuchMethodError, self).__init__( "Package %s does not support %s called with %s. Options are: %s" diff --git a/lib/spack/spack/operating_systems/cnl.py b/lib/spack/spack/operating_systems/cnl.py index dbd2775861..78807865b3 100644 --- a/lib/spack/spack/operating_systems/cnl.py +++ b/lib/spack/spack/operating_systems/cnl.py @@ -15,6 +15,7 @@ class Cnl(OperatingSystem): modules. If updated, user must make sure that version and name are updated to indicate that OS has been upgraded (or downgraded) """ + def __init__(self): name = 'CNL' version = '10' diff --git a/lib/spack/spack/operating_systems/linux_distro.py b/lib/spack/spack/operating_systems/linux_distro.py index 2e3c72719b..6d70ae80b6 100644 --- a/lib/spack/spack/operating_systems/linux_distro.py +++ b/lib/spack/spack/operating_systems/linux_distro.py @@ -2,6 +2,7 @@ import re import platform as py_platform from spack.architecture import OperatingSystem + class LinuxDistro(OperatingSystem): """ This class will represent the autodetected operating system for a Linux System. Since there are many different flavors of @@ -9,6 +10,7 @@ class LinuxDistro(OperatingSystem): autodetection using the python module platform and the method platform.dist() """ + def __init__(self): distname, version, _ = py_platform.linux_distribution( full_distribution_name=False) diff --git a/lib/spack/spack/operating_systems/mac_os.py b/lib/spack/spack/operating_systems/mac_os.py index f35b3ca577..3e5ab9b2e9 100644 --- a/lib/spack/spack/operating_systems/mac_os.py +++ b/lib/spack/spack/operating_systems/mac_os.py @@ -1,6 +1,7 @@ import platform as py_platform from spack.architecture import OperatingSystem + class MacOs(OperatingSystem): """This class represents the macOS operating system. This will be auto detected using the python platform.mac_ver. The macOS diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index 25e07541d0..ff8c8e96bc 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -34,6 +34,7 @@ rundown on spack and how it differs from homebrew, look at the README. """ import os +import sys import re import textwrap import time @@ -178,12 +179,10 @@ class Package(object): Most software comes in nicely packaged tarballs, like this one: http://www.cmake.org/files/v2.8/cmake-2.8.10.2.tar.gz - Taking a page from homebrew, spack deduces pretty much everything it needs to know from the URL above. If you simply type this: spack create http://www.cmake.org/files/v2.8/cmake-2.8.10.2.tar.gz - Spack will download the tarball, generate an md5 hash, figure out the version and the name of the package from the URL, and create a new package file for you with all the names and attributes set correctly. 
@@ -705,13 +704,13 @@ class Package(object): # Ask the user whether to skip the checksum if we're # interactive, but just fail if non-interactive. - checksum_msg = "Add a checksum or use --no-checksum to skip this check." # NOQA: ignore=E501 + ck_msg = "Add a checksum or use --no-checksum to skip this check." ignore_checksum = False if sys.stdout.isatty(): ignore_checksum = tty.get_yes_or_no(" Fetch anyway?", default=False) if ignore_checksum: - tty.msg("Fetching with no checksum.", checksum_msg) + tty.msg("Fetching with no checksum.", ck_msg) if not ignore_checksum: raise FetchError("Will not fetch %s" % @@ -1305,9 +1304,10 @@ class Package(object): continue for dep in aspec.traverse(deptype='run'): if self.spec == dep: + msg = ("Cannot deactivate %s because %s is activated " + "and depends on it.") raise ActivationError( - "Cannot deactivate %s because %s is activated and depends on it." # NOQA: ignore=E501 - % (self.spec.short_spec, aspec.short_spec)) + msg % (self.spec.short_spec, aspec.short_spec)) self.extendee_spec.package.deactivate(self, **self.extendee_args) @@ -1564,6 +1564,7 @@ def make_executable(path): class CMakePackage(StagedPackage): + def make_make(self): import multiprocessing # number of jobs spack will to build with. @@ -1740,12 +1741,14 @@ class ExtensionError(PackageError): class ExtensionConflictError(ExtensionError): + def __init__(self, path): super(ExtensionConflictError, self).__init__( "Extension blocked by file: %s" % path) class ActivationError(ExtensionError): + def __init__(self, msg, long_msg=None): super(ActivationError, self).__init__(msg, long_msg) diff --git a/lib/spack/spack/parse.py b/lib/spack/spack/parse.py index 8adf957e7f..1b88db2d7c 100644 --- a/lib/spack/spack/parse.py +++ b/lib/spack/spack/parse.py @@ -29,6 +29,7 @@ import spack.error class Token: """Represents tokens; generated from input by lexer and fed to parse().""" + def __init__(self, type, value='', start=0, end=0): self.type = type self.value = value @@ -51,11 +52,13 @@ class Token: class Lexer(object): """Base class for Lexers that keep track of line numbers.""" + def __init__(self, lexicon): self.scanner = re.Scanner(lexicon) def token(self, type, value=''): - return Token(type, value, self.scanner.match.start(0), self.scanner.match.end(0)) + return Token(type, value, + self.scanner.match.start(0), self.scanner.match.end(0)) def lex(self, text): tokens, remainder = self.scanner.scan(text) @@ -66,10 +69,11 @@ class Lexer(object): class Parser(object): """Base class for simple recursive descent parsers.""" + def __init__(self, lexer): - self.tokens = iter([]) # iterators over tokens, handled in order. Starts empty. - self.token = Token(None) # last accepted token starts at beginning of file - self.next = None # next token + self.tokens = iter([]) # iterators over tokens, handled in order. + self.token = Token(None) # last accepted token + self.next = None # next token self.lexer = lexer self.text = None @@ -82,11 +86,12 @@ class Parser(object): def push_tokens(self, iterable): """Adds all tokens in some iterable to the token stream.""" - self.tokens = itertools.chain(iter(iterable), iter([self.next]), self.tokens) + self.tokens = itertools.chain( + iter(iterable), iter([self.next]), self.tokens) self.gettok() def accept(self, id): - """Puts the next symbol in self.token if we like it. 
Then calls gettok()""" + """Put the next symbol in self.token if accepted, then call gettok()""" if self.next and self.next.is_a(id): self.token = self.next self.gettok() @@ -124,9 +129,9 @@ class Parser(object): return self.do_parse() - class ParseError(spack.error.SpackError): """Raised when we don't hit an error while parsing.""" + def __init__(self, message, string, pos): super(ParseError, self).__init__(message) self.string = string @@ -135,5 +140,6 @@ class ParseError(spack.error.SpackError): class LexError(ParseError): """Raised when we don't know how to lex something.""" + def __init__(self, message, string, pos): super(LexError, self).__init__(message, string, pos) diff --git a/lib/spack/spack/patch.py b/lib/spack/spack/patch.py index c2e181be2f..0bd9f5d29d 100644 --- a/lib/spack/spack/patch.py +++ b/lib/spack/spack/patch.py @@ -24,7 +24,6 @@ ############################################################################## import os -import llnl.util.tty as tty from llnl.util.filesystem import join_path import spack @@ -59,7 +58,6 @@ class Patch(object): if not os.path.isfile(self.path): raise NoSuchPatchFileError(pkg_name, self.path) - def apply(self, stage): """Fetch this patch, if necessary, and apply it to the source code in the supplied stage. @@ -84,9 +82,9 @@ class Patch(object): patch_stage.destroy() - class NoSuchPatchFileError(spack.error.SpackError): """Raised when user specifies a patch file that doesn't exist.""" + def __init__(self, package, path): super(NoSuchPatchFileError, self).__init__( "No such patch file for package %s: %s" % (package, path)) diff --git a/lib/spack/spack/platforms/bgq.py b/lib/spack/spack/platforms/bgq.py index e0eb76f336..91afdd04db 100644 --- a/lib/spack/spack/platforms/bgq.py +++ b/lib/spack/spack/platforms/bgq.py @@ -1,6 +1,7 @@ import os from spack.architecture import Platform, Target + class Bgq(Platform): priority = 30 front_end = 'power7' @@ -15,4 +16,3 @@ class Bgq(Platform): @classmethod def detect(self): return os.path.exists('/bgsys') - diff --git a/lib/spack/spack/platforms/darwin.py b/lib/spack/spack/platforms/darwin.py index d47dd640f9..974ce3a3f9 100644 --- a/lib/spack/spack/platforms/darwin.py +++ b/lib/spack/spack/platforms/darwin.py @@ -2,6 +2,7 @@ import subprocess from spack.architecture import Platform, Target from spack.operating_systems.mac_os import MacOs + class Darwin(Platform): priority = 89 front_end = 'x86_64' @@ -21,6 +22,6 @@ class Darwin(Platform): @classmethod def detect(self): - platform = subprocess.Popen(['uname', '-a'], stdout = subprocess.PIPE) + platform = subprocess.Popen(['uname', '-a'], stdout=subprocess.PIPE) platform, _ = platform.communicate() return 'darwin' in platform.strip().lower() diff --git a/lib/spack/spack/platforms/linux.py b/lib/spack/spack/platforms/linux.py index 4d3f59c320..38d2cdbfec 100644 --- a/lib/spack/spack/platforms/linux.py +++ b/lib/spack/spack/platforms/linux.py @@ -3,6 +3,7 @@ import platform from spack.architecture import Platform, Target from spack.operating_systems.linux_distro import LinuxDistro + class Linux(Platform): priority = 90 @@ -26,6 +27,6 @@ class Linux(Platform): @classmethod def detect(self): - platform = subprocess.Popen(['uname', '-a'], stdout = subprocess.PIPE) + platform = subprocess.Popen(['uname', '-a'], stdout=subprocess.PIPE) platform, _ = platform.communicate() return 'linux' in platform.strip().lower() diff --git a/lib/spack/spack/platforms/test.py b/lib/spack/spack/platforms/test.py index 8fa2585a7a..c918211555 100644 --- 
a/lib/spack/spack/platforms/test.py +++ b/lib/spack/spack/platforms/test.py @@ -1,4 +1,27 @@ -import subprocess +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## from spack.architecture import Platform, Target from spack.operating_systems.linux_distro import LinuxDistro from spack.operating_systems.cnl import Cnl @@ -9,7 +32,7 @@ class Test(Platform): front_end = 'x86_32' back_end = 'x86_64' default = 'x86_64' - + back_os = 'CNL10' default_os = 'CNL10' diff --git a/lib/spack/spack/preferred_packages.py b/lib/spack/spack/preferred_packages.py index f079c1ef8b..45a41c8e2b 100644 --- a/lib/spack/spack/preferred_packages.py +++ b/lib/spack/spack/preferred_packages.py @@ -156,7 +156,7 @@ class PreferredPackages(object): """Return True iff the named package has a list of preferred providers""" return bool(self._order_for_package(pkgname, 'providers', - provider_str, False)) + provider_str, False)) def spec_preferred_variants(self, pkgname): """Return a VariantMap of preferred variants and their values""" diff --git a/lib/spack/spack/provider_index.py b/lib/spack/spack/provider_index.py index b5fbb67c6e..3f9cd285e7 100644 --- a/lib/spack/spack/provider_index.py +++ b/lib/spack/spack/provider_index.py @@ -52,6 +52,7 @@ class ProviderIndex(object): matching implementation of MPI. """ + def __init__(self, specs=None, restrict=False): """Create a new ProviderIndex. diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py index d751a98b35..2d8dc39648 100644 --- a/lib/spack/spack/repository.py +++ b/lib/spack/spack/repository.py @@ -68,6 +68,7 @@ NOT_PROVIDED = object() def _autospec(function): """Decorator that automatically converts the argument of a single-arg function to a Spec.""" + def converter(self, spec_like, *args, **kwargs): if not isinstance(spec_like, spack.spec.Spec): spec_like = spack.spec.Spec(spec_like) @@ -77,6 +78,7 @@ def _autospec(function): class SpackNamespace(ModuleType): """ Allow lazy loading of modules.""" + def __init__(self, namespace): super(SpackNamespace, self).__init__(namespace) self.__file__ = "(spack namespace)" @@ -112,6 +114,7 @@ class RepoPath(object): combined results of the Repos in its list instead of on a single package repository. 
""" + def __init__(self, *repo_dirs, **kwargs): # super-namespace for all packages in the RepoPath self.super_namespace = kwargs.get('namespace', repo_namespace) @@ -360,6 +363,7 @@ class Repo(object): A Python namespace where the repository's packages should live. """ + def __init__(self, root, namespace=repo_namespace): """Instantiate a package repository from a filesystem path. @@ -923,6 +927,7 @@ class PackageLoadError(spack.error.SpackError): class UnknownPackageError(PackageLoadError): """Raised when we encounter a package spack doesn't have.""" + def __init__(self, name, repo=None): msg = None if repo: @@ -935,6 +940,7 @@ class UnknownPackageError(PackageLoadError): class UnknownNamespaceError(PackageLoadError): """Raised when we encounter an unknown namespace""" + def __init__(self, namespace): super(UnknownNamespaceError, self).__init__( "Unknown namespace: %s" % namespace) @@ -942,6 +948,7 @@ class UnknownNamespaceError(PackageLoadError): class FailedConstructorError(PackageLoadError): """Raised when a package's class constructor fails.""" + def __init__(self, name, exc_type, exc_obj, exc_tb): super(FailedConstructorError, self).__init__( "Class constructor failed for package '%s'." % name, diff --git a/lib/spack/spack/resource.py b/lib/spack/spack/resource.py index 24b675f8da..1d4d448298 100644 --- a/lib/spack/spack/resource.py +++ b/lib/spack/spack/resource.py @@ -31,9 +31,11 @@ package to enable optional features. class Resource(object): + """Represents an optional resource to be fetched by a package. + + Aggregates a name, a fetcher, a destination and a placement. """ - Represents an optional resource. Aggregates a name, a fetcher, a destination and a placement - """ + def __init__(self, name, fetcher, destination, placement): self.name = name self.fetcher = fetcher diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index a37b39be67..0d72d454c6 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -166,6 +166,7 @@ def colorize_spec(spec): """Returns a spec colorized according to the colors specified in color_formats.""" class insert_color: + def __init__(self): self.last = None @@ -186,6 +187,7 @@ class CompilerSpec(object): """The CompilerSpec field represents the compiler or range of compiler versions that a package should be built with. CompilerSpecs have a name and a version list. """ + def __init__(self, *args): nargs = len(args) if nargs == 1: @@ -296,6 +298,7 @@ class DependencySpec(object): - spec: the spack.spec.Spec description of a dependency. - deptypes: strings representing the type of dependency this is. """ + def __init__(self, spec, deptypes): self.spec = spec self.deptypes = deptypes @@ -317,6 +320,7 @@ class VariantSpec(object): on the particular package being built, and each named variant can be enabled or disabled. 
""" + def __init__(self, name, value): self.name = name self.value = value @@ -447,9 +451,9 @@ class FlagMap(HashableMap): sorted_keys = filter( lambda flag: self[flag] != [], sorted(self.keys())) cond_symbol = ' ' if len(sorted_keys) > 0 else '' - return cond_symbol + ' '.join(str(key) + '=\"' + ' '.join(str(f) - for f in self[key]) + '\"' - for key in sorted_keys) + return cond_symbol + ' '.join( + str(key) + '=\"' + ' '.join( + str(f) for f in self[key]) + '\"' for key in sorted_keys) class DependencyMap(HashableMap): @@ -910,7 +914,7 @@ class Spec(object): params = dict((name, v.value) for name, v in self.variants.items()) params.update(dict((name, value) - for name, value in self.compiler_flags.items())) + for name, value in self.compiler_flags.items())) if params: d['parameters'] = params @@ -1598,8 +1602,8 @@ class Spec(object): raise UnsatisfiableSpecNameError(self.name, other.name) if (other.namespace is not None and - self.namespace is not None and - other.namespace != self.namespace): + self.namespace is not None and + other.namespace != self.namespace): raise UnsatisfiableSpecNameError(self.fullname, other.fullname) if not self.versions.overlaps(other.versions): @@ -1753,8 +1757,8 @@ class Spec(object): # namespaces either match, or other doesn't require one. if (other.namespace is not None and - self.namespace is not None and - self.namespace != other.namespace): + self.namespace is not None and + self.namespace != other.namespace): return False if self.versions and other.versions: if not self.versions.satisfies(other.versions, strict=strict): @@ -1849,7 +1853,7 @@ class Spec(object): # compatible with mpich2) for spec in self.virtual_dependencies(): if (spec.name in other_index and - not other_index.providers_for(spec)): + not other_index.providers_for(spec)): return False for spec in other.virtual_dependencies(): @@ -2345,6 +2349,7 @@ _lexer = SpecLexer() class SpecParser(spack.parse.Parser): + def __init__(self): super(SpecParser, self).__init__(_lexer) self.previous = None diff --git a/lib/spack/spack/test/architecture.py b/lib/spack/spack/test/architecture.py index b8441bdac4..22ddd4c97e 100644 --- a/lib/spack/spack/test/architecture.py +++ b/lib/spack/spack/test/architecture.py @@ -40,6 +40,7 @@ from spack.test.mock_packages_test import * class ArchitectureTest(MockPackagesTest): + def setUp(self): super(ArchitectureTest, self).setUp() self.platform = spack.architecture.platform() diff --git a/lib/spack/spack/test/cc.py b/lib/spack/spack/test/cc.py index ea2b164462..f3e4bb31d2 100644 --- a/lib/spack/spack/test/cc.py +++ b/lib/spack/spack/test/cc.py @@ -45,7 +45,8 @@ test_command = [ '-llib1', '-llib2', 'arg4', '-Wl,--end-group', - '-Xlinker', '-rpath', '-Xlinker', '/third/rpath', '-Xlinker', '-rpath', '-Xlinker', '/fourth/rpath', + '-Xlinker', '-rpath', '-Xlinker', '/third/rpath', '-Xlinker', + '-rpath', '-Xlinker', '/fourth/rpath', '-llib3', '-llib4', 'arg5', 'arg6'] @@ -67,7 +68,7 @@ class CompilerTest(unittest.TestCase): os.environ['SPACK_FC'] = self.realcc os.environ['SPACK_PREFIX'] = self.prefix - os.environ['SPACK_ENV_PATH']="test" + os.environ['SPACK_ENV_PATH'] = "test" os.environ['SPACK_DEBUG_LOG_DIR'] = "." 
os.environ['SPACK_COMPILER_SPEC'] = "gcc@4.4.7" os.environ['SPACK_SHORT_SPEC'] = "foo@1.2" @@ -97,16 +98,13 @@ class CompilerTest(unittest.TestCase): if 'SPACK_DEPENDENCIES' in os.environ: del os.environ['SPACK_DEPENDENCIES'] - def tearDown(self): shutil.rmtree(self.tmp_deps, True) - def check_cc(self, command, args, expected): os.environ['SPACK_TEST_COMMAND'] = command self.assertEqual(self.cc(*args, output=str).strip(), expected) - def check_cxx(self, command, args, expected): os.environ['SPACK_TEST_COMMAND'] = command self.assertEqual(self.cxx(*args, output=str).strip(), expected) @@ -115,46 +113,46 @@ class CompilerTest(unittest.TestCase): os.environ['SPACK_TEST_COMMAND'] = command self.assertEqual(self.fc(*args, output=str).strip(), expected) - def check_ld(self, command, args, expected): os.environ['SPACK_TEST_COMMAND'] = command self.assertEqual(self.ld(*args, output=str).strip(), expected) - def check_cpp(self, command, args, expected): os.environ['SPACK_TEST_COMMAND'] = command self.assertEqual(self.cpp(*args, output=str).strip(), expected) - def test_vcheck_mode(self): self.check_cc('dump-mode', ['-I/include', '--version'], "vcheck") self.check_cc('dump-mode', ['-I/include', '-V'], "vcheck") self.check_cc('dump-mode', ['-I/include', '-v'], "vcheck") self.check_cc('dump-mode', ['-I/include', '-dumpversion'], "vcheck") self.check_cc('dump-mode', ['-I/include', '--version', '-c'], "vcheck") - self.check_cc('dump-mode', ['-I/include', '-V', '-o', 'output'], "vcheck") - + self.check_cc('dump-mode', ['-I/include', + '-V', '-o', 'output'], "vcheck") def test_cpp_mode(self): self.check_cc('dump-mode', ['-E'], "cpp") self.check_cpp('dump-mode', [], "cpp") - def test_as_mode(self): self.check_cc('dump-mode', ['-S'], "as") - def test_ccld_mode(self): self.check_cc('dump-mode', [], "ccld") self.check_cc('dump-mode', ['foo.c', '-o', 'foo'], "ccld") - self.check_cc('dump-mode', ['foo.c', '-o', 'foo', '-Wl,-rpath,foo'], "ccld") - self.check_cc('dump-mode', ['foo.o', 'bar.o', 'baz.o', '-o', 'foo', '-Wl,-rpath,foo'], "ccld") - + self.check_cc('dump-mode', ['foo.c', '-o', + 'foo', '-Wl,-rpath,foo'], "ccld") + self.check_cc( + 'dump-mode', + ['foo.o', 'bar.o', 'baz.o', '-o', 'foo', '-Wl,-rpath,foo'], + "ccld") def test_ld_mode(self): self.check_ld('dump-mode', [], "ld") - self.check_ld('dump-mode', ['foo.o', 'bar.o', 'baz.o', '-o', 'foo', '-Wl,-rpath,foo'], "ld") - + self.check_ld( + 'dump-mode', + ['foo.o', 'bar.o', 'baz.o', '-o', 'foo', '-Wl,-rpath,foo'], + "ld") def test_flags(self): os.environ['SPACK_LDFLAGS'] = '-L foo' @@ -176,10 +174,11 @@ class CompilerTest(unittest.TestCase): # Test cppflags added properly in cpp mode self.check_cpp('dump-args', test_command, "cpp " + - '-g -O1 ' + - ' '.join(test_command)) + '-g -O1 ' + + ' '.join(test_command)) - # Test ldflags, cppflags, and language specific flags are added in proper order + # Test ldflags, cppflags, and language specific flags are added in + # proper order self.check_cc('dump-args', test_command, self.realcc + ' ' + '-Wl,-rpath,' + self.prefix + '/lib ' + @@ -191,14 +190,14 @@ class CompilerTest(unittest.TestCase): '-lfoo') self.check_cxx('dump-args', test_command, - self.realcc + ' ' + - '-Wl,-rpath,' + self.prefix + '/lib ' + - '-Wl,-rpath,' + self.prefix + '/lib64 ' + - '-g -O1 ' + - '-Werror ' + - '-L foo ' + - ' '.join(test_command) + ' ' + - '-lfoo') + self.realcc + ' ' + + '-Wl,-rpath,' + self.prefix + '/lib ' + + '-Wl,-rpath,' + self.prefix + '/lib64 ' + + '-g -O1 ' + + '-Werror ' + + '-L foo ' + + ' '.join(test_command) + ' ' 
+ + '-lfoo') self.check_fc('dump-args', test_command, self.realcc + ' ' + @@ -210,9 +209,8 @@ class CompilerTest(unittest.TestCase): ' '.join(test_command) + ' ' + '-lfoo') - os.environ['SPACK_LDFLAGS']='' - os.environ['SPACK_LDLIBS']='' - + os.environ['SPACK_LDFLAGS'] = '' + os.environ['SPACK_LDLIBS'] = '' def test_dep_rpath(self): """Ensure RPATHs for root package are added.""" @@ -222,7 +220,6 @@ class CompilerTest(unittest.TestCase): '-Wl,-rpath,' + self.prefix + '/lib64 ' + ' '.join(test_command)) - def test_dep_include(self): """Ensure a single dependency include directory is added.""" os.environ['SPACK_DEPENDENCIES'] = self.dep4 @@ -233,7 +230,6 @@ class CompilerTest(unittest.TestCase): '-I' + self.dep4 + '/include ' + ' '.join(test_command)) - def test_dep_lib(self): """Ensure a single dependency RPATH is added.""" os.environ['SPACK_DEPENDENCIES'] = self.dep2 @@ -245,7 +241,6 @@ class CompilerTest(unittest.TestCase): '-Wl,-rpath,' + self.dep2 + '/lib64 ' + ' '.join(test_command)) - def test_all_deps(self): """Ensure includes and RPATHs for all deps are added. """ os.environ['SPACK_DEPENDENCIES'] = ':'.join([ @@ -274,7 +269,6 @@ class CompilerTest(unittest.TestCase): ' '.join(test_command)) - def test_ld_deps(self): """Ensure no (extra) -I args or -Wl, are passed in ld mode.""" os.environ['SPACK_DEPENDENCIES'] = ':'.join([ diff --git a/lib/spack/spack/test/cmd/module.py b/lib/spack/spack/test/cmd/module.py index 36a4a73fe6..3a0ce32e6c 100644 --- a/lib/spack/spack/test/cmd/module.py +++ b/lib/spack/spack/test/cmd/module.py @@ -33,16 +33,17 @@ import spack.test.mock_database class TestModule(spack.test.mock_database.MockDatabase): def _get_module_files(self, args): - return [ - modules.module_types[args.module_type](spec).file_name for spec in args.specs # NOQA: ignore=E501 - ] + return [modules.module_types[args.module_type](spec).file_name + for spec in args.specs] def test_module_common_operations(self): parser = argparse.ArgumentParser() module.setup_parser(parser) + # Try to remove a non existing module [tcl] args = parser.parse_args(['rm', 'doesnotexist']) self.assertRaises(SystemExit, module.module, parser, args) + # Remove existing modules [tcl] args = parser.parse_args(['rm', '-y', 'mpileaks']) module_files = self._get_module_files(args) @@ -51,22 +52,28 @@ class TestModule(spack.test.mock_database.MockDatabase): module.module(parser, args) for item in module_files: self.assertFalse(os.path.exists(item)) + # Add them back [tcl] args = parser.parse_args(['refresh', '-y', 'mpileaks']) module.module(parser, args) for item in module_files: self.assertTrue(os.path.exists(item)) + # TODO : test the --delete-tree option # TODO : this requires having a separate directory for test modules + # Try to find a module with multiple matches args = parser.parse_args(['find', 'mpileaks']) self.assertRaises(SystemExit, module.module, parser, args) + # Try to find a module with no matches args = parser.parse_args(['find', 'doesnotexist']) self.assertRaises(SystemExit, module.module, parser, args) + # Try to find a module args = parser.parse_args(['find', 'libelf']) module.module(parser, args) + # Remove existing modules [dotkit] args = parser.parse_args(['rm', '-y', '-m', 'dotkit', 'mpileaks']) module_files = self._get_module_files(args) @@ -75,6 +82,7 @@ class TestModule(spack.test.mock_database.MockDatabase): module.module(parser, args) for item in module_files: self.assertFalse(os.path.exists(item)) + # Add them back [dotkit] args = parser.parse_args(['refresh', '-y', '-m', 'dotkit', 
'mpileaks']) module.module(parser, args) diff --git a/lib/spack/spack/test/cmd/test_compiler_cmd.py b/lib/spack/spack/test/cmd/test_compiler_cmd.py index d89814154b..fa806ee6f4 100644 --- a/lib/spack/spack/test/cmd/test_compiler_cmd.py +++ b/lib/spack/spack/test/cmd/test_compiler_cmd.py @@ -12,7 +12,9 @@ from spack.test.mock_packages_test import * test_version = '4.5-spacktest' + class MockArgs(object): + def __init__(self, add_paths=[], scope=None, compiler_spec=None, all=None): self.add_paths = add_paths self.scope = scope @@ -52,14 +54,12 @@ done class CompilerCmdTest(MockPackagesTest): """ Test compiler commands for add and remove """ - def test_compiler_remove(self): args = MockArgs(all=True, compiler_spec='gcc@4.5.0') spack.cmd.compiler.compiler_remove(args) compilers = spack.compilers.all_compilers() self.assertTrue(spack.spec.CompilerSpec("gcc@4.5.0") not in compilers) - def test_compiler_add(self): # compilers available by default. old_compilers = set(spack.compilers.all_compilers()) @@ -75,7 +75,8 @@ class CompilerCmdTest(MockPackagesTest): new_compilers = set(spack.compilers.all_compilers()) new_compiler = new_compilers - old_compilers self.assertTrue(new_compiler) - self.assertTrue(new_compiler.pop().version == Version(test_version)) + self.assertTrue(new_compiler.pop().version == + Version(test_version)) finally: shutil.rmtree(compiler_dir, ignore_errors=True) diff --git a/lib/spack/spack/test/cmd/uninstall.py b/lib/spack/spack/test/cmd/uninstall.py index 9fffaace40..4ccb9ddbf4 100644 --- a/lib/spack/spack/test/cmd/uninstall.py +++ b/lib/spack/spack/test/cmd/uninstall.py @@ -28,6 +28,7 @@ from spack.cmd.uninstall import uninstall class MockArgs(object): + def __init__(self, packages, all=False, force=False, dependents=False): self.packages = packages self.all = all @@ -37,6 +38,7 @@ class MockArgs(object): class TestUninstall(spack.test.mock_database.MockDatabase): + def test_uninstall(self): parser = None # Multiple matches diff --git a/lib/spack/spack/test/config.py b/lib/spack/spack/test/config.py index 252d77e66b..0822e44db8 100644 --- a/lib/spack/spack/test/config.py +++ b/lib/spack/spack/test/config.py @@ -32,79 +32,80 @@ from ordereddict_backport import OrderedDict from spack.test.mock_packages_test import * # Some sample compiler config data -a_comps = [ +a_comps = [ {'compiler': { 'paths': { - "cc" : "/gcc473", + "cc": "/gcc473", "cxx": "/g++473", "f77": None, - "fc" : None - }, + "fc": None + }, 'modules': None, 'spec': 'gcc@4.7.3', 'operating_system': 'CNL10' - }}, + }}, {'compiler': { 'paths': { - "cc" : "/gcc450", + "cc": "/gcc450", "cxx": "/g++450", "f77": 'gfortran', - "fc" : 'gfortran' - }, + "fc": 'gfortran' + }, 'modules': None, 'spec': 'gcc@4.5.0', 'operating_system': 'CNL10' - }}, + }}, {'compiler': { 'paths': { - "cc" : "", + "cc": "", "cxx": "", "f77": '', - "fc" : '' }, + "fc": ''}, 'modules': None, 'spec': 'clang@3.3', 'operating_system': 'CNL10' - }} + }} ] b_comps = [ {'compiler': { 'paths': { - "cc" : "/icc100", + "cc": "/icc100", "cxx": "/icp100", "f77": None, - "fc" : None - }, + "fc": None + }, 'modules': None, 'spec': 'icc@10.0', 'operating_system': 'CNL10' - }}, + }}, {'compiler': { 'paths': { - "cc" : "/icc111", + "cc": "/icc111", "cxx": "/icp111", "f77": 'ifort', - "fc" : 'ifort' - }, + "fc": 'ifort' + }, 'modules': None, 'spec': 'icc@11.1', 'operating_system': 'CNL10' - }}, + }}, {'compiler': { 'paths': { - "cc" : "", + "cc": "", "cxx": "", "f77": '', - "fc" : '' }, + "fc": ''}, 'modules': None, 'spec': 'clang@3.3', 'operating_system': 
'CNL10' - }} + }} ] # Some Sample repo data -repos_low = [ "/some/path" ] -repos_high = [ "/some/other/path" ] +repos_low = ["/some/path"] +repos_high = ["/some/other/path"] + class ConfigTest(MockPackagesTest): @@ -112,14 +113,15 @@ class ConfigTest(MockPackagesTest): super(ConfigTest, self).setUp() self.tmp_dir = mkdtemp('.tmp', 'spack-config-test-') spack.config.config_scopes = OrderedDict() - spack.config.ConfigScope('test_low_priority', os.path.join(self.tmp_dir, 'low')) - spack.config.ConfigScope('test_high_priority', os.path.join(self.tmp_dir, 'high')) + spack.config.ConfigScope( + 'test_low_priority', os.path.join(self.tmp_dir, 'low')) + spack.config.ConfigScope('test_high_priority', + os.path.join(self.tmp_dir, 'high')) def tearDown(self): super(ConfigTest, self).tearDown() shutil.rmtree(self.tmp_dir, True) - def check_config(self, comps, *compiler_names): """Check that named compilers in comps match Spack's config.""" config = spack.config.get_config('compilers') @@ -146,7 +148,7 @@ class ConfigTest(MockPackagesTest): spack.config.update_config('repos', repos_low, 'test_low_priority') spack.config.update_config('repos', repos_high, 'test_high_priority') config = spack.config.get_config('repos') - self.assertEqual(config, repos_high+repos_low) + self.assertEqual(config, repos_high + repos_low) def test_write_key_in_memory(self): # Write b_comps "on top of" a_comps. @@ -157,7 +159,6 @@ class ConfigTest(MockPackagesTest): self.check_config(a_comps, 'gcc@4.7.3', 'gcc@4.5.0') self.check_config(b_comps, 'icc@10.0', 'icc@11.1', 'clang@3.3') - def test_write_key_to_disk(self): # Write b_comps "on top of" a_comps. spack.config.update_config('compilers', a_comps, 'test_low_priority') diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py index 0d44a27b7e..22b1f17890 100644 --- a/lib/spack/spack/test/database.py +++ b/lib/spack/spack/test/database.py @@ -71,6 +71,7 @@ def _print_ref_counts(): class DatabaseTest(MockDatabase): + def test_005_db_exists(self): """Make sure db cache file exists after creating.""" index_file = join_path(self.install_path, '.spack-db', 'index.yaml') diff --git a/lib/spack/spack/test/directory_layout.py b/lib/spack/spack/test/directory_layout.py index 74669fe8a2..2d0565acae 100644 --- a/lib/spack/spack/test/directory_layout.py +++ b/lib/spack/spack/test/directory_layout.py @@ -49,13 +49,11 @@ class DirectoryLayoutTest(MockPackagesTest): self.tmpdir = tempfile.mkdtemp() self.layout = YamlDirectoryLayout(self.tmpdir) - def tearDown(self): super(DirectoryLayoutTest, self).tearDown() shutil.rmtree(self.tmpdir, ignore_errors=True) self.layout = None - def test_read_and_write_spec(self): """This goes through each package in spack and creates a directory for it. It then ensures that the spec for the directory's @@ -67,8 +65,8 @@ class DirectoryLayoutTest(MockPackagesTest): for pkg in packages: if pkg.name.startswith('external'): - #External package tests cannot be installed - continue + # External package tests cannot be installed + continue spec = pkg.spec # If a spec fails to concretize, just skip it. 
If it is a @@ -115,7 +113,6 @@ class DirectoryLayoutTest(MockPackagesTest): self.assertFalse(os.path.isdir(install_dir)) self.assertFalse(os.path.exists(install_dir)) - def test_handle_unknown_package(self): """This test ensures that spack can at least do *some* operations with packages that are installed but that it @@ -166,7 +163,6 @@ class DirectoryLayoutTest(MockPackagesTest): spack.repo.swap(mock_db) - def test_find(self): """Test that finding specs within an install layout works.""" packages = list(spack.repo.all_packages())[:max_packages] @@ -175,13 +171,14 @@ class DirectoryLayoutTest(MockPackagesTest): installed_specs = {} for pkg in packages: if pkg.name.startswith('external'): - #External package tests cannot be installed + # External package tests cannot be installed continue spec = pkg.spec.concretized() installed_specs[spec.name] = spec self.layout.create_install_directory(spec) - # Make sure all the installed specs appear in DirectoryLayout.all_specs() + # Make sure all the installed specs appear in + # DirectoryLayout.all_specs() found_specs = dict((s.name, s) for s in self.layout.all_specs()) for name, spec in found_specs.items(): self.assertTrue(name in found_specs) diff --git a/lib/spack/spack/test/environment.py b/lib/spack/spack/test/environment.py index 2396961888..9b5d75f273 100644 --- a/lib/spack/spack/test/environment.py +++ b/lib/spack/spack/test/environment.py @@ -38,7 +38,8 @@ class EnvironmentTest(unittest.TestCase): os.environ['UNSET_ME'] = 'foo' os.environ['EMPTY_PATH_LIST'] = '' os.environ['PATH_LIST'] = '/path/second:/path/third' - os.environ['REMOVE_PATH_LIST'] = '/a/b:/duplicate:/a/c:/remove/this:/a/d:/duplicate/:/f/g' # NOQA: ignore=E501 + os.environ['REMOVE_PATH_LIST'] = \ + '/a/b:/duplicate:/a/c:/remove/this:/a/d:/duplicate/:/f/g' def tearDown(self): pass diff --git a/lib/spack/spack/test/git_fetch.py b/lib/spack/spack/test/git_fetch.py index 4de65760d7..0d1a8fe949 100644 --- a/lib/spack/spack/test/git_fetch.py +++ b/lib/spack/spack/test/git_fetch.py @@ -87,33 +87,29 @@ class GitFetchTest(MockPackagesTest): self.assert_rev(rev) - def test_fetch_master(self): """Test a default git checkout with no commit or tag specified.""" self.try_fetch('master', self.repo.r0_file, { - 'git' : self.repo.path + 'git': self.repo.path }) - def test_fetch_branch(self): """Test fetching a branch.""" self.try_fetch(self.repo.branch, self.repo.branch_file, { - 'git' : self.repo.path, - 'branch' : self.repo.branch + 'git': self.repo.path, + 'branch': self.repo.branch }) - def test_fetch_tag(self): """Test fetching a tag.""" self.try_fetch(self.repo.tag, self.repo.tag_file, { - 'git' : self.repo.path, - 'tag' : self.repo.tag + 'git': self.repo.path, + 'tag': self.repo.tag }) - def test_fetch_commit(self): """Test fetching a particular commit.""" self.try_fetch(self.repo.r1, self.repo.r1_file, { - 'git' : self.repo.path, - 'commit' : self.repo.r1 + 'git': self.repo.path, + 'commit': self.repo.r1 }) diff --git a/lib/spack/spack/test/hg_fetch.py b/lib/spack/spack/test/hg_fetch.py index 292ffba949..44af6730a1 100644 --- a/lib/spack/spack/test/hg_fetch.py +++ b/lib/spack/spack/test/hg_fetch.py @@ -83,17 +83,15 @@ class HgFetchTest(MockPackagesTest): self.assertEqual(self.repo.get_rev(), rev) - def test_fetch_default(self): """Test a default hg checkout with no commit or tag specified.""" self.try_fetch(self.repo.r1, self.repo.r1_file, { - 'hg' : self.repo.path + 'hg': self.repo.path }) - def test_fetch_rev0(self): """Test fetching a branch.""" self.try_fetch(self.repo.r0, 
self.repo.r0_file, { - 'hg' : self.repo.path, - 'revision' : self.repo.r0 + 'hg': self.repo.path, + 'revision': self.repo.r0 }) diff --git a/lib/spack/spack/test/install.py b/lib/spack/spack/test/install.py index 390ec096a9..232d5aeeaf 100644 --- a/lib/spack/spack/test/install.py +++ b/lib/spack/spack/test/install.py @@ -55,7 +55,6 @@ class InstallTest(MockPackagesTest): spack.install_layout = YamlDirectoryLayout(self.tmpdir) spack.installed_db = Database(self.tmpdir) - def tearDown(self): super(InstallTest, self).tearDown() self.repo.destroy() @@ -68,14 +67,12 @@ class InstallTest(MockPackagesTest): spack.installed_db = self.orig_db shutil.rmtree(self.tmpdir, ignore_errors=True) - def fake_fetchify(self, pkg): """Fake the URL for a package so it downloads from a file.""" fetcher = FetchStrategyComposite() fetcher.append(URLFetchStrategy(self.repo.url)) pkg.fetcher = fetcher - def test_install_and_uninstall(self): # Get a basic concrete spec for the trivial install package. spec = Spec('trivial_install_test_package') @@ -90,11 +87,10 @@ class InstallTest(MockPackagesTest): try: pkg.do_install() pkg.do_uninstall() - except Exception, e: + except Exception: pkg.remove_prefix() raise - def test_install_environment(self): spec = Spec('cmake-client').concretized() @@ -104,6 +100,6 @@ class InstallTest(MockPackagesTest): pkg = spec.package try: pkg.do_install() - except Exception, e: + except Exception: pkg.remove_prefix() raise diff --git a/lib/spack/spack/test/link_tree.py b/lib/spack/spack/test/link_tree.py index de40991b57..5d0a7430b6 100644 --- a/lib/spack/spack/test/link_tree.py +++ b/lib/spack/spack/test/link_tree.py @@ -53,16 +53,13 @@ class LinkTreeTest(unittest.TestCase): def tearDown(self): self.stage.destroy() - def check_file_link(self, filename): self.assertTrue(os.path.isfile(filename)) self.assertTrue(os.path.islink(filename)) - def check_dir(self, filename): self.assertTrue(os.path.isdir(filename)) - def test_merge_to_new_directory(self): with working_dir(self.stage.path): self.link_tree.merge('dest') @@ -79,7 +76,6 @@ class LinkTreeTest(unittest.TestCase): self.assertFalse(os.path.exists('dest')) - def test_merge_to_existing_directory(self): with working_dir(self.stage.path): @@ -112,7 +108,6 @@ class LinkTreeTest(unittest.TestCase): self.assertFalse(os.path.isfile('dest/c/d/6')) self.assertFalse(os.path.isfile('dest/c/d/e/7')) - def test_merge_with_empty_directories(self): with working_dir(self.stage.path): mkdirp('dest/f/g') @@ -132,7 +127,6 @@ class LinkTreeTest(unittest.TestCase): self.assertTrue(os.path.isdir('dest/a/b/h')) self.assertTrue(os.path.isdir('dest/f/g')) - def test_ignore(self): with working_dir(self.stage.path): touchp('source/.spec') diff --git a/lib/spack/spack/test/lock.py b/lib/spack/spack/test/lock.py index b24050aa74..fb96539897 100644 --- a/lib/spack/spack/test/lock.py +++ b/lib/spack/spack/test/lock.py @@ -329,6 +329,7 @@ class LockTest(unittest.TestCase): def test_transaction_with_context_manager(self): class TestContextManager(object): + def __enter__(self): vals['entered'] = True @@ -388,6 +389,7 @@ class LockTest(unittest.TestCase): def test_transaction_with_context_manager_and_exception(self): class TestContextManager(object): + def __enter__(self): vals['entered'] = True diff --git a/lib/spack/spack/test/make_executable.py b/lib/spack/spack/test/make_executable.py index b7a45a3f72..87a43a529a 100644 --- a/lib/spack/spack/test/make_executable.py +++ b/lib/spack/spack/test/make_executable.py @@ -38,6 +38,7 @@ from spack.util.environment import 
path_put_first class MakeExecutableTest(unittest.TestCase): + def setUp(self): self.tmpdir = tempfile.mkdtemp() @@ -49,34 +50,30 @@ class MakeExecutableTest(unittest.TestCase): path_put_first('PATH', [self.tmpdir]) - def tearDown(self): shutil.rmtree(self.tmpdir) - def test_make_normal(self): make = MakeExecutable('make', 8) self.assertEqual(make(output=str).strip(), '-j8') self.assertEqual(make('install', output=str).strip(), '-j8 install') - def test_make_explicit(self): make = MakeExecutable('make', 8) self.assertEqual(make(parallel=True, output=str).strip(), '-j8') - self.assertEqual(make('install', parallel=True, output=str).strip(), '-j8 install') - + self.assertEqual(make('install', parallel=True, + output=str).strip(), '-j8 install') def test_make_one_job(self): make = MakeExecutable('make', 1) self.assertEqual(make(output=str).strip(), '') self.assertEqual(make('install', output=str).strip(), 'install') - def test_make_parallel_false(self): make = MakeExecutable('make', 8) self.assertEqual(make(parallel=False, output=str).strip(), '') - self.assertEqual(make('install', parallel=False, output=str).strip(), 'install') - + self.assertEqual(make('install', parallel=False, + output=str).strip(), 'install') def test_make_parallel_disabled(self): make = MakeExecutable('make', 8) @@ -100,26 +97,29 @@ class MakeExecutableTest(unittest.TestCase): del os.environ['SPACK_NO_PARALLEL_MAKE'] - def test_make_parallel_precedence(self): make = MakeExecutable('make', 8) # These should work os.environ['SPACK_NO_PARALLEL_MAKE'] = 'true' self.assertEqual(make(parallel=True, output=str).strip(), '') - self.assertEqual(make('install', parallel=True, output=str).strip(), 'install') + self.assertEqual(make('install', parallel=True, + output=str).strip(), 'install') os.environ['SPACK_NO_PARALLEL_MAKE'] = '1' self.assertEqual(make(parallel=True, output=str).strip(), '') - self.assertEqual(make('install', parallel=True, output=str).strip(), 'install') + self.assertEqual(make('install', parallel=True, + output=str).strip(), 'install') # These don't disable (false and random string) os.environ['SPACK_NO_PARALLEL_MAKE'] = 'false' self.assertEqual(make(parallel=True, output=str).strip(), '-j8') - self.assertEqual(make('install', parallel=True, output=str).strip(), '-j8 install') + self.assertEqual(make('install', parallel=True, + output=str).strip(), '-j8 install') os.environ['SPACK_NO_PARALLEL_MAKE'] = 'foobar' self.assertEqual(make(parallel=True, output=str).strip(), '-j8') - self.assertEqual(make('install', parallel=True, output=str).strip(), '-j8 install') + self.assertEqual(make('install', parallel=True, + output=str).strip(), '-j8 install') del os.environ['SPACK_NO_PARALLEL_MAKE'] diff --git a/lib/spack/spack/test/mirror.py b/lib/spack/spack/test/mirror.py index b682d4e097..d6d7b30b7c 100644 --- a/lib/spack/spack/test/mirror.py +++ b/lib/spack/spack/test/mirror.py @@ -35,6 +35,7 @@ exclude = ['.hg', '.git', '.svn'] class MirrorTest(MockPackagesTest): + def setUp(self): """Sets up a mock package and a mock repo for each fetch strategy, to ensure that the mirror can create archives for each of them. @@ -42,7 +43,6 @@ class MirrorTest(MockPackagesTest): super(MirrorTest, self).setUp() self.repos = {} - def tearDown(self): """Destroy all the stages created by the repos in setup.""" super(MirrorTest, self).tearDown() @@ -50,7 +50,6 @@ class MirrorTest(MockPackagesTest): repo.destroy() self.repos.clear() - def set_up_package(self, name, MockRepoClass, url_attr): """Set up a mock package to be mirrored. 
Each package needs us to: @@ -71,16 +70,14 @@ class MirrorTest(MockPackagesTest): v = next(iter(pkg.versions)) pkg.versions[v][url_attr] = repo.url - def check_mirror(self): with Stage('spack-mirror-test') as stage: mirror_root = join_path(stage.path, 'test-mirror') # register mirror with spack config - mirrors = { 'spack-mirror-test' : 'file://' + mirror_root } + mirrors = {'spack-mirror-test': 'file://' + mirror_root} spack.config.update_config('mirrors', mirrors) - os.chdir(stage.path) spack.mirror.create( mirror_root, self.repos, no_checksum=True) @@ -110,16 +107,18 @@ class MirrorTest(MockPackagesTest): original_path = mock_repo.path if 'svn' in name: # have to check out the svn repo to compare. - original_path = join_path(mock_repo.path, 'checked_out') + original_path = join_path( + mock_repo.path, 'checked_out') svn('checkout', mock_repo.url, original_path) dcmp = dircmp(original_path, pkg.stage.source_path) - # make sure there are no new files in the expanded tarball + # make sure there are no new files in the expanded + # tarball self.assertFalse(dcmp.right_only) # and that all original files are present. - self.assertTrue(all(l in exclude for l in dcmp.left_only)) + self.assertTrue( + all(l in exclude for l in dcmp.left_only)) spack.do_checksum = saved_checksum_setting - def test_git_mirror(self): self.set_up_package('git-test', MockGitRepo, 'git') self.check_mirror() diff --git a/lib/spack/spack/test/mock_database.py b/lib/spack/spack/test/mock_database.py index da01e82bfa..d5867f06ec 100644 --- a/lib/spack/spack/test/mock_database.py +++ b/lib/spack/spack/test/mock_database.py @@ -33,6 +33,7 @@ from spack.test.mock_packages_test import MockPackagesTest class MockDatabase(MockPackagesTest): + def _mock_install(self, spec): s = Spec(spec) s.concretize() diff --git a/lib/spack/spack/test/mock_packages_test.py b/lib/spack/spack/test/mock_packages_test.py index 9d96622a6e..82c2712b0e 100644 --- a/lib/spack/spack/test/mock_packages_test.py +++ b/lib/spack/spack/test/mock_packages_test.py @@ -155,7 +155,9 @@ packages: externalmodule@1.0%gcc@4.5.0: external-module """ + class MockPackagesTest(unittest.TestCase): + def initmock(self): # Use the mock packages database for these tests. 
This allows # us to set up contrived packages that don't interfere with @@ -172,7 +174,8 @@ class MockPackagesTest(unittest.TestCase): self.mock_user_config = os.path.join(self.temp_config, 'user') mkdirp(self.mock_site_config) mkdirp(self.mock_user_config) - for confs in [('compilers.yaml', mock_compiler_config), ('packages.yaml', mock_packages_config)]: + for confs in [('compilers.yaml', mock_compiler_config), + ('packages.yaml', mock_packages_config)]: conf_yaml = os.path.join(self.mock_site_config, confs[0]) with open(conf_yaml, 'w') as f: f.write(confs[1]) @@ -209,7 +212,6 @@ class MockPackagesTest(unittest.TestCase): pkg.dependencies[spec.name] = {Spec(pkg_name): spec} pkg._deptypes[spec.name] = set(deptypes) - def cleanmock(self): """Restore the real packages path after any test.""" spack.repo.swap(self.db) @@ -226,10 +228,8 @@ class MockPackagesTest(unittest.TestCase): shutil.rmtree(spack.share_path, ignore_errors=True) spack.share_path = self.real_share_path - def setUp(self): self.initmock() - def tearDown(self): self.cleanmock() diff --git a/lib/spack/spack/test/mock_repo.py b/lib/spack/spack/test/mock_repo.py index 386af282e7..0ae7dbd516 100644 --- a/lib/spack/spack/test/mock_repo.py +++ b/lib/spack/spack/test/mock_repo.py @@ -40,6 +40,7 @@ tar = which('tar', required=True) class MockRepo(object): + def __init__(self, stage_name, repo_name): """This creates a stage where some archive/repo files can be staged for testing spack's fetch strategies.""" @@ -50,7 +51,6 @@ class MockRepo(object): self.path = join_path(self.stage.path, repo_name) mkdirp(self.path) - def destroy(self): """Destroy resources associated with this mock repo.""" if self.stage: @@ -90,6 +90,7 @@ class MockArchive(MockRepo): class MockVCSRepo(MockRepo): + def __init__(self, stage_name, repo_name): """This creates a stage and a repo directory within the stage.""" super(MockVCSRepo, self).__init__(stage_name, repo_name) @@ -100,6 +101,7 @@ class MockVCSRepo(MockRepo): class MockGitRepo(MockVCSRepo): + def __init__(self): super(MockGitRepo, self).__init__('mock-git-stage', 'mock-git-repo') @@ -147,6 +149,7 @@ class MockGitRepo(MockVCSRepo): class MockSvnRepo(MockVCSRepo): + def __init__(self): super(MockSvnRepo, self).__init__('mock-svn-stage', 'mock-svn-repo') @@ -176,6 +179,7 @@ class MockSvnRepo(MockVCSRepo): class MockHgRepo(MockVCSRepo): + def __init__(self): super(MockHgRepo, self).__init__('mock-hg-stage', 'mock-hg-repo') self.url = 'file://' + self.path diff --git a/lib/spack/spack/test/multimethod.py b/lib/spack/spack/test/multimethod.py index c233ea4fd6..a885374080 100644 --- a/lib/spack/spack/test/multimethod.py +++ b/lib/spack/spack/test/multimethod.py @@ -25,15 +25,10 @@ """ Test for multi_method dispatch. 
""" -import unittest - import spack from spack.multimethod import * from spack.version import * -from spack.spec import Spec -from spack.multimethod import when from spack.test.mock_packages_test import * -from spack.version import * class MultiMethodTest(MockPackagesTest): @@ -42,7 +37,6 @@ class MultiMethodTest(MockPackagesTest): pkg = spack.repo.get('multimethod@2.0') self.assertRaises(NoSuchMethodError, pkg.no_version_2) - def test_one_version_match(self): pkg = spack.repo.get('multimethod@1.0') self.assertEqual(pkg.no_version_2(), 1) @@ -53,7 +47,6 @@ class MultiMethodTest(MockPackagesTest): pkg = spack.repo.get('multimethod@4.0') self.assertEqual(pkg.no_version_2(), 4) - def test_version_overlap(self): pkg = spack.repo.get('multimethod@2.0') self.assertEqual(pkg.version_overlap(), 1) @@ -61,7 +54,6 @@ class MultiMethodTest(MockPackagesTest): pkg = spack.repo.get('multimethod@5.0') self.assertEqual(pkg.version_overlap(), 2) - def test_mpi_version(self): pkg = spack.repo.get('multimethod^mpich@3.0.4') self.assertEqual(pkg.mpi_version(), 3) @@ -72,7 +64,6 @@ class MultiMethodTest(MockPackagesTest): pkg = spack.repo.get('multimethod^mpich@1.0') self.assertEqual(pkg.mpi_version(), 1) - def test_undefined_mpi_version(self): pkg = spack.repo.get('multimethod^mpich@0.4') self.assertEqual(pkg.mpi_version(), 1) @@ -80,7 +71,6 @@ class MultiMethodTest(MockPackagesTest): pkg = spack.repo.get('multimethod^mpich@1.4') self.assertEqual(pkg.mpi_version(), 1) - def test_default_works(self): pkg = spack.repo.get('multimethod%gcc') self.assertEqual(pkg.has_a_default(), 'gcc') @@ -91,21 +81,19 @@ class MultiMethodTest(MockPackagesTest): pkg = spack.repo.get('multimethod%pgi') self.assertEqual(pkg.has_a_default(), 'default') - def test_target_match(self): platform = spack.architecture.platform() targets = platform.targets.values() for target in targets[:-1]: - pkg = spack.repo.get('multimethod target='+target.name) + pkg = spack.repo.get('multimethod target=' + target.name) self.assertEqual(pkg.different_by_target(), target.name) - pkg = spack.repo.get('multimethod target='+targets[-1].name) + pkg = spack.repo.get('multimethod target=' + targets[-1].name) if len(targets) == 1: self.assertEqual(pkg.different_by_target(), targets[-1].name) else: self.assertRaises(NoSuchMethodError, pkg.different_by_target) - def test_dependency_match(self): pkg = spack.repo.get('multimethod^zmpi') self.assertEqual(pkg.different_by_dep(), 'zmpi') @@ -118,7 +106,6 @@ class MultiMethodTest(MockPackagesTest): pkg = spack.repo.get('multimethod^foobar') self.assertEqual(pkg.different_by_dep(), 'mpich') - def test_virtual_dep_match(self): pkg = spack.repo.get('multimethod^mpich2') self.assertEqual(pkg.different_by_virtual_dep(), 2) diff --git a/lib/spack/spack/test/namespace_trie.py b/lib/spack/spack/test/namespace_trie.py index b38ecd6179..7927fc8e60 100644 --- a/lib/spack/spack/test/namespace_trie.py +++ b/lib/spack/spack/test/namespace_trie.py @@ -32,7 +32,6 @@ class NamespaceTrieTest(unittest.TestCase): def setUp(self): self.trie = NamespaceTrie() - def test_add_single(self): self.trie['foo'] = 'bar' @@ -40,7 +39,6 @@ class NamespaceTrieTest(unittest.TestCase): self.assertTrue(self.trie.has_value('foo')) self.assertEqual(self.trie['foo'], 'bar') - def test_add_multiple(self): self.trie['foo.bar'] = 'baz' @@ -54,7 +52,6 @@ class NamespaceTrieTest(unittest.TestCase): self.assertFalse(self.trie.is_prefix('foo.bar.baz')) self.assertFalse(self.trie.has_value('foo.bar.baz')) - def test_add_three(self): # add a three-level namespace 
self.trie['foo.bar.baz'] = 'quux' @@ -89,7 +86,6 @@ class NamespaceTrieTest(unittest.TestCase): self.assertFalse(self.trie.is_prefix('foo.bar.baz.quux')) self.assertFalse(self.trie.has_value('foo.bar.baz.quux')) - def test_add_none_single(self): self.trie['foo'] = None self.assertTrue(self.trie.is_prefix('foo')) @@ -99,8 +95,6 @@ class NamespaceTrieTest(unittest.TestCase): self.assertFalse(self.trie.is_prefix('foo.bar')) self.assertFalse(self.trie.has_value('foo.bar')) - - def test_add_none_multiple(self): self.trie['foo.bar'] = None diff --git a/lib/spack/spack/test/operating_system.py b/lib/spack/spack/test/operating_system.py index ed5f6ff8ad..8723f7244d 100644 --- a/lib/spack/spack/test/operating_system.py +++ b/lib/spack/spack/test/operating_system.py @@ -1,18 +1,39 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## """ Test checks if the operating_system class is created correctly and that the functions are using the correct operating_system. Also checks whether the operating_system correctly uses the compiler_strategy """ - import unittest -import os -import platform from spack.platforms.cray_xc import CrayXc from spack.platforms.linux import Linux from spack.platforms.darwin import Darwin from spack.operating_system.linux_distro import LinuxDistro -from spack.operating_system.mac_os import MacOs from spack.operating_system.cnl import ComputeNodeLinux + class TestOperatingSystem(unittest.TestCase): def setUp(self): @@ -32,7 +53,7 @@ class TestOperatingSystem(unittest.TestCase): self.assertEquals(self.cray_operating_sys.compiler_strategy, "PATH") def test_cray_back_end_operating_system(self): - self.assertIsInstance(self.cray_back_os,ComputeNodeLinux) + self.assertIsInstance(self.cray_back_os, ComputeNodeLinux) def test_cray_back_end_compiler_strategy(self): self.assertEquals(self.cray_back_os.compiler_strategy, "MODULES") @@ -43,7 +64,6 @@ class TestOperatingSystem(unittest.TestCase): def test_linux_compiler_strategy(self): self.assertEquals(self.linux_operating_sys.compiler_strategy, "PATH") - def test_cray_front_end_compiler_list(self): """ Operating systems will now be in charge of finding compilers. 
So, depending on which operating system you want to build for diff --git a/lib/spack/spack/test/optional_deps.py b/lib/spack/spack/test/optional_deps.py index b5ba0ecf35..a9a2b9abf5 100644 --- a/lib/spack/spack/test/optional_deps.py +++ b/lib/spack/spack/test/optional_deps.py @@ -22,10 +22,10 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## - from spack.spec import Spec from spack.test.mock_packages_test import * + class ConcretizeTest(MockPackagesTest): def check_normalize(self, spec_string, expected): @@ -34,10 +34,10 @@ class ConcretizeTest(MockPackagesTest): self.assertEqual(spec, expected) self.assertTrue(spec.eq_dag(expected)) - def test_normalize_simple_conditionals(self): self.check_normalize('optional-dep-test', Spec('optional-dep-test')) - self.check_normalize('optional-dep-test~a', Spec('optional-dep-test~a')) + self.check_normalize('optional-dep-test~a', + Spec('optional-dep-test~a')) self.check_normalize('optional-dep-test+a', Spec('optional-dep-test+a', Spec('a'))) @@ -45,7 +45,6 @@ class ConcretizeTest(MockPackagesTest): self.check_normalize('optional-dep-test a=true', Spec('optional-dep-test a=true', Spec('a'))) - self.check_normalize('optional-dep-test a=true', Spec('optional-dep-test+a', Spec('a'))) @@ -55,25 +54,29 @@ class ConcretizeTest(MockPackagesTest): self.check_normalize('optional-dep-test%intel', Spec('optional-dep-test%intel', Spec('c'))) - self.check_normalize('optional-dep-test%intel@64.1', - Spec('optional-dep-test%intel@64.1', Spec('c'), Spec('d'))) + self.check_normalize( + 'optional-dep-test%intel@64.1', + Spec('optional-dep-test%intel@64.1', Spec('c'), Spec('d'))) - self.check_normalize('optional-dep-test%intel@64.1.2', - Spec('optional-dep-test%intel@64.1.2', Spec('c'), Spec('d'))) + self.check_normalize( + 'optional-dep-test%intel@64.1.2', + Spec('optional-dep-test%intel@64.1.2', Spec('c'), Spec('d'))) self.check_normalize('optional-dep-test%clang@35', Spec('optional-dep-test%clang@35', Spec('e'))) - def test_multiple_conditionals(self): - self.check_normalize('optional-dep-test+a@1.1', - Spec('optional-dep-test+a@1.1', Spec('a'), Spec('b'))) + self.check_normalize( + 'optional-dep-test+a@1.1', + Spec('optional-dep-test+a@1.1', Spec('a'), Spec('b'))) - self.check_normalize('optional-dep-test+a%intel', - Spec('optional-dep-test+a%intel', Spec('a'), Spec('c'))) + self.check_normalize( + 'optional-dep-test+a%intel', + Spec('optional-dep-test+a%intel', Spec('a'), Spec('c'))) - self.check_normalize('optional-dep-test@1.1%intel', - Spec('optional-dep-test@1.1%intel', Spec('b'), Spec('c'))) + self.check_normalize( + 'optional-dep-test@1.1%intel', + Spec('optional-dep-test@1.1%intel', Spec('b'), Spec('c'))) self.check_normalize('optional-dep-test@1.1%intel@64.1.2+a', Spec('optional-dep-test@1.1%intel@64.1.2+a', @@ -83,14 +86,12 @@ class ConcretizeTest(MockPackagesTest): Spec('optional-dep-test@1.1%clang@36.5+a', Spec('b'), Spec('a'), Spec('e'))) - def test_chained_mpi(self): self.check_normalize('optional-dep-test-2+mpi', Spec('optional-dep-test-2+mpi', Spec('optional-dep-test+mpi', Spec('mpi')))) - def test_default_variant(self): spec = Spec('optional-dep-test-3') spec.concretize() @@ -104,7 +105,6 @@ class ConcretizeTest(MockPackagesTest): spec.concretize() self.assertTrue('b' in spec) - def test_transitive_chain(self): # Each of these dependencies comes from a conditional # dependency 
on another. This requires iterating to evaluate diff --git a/lib/spack/spack/test/package_sanity.py b/lib/spack/spack/test/package_sanity.py index 9198986f5d..c3c3923855 100644 --- a/lib/spack/spack/test/package_sanity.py +++ b/lib/spack/spack/test/package_sanity.py @@ -38,12 +38,10 @@ class PackageSanityTest(unittest.TestCase): for name in spack.repo.all_package_names(): spack.repo.get(name) - def test_get_all_packages(self): """Get all packages once and make sure that works.""" self.check_db() - def test_get_all_mock_packages(self): """Get the mock packages once each too.""" db = RepoPath(spack.mock_packages_path) @@ -51,7 +49,6 @@ class PackageSanityTest(unittest.TestCase): self.check_db() spack.repo.swap(db) - def test_url_versions(self): """Check URLs for regular packages, if they are explicitly defined.""" for pkg in spack.repo.all_packages(): diff --git a/lib/spack/spack/test/packages.py b/lib/spack/spack/test/packages.py index bea42bb33a..fdd079a8f7 100644 --- a/lib/spack/spack/test/packages.py +++ b/lib/spack/spack/test/packages.py @@ -22,7 +22,6 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## - import spack from llnl.util.filesystem import join_path from spack.repository import Repo @@ -33,33 +32,26 @@ from spack.util.naming import mod_to_class class PackagesTest(MockPackagesTest): def test_load_package(self): - pkg = spack.repo.get('mpich') - + spack.repo.get('mpich') def test_package_name(self): pkg = spack.repo.get('mpich') self.assertEqual(pkg.name, 'mpich') - def test_package_filename(self): repo = Repo(spack.mock_packages_path) filename = repo.filename_for_package_name('mpich') self.assertEqual(filename, - join_path(spack.mock_packages_path, 'packages', 'mpich', 'package.py')) - - - def test_package_name(self): - pkg = spack.repo.get('mpich') - self.assertEqual(pkg.name, 'mpich') - + join_path(spack.mock_packages_path, + 'packages', 'mpich', 'package.py')) def test_nonexisting_package_filename(self): repo = Repo(spack.mock_packages_path) filename = repo.filename_for_package_name('some-nonexisting-package') self.assertEqual( filename, - join_path(spack.mock_packages_path, 'packages', 'some-nonexisting-package', 'package.py')) - + join_path(spack.mock_packages_path, + 'packages', 'some-nonexisting-package', 'package.py')) def test_package_class_names(self): self.assertEqual('Mpich', mod_to_class('mpich')) @@ -68,37 +60,32 @@ class PackagesTest(MockPackagesTest): self.assertEqual('Pmgrcollective', mod_to_class('PmgrCollective')) self.assertEqual('_3db', mod_to_class('3db')) - # # Below tests target direct imports of spack packages from the # spack.pkg namespace # def test_import_package(self): - import spack.pkg.builtin.mock.mpich - + import spack.pkg.builtin.mock.mpich # noqa def test_import_package_as(self): - import spack.pkg.builtin.mock.mpich as mp - + import spack.pkg.builtin.mock.mpich as mp # noqa def test_import_class_from_package(self): - from spack.pkg.builtin.mock.mpich import Mpich - + from spack.pkg.builtin.mock.mpich import Mpich # noqa def test_import_module_from_package(self): - from spack.pkg.builtin.mock import mpich - + from spack.pkg.builtin.mock import mpich # noqa def test_import_namespace_container_modules(self): - import spack.pkg - import spack.pkg as p - from spack import pkg + import spack.pkg # noqa + import spack.pkg as p # noqa + from spack import pkg # noqa - import 
spack.pkg.builtin - import spack.pkg.builtin as b - from spack.pkg import builtin + import spack.pkg.builtin # noqa + import spack.pkg.builtin as b # noqa + from spack.pkg import builtin # noqa - import spack.pkg.builtin.mock - import spack.pkg.builtin.mock as m - from spack.pkg.builtin import mock + import spack.pkg.builtin.mock # noqa + import spack.pkg.builtin.mock as m # noqa + from spack.pkg.builtin import mock # noqa diff --git a/lib/spack/spack/test/pattern.py b/lib/spack/spack/test/pattern.py index 3419d600b8..0c772a0d2d 100644 --- a/lib/spack/spack/test/pattern.py +++ b/lib/spack/spack/test/pattern.py @@ -41,6 +41,7 @@ class CompositeTest(unittest.TestCase): raise NotImplemented('subtract not implemented') class One(Base): + def add(self): Base.counter += 1 @@ -48,6 +49,7 @@ class CompositeTest(unittest.TestCase): Base.counter -= 1 class Two(Base): + def add(self): Base.counter += 2 diff --git a/lib/spack/spack/test/python_version.py b/lib/spack/spack/test/python_version.py index 6c09effc56..5af55bdc5f 100644 --- a/lib/spack/spack/test/python_version.py +++ b/lib/spack/spack/test/python_version.py @@ -36,7 +36,8 @@ import llnl.util.tty as tty import pyqver2 import spack -spack_max_version = (2,6) +spack_max_version = (2, 6) + class PythonVersionTest(unittest.TestCase): @@ -51,12 +52,10 @@ class PythonVersionTest(unittest.TestCase): if re.match(r'^[^.#].*\.py$', filename): yield os.path.join(root, filename) - def package_py_files(self): for name in spack.repo.all_package_names(): yield spack.repo.filename_for_package_name(name) - def check_python_versions(self, *files): # dict version -> filename -> reasons all_issues = {} @@ -66,7 +65,7 @@ class PythonVersionTest(unittest.TestCase): versions = pyqver2.get_versions(pyfile.read()) for ver, reasons in versions.items(): if ver > spack_max_version: - if not ver in all_issues: + if ver not in all_issues: all_issues[ver] = {} all_issues[ver][fn] = reasons @@ -87,7 +86,7 @@ class PythonVersionTest(unittest.TestCase): tty.error("These files require version %d.%d:" % v) maxlen = max(len(f) for f, prob in msgs) - fmt = "%%-%ds%%s" % (maxlen+3) + fmt = "%%-%ds%%s" % (maxlen + 3) print fmt % ('File', 'Reason') print fmt % ('-' * (maxlen), '-' * 20) for msg in msgs: @@ -95,10 +94,8 @@ class PythonVersionTest(unittest.TestCase): self.assertTrue(len(all_issues) == 0) - def test_core_module_compatibility(self): self.check_python_versions(*self.pyfiles(spack.lib_path)) - def test_package_module_compatibility(self): self.check_python_versions(*self.pyfiles(spack.packages_path)) diff --git a/lib/spack/spack/test/sbang.py b/lib/spack/spack/test/sbang.py index 4ce854a1d8..12abce7b35 100644 --- a/lib/spack/spack/test/sbang.py +++ b/lib/spack/spack/test/sbang.py @@ -44,6 +44,7 @@ last_line = "last!\n" class SbangTest(unittest.TestCase): + def setUp(self): self.tempdir = tempfile.mkdtemp() diff --git a/lib/spack/spack/test/spec_dag.py b/lib/spack/spack/test/spec_dag.py index 8522431fbb..8f61c7ac76 100644 --- a/lib/spack/spack/test/spec_dag.py +++ b/lib/spack/spack/test/spec_dag.py @@ -455,6 +455,7 @@ class SpecDagTest(MockPackagesTest): run3 -b-> build3 """ + def test_deptype_traversal(self): dag = Spec('dtuse') dag.normalize() diff --git a/lib/spack/spack/test/spec_semantics.py b/lib/spack/spack/test/spec_semantics.py index b174e5305c..79ffc99298 100644 --- a/lib/spack/spack/test/spec_semantics.py +++ b/lib/spack/spack/test/spec_semantics.py @@ -22,18 +22,18 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 
Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import unittest import spack.architecture from spack.spec import * from spack.test.mock_packages_test import * + class SpecSematicsTest(MockPackagesTest): """This tests satisfies(), constrain() and other semantic operations on specs.""" - # ================================================================================ + # ======================================================================== # Utility functions to set everything up. - # ================================================================================ + # ======================================================================== def check_satisfies(self, spec, anon_spec, concrete=False): left = Spec(spec, concrete=concrete) try: @@ -49,7 +49,6 @@ class SpecSematicsTest(MockPackagesTest): # right by left. Reverse is not always true. right.copy().constrain(left) - def check_unsatisfiable(self, spec, anon_spec, concrete=False): left = Spec(spec, concrete=concrete) try: @@ -62,7 +61,6 @@ class SpecSematicsTest(MockPackagesTest): self.assertRaises(UnsatisfiableSpecError, right.copy().constrain, left) - def check_constrain(self, expected, spec, constraint): exp = Spec(expected) spec = Spec(spec) @@ -70,53 +68,48 @@ class SpecSematicsTest(MockPackagesTest): spec.constrain(constraint) self.assertEqual(exp, spec) - def check_constrain_changed(self, spec, constraint): spec = Spec(spec) self.assertTrue(spec.constrain(constraint)) - def check_constrain_not_changed(self, spec, constraint): spec = Spec(spec) self.assertFalse(spec.constrain(constraint)) - def check_invalid_constraint(self, spec, constraint): spec = Spec(spec) constraint = Spec(constraint) self.assertRaises(UnsatisfiableSpecError, spec.constrain, constraint) - - # ================================================================================ + # ======================================================================== # Satisfiability - # ================================================================================ + # ======================================================================== def test_satisfies(self): self.check_satisfies('libelf@0.8.13', '@0:1') self.check_satisfies('libdwarf^libelf@0.8.13', '^libelf@0:1') - def test_satisfies_namespace(self): self.check_satisfies('builtin.mpich', 'mpich') self.check_satisfies('builtin.mock.mpich', 'mpich') - # TODO: only works for deps now, but shouldn't we allow this for root spec? + # TODO: only works for deps now, but shouldn't we allow for root spec? 
# self.check_satisfies('builtin.mock.mpich', 'mpi') self.check_satisfies('builtin.mock.mpich', 'builtin.mock.mpich') self.check_unsatisfiable('builtin.mock.mpich', 'builtin.mpich') - def test_satisfies_namespaced_dep(self): """Ensure spec from same or unspecified namespace satisfies namespace constraint.""" self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpich') self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpi') - self.check_satisfies('mpileaks ^builtin.mock.mpich', '^builtin.mock.mpich') - - self.check_unsatisfiable('mpileaks ^builtin.mock.mpich', '^builtin.mpich') + self.check_satisfies( + 'mpileaks ^builtin.mock.mpich', '^builtin.mock.mpich') + self.check_unsatisfiable( + 'mpileaks ^builtin.mock.mpich', '^builtin.mpich') def test_satisfies_compiler(self): self.check_satisfies('foo%gcc', '%gcc') @@ -124,7 +117,6 @@ class SpecSematicsTest(MockPackagesTest): self.check_unsatisfiable('foo%intel', '%gcc') self.check_unsatisfiable('foo%intel', '%pgi') - def test_satisfies_compiler_version(self): self.check_satisfies('foo%gcc', '%gcc@4.7.2') self.check_satisfies('foo%intel', '%intel@4.7.2') @@ -139,7 +131,6 @@ class SpecSematicsTest(MockPackagesTest): self.check_satisfies('foo %gcc@4.7.3', '%gcc@4.7') self.check_unsatisfiable('foo %gcc@4.7', '%gcc@4.7.3') - def test_satisfies_architecture(self): self.check_satisfies( 'foo platform=test target=frontend os=frontend', @@ -151,7 +142,6 @@ class SpecSematicsTest(MockPackagesTest): 'foo platform=test target=default_target os=default_os', 'platform=test target=default_target os=default_os') - def test_satisfies_dependencies(self): self.check_satisfies('mpileaks^mpich', '^mpich') self.check_satisfies('mpileaks^zmpi', '^zmpi') @@ -159,7 +149,6 @@ class SpecSematicsTest(MockPackagesTest): self.check_unsatisfiable('mpileaks^mpich', '^zmpi') self.check_unsatisfiable('mpileaks^zmpi', '^mpich') - def test_satisfies_dependency_versions(self): self.check_satisfies('mpileaks^mpich@2.0', '^mpich@1:3') self.check_unsatisfiable('mpileaks^mpich@1.2', '^mpich@2.0') @@ -173,7 +162,6 @@ class SpecSematicsTest(MockPackagesTest): self.check_unsatisfiable( 'mpileaks^mpich@4.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6') - def test_satisfies_virtual_dependencies(self): self.check_satisfies('mpileaks^mpi', '^mpi') self.check_satisfies('mpileaks^mpi', '^mpich') @@ -181,7 +169,6 @@ class SpecSematicsTest(MockPackagesTest): self.check_satisfies('mpileaks^mpi', '^zmpi') self.check_unsatisfiable('mpileaks^mpich', '^zmpi') - def test_satisfies_virtual_dependency_versions(self): self.check_satisfies('mpileaks^mpi@1.5', '^mpi@1.2:1.6') self.check_unsatisfiable('mpileaks^mpi@3', '^mpi@1.2:1.6') @@ -197,26 +184,23 @@ class SpecSematicsTest(MockPackagesTest): self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich2') self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich@1.0') - def test_satisfies_matching_variant(self): self.check_satisfies('mpich+foo', 'mpich+foo') self.check_satisfies('mpich~foo', 'mpich~foo') self.check_satisfies('mpich foo=1', 'mpich foo=1') - #confirm that synonymous syntax works correctly + # confirm that synonymous syntax works correctly self.check_satisfies('mpich+foo', 'mpich foo=True') self.check_satisfies('mpich foo=true', 'mpich+foo') self.check_satisfies('mpich~foo', 'mpich foo=FALSE') self.check_satisfies('mpich foo=False', 'mpich~foo') - def test_satisfies_unconstrained_variant(self): # only asked for mpich, no constraints. Either will do. 
self.check_satisfies('mpich+foo', 'mpich') self.check_satisfies('mpich~foo', 'mpich') self.check_satisfies('mpich foo=1', 'mpich') - def test_unsatisfiable_variants(self): # This case is different depending on whether the specs are concrete. @@ -230,24 +214,21 @@ class SpecSematicsTest(MockPackagesTest): self.check_unsatisfiable('mpich', 'mpich~foo', True) self.check_unsatisfiable('mpich', 'mpich foo=1', True) - def test_unsatisfiable_variant_mismatch(self): # No matchi in specs self.check_unsatisfiable('mpich~foo', 'mpich+foo') self.check_unsatisfiable('mpich+foo', 'mpich~foo') self.check_unsatisfiable('mpich foo=1', 'mpich foo=2') - def test_satisfies_matching_compiler_flag(self): self.check_satisfies('mpich cppflags="-O3"', 'mpich cppflags="-O3"') - self.check_satisfies('mpich cppflags="-O3 -Wall"', 'mpich cppflags="-O3 -Wall"') - + self.check_satisfies('mpich cppflags="-O3 -Wall"', + 'mpich cppflags="-O3 -Wall"') def test_satisfies_unconstrained_compiler_flag(self): # only asked for mpich, no constraints. Any will do. self.check_satisfies('mpich cppflags="-O3"', 'mpich') - def test_unsatisfiable_compiler_flag(self): # This case is different depending on whether the specs are concrete. @@ -257,11 +238,10 @@ class SpecSematicsTest(MockPackagesTest): # 'mpich' is concrete: self.check_unsatisfiable('mpich', 'mpich cppflags="-O3"', True) - def test_unsatisfiable_compiler_flag_mismatch(self): # No matchi in specs - self.check_unsatisfiable('mpich cppflags="-O3"', 'mpich cppflags="-O2"') - + self.check_unsatisfiable( + 'mpich cppflags="-O3"', 'mpich cppflags="-O2"') def test_satisfies_virtual(self): # Don't use check_satisfies: it checks constrain() too, and @@ -270,25 +250,30 @@ class SpecSematicsTest(MockPackagesTest): self.assertTrue(Spec('mpich2').satisfies(Spec('mpi'))) self.assertTrue(Spec('zmpi').satisfies(Spec('mpi'))) - def test_satisfies_virtual_dep_with_virtual_constraint(self): """Ensure we can satisfy virtual constraints when there are multiple vdep providers in the specs.""" - self.assertTrue(Spec('netlib-lapack ^openblas').satisfies('netlib-lapack ^openblas')) - self.assertFalse(Spec('netlib-lapack ^netlib-blas').satisfies('netlib-lapack ^openblas')) - - self.assertFalse(Spec('netlib-lapack ^openblas').satisfies('netlib-lapack ^netlib-blas')) - self.assertTrue(Spec('netlib-lapack ^netlib-blas').satisfies('netlib-lapack ^netlib-blas')) - - - # ================================================================================ + self.assertTrue( + Spec('netlib-lapack ^openblas').satisfies( + 'netlib-lapack ^openblas')) + self.assertFalse( + Spec('netlib-lapack ^netlib-blas').satisfies( + 'netlib-lapack ^openblas')) + + self.assertFalse( + Spec('netlib-lapack ^openblas').satisfies( + 'netlib-lapack ^netlib-blas')) + self.assertTrue( + Spec('netlib-lapack ^netlib-blas').satisfies( + 'netlib-lapack ^netlib-blas')) + + # ======================================================================== # Indexing specs - # ================================================================================ + # ======================================================================== def test_self_index(self): s = Spec('callpath') self.assertTrue(s['callpath'] == s) - def test_dep_index(self): s = Spec('callpath') s.normalize() @@ -304,7 +289,6 @@ class SpecSematicsTest(MockPackagesTest): self.assertTrue(s['libelf'].name == 'libelf') self.assertTrue(s['mpi'].name == 'mpi') - def test_spec_contains_deps(self): s = Spec('callpath') s.normalize() @@ -313,7 +297,6 @@ class 
SpecSematicsTest(MockPackagesTest): self.assertTrue('libelf' in s) self.assertTrue('mpi' in s) - def test_virtual_index(self): s = Spec('callpath') s.concretize() @@ -327,7 +310,6 @@ class SpecSematicsTest(MockPackagesTest): s_zmpi = Spec('callpath ^zmpi') s_zmpi.concretize() - self.assertTrue(s['mpi'].name != 'mpi') self.assertTrue(s_mpich['mpi'].name == 'mpich') self.assertTrue(s_mpich2['mpi'].name == 'mpich2') @@ -336,29 +318,34 @@ class SpecSematicsTest(MockPackagesTest): for spec in [s, s_mpich, s_mpich2, s_zmpi]: self.assertTrue('mpi' in spec) - - # ================================================================================ + # ======================================================================== # Constraints - # ================================================================================ + # ======================================================================== def test_constrain_variants(self): self.check_constrain('libelf@2.1:2.5', 'libelf@0:2.5', 'libelf@2.1:3') self.check_constrain('libelf@2.1:2.5%gcc@4.5:4.6', - 'libelf@0:2.5%gcc@2:4.6', 'libelf@2.1:3%gcc@4.5:4.7') + 'libelf@0:2.5%gcc@2:4.6', + 'libelf@2.1:3%gcc@4.5:4.7') self.check_constrain('libelf+debug+foo', 'libelf+debug', 'libelf+foo') - self.check_constrain('libelf+debug+foo', 'libelf+debug', 'libelf+debug+foo') + self.check_constrain('libelf+debug+foo', + 'libelf+debug', 'libelf+debug+foo') - self.check_constrain('libelf debug=2 foo=1', 'libelf debug=2', 'libelf foo=1') - self.check_constrain('libelf debug=2 foo=1', 'libelf debug=2', 'libelf debug=2 foo=1') + self.check_constrain('libelf debug=2 foo=1', + 'libelf debug=2', 'libelf foo=1') + self.check_constrain('libelf debug=2 foo=1', + 'libelf debug=2', 'libelf debug=2 foo=1') self.check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf~foo') - self.check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf+debug~foo') - + self.check_constrain('libelf+debug~foo', + 'libelf+debug', 'libelf+debug~foo') def test_constrain_compiler_flags(self): - self.check_constrain('libelf cflags="-O3" cppflags="-Wall"', 'libelf cflags="-O3"', 'libelf cppflags="-Wall"') - self.check_constrain('libelf cflags="-O3" cppflags="-Wall"', 'libelf cflags="-O3"', 'libelf cflags="-O3" cppflags="-Wall"') - + self.check_constrain('libelf cflags="-O3" cppflags="-Wall"', + 'libelf cflags="-O3"', 'libelf cppflags="-Wall"') + self.check_constrain('libelf cflags="-O3" cppflags="-Wall"', + 'libelf cflags="-O3"', + 'libelf cflags="-O3" cppflags="-Wall"') def test_constrain_architecture(self): self.check_constrain('libelf target=default_target os=default_os', @@ -369,21 +356,24 @@ class SpecSematicsTest(MockPackagesTest): 'libelf target=default_target os=default_os') def test_constrain_compiler(self): - self.check_constrain('libelf %gcc@4.4.7', 'libelf %gcc@4.4.7', 'libelf %gcc@4.4.7') - self.check_constrain('libelf %gcc@4.4.7', 'libelf', 'libelf %gcc@4.4.7') - + self.check_constrain('libelf %gcc@4.4.7', + 'libelf %gcc@4.4.7', 'libelf %gcc@4.4.7') + self.check_constrain('libelf %gcc@4.4.7', + 'libelf', 'libelf %gcc@4.4.7') def test_invalid_constraint(self): self.check_invalid_constraint('libelf@0:2.0', 'libelf@2.1:3') - self.check_invalid_constraint('libelf@0:2.5%gcc@4.8:4.9', 'libelf@2.1:3%gcc@4.5:4.7') + self.check_invalid_constraint( + 'libelf@0:2.5%gcc@4.8:4.9', 'libelf@2.1:3%gcc@4.5:4.7') self.check_invalid_constraint('libelf+debug', 'libelf~debug') self.check_invalid_constraint('libelf+debug~foo', 'libelf+debug+foo') self.check_invalid_constraint('libelf debug=2', 'libelf debug=1') 
- self.check_invalid_constraint('libelf cppflags="-O3"', 'libelf cppflags="-O2"') + self.check_invalid_constraint( + 'libelf cppflags="-O3"', 'libelf cppflags="-O2"') self.check_invalid_constraint('libelf platform=test target=be os=be', - 'libelf target=fe os=fe') + 'libelf target=fe os=fe') def test_constrain_changed(self): self.check_constrain_changed('libelf', '@1.0') @@ -396,9 +386,10 @@ class SpecSematicsTest(MockPackagesTest): self.check_constrain_changed('libelf', 'cppflags="-O3"') platform = spack.architecture.platform() - self.check_constrain_changed('libelf', 'target='+platform.target('default_target').name) - self.check_constrain_changed('libelf', 'os='+platform.operating_system('default_os').name) - + self.check_constrain_changed( + 'libelf', 'target=' + platform.target('default_target').name) + self.check_constrain_changed( + 'libelf', 'os=' + platform.operating_system('default_os').name) def test_constrain_not_changed(self): self.check_constrain_not_changed('libelf', 'libelf') @@ -409,12 +400,13 @@ class SpecSematicsTest(MockPackagesTest): self.check_constrain_not_changed('libelf+debug', '+debug') self.check_constrain_not_changed('libelf~debug', '~debug') self.check_constrain_not_changed('libelf debug=2', 'debug=2') - self.check_constrain_not_changed('libelf cppflags="-O3"', 'cppflags="-O3"') + self.check_constrain_not_changed( + 'libelf cppflags="-O3"', 'cppflags="-O3"') platform = spack.architecture.platform() default_target = platform.target('default_target').name - self.check_constrain_not_changed('libelf target='+default_target, 'target='+default_target) - + self.check_constrain_not_changed( + 'libelf target=' + default_target, 'target=' + default_target) def test_constrain_dependency_changed(self): self.check_constrain_changed('libelf^foo', 'libelf^foo@1.0') @@ -426,18 +418,25 @@ class SpecSematicsTest(MockPackagesTest): platform = spack.architecture.platform() default_target = platform.target('default_target').name - self.check_constrain_changed('libelf^foo', 'libelf^foo target='+default_target) - + self.check_constrain_changed( + 'libelf^foo', 'libelf^foo target=' + default_target) def test_constrain_dependency_not_changed(self): self.check_constrain_not_changed('libelf^foo@1.0', 'libelf^foo@1.0') - self.check_constrain_not_changed('libelf^foo@1.0:5.0', 'libelf^foo@1.0:5.0') + self.check_constrain_not_changed( + 'libelf^foo@1.0:5.0', 'libelf^foo@1.0:5.0') self.check_constrain_not_changed('libelf^foo%gcc', 'libelf^foo%gcc') - self.check_constrain_not_changed('libelf^foo%gcc@4.5', 'libelf^foo%gcc@4.5') - self.check_constrain_not_changed('libelf^foo+debug', 'libelf^foo+debug') - self.check_constrain_not_changed('libelf^foo~debug', 'libelf^foo~debug') - self.check_constrain_not_changed('libelf^foo cppflags="-O3"', 'libelf^foo cppflags="-O3"') + self.check_constrain_not_changed( + 'libelf^foo%gcc@4.5', 'libelf^foo%gcc@4.5') + self.check_constrain_not_changed( + 'libelf^foo+debug', 'libelf^foo+debug') + self.check_constrain_not_changed( + 'libelf^foo~debug', 'libelf^foo~debug') + self.check_constrain_not_changed( + 'libelf^foo cppflags="-O3"', 'libelf^foo cppflags="-O3"') platform = spack.architecture.platform() default_target = platform.target('default_target').name - self.check_constrain_not_changed('libelf^foo target='+default_target, 'libelf^foo target='+default_target) + self.check_constrain_not_changed( + 'libelf^foo target=' + default_target, + 'libelf^foo target=' + default_target) diff --git a/lib/spack/spack/test/spec_syntax.py 
b/lib/spack/spack/test/spec_syntax.py index 4a534d7b5c..3079288c77 100644 --- a/lib/spack/spack/test/spec_syntax.py +++ b/lib/spack/spack/test/spec_syntax.py @@ -55,17 +55,22 @@ complex_lex = [Token(ID, 'mvapich_foo'), class SpecSyntaxTest(unittest.TestCase): - # ================================================================================ + # ======================================================================== # Parse checks - # ================================================================================ + # ======================================================================== + def check_parse(self, expected, spec=None, remove_arch=True): """Assert that the provided spec is able to be parsed. - If this is called with one argument, it assumes that the string is - canonical (i.e., no spaces and ~ instead of - for variants) and that it - will convert back to the string it came from. - If this is called with two arguments, the first argument is the expected - canonical form and the second is a non-canonical input to be parsed. + If this is called with one argument, it assumes that the + string is canonical (i.e., no spaces and ~ instead of - for + variants) and that it will convert back to the string it came + from. + + If this is called with two arguments, the first argument is + the expected canonical form and the second is a non-canonical + input to be parsed. + """ if spec is None: spec = expected @@ -74,9 +79,8 @@ class SpecSyntaxTest(unittest.TestCase): parsed = (" ".join(str(spec) for spec in output)) self.assertEqual(expected, parsed) - def check_lex(self, tokens, spec): - """Check that the provided spec parses to the provided list of tokens.""" + """Check that the provided spec parses to the provided token list.""" lex_output = SpecLexer().lex(spec) for tok, spec_tok in zip(tokens, lex_output): if tok.type == ID: @@ -85,9 +89,9 @@ class SpecSyntaxTest(unittest.TestCase): # Only check the type for non-identifiers. 
self.assertEqual(tok.type, spec_tok.type) - # ================================================================================ + # ======================================================================== # Parse checks - # =============================================================================== + # ======================================================================== def test_package_names(self): self.check_parse("mvapich") self.check_parse("mvapich_foo") @@ -104,18 +108,37 @@ class SpecSyntaxTest(unittest.TestCase): self.check_parse("openmpi^hwloc@1.2e6:1.4b7-rc3") def test_full_specs(self): - self.check_parse("mvapich_foo^_openmpi@1.2:1.4,1.6%intel@12.1+debug~qt_4^stackwalker@8.1_1e") - self.check_parse("mvapich_foo^_openmpi@1.2:1.4,1.6%intel@12.1 debug=2~qt_4^stackwalker@8.1_1e") - self.check_parse('mvapich_foo^_openmpi@1.2:1.4,1.6%intel@12.1 cppflags="-O3"+debug~qt_4^stackwalker@8.1_1e') + self.check_parse( + "mvapich_foo" + "^_openmpi@1.2:1.4,1.6%intel@12.1+debug~qt_4" + "^stackwalker@8.1_1e") + self.check_parse( + "mvapich_foo" + "^_openmpi@1.2:1.4,1.6%intel@12.1 debug=2~qt_4" + "^stackwalker@8.1_1e") + self.check_parse( + 'mvapich_foo' + '^_openmpi@1.2:1.4,1.6%intel@12.1 cppflags="-O3"+debug~qt_4' + '^stackwalker@8.1_1e') def test_canonicalize(self): self.check_parse( - "mvapich_foo^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug~qt_4^stackwalker@8.1_1e", - "mvapich_foo ^_openmpi@1.6,1.2:1.4%intel@12.1:12.6+debug~qt_4 ^stackwalker@8.1_1e") + "mvapich_foo" + "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug~qt_4" + "^stackwalker@8.1_1e", + + "mvapich_foo " + "^_openmpi@1.6,1.2:1.4%intel@12.1:12.6+debug~qt_4 " + "^stackwalker@8.1_1e") self.check_parse( - "mvapich_foo^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug~qt_4^stackwalker@8.1_1e", - "mvapich_foo ^stackwalker@8.1_1e ^_openmpi@1.6,1.2:1.4%intel@12.1:12.6~qt_4+debug") + "mvapich_foo" + "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug~qt_4" + "^stackwalker@8.1_1e", + + "mvapich_foo " + "^stackwalker@8.1_1e " + "^_openmpi@1.6,1.2:1.4%intel@12.1:12.6~qt_4+debug") self.check_parse( "x^y@1,2:3,4%intel@1,2,3,4+a~b+c~d+e~f", @@ -130,58 +153,81 @@ class SpecSyntaxTest(unittest.TestCase): self.assertRaises(SpecParseError, self.check_parse, "x::") def test_duplicate_variant(self): - self.assertRaises(DuplicateVariantError, self.check_parse, "x@1.2+debug+debug") - self.assertRaises(DuplicateVariantError, self.check_parse, "x ^y@1.2+debug debug=true") - self.assertRaises(DuplicateVariantError, self.check_parse, "x ^y@1.2 debug=false debug=true") - self.assertRaises(DuplicateVariantError, self.check_parse, "x ^y@1.2 debug=false~debug") - + self.assertRaises(DuplicateVariantError, + self.check_parse, "x@1.2+debug+debug") + self.assertRaises(DuplicateVariantError, + self.check_parse, "x ^y@1.2+debug debug=true") + self.assertRaises(DuplicateVariantError, self.check_parse, + "x ^y@1.2 debug=false debug=true") + self.assertRaises(DuplicateVariantError, + self.check_parse, "x ^y@1.2 debug=false~debug") def test_duplicate_depdendence(self): - self.assertRaises(DuplicateDependencyError, self.check_parse, "x ^y ^y") + self.assertRaises(DuplicateDependencyError, + self.check_parse, "x ^y ^y") def test_duplicate_compiler(self): - self.assertRaises(DuplicateCompilerSpecError, self.check_parse, "x%intel%intel") - self.assertRaises(DuplicateCompilerSpecError, self.check_parse, "x%intel%gcc") - self.assertRaises(DuplicateCompilerSpecError, self.check_parse, "x%gcc%intel") - self.assertRaises(DuplicateCompilerSpecError, self.check_parse, "x ^y%intel%intel") - 
self.assertRaises(DuplicateCompilerSpecError, self.check_parse, "x ^y%intel%gcc") - self.assertRaises(DuplicateCompilerSpecError, self.check_parse, "x ^y%gcc%intel") - - - # ================================================================================ + self.assertRaises(DuplicateCompilerSpecError, + self.check_parse, "x%intel%intel") + self.assertRaises(DuplicateCompilerSpecError, + self.check_parse, "x%intel%gcc") + self.assertRaises(DuplicateCompilerSpecError, + self.check_parse, "x%gcc%intel") + self.assertRaises(DuplicateCompilerSpecError, + self.check_parse, "x ^y%intel%intel") + self.assertRaises(DuplicateCompilerSpecError, + self.check_parse, "x ^y%intel%gcc") + self.assertRaises(DuplicateCompilerSpecError, + self.check_parse, "x ^y%gcc%intel") + + # ======================================================================== # Lex checks - # ================================================================================ + # ======================================================================== def test_ambiguous(self): # This first one is ambiguous because - can be in an identifier AND # indicate disabling an option. self.assertRaises( AssertionError, self.check_lex, complex_lex, - "mvapich_foo^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug-qt_4^stackwalker@8.1_1e") + "mvapich_foo" + "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug-qt_4" + "^stackwalker@8.1_1e") - # The following lexes are non-ambiguous (add a space before -qt_4) and should all - # result in the tokens in complex_lex + # The following lexes are non-ambiguous (add a space before -qt_4) + # and should all result in the tokens in complex_lex def test_minimal_spaces(self): self.check_lex( complex_lex, - "mvapich_foo^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug -qt_4^stackwalker@8.1_1e") + "mvapich_foo" + "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug -qt_4" + "^stackwalker@8.1_1e") self.check_lex( complex_lex, - "mvapich_foo^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug~qt_4^stackwalker@8.1_1e") + "mvapich_foo" + "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug~qt_4" + "^stackwalker@8.1_1e") def test_spaces_between_dependences(self): self.check_lex( complex_lex, - "mvapich_foo ^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug -qt_4 ^stackwalker @ 8.1_1e") + "mvapich_foo " + "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug -qt_4 " + "^stackwalker @ 8.1_1e") self.check_lex( complex_lex, - "mvapich_foo ^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug~qt_4 ^stackwalker @ 8.1_1e") + "mvapich_foo " + "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug~qt_4 " + "^stackwalker @ 8.1_1e") def test_spaces_between_options(self): self.check_lex( complex_lex, - "mvapich_foo ^_openmpi @1.2:1.4,1.6 %intel @12.1:12.6 +debug -qt_4 ^stackwalker @8.1_1e") + "mvapich_foo " + "^_openmpi @1.2:1.4,1.6 %intel @12.1:12.6 +debug -qt_4 " + "^stackwalker @8.1_1e") def test_way_too_many_spaces(self): self.check_lex( complex_lex, - "mvapich_foo ^ _openmpi @ 1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 ^ stackwalker @ 8.1_1e") + "mvapich_foo " + "^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 " + "^ stackwalker @ 8.1_1e") diff --git a/lib/spack/spack/test/stage.py b/lib/spack/spack/test/stage.py index d3e3bf1383..ec661bfe50 100644 --- a/lib/spack/spack/test/stage.py +++ b/lib/spack/spack/test/stage.py @@ -62,6 +62,7 @@ def use_tmp(use_tmp): class StageTest(unittest.TestCase): + def setUp(self): """This sets up a mock archive to fetch, and a mock temp space for use by the Stage class. 
It doesn't actually create the Stage -- that @@ -89,7 +90,6 @@ class StageTest(unittest.TestCase): # be removed. self.working_dir = os.getcwd() - def tearDown(self): """Blows away the test environment directory.""" shutil.rmtree(test_files_dir) @@ -100,7 +100,6 @@ class StageTest(unittest.TestCase): # restore spack's original tmp environment spack.tmp_dirs = self.old_tmp_dirs - def get_stage_path(self, stage, stage_name): """Figure out where a stage should be living. This depends on whether it's named. @@ -114,7 +113,6 @@ class StageTest(unittest.TestCase): self.assertTrue(stage.path.startswith(spack.stage_path)) return stage.path - def check_setup(self, stage, stage_name): """Figure out whether a stage was set up correctly.""" stage_path = self.get_stage_path(stage, stage_name) @@ -139,14 +137,12 @@ class StageTest(unittest.TestCase): # Make sure the stage path is NOT a link for a non-tmp stage self.assertFalse(os.path.islink(stage_path)) - def check_fetch(self, stage, stage_name): stage_path = self.get_stage_path(stage, stage_name) self.assertTrue(archive_name in os.listdir(stage_path)) self.assertEqual(join_path(stage_path, archive_name), stage.fetcher.archive_file) - def check_expand_archive(self, stage, stage_name): stage_path = self.get_stage_path(stage, stage_name) self.assertTrue(archive_name in os.listdir(stage_path)) @@ -162,19 +158,16 @@ class StageTest(unittest.TestCase): with open(readme) as file: self.assertEqual(readme_text, file.read()) - def check_chdir(self, stage, stage_name): stage_path = self.get_stage_path(stage, stage_name) self.assertEqual(os.path.realpath(stage_path), os.getcwd()) - def check_chdir_to_source(self, stage, stage_name): stage_path = self.get_stage_path(stage, stage_name) self.assertEqual( join_path(os.path.realpath(stage_path), archive_dir), os.getcwd()) - def check_destroy(self, stage, stage_name): """Figure out whether a stage was destroyed correctly.""" stage_path = self.get_stage_path(stage, stage_name) @@ -187,35 +180,30 @@ class StageTest(unittest.TestCase): target = os.path.realpath(stage_path) self.assertFalse(os.path.exists(target)) - def test_setup_and_destroy_name_with_tmp(self): with use_tmp(True): with Stage(archive_url, name=stage_name) as stage: self.check_setup(stage, stage_name) self.check_destroy(stage, stage_name) - def test_setup_and_destroy_name_without_tmp(self): with use_tmp(False): with Stage(archive_url, name=stage_name) as stage: self.check_setup(stage, stage_name) self.check_destroy(stage, stage_name) - def test_setup_and_destroy_no_name_with_tmp(self): with use_tmp(True): with Stage(archive_url) as stage: self.check_setup(stage, None) self.check_destroy(stage, None) - def test_setup_and_destroy_no_name_without_tmp(self): with use_tmp(False): with Stage(archive_url) as stage: self.check_setup(stage, None) self.check_destroy(stage, None) - def test_chdir(self): with Stage(archive_url, name=stage_name) as stage: stage.chdir() @@ -223,7 +211,6 @@ class StageTest(unittest.TestCase): self.check_chdir(stage, stage_name) self.check_destroy(stage, stage_name) - def test_fetch(self): with Stage(archive_url, name=stage_name) as stage: stage.fetch() @@ -232,7 +219,6 @@ class StageTest(unittest.TestCase): self.check_fetch(stage, stage_name) self.check_destroy(stage, stage_name) - def test_expand_archive(self): with Stage(archive_url, name=stage_name) as stage: stage.fetch() @@ -242,8 +228,7 @@ class StageTest(unittest.TestCase): self.check_expand_archive(stage, stage_name) self.check_destroy(stage, stage_name) - - def 
test_expand_archive(self): + def test_expand_archive_with_chdir(self): with Stage(archive_url, name=stage_name) as stage: stage.fetch() self.check_setup(stage, stage_name) @@ -254,7 +239,6 @@ class StageTest(unittest.TestCase): self.check_chdir_to_source(stage, stage_name) self.check_destroy(stage, stage_name) - def test_restage(self): with Stage(archive_url, name=stage_name) as stage: stage.fetch() @@ -278,20 +262,17 @@ class StageTest(unittest.TestCase): self.assertFalse('foobar' in os.listdir(stage.source_path)) self.check_destroy(stage, stage_name) - def test_no_keep_without_exceptions(self): with Stage(archive_url, name=stage_name, keep=False) as stage: pass self.check_destroy(stage, stage_name) - def test_keep_without_exceptions(self): with Stage(archive_url, name=stage_name, keep=True) as stage: pass path = self.get_stage_path(stage, stage_name) self.assertTrue(os.path.isdir(path)) - def test_no_keep_with_exceptions(self): try: with Stage(archive_url, name=stage_name, keep=False) as stage: @@ -300,8 +281,7 @@ class StageTest(unittest.TestCase): path = self.get_stage_path(stage, stage_name) self.assertTrue(os.path.isdir(path)) except: - pass # ignore here. - + pass # ignore here. def test_keep_exceptions(self): try: @@ -311,4 +291,4 @@ class StageTest(unittest.TestCase): path = self.get_stage_path(stage, stage_name) self.assertTrue(os.path.isdir(path)) except: - pass # ignore here. + pass # ignore here. diff --git a/lib/spack/spack/test/svn_fetch.py b/lib/spack/spack/test/svn_fetch.py index 0a745a090b..9ef7593ed1 100644 --- a/lib/spack/spack/test/svn_fetch.py +++ b/lib/spack/spack/test/svn_fetch.py @@ -94,17 +94,15 @@ class SvnFetchTest(MockPackagesTest): self.assert_rev(rev) - def test_fetch_default(self): """Test a default checkout and make sure it's on rev 1""" self.try_fetch(self.repo.r1, self.repo.r1_file, { - 'svn' : self.repo.url + 'svn': self.repo.url }) - def test_fetch_r1(self): """Test fetching an older revision (0).""" self.try_fetch(self.repo.r0, self.repo.r0_file, { - 'svn' : self.repo.url, - 'revision' : self.repo.r0 + 'svn': self.repo.url, + 'revision': self.repo.r0 }) diff --git a/lib/spack/spack/test/tally_plugin.py b/lib/spack/spack/test/tally_plugin.py index 96af1c9b21..808694d186 100644 --- a/lib/spack/spack/test/tally_plugin.py +++ b/lib/spack/spack/test/tally_plugin.py @@ -26,6 +26,7 @@ import os from nose.plugins import Plugin + class Tally(Plugin): name = 'tally' diff --git a/lib/spack/spack/test/url_extrapolate.py b/lib/spack/spack/test/url_extrapolate.py index ffd4230f71..ca14dab958 100644 --- a/lib/spack/spack/test/url_extrapolate.py +++ b/lib/spack/spack/test/url_extrapolate.py @@ -34,20 +34,21 @@ class UrlExtrapolateTest(unittest.TestCase): def check_url(self, base, version, new_url): self.assertEqual(url.substitute_version(base, version), new_url) - def test_libelf_version(self): base = "http://www.mr511.de/software/libelf-0.8.13.tar.gz" self.check_url(base, '0.8.13', base) - self.check_url(base, '0.8.12', "http://www.mr511.de/software/libelf-0.8.12.tar.gz") - self.check_url(base, '0.3.1', "http://www.mr511.de/software/libelf-0.3.1.tar.gz") - self.check_url(base, '1.3.1b', "http://www.mr511.de/software/libelf-1.3.1b.tar.gz") - + self.check_url( + base, '0.8.12', "http://www.mr511.de/software/libelf-0.8.12.tar.gz") + self.check_url( + base, '0.3.1', "http://www.mr511.de/software/libelf-0.3.1.tar.gz") + self.check_url( + base, '1.3.1b', "http://www.mr511.de/software/libelf-1.3.1b.tar.gz") def test_libdwarf_version(self): base = 
"http://www.prevanders.net/libdwarf-20130729.tar.gz" self.check_url(base, '20130729', base) - self.check_url(base, '8.12', "http://www.prevanders.net/libdwarf-8.12.tar.gz") - + self.check_url( + base, '8.12', "http://www.prevanders.net/libdwarf-8.12.tar.gz") def test_dyninst_version(self): # Dyninst has a version twice in the URL. @@ -58,7 +59,6 @@ class UrlExtrapolateTest(unittest.TestCase): self.check_url(base, '8.3.1', "http://www.dyninst.org/sites/default/files/downloads/dyninst/8.3.1/DyninstAPI-8.3.1.tgz") - def test_partial_version_prefix(self): # Test now with a partial prefix earlier in the URL -- this is # hard to figure out so Spack only substitutes the last @@ -72,7 +72,6 @@ class UrlExtrapolateTest(unittest.TestCase): self.check_url(base, '8.3.1', "http://www.dyninst.org/sites/default/files/downloads/dyninst/8.1/DyninstAPI-8.3.1.tgz") - def test_scalasca_partial_version(self): # Note that this probably doesn't actually work, but sites are # inconsistent about their directory structure, so it's not @@ -84,19 +83,16 @@ class UrlExtrapolateTest(unittest.TestCase): self.check_url('http://apps.fz-juelich.de/scalasca/releases/cube/4.3/dist/cube-4.3-TP1.tar.gz', '8.3.1', 'http://apps.fz-juelich.de/scalasca/releases/cube/4.3/dist/cube-8.3.1.tar.gz') - def test_mpileaks_version(self): self.check_url('https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz', '2.1.3', 'https://github.com/hpc/mpileaks/releases/download/v2.1.3/mpileaks-2.1.3.tar.gz') - def test_gcc(self): self.check_url('http://open-source-box.org/gcc/gcc-4.9.2/gcc-4.9.2.tar.bz2', '4.7', 'http://open-source-box.org/gcc/gcc-4.7/gcc-4.7.tar.bz2') self.check_url('http://open-source-box.org/gcc/gcc-4.4.7/gcc-4.4.7.tar.bz2', '4.4.7', 'http://open-source-box.org/gcc/gcc-4.4.7/gcc-4.4.7.tar.bz2') - def test_github_raw(self): self.check_url('https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true', '2.0.7', 'https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true') diff --git a/lib/spack/spack/test/url_parse.py b/lib/spack/spack/test/url_parse.py index 648996aaaa..6c944a3e7a 100644 --- a/lib/spack/spack/test/url_parse.py +++ b/lib/spack/spack/test/url_parse.py @@ -32,11 +32,11 @@ import spack.url as url class UrlParseTest(unittest.TestCase): + def assert_not_detected(self, string): self.assertRaises( url.UndetectableVersionError, url.parse_name_and_version, string) - def check(self, name, v, string, **kwargs): # Make sure correct name and version are extracted. parsed_name, parsed_v = url.parse_name_and_version(string) @@ -52,7 +52,6 @@ class UrlParseTest(unittest.TestCase): # build one with a specific version. 
self.assertEqual(string, url.substitute_version(string, v)) - def test_wwwoffle_version(self): self.check( 'wwwoffle', '2.9h', @@ -72,7 +71,7 @@ class UrlParseTest(unittest.TestCase): def test_version_all_dots(self): self.check( - 'foo.bar.la', '1.14','http://example.com/foo.bar.la.1.14.zip') + 'foo.bar.la', '1.14', 'http://example.com/foo.bar.la.1.14.zip') def test_version_underscore_separator(self): self.check( @@ -286,7 +285,7 @@ class UrlParseTest(unittest.TestCase): 'mvapich2', '1.9', 'http://mvapich.cse.ohio-state.edu/download/mvapich2/mv2/mvapich2-1.9.tgz') - def test_mvapich2_19_version(self): + def test_mvapich2_20_version(self): self.check( 'mvapich2', '2.0', 'http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-2.0.tar.gz') diff --git a/lib/spack/spack/test/url_substitution.py b/lib/spack/spack/test/url_substitution.py index 9cc04834b6..ea6374e3d2 100644 --- a/lib/spack/spack/test/url_substitution.py +++ b/lib/spack/spack/test/url_substitution.py @@ -26,37 +26,31 @@ This test does sanity checks on substituting new versions into URLs """ import unittest - import spack.url as url +base = "https://comp.llnl.gov/linear_solvers/download/hypre-2.9.0b.tar.gz" +stem = "https://comp.llnl.gov/linear_solvers/download/hypre-" + + class PackageSanityTest(unittest.TestCase): - def test_hypre_url_substitution(self): - base = "https://computation-rnd.llnl.gov/linear_solvers/download/hypre-2.9.0b.tar.gz" + def test_hypre_url_substitution(self): self.assertEqual(url.substitute_version(base, '2.9.0b'), base) self.assertEqual( - url.substitute_version(base, '2.8.0b'), - "https://computation-rnd.llnl.gov/linear_solvers/download/hypre-2.8.0b.tar.gz") + url.substitute_version(base, '2.8.0b'), stem + "2.8.0b.tar.gz") self.assertEqual( - url.substitute_version(base, '2.7.0b'), - "https://computation-rnd.llnl.gov/linear_solvers/download/hypre-2.7.0b.tar.gz") + url.substitute_version(base, '2.7.0b'), stem + "2.7.0b.tar.gz") self.assertEqual( - url.substitute_version(base, '2.6.0b'), - "https://computation-rnd.llnl.gov/linear_solvers/download/hypre-2.6.0b.tar.gz") + url.substitute_version(base, '2.6.0b'), stem + "2.6.0b.tar.gz") self.assertEqual( - url.substitute_version(base, '1.14.0b'), - "https://computation-rnd.llnl.gov/linear_solvers/download/hypre-1.14.0b.tar.gz") + url.substitute_version(base, '1.14.0b'), stem + "1.14.0b.tar.gz") self.assertEqual( - url.substitute_version(base, '1.13.0b'), - "https://computation-rnd.llnl.gov/linear_solvers/download/hypre-1.13.0b.tar.gz") + url.substitute_version(base, '1.13.0b'), stem + "1.13.0b.tar.gz") self.assertEqual( - url.substitute_version(base, '2.0.0'), - "https://computation-rnd.llnl.gov/linear_solvers/download/hypre-2.0.0.tar.gz") + url.substitute_version(base, '2.0.0'), stem + "2.0.0.tar.gz") self.assertEqual( - url.substitute_version(base, '1.6.0'), - "https://computation-rnd.llnl.gov/linear_solvers/download/hypre-1.6.0.tar.gz") - + url.substitute_version(base, '1.6.0'), stem + "1.6.0.tar.gz") def test_otf2_url_substitution(self): base = "http://www.vi-hps.org/upload/packages/otf2/otf2-1.4.tar.gz" diff --git a/lib/spack/spack/test/yaml.py b/lib/spack/spack/test/yaml.py index f1b83e7b71..dedbd15d10 100644 --- a/lib/spack/spack/test/yaml.py +++ b/lib/spack/spack/test/yaml.py @@ -45,26 +45,25 @@ config_file: """ test_data = { - 'config_file' : syaml.syaml_dict([ + 'config_file': syaml.syaml_dict([ ('x86_64', syaml.syaml_dict([ ('foo', '/path/to/foo'), ('bar', '/path/to/bar'), - ('baz', '/path/to/baz' )])), - ('some_list', [ 'item 1', 'item 2', 
'item 3' ]), - ('another_list', [ 1, 2, 3 ]), + ('baz', '/path/to/baz')])), + ('some_list', ['item 1', 'item 2', 'item 3']), + ('another_list', [1, 2, 3]), ('some_key', 'some_string') ])} + class YamlTest(unittest.TestCase): def setUp(self): self.data = syaml.load(test_file) - def test_parse(self): self.assertEqual(test_data, self.data) - def test_dict_order(self): self.assertEqual( ['x86_64', 'some_list', 'another_list', 'some_key'], @@ -74,7 +73,6 @@ class YamlTest(unittest.TestCase): ['foo', 'bar', 'baz'], self.data['config_file']['x86_64'].keys()) - def test_line_numbers(self): def check(obj, start_line, end_line): self.assertEqual(obj._start_mark.line, start_line) diff --git a/lib/spack/spack/url.py b/lib/spack/spack/url.py index f678a2dca9..02c9c04380 100644 --- a/lib/spack/spack/url.py +++ b/lib/spack/spack/url.py @@ -56,12 +56,12 @@ import spack.error import spack.util.compression as comp from spack.version import Version + # # Note: We call the input to most of these functions a "path" but the functions # work on paths and URLs. There's not a good word for both of these, but # "path" seemed like the most generic term. # - def find_list_url(url): """Finds a good list URL for the supplied URL. This depends on the site. By default, just assumes that a good list URL is the @@ -71,8 +71,8 @@ def find_list_url(url): url_types = [ # e.g. https://github.com/llnl/callpath/archive/v1.0.1.tar.gz - (r'^(https://github.com/[^/]+/[^/]+)/archive/', lambda m: m.group(1) + '/releases') - ] + (r'^(https://github.com/[^/]+/[^/]+)/archive/', + lambda m: m.group(1) + '/releases')] for pattern, fun in url_types: match = re.search(pattern, url) @@ -89,8 +89,10 @@ def strip_query_and_fragment(path): query, frag = components[3:5] suffix = '' - if query: suffix += '?' + query - if frag: suffix += '#' + frag + if query: + suffix += '?' + query + if frag: + suffix += '#' + frag return (urlunsplit(stripped), suffix) @@ -152,8 +154,10 @@ def downloaded_file_extension(path): """ match = re.search(r'github.com/.+/(zip|tar)ball/', path) if match: - if match.group(1) == 'zip': return 'zip' - elif match.group(1) == 'tar': return 'tar.gz' + if match.group(1) == 'zip': + return 'zip' + elif match.group(1) == 'tar': + return 'tar.gz' prefix, ext, suffix = split_url_extension(path) if not ext: @@ -193,7 +197,8 @@ def parse_version_offset(path): (r'[-_](R\d+[AB]\d*(-\d+)?)', path), # e.g., https://github.com/hpc/libcircle/releases/download/0.2.1-rc.1/libcircle-0.2.1-rc.1.tar.gz - # e.g., https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz + # e.g., + # https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz (r'github.com/[^/]+/[^/]+/releases/download/v?([^/]+)/.*$', path), # e.g. boost_1_39_0 @@ -201,7 +206,7 @@ def parse_version_offset(path): # e.g. foobar-4.5.1-1 # e.g. ruby-1.9.1-p243 - (r'-((\d+\.)*\d\.\d+-(p|rc|RC)?\d+)(?:[-._](?:bin|dist|stable|src|sources))?$', stem), + (r'-((\d+\.)*\d\.\d+-(p|rc|RC)?\d+)(?:[-._](?:bin|dist|stable|src|sources))?$', stem), # noqa # e.g. 
lame-398-1 (r'-((\d)+-\d)', stem), @@ -275,7 +280,8 @@ def parse_name_offset(path, v=None): name_types = [ (r'/sourceforge/([^/]+)/', path), - (r'github.com/[^/]+/[^/]+/releases/download/%s/(.*)-%s$' % (v, v), path), + (r'github.com/[^/]+/[^/]+/releases/download/%s/(.*)-%s$' % + (v, v), path), (r'/([^/]+)/(tarball|zipball)/', path), (r'/([^/]+)[_.-](bin|dist|stable|src|sources)[_.-]%s' % v, path), (r'github.com/[^/]+/([^/]+)/archive', path), @@ -283,7 +289,8 @@ def parse_name_offset(path, v=None): (r'([^/]+)[_.-]v?%s' % v, stem), # prefer the stem (r'([^/]+)%s' % v, stem), - (r'/([^/]+)[_.-]v?%s' % v, path), # accept the path if name is not in stem. + # accept the path if name is not in stem. + (r'/([^/]+)[_.-]v?%s' % v, path), (r'/([^/]+)%s' % v, path), (r'^([^/]+)[_.-]v?%s' % v, path), @@ -326,7 +333,7 @@ def insensitize(string): return re.sub(r'([a-zA-Z])', to_ins, string) -def cumsum(elts, init=0, fn=lambda x:x): +def cumsum(elts, init=0, fn=lambda x: x): """Return cumulative sum of result of fn on each element in elts.""" sums = [] s = init @@ -337,21 +344,20 @@ def cumsum(elts, init=0, fn=lambda x:x): def substitution_offsets(path): - """This returns offsets for substituting versions and names in the provided path. - It is a helper for substitute_version(). + """This returns offsets for substituting versions and names in the + provided path. It is a helper for substitute_version(). """ # Get name and version offsets try: ver, vs, vl = parse_version_offset(path) name, ns, nl = parse_name_offset(path, ver) - except UndetectableNameError, e: + except UndetectableNameError: return (None, -1, -1, (), ver, vs, vl, (vs,)) - except UndetectableVersionError, e: + except UndetectableVersionError: return (None, -1, -1, (), None, -1, -1, ()) # protect extensions like bz2 from getting inadvertently # considered versions. - ext = comp.extension(path) path = comp.strip_extension(path) # Construct a case-insensitive regular expression for the package name. @@ -449,7 +455,7 @@ def color_url(path, **kwargs): Cyan: The version found by parse_version_offset(). Red: The name found by parse_name_offset(). - Green: Instances of version string substituted by substitute_version(). + Green: Instances of version string from substitute_version(). Magenta: Instances of the name (protected from substitution). 
Optional args: @@ -469,31 +475,46 @@ def color_url(path, **kwargs): nerr = verr = 0 out = StringIO() for i in range(len(path)): - if i == vs: out.write('@c'); verr += 1 - elif i == ns: out.write('@r'); nerr += 1 + if i == vs: + out.write('@c') + verr += 1 + elif i == ns: + out.write('@r') + nerr += 1 elif subs: - if i in voffs: out.write('@g') - elif i in noffs: out.write('@m') + if i in voffs: + out.write('@g') + elif i in noffs: + out.write('@m') out.write(path[i]) - if i == vs + vl - 1: out.write('@.'); verr += 1 - elif i == ns + nl - 1: out.write('@.'); nerr += 1 + if i == vs + vl - 1: + out.write('@.') + verr += 1 + elif i == ns + nl - 1: + out.write('@.') + nerr += 1 elif subs: if i in vends or i in nends: out.write('@.') if errors: - if nerr == 0: out.write(" @r{[no name]}") - if verr == 0: out.write(" @r{[no version]}") - if nerr == 1: out.write(" @r{[incomplete name]}") - if verr == 1: out.write(" @r{[incomplete version]}") + if nerr == 0: + out.write(" @r{[no name]}") + if verr == 0: + out.write(" @r{[no version]}") + if nerr == 1: + out.write(" @r{[incomplete name]}") + if verr == 1: + out.write(" @r{[incomplete version]}") return colorize(out.getvalue()) class UrlParseError(spack.error.SpackError): """Raised when the URL module can't parse something correctly.""" + def __init__(self, msg, path): super(UrlParseError, self).__init__(msg) self.path = path @@ -501,6 +522,7 @@ class UrlParseError(spack.error.SpackError): class UndetectableVersionError(UrlParseError): """Raised when we can't parse a version from a string.""" + def __init__(self, path): super(UndetectableVersionError, self).__init__( "Couldn't detect version in: " + path, path) @@ -508,6 +530,7 @@ class UndetectableVersionError(UrlParseError): class UndetectableNameError(UrlParseError): """Raised when we can't parse a package name from a string.""" + def __init__(self, path): super(UndetectableNameError, self).__init__( "Couldn't parse package name in: " + path, path) diff --git a/lib/spack/spack/util/compression.py b/lib/spack/spack/util/compression.py index dc1188eb0f..64554ab2f7 100644 --- a/lib/spack/spack/util/compression.py +++ b/lib/spack/spack/util/compression.py @@ -32,7 +32,9 @@ PRE_EXTS = ["tar"] EXTS = ["gz", "bz2", "xz", "Z", "zip", "tgz"] # Add PRE_EXTS and EXTS last so that .tar.gz is matched *before* .tar or .gz -ALLOWED_ARCHIVE_TYPES = [".".join(l) for l in product(PRE_EXTS, EXTS)] + PRE_EXTS + EXTS +ALLOWED_ARCHIVE_TYPES = [".".join(l) for l in product( + PRE_EXTS, EXTS)] + PRE_EXTS + EXTS + def allowed_archive(path): return any(path.endswith(t) for t in ALLOWED_ARCHIVE_TYPES) diff --git a/lib/spack/spack/util/crypto.py b/lib/spack/spack/util/crypto.py index 1ae9793518..22777fdb68 100644 --- a/lib/spack/spack/util/crypto.py +++ b/lib/spack/spack/util/crypto.py @@ -31,7 +31,7 @@ _acceptable_hashes = [ hashlib.sha224, hashlib.sha256, hashlib.sha384, - hashlib.sha512 ] + hashlib.sha512] """Index for looking up hasher for a digest.""" _size_to_hash = dict((h().digest_size, h) for h in _acceptable_hashes) @@ -52,7 +52,6 @@ def checksum(hashlib_algo, filename, **kwargs): return hasher.hexdigest() - class Checker(object): """A checker checks files against one particular hex digest. It will automatically determine what hashing algorithm @@ -74,25 +73,25 @@ class Checker(object): adjusting the block_size optional arg. By default it's a 1MB (2**20 bytes) buffer. 
""" + def __init__(self, hexdigest, **kwargs): self.block_size = kwargs.get('block_size', 2**20) self.hexdigest = hexdigest self.sum = None bytes = len(hexdigest) / 2 - if not bytes in _size_to_hash: + if bytes not in _size_to_hash: raise ValueError( - 'Spack knows no hash algorithm for this digest: %s' % hexdigest) + 'Spack knows no hash algorithm for this digest: %s' + % hexdigest) self.hash_fun = _size_to_hash[bytes] - @property def hash_name(self): """Get the name of the hash function this Checker is using.""" return self.hash_fun().name - def check(self, filename): """Read the file with the specified name and check its checksum against self.hexdigest. Return True if they match, False diff --git a/lib/spack/spack/util/debug.py b/lib/spack/spack/util/debug.py index e8a0595416..cf485a611d 100644 --- a/lib/spack/spack/util/debug.py +++ b/lib/spack/spack/util/debug.py @@ -33,10 +33,11 @@ import code import traceback import signal + def debug_handler(sig, frame): """Interrupt running process, and provide a python prompt for interactive debugging.""" - d = {'_frame':frame} # Allow access to frame object. + d = {'_frame': frame} # Allow access to frame object. d.update(frame.f_globals) # Unless shadowed by global d.update(frame.f_locals) @@ -48,5 +49,5 @@ def debug_handler(sig, frame): def register_interrupt_handler(): - """Register a handler to print a stack trace and enter an interpreter on Ctrl-C""" + """Print traceback and enter an interpreter on Ctrl-C""" signal.signal(signal.SIGINT, debug_handler) diff --git a/lib/spack/spack/util/executable.py b/lib/spack/spack/util/executable.py index 14b56e8d6c..5c27b92df5 100644 --- a/lib/spack/spack/util/executable.py +++ b/lib/spack/spack/util/executable.py @@ -165,7 +165,6 @@ class Executable(object): raise ProcessError("Command exited with status %d:" % proc.returncode, cmd_line) - if output is str or error is str: result = '' if output is str: @@ -227,6 +226,7 @@ def which(name, **kwargs): class ProcessError(spack.error.SpackError): + def __init__(self, msg, long_message=None): # These are used for detailed debugging information for # package builds. They're built up gradually as the exception diff --git a/lib/spack/spack/util/multiproc.py b/lib/spack/spack/util/multiproc.py index 038cd90121..6a25c45713 100644 --- a/lib/spack/spack/util/multiproc.py +++ b/lib/spack/spack/util/multiproc.py @@ -32,18 +32,21 @@ from itertools import izip __all__ = ['spawn', 'parmap', 'Barrier'] + def spawn(f): - def fun(pipe,x): + def fun(pipe, x): pipe.send(f(x)) pipe.close() return fun -def parmap(f,X): - pipe=[Pipe() for x in X] - proc=[Process(target=spawn(f),args=(c,x)) for x,(p,c) in izip(X,pipe)] + +def parmap(f, X): + pipe = [Pipe() for x in X] + proc = [Process(target=spawn(f), args=(c, x)) + for x, (p, c) in izip(X, pipe)] [p.start() for p in proc] [p.join() for p in proc] - return [p.recv() for (p,c) in pipe] + return [p.recv() for (p, c) in pipe] class Barrier: @@ -53,6 +56,7 @@ class Barrier: See http://greenteapress.com/semaphores/downey08semaphores.pdf, p. 41. 
""" + def __init__(self, n, timeout=None): self.n = n self.to = timeout @@ -61,7 +65,6 @@ class Barrier: self.turnstile1 = Semaphore(0) self.turnstile2 = Semaphore(1) - def wait(self): if not self.mutex.acquire(timeout=self.to): raise BarrierTimeoutError() @@ -90,4 +93,5 @@ class Barrier: self.turnstile2.release() -class BarrierTimeoutError: pass +class BarrierTimeoutError: + pass diff --git a/lib/spack/spack/util/naming.py b/lib/spack/spack/util/naming.py index 2d9762942d..9a5cdee411 100644 --- a/lib/spack/spack/util/naming.py +++ b/lib/spack/spack/util/naming.py @@ -31,9 +31,15 @@ from StringIO import StringIO import spack -__all__ = ['mod_to_class', 'spack_module_to_python_module', 'valid_module_name', - 'valid_fully_qualified_module_name', 'validate_fully_qualified_module_name', - 'validate_module_name', 'possible_spack_module_names', 'NamespaceTrie'] +__all__ = [ + 'mod_to_class', + 'spack_module_to_python_module', + 'valid_module_name', + 'valid_fully_qualified_module_name', + 'validate_fully_qualified_module_name', + 'validate_module_name', + 'possible_spack_module_names', + 'NamespaceTrie'] # Valid module names can contain '-' but can't start with it. _valid_module_re = r'^\w[\w-]*$' @@ -67,8 +73,8 @@ def mod_to_class(mod_name): class_name = string.capwords(class_name, '-') class_name = class_name.replace('-', '') - # If a class starts with a number, prefix it with Number_ to make it a valid - # Python class name. + # If a class starts with a number, prefix it with Number_ to make it + # a valid Python class name. if re.match(r'^[0-9]', class_name): class_name = "_%s" % class_name @@ -126,6 +132,7 @@ def validate_fully_qualified_module_name(mod_name): class InvalidModuleNameError(spack.error.SpackError): """Raised when we encounter a bad module name.""" + def __init__(self, name): super(InvalidModuleNameError, self).__init__( "Invalid module name: " + name) @@ -134,6 +141,7 @@ class InvalidModuleNameError(spack.error.SpackError): class InvalidFullyQualifiedModuleNameError(spack.error.SpackError): """Raised when we encounter a bad full package name.""" + def __init__(self, name): super(InvalidFullyQualifiedModuleNameError, self).__init__( "Invalid fully qualified package name: " + name) @@ -141,17 +149,17 @@ class InvalidFullyQualifiedModuleNameError(spack.error.SpackError): class NamespaceTrie(object): + class Element(object): + def __init__(self, value): self.value = value - def __init__(self, separator='.'): self._subspaces = {} self._value = None self._sep = separator - def __setitem__(self, namespace, value): first, sep, rest = namespace.partition(self._sep) @@ -164,7 +172,6 @@ class NamespaceTrie(object): self._subspaces[first][rest] = value - def _get_helper(self, namespace, full_name): first, sep, rest = namespace.partition(self._sep) if not first: @@ -176,13 +183,12 @@ class NamespaceTrie(object): else: return self._subspaces[first]._get_helper(rest, full_name) - def __getitem__(self, namespace): return self._get_helper(namespace, namespace) - def is_prefix(self, namespace): - """True if the namespace has a value, or if it's the prefix of one that does.""" + """True if the namespace has a value, or if it's the prefix of one that + does.""" first, sep, rest = namespace.partition(self._sep) if not first: return True @@ -191,7 +197,6 @@ class NamespaceTrie(object): else: return self._subspaces[first].is_prefix(rest) - def is_leaf(self, namespace): """True if this namespace has no children in the trie.""" first, sep, rest = namespace.partition(self._sep) @@ -202,7 +207,6 @@ 
class NamespaceTrie(object): else: return self._subspaces[first].is_leaf(rest) - def has_value(self, namespace): """True if there is a value set for the given namespace.""" first, sep, rest = namespace.partition(self._sep) @@ -213,20 +217,17 @@ class NamespaceTrie(object): else: return self._subspaces[first].has_value(rest) - def __contains__(self, namespace): """Returns whether a value has been set for the namespace.""" return self.has_value(namespace) - def _str_helper(self, stream, level=0): indent = (level * ' ') for name in sorted(self._subspaces): stream.write(indent + name + '\n') if self._value: stream.write(indent + ' ' + repr(self._value.value)) - stream.write(self._subspaces[name]._str_helper(stream, level+1)) - + stream.write(self._subspaces[name]._str_helper(stream, level + 1)) def __str__(self): stream = StringIO() diff --git a/lib/spack/spack/util/pattern.py b/lib/spack/spack/util/pattern.py index bc5e9d2ffe..c36445193c 100644 --- a/lib/spack/spack/util/pattern.py +++ b/lib/spack/spack/util/pattern.py @@ -53,7 +53,9 @@ def composite(interface=None, method_list=None, container=list): # Check if at least one of the 'interface' or the 'method_list' arguments # are defined if interface is None and method_list is None: - raise TypeError("Either 'interface' or 'method_list' must be defined on a call to composite") # NOQA : ignore=E501 + raise TypeError( + "Either 'interface' or 'method_list' must be defined on a call " + "to composite") def cls_decorator(cls): # Retrieve the base class of the composite. Inspect its methods and @@ -102,7 +104,8 @@ def composite(interface=None, method_list=None, container=list): # python@2.7: interface_methods = {name: method for name, method in # inspect.getmembers(interface, predicate=no_special_no_private)} interface_methods = {} - for name, method in inspect.getmembers(interface, predicate=no_special_no_private): # NOQA: ignore=E501 + for name, method in inspect.getmembers( + interface, predicate=no_special_no_private): interface_methods[name] = method ########## # python@2.7: interface_methods_dict = {name: IterateOver(name, @@ -118,7 +121,8 @@ def composite(interface=None, method_list=None, container=list): # python@2.7: cls_method = {name: method for name, method in # inspect.getmembers(cls, predicate=inspect.ismethod)} cls_method = {} - for name, method in inspect.getmembers(cls, predicate=inspect.ismethod): # NOQA: ignore=E501 + for name, method in inspect.getmembers( + cls, predicate=inspect.ismethod): cls_method[name] = method ########## dictionary_for_type_call.update(cls_method) diff --git a/lib/spack/spack/util/prefix.py b/lib/spack/spack/util/prefix.py index e1a0f2958b..985d862269 100644 --- a/lib/spack/spack/util/prefix.py +++ b/lib/spack/spack/util/prefix.py @@ -27,6 +27,7 @@ This file contains utilities to help with installing packages. """ from llnl.util.filesystem import join_path + class Prefix(str): """This class represents an installation prefix, but provides useful attributes for referring to directories inside the prefix. 
diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py index 909f9a57a8..7bcdf2d61f 100644 --- a/lib/spack/spack/util/spack_yaml.py +++ b/lib/spack/spack/util/spack_yaml.py @@ -34,7 +34,6 @@ import yaml from yaml.nodes import * from yaml.constructor import ConstructorError -from yaml.representer import SafeRepresenter from ordereddict_backport import OrderedDict # Only export load and dump @@ -42,15 +41,23 @@ __all__ = ['load', 'dump'] # Make new classes so we can add custom attributes. # Also, use OrderedDict instead of just dict. + + class syaml_dict(OrderedDict): + def __repr__(self): - mappings = ('%r: %r' % (k,v) for k,v in self.items()) + mappings = ('%r: %r' % (k, v) for k, v in self.items()) return '{%s}' % ', '.join(mappings) + + class syaml_list(list): __repr__ = list.__repr__ + + class syaml_str(str): __repr__ = str.__repr__ + def mark(obj, node): """Add start and end markers to an object.""" obj._start_mark = node.start_mark @@ -73,6 +80,7 @@ class OrderedLineLoader(yaml.Loader): # The standard YAML constructors return empty instances and fill # in with mappings later. We preserve this behavior. # + def construct_yaml_str(self, node): value = self.construct_scalar(node) try: @@ -83,14 +91,12 @@ class OrderedLineLoader(yaml.Loader): mark(value, node) return value - def construct_yaml_seq(self, node): data = syaml_list() mark(data, node) yield data data.extend(self.construct_sequence(node)) - def construct_yaml_map(self, node): data = syaml_dict() mark(data, node) @@ -104,22 +110,23 @@ class OrderedLineLoader(yaml.Loader): # def construct_sequence(self, node, deep=False): if not isinstance(node, SequenceNode): - raise ConstructorError(None, None, - "expected a sequence node, but found %s" % node.id, - node.start_mark) - value = syaml_list(self.construct_object(child, deep=deep) - for child in node.value) + raise ConstructorError( + None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + value = syaml_list(self.construct_object(child, deep=deep) + for child in node.value) mark(value, node) return value - def construct_mapping(self, node, deep=False): """Store mappings as OrderedDicts instead of as regular python dictionaries to preserve file ordering.""" if not isinstance(node, MappingNode): - raise ConstructorError(None, None, - "expected a mapping node, but found %s" % node.id, - node.start_mark) + raise ConstructorError( + None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) mapping = syaml_dict() for key_node, value_node in node.value: @@ -127,22 +134,26 @@ class OrderedLineLoader(yaml.Loader): try: hash(key) except TypeError, exc: - raise ConstructorError("while constructing a mapping", node.start_mark, - "found unacceptable key (%s)" % exc, key_node.start_mark) + raise ConstructorError( + "while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) value = self.construct_object(value_node, deep=deep) if key in mapping: - raise ConstructorError("while constructing a mapping", node.start_mark, - "found already in-use key (%s)" % key, key_node.start_mark) + raise ConstructorError( + "while constructing a mapping", node.start_mark, + "found already in-use key (%s)" % key, key_node.start_mark) mapping[key] = value mark(mapping, node) return mapping # register above new constructors -OrderedLineLoader.add_constructor(u'tag:yaml.org,2002:map', OrderedLineLoader.construct_yaml_map) -OrderedLineLoader.add_constructor(u'tag:yaml.org,2002:seq', 
OrderedLineLoader.construct_yaml_seq) -OrderedLineLoader.add_constructor(u'tag:yaml.org,2002:str', OrderedLineLoader.construct_yaml_str) - +OrderedLineLoader.add_constructor( + u'tag:yaml.org,2002:map', OrderedLineLoader.construct_yaml_map) +OrderedLineLoader.add_constructor( + u'tag:yaml.org,2002:seq', OrderedLineLoader.construct_yaml_seq) +OrderedLineLoader.add_constructor( + u'tag:yaml.org,2002:str', OrderedLineLoader.construct_yaml_str) class OrderedLineDumper(yaml.Dumper): @@ -154,6 +165,7 @@ class OrderedLineDumper(yaml.Dumper): regular Python equivalents, instead of ugly YAML pyobjects. """ + def represent_mapping(self, tag, mapping, flow_style=None): value = [] node = MappingNode(tag, value, flow_style=flow_style) @@ -173,7 +185,8 @@ class OrderedLineDumper(yaml.Dumper): node_value = self.represent_data(item_value) if not (isinstance(node_key, ScalarNode) and not node_key.style): best_style = False - if not (isinstance(node_value, ScalarNode) and not node_value.style): + if not (isinstance(node_value, ScalarNode) and + not node_value.style): best_style = False value.append((node_key, node_value)) if flow_style is None: diff --git a/lib/spack/spack/util/string.py b/lib/spack/spack/util/string.py index ce017b8126..dae7afbf46 100644 --- a/lib/spack/spack/util/string.py +++ b/lib/spack/spack/util/string.py @@ -23,6 +23,7 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## + def comma_list(sequence, article=''): if type(sequence) != list: sequence = list(sequence) @@ -32,7 +33,7 @@ def comma_list(sequence, article=''): elif len(sequence) == 1: return sequence[0] else: - out = ', '.join(str(s) for s in sequence[:-1]) + out = ', '.join(str(s) for s in sequence[:-1]) if len(sequence) != 2: out += ',' # oxford comma out += ' ' @@ -41,6 +42,7 @@ def comma_list(sequence, article=''): out += str(sequence[-1]) return out + def comma_or(sequence): return comma_list(sequence, 'or') diff --git a/lib/spack/spack/util/web.py b/lib/spack/spack/util/web.py index cac783a368..25f1e605d6 100644 --- a/lib/spack/spack/util/web.py +++ b/lib/spack/spack/util/web.py @@ -43,6 +43,7 @@ TIMEOUT = 10 class LinkParser(HTMLParser): """This parser just takes an HTML page and strips out the hrefs on the links. Good enough for a really simple spider. """ + def __init__(self): HTMLParser.__init__(self) self.links = [] diff --git a/lib/spack/spack/variant.py b/lib/spack/spack/variant.py index ad875f5ef5..b2c1a73489 100644 --- a/lib/spack/spack/variant.py +++ b/lib/spack/spack/variant.py @@ -29,8 +29,10 @@ currently variants are just flags. """ + class Variant(object): """Represents a variant on a build. 
Can be either on or off.""" + def __init__(self, default, description): self.default = default self.description = str(description) diff --git a/lib/spack/spack/version.py b/lib/spack/spack/version.py index 6f6c83b3d8..e1311eb0d9 100644 --- a/lib/spack/spack/version.py +++ b/lib/spack/spack/version.py @@ -50,6 +50,8 @@ from functools import wraps from functools_backport import total_ordering +__all__ = ['Version', 'VersionRange', 'VersionList', 'ver'] + # Valid version characters VALID_VERSION = r'[A-Za-z0-9_.-]' @@ -346,8 +348,8 @@ class VersionRange(object): s, o = self, other if s.start != o.start: - return s.start is None or (o.start is not None and s.start < o.start) # NOQA: ignore=E501 - + return s.start is None or ( + o.start is not None and s.start < o.start) return (s.end != o.end and o.end is None or (s.end is not None and s.end < o.end)) diff --git a/lib/spack/spack/yaml_version_check.py b/lib/spack/spack/yaml_version_check.py index c2d084d6c3..2c5b511d7f 100644 --- a/lib/spack/spack/yaml_version_check.py +++ b/lib/spack/spack/yaml_version_check.py @@ -34,6 +34,7 @@ import spack.config def check_yaml_versions(): check_compiler_yaml_version() + def check_compiler_yaml_version(): config_scopes = spack.config.config_scopes for scope in config_scopes.values(): @@ -46,7 +47,8 @@ def check_compiler_yaml_version(): if data: compilers = data['compilers'] if len(compilers) > 0: - if (not isinstance(compilers, list)) or 'operating_system' not in compilers[0]['compiler']: + if (not isinstance(compilers, list) or + 'operating_system' not in compilers[0]['compiler']): new_file = os.path.join(scope.path, '_old_compilers.yaml') tty.warn('%s in out of date compilers format. ' 'Moved to %s. Spack automatically generate ' diff --git a/share/spack/qa/run-flake8 b/share/spack/qa/run-flake8 index 595df417ec..c59bfc9490 100755 --- a/share/spack/qa/run-flake8 +++ b/share/spack/qa/run-flake8 @@ -22,9 +22,13 @@ changed=$(git diff --name-only --find-renames develop... | grep '.py$') # Add approved style exemptions to the changed packages. for file in $changed; do - if [[ $file = *package.py ]]; then - cp "$file" "$file~" + # Make a backup to restore later + cp "$file" "$file.sbak~" + # + # Exemptions for package.py files + # + if [[ $file = *package.py ]]; then # Exempt lines with urls and descriptions from overlong line errors. perl -i -pe 's/^(\s*homepage\s*=.*)$/\1 # NOQA: ignore=E501/' $file perl -i -pe 's/^(\s*url\s*=.*)$/\1 # NOQA: ignore=E501/' $file @@ -36,6 +40,11 @@ for file in $changed; do # Exempt '@when' decorated functions from redefinition errors. perl -i -pe 's/^(\s*\@when\(.*\).*)$/\1 # NOQA: ignore=F811/' $file fi + + # + # Exemptions for all files + # + perl -i -pe 's/^(.*(https?|file)\:.*)$/\1 # NOQA: ignore=E501/' $file done return_code=0 @@ -58,8 +67,8 @@ fi # Restore original package files after modifying them. for file in $changed; do - if [[ $file = *package.py ]]; then - mv "${file}~" "${file}" + if [[ -e "${file}.sbak~" ]]; then + mv "${file}.sbak~" "${file}" fi done -- cgit v1.2.3-60-g2f50 From 2850ca60be1c5a5943af5ca404faad5bb08aef42 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Mon, 29 Aug 2016 09:56:54 -0700 Subject: Change spack --profile sort key for 2.6 compatibility. 
(#1656) --- bin/spack | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'bin') diff --git a/bin/spack b/bin/spack index 9fed11f33b..7efa88f1ee 100755 --- a/bin/spack +++ b/bin/spack @@ -176,7 +176,7 @@ def main(): if args.profile: import cProfile - cProfile.run('main()', sort='tottime') + cProfile.run('main()', sort='time') elif args.pdb: import pdb pdb.run('main()') -- cgit v1.2.3-60-g2f50 From ae2a803496772db9ce6f9245d165b44f587e4389 Mon Sep 17 00:00:00 2001 From: "Adam J. Stewart" Date: Tue, 30 Aug 2016 10:47:38 -0500 Subject: Make subcommands importable, '-' -> '_', fixes #1642 --- bin/spack | 11 +- lib/spack/spack/cmd/package-list.py | 104 ------------- lib/spack/spack/cmd/package_list.py | 104 +++++++++++++ lib/spack/spack/cmd/test-install.py | 245 ------------------------------- lib/spack/spack/cmd/test_install.py | 245 +++++++++++++++++++++++++++++++ lib/spack/spack/cmd/url-parse.py | 75 ---------- lib/spack/spack/cmd/url_parse.py | 75 ++++++++++ lib/spack/spack/test/cmd/test_install.py | 6 +- 8 files changed, 434 insertions(+), 431 deletions(-) delete mode 100644 lib/spack/spack/cmd/package-list.py create mode 100644 lib/spack/spack/cmd/package_list.py delete mode 100644 lib/spack/spack/cmd/test-install.py create mode 100644 lib/spack/spack/cmd/test_install.py delete mode 100644 lib/spack/spack/cmd/url-parse.py create mode 100644 lib/spack/spack/cmd/url_parse.py (limited to 'bin') diff --git a/bin/spack b/bin/spack index 7efa88f1ee..323a06aa53 100755 --- a/bin/spack +++ b/bin/spack @@ -56,8 +56,15 @@ with warnings.catch_warnings(): # Spack, were removed, but shadow system modules that Spack still # imports. If we leave them, Spack will fail in mysterious ways. # TODO: more elegant solution for orphaned pyc files. -orphaned_pyc_files = [os.path.join(SPACK_EXTERNAL_LIBS, n) - for n in ('functools.pyc', 'ordereddict.pyc')] +orphaned_pyc_files = [ + os.path.join(SPACK_EXTERNAL_LIBS, 'functools.pyc'), + os.path.join(SPACK_EXTERNAL_LIBS, 'ordereddict.pyc'), + os.path.join(SPACK_LIB_PATH, 'spack', 'platforms', 'cray_xc.pyc'), + os.path.join(SPACK_LIB_PATH, 'spack', 'cmd', 'package-list.pyc'), + os.path.join(SPACK_LIB_PATH, 'spack', 'cmd', 'test-install.pyc'), + os.path.join(SPACK_LIB_PATH, 'spack', 'cmd', 'url-parse.pyc') +] + for pyc_file in orphaned_pyc_files: if not os.path.exists(pyc_file): continue diff --git a/lib/spack/spack/cmd/package-list.py b/lib/spack/spack/cmd/package-list.py deleted file mode 100644 index 42f408af96..0000000000 --- a/lib/spack/spack/cmd/package-list.py +++ /dev/null @@ -1,104 +0,0 @@ -############################################################################## -# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# -# This file is part of Spack. -# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. -# LLNL-CODE-647188 -# -# For details, see https://github.com/llnl/spack -# Please also see the LICENSE file for our notice and the LGPL. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License (as -# published by the Free Software Foundation) version 2.1, February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and -# conditions of the GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -############################################################################## -import cgi -from StringIO import StringIO -from llnl.util.tty.colify import * -import spack - -description = "Print a list of all packages in reStructuredText." - - -def github_url(pkg): - """Link to a package file on github.""" - url = "https://github.com/LLNL/spack/blob/develop/var/spack/repos/builtin/packages/{0}/package.py" - return url.format(pkg.name) - - -def rst_table(elts): - """Print out a RST-style table.""" - cols = StringIO() - ncol, widths = colify(elts, output=cols, tty=True) - header = " ".join("=" * (w - 1) for w in widths) - return "%s\n%s%s" % (header, cols.getvalue(), header) - - -def print_rst_package_list(): - """Print out information on all packages in restructured text.""" - pkgs = sorted(spack.repo.all_packages(), key=lambda s: s.name.lower()) - pkg_names = [p.name for p in pkgs] - - print ".. _package-list:" - print - print "============" - print "Package List" - print "============" - print - print "This is a list of things you can install using Spack. It is" - print "automatically generated based on the packages in the latest Spack" - print "release." - print - print "Spack currently has %d mainline packages:" % len(pkgs) - print - print rst_table("`%s`_" % p for p in pkg_names) - print - - # Output some text for each package. - for pkg in pkgs: - print "-----" - print - print ".. _%s:" % pkg.name - print - # Must be at least 2 long, breaks for single letter packages like R. - print "-" * max(len(pkg.name), 2) - print pkg.name - print "-" * max(len(pkg.name), 2) - print - print "Homepage:" - print " * `%s <%s>`__" % (cgi.escape(pkg.homepage), pkg.homepage) - print - print "Spack package:" - print " * `%s/package.py <%s>`__" % (pkg.name, github_url(pkg)) - print - if pkg.versions: - print "Versions:" - print " " + ", ".join(str(v) for v in - reversed(sorted(pkg.versions))) - print - - for deptype in spack.alldeps: - deps = pkg.dependencies_of_type(deptype) - if deps: - print "%s Dependencies" % deptype.capitalize() - print " " + ", ".join("%s_" % d if d in pkg_names - else d for d in deps) - print - - print "Description:" - print pkg.format_doc(indent=2) - print - - -def package_list(parser, args): - print_rst_package_list() diff --git a/lib/spack/spack/cmd/package_list.py b/lib/spack/spack/cmd/package_list.py new file mode 100644 index 0000000000..42f408af96 --- /dev/null +++ b/lib/spack/spack/cmd/package_list.py @@ -0,0 +1,104 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. 
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +import cgi +from StringIO import StringIO +from llnl.util.tty.colify import * +import spack + +description = "Print a list of all packages in reStructuredText." + + +def github_url(pkg): + """Link to a package file on github.""" + url = "https://github.com/LLNL/spack/blob/develop/var/spack/repos/builtin/packages/{0}/package.py" + return url.format(pkg.name) + + +def rst_table(elts): + """Print out a RST-style table.""" + cols = StringIO() + ncol, widths = colify(elts, output=cols, tty=True) + header = " ".join("=" * (w - 1) for w in widths) + return "%s\n%s%s" % (header, cols.getvalue(), header) + + +def print_rst_package_list(): + """Print out information on all packages in restructured text.""" + pkgs = sorted(spack.repo.all_packages(), key=lambda s: s.name.lower()) + pkg_names = [p.name for p in pkgs] + + print ".. _package-list:" + print + print "============" + print "Package List" + print "============" + print + print "This is a list of things you can install using Spack. It is" + print "automatically generated based on the packages in the latest Spack" + print "release." + print + print "Spack currently has %d mainline packages:" % len(pkgs) + print + print rst_table("`%s`_" % p for p in pkg_names) + print + + # Output some text for each package. + for pkg in pkgs: + print "-----" + print + print ".. _%s:" % pkg.name + print + # Must be at least 2 long, breaks for single letter packages like R. + print "-" * max(len(pkg.name), 2) + print pkg.name + print "-" * max(len(pkg.name), 2) + print + print "Homepage:" + print " * `%s <%s>`__" % (cgi.escape(pkg.homepage), pkg.homepage) + print + print "Spack package:" + print " * `%s/package.py <%s>`__" % (pkg.name, github_url(pkg)) + print + if pkg.versions: + print "Versions:" + print " " + ", ".join(str(v) for v in + reversed(sorted(pkg.versions))) + print + + for deptype in spack.alldeps: + deps = pkg.dependencies_of_type(deptype) + if deps: + print "%s Dependencies" % deptype.capitalize() + print " " + ", ".join("%s_" % d if d in pkg_names + else d for d in deps) + print + + print "Description:" + print pkg.format_doc(indent=2) + print + + +def package_list(parser, args): + print_rst_package_list() diff --git a/lib/spack/spack/cmd/test-install.py b/lib/spack/spack/cmd/test-install.py deleted file mode 100644 index 8e7173e9a2..0000000000 --- a/lib/spack/spack/cmd/test-install.py +++ /dev/null @@ -1,245 +0,0 @@ -############################################################################## -# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# -# This file is part of Spack. -# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. -# LLNL-CODE-647188 -# -# For details, see https://github.com/llnl/spack -# Please also see the LICENSE file for our notice and the LGPL. 
-# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License (as -# published by the Free Software Foundation) version 2.1, February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and -# conditions of the GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -############################################################################## -import argparse -import codecs -import os -import time -import xml.dom.minidom -import xml.etree.ElementTree as ET - -import llnl.util.tty as tty -import spack -import spack.cmd -from llnl.util.filesystem import * -from spack.build_environment import InstallError -from spack.fetch_strategy import FetchError - -description = "Run package install as a unit test, output formatted results." - - -def setup_parser(subparser): - subparser.add_argument( - '-j', '--jobs', action='store', type=int, - help="Explicitly set number of make jobs. Default is #cpus.") - - subparser.add_argument( - '-n', '--no-checksum', action='store_true', dest='no_checksum', - help="Do not check packages against checksum") - - subparser.add_argument( - '-o', '--output', action='store', - help="test output goes in this file") - - subparser.add_argument( - 'package', nargs=argparse.REMAINDER, - help="spec of package to install") - - -class TestResult(object): - PASSED = 0 - FAILED = 1 - SKIPPED = 2 - ERRORED = 3 - - -class TestSuite(object): - - def __init__(self, filename): - self.filename = filename - self.root = ET.Element('testsuite') - self.tests = [] - - def __enter__(self): - return self - - def append(self, item): - if not isinstance(item, TestCase): - raise TypeError( - 'only TestCase instances may be appended to TestSuite') - self.tests.append(item) # Append the item to the list of tests - - def __exit__(self, exc_type, exc_val, exc_tb): - # Prepare the header for the entire test suite - number_of_errors = sum( - x.result_type == TestResult.ERRORED for x in self.tests) - self.root.set('errors', str(number_of_errors)) - number_of_failures = sum( - x.result_type == TestResult.FAILED for x in self.tests) - self.root.set('failures', str(number_of_failures)) - self.root.set('tests', str(len(self.tests))) - - for item in self.tests: - self.root.append(item.element) - - with open(self.filename, 'wb') as file: - xml_string = ET.tostring(self.root) - xml_string = xml.dom.minidom.parseString(xml_string).toprettyxml() - file.write(xml_string) - - -class TestCase(object): - - results = { - TestResult.PASSED: None, - TestResult.SKIPPED: 'skipped', - TestResult.FAILED: 'failure', - TestResult.ERRORED: 'error', - } - - def __init__(self, classname, name, time=None): - self.element = ET.Element('testcase') - self.element.set('classname', str(classname)) - self.element.set('name', str(name)) - if time is not None: - self.element.set('time', str(time)) - self.result_type = None - - def set_result(self, result_type, - message=None, error_type=None, text=None): - self.result_type = result_type - result = TestCase.results[self.result_type] - if result is not None and result is not TestResult.PASSED: - subelement = ET.SubElement(self.element, result) - 
if error_type is not None: - subelement.set('type', error_type) - if message is not None: - subelement.set('message', str(message)) - if text is not None: - subelement.text = text - - -def fetch_log(path): - if not os.path.exists(path): - return list() - with codecs.open(path, 'rb', 'utf-8') as F: - return list(line.strip() for line in F.readlines()) - - -def failed_dependencies(spec): - def get_deps(deptype): - return set(item for item in spec.dependencies(deptype) - if not spack.repo.get(item).installed) - link_deps = get_deps('link') - run_deps = get_deps('run') - return link_deps.union(run_deps) - - -def get_top_spec_or_die(args): - specs = spack.cmd.parse_specs(args.package, concretize=True) - if len(specs) > 1: - tty.die("Only 1 top-level package can be specified") - top_spec = iter(specs).next() - return top_spec - - -def install_single_spec(spec, number_of_jobs): - package = spack.repo.get(spec) - - # If it is already installed, skip the test - if spack.repo.get(spec).installed: - testcase = TestCase(package.name, package.spec.short_spec, time=0.0) - testcase.set_result( - TestResult.SKIPPED, - message='Skipped [already installed]', - error_type='already_installed') - return testcase - - # If it relies on dependencies that did not install, skip - if failed_dependencies(spec): - testcase = TestCase(package.name, package.spec.short_spec, time=0.0) - testcase.set_result( - TestResult.SKIPPED, - message='Skipped [failed dependencies]', - error_type='dep_failed') - return testcase - - # Otherwise try to install the spec - try: - start_time = time.time() - package.do_install(keep_prefix=False, - keep_stage=True, - ignore_deps=False, - make_jobs=number_of_jobs, - verbose=True, - fake=False) - duration = time.time() - start_time - testcase = TestCase(package.name, package.spec.short_spec, duration) - testcase.set_result(TestResult.PASSED) - except InstallError: - # An InstallError is considered a failure (the recipe didn't work - # correctly) - duration = time.time() - start_time - # Try to get the log - lines = fetch_log(package.build_log_path) - text = '\n'.join(lines) - testcase = TestCase(package.name, package.spec.short_spec, duration) - testcase.set_result(TestResult.FAILED, - message='Installation failure', text=text) - - except FetchError: - # A FetchError is considered an error (we didn't even start building) - duration = time.time() - start_time - testcase = TestCase(package.name, package.spec.short_spec, duration) - testcase.set_result(TestResult.ERRORED, - message='Unable to fetch package') - - return testcase - - -def get_filename(args, top_spec): - if not args.output: - fname = 'test-{x.name}-{x.version}-{hash}.xml'.format( - x=top_spec, hash=top_spec.dag_hash()) - output_directory = join_path(os.getcwd(), 'test-output') - if not os.path.exists(output_directory): - os.mkdir(output_directory) - output_filename = join_path(output_directory, fname) - else: - output_filename = args.output - return output_filename - - -def test_install(parser, args): - # Check the input - if not args.package: - tty.die("install requires a package argument") - - if args.jobs is not None: - if args.jobs <= 0: - tty.die("The -j option must be a positive integer!") - - if args.no_checksum: - spack.do_checksum = False # TODO: remove this global. 
- - # Get the one and only top spec - top_spec = get_top_spec_or_die(args) - # Get the filename of the test - output_filename = get_filename(args, top_spec) - # TEST SUITE - with TestSuite(output_filename) as test_suite: - # Traverse in post order : each spec is a test case - for spec in top_spec.traverse(order='post'): - test_case = install_single_spec(spec, args.jobs) - test_suite.append(test_case) diff --git a/lib/spack/spack/cmd/test_install.py b/lib/spack/spack/cmd/test_install.py new file mode 100644 index 0000000000..8e7173e9a2 --- /dev/null +++ b/lib/spack/spack/cmd/test_install.py @@ -0,0 +1,245 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +import argparse +import codecs +import os +import time +import xml.dom.minidom +import xml.etree.ElementTree as ET + +import llnl.util.tty as tty +import spack +import spack.cmd +from llnl.util.filesystem import * +from spack.build_environment import InstallError +from spack.fetch_strategy import FetchError + +description = "Run package install as a unit test, output formatted results." + + +def setup_parser(subparser): + subparser.add_argument( + '-j', '--jobs', action='store', type=int, + help="Explicitly set number of make jobs. 
Default is #cpus.") + + subparser.add_argument( + '-n', '--no-checksum', action='store_true', dest='no_checksum', + help="Do not check packages against checksum") + + subparser.add_argument( + '-o', '--output', action='store', + help="test output goes in this file") + + subparser.add_argument( + 'package', nargs=argparse.REMAINDER, + help="spec of package to install") + + +class TestResult(object): + PASSED = 0 + FAILED = 1 + SKIPPED = 2 + ERRORED = 3 + + +class TestSuite(object): + + def __init__(self, filename): + self.filename = filename + self.root = ET.Element('testsuite') + self.tests = [] + + def __enter__(self): + return self + + def append(self, item): + if not isinstance(item, TestCase): + raise TypeError( + 'only TestCase instances may be appended to TestSuite') + self.tests.append(item) # Append the item to the list of tests + + def __exit__(self, exc_type, exc_val, exc_tb): + # Prepare the header for the entire test suite + number_of_errors = sum( + x.result_type == TestResult.ERRORED for x in self.tests) + self.root.set('errors', str(number_of_errors)) + number_of_failures = sum( + x.result_type == TestResult.FAILED for x in self.tests) + self.root.set('failures', str(number_of_failures)) + self.root.set('tests', str(len(self.tests))) + + for item in self.tests: + self.root.append(item.element) + + with open(self.filename, 'wb') as file: + xml_string = ET.tostring(self.root) + xml_string = xml.dom.minidom.parseString(xml_string).toprettyxml() + file.write(xml_string) + + +class TestCase(object): + + results = { + TestResult.PASSED: None, + TestResult.SKIPPED: 'skipped', + TestResult.FAILED: 'failure', + TestResult.ERRORED: 'error', + } + + def __init__(self, classname, name, time=None): + self.element = ET.Element('testcase') + self.element.set('classname', str(classname)) + self.element.set('name', str(name)) + if time is not None: + self.element.set('time', str(time)) + self.result_type = None + + def set_result(self, result_type, + message=None, error_type=None, text=None): + self.result_type = result_type + result = TestCase.results[self.result_type] + if result is not None and result is not TestResult.PASSED: + subelement = ET.SubElement(self.element, result) + if error_type is not None: + subelement.set('type', error_type) + if message is not None: + subelement.set('message', str(message)) + if text is not None: + subelement.text = text + + +def fetch_log(path): + if not os.path.exists(path): + return list() + with codecs.open(path, 'rb', 'utf-8') as F: + return list(line.strip() for line in F.readlines()) + + +def failed_dependencies(spec): + def get_deps(deptype): + return set(item for item in spec.dependencies(deptype) + if not spack.repo.get(item).installed) + link_deps = get_deps('link') + run_deps = get_deps('run') + return link_deps.union(run_deps) + + +def get_top_spec_or_die(args): + specs = spack.cmd.parse_specs(args.package, concretize=True) + if len(specs) > 1: + tty.die("Only 1 top-level package can be specified") + top_spec = iter(specs).next() + return top_spec + + +def install_single_spec(spec, number_of_jobs): + package = spack.repo.get(spec) + + # If it is already installed, skip the test + if spack.repo.get(spec).installed: + testcase = TestCase(package.name, package.spec.short_spec, time=0.0) + testcase.set_result( + TestResult.SKIPPED, + message='Skipped [already installed]', + error_type='already_installed') + return testcase + + # If it relies on dependencies that did not install, skip + if failed_dependencies(spec): + testcase = 
TestCase(package.name, package.spec.short_spec, time=0.0) + testcase.set_result( + TestResult.SKIPPED, + message='Skipped [failed dependencies]', + error_type='dep_failed') + return testcase + + # Otherwise try to install the spec + try: + start_time = time.time() + package.do_install(keep_prefix=False, + keep_stage=True, + ignore_deps=False, + make_jobs=number_of_jobs, + verbose=True, + fake=False) + duration = time.time() - start_time + testcase = TestCase(package.name, package.spec.short_spec, duration) + testcase.set_result(TestResult.PASSED) + except InstallError: + # An InstallError is considered a failure (the recipe didn't work + # correctly) + duration = time.time() - start_time + # Try to get the log + lines = fetch_log(package.build_log_path) + text = '\n'.join(lines) + testcase = TestCase(package.name, package.spec.short_spec, duration) + testcase.set_result(TestResult.FAILED, + message='Installation failure', text=text) + + except FetchError: + # A FetchError is considered an error (we didn't even start building) + duration = time.time() - start_time + testcase = TestCase(package.name, package.spec.short_spec, duration) + testcase.set_result(TestResult.ERRORED, + message='Unable to fetch package') + + return testcase + + +def get_filename(args, top_spec): + if not args.output: + fname = 'test-{x.name}-{x.version}-{hash}.xml'.format( + x=top_spec, hash=top_spec.dag_hash()) + output_directory = join_path(os.getcwd(), 'test-output') + if not os.path.exists(output_directory): + os.mkdir(output_directory) + output_filename = join_path(output_directory, fname) + else: + output_filename = args.output + return output_filename + + +def test_install(parser, args): + # Check the input + if not args.package: + tty.die("install requires a package argument") + + if args.jobs is not None: + if args.jobs <= 0: + tty.die("The -j option must be a positive integer!") + + if args.no_checksum: + spack.do_checksum = False # TODO: remove this global. + + # Get the one and only top spec + top_spec = get_top_spec_or_die(args) + # Get the filename of the test + output_filename = get_filename(args, top_spec) + # TEST SUITE + with TestSuite(output_filename) as test_suite: + # Traverse in post order : each spec is a test case + for spec in top_spec.traverse(order='post'): + test_case = install_single_spec(spec, args.jobs) + test_suite.append(test_case) diff --git a/lib/spack/spack/cmd/url-parse.py b/lib/spack/spack/cmd/url-parse.py deleted file mode 100644 index b8c7c95040..0000000000 --- a/lib/spack/spack/cmd/url-parse.py +++ /dev/null @@ -1,75 +0,0 @@ -############################################################################## -# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# -# This file is part of Spack. -# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. -# LLNL-CODE-647188 -# -# For details, see https://github.com/llnl/spack -# Please also see the LICENSE file for our notice and the LGPL. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License (as -# published by the Free Software Foundation) version 2.1, February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and -# conditions of the GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -############################################################################## -import llnl.util.tty as tty - -import spack -import spack.url -from spack.util.web import find_versions_of_archive - -description = "Show parsing of a URL, optionally spider web for versions." - - -def setup_parser(subparser): - subparser.add_argument('url', help="url of a package archive") - subparser.add_argument( - '-s', '--spider', action='store_true', - help="Spider the source page for versions.") - - -def print_name_and_version(url): - name, ns, nl, ntup, ver, vs, vl, vtup = spack.url.substitution_offsets(url) - underlines = [" "] * max(ns + nl, vs + vl) - for i in range(ns, ns + nl): - underlines[i] = '-' - for i in range(vs, vs + vl): - underlines[i] = '~' - - print " %s" % url - print " %s" % ''.join(underlines) - - -def url_parse(parser, args): - url = args.url - - ver, vs, vl = spack.url.parse_version_offset(url) - name, ns, nl = spack.url.parse_name_offset(url, ver) - - tty.msg("Parsing URL:") - try: - print_name_and_version(url) - except spack.url.UrlParseError as e: - tty.error(str(e)) - - print - tty.msg("Substituting version 9.9.9b:") - newurl = spack.url.substitute_version(url, '9.9.9b') - print_name_and_version(newurl) - - if args.spider: - print - tty.msg("Spidering for versions:") - versions = find_versions_of_archive(url) - for v in sorted(versions): - print "%-20s%s" % (v, versions[v]) diff --git a/lib/spack/spack/cmd/url_parse.py b/lib/spack/spack/cmd/url_parse.py new file mode 100644 index 0000000000..b8c7c95040 --- /dev/null +++ b/lib/spack/spack/cmd/url_parse.py @@ -0,0 +1,75 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +import llnl.util.tty as tty + +import spack +import spack.url +from spack.util.web import find_versions_of_archive + +description = "Show parsing of a URL, optionally spider web for versions." 
+ + +def setup_parser(subparser): + subparser.add_argument('url', help="url of a package archive") + subparser.add_argument( + '-s', '--spider', action='store_true', + help="Spider the source page for versions.") + + +def print_name_and_version(url): + name, ns, nl, ntup, ver, vs, vl, vtup = spack.url.substitution_offsets(url) + underlines = [" "] * max(ns + nl, vs + vl) + for i in range(ns, ns + nl): + underlines[i] = '-' + for i in range(vs, vs + vl): + underlines[i] = '~' + + print " %s" % url + print " %s" % ''.join(underlines) + + +def url_parse(parser, args): + url = args.url + + ver, vs, vl = spack.url.parse_version_offset(url) + name, ns, nl = spack.url.parse_name_offset(url, ver) + + tty.msg("Parsing URL:") + try: + print_name_and_version(url) + except spack.url.UrlParseError as e: + tty.error(str(e)) + + print + tty.msg("Substituting version 9.9.9b:") + newurl = spack.url.substitute_version(url, '9.9.9b') + print_name_and_version(newurl) + + if args.spider: + print + tty.msg("Spidering for versions:") + versions = find_versions_of_archive(url) + for v in sorted(versions): + print "%-20s%s" % (v, versions[v]) diff --git a/lib/spack/spack/test/cmd/test_install.py b/lib/spack/spack/test/cmd/test_install.py index 39287d5d6d..4734fe1267 100644 --- a/lib/spack/spack/test/cmd/test_install.py +++ b/lib/spack/spack/test/cmd/test_install.py @@ -30,6 +30,7 @@ import contextlib import spack import spack.cmd +from spack.cmd import test_install FILE_REGISTRY = collections.defaultdict(StringIO.StringIO) @@ -51,11 +52,6 @@ def mock_open(filename, mode): handle.close() -# The use of __import__ is necessary to maintain a name with hyphen (which -# cannot be an identifier in python) -test_install = __import__("spack.cmd.test-install", fromlist=['test_install']) - - class MockSpec(object): def __init__(self, name, version, hashStr=None): -- cgit v1.2.3-60-g2f50 From cc7df29e810b0733d7457720a18fe4400ef90990 Mon Sep 17 00:00:00 2001 From: "Adam J. Stewart" Date: Tue, 30 Aug 2016 12:05:14 -0500 Subject: Keep dashes in command names, translate to underscores --- bin/spack | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'bin') diff --git a/bin/spack b/bin/spack index 323a06aa53..17586cc1f6 100755 --- a/bin/spack +++ b/bin/spack @@ -127,7 +127,8 @@ subparsers = parser.add_subparsers(metavar='SUBCOMMAND', dest="command") import spack.cmd for cmd in spack.cmd.commands: module = spack.cmd.get_module(cmd) - subparser = subparsers.add_parser(cmd, help=module.description) + cmd_name = cmd.replace('_', '-') + subparser = subparsers.add_parser(cmd_name, help=module.description) module.setup_parser(subparser) # Just print help and exit if run with no arguments at all @@ -163,7 +164,7 @@ def main(): spack.curl.add_default_arg('-k') # Try to load the particular command asked for and run it - command = spack.cmd.get_command(args.command) + command = spack.cmd.get_command(args.command.replace('-', '_')) try: return_val = command(parser, args) except SpackError as e: -- cgit v1.2.3-60-g2f50 From 025b779a30476dd2b6ba9851e4ef1d57812b97c7 Mon Sep 17 00:00:00 2001 From: Eric Date: Thu, 22 Sep 2016 09:43:47 +0200 Subject: Fix sbang for perl (#1802) * Perform shebang fix for all files * Fix sbang for perl scripts Otherwise perl would look at the #! line and call sbang again, resulting in an infinite loop. 
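For illustration (the paths and the print statement below are invented, not taken from any real package), a perl script whose overlong shebang sbang has already patched ends up laid out like this:

    #!/bin/bash /path/to/spack/bin/sbang
    #!/some/long/install/prefix/bin/perl

    print "hello\n";

When sbang then execs perl on the file, perl re-reads the first line, sees an interpreter that is not perl, and re-execs it, which is sbang again: the infinite loop described above. The hunk below therefore invokes perl with -x, which tells perl to skip forward to the first line that starts with #! and contains "perl" (the second line here) and start the program there, breaking the cycle.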
--- bin/sbang | 8 ++++++-- lib/spack/spack/hooks/sbang.py | 12 +++++++----- 2 files changed, 13 insertions(+), 7 deletions(-) (limited to 'bin') diff --git a/bin/sbang b/bin/sbang index 1ea5f06592..e71074b330 100755 --- a/bin/sbang +++ b/bin/sbang @@ -111,8 +111,12 @@ while read line && ((lines < 2)) ; do done < "$script" # Invoke any interpreter found, or raise an error if none was found. -if [ -n "$interpreter" ]; then - exec $interpreter "$@" +if [[ -n "$interpreter" ]]; then + if [[ "${interpreter##*/}" = "perl" ]]; then + exec $interpreter -x "$@" + else + exec $interpreter "$@" + fi else echo "error: sbang found no interpreter in $script" exit 1 diff --git a/lib/spack/spack/hooks/sbang.py b/lib/spack/spack/hooks/sbang.py index 02c1ce3816..6f9736a018 100644 --- a/lib/spack/spack/hooks/sbang.py +++ b/lib/spack/spack/hooks/sbang.py @@ -81,8 +81,10 @@ def filter_shebang(path): tty.warn("Patched overlong shebang in %s" % path) -def filter_shebangs_in_directory(directory): - for file in os.listdir(directory): +def filter_shebangs_in_directory(directory, filenames=None): + if filenames is None: + filenames = os.listdir(directory) + for file in filenames: path = os.path.join(directory, file) # only handle files @@ -104,6 +106,6 @@ def post_install(pkg): """This hook edits scripts so that they call /bin/bash $spack_prefix/bin/sbang instead of something longer than the shebang limit.""" - if not os.path.isdir(pkg.prefix.bin): - return - filter_shebangs_in_directory(pkg.prefix.bin) + + for directory, _, filenames in os.walk(pkg.prefix): + filter_shebangs_in_directory(directory, filenames) -- cgit v1.2.3-60-g2f50 From 9aa77178f33793103653da1c81a73dd6a9e250be Mon Sep 17 00:00:00 2001 From: Elizabeth Fischer Date: Wed, 28 Sep 2016 12:36:25 -0400 Subject: Prohibit Python3 in Python version check. (#1872) --- bin/spack | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'bin') diff --git a/bin/spack b/bin/spack index 17586cc1f6..dcc25c5c83 100755 --- a/bin/spack +++ b/bin/spack @@ -25,9 +25,9 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import sys -if not sys.version_info[:2] >= (2, 6): +if (sys.version_info[0] > 2) or (sys.version_info[:2] < (2, 6)): v_info = sys.version_info[:3] - sys.exit("Spack requires Python 2.6 or higher. " + sys.exit("Spack requires Python 2.6 or 2.7. " "This is Python %d.%d.%d." % v_info) import os -- cgit v1.2.3-60-g2f50 From 488e1bab2ca384863517375f47be33dca4f170f8 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Tue, 11 Oct 2016 23:13:40 -0700 Subject: Make `insecure` option work with curl AND git. (#1786) --- bin/spack | 2 +- lib/spack/spack/__init__.py | 5 +++-- lib/spack/spack/fetch_strategy.py | 30 ++++++++++++++++++++----- lib/spack/spack/util/executable.py | 11 +++++++++ var/spack/repos/builtin/packages/jdk/package.py | 26 +++++---------------- 5 files changed, 46 insertions(+), 28 deletions(-) (limited to 'bin') diff --git a/bin/spack b/bin/spack index dcc25c5c83..503ef068bf 100755 --- a/bin/spack +++ b/bin/spack @@ -161,7 +161,7 @@ def main(): # If the user asked for it, don't check ssl certs. if args.insecure: tty.warn("You asked for --insecure. 
Will NOT check SSL certificates.") - spack.curl.add_default_arg('-k') + spack.insecure = True # Try to load the particular command asked for and run it command = spack.cmd.get_command(args.command.replace('-', '_')) diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py index 3d508d0fde..e284a58194 100644 --- a/lib/spack/spack/__init__.py +++ b/lib/spack/spack/__init__.py @@ -129,8 +129,9 @@ from spack.util.executable import Executable, which # User's editor from the environment editor = Executable(os.environ.get("EDITOR", "vi")) -# Curl tool for fetching files. -curl = which("curl", required=True) +# If this is enabled, tools that use SSL should not verify +# certifiates. e.g., curl should use the -k option. +insecure = False # Whether to build in tmp space or directly in the stage_path. # If this is true, then spack will make stage directories in diff --git a/lib/spack/spack/fetch_strategy.py b/lib/spack/spack/fetch_strategy.py index 4b8829c32f..2becf9ed04 100644 --- a/lib/spack/spack/fetch_strategy.py +++ b/lib/spack/spack/fetch_strategy.py @@ -158,12 +158,20 @@ class URLFetchStrategy(FetchStrategy): self.digest = digest self.expand_archive = kwargs.get('expand', True) + self.extra_curl_options = kwargs.get('curl_options', []) + self._curl = None self.extension = kwargs.get('extension', None) if not self.url: raise ValueError("URLFetchStrategy requires a url for fetching.") + @property + def curl(self): + if not self._curl: + self._curl = which('curl', required=True) + return self._curl + @_needs_stage def fetch(self): self.stage.chdir() @@ -196,15 +204,21 @@ class URLFetchStrategy(FetchStrategy): self.url, ] + if spack.insecure: + curl_args.append('-k') + if sys.stdout.isatty(): curl_args.append('-#') # status bar when using a tty else: curl_args.append('-sS') # just errors when not. + curl_args += self.extra_curl_options + # Run curl but grab the mime type from the http headers - headers = spack.curl(*curl_args, output=str, fail_on_error=False) + curl = self.curl + headers = curl(*curl_args, output=str, fail_on_error=False) - if spack.curl.returncode != 0: + if curl.returncode != 0: # clean up archive on failure. if self.archive_file: os.remove(self.archive_file) @@ -212,12 +226,12 @@ class URLFetchStrategy(FetchStrategy): if partial_file and os.path.exists(partial_file): os.remove(partial_file) - if spack.curl.returncode == 22: + if curl.returncode == 22: # This is a 404. Curl will print the error. raise FailedDownloadError( self.url, "URL %s was not found!" % self.url) - elif spack.curl.returncode == 60: + elif curl.returncode == 60: # This is a certificate error. Suggest spack -k raise FailedDownloadError( self.url, @@ -233,7 +247,7 @@ class URLFetchStrategy(FetchStrategy): # error, but print a spack message too raise FailedDownloadError( self.url, - "Curl failed with error %d" % spack.curl.returncode) + "Curl failed with error %d" % curl.returncode) # Check if we somehow got an HTML file rather than the archive we # asked for. We only look at the last content type, to handle @@ -530,6 +544,12 @@ class GitFetchStrategy(VCSFetchStrategy): def git(self): if not self._git: self._git = which('git', required=True) + + # If the user asked for insecure fetching, make that work + # with git as well. 
+ if spack.insecure: + self._git.add_default_env('GIT_SSL_NO_VERIFY', 'true') + return self._git @_needs_stage diff --git a/lib/spack/spack/util/executable.py b/lib/spack/spack/util/executable.py index 4fe4bd26ba..2790508ee8 100644 --- a/lib/spack/spack/util/executable.py +++ b/lib/spack/spack/util/executable.py @@ -40,6 +40,7 @@ class Executable(object): def __init__(self, name): self.exe = name.split(' ') + self.default_env = {} self.returncode = None if not self.exe: @@ -48,6 +49,9 @@ class Executable(object): def add_default_arg(self, arg): self.exe.append(arg) + def add_default_env(self, key, value): + self.default_env[key] = value + @property def command(self): return ' '.join(self.exe) @@ -103,7 +107,13 @@ class Executable(object): fail_on_error = kwargs.pop("fail_on_error", True) ignore_errors = kwargs.pop("ignore_errors", ()) + # environment env = kwargs.get('env', None) + if env is None: + env = os.environ.copy() + env.update(self.default_env) + else: + env = self.default_env.copy().update(env) # TODO: This is deprecated. Remove in a future version. return_output = kwargs.pop("return_output", False) @@ -149,6 +159,7 @@ class Executable(object): cmd_line = "'%s'" % "' '".join( map(lambda arg: arg.replace("'", "'\"'\"'"), cmd)) + tty.debug(cmd_line) try: diff --git a/var/spack/repos/builtin/packages/jdk/package.py b/var/spack/repos/builtin/packages/jdk/package.py index bab0920434..518a469435 100644 --- a/var/spack/repos/builtin/packages/jdk/package.py +++ b/var/spack/repos/builtin/packages/jdk/package.py @@ -26,10 +26,7 @@ # Author: Justin Too # import distutils.dir_util - -import spack from spack import * -import llnl.util.tty as tty class Jdk(Package): @@ -37,11 +34,6 @@ class Jdk(Package): in the form of a binary product aimed at Java developers.""" homepage = "http://www.oracle.com/technetwork/java/javase/downloads/index.html" - version('8u66-linux-x64', '88f31f3d642c3287134297b8c10e61bf', - url="http://download.oracle.com/otn-pub/java/jdk/8u66-b17/jdk-8u66-linux-x64.tar.gz") - version('8u92-linux-x64', '65a1cc17ea362453a6e0eb4f13be76e4', - url="http://download.oracle.com/otn-pub/java/jdk/8u92-b14/jdk-8u92-linux-x64.tar.gz") - # Oracle requires that you accept their License Agreement in order # to access the Java packages in download.oracle.com. In order to # automate this process, we need to utilize these additional curl @@ -53,18 +45,12 @@ class Jdk(Package): '-H', # specify required License Agreement cookie 'Cookie: oraclelicense=accept-securebackup-cookie'] - def do_fetch(self, mirror_only=False): - # Add our custom curl commandline options - tty.msg( - "[Jdk] Adding required commandline options to curl " + - "before performing fetch: %s" % - (self.curl_options)) - - for option in self.curl_options: - spack.curl.add_default_arg(option) - - # Now perform the actual fetch - super(Jdk, self).do_fetch(mirror_only) + version('8u66-linux-x64', '88f31f3d642c3287134297b8c10e61bf', + url="http://download.oracle.com/otn-pub/java/jdk/8u66-b17/jdk-8u66-linux-x64.tar.gz", + curl_options=curl_options) + version('8u92-linux-x64', '65a1cc17ea362453a6e0eb4f13be76e4', + url="http://download.oracle.com/otn-pub/java/jdk/8u92-b14/jdk-8u92-linux-x64.tar.gz", + curl_options=curl_options) def install(self, spec, prefix): distutils.dir_util.copy_tree(".", prefix) -- cgit v1.2.3-60-g2f50 From d861a52ebec2a73451fde5870b92b2ff4fb81620 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Wed, 12 Oct 2016 18:25:18 -0700 Subject: Use cYAML if it is available in Python. 
(#2010) --- bin/spack | 10 ++++++++++ lib/spack/spack/spec.py | 7 +++++-- lib/spack/spack/util/spack_yaml.py | 8 ++++++-- 3 files changed, 21 insertions(+), 4 deletions(-) (limited to 'bin') diff --git a/bin/spack b/bin/spack index 503ef068bf..29991c070d 100755 --- a/bin/spack +++ b/bin/spack @@ -40,6 +40,16 @@ SPACK_PREFIX = os.path.dirname(os.path.dirname(SPACK_FILE)) # Allow spack libs to be imported in our scripts SPACK_LIB_PATH = os.path.join(SPACK_PREFIX, "lib", "spack") sys.path.insert(0, SPACK_LIB_PATH) + +# Try to use system YAML if it is available, as it might have libyaml +# support (for faster loading via C). Load it before anything in +# lib/spack/external so it will take precedence over Spack's PyYAML. +try: + import yaml +except ImportError: + pass # ignore and use slow yaml + +# Add external libs SPACK_EXTERNAL_LIBS = os.path.join(SPACK_LIB_PATH, "external") sys.path.insert(0, SPACK_EXTERNAL_LIBS) diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index fc4bf41e34..37a3cf4d7e 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -98,7 +98,7 @@ expansion when it is the first character in an id typed on the command line. import base64 import hashlib import imp -import sys +import ctypes from StringIO import StringIO from operator import attrgetter @@ -203,6 +203,9 @@ special_types = { legal_deps = tuple(special_types) + alldeps +"""Max integer helps avoid passing too large a value to cyaml.""" +maxint = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1 + def validate_deptype(deptype): if isinstance(deptype, str): @@ -969,7 +972,7 @@ class Spec(object): return self._hash[:length] else: yaml_text = syaml.dump( - self.to_node_dict(), default_flow_style=True, width=sys.maxint) + self.to_node_dict(), default_flow_style=True, width=maxint) sha = hashlib.sha1(yaml_text) b32_hash = base64.b32encode(sha.digest()).lower()[:length] if self.concrete: diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py index 7bcdf2d61f..506f56633a 100644 --- a/lib/spack/spack/util/spack_yaml.py +++ b/lib/spack/spack/util/spack_yaml.py @@ -32,6 +32,10 @@ """ import yaml +try: + from yaml import CLoader as Loader, CDumper as Dumper +except ImportError as e: + from yaml import Loader, Dumper from yaml.nodes import * from yaml.constructor import ConstructorError from ordereddict_backport import OrderedDict @@ -64,7 +68,7 @@ def mark(obj, node): obj._end_mark = node.end_mark -class OrderedLineLoader(yaml.Loader): +class OrderedLineLoader(Loader): """YAML loader that preserves order and line numbers. Mappings read in by this loader behave like an ordered dict. @@ -156,7 +160,7 @@ OrderedLineLoader.add_constructor( u'tag:yaml.org,2002:str', OrderedLineLoader.construct_yaml_str) -class OrderedLineDumper(yaml.Dumper): +class OrderedLineDumper(Dumper): """Dumper that preserves ordering and formats ``syaml_*`` objects. This dumper preserves insertion ordering ``syaml_dict`` objects -- cgit v1.2.3-60-g2f50 From 8f21332fec4c8adb5349ff90e30bb0e4f75e090e Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sun, 30 Oct 2016 18:29:44 -0700 Subject: Bugfix: '::' only worked on top-level key in config. - generalized and fixed to work with any key in YAML file - simplified schema writing, as well - add more unit tests for the config system - Rename test/yaml.py to test/spack_yaml.py - Add test/yaml.pyc to ignored pyc files. 
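As a rough sketch of what this enables (the stage path below is invented, not taken from any real site configuration), a higher-precedence config.yaml can now override a single nested entry instead of only a whole section:

    config:
      build_stage::
        - /custom/stage

The trailing '::' marks the key as an override, so this list completely replaces any build_stage entries from lower-precedence scopes; written with a single ':' it would merge with them instead, as the new test_read_config_override_list and test_read_config_merge_list cases in the diff below exercise.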
--- bin/spack | 3 +- lib/spack/spack/config.py | 50 ++++-- lib/spack/spack/schema/compilers.py | 2 +- lib/spack/spack/schema/config.py | 16 +- lib/spack/spack/schema/mirrors.py | 2 +- lib/spack/spack/schema/modules.py | 2 +- lib/spack/spack/schema/packages.py | 2 +- lib/spack/spack/schema/repos.py | 2 +- lib/spack/spack/test/__init__.py | 2 +- lib/spack/spack/test/config.py | 324 ++++++++++++++++++++++-------------- lib/spack/spack/test/spack_yaml.py | 92 ++++++++++ lib/spack/spack/test/yaml.py | 92 ---------- lib/spack/spack/util/spack_yaml.py | 2 + 13 files changed, 348 insertions(+), 243 deletions(-) create mode 100644 lib/spack/spack/test/spack_yaml.py delete mode 100644 lib/spack/spack/test/yaml.py (limited to 'bin') diff --git a/bin/spack b/bin/spack index 29991c070d..1f5dec0b3d 100755 --- a/bin/spack +++ b/bin/spack @@ -72,7 +72,8 @@ orphaned_pyc_files = [ os.path.join(SPACK_LIB_PATH, 'spack', 'platforms', 'cray_xc.pyc'), os.path.join(SPACK_LIB_PATH, 'spack', 'cmd', 'package-list.pyc'), os.path.join(SPACK_LIB_PATH, 'spack', 'cmd', 'test-install.pyc'), - os.path.join(SPACK_LIB_PATH, 'spack', 'cmd', 'url-parse.pyc') + os.path.join(SPACK_LIB_PATH, 'spack', 'cmd', 'url-parse.pyc'), + os.path.join(SPACK_LIB_PATH, 'spack', 'test', 'yaml.pyc') ] for pyc_file in orphaned_pyc_files: diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py index c47eb68e09..989b3da169 100644 --- a/lib/spack/spack/config.py +++ b/lib/spack/spack/config.py @@ -266,7 +266,7 @@ def _read_config_file(filename, schema): try: tty.debug("Reading config file %s" % filename) with open(filename) as f: - data = syaml.load(f) + data = _mark_overrides(syaml.load(f)) if data: validate_section(data, schema) @@ -288,6 +288,34 @@ def clear_config_caches(): scope.clear() +def override(string): + """Test if a spack YAML string is an override. + + See ``spack_yaml`` for details. Keys in Spack YAML can end in `::`, + and if they do, their values completely replace lower-precedence + configs instead of merging into them. + + """ + return hasattr(string, 'override') and string.override + + +def _mark_overrides(data): + if isinstance(data, list): + return [_mark_overrides(elt) for elt in data] + + elif isinstance(data, dict): + marked = {} + for key, val in data.iteritems(): + if isinstance(key, basestring) and key.endswith(':'): + key = syaml.syaml_str(key[:-1]) + key.override = True + marked[key] = _mark_overrides(val) + return marked + + else: + return data + + def _merge_yaml(dest, source): """Merges source into dest; entries in source take precedence over dest. @@ -320,9 +348,11 @@ def _merge_yaml(dest, source): # Source dict is merged into dest. elif they_are(dict): for sk, sv in source.iteritems(): - if sk not in dest: + if override(sk) or sk not in dest: + # if sk ended with ::, or if it's new, completely override dest[sk] = copy.copy(sv) else: + # otherwise, merge the YAML dest[sk] = _merge_yaml(dest[sk], source[sk]) return dest @@ -371,18 +401,18 @@ def get_config(section, scope=None): if not data or not isinstance(data, dict): continue - # Allow complete override of site config with '
::' - override_key = section + ':' - if not (section in data or override_key in data): + if section not in data: tty.warn("Skipping bad configuration file: '%s'" % scope.path) continue - if override_key in data: - merged_section = data[override_key] - else: - merged_section = _merge_yaml(merged_section, data[section]) + merged_section = _merge_yaml(merged_section, data) + + # no config files -- empty config. + if section not in merged_section: + return {} - return merged_section + # take the top key off before returning. + return merged_section[section] def get_config_filename(scope, section): diff --git a/lib/spack/spack/schema/compilers.py b/lib/spack/spack/schema/compilers.py index 1c7894d675..ea1071729f 100644 --- a/lib/spack/spack/schema/compilers.py +++ b/lib/spack/spack/schema/compilers.py @@ -35,7 +35,7 @@ schema = { 'type': 'object', 'additionalProperties': False, 'patternProperties': { - 'compilers:?': { # optional colon for overriding site config. + 'compilers': { 'type': 'array', 'items': { 'compiler': { diff --git a/lib/spack/spack/schema/config.py b/lib/spack/spack/schema/config.py index 31d4b8a8a8..e51fa69afe 100644 --- a/lib/spack/spack/schema/config.py +++ b/lib/spack/spack/schema/config.py @@ -49,7 +49,6 @@ schema = { }, 'module_roots': { 'type': 'object', - 'default': {}, 'additionalProperties': False, 'properties': { 'tcl': {'type': 'string'}, @@ -59,18 +58,9 @@ schema = { }, 'source_cache': {'type': 'string'}, 'misc_cache': {'type': 'string'}, - 'verify_ssl': { - 'type': 'boolean', - 'default': True, - }, - 'checksum': { - 'type': 'boolean', - 'default': True, - }, - 'dirty': { - 'type': 'boolean', - 'default': False, - }, + 'verify_ssl': {'type': 'boolean'}, + 'checksum': {'type': 'boolean'}, + 'dirty': {'type': 'boolean'}, } }, }, diff --git a/lib/spack/spack/schema/mirrors.py b/lib/spack/spack/schema/mirrors.py index 9aa3a0f747..60b865bb42 100644 --- a/lib/spack/spack/schema/mirrors.py +++ b/lib/spack/spack/schema/mirrors.py @@ -35,7 +35,7 @@ schema = { 'type': 'object', 'additionalProperties': False, 'patternProperties': { - r'mirrors:?': { + r'mirrors': { 'type': 'object', 'default': {}, 'additionalProperties': False, diff --git a/lib/spack/spack/schema/modules.py b/lib/spack/spack/schema/modules.py index 2e776635d2..2059e14fa6 100644 --- a/lib/spack/spack/schema/modules.py +++ b/lib/spack/spack/schema/modules.py @@ -127,7 +127,7 @@ schema = { } }, 'patternProperties': { - r'modules:?': { + r'modules': { 'type': 'object', 'default': {}, 'additionalProperties': False, diff --git a/lib/spack/spack/schema/packages.py b/lib/spack/spack/schema/packages.py index 29c62150ef..bf5648b1b7 100644 --- a/lib/spack/spack/schema/packages.py +++ b/lib/spack/spack/schema/packages.py @@ -35,7 +35,7 @@ schema = { 'type': 'object', 'additionalProperties': False, 'patternProperties': { - r'packages:?': { + r'packages': { 'type': 'object', 'default': {}, 'additionalProperties': False, diff --git a/lib/spack/spack/schema/repos.py b/lib/spack/spack/schema/repos.py index 74dcee7cdc..c7a3495ae1 100644 --- a/lib/spack/spack/schema/repos.py +++ b/lib/spack/spack/schema/repos.py @@ -35,7 +35,7 @@ schema = { 'type': 'object', 'additionalProperties': False, 'patternProperties': { - r'repos:?': { + r'repos': { 'type': 'array', 'default': [], 'items': { diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py index 457e5db9dc..c0a4c7354f 100644 --- a/lib/spack/spack/test/__init__.py +++ b/lib/spack/spack/test/__init__.py @@ -78,7 +78,7 @@ test_names = [ 'url_substitution', 
'versions', 'provider_index', - 'yaml', + 'spack_yaml', # This test needs to be last until global compiler cache is fixed. 'cmd.test_compiler_cmd', ] diff --git a/lib/spack/spack/test/config.py b/lib/spack/spack/test/config.py index d5e1791b40..adc0795916 100644 --- a/lib/spack/spack/test/config.py +++ b/lib/spack/spack/test/config.py @@ -25,6 +25,7 @@ import os import shutil import getpass +import yaml from tempfile import mkdtemp import spack @@ -34,109 +35,136 @@ from ordereddict_backport import OrderedDict from spack.test.mock_packages_test import * # Some sample compiler config data -a_comps = [ - {'compiler': { - 'paths': { - "cc": "/gcc473", - "cxx": "/g++473", - "f77": None, - "fc": None - }, - 'modules': None, - 'spec': 'gcc@4.7.3', - 'operating_system': 'CNL10' - }}, - {'compiler': { - 'paths': { - "cc": "/gcc450", - "cxx": "/g++450", - "f77": 'gfortran', - "fc": 'gfortran' - }, - 'modules': None, - 'spec': 'gcc@4.5.0', - 'operating_system': 'CNL10' - }}, - {'compiler': { - 'paths': { - "cc": "/gcc422", - "cxx": "/g++422", - "f77": 'gfortran', - "fc": 'gfortran' - }, - 'flags': { - "cppflags": "-O0 -fpic", - "fflags": "-f77", - }, - 'modules': None, - 'spec': 'gcc@4.2.2', - 'operating_system': 'CNL10' - }}, - {'compiler': { - 'paths': { - "cc": "", - "cxx": "", - "f77": '', - "fc": ''}, - 'modules': None, - 'spec': 'clang@3.3', - 'operating_system': 'CNL10' - }} -] - -b_comps = [ - {'compiler': { - 'paths': { - "cc": "/icc100", - "cxx": "/icp100", - "f77": None, - "fc": None - }, - 'modules': None, - 'spec': 'icc@10.0', - 'operating_system': 'CNL10' - }}, - {'compiler': { - 'paths': { - "cc": "/icc111", - "cxx": "/icp111", - "f77": 'ifort', - "fc": 'ifort' - }, - 'modules': None, - 'spec': 'icc@11.1', - 'operating_system': 'CNL10' - }}, - {'compiler': { - 'paths': { - "cc": "/icc123", - "cxx": "/icp123", - "f77": 'ifort', - "fc": 'ifort' - }, - 'flags': { - "cppflags": "-O3", - "fflags": "-f77rtl", - }, - 'modules': None, - 'spec': 'icc@12.3', - 'operating_system': 'CNL10' - }}, - {'compiler': { - 'paths': { - "cc": "", - "cxx": "", - "f77": '', - "fc": ''}, - 'modules': None, - 'spec': 'clang@3.3', - 'operating_system': 'CNL10' - }} -] +a_comps = { + 'compilers': [ + {'compiler': { + 'paths': { + "cc": "/gcc473", + "cxx": "/g++473", + "f77": None, + "fc": None + }, + 'modules': None, + 'spec': 'gcc@4.7.3', + 'operating_system': 'CNL10' + }}, + {'compiler': { + 'paths': { + "cc": "/gcc450", + "cxx": "/g++450", + "f77": 'gfortran', + "fc": 'gfortran' + }, + 'modules': None, + 'spec': 'gcc@4.5.0', + 'operating_system': 'CNL10' + }}, + {'compiler': { + 'paths': { + "cc": "/gcc422", + "cxx": "/g++422", + "f77": 'gfortran', + "fc": 'gfortran' + }, + 'flags': { + "cppflags": "-O0 -fpic", + "fflags": "-f77", + }, + 'modules': None, + 'spec': 'gcc@4.2.2', + 'operating_system': 'CNL10' + }}, + {'compiler': { + 'paths': { + "cc": "", + "cxx": "", + "f77": '', + "fc": ''}, + 'modules': None, + 'spec': 'clang@3.3', + 'operating_system': 'CNL10' + }} + ] +} + +b_comps = { + 'compilers': [ + {'compiler': { + 'paths': { + "cc": "/icc100", + "cxx": "/icp100", + "f77": None, + "fc": None + }, + 'modules': None, + 'spec': 'icc@10.0', + 'operating_system': 'CNL10' + }}, + {'compiler': { + 'paths': { + "cc": "/icc111", + "cxx": "/icp111", + "f77": 'ifort', + "fc": 'ifort' + }, + 'modules': None, + 'spec': 'icc@11.1', + 'operating_system': 'CNL10' + }}, + {'compiler': { + 'paths': { + "cc": "/icc123", + "cxx": "/icp123", + "f77": 'ifort', + "fc": 'ifort' + }, + 'flags': { + "cppflags": "-O3", + 
"fflags": "-f77rtl", + }, + 'modules': None, + 'spec': 'icc@12.3', + 'operating_system': 'CNL10' + }}, + {'compiler': { + 'paths': { + "cc": "", + "cxx": "", + "f77": '', + "fc": ''}, + 'modules': None, + 'spec': 'clang@3.3', + 'operating_system': 'CNL10' + }} + ] +} # Some Sample repo data -repos_low = ["/some/path"] -repos_high = ["/some/other/path"] +repos_low = {'repos': ["/some/path"]} +repos_high = {'repos': ["/some/other/path"]} + + +# sample config data +config_low = { + 'config': { + 'install_tree': 'install_tree_path', + 'build_stage': ['path1', 'path2', 'path3']}} + +config_override_all = { + 'config:': { + 'install_tree:': 'override_all'}} + +config_override_key = { + 'config': { + 'install_tree:': 'override_key'}} + +config_merge_list = { + 'config': { + 'build_stage': ['patha', 'pathb']}} + +config_override_list = { + 'config': { + 'build_stage:': ['patha', 'pathb']}} class ConfigTest(MockPackagesTest): @@ -144,19 +172,30 @@ class ConfigTest(MockPackagesTest): def setUp(self): super(ConfigTest, self).setUp() self.tmp_dir = mkdtemp('.tmp', 'spack-config-test-') - self.a_comp_specs = [ac['compiler']['spec'] for ac in a_comps] - self.b_comp_specs = [bc['compiler']['spec'] for bc in b_comps] + self.a_comp_specs = [ + ac['compiler']['spec'] for ac in a_comps['compilers']] + self.b_comp_specs = [ + bc['compiler']['spec'] for bc in b_comps['compilers']] spack.config.config_scopes = OrderedDict() for priority in ['low', 'high']: - spack.config.ConfigScope('test_{0}_priority'.format(priority), - os.path.join(self.tmp_dir, priority)) + scope_dir = os.path.join(self.tmp_dir, priority) + spack.config.ConfigScope(priority, scope_dir) def tearDown(self): super(ConfigTest, self).tearDown() shutil.rmtree(self.tmp_dir, True) - def check_config(self, comps, *compiler_names): + def write_config_file(self, config, data, scope): + scope_dir = os.path.join(self.tmp_dir, scope) + mkdirp(scope_dir) + + path = os.path.join(scope_dir, config + '.yaml') + with open(path, 'w') as f: + print yaml + yaml.dump(data, f) + + def check_compiler_config(self, comps, *compiler_names): """Check that named compilers in comps match Spack's config.""" config = spack.config.get_config('compilers') compiler_list = ['cc', 'cxx', 'f77', 'fc'] @@ -182,43 +221,50 @@ class ConfigTest(MockPackagesTest): self.assertEqual(expected, actual) def test_write_list_in_memory(self): - spack.config.update_config('repos', repos_low, 'test_low_priority') - spack.config.update_config('repos', repos_high, 'test_high_priority') + spack.config.update_config('repos', repos_low['repos'], scope='low') + spack.config.update_config('repos', repos_high['repos'], scope='high') + config = spack.config.get_config('repos') - self.assertEqual(config, repos_high + repos_low) + self.assertEqual(config, repos_high['repos'] + repos_low['repos']) def test_write_key_in_memory(self): # Write b_comps "on top of" a_comps. - spack.config.update_config('compilers', a_comps, 'test_low_priority') - spack.config.update_config('compilers', b_comps, 'test_high_priority') + spack.config.update_config( + 'compilers', a_comps['compilers'], scope='low') + spack.config.update_config( + 'compilers', b_comps['compilers'], scope='high') # Make sure the config looks how we expect. 
- self.check_config(a_comps, *self.a_comp_specs) - self.check_config(b_comps, *self.b_comp_specs) + self.check_compiler_config(a_comps['compilers'], *self.a_comp_specs) + self.check_compiler_config(b_comps['compilers'], *self.b_comp_specs) def test_write_key_to_disk(self): # Write b_comps "on top of" a_comps. - spack.config.update_config('compilers', a_comps, 'test_low_priority') - spack.config.update_config('compilers', b_comps, 'test_high_priority') + spack.config.update_config( + 'compilers', a_comps['compilers'], scope='low') + spack.config.update_config( + 'compilers', b_comps['compilers'], scope='high') # Clear caches so we're forced to read from disk. spack.config.clear_config_caches() # Same check again, to ensure consistency. - self.check_config(a_comps, *self.a_comp_specs) - self.check_config(b_comps, *self.b_comp_specs) + self.check_compiler_config(a_comps['compilers'], *self.a_comp_specs) + self.check_compiler_config(b_comps['compilers'], *self.b_comp_specs) def test_write_to_same_priority_file(self): # Write b_comps in the same file as a_comps. - spack.config.update_config('compilers', a_comps, 'test_low_priority') - spack.config.update_config('compilers', b_comps, 'test_low_priority') + spack.config.update_config( + 'compilers', a_comps['compilers'], scope='low') + spack.config.update_config( + 'compilers', b_comps['compilers'], scope='low') # Clear caches so we're forced to read from disk. spack.config.clear_config_caches() # Same check again, to ensure consistency. - self.check_config(a_comps, *self.a_comp_specs) - self.check_config(b_comps, *self.b_comp_specs) + self.check_compiler_config(a_comps['compilers'], *self.a_comp_specs) + self.check_compiler_config(b_comps['compilers'], *self.b_comp_specs) def check_canonical(self, var, expected): """Ensure that is substituted properly for in strings @@ -270,3 +316,39 @@ class ConfigTest(MockPackagesTest): self.assertEqual(tempdir, canonicalize_path('$tempdir')) self.assertEqual(tempdir + '/foo/bar/baz', canonicalize_path('$tempdir/foo/bar/baz')) + + def test_read_config(self): + self.write_config_file('config', config_low, 'low') + self.assertEqual(spack.config.get_config('config'), + config_low['config']) + + def test_read_config_override_all(self): + self.write_config_file('config', config_low, 'low') + self.write_config_file('config', config_override_all, 'high') + self.assertEqual(spack.config.get_config('config'), { + 'install_tree': 'override_all' + }) + + def test_read_config_override_key(self): + self.write_config_file('config', config_low, 'low') + self.write_config_file('config', config_override_key, 'high') + self.assertEqual(spack.config.get_config('config'), { + 'install_tree': 'override_key', + 'build_stage': ['path1', 'path2', 'path3'] + }) + + def test_read_config_merge_list(self): + self.write_config_file('config', config_low, 'low') + self.write_config_file('config', config_merge_list, 'high') + self.assertEqual(spack.config.get_config('config'), { + 'install_tree': 'install_tree_path', + 'build_stage': ['patha', 'pathb', 'path1', 'path2', 'path3'] + }) + + def test_read_config_override_list(self): + self.write_config_file('config', config_low, 'low') + self.write_config_file('config', config_override_list, 'high') + self.assertEqual(spack.config.get_config('config'), { + 'install_tree': 'install_tree_path', + 'build_stage': ['patha', 'pathb'] + }) diff --git a/lib/spack/spack/test/spack_yaml.py b/lib/spack/spack/test/spack_yaml.py new file mode 100644 index 0000000000..30ed1672e2 --- /dev/null +++ 
b/lib/spack/spack/test/spack_yaml.py @@ -0,0 +1,92 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +""" +Test Spack's custom YAML format. +""" +import unittest + +import spack.util.spack_yaml as syaml + +test_file = """\ +config_file: + x86_64: + foo: /path/to/foo + bar: /path/to/bar + baz: /path/to/baz + some_list: + - item 1 + - item 2 + - item 3 + another_list: + [ 1, 2, 3 ] + some_key: some_string +""" + +test_data = { + 'config_file': syaml.syaml_dict([ + ('x86_64', syaml.syaml_dict([ + ('foo', '/path/to/foo'), + ('bar', '/path/to/bar'), + ('baz', '/path/to/baz')])), + ('some_list', ['item 1', 'item 2', 'item 3']), + ('another_list', [1, 2, 3]), + ('some_key', 'some_string') + ])} + + +class SpackYamlTest(unittest.TestCase): + + def setUp(self): + self.data = syaml.load(test_file) + + def test_parse(self): + self.assertEqual(test_data, self.data) + + def test_dict_order(self): + self.assertEqual( + ['x86_64', 'some_list', 'another_list', 'some_key'], + self.data['config_file'].keys()) + + self.assertEqual( + ['foo', 'bar', 'baz'], + self.data['config_file']['x86_64'].keys()) + + def test_line_numbers(self): + def check(obj, start_line, end_line): + self.assertEqual(obj._start_mark.line, start_line) + self.assertEqual(obj._end_mark.line, end_line) + + check(self.data, 0, 12) + check(self.data['config_file'], 1, 12) + check(self.data['config_file']['x86_64'], 2, 5) + check(self.data['config_file']['x86_64']['foo'], 2, 2) + check(self.data['config_file']['x86_64']['bar'], 3, 3) + check(self.data['config_file']['x86_64']['baz'], 4, 4) + check(self.data['config_file']['some_list'], 6, 9) + check(self.data['config_file']['some_list'][0], 6, 6) + check(self.data['config_file']['some_list'][1], 7, 7) + check(self.data['config_file']['some_list'][2], 8, 8) + check(self.data['config_file']['another_list'], 10, 10) + check(self.data['config_file']['some_key'], 11, 11) diff --git a/lib/spack/spack/test/yaml.py b/lib/spack/spack/test/yaml.py deleted file mode 100644 index dedbd15d10..0000000000 --- a/lib/spack/spack/test/yaml.py +++ /dev/null @@ -1,92 +0,0 @@ -############################################################################## -# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# -# This file is part of Spack. 
-# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. -# LLNL-CODE-647188 -# -# For details, see https://github.com/llnl/spack -# Please also see the LICENSE file for our notice and the LGPL. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License (as -# published by the Free Software Foundation) version 2.1, February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and -# conditions of the GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -############################################################################## -""" -Test Spack's custom YAML format. -""" -import unittest - -import spack.util.spack_yaml as syaml - -test_file = """\ -config_file: - x86_64: - foo: /path/to/foo - bar: /path/to/bar - baz: /path/to/baz - some_list: - - item 1 - - item 2 - - item 3 - another_list: - [ 1, 2, 3 ] - some_key: some_string -""" - -test_data = { - 'config_file': syaml.syaml_dict([ - ('x86_64', syaml.syaml_dict([ - ('foo', '/path/to/foo'), - ('bar', '/path/to/bar'), - ('baz', '/path/to/baz')])), - ('some_list', ['item 1', 'item 2', 'item 3']), - ('another_list', [1, 2, 3]), - ('some_key', 'some_string') - ])} - - -class YamlTest(unittest.TestCase): - - def setUp(self): - self.data = syaml.load(test_file) - - def test_parse(self): - self.assertEqual(test_data, self.data) - - def test_dict_order(self): - self.assertEqual( - ['x86_64', 'some_list', 'another_list', 'some_key'], - self.data['config_file'].keys()) - - self.assertEqual( - ['foo', 'bar', 'baz'], - self.data['config_file']['x86_64'].keys()) - - def test_line_numbers(self): - def check(obj, start_line, end_line): - self.assertEqual(obj._start_mark.line, start_line) - self.assertEqual(obj._end_mark.line, end_line) - - check(self.data, 0, 12) - check(self.data['config_file'], 1, 12) - check(self.data['config_file']['x86_64'], 2, 5) - check(self.data['config_file']['x86_64']['foo'], 2, 2) - check(self.data['config_file']['x86_64']['bar'], 3, 3) - check(self.data['config_file']['x86_64']['baz'], 4, 4) - check(self.data['config_file']['some_list'], 6, 9) - check(self.data['config_file']['some_list'][0], 6, 6) - check(self.data['config_file']['some_list'][1], 7, 7) - check(self.data['config_file']['some_list'][2], 8, 8) - check(self.data['config_file']['another_list'], 10, 10) - check(self.data['config_file']['some_key'], 11, 11) diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py index 506f56633a..674c79bca1 100644 --- a/lib/spack/spack/util/spack_yaml.py +++ b/lib/spack/spack/util/spack_yaml.py @@ -91,7 +91,9 @@ class OrderedLineLoader(Loader): value = value.encode('ascii') except UnicodeEncodeError: pass + value = syaml_str(value) + mark(value, node) return value -- cgit v1.2.3-60-g2f50 From 8e6d890a19e557e86791da5905ed96c271a6f61f Mon Sep 17 00:00:00 2001 From: Matthew Krafczyk Date: Tue, 15 Nov 2016 15:17:03 -0500 Subject: -s now includes the file and line number with info The option -s now causes file and line number information to be printed along with any invocation of msg, info, etc... This will greatly ease debugging. 
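In practice, any tty call made while the flag is set gains a "file:line"
prefix in front of the usual "==>" arrow. A minimal illustration (a sketch
for this message only, not part of the patch; the path and line number in
the comment are hypothetical and depend on the calling frame):

    import llnl.util.tty as tty

    # Equivalent to running spack with -s / --stacktrace on the command line.
    tty.set_stacktrace(True)

    # Messages are now prefixed with the caller's file and line, e.g.
    # "lib/spack/spack/cmd/install.py:45 ==> Installing zlib" (illustrative).
    tty.msg("Installing zlib")
    tty.info("fetching sources")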
--- bin/spack | 3 +++ lib/spack/llnl/util/tty/__init__.py | 34 ++++++++++++++++++++++++++++++++-- 2 files changed, 35 insertions(+), 2 deletions(-) (limited to 'bin') diff --git a/bin/spack b/bin/spack index 1f5dec0b3d..7cc1b28a37 100755 --- a/bin/spack +++ b/bin/spack @@ -128,6 +128,8 @@ parser.add_argument('-p', '--profile', action='store_true', help="Profile execution using cProfile.") parser.add_argument('-v', '--verbose', action='store_true', help="Print additional output during builds") +parser.add_argument('-s', '--stacktrace', action='store_true', + help="Add stacktrace information to all printed statements") parser.add_argument('-V', '--version', action='version', version="%s" % spack.spack_version) @@ -155,6 +157,7 @@ def main(): # Set up environment based on args. tty.set_verbose(args.verbose) tty.set_debug(args.debug) + tty.set_stacktrace(args.stacktrace) spack.debug = args.debug if spack.debug: diff --git a/lib/spack/llnl/util/tty/__init__.py b/lib/spack/llnl/util/tty/__init__.py index db74aaba6b..07fe579a8c 100644 --- a/lib/spack/llnl/util/tty/__init__.py +++ b/lib/spack/llnl/util/tty/__init__.py @@ -28,12 +28,14 @@ import textwrap import fcntl import termios import struct +import traceback from StringIO import StringIO from llnl.util.tty.color import * _debug = False _verbose = False +_stacktrace = False indent = " " @@ -45,6 +47,10 @@ def is_debug(): return _debug +def is_stacktrace(): + return _stacktrace + + def set_debug(flag): global _debug _debug = flag @@ -53,10 +59,29 @@ def set_debug(flag): def set_verbose(flag): global _verbose _verbose = flag + + +def set_stacktrace(flag): + global _stacktrace + _stacktrace = flag + + +def process_stacktrace(countback): + """Returns a string with the file and line of the stackframe 'countback' frames from the bottom of the stack""" + st = traceback.extract_stack() + #First entry should be bin/spack. Use this to get spack 'root'. + #bin/spack is 9 characters, the length of the 'root' is then len-9. + root_len = len(st[0][0])-9 + st_idx = len(st)-countback-1 + st_text = "%s:%i " % (st[st_idx][0][root_len:], st[st_idx][1]) + return st_text def msg(message, *args): - cprint("@*b{==>} %s" % cescape(message)) + st_text = "" + if _stacktrace: + st_text = process_stacktrace(2) + cprint("@*b{%s==>} %s" % (st_text, cescape(message))) for arg in args: print indent + str(arg) @@ -66,8 +91,12 @@ def info(message, *args, **kwargs): stream = kwargs.get('stream', sys.stdout) wrap = kwargs.get('wrap', False) break_long_words = kwargs.get('break_long_words', False) + st_countback = kwargs.get('countback', 3) - cprint("@%s{==>} %s" % (format, cescape(str(message))), stream=stream) + st_text = "" + if _stacktrace: + st_text = process_stacktrace(st_countback) + cprint("@%s{%s==>} %s" % (format, st_text, cescape(str(message))), stream=stream) for arg in args: if wrap: lines = textwrap.wrap( @@ -105,6 +134,7 @@ def warn(message, *args, **kwargs): def die(message, *args, **kwargs): + kwargs.setdefault('countback', 4) error(message, *args, **kwargs) sys.exit(1) -- cgit v1.2.3-60-g2f50 From 41b8f31bcd299d4ea7dd40138f725f77576278a3 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Mon, 5 Dec 2016 10:03:58 -0800 Subject: Use JSON for the database instead of YAML. (#2189) * Use JSON for the database instead of YAML. - JSON is much faster than YAML *and* can preserve ordered keys. 
- 170x+ faster than Python YAML when using unordered dicts - 55x faster than Python YAML (both using OrderedDicts) - 8x faster than C YAML (with OrderedDicts) - JSON is built into Python, unlike C YAML, so doesn't add a dependency. - Don't need human readability for the package database. - JSON requires no major changes to the code -- same object model as YAML. - add to_json, from_json methods to spec. * Add tests to ensure JSON and YAML don't need to be ordered in DB. * Write index.json first time it's not found instead of requiring reindex. * flake8 bug. --- bin/spack | 8 --- lib/spack/spack/database.py | 77 +++++++++++++++-------- lib/spack/spack/spec.py | 60 +++++++++++------- lib/spack/spack/test/database.py | 2 +- lib/spack/spack/test/spec_dag.py | 28 --------- lib/spack/spack/test/spec_yaml.py | 124 +++++++++++++++++++++++++++++++++++++ lib/spack/spack/util/spack_json.py | 56 +++++++++++++++++ lib/spack/spack/util/spack_yaml.py | 17 +++-- 8 files changed, 282 insertions(+), 90 deletions(-) create mode 100644 lib/spack/spack/util/spack_json.py (limited to 'bin') diff --git a/bin/spack b/bin/spack index 7cc1b28a37..454a9a5b2d 100755 --- a/bin/spack +++ b/bin/spack @@ -41,14 +41,6 @@ SPACK_PREFIX = os.path.dirname(os.path.dirname(SPACK_FILE)) SPACK_LIB_PATH = os.path.join(SPACK_PREFIX, "lib", "spack") sys.path.insert(0, SPACK_LIB_PATH) -# Try to use system YAML if it is available, as it might have libyaml -# support (for faster loading via C). Load it before anything in -# lib/spack/external so it will take precedence over Spack's PyYAML. -try: - import yaml -except ImportError: - pass # ignore and use slow yaml - # Add external libs SPACK_EXTERNAL_LIBS = os.path.join(SPACK_LIB_PATH, "external") sys.path.insert(0, SPACK_EXTERNAL_LIBS) diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index e8902ec024..a01795ca0f 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -55,6 +55,7 @@ from spack.version import Version import spack.spec from spack.error import SpackError import spack.util.spack_yaml as syaml +import spack.util.spack_json as sjson # DB goes in this directory underneath the root @@ -134,10 +135,12 @@ class Database(object): under ``root/.spack-db``, which is created if it does not exist. This is the ``db_dir``. - The Database will attempt to read an ``index.yaml`` file in - ``db_dir``. If it does not find one, it will be created when - needed by scanning the entire Database root for ``spec.yaml`` - files according to Spack's ``DirectoryLayout``. + The Database will attempt to read an ``index.json`` file in + ``db_dir``. If it does not find one, it will fall back to read + an ``index.yaml`` if one is present. If that does not exist, it + will create a database when needed by scanning the entire + Database root for ``spec.yaml`` files according to Spack's + ``DirectoryLayout``. Caller may optionally provide a custom ``db_dir`` parameter where data will be stored. This is intended to be used for @@ -154,7 +157,8 @@ class Database(object): self._db_dir = db_dir # Set up layout of database files within the db dir - self._index_path = join_path(self._db_dir, 'index.yaml') + self._old_yaml_index_path = join_path(self._db_dir, 'index.yaml') + self._index_path = join_path(self._db_dir, 'index.json') self._lock_path = join_path(self._db_dir, 'lock') # This is for other classes to use to lock prefix directories. 
@@ -179,8 +183,8 @@ class Database(object): """Get a read lock context manager for use in a `with` block.""" return ReadTransaction(self.lock, self._read, timeout=timeout) - def _write_to_yaml(self, stream): - """Write out the databsae to a YAML file. + def _write_to_file(self, stream): + """Write out the databsae to a JSON file. This function does not do any locking or transactions. """ @@ -201,12 +205,12 @@ class Database(object): } try: - return syaml.dump( - database, stream=stream, default_flow_style=False) + sjson.dump(database, stream) except YAMLError as e: - raise SpackYAMLError("error writing YAML database:", str(e)) + raise syaml.SpackYAMLError( + "error writing YAML database:", str(e)) - def _read_spec_from_yaml(self, hash_key, installs): + def _read_spec_from_dict(self, hash_key, installs): """Recursively construct a spec from a hash in a YAML database. Does not do any locking. @@ -241,24 +245,32 @@ class Database(object): child = data[dhash].spec spec._add_dependency(child, dtypes) - def _read_from_yaml(self, stream): + def _read_from_file(self, stream, format='json'): """ - Fill database from YAML, do not maintain old data + Fill database from file, do not maintain old data Translate the spec portions from node-dict form to spec form Does not do any locking. """ + if format.lower() == 'json': + load = sjson.load + elif format.lower() == 'yaml': + load = syaml.load + else: + raise ValueError("Invalid database format: %s" % format) + try: if isinstance(stream, basestring): with open(stream, 'r') as f: - yfile = syaml.load(f) + fdata = load(f) else: - yfile = syaml.load(stream) - + fdata = load(stream) except MarkedYAMLError as e: - raise SpackYAMLError("error parsing YAML database:", str(e)) + raise syaml.SpackYAMLError("error parsing YAML database:", str(e)) + except Exception as e: + raise CorruptDatabaseError("error parsing database:", str(e)) - if yfile is None: + if fdata is None: return def check(cond, msg): @@ -266,10 +278,10 @@ class Database(object): raise CorruptDatabaseError( "Spack database is corrupt: %s" % msg, self._index_path) - check('database' in yfile, "No 'database' attribute in YAML.") + check('database' in fdata, "No 'database' attribute in YAML.") # High-level file checks - db = yfile['database'] + db = fdata['database'] check('installs' in db, "No 'installs' in YAML DB.") check('version' in db, "No 'version' in YAML DB.") @@ -303,7 +315,7 @@ class Database(object): for hash_key, rec in installs.items(): try: # This constructs a spec DAG from the list of all installs - spec = self._read_spec_from_yaml(hash_key, installs) + spec = self._read_spec_from_dict(hash_key, installs) # Insert the brand new spec in the database. Each # spec has its own copies of its dependency specs. @@ -342,7 +354,7 @@ class Database(object): def _read_suppress_error(): try: if os.path.isfile(self._index_path): - self._read_from_yaml(self._index_path) + self._read_from_file(self._index_path) except CorruptDatabaseError as e: self._error = e self._data = {} @@ -426,7 +438,7 @@ class Database(object): # Write a temporary database file them move it into place try: with open(temp_file, 'w') as f: - self._write_to_yaml(f) + self._write_to_file(f) os.rename(temp_file, self._index_path) except: # Clean up temp file if something goes wrong. @@ -437,11 +449,24 @@ class Database(object): def _read(self): """Re-read Database from the data in the set location. - This does no locking. 
+ This does no locking, with one exception: it will automatically + migrate an index.yaml to an index.json if possible. This requires + taking a write lock. + """ if os.path.isfile(self._index_path): - # Read from YAML file if a database exists - self._read_from_yaml(self._index_path) + # Read from JSON file if a JSON database exists + self._read_from_file(self._index_path, format='json') + + elif os.path.isfile(self._old_yaml_index_path): + if os.access(self._db_dir, os.R_OK | os.W_OK): + # if we can write, then read AND write a JSON file. + self._read_from_file(self._old_yaml_index_path, format='yaml') + with WriteTransaction(self.lock, timeout=_db_lock_timeout): + self._write(None, None, None) + else: + # Read chck for a YAML file if we can't find JSON. + self._read_from_file(self._old_yaml_index_path, format='yaml') else: # The file doesn't exist, try to traverse the directory. diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index 86eaa3bb91..2ef3757689 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -117,6 +117,7 @@ from spack.build_environment import get_path_from_module, load_module from spack.util.prefix import Prefix from spack.util.string import * import spack.util.spack_yaml as syaml +import spack.util.spack_json as sjson from spack.util.spack_yaml import syaml_dict from spack.util.crypto import prefix_bits from spack.version import * @@ -153,7 +154,6 @@ __all__ = [ 'UnsatisfiableArchitectureSpecError', 'UnsatisfiableProviderSpecError', 'UnsatisfiableDependencySpecError', - 'SpackYAMLError', 'AmbiguousHashError'] # Valid pattern for an identifier in Spack @@ -1174,15 +1174,21 @@ class Spec(object): return syaml_dict([(self.name, d)]) - def to_yaml(self, stream=None): + def to_dict(self): node_list = [] for s in self.traverse(order='pre', deptype=('link', 'run')): node = s.to_node_dict() node[s.name]['hash'] = s.dag_hash() node_list.append(node) + + return syaml_dict([('spec', node_list)]) + + def to_yaml(self, stream=None): return syaml.dump( - syaml_dict([('spec', node_list)]), - stream=stream, default_flow_style=False) + self.to_dict(), stream=stream, default_flow_style=False) + + def to_json(self, stream=None): + return sjson.dump(self.to_dict(), stream) @staticmethod def from_node_dict(node): @@ -1245,22 +1251,13 @@ class Spec(object): yield dep_name, dag_hash, list(deptypes) @staticmethod - def from_yaml(stream): + def from_dict(data): """Construct a spec from YAML. Parameters: - stream -- string or file object to read from. - - TODO: currently discards hashes. Include hashes when they - represent more than the DAG does. - + data -- a nested dict/list data structure read from YAML or JSON. """ - try: - yfile = syaml.load(stream) - except MarkedYAMLError as e: - raise SpackYAMLError("error parsing YAML spec:", str(e)) - - nodes = yfile['spec'] + nodes = data['spec'] # Read nodes out of list. Root spec is the first element; # dependencies are the following elements. @@ -1285,6 +1282,32 @@ class Spec(object): return spec + @staticmethod + def from_yaml(stream): + """Construct a spec from YAML. + + Parameters: + stream -- string or file object to read from. + """ + try: + data = syaml.load(stream) + return Spec.from_dict(data) + except MarkedYAMLError as e: + raise syaml.SpackYAMLError("error parsing YAML spec:", str(e)) + + @staticmethod + def from_json(stream): + """Construct a spec from JSON. + + Parameters: + stream -- string or file object to read from. 
+ """ + try: + data = sjson.load(stream) + return Spec.from_dict(data) + except Exception as e: + raise sjson.SpackJSONError("error parsing JSON spec:", str(e)) + def _concretize_helper(self, presets=None, visited=None): """Recursive helper function for concretize(). This concretizes everything bottom-up. As things are @@ -3064,11 +3087,6 @@ class UnsatisfiableDependencySpecError(UnsatisfiableSpecError): provided, required, "dependency") -class SpackYAMLError(spack.error.SpackError): - def __init__(self, msg, yaml_error): - super(SpackYAMLError, self).__init__(msg, str(yaml_error)) - - class AmbiguousHashError(SpecError): def __init__(self, msg, *specs): super(AmbiguousHashError, self).__init__(msg) diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py index 1a1281e10e..55a1d84e20 100644 --- a/lib/spack/spack/test/database.py +++ b/lib/spack/spack/test/database.py @@ -75,7 +75,7 @@ class DatabaseTest(MockDatabase): def test_005_db_exists(self): """Make sure db cache file exists after creating.""" - index_file = join_path(self.install_path, '.spack-db', 'index.yaml') + index_file = join_path(self.install_path, '.spack-db', 'index.json') lock_file = join_path(self.install_path, '.spack-db', 'lock') self.assertTrue(os.path.exists(index_file)) diff --git a/lib/spack/spack/test/spec_dag.py b/lib/spack/spack/test/spec_dag.py index 0bc63bcf0f..dd536f945c 100644 --- a/lib/spack/spack/test/spec_dag.py +++ b/lib/spack/spack/test/spec_dag.py @@ -496,34 +496,6 @@ class SpecDagTest(MockPackagesTest): traversal = dag.traverse(deptype='run') self.assertEqual([x.name for x in traversal], names) - def test_using_ordered_dict(self): - """ Checks that dicts are ordered - - Necessary to make sure that dag_hash is stable across python - versions and processes. - """ - def descend_and_check(iterable, level=0): - from spack.util.spack_yaml import syaml_dict - from collections import Iterable, Mapping - if isinstance(iterable, Mapping): - self.assertTrue(isinstance(iterable, syaml_dict)) - return descend_and_check(iterable.values(), level=level + 1) - max_level = level - for value in iterable: - if isinstance(value, Iterable) and not isinstance(value, str): - nlevel = descend_and_check(value, level=level + 1) - if nlevel > max_level: - max_level = nlevel - return max_level - - specs = ['mpileaks ^zmpi', 'dttop', 'dtuse'] - for spec in specs: - dag = Spec(spec) - dag.normalize() - level = descend_and_check(dag.to_node_dict()) - # level just makes sure we are doing something here - self.assertTrue(level >= 5) - def test_hash_bits(self): """Ensure getting first n bits of a base32-encoded DAG hash works.""" diff --git a/lib/spack/spack/test/spec_yaml.py b/lib/spack/spack/test/spec_yaml.py index 964aea9422..442c6e6e81 100644 --- a/lib/spack/spack/test/spec_yaml.py +++ b/lib/spack/spack/test/spec_yaml.py @@ -27,6 +27,10 @@ YAML format preserves DAG informatoin in the spec. """ +import spack.util.spack_yaml as syaml +import spack.util.spack_json as sjson +from spack.util.spack_yaml import syaml_dict + from spack.spec import Spec from spack.test.mock_packages_test import * @@ -64,3 +68,123 @@ class SpecYamlTest(MockPackagesTest): for dep in ('callpath', 'mpich', 'dyninst', 'libdwarf', 'libelf'): self.assertTrue(spec[dep].eq_dag(yaml_spec[dep])) + + def test_using_ordered_dict(self): + """ Checks that dicts are ordered + + Necessary to make sure that dag_hash is stable across python + versions and processes. 
+ """ + def descend_and_check(iterable, level=0): + from spack.util.spack_yaml import syaml_dict + from collections import Iterable, Mapping + if isinstance(iterable, Mapping): + self.assertTrue(isinstance(iterable, syaml_dict)) + return descend_and_check(iterable.values(), level=level + 1) + max_level = level + for value in iterable: + if isinstance(value, Iterable) and not isinstance(value, str): + nlevel = descend_and_check(value, level=level + 1) + if nlevel > max_level: + max_level = nlevel + return max_level + + specs = ['mpileaks ^zmpi', 'dttop', 'dtuse'] + for spec in specs: + dag = Spec(spec) + dag.normalize() + level = descend_and_check(dag.to_node_dict()) + # level just makes sure we are doing something here + self.assertTrue(level >= 5) + + def test_ordered_read_not_required_for_consistent_dag_hash(self): + """Make sure ordered serialization isn't required to preserve hashes. + + For consistent hashes, we require that YAML and json documents + have their keys serialized in a deterministic order. However, we + don't want to require them to be serialized in order. This + ensures that is not reauired. + + """ + specs = ['mpileaks ^zmpi', 'dttop', 'dtuse'] + for spec in specs: + spec = Spec(spec) + spec.concretize() + + # + # Dict & corresponding YAML & JSON from the original spec. + # + spec_dict = spec.to_dict() + spec_yaml = spec.to_yaml() + spec_json = spec.to_json() + + # + # Make a spec with reversed OrderedDicts for every + # OrderedDict in the original. + # + reversed_spec_dict = reverse_all_dicts(spec.to_dict()) + + # + # Dump to YAML and JSON + # + yaml_string = syaml.dump(spec_dict, default_flow_style=False) + reversed_yaml_string = syaml.dump(reversed_spec_dict, + default_flow_style=False) + json_string = sjson.dump(spec_dict) + reversed_json_string = sjson.dump(reversed_spec_dict) + + # + # Do many consistency checks + # + + # spec yaml is ordered like the spec dict + self.assertEqual(yaml_string, spec_yaml) + self.assertEqual(json_string, spec_json) + + # reversed string is different from the original, so it + # *would* generate a different hash + self.assertNotEqual(yaml_string, reversed_yaml_string) + self.assertNotEqual(json_string, reversed_json_string) + + # build specs from the "wrongly" ordered data + round_trip_yaml_spec = Spec.from_yaml(yaml_string) + round_trip_json_spec = Spec.from_json(json_string) + round_trip_reversed_yaml_spec = Spec.from_yaml( + reversed_yaml_string) + round_trip_reversed_json_spec = Spec.from_yaml( + reversed_json_string) + + # TODO: remove this when build deps are in provenance. 
+ spec = spec.copy(deps=('link', 'run')) + + # specs are equal to the original + self.assertEqual(spec, round_trip_yaml_spec) + self.assertEqual(spec, round_trip_json_spec) + self.assertEqual(spec, round_trip_reversed_yaml_spec) + self.assertEqual(spec, round_trip_reversed_json_spec) + self.assertEqual(round_trip_yaml_spec, + round_trip_reversed_yaml_spec) + self.assertEqual(round_trip_json_spec, + round_trip_reversed_json_spec) + + # dag_hashes are equal + self.assertEqual( + spec.dag_hash(), round_trip_yaml_spec.dag_hash()) + self.assertEqual( + spec.dag_hash(), round_trip_json_spec.dag_hash()) + self.assertEqual( + spec.dag_hash(), round_trip_reversed_yaml_spec.dag_hash()) + self.assertEqual( + spec.dag_hash(), round_trip_reversed_json_spec.dag_hash()) + + +def reverse_all_dicts(data): + """Descend into data and reverse all the dictionaries""" + if isinstance(data, dict): + return syaml_dict(reversed( + [(reverse_all_dicts(k), reverse_all_dicts(v)) + for k, v in data.items()])) + elif isinstance(data, (list, tuple)): + return type(data)(reverse_all_dicts(elt) for elt in data) + else: + return data diff --git a/lib/spack/spack/util/spack_json.py b/lib/spack/spack/util/spack_json.py new file mode 100644 index 0000000000..240ce86c68 --- /dev/null +++ b/lib/spack/spack/util/spack_json.py @@ -0,0 +1,56 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +"""Simple wrapper around JSON to guarantee consistent use of load/dump. 
""" +import json +import spack.error + +__all__ = ['load', 'dump', 'SpackJSONError'] + +_json_dump_args = { + 'indent': True, + 'separators': (',', ': ') +} + + +def load(stream): + """Spack JSON needs to be ordered to support specs.""" + if isinstance(stream, basestring): + return json.loads(stream) + else: + return json.load(stream) + + +def dump(data, stream=None): + """Dump JSON with a reasonable amount of indentation and separation.""" + if stream is None: + return json.dumps(data, **_json_dump_args) + else: + return json.dump(data, stream, **_json_dump_args) + + +class SpackJSONError(spack.error.SpackError): + """Raised when there are issues with JSON parsing.""" + def __init__(self, msg, yaml_error): + super(SpackJSONError, self).__init__(msg, str(yaml_error)) diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py index c27db52066..9d4c607908 100644 --- a/lib/spack/spack/util/spack_yaml.py +++ b/lib/spack/spack/util/spack_yaml.py @@ -32,23 +32,21 @@ """ import yaml -try: - from yaml import CLoader as Loader, CDumper as Dumper -except ImportError as e: - from yaml import Loader, Dumper +from yaml import Loader, Dumper from yaml.nodes import * from yaml.constructor import ConstructorError from ordereddict_backport import OrderedDict +import spack.error + # Only export load and dump -__all__ = ['load', 'dump'] +__all__ = ['load', 'dump', 'SpackYAMLError'] # Make new classes so we can add custom attributes. # Also, use OrderedDict instead of just dict. class syaml_dict(OrderedDict): - def __repr__(self): mappings = ('%r: %r' % (k, v) for k, v in self.items()) return '{%s}' % ', '.join(mappings) @@ -153,6 +151,7 @@ class OrderedLineLoader(Loader): mark(mapping, node) return mapping + # register above new constructors OrderedLineLoader.add_constructor( u'tag:yaml.org,2002:map', OrderedLineLoader.construct_yaml_map) @@ -223,3 +222,9 @@ def load(*args, **kwargs): def dump(*args, **kwargs): kwargs['Dumper'] = OrderedLineDumper return yaml.dump(*args, **kwargs) + + +class SpackYAMLError(spack.error.SpackError): + """Raised when there are issues with YAML parsing.""" + def __init__(self, msg, yaml_error): + super(SpackYAMLError, self).__init__(msg, str(yaml_error)) -- cgit v1.2.3-60-g2f50 From 8496d8ff778e9c7dfb4dcb5557e8cfad36124620 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Tue, 13 Dec 2016 01:23:40 -0800 Subject: Add a test to ensure package names have the right case. 
(#2562) --- bin/spack | 4 +- lib/spack/spack/hooks/__init__.py | 7 +- lib/spack/spack/hooks/case_consistency.py | 101 ++++++++++++++++++++++++++++ lib/spack/spack/hooks/yaml_version_check.py | 57 ++++++++++++++++ lib/spack/spack/yaml_version_check.py | 57 ---------------- 5 files changed, 165 insertions(+), 61 deletions(-) create mode 100644 lib/spack/spack/hooks/case_consistency.py create mode 100644 lib/spack/spack/hooks/yaml_version_check.py delete mode 100644 lib/spack/spack/yaml_version_check.py (limited to 'bin') diff --git a/bin/spack b/bin/spack index 454a9a5b2d..cc9450ade7 100755 --- a/bin/spack +++ b/bin/spack @@ -156,8 +156,8 @@ def main(): import spack.util.debug as debug debug.register_interrupt_handler() - from spack.yaml_version_check import check_yaml_versions - check_yaml_versions() + # Run any available pre-run hooks + spack.hooks.pre_run() spack.spack_working_dir = working_dir if args.mock: diff --git a/lib/spack/spack/hooks/__init__.py b/lib/spack/spack/hooks/__init__.py index ff4ebc2e57..6454a865b6 100644 --- a/lib/spack/spack/hooks/__init__.py +++ b/lib/spack/spack/hooks/__init__.py @@ -64,16 +64,19 @@ class HookRunner(object): def __init__(self, hook_name): self.hook_name = hook_name - def __call__(self, pkg): + def __call__(self, *args, **kwargs): for module in all_hook_modules(): if hasattr(module, self.hook_name): hook = getattr(module, self.hook_name) if hasattr(hook, '__call__'): - hook(pkg) + hook(*args, **kwargs) + # # Define some functions that can be called to fire off hooks. # +pre_run = HookRunner('pre_run') + pre_install = HookRunner('pre_install') post_install = HookRunner('post_install') diff --git a/lib/spack/spack/hooks/case_consistency.py b/lib/spack/spack/hooks/case_consistency.py new file mode 100644 index 0000000000..faf38f7ae3 --- /dev/null +++ b/lib/spack/spack/hooks/case_consistency.py @@ -0,0 +1,101 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +from __future__ import absolute_import +import os +import re +import platform + +from llnl.util.filesystem import * + +import spack +from spack.util.executable import * + + +def pre_run(): + if platform.system() != "Darwin": + return + + git_case_consistency_check(spack.repo.get_repo('builtin').packages_path) + + +def git_case_consistency_check(path): + """Re-sync case of files in a directory with git. 
+ + On case-insensitive but case-preserving filesystems like Mac OS X, + Git doesn't properly rename files that only had their case changed. + + This checks files in a directory against git and does a + case-restoring rename (actually two renames, e.g.:: + + name -> tmp -> NAME + + We use this in Spack to ensure package directories are named + correctly. + + TODO: this check can probably be removed once package names have been + TODO: lowercase for a long while. + + """ + with working_dir(path): + # Don't bother fixing case if Spack isn't in a git repository + git = which('git') + if not git: + return + + try: + git_filenames = git('ls-tree', '--name-only', 'HEAD', output=str) + git_filenames = set(re.split(r'\s+', git_filenames.strip())) + except ProcessError: + return # Ignore errors calling git + + lower_to_mixed = {} + for fn in git_filenames: + lower = fn.lower() + mixed = lower_to_mixed.setdefault(lower, []) + mixed.append(fn) + + # Iterate through all actual files and make sure their names are + # the same as corresponding names in git + actual_filenames = os.listdir('.') + for actual in actual_filenames: + lower = actual.lower() + + # not tracked by git + if lower not in lower_to_mixed: + continue + + # Don't know what to do with multiple matches + if len(lower_to_mixed[lower]) != 1: + continue + + # Skip if case is already correct + git_name = lower_to_mixed[lower][0] + if git_name == actual: + continue + + # restore case with two renames + tmp_name = actual + '.spack.tmp' + os.rename(actual, tmp_name) + os.rename(tmp_name, git_name) diff --git a/lib/spack/spack/hooks/yaml_version_check.py b/lib/spack/spack/hooks/yaml_version_check.py new file mode 100644 index 0000000000..a4b38198bc --- /dev/null +++ b/lib/spack/spack/hooks/yaml_version_check.py @@ -0,0 +1,57 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +"""Yaml Version Check is a module for ensuring that config file +formats are compatible with the current version of Spack.""" +import os.path +import os +import llnl.util.tty as tty +import spack.util.spack_yaml as syaml +import spack.config + + +def pre_run(): + check_compiler_yaml_version() + + +def check_compiler_yaml_version(): + config_scopes = spack.config.config_scopes + for scope in config_scopes.values(): + file_name = os.path.join(scope.path, 'compilers.yaml') + data = None + if os.path.isfile(file_name): + with open(file_name) as f: + data = syaml.load(f) + + if data: + compilers = data['compilers'] + if len(compilers) > 0: + if (not isinstance(compilers, list) or + 'operating_system' not in compilers[0]['compiler']): + new_file = os.path.join(scope.path, '_old_compilers.yaml') + tty.warn('%s in out of date compilers format. ' + 'Moved to %s. Spack automatically generate ' + 'a compilers config file ' + % (file_name, new_file)) + os.rename(file_name, new_file) diff --git a/lib/spack/spack/yaml_version_check.py b/lib/spack/spack/yaml_version_check.py deleted file mode 100644 index 2c5b511d7f..0000000000 --- a/lib/spack/spack/yaml_version_check.py +++ /dev/null @@ -1,57 +0,0 @@ -############################################################################## -# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# -# This file is part of Spack. -# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. -# LLNL-CODE-647188 -# -# For details, see https://github.com/llnl/spack -# Please also see the LICENSE file for our notice and the LGPL. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License (as -# published by the Free Software Foundation) version 2.1, February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and -# conditions of the GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -############################################################################## -"""Yaml Version Check is a module for ensuring that config file -formats are compatible with the current version of Spack.""" -import os.path -import os -import llnl.util.tty as tty -import spack.util.spack_yaml as syaml -import spack.config - - -def check_yaml_versions(): - check_compiler_yaml_version() - - -def check_compiler_yaml_version(): - config_scopes = spack.config.config_scopes - for scope in config_scopes.values(): - file_name = os.path.join(scope.path, 'compilers.yaml') - data = None - if os.path.isfile(file_name): - with open(file_name) as f: - data = syaml.load(f) - - if data: - compilers = data['compilers'] - if len(compilers) > 0: - if (not isinstance(compilers, list) or - 'operating_system' not in compilers[0]['compiler']): - new_file = os.path.join(scope.path, '_old_compilers.yaml') - tty.warn('%s in out of date compilers format. ' - 'Moved to %s. Spack automatically generate ' - 'a compilers config file ' - % (file_name, new_file)) - os.rename(file_name, new_file) -- cgit v1.2.3-60-g2f50 From 7ea10e768ee1a7deab98ae538d916bbeeb0346b8 Mon Sep 17 00:00:00 2001 From: Massimiliano Culpo Date: Thu, 29 Dec 2016 16:48:48 +0100 Subject: unit tests: replace nose with pytest (#2502) * Porting: substitute nose with ytest This huge commit substitutes nose with pytest as a testing system. Things done here: * deleted external/nose as it is no longer used * moved mock resources in their own directory 'test/mock/' * ported two tests (cmd/find, build_system) to pytest native syntax as an example * build_environment, log: used monkeypatch instead of try/catch * moved global mocking of fetch_cache to an auto-used fixture * moved global mocking from test/__init__.py to conftest.py * made `spack test` a wrapper around pytest * run-unit-tests: avoid running python 2.6 tests under coverage to speed them up * use `pytest --cov` instead of coverage run to cut down testing time * mock/packages_test: moved mock yaml configuration to files instead of leaving it in the code as string literals * concretize.py: ported tests to native pytest, reverted multiprocessing in pytest.ini as it was creating the wrong report for coveralls * conftest.py, fixtures: added docstrings * concretize_preferences.py: uses fixtures instead of subclassing MockPackagesTest * directory_layout.py: uses fixtures instead of subclassing MockPackagesTest * install.py: uses fixtures instead of subclassing MockPackagesTest * optional_deps.py: uses fixtures instead of subclassing MockPackagesTest optional_deps.py: uses fixtures instead of subclassing MockPackagesTest * packages.py: uses fixtures instead of subclassing MockPackagesTest * provider_index.py: uses fixtures instead of subclassing MockPackagesTest * spec_yaml.py: uses fixtures instead of subclassing MockPackagesTest * multimethod.py: uses fixtures instead of subclassing MockPackagesTest * install.py: now uses mock_archive_url * git_fetch.py: uses fixtures instead of subclassing MockPackagesTest * hg_fetch.py: uses fixtures instead of subclassing MockPackagesTest * svn_fetch.py, mirror.py: uses fixtures instead of subclassing MockPackagesTest repo.py: deleted * test_compiler_cmd.py: uses fixtures instead of subclassing MockPackagesTest * cmd/module.py, cmd/uninstall.py: uses 
fixtures instead of subclassing MockDatabase * database.py: uses fixtures instead of subclassing MockDatabase, removed mock/database * pytest: uncluttering fixture implementations * database: changing the scope to 'module' * config.py: uses fixtures instead of subclassing MockPackagesTest * spec_dag.py, spec_semantics.py: uses fixtures instead of subclassing MockPackagesTest * stage.py: uses fixtures instead of subclassing MockPackagesTest. Removed mock directory * pytest: added docstrings to all the fixtures * pytest: final cleanup * build_system_guess.py: fixed naming and docstrings as suggested by @scheibelp * spec_syntax.py: added expected failure on parsing multiple specs closes #1976 * Add pytest and pytest-cov to Spack externals. * Make `spack flake8` ignore externals. * run-unit-tests runs spack test and not pytest. * Remove all the special stuff for `spack test` - Remove `conftest.py` magic and all the special case stuff in `bin/spack` - Spack commands can optionally take unknown arguments, if they want to handle them. - `spack test` is now a command like the others. - `spack test` now just delegates its arguments to `pytest`, but it does it by receiving unknown arguments and NOT taking an explicit help argument. * Fix error in fixtures. * Improve `spack test` command a bit. - Now supports an approximation of the old simple interface - Also supports full pytest options if you want them. * Use external coverage instead of pytest-cov * Make coverage use parallel-mode. * change __init__.py docs to include pytest --- .coveragerc | 2 + .gitignore | 2 + .travis.yml | 8 +- bin/spack | 58 +- lib/spack/docs/contribution_guide.rst | 2 +- lib/spack/external/__init__.py | 2 +- lib/spack/external/_pytest/AUTHORS | 141 ++ lib/spack/external/_pytest/LICENSE | 21 + lib/spack/external/_pytest/README.rst | 102 + lib/spack/external/_pytest/__init__.py | 2 + lib/spack/external/_pytest/_argcomplete.py | 102 + lib/spack/external/_pytest/_code/__init__.py | 9 + lib/spack/external/_pytest/_code/_py2traceback.py | 81 + lib/spack/external/_pytest/_code/code.py | 861 ++++++++ lib/spack/external/_pytest/_code/source.py | 414 ++++ lib/spack/external/_pytest/_pluggy.py | 11 + lib/spack/external/_pytest/assertion/__init__.py | 164 ++ lib/spack/external/_pytest/assertion/rewrite.py | 945 ++++++++ lib/spack/external/_pytest/assertion/util.py | 300 +++ lib/spack/external/_pytest/cacheprovider.py | 245 +++ lib/spack/external/_pytest/capture.py | 491 +++++ lib/spack/external/_pytest/compat.py | 230 ++ lib/spack/external/_pytest/config.py | 1340 ++++++++++++ lib/spack/external/_pytest/debugging.py | 124 ++ lib/spack/external/_pytest/deprecated.py | 24 + lib/spack/external/_pytest/doctest.py | 331 +++ lib/spack/external/_pytest/fixtures.py | 1134 ++++++++++ lib/spack/external/_pytest/freeze_support.py | 45 + lib/spack/external/_pytest/helpconfig.py | 144 ++ lib/spack/external/_pytest/hookspec.py | 314 +++ lib/spack/external/_pytest/junitxml.py | 413 ++++ lib/spack/external/_pytest/main.py | 762 +++++++ lib/spack/external/_pytest/mark.py | 328 +++ lib/spack/external/_pytest/monkeypatch.py | 258 +++ lib/spack/external/_pytest/nose.py | 71 + lib/spack/external/_pytest/pastebin.py | 98 + lib/spack/external/_pytest/pytester.py | 1139 ++++++++++ lib/spack/external/_pytest/python.py | 1578 ++++++++++++++ lib/spack/external/_pytest/recwarn.py | 226 ++ lib/spack/external/_pytest/resultlog.py | 107 + lib/spack/external/_pytest/runner.py | 578 +++++ lib/spack/external/_pytest/setuponly.py | 72 + 
lib/spack/external/_pytest/setupplan.py | 23 + lib/spack/external/_pytest/skipping.py | 375 ++++ lib/spack/external/_pytest/terminal.py | 593 +++++ lib/spack/external/_pytest/tmpdir.py | 124 ++ lib/spack/external/_pytest/unittest.py | 217 ++ .../external/_pytest/vendored_packages/README.md | 13 + .../external/_pytest/vendored_packages/__init__.py | 0 .../pluggy-0.4.0.dist-info/DESCRIPTION.rst | 11 + .../pluggy-0.4.0.dist-info/INSTALLER | 1 + .../pluggy-0.4.0.dist-info/LICENSE.txt | 22 + .../pluggy-0.4.0.dist-info/METADATA | 40 + .../pluggy-0.4.0.dist-info/RECORD | 9 + .../vendored_packages/pluggy-0.4.0.dist-info/WHEEL | 6 + .../pluggy-0.4.0.dist-info/metadata.json | 1 + .../pluggy-0.4.0.dist-info/top_level.txt | 1 + .../external/_pytest/vendored_packages/pluggy.py | 802 +++++++ lib/spack/external/nose/LICENSE | 502 ----- lib/spack/external/nose/__init__.py | 15 - lib/spack/external/nose/__main__.py | 8 - lib/spack/external/nose/case.py | 397 ---- lib/spack/external/nose/commands.py | 172 -- lib/spack/external/nose/config.py | 661 ------ lib/spack/external/nose/core.py | 341 --- lib/spack/external/nose/exc.py | 9 - lib/spack/external/nose/ext/__init__.py | 3 - lib/spack/external/nose/ext/dtcompat.py | 2272 -------------------- lib/spack/external/nose/failure.py | 42 - lib/spack/external/nose/importer.py | 167 -- lib/spack/external/nose/inspector.py | 207 -- lib/spack/external/nose/loader.py | 623 ------ lib/spack/external/nose/plugins/__init__.py | 190 -- lib/spack/external/nose/plugins/allmodules.py | 45 - lib/spack/external/nose/plugins/attrib.py | 286 --- lib/spack/external/nose/plugins/base.py | 725 ------- lib/spack/external/nose/plugins/builtin.py | 34 - lib/spack/external/nose/plugins/capture.py | 115 - lib/spack/external/nose/plugins/collect.py | 94 - lib/spack/external/nose/plugins/cover.py | 271 --- lib/spack/external/nose/plugins/debug.py | 67 - lib/spack/external/nose/plugins/deprecated.py | 45 - lib/spack/external/nose/plugins/doctests.py | 455 ---- lib/spack/external/nose/plugins/errorclass.py | 210 -- lib/spack/external/nose/plugins/failuredetail.py | 49 - lib/spack/external/nose/plugins/isolate.py | 103 - lib/spack/external/nose/plugins/logcapture.py | 245 --- lib/spack/external/nose/plugins/manager.py | 460 ---- lib/spack/external/nose/plugins/multiprocess.py | 835 ------- lib/spack/external/nose/plugins/plugintest.py | 416 ---- lib/spack/external/nose/plugins/prof.py | 154 -- lib/spack/external/nose/plugins/skip.py | 63 - lib/spack/external/nose/plugins/testid.py | 311 --- lib/spack/external/nose/plugins/xunit.py | 341 --- lib/spack/external/nose/proxy.py | 188 -- lib/spack/external/nose/pyversion.py | 215 -- lib/spack/external/nose/result.py | 200 -- lib/spack/external/nose/selector.py | 251 --- lib/spack/external/nose/sphinx/__init__.py | 1 - lib/spack/external/nose/sphinx/pluginopts.py | 189 -- lib/spack/external/nose/suite.py | 609 ------ lib/spack/external/nose/tools/__init__.py | 15 - lib/spack/external/nose/tools/nontrivial.py | 151 -- lib/spack/external/nose/tools/trivial.py | 54 - lib/spack/external/nose/twistedtools.py | 173 -- lib/spack/external/nose/usage.txt | 115 - lib/spack/external/nose/util.py | 668 ------ lib/spack/external/pyqver2.py | 6 +- lib/spack/external/pytest.py | 28 + lib/spack/llnl/util/lang.py | 12 + lib/spack/llnl/util/tty/log.py | 5 +- lib/spack/spack/__init__.py | 6 + lib/spack/spack/build_environment.py | 9 +- lib/spack/spack/cmd/__init__.py | 10 + lib/spack/spack/cmd/flake8.py | 60 +- lib/spack/spack/cmd/test.py | 123 +- 
lib/spack/spack/repository.py | 2 +- lib/spack/spack/test/__init__.py | 129 -- lib/spack/spack/test/architecture.py | 223 +- lib/spack/spack/test/build_system_guess.py | 87 +- lib/spack/spack/test/cmd/find.py | 51 +- lib/spack/spack/test/cmd/module.py | 111 +- lib/spack/spack/test/cmd/test_compiler_cmd.py | 90 +- lib/spack/spack/test/cmd/uninstall.py | 47 +- lib/spack/spack/test/concretize.py | 351 ++- lib/spack/spack/test/concretize_preferences.py | 133 +- lib/spack/spack/test/config.py | 261 +-- lib/spack/spack/test/conftest.py | 515 +++++ lib/spack/spack/test/data/compilers.yaml | 116 + lib/spack/spack/test/data/config.yaml | 11 + lib/spack/spack/test/data/packages.yaml | 14 + lib/spack/spack/test/database.py | 547 ++--- lib/spack/spack/test/directory_layout.py | 316 ++- lib/spack/spack/test/git_fetch.py | 138 +- lib/spack/spack/test/hg_fetch.py | 124 +- lib/spack/spack/test/install.py | 142 +- lib/spack/spack/test/mirror.py | 230 +- lib/spack/spack/test/mock_database.py | 108 - lib/spack/spack/test/mock_packages_test.py | 281 --- lib/spack/spack/test/mock_repo.py | 202 -- lib/spack/spack/test/modules.py | 446 ++-- lib/spack/spack/test/multimethod.py | 134 +- lib/spack/spack/test/optional_deps.py | 166 +- lib/spack/spack/test/packages.py | 185 +- lib/spack/spack/test/provider_index.py | 76 +- lib/spack/spack/test/spec_dag.py | 267 +-- lib/spack/spack/test/spec_semantics.py | 526 ++--- lib/spack/spack/test/spec_syntax.py | 144 +- lib/spack/spack/test/spec_yaml.py | 272 ++- lib/spack/spack/test/stage.py | 605 +++--- lib/spack/spack/test/svn_fetch.py | 135 +- lib/spack/spack/test/tally_plugin.py | 64 - lib/spack/spack/test/url_extrapolate.py | 7 +- pytest.ini | 5 + share/spack/qa/changed_files | 31 - share/spack/qa/run-unit-tests | 7 +- 156 files changed, 19218 insertions(+), 17619 deletions(-) create mode 100644 lib/spack/external/_pytest/AUTHORS create mode 100644 lib/spack/external/_pytest/LICENSE create mode 100644 lib/spack/external/_pytest/README.rst create mode 100644 lib/spack/external/_pytest/__init__.py create mode 100644 lib/spack/external/_pytest/_argcomplete.py create mode 100644 lib/spack/external/_pytest/_code/__init__.py create mode 100644 lib/spack/external/_pytest/_code/_py2traceback.py create mode 100644 lib/spack/external/_pytest/_code/code.py create mode 100644 lib/spack/external/_pytest/_code/source.py create mode 100644 lib/spack/external/_pytest/_pluggy.py create mode 100644 lib/spack/external/_pytest/assertion/__init__.py create mode 100644 lib/spack/external/_pytest/assertion/rewrite.py create mode 100644 lib/spack/external/_pytest/assertion/util.py create mode 100644 lib/spack/external/_pytest/cacheprovider.py create mode 100644 lib/spack/external/_pytest/capture.py create mode 100644 lib/spack/external/_pytest/compat.py create mode 100644 lib/spack/external/_pytest/config.py create mode 100644 lib/spack/external/_pytest/debugging.py create mode 100644 lib/spack/external/_pytest/deprecated.py create mode 100644 lib/spack/external/_pytest/doctest.py create mode 100644 lib/spack/external/_pytest/fixtures.py create mode 100644 lib/spack/external/_pytest/freeze_support.py create mode 100644 lib/spack/external/_pytest/helpconfig.py create mode 100644 lib/spack/external/_pytest/hookspec.py create mode 100644 lib/spack/external/_pytest/junitxml.py create mode 100644 lib/spack/external/_pytest/main.py create mode 100644 lib/spack/external/_pytest/mark.py create mode 100644 lib/spack/external/_pytest/monkeypatch.py create mode 100644 lib/spack/external/_pytest/nose.py 
create mode 100644 lib/spack/external/_pytest/pastebin.py create mode 100644 lib/spack/external/_pytest/pytester.py create mode 100644 lib/spack/external/_pytest/python.py create mode 100644 lib/spack/external/_pytest/recwarn.py create mode 100644 lib/spack/external/_pytest/resultlog.py create mode 100644 lib/spack/external/_pytest/runner.py create mode 100644 lib/spack/external/_pytest/setuponly.py create mode 100644 lib/spack/external/_pytest/setupplan.py create mode 100644 lib/spack/external/_pytest/skipping.py create mode 100644 lib/spack/external/_pytest/terminal.py create mode 100644 lib/spack/external/_pytest/tmpdir.py create mode 100644 lib/spack/external/_pytest/unittest.py create mode 100644 lib/spack/external/_pytest/vendored_packages/README.md create mode 100644 lib/spack/external/_pytest/vendored_packages/__init__.py create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy.py delete mode 100644 lib/spack/external/nose/LICENSE delete mode 100644 lib/spack/external/nose/__init__.py delete mode 100644 lib/spack/external/nose/__main__.py delete mode 100644 lib/spack/external/nose/case.py delete mode 100644 lib/spack/external/nose/commands.py delete mode 100644 lib/spack/external/nose/config.py delete mode 100644 lib/spack/external/nose/core.py delete mode 100644 lib/spack/external/nose/exc.py delete mode 100644 lib/spack/external/nose/ext/__init__.py delete mode 100644 lib/spack/external/nose/ext/dtcompat.py delete mode 100644 lib/spack/external/nose/failure.py delete mode 100644 lib/spack/external/nose/importer.py delete mode 100644 lib/spack/external/nose/inspector.py delete mode 100644 lib/spack/external/nose/loader.py delete mode 100644 lib/spack/external/nose/plugins/__init__.py delete mode 100644 lib/spack/external/nose/plugins/allmodules.py delete mode 100644 lib/spack/external/nose/plugins/attrib.py delete mode 100644 lib/spack/external/nose/plugins/base.py delete mode 100644 lib/spack/external/nose/plugins/builtin.py delete mode 100644 lib/spack/external/nose/plugins/capture.py delete mode 100644 lib/spack/external/nose/plugins/collect.py delete mode 100644 lib/spack/external/nose/plugins/cover.py delete mode 100644 lib/spack/external/nose/plugins/debug.py delete mode 100644 lib/spack/external/nose/plugins/deprecated.py delete mode 100644 lib/spack/external/nose/plugins/doctests.py delete mode 100644 lib/spack/external/nose/plugins/errorclass.py delete mode 100644 lib/spack/external/nose/plugins/failuredetail.py delete mode 100644 lib/spack/external/nose/plugins/isolate.py delete mode 100644 lib/spack/external/nose/plugins/logcapture.py delete mode 100644 lib/spack/external/nose/plugins/manager.py delete mode 100644 lib/spack/external/nose/plugins/multiprocess.py delete mode 100644 
lib/spack/external/nose/plugins/plugintest.py delete mode 100644 lib/spack/external/nose/plugins/prof.py delete mode 100644 lib/spack/external/nose/plugins/skip.py delete mode 100644 lib/spack/external/nose/plugins/testid.py delete mode 100644 lib/spack/external/nose/plugins/xunit.py delete mode 100644 lib/spack/external/nose/proxy.py delete mode 100644 lib/spack/external/nose/pyversion.py delete mode 100644 lib/spack/external/nose/result.py delete mode 100644 lib/spack/external/nose/selector.py delete mode 100644 lib/spack/external/nose/sphinx/__init__.py delete mode 100644 lib/spack/external/nose/sphinx/pluginopts.py delete mode 100644 lib/spack/external/nose/suite.py delete mode 100644 lib/spack/external/nose/tools/__init__.py delete mode 100644 lib/spack/external/nose/tools/nontrivial.py delete mode 100644 lib/spack/external/nose/tools/trivial.py delete mode 100644 lib/spack/external/nose/twistedtools.py delete mode 100644 lib/spack/external/nose/usage.txt delete mode 100644 lib/spack/external/nose/util.py create mode 100644 lib/spack/external/pytest.py create mode 100644 lib/spack/spack/test/conftest.py create mode 100644 lib/spack/spack/test/data/compilers.yaml create mode 100644 lib/spack/spack/test/data/config.yaml create mode 100644 lib/spack/spack/test/data/packages.yaml delete mode 100644 lib/spack/spack/test/mock_database.py delete mode 100644 lib/spack/spack/test/mock_packages_test.py delete mode 100644 lib/spack/spack/test/mock_repo.py delete mode 100644 lib/spack/spack/test/tally_plugin.py create mode 100644 pytest.ini delete mode 100755 share/spack/qa/changed_files (limited to 'bin') diff --git a/.coveragerc b/.coveragerc index a1271a94fc..0201a4b502 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,6 +1,8 @@ # -*- conf -*- # .coveragerc to control coverage.py [run] +parallel = True +concurrency = multiprocessing branch = True source = lib omit = diff --git a/.gitignore b/.gitignore index a451f9e14e..1a95d49377 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,5 @@ .coverage #* .#* +/.cache +/bin/spackc diff --git a/.travis.yml b/.travis.yml index 9553a85771..17549e42ab 100644 --- a/.travis.yml +++ b/.travis.yml @@ -37,10 +37,10 @@ addons: # Install various dependencies install: - - pip install coveralls - - pip install flake8 - - pip install sphinx - - pip install mercurial + - pip install --upgrade coveralls + - pip install --upgrade flake8 + - pip install --upgrade sphinx + - pip install --upgrade mercurial before_script: # Need this for the git tests to succeed. diff --git a/bin/spack b/bin/spack index cc9450ade7..2ff55a486b 100755 --- a/bin/spack +++ b/bin/spack @@ -31,6 +31,7 @@ if (sys.version_info[0] > 2) or (sys.version_info[:2] < (2, 6)): "This is Python %d.%d.%d." % v_info) import os +import inspect # Find spack's location and its prefix. SPACK_FILE = os.path.realpath(os.path.expanduser(__file__)) @@ -129,6 +130,7 @@ parser.add_argument('-V', '--version', action='version', # subparser for setup. subparsers = parser.add_subparsers(metavar='SUBCOMMAND', dest="command") + import spack.cmd for cmd in spack.cmd.commands: module = spack.cmd.get_module(cmd) @@ -136,16 +138,8 @@ for cmd in spack.cmd.commands: subparser = subparsers.add_parser(cmd_name, help=module.description) module.setup_parser(subparser) -# Just print help and exit if run with no arguments at all -if len(sys.argv) == 1: - parser.print_help() - sys.exit(1) - -# actually parse the args. -args = parser.parse_args() - -def main(): +def _main(args, unknown_args): # Set up environment based on args. 
tty.set_verbose(args.verbose) tty.set_debug(args.debug) @@ -171,8 +165,21 @@ def main(): # Try to load the particular command asked for and run it command = spack.cmd.get_command(args.command.replace('-', '_')) + + # Allow commands to inject an optional argument and get unknown args + # if they want to handle them. + info = dict(inspect.getmembers(command)) + varnames = info['__code__'].co_varnames + argcount = info['__code__'].co_argcount + + # Actually execute the command try: - return_val = command(parser, args) + if argcount == 3 and varnames[2] == 'unknown_args': + return_val = command(parser, args, unknown_args) + else: + if unknown_args: + tty.die('unrecognized arguments: %s' % ' '.join(unknown_args)) + return_val = command(parser, args) except SpackError as e: e.die() except KeyboardInterrupt: @@ -188,11 +195,26 @@ def main(): tty.die("Bad return value from command %s: %s" % (args.command, return_val)) -if args.profile: - import cProfile - cProfile.run('main()', sort='time') -elif args.pdb: - import pdb - pdb.run('main()') -else: - main() + +def main(args): + # Just print help and exit if run with no arguments at all + if len(args) == 1: + parser.print_help() + sys.exit(1) + + # actually parse the args. + args, unknown = parser.parse_known_args() + + if args.profile: + import cProfile + cProfile.runctx('_main(args, unknown)', globals(), locals(), + sort='time') + elif args.pdb: + import pdb + pdb.runctx('_main(args, unknown)', globals(), locals()) + else: + _main(args, unknown) + + +if __name__ == '__main__': + main(sys.argv) diff --git a/lib/spack/docs/contribution_guide.rst b/lib/spack/docs/contribution_guide.rst index 49595fecf8..4abf97ef92 100644 --- a/lib/spack/docs/contribution_guide.rst +++ b/lib/spack/docs/contribution_guide.rst @@ -75,7 +75,7 @@ This allows you to develop iteratively: make a change, test that change, make another change, test that change, etc. To get a list of all available unit tests, run: -.. command-output:: spack test --list +.. command-output:: spack test --collect-only Unit tests are crucial to making sure bugs aren't introduced into Spack. If you are modifying core Spack libraries or adding new functionality, please consider diff --git a/lib/spack/external/__init__.py b/lib/spack/external/__init__.py index 88d39a7654..49886ae595 100644 --- a/lib/spack/external/__init__.py +++ b/lib/spack/external/__init__.py @@ -35,7 +35,7 @@ So far: jsonschema: An implementation of JSON Schema for Python. - nose: The nose testing framework. + pytest: Testing framework used by Spack. ordereddict: We include our own version to be Python 2.6 compatible. 
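Note on the bin/spack changes above: the refactored dispatcher now parses with parse_known_args() and forwards unrecognized arguments to a command only when that command's function takes a third parameter literally named 'unknown_args' (the "argcount == 3 and varnames[2] == 'unknown_args'" check); otherwise any leftovers are fatal. Below is a minimal sketch of a command module that opts in; the module name, description, and option are hypothetical and not part of this patch:

    # lib/spack/spack/cmd/passthrough.py  (hypothetical example)
    description = "forward unrecognized arguments to an external tool"

    def setup_parser(subparser):
        subparser.add_argument('tool', help="name of the tool to run")

    def passthrough(parser, args, unknown_args):
        # Because the third parameter is named 'unknown_args', _main() in
        # bin/spack passes along everything argparse could not recognize,
        # instead of dying with "unrecognized arguments: ...".
        print("running %s with extra args: %s" % (args.tool, ' '.join(unknown_args)))

Commands that keep the usual command(parser, args) signature are unchanged, except that stray arguments now produce an explicit error. Relatedly, with the switch from nose to pytest, the contribution guide above now lists unit tests via "spack test --collect-only" rather than "spack test --list".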
diff --git a/lib/spack/external/_pytest/AUTHORS b/lib/spack/external/_pytest/AUTHORS new file mode 100644 index 0000000000..8c7cb19cee --- /dev/null +++ b/lib/spack/external/_pytest/AUTHORS @@ -0,0 +1,141 @@ +Holger Krekel, holger at merlinux eu +merlinux GmbH, Germany, office at merlinux eu + +Contributors include:: + +Abdeali JK +Abhijeet Kasurde +Ahn Ki-Wook +Alexei Kozlenok +Anatoly Bubenkoff +Andreas Zeidler +Andrzej Ostrowski +Andy Freeland +Anthon van der Neut +Antony Lee +Armin Rigo +Aron Curzon +Aviv Palivoda +Ben Webb +Benjamin Peterson +Bernard Pratz +Bob Ippolito +Brian Dorsey +Brian Okken +Brianna Laugher +Bruno Oliveira +Cal Leeming +Carl Friedrich Bolz +Charles Cloud +Charnjit SiNGH (CCSJ) +Chris Lamb +Christian Boelsen +Christian Theunert +Christian Tismer +Christopher Gilling +Daniel Grana +Daniel Hahler +Daniel Nuri +Daniel Wandschneider +Danielle Jenkins +Dave Hunt +David Díaz-Barquero +David Mohr +David Vierra +Diego Russo +Dmitry Dygalo +Duncan Betts +Edison Gustavo Muenz +Edoardo Batini +Eduardo Schettino +Elizaveta Shashkova +Endre Galaczi +Eric Hunsberger +Eric Siegerman +Erik M. Bray +Feng Ma +Florian Bruhin +Floris Bruynooghe +Gabriel Reis +Georgy Dyuldin +Graham Horler +Greg Price +Grig Gheorghiu +Grigorii Eremeev (budulianin) +Guido Wesdorp +Harald Armin Massa +Ian Bicking +Jaap Broekhuizen +Jan Balster +Janne Vanhala +Jason R. Coombs +Javier Domingo Cansino +Javier Romero +John Towler +Jon Sonesen +Jordan Guymon +Joshua Bronson +Jurko Gospodnetić +Justyna Janczyszyn +Kale Kundert +Katarzyna Jachim +Kevin Cox +Lee Kamentsky +Lev Maximov +Lukas Bednar +Luke Murphy +Maciek Fijalkowski +Maho +Marc Schlaich +Marcin Bachry +Mark Abramowitz +Markus Unterwaditzer +Martijn Faassen +Martin K. Scherer +Martin Prusse +Mathieu Clabaut +Matt Bachmann +Matt Williams +Matthias Hafner +mbyt +Michael Aquilina +Michael Birtwell +Michael Droettboom +Michael Seifert +Mike Lundy +Ned Batchelder +Neven Mundar +Nicolas Delaby +Oleg Pidsadnyi +Oliver Bestwalter +Omar Kohl +Pieter Mulder +Piotr Banaszkiewicz +Punyashloka Biswal +Quentin Pradet +Ralf Schmitt +Raphael Pierzina +Raquel Alegre +Roberto Polli +Romain Dorgueil +Roman Bolshakov +Ronny Pfannschmidt +Ross Lawley +Russel Winder +Ryan Wooden +Samuele Pedroni +Simon Gomizelj +Stefan Farmbauer +Stefan Zimmermann +Stefano Taschini +Steffen Allner +Stephan Obermann +Tareq Alayan +Ted Xiao +Thomas Grainger +Tom Viner +Trevor Bekolay +Tyler Goodlet +Vasily Kuznetsov +Wouter van Ackooy +Xuecong Liao diff --git a/lib/spack/external/_pytest/LICENSE b/lib/spack/external/_pytest/LICENSE new file mode 100644 index 0000000000..9e27bd7841 --- /dev/null +++ b/lib/spack/external/_pytest/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2004-2016 Holger Krekel and others + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/lib/spack/external/_pytest/README.rst b/lib/spack/external/_pytest/README.rst new file mode 100644 index 0000000000..d5650af655 --- /dev/null +++ b/lib/spack/external/_pytest/README.rst @@ -0,0 +1,102 @@ +.. image:: http://docs.pytest.org/en/latest/_static/pytest1.png + :target: http://docs.pytest.org + :align: center + :alt: pytest + +------ + +.. image:: https://img.shields.io/pypi/v/pytest.svg + :target: https://pypi.python.org/pypi/pytest +.. image:: https://img.shields.io/pypi/pyversions/pytest.svg + :target: https://pypi.python.org/pypi/pytest +.. image:: https://img.shields.io/coveralls/pytest-dev/pytest/master.svg + :target: https://coveralls.io/r/pytest-dev/pytest +.. image:: https://travis-ci.org/pytest-dev/pytest.svg?branch=master + :target: https://travis-ci.org/pytest-dev/pytest +.. image:: https://ci.appveyor.com/api/projects/status/mrgbjaua7t33pg6b?svg=true + :target: https://ci.appveyor.com/project/pytestbot/pytest + +The ``pytest`` framework makes it easy to write small tests, yet +scales to support complex functional testing for applications and libraries. + +An example of a simple test: + +.. code-block:: python + + # content of test_sample.py + def inc(x): + return x + 1 + + def test_answer(): + assert inc(3) == 5 + + +To execute it:: + + $ pytest + ============================= test session starts ============================= + collected 1 items + + test_sample.py F + + ================================== FAILURES =================================== + _________________________________ test_answer _________________________________ + + def test_answer(): + > assert inc(3) == 5 + E assert 4 == 5 + E + where 4 = inc(3) + + test_sample.py:5: AssertionError + ========================== 1 failed in 0.04 seconds =========================== + + +Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started `_ for more examples. + + +Features +-------- + +- Detailed info on failing `assert statements `_ (no need to remember ``self.assert*`` names); + +- `Auto-discovery + `_ + of test modules and functions; + +- `Modular fixtures `_ for + managing small or parametrized long-lived test resources; + +- Can run `unittest `_ (or trial), + `nose `_ test suites out of the box; + +- Python2.6+, Python3.3+, PyPy-2.3, Jython-2.5 (untested); + +- Rich plugin architecture, with over 150+ `external plugins `_ and thriving community; + + +Documentation +------------- + +For full documentation, including installation, tutorials and PDF documents, please see http://docs.pytest.org. + + +Bugs/Requests +------------- + +Please use the `GitHub issue tracker `_ to submit bugs or request features. + + +Changelog +--------- + +Consult the `Changelog `__ page for fixes and enhancements of each version. + + +License +------- + +Copyright Holger Krekel and others, 2004-2016. + +Distributed under the terms of the `MIT`_ license, pytest is free and open source software. + +.. 
_`MIT`: https://github.com/pytest-dev/pytest/blob/master/LICENSE diff --git a/lib/spack/external/_pytest/__init__.py b/lib/spack/external/_pytest/__init__.py new file mode 100644 index 0000000000..be20d3d41c --- /dev/null +++ b/lib/spack/external/_pytest/__init__.py @@ -0,0 +1,2 @@ +# +__version__ = '3.0.5' diff --git a/lib/spack/external/_pytest/_argcomplete.py b/lib/spack/external/_pytest/_argcomplete.py new file mode 100644 index 0000000000..3ab679d8be --- /dev/null +++ b/lib/spack/external/_pytest/_argcomplete.py @@ -0,0 +1,102 @@ + +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code. + +argcomplete does not support python 2.5 (although the changes for that +are minor). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). + +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" + +import sys +import os +from glob import glob + +class FastFilesCompleter: + 'Fast file completer class' + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: # + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if '*' not in prefix and '?' 
not in prefix: + if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + globbed.extend(glob(prefix + '.*')) + prefix += '*' + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += '/' + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + + +if os.environ.get('_ARGCOMPLETE'): + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser) +else: + def try_argcomplete(parser): pass + filescompleter = None diff --git a/lib/spack/external/_pytest/_code/__init__.py b/lib/spack/external/_pytest/_code/__init__.py new file mode 100644 index 0000000000..3463c11eac --- /dev/null +++ b/lib/spack/external/_pytest/_code/__init__.py @@ -0,0 +1,9 @@ +""" python inspection/code generation API """ +from .code import Code # noqa +from .code import ExceptionInfo # noqa +from .code import Frame # noqa +from .code import Traceback # noqa +from .code import getrawcode # noqa +from .source import Source # noqa +from .source import compile_ as compile # noqa +from .source import getfslineno # noqa diff --git a/lib/spack/external/_pytest/_code/_py2traceback.py b/lib/spack/external/_pytest/_code/_py2traceback.py new file mode 100644 index 0000000000..a830d9899a --- /dev/null +++ b/lib/spack/external/_pytest/_code/_py2traceback.py @@ -0,0 +1,81 @@ +# copied from python-2.7.3's traceback.py +# CHANGES: +# - some_str is replaced, trying to create unicode strings +# +import types + +def format_exception_only(etype, value): + """Format the exception part of a traceback. + + The arguments are the exception type and value such as given by + sys.last_type and sys.last_value. The return value is a list of + strings, each ending in a newline. + + Normally, the list contains a single string; however, for + SyntaxError exceptions, it contains several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the list. + + """ + + # An instance should not have a meaningful value parameter, but + # sometimes does, particularly for string exceptions, such as + # >>> raise string1, string2 # deprecated + # + # Clear these out first because issubtype(string1, SyntaxError) + # would throw another exception and mask the original problem. + if (isinstance(etype, BaseException) or + isinstance(etype, types.InstanceType) or + etype is None or type(etype) is str): + return [_format_final_exc_line(etype, value)] + + stype = etype.__name__ + + if not issubclass(etype, SyntaxError): + return [_format_final_exc_line(stype, value)] + + # It was a syntax error; show exactly where the problem was found. 
+ lines = [] + try: + msg, (filename, lineno, offset, badline) = value.args + except Exception: + pass + else: + filename = filename or "" + lines.append(' File "%s", line %d\n' % (filename, lineno)) + if badline is not None: + if isinstance(badline, bytes): # python 2 only + badline = badline.decode('utf-8', 'replace') + lines.append(u' %s\n' % badline.strip()) + if offset is not None: + caretspace = badline.rstrip('\n')[:offset].lstrip() + # non-space whitespace (likes tabs) must be kept for alignment + caretspace = ((c.isspace() and c or ' ') for c in caretspace) + # only three spaces to account for offset1 == pos 0 + lines.append(' %s^\n' % ''.join(caretspace)) + value = msg + + lines.append(_format_final_exc_line(stype, value)) + return lines + +def _format_final_exc_line(etype, value): + """Return a list of a single line -- normal case for format_exception_only""" + valuestr = _some_str(value) + if value is None or not valuestr: + line = "%s\n" % etype + else: + line = "%s: %s\n" % (etype, valuestr) + return line + +def _some_str(value): + try: + return unicode(value) + except Exception: + try: + return str(value) + except Exception: + pass + return '' % type(value).__name__ diff --git a/lib/spack/external/_pytest/_code/code.py b/lib/spack/external/_pytest/_code/code.py new file mode 100644 index 0000000000..616d5c4313 --- /dev/null +++ b/lib/spack/external/_pytest/_code/code.py @@ -0,0 +1,861 @@ +import sys +from inspect import CO_VARARGS, CO_VARKEYWORDS +import re +from weakref import ref + +import py +builtin_repr = repr + +reprlib = py.builtin._tryimport('repr', 'reprlib') + +if sys.version_info[0] >= 3: + from traceback import format_exception_only +else: + from ._py2traceback import format_exception_only + + +class Code(object): + """ wrapper around Python code objects """ + def __init__(self, rawcode): + if not hasattr(rawcode, "co_filename"): + rawcode = getrawcode(rawcode) + try: + self.filename = rawcode.co_filename + self.firstlineno = rawcode.co_firstlineno - 1 + self.name = rawcode.co_name + except AttributeError: + raise TypeError("not a code object: %r" %(rawcode,)) + self.raw = rawcode + + def __eq__(self, other): + return self.raw == other.raw + + __hash__ = None + + def __ne__(self, other): + return not self == other + + @property + def path(self): + """ return a path object pointing to source code (note that it + might not point to an actually existing file). """ + try: + p = py.path.local(self.raw.co_filename) + # maybe don't try this checking + if not p.check(): + raise OSError("py.path check failed.") + except OSError: + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? 
+ p = self.raw.co_filename + + return p + + @property + def fullsource(self): + """ return a _pytest._code.Source object for the full source file of the code + """ + from _pytest._code import source + full, _ = source.findsource(self.raw) + return full + + def source(self): + """ return a _pytest._code.Source object for the code object's source only + """ + # return source only for that part of code + import _pytest._code + return _pytest._code.Source(self.raw) + + def getargs(self, var=False): + """ return a tuple with the argument names for the code object + + if 'var' is set True also return the names of the variable and + keyword arguments when present + """ + # handfull shortcut for getting args + raw = self.raw + argcount = raw.co_argcount + if var: + argcount += raw.co_flags & CO_VARARGS + argcount += raw.co_flags & CO_VARKEYWORDS + return raw.co_varnames[:argcount] + +class Frame(object): + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + def __init__(self, frame): + self.lineno = frame.f_lineno - 1 + self.f_globals = frame.f_globals + self.f_locals = frame.f_locals + self.raw = frame + self.code = Code(frame.f_code) + + @property + def statement(self): + """ statement this frame is at """ + import _pytest._code + if self.code.fullsource is None: + return _pytest._code.Source("") + return self.code.fullsource.getstatement(self.lineno) + + def eval(self, code, **vars): + """ evaluate 'code' in the frame + + 'vars' are optional additional local variables + + returns the result of the evaluation + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def exec_(self, code, **vars): + """ exec 'code' in the frame + + 'vars' are optiona; additional local variables + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + py.builtin.exec_(code, self.f_globals, f_locals ) + + def repr(self, object): + """ return a 'safe' (non-recursive, one-line) string repr for 'object' + """ + return py.io.saferepr(object) + + def is_true(self, object): + return object + + def getargs(self, var=False): + """ return a list of tuples (name, value) for all arguments + + if 'var' is set True also include the variable and keyword + arguments when present + """ + retval = [] + for arg in self.code.getargs(var): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + +class TracebackEntry(object): + """ a single entry in a traceback """ + + _repr_style = None + exprinfo = None + + def __init__(self, rawentry, excinfo=None): + self._excinfo = excinfo + self._rawentry = rawentry + self.lineno = rawentry.tb_lineno - 1 + + def set_repr_style(self, mode): + assert mode in ("short", "long") + self._repr_style = mode + + @property + def frame(self): + import _pytest._code + return _pytest._code.Frame(self._rawentry.tb_frame) + + @property + def relline(self): + return self.lineno - self.frame.code.firstlineno + + def __repr__(self): + return "" %(self.frame.code.path, self.lineno+1) + + @property + def statement(self): + """ _pytest._code.Source object for the current statement """ + source = self.frame.code.fullsource + return source.getstatement(self.lineno) + + @property + def path(self): + """ path to the source code """ + return self.frame.code.path + + def getlocals(self): + return self.frame.f_locals + locals = property(getlocals, None, None, "locals of underlaying frame") + + def getfirstlinesource(self): + # on 
Jython this firstlineno can be -1 apparently + return max(self.frame.code.firstlineno, 0) + + def getsource(self, astcache=None): + """ return failing source code. """ + # we use the passed in astcache to not reparse asttrees + # within exception info printing + from _pytest._code.source import getstatementrange_ast + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast(self.lineno, source, + astnode=astnode) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self): + """ return True if the current frame has a var __tracebackhide__ + resolving to True + + If __tracebackhide__ is a callable, it gets called with the + ExceptionInfo instance and can decide whether to hide the traceback. + + mostly for internal use + """ + try: + tbh = self.frame.f_locals['__tracebackhide__'] + except KeyError: + try: + tbh = self.frame.f_globals['__tracebackhide__'] + except KeyError: + return False + + if py.builtin.callable(tbh): + return tbh(None if self._excinfo is None else self._excinfo()) + else: + return tbh + + def __str__(self): + try: + fn = str(self.path) + except py.error.Error: + fn = '???' + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except: + line = "???" + return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) + + def name(self): + return self.frame.code.raw.co_name + name = property(name, None, None, "co_name of underlaying code") + +class Traceback(list): + """ Traceback objects encapsulate and offer higher level + access to Traceback entries. + """ + Entry = TracebackEntry + def __init__(self, tb, excinfo=None): + """ initialize from given python traceback object and ExceptionInfo """ + self._excinfo = excinfo + if hasattr(tb, 'tb_next'): + def f(cur): + while cur is not None: + yield self.Entry(cur, excinfo=excinfo) + cur = cur.tb_next + list.__init__(self, f(tb)) + else: + list.__init__(self, tb) + + def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): + """ return a Traceback instance wrapping part of this Traceback + + by provding any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined + + this allows cutting the first part of a Traceback instance e.g. 
+ for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback) + """ + for x in self: + code = x.frame.code + codepath = code.path + if ((path is None or codepath == path) and + (excludepath is None or not hasattr(codepath, 'relto') or + not codepath.relto(excludepath)) and + (lineno is None or x.lineno == lineno) and + (firstlineno is None or x.frame.code.firstlineno == firstlineno)): + return Traceback(x._rawentry, self._excinfo) + return self + + def __getitem__(self, key): + val = super(Traceback, self).__getitem__(key) + if isinstance(key, type(slice(0))): + val = self.__class__(val) + return val + + def filter(self, fn=lambda x: not x.ishidden()): + """ return a Traceback instance with certain items removed + + fn is a function that gets a single argument, a TracebackEntry + instance, and should return True when the item should be added + to the Traceback, False when not + + by default this removes all the TracebackEntries which are hidden + (see ishidden() above) + """ + return Traceback(filter(fn, self), self._excinfo) + + def getcrashentry(self): + """ return last non-hidden traceback entry that lead + to the exception of a traceback. + """ + for i in range(-1, -len(self)-1, -1): + entry = self[i] + if not entry.ishidden(): + return entry + return self[-1] + + def recursionindex(self): + """ return the index of the frame/TracebackEntry where recursion + originates if appropriate, None if no recursion occurred + """ + cache = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + #XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + #print "checking for recursion at", key + l = cache.setdefault(key, []) + if l: + f = entry.frame + loc = f.f_locals + for otherloc in l: + if f.is_true(f.eval(co_equal, + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc)): + return i + l.append(entry.frame.f_locals) + return None + + +co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2', + '?', 'eval') + +class ExceptionInfo(object): + """ wraps sys.exc_info() objects and offers + help for navigating the traceback. 
+ """ + _striptext = '' + def __init__(self, tup=None, exprinfo=None): + import _pytest._code + if tup is None: + tup = sys.exc_info() + if exprinfo is None and isinstance(tup[1], AssertionError): + exprinfo = getattr(tup[1], 'msg', None) + if exprinfo is None: + exprinfo = py._builtin._totext(tup[1]) + if exprinfo and exprinfo.startswith('assert '): + self._striptext = 'AssertionError: ' + self._excinfo = tup + #: the exception class + self.type = tup[0] + #: the exception instance + self.value = tup[1] + #: the exception raw traceback + self.tb = tup[2] + #: the exception type name + self.typename = self.type.__name__ + #: the exception traceback (_pytest._code.Traceback instance) + self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self)) + + def __repr__(self): + return "" % (self.typename, len(self.traceback)) + + def exconly(self, tryshort=False): + """ return the exception as a string + + when 'tryshort' resolves to True, and the exception is a + _pytest._code._AssertionError, only the actual exception part of + the exception representation is returned (so 'AssertionError: ' is + removed from the beginning) + """ + lines = format_exception_only(self.type, self.value) + text = ''.join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext):] + return text + + def errisinstance(self, exc): + """ return True if the exception is an instance of exc """ + return isinstance(self.value, exc) + + def _getreprcrash(self): + exconly = self.exconly(tryshort=True) + entry = self.traceback.getcrashentry() + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + return ReprFileLocation(path, lineno+1, exconly) + + def getrepr(self, showlocals=False, style="long", + abspath=False, tbfilter=True, funcargs=False): + """ return str()able representation of this exception info. + showlocals: show locals per traceback entry + style: long|short|no|native traceback style + tbfilter: hide entries (where __tracebackhide__ is true) + + in case of style==native, tbfilter and showlocals is ignored. + """ + if style == 'native': + return ReprExceptionInfo(ReprTracebackNative( + py.std.traceback.format_exception( + self.type, + self.value, + self.traceback[0]._rawentry, + )), self._getreprcrash()) + + fmt = FormattedExcinfo(showlocals=showlocals, style=style, + abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) + return fmt.repr_excinfo(self) + + def __str__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return str(loc) + + def __unicode__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return unicode(loc) + + def match(self, regexp): + """ + Match the regular expression 'regexp' on the string representation of + the exception. If it matches then True is returned (so that it is + possible to write 'assert excinfo.match()'). If it doesn't match an + AssertionError is raised. + """ + __tracebackhide__ = True + if not re.search(regexp, str(self.value)): + assert 0, "Pattern '{0!s}' not found in '{1!s}'".format( + regexp, self.value) + return True + + +class FormattedExcinfo(object): + """ presenting information about failing Functions and Generators. 
""" + # for traceback entries + flow_marker = ">" + fail_marker = "E" + + def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False): + self.showlocals = showlocals + self.style = style + self.tbfilter = tbfilter + self.funcargs = funcargs + self.abspath = abspath + self.astcache = {} + + def _getindent(self, source): + # figure out indent for given source + try: + s = str(source.getstatement(len(source)-1)) + except KeyboardInterrupt: + raise + except: + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except: + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry): + source = entry.getsource(self.astcache) + if source is not None: + source = source.deindent() + return source + + def _saferepr(self, obj): + return py.io.saferepr(obj) + + def repr_args(self, entry): + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(var=True): + args.append((argname, self._saferepr(argvalue))) + return ReprFuncArgs(args) + + def get_source(self, source, line_index=-1, excinfo=None, short=False): + """ return formatted and marked up source lines. """ + import _pytest._code + lines = [] + if source is None or line_index >= len(source.lines): + source = _pytest._code.Source("???") + line_index = 0 + if line_index < 0: + line_index += len(source) + space_prefix = " " + if short: + lines.append(space_prefix + source.lines[line_index].strip()) + else: + for line in source.lines[:line_index]: + lines.append(space_prefix + line) + lines.append(self.flow_marker + " " + source.lines[line_index]) + for line in source.lines[line_index+1:]: + lines.append(space_prefix + line) + if excinfo is not None: + indent = 4 if short else self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_exconly(self, excinfo, indent=4, markall=False): + lines = [] + indent = " " * indent + # get the real exception information out + exlines = excinfo.exconly(tryshort=True).split('\n') + failindent = self.fail_marker + indent[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indent + return lines + + def repr_locals(self, locals): + if self.showlocals: + lines = [] + keys = [loc for loc in locals if loc[0] != "@"] + keys.sort() + for name in keys: + value = locals[name] + if name == '__builtins__': + lines.append("__builtins__ = ") + else: + # This formatting could all be handled by the + # _repr() function, which is only reprlib.Repr in + # disguise, so is very configurable. 
+ str_repr = self._saferepr(value) + #if len(str_repr) < 70 or not isinstance(value, + # (list, tuple, dict)): + lines.append("%-10s = %s" %(name, str_repr)) + #else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # py.std.pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + + def repr_traceback_entry(self, entry, excinfo=None): + import _pytest._code + source = self._getentrysource(entry) + if source is None: + source = _pytest._code.Source("???") + line_index = 0 + else: + # entry.getfirstlinesource() can be -1, should be 0 on jython + line_index = entry.lineno - max(entry.getfirstlinesource(), 0) + + lines = [] + style = entry._repr_style + if style is None: + style = self.style + if style in ("short", "long"): + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source(source, line_index, excinfo, short=short) + lines.extend(s) + if short: + message = "in %s" %(entry.name) + else: + message = excinfo and excinfo.typename or "" + path = self._makepath(entry.path) + filelocrepr = ReprFileLocation(path, entry.lineno+1, message) + localsrepr = None + if not short: + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style) + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path): + if not self.abspath: + try: + np = py.path.local().bestrelpath(path) + except OSError: + return path + if len(np) < len(str(path)): + path = np + return path + + def repr_traceback(self, excinfo): + traceback = excinfo.traceback + if self.tbfilter: + traceback = traceback.filter() + recursionindex = None + if is_recursion_error(excinfo): + recursionindex = traceback.recursionindex() + last = traceback[-1] + entries = [] + extraline = None + for index, entry in enumerate(traceback): + einfo = (last == entry) and excinfo or None + reprentry = self.repr_traceback_entry(entry, einfo) + entries.append(reprentry) + if index == recursionindex: + extraline = "!!! 
Recursion detected (same locals & position)" + break + return ReprTraceback(entries, extraline, style=self.style) + + + def repr_excinfo(self, excinfo): + if sys.version_info[0] < 3: + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + + return ReprExceptionInfo(reprtraceback, reprcrash) + else: + repr_chain = [] + e = excinfo.value + descr = None + while e is not None: + if excinfo: + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + else: + # fallback to native repr if the exception doesn't have a traceback: + # ExceptionInfo objects require a full traceback to work + reprtraceback = ReprTracebackNative(py.std.traceback.format_exception(type(e), e, None)) + reprcrash = None + + repr_chain += [(reprtraceback, reprcrash, descr)] + if e.__cause__ is not None: + e = e.__cause__ + excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None + descr = 'The above exception was the direct cause of the following exception:' + elif e.__context__ is not None: + e = e.__context__ + excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None + descr = 'During handling of the above exception, another exception occurred:' + else: + e = None + repr_chain.reverse() + return ExceptionChainRepr(repr_chain) + + +class TerminalRepr(object): + def __str__(self): + s = self.__unicode__() + if sys.version_info[0] < 3: + s = s.encode('utf-8') + return s + + def __unicode__(self): + # FYI this is called from pytest-xdist's serialization of exception + # information. + io = py.io.TextIO() + tw = py.io.TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self): + return "<%s instance at %0x>" %(self.__class__, id(self)) + + +class ExceptionRepr(TerminalRepr): + def __init__(self): + self.sections = [] + + def addsection(self, name, content, sep="-"): + self.sections.append((name, content, sep)) + + def toterminal(self, tw): + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + + +class ExceptionChainRepr(ExceptionRepr): + def __init__(self, chain): + super(ExceptionChainRepr, self).__init__() + self.chain = chain + # reprcrash and reprtraceback of the outermost (the newest) exception + # in the chain + self.reprtraceback = chain[-1][0] + self.reprcrash = chain[-1][1] + + def toterminal(self, tw): + for element in self.chain: + element[0].toterminal(tw) + if element[2] is not None: + tw.line("") + tw.line(element[2], yellow=True) + super(ExceptionChainRepr, self).toterminal(tw) + + +class ReprExceptionInfo(ExceptionRepr): + def __init__(self, reprtraceback, reprcrash): + super(ReprExceptionInfo, self).__init__() + self.reprtraceback = reprtraceback + self.reprcrash = reprcrash + + def toterminal(self, tw): + self.reprtraceback.toterminal(tw) + super(ReprExceptionInfo, self).toterminal(tw) + +class ReprTraceback(TerminalRepr): + entrysep = "_ " + + def __init__(self, reprentries, extraline, style): + self.reprentries = reprentries + self.extraline = extraline + self.style = style + + def toterminal(self, tw): + # the entries might have different styles + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i+1] + if entry.style == "long" or \ + entry.style == "short" and next_entry.style == "long": + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + +class 
ReprTracebackNative(ReprTraceback): + def __init__(self, tblines): + self.style = "native" + self.reprentries = [ReprEntryNative(tblines)] + self.extraline = None + +class ReprEntryNative(TerminalRepr): + style = "native" + + def __init__(self, tblines): + self.lines = tblines + + def toterminal(self, tw): + tw.write("".join(self.lines)) + +class ReprEntry(TerminalRepr): + localssep = "_ " + + def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style): + self.lines = lines + self.reprfuncargs = reprfuncargs + self.reprlocals = reprlocals + self.reprfileloc = filelocrepr + self.style = style + + def toterminal(self, tw): + if self.style == "short": + self.reprfileloc.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + #tw.line("") + return + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + if self.reprlocals: + #tw.sep(self.localssep, "Locals") + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self): + return "%s\n%s\n%s" % ("\n".join(self.lines), + self.reprlocals, + self.reprfileloc) + +class ReprFileLocation(TerminalRepr): + def __init__(self, path, lineno, message): + self.path = str(path) + self.lineno = lineno + self.message = message + + def toterminal(self, tw): + # filename and lineno output for each entry, + # using an output format that most editors unterstand + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.write(self.path, bold=True, red=True) + tw.line(":%s: %s" % (self.lineno, msg)) + +class ReprLocals(TerminalRepr): + def __init__(self, lines): + self.lines = lines + + def toterminal(self, tw): + for line in self.lines: + tw.line(line) + +class ReprFuncArgs(TerminalRepr): + def __init__(self, args): + self.args = args + + def toterminal(self, tw): + if self.args: + linesofar = "" + for name, value in self.args: + ns = "%s = %s" %(name, value) + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + +def getrawcode(obj, trycall=True): + """ return code object for given function. 
""" + try: + return obj.__code__ + except AttributeError: + obj = getattr(obj, 'im_func', obj) + obj = getattr(obj, 'func_code', obj) + obj = getattr(obj, 'f_code', obj) + obj = getattr(obj, '__code__', obj) + if trycall and not hasattr(obj, 'co_firstlineno'): + if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj): + x = getrawcode(obj.__call__, trycall=False) + if hasattr(x, 'co_firstlineno'): + return x + return obj + + +if sys.version_info[:2] >= (3, 5): # RecursionError introduced in 3.5 + def is_recursion_error(excinfo): + return excinfo.errisinstance(RecursionError) # noqa +else: + def is_recursion_error(excinfo): + if not excinfo.errisinstance(RuntimeError): + return False + try: + return "maximum recursion depth exceeded" in str(excinfo.value) + except UnicodeError: + return False diff --git a/lib/spack/external/_pytest/_code/source.py b/lib/spack/external/_pytest/_code/source.py new file mode 100644 index 0000000000..fcec0f5ca7 --- /dev/null +++ b/lib/spack/external/_pytest/_code/source.py @@ -0,0 +1,414 @@ +from __future__ import generators + +from bisect import bisect_right +import sys +import inspect, tokenize +import py +cpy_compile = compile + +try: + import _ast + from _ast import PyCF_ONLY_AST as _AST_FLAG +except ImportError: + _AST_FLAG = 0 + _ast = None + + +class Source(object): + """ a immutable object holding a source code fragment, + possibly deindenting it. + """ + _compilecounter = 0 + def __init__(self, *parts, **kwargs): + self.lines = lines = [] + de = kwargs.get('deindent', True) + rstrip = kwargs.get('rstrip', True) + for part in parts: + if not part: + partlines = [] + if isinstance(part, Source): + partlines = part.lines + elif isinstance(part, (tuple, list)): + partlines = [x.rstrip("\n") for x in part] + elif isinstance(part, py.builtin._basestring): + partlines = part.split('\n') + if rstrip: + while partlines: + if partlines[-1].strip(): + break + partlines.pop() + else: + partlines = getsource(part, deindent=de).lines + if de: + partlines = deindent(partlines) + lines.extend(partlines) + + def __eq__(self, other): + try: + return self.lines == other.lines + except AttributeError: + if isinstance(other, str): + return str(self) == other + return False + + __hash__ = None + + def __getitem__(self, key): + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + newsource = Source() + newsource.lines = self.lines[key.start:key.stop] + return newsource + + def __len__(self): + return len(self.lines) + + def strip(self): + """ return new source object with trailing + and leading blank lines removed. + """ + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end-1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def putaround(self, before='', after='', indent=' ' * 4): + """ return a copy of the source object with + 'before' and 'after' wrapped around it. + """ + before = Source(before) + after = Source(after) + newsource = Source() + lines = [ (indent + line) for line in self.lines] + newsource.lines = before.lines + lines + after.lines + return newsource + + def indent(self, indent=' ' * 4): + """ return a copy of the source object with + all lines indented by the given indent-string. 
+ """ + newsource = Source() + newsource.lines = [(indent+line) for line in self.lines] + return newsource + + def getstatement(self, lineno, assertion=False): + """ return Source statement which contains the + given linenumber (counted from 0). + """ + start, end = self.getstatementrange(lineno, assertion) + return self[start:end] + + def getstatementrange(self, lineno, assertion=False): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + """ + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self, offset=None): + """ return a new source object deindented by offset. + If offset is None then guess an indentation offset from + the first non-blank line. Subsequent lines which have a + lower indentation offset will be copied verbatim as + they are assumed to be part of multilines. + """ + # XXX maybe use the tokenizer to properly handle multiline + # strings etc.pp? + newsource = Source() + newsource.lines[:] = deindent(self.lines, offset) + return newsource + + def isparseable(self, deindent=True): + """ return True if source is parseable, heuristically + deindenting it by default. + """ + try: + import parser + except ImportError: + syntax_checker = lambda x: compile(x, 'asd', 'exec') + else: + syntax_checker = parser.suite + + if deindent: + source = str(self.deindent()) + else: + source = str(self) + try: + #compile(source+'\n', "x", "exec") + syntax_checker(source+'\n') + except KeyboardInterrupt: + raise + except Exception: + return False + else: + return True + + def __str__(self): + return "\n".join(self.lines) + + def compile(self, filename=None, mode='exec', + flag=generators.compiler_flag, + dont_inherit=0, _genframe=None): + """ return compiled code object. if filename is None + invent an artificial filename which displays + the source/line position of the caller frame. + """ + if not filename or py.path.local(filename).check(file=0): + if _genframe is None: + _genframe = sys._getframe(1) # the caller + fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno + base = "<%d-codegen " % self._compilecounter + self.__class__._compilecounter += 1 + if not filename: + filename = base + '%s:%d>' % (fn, lineno) + else: + filename = base + '%r %s:%d>' % (filename, fn, lineno) + source = "\n".join(self.lines) + '\n' + try: + co = cpy_compile(source, filename, mode, flag) + except SyntaxError: + ex = sys.exc_info()[1] + # re-represent syntax errors from parsing python strings + msglines = self.lines[:ex.lineno] + if ex.offset: + msglines.append(" "*ex.offset + '^') + msglines.append("(code was compiled probably from here: %s)" % filename) + newex = SyntaxError('\n'.join(msglines)) + newex.offset = ex.offset + newex.lineno = ex.lineno + newex.text = ex.text + raise newex + else: + if flag & _AST_FLAG: + return co + lines = [(x + "\n") for x in self.lines] + py.std.linecache.cache[filename] = (1, None, lines, filename) + return co + +# +# public API shortcut functions +# + +def compile_(source, filename=None, mode='exec', flags= + generators.compiler_flag, dont_inherit=0): + """ compile the given source to a raw code object, + and maintain an internal cache which allows later + retrieval of the source code for the code object + and any recursively created code objects. + """ + if _ast is not None and isinstance(source, _ast.AST): + # XXX should Source support having AST? 
+ return cpy_compile(source, filename, mode, flags, dont_inherit) + _genframe = sys._getframe(1) # the caller + s = Source(source) + co = s.compile(filename, mode, flags, _genframe=_genframe) + return co + + +def getfslineno(obj): + """ Return source location (path, lineno) for the given object. + If the source cannot be determined return ("", -1) + """ + import _pytest._code + try: + code = _pytest._code.Code(obj) + except TypeError: + try: + fn = (py.std.inspect.getsourcefile(obj) or + py.std.inspect.getfile(obj)) + except TypeError: + return "", -1 + + fspath = fn and py.path.local(fn) or None + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except IOError: + pass + else: + fspath = code.path + lineno = code.firstlineno + assert isinstance(lineno, int) + return fspath, lineno + +# +# helper functions +# + +def findsource(obj): + try: + sourcelines, lineno = py.std.inspect.findsource(obj) + except py.builtin._sysex: + raise + except: + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + + +def getsource(obj, **kwargs): + import _pytest._code + obj = _pytest._code.getrawcode(obj) + try: + strsrc = inspect.getsource(obj) + except IndentationError: + strsrc = "\"Buggy python version consider upgrading, cannot get source\"" + assert isinstance(strsrc, str) + return Source(strsrc, **kwargs) + + +def deindent(lines, offset=None): + if offset is None: + for line in lines: + line = line.expandtabs() + s = line.lstrip() + if s: + offset = len(line)-len(s) + break + else: + offset = 0 + if offset == 0: + return list(lines) + newlines = [] + + def readline_generator(lines): + for line in lines: + yield line + '\n' + while True: + yield '' + + it = readline_generator(lines) + + try: + for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)): + if sline > len(lines): + break # End of input reached + if sline > len(newlines): + line = lines[sline - 1].expandtabs() + if line.lstrip() and line[:offset].isspace(): + line = line[offset:] # Deindent + newlines.append(line) + + for i in range(sline, eline): + # Don't deindent continuing lines of + # multiline tokens (i.e. multiline strings) + newlines.append(lines[i]) + except (IndentationError, tokenize.TokenError): + pass + # Add any lines we didn't see. E.g. if an exception was raised. 
+ newlines.extend(lines[len(newlines):]) + return newlines + + +def get_statement_startend2(lineno, node): + import ast + # flatten all statements and except handlers into one lineno-list + # AST's line numbers start indexing at 1 + l = [] + for x in ast.walk(node): + if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler): + l.append(x.lineno - 1) + for name in "finalbody", "orelse": + val = getattr(x, name, None) + if val: + # treat the finally/orelse part as its own statement + l.append(val[0].lineno - 1 - 1) + l.sort() + insert_index = bisect_right(l, lineno) + start = l[insert_index - 1] + if insert_index >= len(l): + end = None + else: + end = l[insert_index] + return start, end + + +def getstatementrange_ast(lineno, source, assertion=False, astnode=None): + if astnode is None: + content = str(source) + if sys.version_info < (2,7): + content += "\n" + try: + astnode = compile(content, "source", "exec", 1024) # 1024 for AST + except ValueError: + start, end = getstatementrange_old(lineno, source, assertion) + return None, start, end + start, end = get_statement_startend2(lineno, astnode) + # we need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself + block_finder = inspect.BlockFinder() + # if we start with an indented line, put blockfinder to "started" mode + block_finder.started = source.lines[start][0].isspace() + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # the end might still point to a comment or empty line, correct it + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end + + +def getstatementrange_old(lineno, source, assertion=False): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + raise an IndexError if no such statementrange can be found. + """ + # XXX this logic is only used on python2.4 and below + # 1. find the start of the statement + from codeop import compile_command + for start in range(lineno, -1, -1): + if assertion: + line = source.lines[start] + # the following lines are not fully tested, change with care + if 'super' in line and 'self' in line and '__init__' in line: + raise IndexError("likely a subclass") + if "assert" not in line and "raise" not in line: + continue + trylines = source.lines[start:lineno+1] + # quick hack to prepare parsing an indented line with + # compile_command() (which errors on "return" outside defs) + trylines.insert(0, 'def xxx():') + trysource = '\n '.join(trylines) + # ^ space here + try: + compile_command(trysource) + except (SyntaxError, OverflowError, ValueError): + continue + + # 2. 
find the end of the statement + for end in range(lineno+1, len(source)+1): + trysource = source[start:end] + if trysource.isparseable(): + return start, end + raise SyntaxError("no valid source range around line %d " % (lineno,)) + + diff --git a/lib/spack/external/_pytest/_pluggy.py b/lib/spack/external/_pytest/_pluggy.py new file mode 100644 index 0000000000..87d32cf8dd --- /dev/null +++ b/lib/spack/external/_pytest/_pluggy.py @@ -0,0 +1,11 @@ +""" +imports symbols from vendored "pluggy" if available, otherwise +falls back to importing "pluggy" from the default namespace. +""" + +try: + from _pytest.vendored_packages.pluggy import * # noqa + from _pytest.vendored_packages.pluggy import __version__ # noqa +except ImportError: + from pluggy import * # noqa + from pluggy import __version__ # noqa diff --git a/lib/spack/external/_pytest/assertion/__init__.py b/lib/spack/external/_pytest/assertion/__init__.py new file mode 100644 index 0000000000..3f14a7ae76 --- /dev/null +++ b/lib/spack/external/_pytest/assertion/__init__.py @@ -0,0 +1,164 @@ +""" +support for presenting detailed information in failing assertions. +""" +import py +import os +import sys + +from _pytest.assertion import util +from _pytest.assertion import rewrite + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption('--assert', + action="store", + dest="assertmode", + choices=("rewrite", "plain",), + default="rewrite", + metavar="MODE", + help="""Control assertion debugging tools. 'plain' + performs no assertion debugging. 'rewrite' + (the default) rewrites assert statements in + test modules on import to provide assert + expression information.""") + + +def pytest_namespace(): + return {'register_assert_rewrite': register_assert_rewrite} + + +def register_assert_rewrite(*names): + """Register one or more module names to be rewritten on import. + + This function will make sure that this module or all modules inside + the package will get their assert statements rewritten. + Thus you should make sure to call this before the module is + actually imported, usually in your __init__.py if you are a plugin + using a package. + + :raise TypeError: if the given module names are not strings. + """ + for name in names: + if not isinstance(name, str): + msg = 'expected module names as *args, got {0} instead' + raise TypeError(msg.format(repr(names))) + for hook in sys.meta_path: + if isinstance(hook, rewrite.AssertionRewritingHook): + importhook = hook + break + else: + importhook = DummyRewriteHook() + importhook.mark_rewrite(*names) + + +class DummyRewriteHook(object): + """A no-op import hook for when rewriting is disabled.""" + + def mark_rewrite(self, *names): + pass + + +class AssertionState: + """State for the assertion plugin.""" + + def __init__(self, config, mode): + self.mode = mode + self.trace = config.trace.root.get("assertion") + self.hook = None + + +def install_importhook(config): + """Try to install the rewrite hook, raise SystemError if it fails.""" + # Both Jython and CPython 2.6.0 have AST bugs that make the + # assertion rewriting hook malfunction. 
+ if (sys.platform.startswith('java') or + sys.version_info[:3] == (2, 6, 0)): + raise SystemError('rewrite not supported') + + config._assertstate = AssertionState(config, 'rewrite') + config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config) + sys.meta_path.insert(0, hook) + config._assertstate.trace('installed rewrite import hook') + + def undo(): + hook = config._assertstate.hook + if hook is not None and hook in sys.meta_path: + sys.meta_path.remove(hook) + + config.add_cleanup(undo) + return hook + + +def pytest_collection(session): + # this hook is only called when test modules are collected + # so for example not in the master process of pytest-xdist + # (which does not collect test modules) + assertstate = getattr(session.config, '_assertstate', None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(session) + + +def _running_on_ci(): + """Check if we're currently running on a CI system.""" + env_vars = ['CI', 'BUILD_NUMBER'] + return any(var in os.environ for var in env_vars) + + +def pytest_runtest_setup(item): + """Setup the pytest_assertrepr_compare hook + + The newinterpret and rewrite modules will use util._reprcompare if + it exists to use custom reporting via the + pytest_assertrepr_compare hook. This sets up this custom + comparison for the test. + """ + def callbinrepr(op, left, right): + """Call the pytest_assertrepr_compare hook and prepare the result + + This uses the first result from the hook and then ensures the + following: + * Overly verbose explanations are dropped unless -vv was used or + running on a CI. + * Embedded newlines are escaped to help util.format_explanation() + later. + * If the rewrite mode is used embedded %-characters are replaced + to protect later % formatting. + + The result can be formatted by util.format_explanation() for + pretty printing. 
+ """ + hook_result = item.ihook.pytest_assertrepr_compare( + config=item.config, op=op, left=left, right=right) + for new_expl in hook_result: + if new_expl: + if (sum(len(p) for p in new_expl[1:]) > 80*8 and + item.config.option.verbose < 2 and + not _running_on_ci()): + show_max = 10 + truncated_lines = len(new_expl) - show_max + new_expl[show_max:] = [py.builtin._totext( + 'Detailed information truncated (%d more lines)' + ', use "-vv" to show' % truncated_lines)] + new_expl = [line.replace("\n", "\\n") for line in new_expl] + res = py.builtin._totext("\n~").join(new_expl) + if item.config.getvalue("assertmode") == "rewrite": + res = res.replace("%", "%%") + return res + util._reprcompare = callbinrepr + + +def pytest_runtest_teardown(item): + util._reprcompare = None + + +def pytest_sessionfinish(session): + assertstate = getattr(session.config, '_assertstate', None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(None) + + +# Expose this plugin's implementation for the pytest_assertrepr_compare hook +pytest_assertrepr_compare = util.assertrepr_compare diff --git a/lib/spack/external/_pytest/assertion/rewrite.py b/lib/spack/external/_pytest/assertion/rewrite.py new file mode 100644 index 0000000000..abf5b491fe --- /dev/null +++ b/lib/spack/external/_pytest/assertion/rewrite.py @@ -0,0 +1,945 @@ +"""Rewrite assertion AST to produce nice error messages""" + +import ast +import _ast +import errno +import itertools +import imp +import marshal +import os +import re +import struct +import sys +import types +from fnmatch import fnmatch + +import py +from _pytest.assertion import util + + +# pytest caches rewritten pycs in __pycache__. +if hasattr(imp, "get_tag"): + PYTEST_TAG = imp.get_tag() + "-PYTEST" +else: + if hasattr(sys, "pypy_version_info"): + impl = "pypy" + elif sys.platform == "java": + impl = "jython" + else: + impl = "cpython" + ver = sys.version_info + PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) + del ver, impl + +PYC_EXT = ".py" + (__debug__ and "c" or "o") +PYC_TAIL = "." + PYTEST_TAG + PYC_EXT + +REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 + +if sys.version_info >= (3,5): + ast_Call = ast.Call +else: + ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None) + + +class AssertionRewritingHook(object): + """PEP302 Import hook which rewrites asserts.""" + + def __init__(self, config): + self.config = config + self.fnpats = config.getini("python_files") + self.session = None + self.modules = {} + self._rewritten_names = set() + self._register_with_pkg_resources() + self._must_rewrite = set() + + def set_session(self, session): + self.session = session + + def find_module(self, name, path=None): + state = self.config._assertstate + state.trace("find_module called for: %s" % name) + names = name.rsplit(".", 1) + lastname = names[-1] + pth = None + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] + if pth is None: + try: + fd, fn, desc = imp.find_module(lastname, path) + except ImportError: + return None + if fd is not None: + fd.close() + tp = desc[2] + if tp == imp.PY_COMPILED: + if hasattr(imp, "source_from_cache"): + try: + fn = imp.source_from_cache(fn) + except ValueError: + # Python 3 doesn't like orphaned but still-importable + # .pyc files. 
+ fn = fn[:-1] + else: + fn = fn[:-1] + elif tp != imp.PY_SOURCE: + # Don't know what this is. + return None + else: + fn = os.path.join(pth, name.rpartition(".")[2] + ".py") + + fn_pypath = py.path.local(fn) + if not self._should_rewrite(name, fn_pypath, state): + return None + + self._rewritten_names.add(name) + + # The requested module looks like a test file, so rewrite it. This is + # the most magical part of the process: load the source, rewrite the + # asserts, and load the rewritten source. We also cache the rewritten + # module code in a special pyc. We must be aware of the possibility of + # concurrent pytest processes rewriting and loading pycs. To avoid + # tricky race conditions, we maintain the following invariant: The + # cached pyc is always a complete, valid pyc. Operations on it must be + # atomic. POSIX's atomic rename comes in handy. + write = not sys.dont_write_bytecode + cache_dir = os.path.join(fn_pypath.dirname, "__pycache__") + if write: + try: + os.mkdir(cache_dir) + except OSError: + e = sys.exc_info()[1].errno + if e == errno.EEXIST: + # Either the __pycache__ directory already exists (the + # common case) or it's blocked by a non-dir node. In the + # latter case, we'll ignore it in _write_pyc. + pass + elif e in [errno.ENOENT, errno.ENOTDIR]: + # One of the path components was not a directory, likely + # because we're in a zip file. + write = False + elif e in [errno.EACCES, errno.EROFS, errno.EPERM]: + state.trace("read only directory: %r" % fn_pypath.dirname) + write = False + else: + raise + cache_name = fn_pypath.basename[:-3] + PYC_TAIL + pyc = os.path.join(cache_dir, cache_name) + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... + co = _read_pyc(fn_pypath, pyc, state.trace) + if co is None: + state.trace("rewriting %r" % (fn,)) + source_stat, co = _rewrite_test(self.config, fn_pypath) + if co is None: + # Probably a SyntaxError in the test. + return None + if write: + _make_rewritten_pyc(state, source_stat, pyc, co) + else: + state.trace("found cached rewritten pyc for %r" % (fn,)) + self.modules[name] = co, pyc + return self + + def _should_rewrite(self, name, fn_pypath, state): + # always rewrite conftest files + fn = str(fn_pypath) + if fn_pypath.basename == 'conftest.py': + state.trace("rewriting conftest file: %r" % (fn,)) + return True + + if self.session is not None: + if self.session.isinitpath(fn): + state.trace("matched test file (was specified on cmdline): %r" % + (fn,)) + return True + + # modules not passed explicitly on the command line are only + # rewritten if they match the naming convention for test files + for pat in self.fnpats: + # use fnmatch instead of fn_pypath.fnmatch because the + # latter might trigger an import to fnmatch.fnmatch + # internally, which would cause this method to be + # called recursively + if fnmatch(fn_pypath.basename, pat): + state.trace("matched test file %r" % (fn,)) + return True + + for marked in self._must_rewrite: + if name.startswith(marked): + state.trace("matched marked file %r (from %r)" % (name, marked)) + return True + + return False + + def mark_rewrite(self, *names): + """Mark import names as needing to be re-written. + + The named module or package as well as any nested modules will + be re-written on import. 
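To make the naming-convention check in `_should_rewrite` above concrete: module basenames are matched with `fnmatch` against the `python_files` patterns from the configuration. A small stand-alone illustration (the patterns shown are pytest's usual defaults, assumed here):

    from fnmatch import fnmatch

    patterns = ["test_*.py", "*_test.py"]   # typical python_files patterns
    print(any(fnmatch("test_spec.py", pat) for pat in patterns))  # True -> rewritten
    print(any(fnmatch("helpers.py", pat) for pat in patterns))    # False -> left alone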
+ """ + already_imported = set(names).intersection(set(sys.modules)) + if already_imported: + for name in already_imported: + if name not in self._rewritten_names: + self._warn_already_imported(name) + self._must_rewrite.update(names) + + def _warn_already_imported(self, name): + self.config.warn( + 'P1', + 'Module already imported so can not be re-written: %s' % name) + + def load_module(self, name): + # If there is an existing module object named 'fullname' in + # sys.modules, the loader must use that existing module. (Otherwise, + # the reload() builtin will not work correctly.) + if name in sys.modules: + return sys.modules[name] + + co, pyc = self.modules.pop(name) + # I wish I could just call imp.load_compiled here, but __file__ has to + # be set properly. In Python 3.2+, this all would be handled correctly + # by load_compiled. + mod = sys.modules[name] = imp.new_module(name) + try: + mod.__file__ = co.co_filename + # Normally, this attribute is 3.2+. + mod.__cached__ = pyc + mod.__loader__ = self + py.builtin.exec_(co, mod.__dict__) + except: + del sys.modules[name] + raise + return sys.modules[name] + + + + def is_package(self, name): + try: + fd, fn, desc = imp.find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. + pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + def get_data(self, pathname): + """Optional PEP302 get_data API. + """ + with open(pathname, 'rb') as f: + return f.read() + + +def _write_pyc(state, co, source_stat, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) + try: + fp = open(pyc, "wb") + except IOError: + err = sys.exc_info()[1].errno + state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, __pycache__ being a + # file etc. 
+ return False + try: + fp.write(imp.get_magic()) + mtime = int(source_stat.mtime) + size = source_stat.size & 0xFFFFFFFF + fp.write(struct.pack(">", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in" +} +# Python 3.5+ compatibility +try: + binop_map[ast.MatMult] = "@" +except AttributeError: + pass + +# Python 3.4+ compatibility +if hasattr(ast, "NameConstant"): + _NameConstant = ast.NameConstant +else: + def _NameConstant(c): + return ast.Name(str(c), ast.Load()) + + +def set_location(node, lineno, col_offset): + """Set node location information recursively.""" + def _fix(node, lineno, col_offset): + if "lineno" in node._attributes: + node.lineno = lineno + if "col_offset" in node._attributes: + node.col_offset = col_offset + for child in ast.iter_child_nodes(node): + _fix(child, lineno, col_offset) + _fix(node, lineno, col_offset) + return node + + +class AssertionRewriter(ast.NodeVisitor): + """Assertion rewriting implementation. + + The main entrypoint is to call .run() with an ast.Module instance, + this will then find all the assert statements and re-write them to + provide intermediate values and a detailed assertion error. See + http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html + for an overview of how this works. + + The entry point here is .run() which will iterate over all the + statements in an ast.Module and for each ast.Assert statement it + finds call .visit() with it. Then .visit_Assert() takes over and + is responsible for creating new ast statements to replace the + original assert statement: it re-writes the test of an assertion + to provide intermediate values and replace it with an if statement + which raises an assertion error with a detailed explanation in + case the expression is false. + + For this .visit_Assert() uses the visitor pattern to visit all the + AST nodes of the ast.Assert.test field, each visit call returning + an AST node and the corresponding explanation string. During this + state is kept in several instance attributes: + + :statements: All the AST statements which will replace the assert + statement. + + :variables: This is populated by .variable() with each variable + used by the statements so that they can all be set to None at + the end of the statements. + + :variable_counter: Counter to create new unique variables needed + by statements. Variables are created using .variable() and + have the form of "@py_assert0". + + :on_failure: The AST statements which will be executed if the + assertion test fails. This is the code which will construct + the failure message and raises the AssertionError. + + :explanation_specifiers: A dict filled by .explanation_param() + with %-formatting placeholders and their corresponding + expressions to use in the building of an assertion message. + This is used by .pop_format_context() to build a message. + + :stack: A stack of the explanation_specifiers dicts maintained by + .push_format_context() and .pop_format_context() which allows + to build another %-formatted string while already building one. + + This state is reset on every new assert statement visited and used + by the other visitors. 
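To make the description above concrete, here is a hand-simplified approximation of what the rewriter turns a single assert into; the real output uses generated names such as `@py_assert0` and calls back into this module's helpers, so treat this purely as a sketch:

    def add(a, b):
        return a + b

    # Roughly equivalent to the rewritten form of:  assert add(2, 2) == 5
    py_assert0 = add(2, 2)
    py_assert1 = py_assert0 == 5
    if not py_assert1:
        # The real rewriter builds this text via %-format contexts and
        # _format_explanation; the message here is only an approximation.
        msg = "assert %s == 5\n +  where %s = add(2, 2)" % (py_assert0, py_assert0)
        raise AssertionError(msg)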
+ + """ + + def __init__(self, module_path, config): + super(AssertionRewriter, self).__init__() + self.module_path = module_path + self.config = config + + def run(self, mod): + """Find all assert statements in *mod* and rewrite them.""" + if not mod.body: + # Nothing to do. + return + # Insert some special imports at the top of the module but after any + # docstrings and __future__ imports. + aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"), + ast.alias("_pytest.assertion.rewrite", "@pytest_ar")] + expect_docstring = True + pos = 0 + lineno = 0 + for item in mod.body: + if (expect_docstring and isinstance(item, ast.Expr) and + isinstance(item.value, ast.Str)): + doc = item.value.s + if "PYTEST_DONT_REWRITE" in doc: + # The module has disabled assertion rewriting. + return + lineno += len(doc) - 1 + expect_docstring = False + elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or + item.module != "__future__"): + lineno = item.lineno + break + pos += 1 + imports = [ast.Import([alias], lineno=lineno, col_offset=0) + for alias in aliases] + mod.body[pos:pos] = imports + # Collect asserts. + nodes = [mod] + while nodes: + node = nodes.pop() + for name, field in ast.iter_fields(node): + if isinstance(field, list): + new = [] + for i, child in enumerate(field): + if isinstance(child, ast.Assert): + # Transform assert. + new.extend(self.visit(child)) + else: + new.append(child) + if isinstance(child, ast.AST): + nodes.append(child) + setattr(node, name, new) + elif (isinstance(field, ast.AST) and + # Don't recurse into expressions as they can't contain + # asserts. + not isinstance(field, ast.expr)): + nodes.append(field) + + def variable(self): + """Get a new variable.""" + # Use a character invalid in python identifiers to avoid clashing. + name = "@py_assert" + str(next(self.variable_counter)) + self.variables.append(name) + return name + + def assign(self, expr): + """Give *expr* a name.""" + name = self.variable() + self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) + return ast.Name(name, ast.Load()) + + def display(self, expr): + """Call py.io.saferepr on the expression.""" + return self.helper("saferepr", expr) + + def helper(self, name, *args): + """Call a helper in this module.""" + py_name = ast.Name("@pytest_ar", ast.Load()) + attr = ast.Attribute(py_name, "_" + name, ast.Load()) + return ast_Call(attr, list(args), []) + + def builtin(self, name): + """Return the builtin called *name*.""" + builtin_name = ast.Name("@py_builtins", ast.Load()) + return ast.Attribute(builtin_name, name, ast.Load()) + + def explanation_param(self, expr): + """Return a new named %-formatting placeholder for expr. + + This creates a %-formatting placeholder for expr in the + current formatting context, e.g. ``%(py0)s``. The placeholder + and expr are placed in the current format context so that it + can be used on the next call to .pop_format_context(). + + """ + specifier = "py" + str(next(self.variable_counter)) + self.explanation_specifiers[specifier] = expr + return "%(" + specifier + ")s" + + def push_format_context(self): + """Create a new formatting context. + + The format context is used for when an explanation wants to + have a variable value formatted in the assertion message. In + this case the value required can be added using + .explanation_param(). Finally .pop_format_context() is used + to format a string of %-formatted values as added by + .explanation_param(). 
+ + """ + self.explanation_specifiers = {} + self.stack.append(self.explanation_specifiers) + + def pop_format_context(self, expl_expr): + """Format the %-formatted string with current format context. + + The expl_expr should be an ast.Str instance constructed from + the %-placeholders created by .explanation_param(). This will + add the required code to format said string to .on_failure and + return the ast.Name instance of the formatted string. + + """ + current = self.stack.pop() + if self.stack: + self.explanation_specifiers = self.stack[-1] + keys = [ast.Str(key) for key in current.keys()] + format_dict = ast.Dict(keys, list(current.values())) + form = ast.BinOp(expl_expr, ast.Mod(), format_dict) + name = "@py_format" + str(next(self.variable_counter)) + self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form)) + return ast.Name(name, ast.Load()) + + def generic_visit(self, node): + """Handle expressions we don't have custom code for.""" + assert isinstance(node, ast.expr) + res = self.assign(node) + return res, self.explanation_param(self.display(res)) + + def visit_Assert(self, assert_): + """Return the AST statements to replace the ast.Assert instance. + + This re-writes the test of an assertion to provide + intermediate values and replace it with an if statement which + raises an assertion error with a detailed explanation in case + the expression is false. + + """ + if isinstance(assert_.test, ast.Tuple) and self.config is not None: + fslocation = (self.module_path, assert_.lineno) + self.config.warn('R1', 'assertion is always true, perhaps ' + 'remove parentheses?', fslocation=fslocation) + self.statements = [] + self.variables = [] + self.variable_counter = itertools.count() + self.stack = [] + self.on_failure = [] + self.push_format_context() + # Rewrite assert into a bunch of statements. + top_condition, explanation = self.visit(assert_.test) + # Create failure message. + body = self.on_failure + negation = ast.UnaryOp(ast.Not(), top_condition) + self.statements.append(ast.If(negation, body, [])) + if assert_.msg: + assertmsg = self.helper('format_assertmsg', assert_.msg) + explanation = "\n>assert " + explanation + else: + assertmsg = ast.Str("") + explanation = "assert " + explanation + template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation)) + msg = self.pop_format_context(template) + fmt = self.helper("format_explanation", msg) + err_name = ast.Name("AssertionError", ast.Load()) + exc = ast_Call(err_name, [fmt], []) + if sys.version_info[0] >= 3: + raise_ = ast.Raise(exc, None) + else: + raise_ = ast.Raise(exc, None, None) + body.append(raise_) + # Clear temporary variables by setting them to None. + if self.variables: + variables = [ast.Name(name, ast.Store()) + for name in self.variables] + clear = ast.Assign(variables, _NameConstant(None)) + self.statements.append(clear) + # Fix line numbers. + for stmt in self.statements: + set_location(stmt, assert_.lineno, assert_.col_offset) + return self.statements + + def visit_Name(self, name): + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. 
+ locs = ast_Call(self.builtin("locals"), [], []) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) + return name, self.explanation_param(expr) + + def visit_BoolOp(self, boolop): + res_var = self.variable() + expl_list = self.assign(ast.List([], ast.Load())) + app = ast.Attribute(expl_list, "append", ast.Load()) + is_or = int(isinstance(boolop.op, ast.Or)) + body = save = self.statements + fail_save = self.on_failure + levels = len(boolop.values) - 1 + self.push_format_context() + # Process each operand, short-circuting if needed. + for i, v in enumerate(boolop.values): + if i: + fail_inner = [] + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa + self.on_failure = fail_inner + self.push_format_context() + res, expl = self.visit(v) + body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) + expl_format = self.pop_format_context(ast.Str(expl)) + call = ast_Call(app, [expl_format], []) + self.on_failure.append(ast.Expr(call)) + if i < levels: + cond = res + if is_or: + cond = ast.UnaryOp(ast.Not(), cond) + inner = [] + self.statements.append(ast.If(cond, inner, [])) + self.statements = body = inner + self.statements = save + self.on_failure = fail_save + expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or)) + expl = self.pop_format_context(expl_template) + return ast.Name(res_var, ast.Load()), self.explanation_param(expl) + + def visit_UnaryOp(self, unary): + pattern = unary_map[unary.op.__class__] + operand_res, operand_expl = self.visit(unary.operand) + res = self.assign(ast.UnaryOp(unary.op, operand_res)) + return res, pattern % (operand_expl,) + + def visit_BinOp(self, binop): + symbol = binop_map[binop.op.__class__] + left_expr, left_expl = self.visit(binop.left) + right_expr, right_expl = self.visit(binop.right) + explanation = "(%s %s %s)" % (left_expl, symbol, right_expl) + res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) + return res, explanation + + def visit_Call_35(self, call): + """ + visit `ast.Call` nodes on Python3.5 and after + """ + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + for arg in call.args: + res, expl = self.visit(arg) + arg_expls.append(expl) + new_args.append(res) + for keyword in call.keywords: + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + if keyword.arg: + arg_expls.append(keyword.arg + "=" + expl) + else: ## **args have `arg` keywords with an .arg of None + arg_expls.append("**" + expl) + + expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) + return res, outer_expl + + def visit_Starred(self, starred): + # From Python 3.5, a Starred node can appear in a function call + res, expl = self.visit(starred.value) + return starred, '*' + expl + + def visit_Call_legacy(self, call): + """ + visit `ast.Call nodes on 3.4 and below` + """ + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + new_star = new_kwarg = None + for arg in call.args: + res, expl = self.visit(arg) + new_args.append(res) + arg_expls.append(expl) + for keyword in call.keywords: + res, expl 
= self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + arg_expls.append(keyword.arg + "=" + expl) + if call.starargs: + new_star, expl = self.visit(call.starargs) + arg_expls.append("*" + expl) + if call.kwargs: + new_kwarg, expl = self.visit(call.kwargs) + arg_expls.append("**" + expl) + expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs, + new_star, new_kwarg) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) + return res, outer_expl + + # ast.Call signature changed on 3.5, + # conditionally change which methods is named + # visit_Call depending on Python version + if sys.version_info >= (3, 5): + visit_Call = visit_Call_35 + else: + visit_Call = visit_Call_legacy + + + def visit_Attribute(self, attr): + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + value, value_expl = self.visit(attr.value) + res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) + res_expl = self.explanation_param(self.display(res)) + pat = "%s\n{%s = %s.%s\n}" + expl = pat % (res_expl, res_expl, value_expl, attr.attr) + return res, expl + + def visit_Compare(self, comp): + self.push_format_context() + left_res, left_expl = self.visit(comp.left) + if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)): + left_expl = "({0})".format(left_expl) + res_variables = [self.variable() for i in range(len(comp.ops))] + load_names = [ast.Name(v, ast.Load()) for v in res_variables] + store_names = [ast.Name(v, ast.Store()) for v in res_variables] + it = zip(range(len(comp.ops)), comp.ops, comp.comparators) + expls = [] + syms = [] + results = [left_res] + for i, op, next_operand in it: + next_res, next_expl = self.visit(next_operand) + if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)): + next_expl = "({0})".format(next_expl) + results.append(next_res) + sym = binop_map[op.__class__] + syms.append(ast.Str(sym)) + expl = "%s %s %s" % (left_expl, sym, next_expl) + expls.append(ast.Str(expl)) + res_expr = ast.Compare(left_res, [op], [next_res]) + self.statements.append(ast.Assign([store_names[i]], res_expr)) + left_res, left_expl = next_res, next_expl + # Use pytest.assertion.util._reprcompare if that's available. + expl_call = self.helper("call_reprcompare", + ast.Tuple(syms, ast.Load()), + ast.Tuple(load_names, ast.Load()), + ast.Tuple(expls, ast.Load()), + ast.Tuple(results, ast.Load())) + if len(comp.ops) > 1: + res = ast.BoolOp(ast.And(), load_names) + else: + res = load_names[0] + return res, self.explanation_param(self.pop_format_context(expl_call)) diff --git a/lib/spack/external/_pytest/assertion/util.py b/lib/spack/external/_pytest/assertion/util.py new file mode 100644 index 0000000000..4a0a4e4310 --- /dev/null +++ b/lib/spack/external/_pytest/assertion/util.py @@ -0,0 +1,300 @@ +"""Utilities for assertion debugging""" +import pprint + +import _pytest._code +import py +try: + from collections import Sequence +except ImportError: + Sequence = list + +BuiltinAssertionError = py.builtin.builtins.AssertionError +u = py.builtin._totext + +# The _reprcompare attribute on the util module is used by the new assertion +# interpretation code and assertion rewriter to detect this plugin was +# loaded and in turn call the hooks defined here as part of the +# DebugInterpreter. 
+_reprcompare = None + + +# the re-encoding is needed for python2 repr +# with non-ascii characters (see issue 877 and 1379) +def ecu(s): + try: + return u(s, 'utf-8', 'replace') + except TypeError: + return s + + +def format_explanation(explanation): + """This formats an explanation + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. + """ + explanation = ecu(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u('\n').join(result) + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. + """ + raw_lines = (explanation or u('')).split('\n') + lines = [raw_lines[0]] + for l in raw_lines[1:]: + if l and l[0] in ['{', '}', '~', '>']: + lines.append(l) + else: + lines[-1] += '\\n' + l + return lines + + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ + result = lines[:1] + stack = [0] + stackcnt = [0] + for line in lines[1:]: + if line.startswith('{'): + if stackcnt[-1]: + s = u('and ') + else: + s = u('where ') + stack.append(len(result)) + stackcnt[-1] += 1 + stackcnt.append(0) + result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) + elif line.startswith('}'): + stack.pop() + stackcnt.pop() + result[stack[-1]] += line[1:] + else: + assert line[0] in ['~', '>'] + stack[-1] += 1 + indent = len(stack) if line.startswith('~') else len(stack) - 1 + result.append(u(' ')*indent + line[1:]) + assert len(stack) == 1 + return result + + +# Provide basestring in python3 +try: + basestring = basestring +except NameError: + basestring = str + + +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op + left_repr = py.io.saferepr(left, maxsize=int(width//2)) + right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) + + summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr)) + + issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and + not isinstance(x, basestring)) + istext = lambda x: isinstance(x, basestring) + isdict = lambda x: isinstance(x, dict) + isset = lambda x: isinstance(x, (set, frozenset)) + + def isiterable(obj): + try: + iter(obj) + return not istext(obj) + except TypeError: + return False + + verbose = config.getoption('verbose') + explanation = None + try: + if op == '==': + if istext(left) and istext(right): + explanation = _diff_text(left, right, verbose) + else: + if issequence(left) and issequence(right): + explanation = _compare_eq_sequence(left, right, verbose) + elif isset(left) and isset(right): + explanation = _compare_eq_set(left, right, verbose) + elif isdict(left) and isdict(right): + explanation = _compare_eq_dict(left, right, verbose) + if isiterable(left) and isiterable(right): + expl = _compare_eq_iterable(left, right, 
verbose) + if explanation is not None: + explanation.extend(expl) + else: + explanation = expl + elif op == 'not in': + if istext(left) and istext(right): + explanation = _notin_text(left, right, verbose) + except Exception: + explanation = [ + u('(pytest_assertion plugin: representation of details failed. ' + 'Probably an object has a faulty __repr__.)'), + u(_pytest._code.ExceptionInfo())] + + if not explanation: + return None + + return [summary] + explanation + + +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes + + Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. + """ + from difflib import ndiff + explanation = [] + if isinstance(left, py.builtin.bytes): + left = u(repr(left)[1:-1]).replace(r'\n', '\n') + if isinstance(right, py.builtin.bytes): + right = u(repr(right)[1:-1]).replace(r'\n', '\n') + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + break + if i > 42: + i -= 10 # Provide some context + explanation = [u('Skipping %s identical leading ' + 'characters in diff, use -v to show') % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [u('Skipping %s identical trailing ' + 'characters in diff, use -v to show') % i] + left = left[:-i] + right = right[:-i] + keepends = True + explanation += [line.strip('\n') + for line in ndiff(left.splitlines(keepends), + right.splitlines(keepends))] + return explanation + + +def _compare_eq_iterable(left, right, verbose=False): + if not verbose: + return [u('Use -v to get the full diff')] + # dynamic import to speedup pytest + import difflib + + try: + left_formatting = pprint.pformat(left).splitlines() + right_formatting = pprint.pformat(right).splitlines() + explanation = [u('Full diff:')] + except Exception: + # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling + # sorted() on a list would raise. See issue #718. + # As a workaround, the full diff is generated by using the repr() string of each item of each container. 
+ left_formatting = sorted(repr(x) for x in left) + right_formatting = sorted(repr(x) for x in right) + explanation = [u('Full diff (fallback to calling repr on each item):')] + explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting)) + return explanation + + +def _compare_eq_sequence(left, right, verbose=False): + explanation = [] + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + explanation += [u('At index %s diff: %r != %r') + % (i, left[i], right[i])] + break + if len(left) > len(right): + explanation += [u('Left contains more items, first extra item: %s') + % py.io.saferepr(left[len(right)],)] + elif len(left) < len(right): + explanation += [ + u('Right contains more items, first extra item: %s') % + py.io.saferepr(right[len(left)],)] + return explanation + + +def _compare_eq_set(left, right, verbose=False): + explanation = [] + diff_left = left - right + diff_right = right - left + if diff_left: + explanation.append(u('Extra items in the left set:')) + for item in diff_left: + explanation.append(py.io.saferepr(item)) + if diff_right: + explanation.append(u('Extra items in the right set:')) + for item in diff_right: + explanation.append(py.io.saferepr(item)) + return explanation + + +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = dict((k, left[k]) for k in common if left[k] == right[k]) + if same and not verbose: + explanation += [u('Omitting %s identical items, use -v to show') % + len(same)] + elif same: + explanation += [u('Common items:')] + explanation += pprint.pformat(same).splitlines() + diff = set(k for k in common if left[k] != right[k]) + if diff: + explanation += [u('Differing items:')] + for k in diff: + explanation += [py.io.saferepr({k: left[k]}) + ' != ' + + py.io.saferepr({k: right[k]})] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u('Left contains more items:')) + explanation.extend(pprint.pformat( + dict((k, left[k]) for k in extra_left)).splitlines()) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u('Right contains more items:')) + explanation.extend(pprint.pformat( + dict((k, right[k]) for k in extra_right)).splitlines()) + return explanation + + +def _notin_text(term, text, verbose=False): + index = text.find(term) + head = text[:index] + tail = text[index+len(term):] + correct_text = head + tail + diff = _diff_text(correct_text, text, verbose) + newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] + for line in diff: + if line.startswith(u('Skipping')): + continue + if line.startswith(u('- ')): + continue + if line.startswith(u('+ ')): + newdiff.append(u(' ') + line[2:]) + else: + newdiff.append(line) + return newdiff diff --git a/lib/spack/external/_pytest/cacheprovider.py b/lib/spack/external/_pytest/cacheprovider.py new file mode 100644 index 0000000000..0657001f2d --- /dev/null +++ b/lib/spack/external/_pytest/cacheprovider.py @@ -0,0 +1,245 @@ +""" +merged implementation of the cache provider + +the name cache was not choosen to ensure pluggy automatically +ignores the external pytest-cache +""" + +import py +import pytest +import json +from os.path import sep as _sep, altsep as _altsep + + +class Cache(object): + def __init__(self, config): + self.config = config + self._cachedir = config.rootdir.join(".cache") + self.trace = config.trace.root.get("cache") + if config.getvalue("cacheclear"): + self.trace("clearing cachedir") + if 
self._cachedir.check(): + self._cachedir.remove() + self._cachedir.mkdir() + + def makedir(self, name): + """ return a directory path object with the given name. If the + directory does not yet exist, it will be created. You can use it + to manage files likes e. g. store/retrieve database + dumps across test sessions. + + :param name: must be a string not containing a ``/`` separator. + Make sure the name contains your plugin or application + identifiers to prevent clashes with other cache users. + """ + if _sep in name or _altsep is not None and _altsep in name: + raise ValueError("name is not allowed to contain path separators") + return self._cachedir.ensure_dir("d", name) + + def _getvaluepath(self, key): + return self._cachedir.join('v', *key.split('/')) + + def get(self, key, default): + """ return cached value for the given key. If no value + was yet cached or the value cannot be read, the specified + default is returned. + + :param key: must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param default: must be provided in case of a cache-miss or + invalid cache values. + + """ + path = self._getvaluepath(key) + if path.check(): + try: + with path.open("r") as f: + return json.load(f) + except ValueError: + self.trace("cache-invalid at %s" % (path,)) + return default + + def set(self, key, value): + """ save value for the given key. + + :param key: must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param value: must be of any combination of basic + python types, including nested types + like e. g. lists of dictionaries. + """ + path = self._getvaluepath(key) + try: + path.dirpath().ensure_dir() + except (py.error.EEXIST, py.error.EACCES): + self.config.warn( + code='I9', message='could not create cache path %s' % (path,) + ) + return + try: + f = path.open('w') + except py.error.ENOTDIR: + self.config.warn( + code='I9', message='cache could not write path %s' % (path,)) + else: + with f: + self.trace("cache-write %s: %r" % (key, value,)) + json.dump(value, f, indent=2, sort_keys=True) + + +class LFPlugin: + """ Plugin which implements the --lf (run last-failing) option """ + def __init__(self, config): + self.config = config + active_keys = 'lf', 'failedfirst' + self.active = any(config.getvalue(key) for key in active_keys) + if self.active: + self.lastfailed = config.cache.get("cache/lastfailed", {}) + else: + self.lastfailed = {} + + def pytest_report_header(self): + if self.active: + if not self.lastfailed: + mode = "run all (no recorded failures)" + else: + mode = "rerun last %d failures%s" % ( + len(self.lastfailed), + " first" if self.config.getvalue("failedfirst") else "") + return "run-last-failure: %s" % mode + + def pytest_runtest_logreport(self, report): + if report.failed and "xfail" not in report.keywords: + self.lastfailed[report.nodeid] = True + elif not report.failed: + if report.when == "call": + self.lastfailed.pop(report.nodeid, None) + + def pytest_collectreport(self, report): + passed = report.outcome in ('passed', 'skipped') + if passed: + if report.nodeid in self.lastfailed: + self.lastfailed.pop(report.nodeid) + self.lastfailed.update( + (item.nodeid, True) + for item in report.result) + else: + self.lastfailed[report.nodeid] = True + + def pytest_collection_modifyitems(self, session, config, items): + if self.active and self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + 
previously_failed.append(item) + else: + previously_passed.append(item) + if not previously_failed and previously_passed: + # running a subset of all tests with recorded failures outside + # of the set of tests currently executing + pass + elif self.config.getvalue("failedfirst"): + items[:] = previously_failed + previously_passed + else: + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) + + def pytest_sessionfinish(self, session): + config = self.config + if config.getvalue("cacheshow") or hasattr(config, "slaveinput"): + return + prev_failed = config.cache.get("cache/lastfailed", None) is not None + if (session.testscollected and prev_failed) or self.lastfailed: + config.cache.set("cache/lastfailed", self.lastfailed) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption( + '--lf', '--last-failed', action='store_true', dest="lf", + help="rerun only the tests that failed " + "at the last run (or all if none failed)") + group.addoption( + '--ff', '--failed-first', action='store_true', dest="failedfirst", + help="run all tests but run the last failures first. " + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown") + group.addoption( + '--cache-show', action='store_true', dest="cacheshow", + help="show cache contents, don't perform collection or tests") + group.addoption( + '--cache-clear', action='store_true', dest="cacheclear", + help="remove all cache contents at start of test run.") + + +def pytest_cmdline_main(config): + if config.option.cacheshow: + from _pytest.main import wrap_session + return wrap_session(config, cacheshow) + + + +@pytest.hookimpl(tryfirst=True) +def pytest_configure(config): + config.cache = Cache(config) + config.pluginmanager.register(LFPlugin(config), "lfplugin") + + +@pytest.fixture +def cache(request): + """ + Return a cache object that can persist state between testing sessions. + + cache.get(key, default) + cache.set(key, value) + + Keys must be a ``/`` separated value, where the first part is usually the + name of your plugin or application to avoid clashes with other cache users. + + Values can be any object handled by the json stdlib module. 
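A hedged sketch of how the `cache` fixture documented above might be used from a test suite; the key and values are illustrative (keys are '/'-separated, values must be JSON-serialisable):

    import pytest

    @pytest.fixture
    def expensive_data(cache):
        # Reuse a value computed in an earlier test session if present.
        data = cache.get("myplugin/expensive", None)
        if data is None:
            data = {"answer": 42}              # stand-in for a slow computation
            cache.set("myplugin/expensive", data)
        return data

    def test_uses_cached_value(expensive_data):
        assert expensive_data["answer"] == 42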
+ """ + return request.config.cache + + +def pytest_report_header(config): + if config.option.verbose: + relpath = py.path.local().bestrelpath(config.cache._cachedir) + return "cachedir: %s" % relpath + + +def cacheshow(config, session): + from pprint import pprint + tw = py.io.TerminalWriter() + tw.line("cachedir: " + str(config.cache._cachedir)) + if not config.cache._cachedir.check(): + tw.line("cache is empty") + return 0 + dummy = object() + basedir = config.cache._cachedir + vdir = basedir.join("v") + tw.sep("-", "cache values") + for valpath in vdir.visit(lambda x: x.isfile()): + key = valpath.relto(vdir).replace(valpath.sep, "/") + val = config.cache.get(key, dummy) + if val is dummy: + tw.line("%s contains unreadable content, " + "will be ignored" % key) + else: + tw.line("%s contains:" % key) + stream = py.io.TextIO() + pprint(val, stream=stream) + for line in stream.getvalue().splitlines(): + tw.line(" " + line) + + ddir = basedir.join("d") + if ddir.isdir() and ddir.listdir(): + tw.sep("-", "cache directories") + for p in basedir.join("d").visit(): + #if p.check(dir=1): + # print("%s/" % p.relto(basedir)) + if p.isfile(): + key = p.relto(basedir) + tw.line("%s is a file of length %d" % ( + key, p.size())) + return 0 diff --git a/lib/spack/external/_pytest/capture.py b/lib/spack/external/_pytest/capture.py new file mode 100644 index 0000000000..eea81ca187 --- /dev/null +++ b/lib/spack/external/_pytest/capture.py @@ -0,0 +1,491 @@ +""" +per-test stdout/stderr capturing mechanism. + +""" +from __future__ import with_statement + +import contextlib +import sys +import os +from tempfile import TemporaryFile + +import py +import pytest + +from py.io import TextIO +unicode = py.builtin.text + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + '--capture', action="store", + default="fd" if hasattr(os, "dup") else "sys", + metavar="method", choices=['fd', 'sys', 'no'], + help="per-test capturing method: one of fd|sys|no.") + group._addoption( + '-s', action="store_const", const="no", dest="capture", + help="shortcut for --capture=no.") + + +@pytest.hookimpl(hookwrapper=True) +def pytest_load_initial_conftests(early_config, parser, args): + _readline_workaround() + ns = early_config.known_args_namespace + pluginmanager = early_config.pluginmanager + capman = CaptureManager(ns.capture) + pluginmanager.register(capman, "capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + early_config.add_cleanup(capman.reset_capturings) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + early_config.add_cleanup(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.init_capturings() + outcome = yield + out, err = capman.suspendcapture() + if outcome.excinfo is not None: + sys.stdout.write(out) + sys.stderr.write(err) + + +class CaptureManager: + def __init__(self, method): + self._method = method + + def _getcapture(self, method): + if method == "fd": + return MultiCapture(out=True, err=True, Capture=FDCapture) + elif method == "sys": + return MultiCapture(out=True, err=True, Capture=SysCapture) + elif method == "no": + return MultiCapture(out=False, err=False, in_=False) + else: + raise ValueError("unknown capturing method: %r" % method) + + def init_capturings(self): + assert not hasattr(self, 
"_capturing") + self._capturing = self._getcapture(self._method) + self._capturing.start_capturing() + + def reset_capturings(self): + cap = self.__dict__.pop("_capturing", None) + if cap is not None: + cap.pop_outerr_to_orig() + cap.stop_capturing() + + def resumecapture(self): + self._capturing.resume_capturing() + + def suspendcapture(self, in_=False): + self.deactivate_funcargs() + cap = getattr(self, "_capturing", None) + if cap is not None: + try: + outerr = cap.readouterr() + finally: + cap.suspend_capturing(in_=in_) + return outerr + + def activate_funcargs(self, pyfuncitem): + capfuncarg = pyfuncitem.__dict__.pop("_capfuncarg", None) + if capfuncarg is not None: + capfuncarg._start() + self._capfuncarg = capfuncarg + + def deactivate_funcargs(self): + capfuncarg = self.__dict__.pop("_capfuncarg", None) + if capfuncarg is not None: + capfuncarg.close() + + @pytest.hookimpl(hookwrapper=True) + def pytest_make_collect_report(self, collector): + if isinstance(collector, pytest.File): + self.resumecapture() + outcome = yield + out, err = self.suspendcapture() + rep = outcome.get_result() + if out: + rep.sections.append(("Captured stdout", out)) + if err: + rep.sections.append(("Captured stderr", err)) + else: + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item): + self.resumecapture() + yield + self.suspendcapture_item(item, "setup") + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item): + self.resumecapture() + self.activate_funcargs(item) + yield + #self.deactivate_funcargs() called from suspendcapture() + self.suspendcapture_item(item, "call") + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item): + self.resumecapture() + yield + self.suspendcapture_item(item, "teardown") + + @pytest.hookimpl(tryfirst=True) + def pytest_keyboard_interrupt(self, excinfo): + self.reset_capturings() + + @pytest.hookimpl(tryfirst=True) + def pytest_internalerror(self, excinfo): + self.reset_capturings() + + def suspendcapture_item(self, item, when, in_=False): + out, err = self.suspendcapture(in_=in_) + item.add_report_section(when, "stdout", out) + item.add_report_section(when, "stderr", err) + + +error_capsysfderror = "cannot use capsys and capfd at the same time" + + +@pytest.fixture +def capsys(request): + """Enable capturing of writes to sys.stdout/sys.stderr and make + captured output available via ``capsys.readouterr()`` method calls + which return a ``(out, err)`` tuple. + """ + if "capfd" in request.fixturenames: + raise request.raiseerror(error_capsysfderror) + request.node._capfuncarg = c = CaptureFixture(SysCapture, request) + return c + +@pytest.fixture +def capfd(request): + """Enable capturing of writes to file descriptors 1 and 2 and make + captured output available via ``capfd.readouterr()`` method calls + which return a ``(out, err)`` tuple. 
+ """ + if "capsys" in request.fixturenames: + request.raiseerror(error_capsysfderror) + if not hasattr(os, 'dup'): + pytest.skip("capfd funcarg needs os.dup") + request.node._capfuncarg = c = CaptureFixture(FDCapture, request) + return c + + +class CaptureFixture: + def __init__(self, captureclass, request): + self.captureclass = captureclass + self.request = request + + def _start(self): + self._capture = MultiCapture(out=True, err=True, in_=False, + Capture=self.captureclass) + self._capture.start_capturing() + + def close(self): + cap = self.__dict__.pop("_capture", None) + if cap is not None: + self._outerr = cap.pop_outerr_to_orig() + cap.stop_capturing() + + def readouterr(self): + try: + return self._capture.readouterr() + except AttributeError: + return self._outerr + + @contextlib.contextmanager + def disabled(self): + capmanager = self.request.config.pluginmanager.getplugin('capturemanager') + capmanager.suspendcapture_item(self.request.node, "call", in_=True) + try: + yield + finally: + capmanager.resumecapture() + + +def safe_text_dupfile(f, mode, default_encoding="UTF8"): + """ return a open text file object that's a duplicate of f on the + FD-level if possible. + """ + encoding = getattr(f, "encoding", None) + try: + fd = f.fileno() + except Exception: + if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"): + # we seem to have a text stream, let's just use it + return f + else: + newfd = os.dup(fd) + if "b" not in mode: + mode += "b" + f = os.fdopen(newfd, mode, 0) # no buffering + return EncodedFile(f, encoding or default_encoding) + + +class EncodedFile(object): + errors = "strict" # possibly needed by py3 code (issue555) + def __init__(self, buffer, encoding): + self.buffer = buffer + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding, "replace") + self.buffer.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(object.__getattribute__(self, "buffer"), name) + + +class MultiCapture(object): + out = err = in_ = None + + def __init__(self, out=True, err=True, in_=True, Capture=None): + if in_: + self.in_ = Capture(0) + if out: + self.out = Capture(1) + if err: + self.err = Capture(2) + + def start_capturing(self): + if self.in_: + self.in_.start() + if self.out: + self.out.start() + if self.err: + self.err.start() + + def pop_outerr_to_orig(self): + """ pop current snapshot out/err capture and flush to orig streams. """ + out, err = self.readouterr() + if out: + self.out.writeorg(out) + if err: + self.err.writeorg(err) + return out, err + + def suspend_capturing(self, in_=False): + if self.out: + self.out.suspend() + if self.err: + self.err.suspend() + if in_ and self.in_: + self.in_.suspend() + self._in_suspended = True + + def resume_capturing(self): + if self.out: + self.out.resume() + if self.err: + self.err.resume() + if hasattr(self, "_in_suspended"): + self.in_.resume() + del self._in_suspended + + def stop_capturing(self): + """ stop capturing and reset capturing streams """ + if hasattr(self, '_reset'): + raise ValueError("was already stopped") + self._reset = True + if self.out: + self.out.done() + if self.err: + self.err.done() + if self.in_: + self.in_.done() + + def readouterr(self): + """ return snapshot unicode value of stdout/stderr capturings. 
""" + return (self.out.snap() if self.out is not None else "", + self.err.snap() if self.err is not None else "") + +class NoCapture: + __init__ = start = done = suspend = resume = lambda *args: None + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None): + self.targetfd = targetfd + try: + self.targetfd_save = os.dup(self.targetfd) + except OSError: + self.start = lambda: None + self.done = lambda: None + else: + if targetfd == 0: + assert not tmpfile, "cannot set tmpfile with stdin" + tmpfile = open(os.devnull, "r") + self.syscapture = SysCapture(targetfd) + else: + if tmpfile is None: + f = TemporaryFile() + with f: + tmpfile = safe_text_dupfile(f, mode="wb+") + if targetfd in patchsysdict: + self.syscapture = SysCapture(targetfd, tmpfile) + else: + self.syscapture = NoCapture() + self.tmpfile = tmpfile + self.tmpfile_fd = tmpfile.fileno() + + def __repr__(self): + return "" % (self.targetfd, self.targetfd_save) + + def start(self): + """ Start capturing on targetfd using memorized tmpfile. """ + try: + os.fstat(self.targetfd_save) + except (AttributeError, OSError): + raise ValueError("saved filedescriptor not valid anymore") + os.dup2(self.tmpfile_fd, self.targetfd) + self.syscapture.start() + + def snap(self): + f = self.tmpfile + f.seek(0) + res = f.read() + if res: + enc = getattr(f, "encoding", None) + if enc and isinstance(res, bytes): + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + return '' + + def done(self): + """ stop capturing, restore streams, return original capture file, + seeked to position zero. """ + targetfd_save = self.__dict__.pop("targetfd_save") + os.dup2(targetfd_save, self.targetfd) + os.close(targetfd_save) + self.syscapture.done() + self.tmpfile.close() + + def suspend(self): + self.syscapture.suspend() + os.dup2(self.targetfd_save, self.targetfd) + + def resume(self): + self.syscapture.resume() + os.dup2(self.tmpfile_fd, self.targetfd) + + def writeorg(self, data): + """ write to original file descriptor. """ + if py.builtin._istext(data): + data = data.encode("utf8") # XXX use encoding of original stream + os.write(self.targetfd_save, data) + + +class SysCapture: + def __init__(self, fd, tmpfile=None): + name = patchsysdict[fd] + self._old = getattr(sys, name) + self.name = name + if tmpfile is None: + if name == "stdin": + tmpfile = DontReadFromInput() + else: + tmpfile = TextIO() + self.tmpfile = tmpfile + + def start(self): + setattr(sys, self.name, self.tmpfile) + + def snap(self): + f = self.tmpfile + res = f.getvalue() + f.truncate(0) + f.seek(0) + return res + + def done(self): + setattr(sys, self.name, self._old) + del self._old + self.tmpfile.close() + + def suspend(self): + setattr(sys, self.name, self._old) + + def resume(self): + setattr(sys, self.name, self.tmpfile) + + def writeorg(self, data): + self._old.write(data) + self._old.flush() + + +class DontReadFromInput: + """Temporary stub class. Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. 
+ """ + + encoding = None + + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass + + @property + def buffer(self): + if sys.version_info >= (3,0): + return self + else: + raise AttributeError('redirected stdin has no attribute buffer') + + +def _readline_workaround(): + """ + Ensure readline is imported so that it attaches to the correct stdio + handles on Windows. + + Pdb uses readline support where available--when not running from the Python + prompt, the readline module is not imported until running the pdb REPL. If + running pytest with the --pdb option this means the readline module is not + imported until after I/O capture has been started. + + This is a problem for pyreadline, which is often used to implement readline + support on Windows, as it does not attach to the correct handles for stdout + and/or stdin if they have been redirected by the FDCapture mechanism. This + workaround ensures that readline is imported before I/O capture is setup so + that it can attach to the actual stdin/out for the console. + + See https://github.com/pytest-dev/pytest/pull/1281 + """ + + if not sys.platform.startswith('win32'): + return + try: + import readline # noqa + except ImportError: + pass diff --git a/lib/spack/external/_pytest/compat.py b/lib/spack/external/_pytest/compat.py new file mode 100644 index 0000000000..51fc3bc5c1 --- /dev/null +++ b/lib/spack/external/_pytest/compat.py @@ -0,0 +1,230 @@ +""" +python version compatibility code +""" +import sys +import inspect +import types +import re +import functools + +import py + +import _pytest + + + +try: + import enum +except ImportError: # pragma: no cover + # Only available in Python 3.4+ or as a backport + enum = None + +_PY3 = sys.version_info > (3, 0) +_PY2 = not _PY3 + + +NoneType = type(None) +NOTSET = object() + +if hasattr(inspect, 'signature'): + def _format_args(func): + return str(inspect.signature(func)) +else: + def _format_args(func): + return inspect.formatargspec(*inspect.getargspec(func)) + +isfunction = inspect.isfunction +isclass = inspect.isclass +# used to work around a python2 exception info leak +exc_clear = getattr(sys, 'exc_clear', lambda: None) +# The type of re.compile objects is not exposed in Python. 
+REGEX_TYPE = type(re.compile('')) + + +def is_generator(func): + try: + return _pytest._code.getrawcode(func).co_flags & 32 # generator function + except AttributeError: # builtin functions have no bytecode + # assume them to not be generators + return False + + +def getlocation(function, curdir): + import inspect + fn = py.path.local(inspect.getfile(function)) + lineno = py.builtin._getcode(function).co_firstlineno + if fn.relto(curdir): + fn = fn.relto(curdir) + return "%s:%d" %(fn, lineno+1) + + +def num_mock_patch_args(function): + """ return number of arguments used up by mock arguments (if any) """ + patchings = getattr(function, "patchings", None) + if not patchings: + return 0 + mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None)) + if mock is not None: + return len([p for p in patchings + if not p.attribute_name and p.new is mock.DEFAULT]) + return len(patchings) + + +def getfuncargnames(function, startindex=None): + # XXX merge with main.py's varnames + #assert not isclass(function) + realfunction = function + while hasattr(realfunction, "__wrapped__"): + realfunction = realfunction.__wrapped__ + if startindex is None: + startindex = inspect.ismethod(function) and 1 or 0 + if realfunction != function: + startindex += num_mock_patch_args(function) + function = realfunction + if isinstance(function, functools.partial): + argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0] + partial = function + argnames = argnames[len(partial.args):] + if partial.keywords: + for kw in partial.keywords: + argnames.remove(kw) + else: + argnames = inspect.getargs(_pytest._code.getrawcode(function))[0] + defaults = getattr(function, 'func_defaults', + getattr(function, '__defaults__', None)) or () + numdefaults = len(defaults) + if numdefaults: + return tuple(argnames[startindex:-numdefaults]) + return tuple(argnames[startindex:]) + + + +if sys.version_info[:2] == (2, 6): + def isclass(object): + """ Return true if the object is a class. Overrides inspect.isclass for + python 2.6 because it will return True for objects which always return + something on __getattr__ calls (see #1035). + Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc + """ + return isinstance(object, (type, types.ClassType)) + + +if _PY3: + import codecs + + STRING_TYPES = bytes, str + + def _escape_strings(val): + """If val is pure ascii, returns it as a str(). Otherwise, escapes + bytes objects into a sequence of escaped bytes: + + b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6' + + and escapes unicode objects into a sequence of escaped unicode + ids, e.g.: + + '4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944' + + note: + the obvious "v.decode('unicode-escape')" will return + valid utf-8 unicode if it finds them in bytes, but we + want to return escaped bytes for any byte, even if they match + a utf-8 string. + + """ + if isinstance(val, bytes): + if val: + # source: http://goo.gl/bGsnwC + encoded_bytes, _ = codecs.escape_encode(val) + return encoded_bytes.decode('ascii') + else: + # empty bytes crashes codecs.escape_encode (#1087) + return '' + else: + return val.encode('unicode_escape').decode('ascii') +else: + STRING_TYPES = bytes, str, unicode + + def _escape_strings(val): + """In py2 bytes and str are the same type, so return if it's a bytes + object, return it unchanged if it is a full ascii string, + otherwise escape it into its binary form. + + If it's a unicode string, change the unicode characters into + unicode escapes. 
+ + """ + if isinstance(val, bytes): + try: + return val.encode('ascii') + except UnicodeDecodeError: + return val.encode('string-escape') + else: + return val.encode('unicode-escape') + + +def get_real_func(obj): + """ gets the real function object of the (possibly) wrapped object by + functools.wraps or functools.partial. + """ + while hasattr(obj, "__wrapped__"): + obj = obj.__wrapped__ + if isinstance(obj, functools.partial): + obj = obj.func + return obj + + +def getfslineno(obj): + # xxx let decorators etc specify a sane ordering + obj = get_real_func(obj) + if hasattr(obj, 'place_as'): + obj = obj.place_as + fslineno = _pytest._code.getfslineno(obj) + assert isinstance(fslineno[1], int), obj + return fslineno + + +def getimfunc(func): + try: + return func.__func__ + except AttributeError: + try: + return func.im_func + except AttributeError: + return func + + +def safe_getattr(object, name, default): + """ Like getattr but return default upon any Exception. + + Attribute access can potentially fail for 'evil' Python objects. + See issue214 + """ + try: + return getattr(object, name, default) + except Exception: + return default + + +def _is_unittest_unexpected_success_a_failure(): + """Return if the test suite should fail if a @expectedFailure unittest test PASSES. + + From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful: + Changed in version 3.4: Returns False if there were any + unexpectedSuccesses from tests marked with the expectedFailure() decorator. + """ + return sys.version_info >= (3, 4) + + +if _PY3: + def safe_str(v): + """returns v as string""" + return str(v) +else: + def safe_str(v): + """returns v as string, converting to ascii if necessary""" + try: + return str(v) + except UnicodeError: + errors = 'replace' + return v.encode('ascii', errors) diff --git a/lib/spack/external/_pytest/config.py b/lib/spack/external/_pytest/config.py new file mode 100644 index 0000000000..fe386ed0b1 --- /dev/null +++ b/lib/spack/external/_pytest/config.py @@ -0,0 +1,1340 @@ +""" command line options, ini-file and conftest.py processing. """ +import argparse +import shlex +import traceback +import types +import warnings + +import py +# DON't import pytest here because it causes import cycle troubles +import sys, os +import _pytest._code +import _pytest.hookspec # the extension point definitions +import _pytest.assertion +from _pytest._pluggy import PluginManager, HookimplMarker, HookspecMarker +from _pytest.compat import safe_str + +hookimpl = HookimplMarker("pytest") +hookspec = HookspecMarker("pytest") + +# pytest startup +# + + +class ConftestImportFailure(Exception): + def __init__(self, path, excinfo): + Exception.__init__(self, path, excinfo) + self.path = path + self.excinfo = excinfo + + def __str__(self): + etype, evalue, etb = self.excinfo + formatted = traceback.format_tb(etb) + # The level of the tracebacks we want to print is hand crafted :( + return repr(evalue) + '\n' + ''.join(formatted[2:]) + + +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
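+
+    A minimal invocation sketch (the ``-q`` flag and test path are
+    illustrative)::
+
+        import pytest
+        exit_code = pytest.main(["-q", "tests/"])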
+ """ + try: + try: + config = _prepareconfig(args, plugins) + except ConftestImportFailure as e: + tw = py.io.TerminalWriter(sys.stderr) + for line in traceback.format_exception(*e.excinfo): + tw.line(line.rstrip(), red=True) + tw.line("ERROR: could not load %s\n" % (e.path), red=True) + return 4 + else: + try: + config.pluginmanager.check_pending() + return config.hook.pytest_cmdline_main(config=config) + finally: + config._ensure_unconfigure() + except UsageError as e: + for msg in e.args: + sys.stderr.write("ERROR: %s\n" %(msg,)) + return 4 + +class cmdline: # compatibility namespace + main = staticmethod(main) + + +class UsageError(Exception): + """ error in pytest usage or invocation""" + + +def filename_arg(path, optname): + """ Argparse type validator for filename arguments. + + :path: path of filename + :optname: name of the option + """ + if os.path.isdir(path): + raise UsageError("{0} must be a filename, given: {1}".format(optname, path)) + return path + + +def directory_arg(path, optname): + """Argparse type validator for directory arguments. + + :path: path of directory + :optname: name of the option + """ + if not os.path.isdir(path): + raise UsageError("{0} must be a directory, given: {1}".format(optname, path)) + return path + + +_preinit = [] + +default_plugins = ( + "mark main terminal runner python fixtures debugging unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion " + "junitxml resultlog doctest cacheprovider freeze_support " + "setuponly setupplan").split() + +builtin_plugins = set(default_plugins) +builtin_plugins.add("pytester") + + +def _preloadplugins(): + assert not _preinit + _preinit.append(get_config()) + +def get_config(): + if _preinit: + return _preinit.pop(0) + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + config = Config(pluginmanager) + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return config + +def get_plugin_manager(): + """ + Obtain a new instance of the + :py:class:`_pytest.config.PytestPluginManager`, with default plugins + already loaded. + + This function can be used by integration with other tools, like hooking + into pytest to run tests into an IDE. 
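+
+    A minimal sketch (``MyPlugin`` stands in for any plugin object)::
+
+        pm = get_plugin_manager()
+        pm.register(MyPlugin())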
+ """ + return get_config().pluginmanager + +def _prepareconfig(args=None, plugins=None): + warning = None + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = shlex.split(args, posix=sys.platform != "win32") + from _pytest import deprecated + warning = deprecated.MAIN_STR_ARGS + config = get_config() + pluginmanager = config.pluginmanager + try: + if plugins: + for plugin in plugins: + if isinstance(plugin, py.builtin._basestring): + pluginmanager.consider_pluginarg(plugin) + else: + pluginmanager.register(plugin) + if warning: + config.warn('C1', warning) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args) + except BaseException: + config._ensure_unconfigure() + raise + + +class PytestPluginManager(PluginManager): + """ + Overwrites :py:class:`pluggy.PluginManager` to add pytest-specific + functionality: + + * loading plugins from the command line, ``PYTEST_PLUGIN`` env variable and + ``pytest_plugins`` global variables found in plugins being loaded; + * ``conftest.py`` loading during start-up; + """ + def __init__(self): + super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_") + self._conftest_plugins = set() + + # state related to local conftest plugins + self._path2confmods = {} + self._conftestpath2mod = {} + self._confcutdir = None + self._noconftest = False + self._duplicatepaths = set() + + self.add_hookspecs(_pytest.hookspec) + self.register(self) + if os.environ.get('PYTEST_DEBUG'): + err = sys.stderr + encoding = getattr(err, 'encoding', 'utf8') + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + self.enable_tracing() + + # Config._consider_importhook will set a real object if required. + self.rewrite_hook = _pytest.assertion.DummyRewriteHook() + + def addhooks(self, module_or_class): + """ + .. deprecated:: 2.8 + + Use :py:meth:`pluggy.PluginManager.add_hookspecs` instead. 
+ """ + warning = dict(code="I2", + fslocation=_pytest._code.getfslineno(sys._getframe(1)), + nodeid=None, + message="use pluginmanager.add_hookspecs instead of " + "deprecated addhooks() method.") + self._warn(warning) + return self.add_hookspecs(module_or_class) + + def parse_hookimpl_opts(self, plugin, name): + # pytest hooks are always prefixed with pytest_ + # so we avoid accessing possibly non-readable attributes + # (see issue #1073) + if not name.startswith("pytest_"): + return + # ignore some historic special names which can not be hooks anyway + if name == "pytest_plugins" or name.startswith("pytest_funcarg__"): + return + + method = getattr(plugin, name) + opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name) + if opts is not None: + for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): + opts.setdefault(name, hasattr(method, name)) + return opts + + def parse_hookspec_opts(self, module_or_class, name): + opts = super(PytestPluginManager, self).parse_hookspec_opts( + module_or_class, name) + if opts is None: + method = getattr(module_or_class, name) + if name.startswith("pytest_"): + opts = {"firstresult": hasattr(method, "firstresult"), + "historic": hasattr(method, "historic")} + return opts + + def _verify_hook(self, hook, hookmethod): + super(PytestPluginManager, self)._verify_hook(hook, hookmethod) + if "__multicall__" in hookmethod.argnames: + fslineno = _pytest._code.getfslineno(hookmethod.function) + warning = dict(code="I1", + fslocation=fslineno, + nodeid=None, + message="%r hook uses deprecated __multicall__ " + "argument" % (hook.name)) + self._warn(warning) + + def register(self, plugin, name=None): + ret = super(PytestPluginManager, self).register(plugin, name) + if ret: + self.hook.pytest_plugin_registered.call_historic( + kwargs=dict(plugin=plugin, manager=self)) + return ret + + def getplugin(self, name): + # support deprecated naming because plugins (xdist e.g.) use it + return self.get_plugin(name) + + def hasplugin(self, name): + """Return True if the plugin with the given name is registered.""" + return bool(self.get_plugin(name)) + + def pytest_configure(self, config): + # XXX now that the pluginmanager exposes hookimpl(tryfirst...) + # we should remove tryfirst/trylast as markers + config.addinivalue_line("markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible.") + config.addinivalue_line("markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + + def _warn(self, message): + kwargs = message if isinstance(message, dict) else { + 'code': 'I1', + 'message': message, + 'fslocation': None, + 'nodeid': None, + } + self.hook.pytest_logwarning.call_historic(kwargs=kwargs) + + # + # internal API for local conftest plugin handling + # + def _set_initial_conftests(self, namespace): + """ load initial conftest files given a preparsed "namespace". + As conftest files may add their own command line options + which have arguments ('--my-opt somepath') we might get some + false positives. All builtin and 3rd party plugins will have + been loaded, however, so common options will not confuse our logic + here. 
+ """ + current = py.path.local() + self._confcutdir = current.join(namespace.confcutdir, abs=True) \ + if namespace.confcutdir else None + self._noconftest = namespace.noconftest + testpaths = namespace.file_or_dir + foundanchor = False + for path in testpaths: + path = str(path) + # remove node-id syntax + i = path.find("::") + if i != -1: + path = path[:i] + anchor = current.join(path, abs=1) + if exists(anchor): # we found some file object + self._try_load_conftest(anchor) + foundanchor = True + if not foundanchor: + self._try_load_conftest(current) + + def _try_load_conftest(self, anchor): + self._getconftestmodules(anchor) + # let's also consider test* subdirs + if anchor.check(dir=1): + for x in anchor.listdir("test*"): + if x.check(dir=1): + self._getconftestmodules(x) + + def _getconftestmodules(self, path): + if self._noconftest: + return [] + try: + return self._path2confmods[path] + except KeyError: + if path.isfile(): + clist = self._getconftestmodules(path.dirpath()) + else: + # XXX these days we may rather want to use config.rootdir + # and allow users to opt into looking into the rootdir parent + # directories instead of requiring to specify confcutdir + clist = [] + for parent in path.parts(): + if self._confcutdir and self._confcutdir.relto(parent): + continue + conftestpath = parent.join("conftest.py") + if conftestpath.isfile(): + mod = self._importconftest(conftestpath) + clist.append(mod) + + self._path2confmods[path] = clist + return clist + + def _rget_with_confmod(self, name, path): + modules = self._getconftestmodules(path) + for mod in reversed(modules): + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def _importconftest(self, conftestpath): + try: + return self._conftestpath2mod[conftestpath] + except KeyError: + pkgpath = conftestpath.pypkgpath() + if pkgpath is None: + _ensure_removed_sysmodule(conftestpath.purebasename) + try: + mod = conftestpath.pyimport() + except Exception: + raise ConftestImportFailure(conftestpath, sys.exc_info()) + + self._conftest_plugins.add(mod) + self._conftestpath2mod[conftestpath] = mod + dirpath = conftestpath.dirpath() + if dirpath in self._path2confmods: + for path, mods in self._path2confmods.items(): + if path and path.relto(dirpath) or path == dirpath: + assert mod not in mods + mods.append(mod) + self.trace("loaded conftestmodule %r" %(mod)) + self.consider_conftest(mod) + return mod + + # + # API for bootstrapping plugin loading + # + # + + def consider_preparse(self, args): + for opt1,opt2 in zip(args, args[1:]): + if opt1 == "-p": + self.consider_pluginarg(opt2) + + def consider_pluginarg(self, arg): + if arg.startswith("no:"): + name = arg[3:] + self.set_blocked(name) + if not name.startswith("pytest_"): + self.set_blocked("pytest_" + name) + else: + self.import_plugin(arg) + + def consider_conftest(self, conftestmodule): + if self.register(conftestmodule, name=conftestmodule.__file__): + self.consider_module(conftestmodule) + + def consider_env(self): + self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) + + def consider_module(self, mod): + plugins = getattr(mod, 'pytest_plugins', []) + if isinstance(plugins, str): + plugins = [plugins] + self.rewrite_hook.mark_rewrite(*plugins) + self._import_plugin_specs(plugins) + + def _import_plugin_specs(self, spec): + if spec: + if isinstance(spec, str): + spec = spec.split(",") + for import_spec in spec: + self.import_plugin(import_spec) + + def import_plugin(self, modname): + # most often modname refers to 
builtin modules, e.g. "pytester", + # "terminal" or "capture". Those plugins are registered under their + # basename for historic purposes but must be imported with the + # _pytest prefix. + assert isinstance(modname, str) + if self.get_plugin(modname) is not None: + return + if modname in builtin_plugins: + importspec = "_pytest." + modname + else: + importspec = modname + try: + __import__(importspec) + except ImportError as e: + new_exc = ImportError('Error importing plugin "%s": %s' % (modname, safe_str(e.args[0]))) + # copy over name and path attributes + for attr in ('name', 'path'): + if hasattr(e, attr): + setattr(new_exc, attr, getattr(e, attr)) + raise new_exc + except Exception as e: + import pytest + if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception): + raise + self._warn("skipped plugin %r: %s" %((modname, e.msg))) + else: + mod = sys.modules[importspec] + self.register(mod, modname) + self.consider_module(mod) + + +class Parser: + """ Parser for command line arguments and ini-file values. + + :ivar extra_info: dict of generic param -> value to display in case + there's an error processing the command line arguments. + """ + + def __init__(self, usage=None, processopt=None): + self._anonymous = OptionGroup("custom options", parser=self) + self._groups = [] + self._processopt = processopt + self._usage = usage + self._inidict = {} + self._ininames = [] + self.extra_info = {} + + def processoption(self, option): + if self._processopt: + if option.dest: + self._processopt(option) + + def getgroup(self, name, description="", after=None): + """ get (or create) a named option Group. + + :name: name of the option group. + :description: long description for --help output. + :after: name of other group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. + """ + for group in self._groups: + if group.name == name: + return group + group = OptionGroup(name, description, parser=self) + i = 0 + for i, grp in enumerate(self._groups): + if grp.name == after: + break + self._groups.insert(i+1, group) + return group + + def addoption(self, *opts, **attrs): + """ register a command line option. + + :opts: option names, can be short or long options. + :attrs: same attributes which the ``add_option()`` function of the + `argparse library + `_ + accepts. + + After command line parsing options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. 
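+
+        A minimal sketch (the ``--slow`` flag and its help text are
+        illustrative)::
+
+            parser.addoption("--slow", action="store_true", default=False,
+                             help="also run tests marked as slow")
+            # later available as config.getoption("slow")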
+ """ + self._anonymous.addoption(*opts, **attrs) + + def parse(self, args, namespace=None): + from _pytest._argcomplete import try_argcomplete + self.optparser = self._getparser() + try_argcomplete(self.optparser) + return self.optparser.parse_args([str(x) for x in args], namespace=namespace) + + def _getparser(self): + from _pytest._argcomplete import filescompleter + optparser = MyOptionParser(self, self.extra_info) + groups = self._groups + [self._anonymous] + for group in groups: + if group.options: + desc = group.description or group.name + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + # bash like autocompletion for dirs (appending '/') + optparser.add_argument(FILE_OR_DIR, nargs='*').completer=filescompleter + return optparser + + def parse_setoption(self, args, option, namespace=None): + parsedoption = self.parse(args, namespace=namespace) + for name, value in parsedoption.__dict__.items(): + setattr(option, name, value) + return getattr(parsedoption, FILE_OR_DIR) + + def parse_known_args(self, args, namespace=None): + """parses and returns a namespace object with known arguments at this + point. + """ + return self.parse_known_and_unknown_args(args, namespace=namespace)[0] + + def parse_known_and_unknown_args(self, args, namespace=None): + """parses and returns a namespace object with known arguments, and + the remaining arguments unknown at this point. + """ + optparser = self._getparser() + args = [str(x) for x in args] + return optparser.parse_known_args(args, namespace=namespace) + + def addini(self, name, help, type=None, default=None): + """ register an ini-file option. + + :name: name of the ini-variable + :type: type of the variable, can be ``pathlist``, ``args``, ``linelist`` + or ``bool``. + :default: default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. + """ + assert type in (None, "pathlist", "args", "linelist", "bool") + self._inidict[name] = (help, type, default) + self._ininames.append(name) + + +class ArgumentError(Exception): + """ + Raised if an Argument instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + + +class Argument: + """class that mimics the necessary behaviour of optparse.Option + + its currently a least effort implementation + and ignoring choices and integer prefixes + https://docs.python.org/3/library/optparse.html#optparse-standard-option-types + """ + _typ_map = { + 'int': int, + 'string': str, + 'float': float, + 'complex': complex, + } + + def __init__(self, *names, **attrs): + """store parms in private vars for use in add_argument""" + self._attrs = attrs + self._short_opts = [] + self._long_opts = [] + self.dest = attrs.get('dest') + if '%default' in (attrs.get('help') or ''): + warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + DeprecationWarning, + stacklevel=3) + try: + typ = attrs['type'] + except KeyError: + pass + else: + # this might raise a keyerror as well, don't want to catch that + if isinstance(typ, py.builtin._basestring): + if typ == 'choice': + warnings.warn( + 'type argument to addoption() is a string %r.' 
+ ' For parsearg this is optional and when supplied' + ' should be a type.' + ' (options: %s)' % (typ, names), + DeprecationWarning, + stacklevel=3) + # argparse expects a type here take it from + # the type of the first element + attrs['type'] = type(attrs['choices'][0]) + else: + warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this should be a type.' + ' (options: %s)' % (typ, names), + DeprecationWarning, + stacklevel=3) + attrs['type'] = Argument._typ_map[typ] + # used in test_parseopt -> test_parse_defaultgetter + self.type = attrs['type'] + else: + self.type = typ + try: + # attribute existence is tested in Config._processopt + self.default = attrs['default'] + except KeyError: + pass + self._set_opt_strings(names) + if not self.dest: + if self._long_opts: + self.dest = self._long_opts[0][2:].replace('-', '_') + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError: + raise ArgumentError( + 'need a long or short option', self) + + def names(self): + return self._short_opts + self._long_opts + + def attrs(self): + # update any attributes set by processopt + attrs = 'default dest help'.split() + if self.dest: + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get('help'): + a = self._attrs['help'] + a = a.replace('%default', '%(default)s') + #a = a.replace('%prog', '%(prog)s') + self._attrs['help'] = a + return self._attrs + + def _set_opt_strings(self, opts): + """directly from optparse + + might not be necessary as this is passed to argparse later on""" + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, self) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self) + self._long_opts.append(opt) + + def __repr__(self): + args = [] + if self._short_opts: + args += ['_short_opts: ' + repr(self._short_opts)] + if self._long_opts: + args += ['_long_opts: ' + repr(self._long_opts)] + args += ['dest: ' + repr(self.dest)] + if hasattr(self, 'type'): + args += ['type: ' + repr(self.type)] + if hasattr(self, 'default'): + args += ['default: ' + repr(self.default)] + return 'Argument({0})'.format(', '.join(args)) + + +class OptionGroup: + def __init__(self, name, description="", parser=None): + self.name = name + self.description = description + self.options = [] + self.parser = parser + + def addoption(self, *optnames, **attrs): + """ add an option to this group. + + if a shortened version of a long option is specified it will + be suppressed in the help. 
addoption('--twowords', '--two-words') + results in help showing '--two-words' only, but --twowords gets + accepted **and** the automatic destination is in args.twowords + """ + conflict = set(optnames).intersection( + name for opt in self.options for name in opt.names()) + if conflict: + raise ValueError("option names %s already added" % conflict) + option = Argument(*optnames, **attrs) + self._addoption_instance(option, shortupper=False) + + def _addoption(self, *optnames, **attrs): + option = Argument(*optnames, **attrs) + self._addoption_instance(option, shortupper=True) + + def _addoption_instance(self, option, shortupper=False): + if not shortupper: + for opt in option._short_opts: + if opt[0] == '-' and opt[1].islower(): + raise ValueError("lowercase shortoptions reserved") + if self.parser: + self.parser.processoption(option) + self.options.append(option) + + +class MyOptionParser(argparse.ArgumentParser): + def __init__(self, parser, extra_info=None): + if not extra_info: + extra_info = {} + self._parser = parser + argparse.ArgumentParser.__init__(self, usage=parser._usage, + add_help=False, formatter_class=DropShorterLongHelpFormatter) + # extra_info is a dict of (param -> value) to display if there's + # an usage error to provide more contextual information to the user + self.extra_info = extra_info + + def parse_args(self, args=None, namespace=None): + """allow splitting of positional arguments""" + args, argv = self.parse_known_args(args, namespace) + if argv: + for arg in argv: + if arg and arg[0] == '-': + lines = ['unrecognized arguments: %s' % (' '.join(argv))] + for k, v in sorted(self.extra_info.items()): + lines.append(' %s: %s' % (k, v)) + self.error('\n'.join(lines)) + getattr(args, FILE_OR_DIR).extend(argv) + return args + + +class DropShorterLongHelpFormatter(argparse.HelpFormatter): + """shorten help for long options that differ only in extra hyphens + + - collapse **long** options that are the same except for extra hyphens + - special action attribute map_long_option allows surpressing additional + long options + - shortcut if there are only two options and one of them is a short one + - cache result on action object as this is called at least 2 times + """ + def _format_action_invocation(self, action): + orgstr = argparse.HelpFormatter._format_action_invocation(self, action) + if orgstr and orgstr[0] != '-': # only optional arguments + return orgstr + res = getattr(action, '_formatted_action_invocation', None) + if res: + return res + options = orgstr.split(', ') + if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): + # a shortcut for '-h, --help' or '--abc', '-a' + action._formatted_action_invocation = orgstr + return orgstr + return_list = [] + option_map = getattr(action, 'map_long_option', {}) + if option_map is None: + option_map = {} + short_long = {} + for option in options: + if len(option) == 2 or option[2] == ' ': + continue + if not option.startswith('--'): + raise ArgumentError('long optional argument without "--": [%s]' + % (option), self) + xxoption = option[2:] + if xxoption.split()[0] not in option_map: + shortened = xxoption.replace('-', '') + if shortened not in short_long or \ + len(short_long[shortened]) < len(xxoption): + short_long[shortened] = xxoption + # now short_long has been filled out to the longest with dashes + # **and** we keep the right option ordering from add_argument + for option in options: # + if len(option) == 2 or option[2] == ' ': + return_list.append(option) + if option[2:] == 
short_long.get(option.replace('-', '')): + return_list.append(option.replace(' ', '=', 1)) + action._formatted_action_invocation = ', '.join(return_list) + return action._formatted_action_invocation + + + +def _ensure_removed_sysmodule(modname): + try: + del sys.modules[modname] + except KeyError: + pass + +class CmdOptions(object): + """ holds cmdline options as attributes.""" + def __init__(self, values=()): + self.__dict__.update(values) + def __repr__(self): + return "" %(self.__dict__,) + def copy(self): + return CmdOptions(self.__dict__) + +class Notset: + def __repr__(self): + return "" + + +notset = Notset() +FILE_OR_DIR = 'file_or_dir' + + +class Config(object): + """ access to configuration values, pluginmanager and plugin hooks. """ + + def __init__(self, pluginmanager): + #: access to command line option as attributes. + #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead + self.option = CmdOptions() + _a = FILE_OR_DIR + self._parser = Parser( + usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a), + processopt=self._processopt, + ) + #: a pluginmanager instance + self.pluginmanager = pluginmanager + self.trace = self.pluginmanager.trace.root.get("config") + self.hook = self.pluginmanager.hook + self._inicache = {} + self._opt2dest = {} + self._cleanup = [] + self._warn = self.pluginmanager._warn + self.pluginmanager.register(self, "pytestconfig") + self._configured = False + + def do_setns(dic): + import pytest + setns(pytest, dic) + + self.hook.pytest_namespace.call_historic(do_setns, {}) + self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser)) + + def add_cleanup(self, func): + """ Add a function to be called when the config object gets out of + use (usually coninciding with pytest_unconfigure).""" + self._cleanup.append(func) + + def _do_configure(self): + assert not self._configured + self._configured = True + self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) + + def _ensure_unconfigure(self): + if self._configured: + self._configured = False + self.hook.pytest_unconfigure(config=self) + self.hook.pytest_configure._call_history = [] + while self._cleanup: + fin = self._cleanup.pop() + fin() + + def warn(self, code, message, fslocation=None): + """ generate a warning for this test session. """ + self.hook.pytest_logwarning.call_historic(kwargs=dict( + code=code, message=message, + fslocation=fslocation, nodeid=None)) + + def get_terminal_writer(self): + return self.pluginmanager.get_plugin("terminalreporter")._tw + + def pytest_cmdline_parse(self, pluginmanager, args): + # REF1 assert self == pluginmanager.config, (self, pluginmanager.config) + self.parse(args) + return self + + def notify_exception(self, excinfo, option=None): + if option and option.fulltrace: + style = "long" + else: + style = "native" + excrepr = excinfo.getrepr(funcargs=True, + showlocals=getattr(option, 'showlocals', False), + style=style, + ) + res = self.hook.pytest_internalerror(excrepr=excrepr, + excinfo=excinfo) + if not py.builtin.any(res): + for line in str(excrepr).split("\n"): + sys.stderr.write("INTERNALERROR> %s\n" %line) + sys.stderr.flush() + + def cwd_relative_nodeid(self, nodeid): + # nodeid's are relative to the rootpath, compute relative to cwd + if self.invocation_dir != self.rootdir: + fullpath = self.rootdir.join(nodeid) + nodeid = self.invocation_dir.bestrelpath(fullpath) + return nodeid + + @classmethod + def fromdictargs(cls, option_dict, args): + """ constructor useable for subprocesses. 
""" + config = get_config() + config.option.__dict__.update(option_dict) + config.parse(args, addopts=False) + for x in config.option.plugins: + config.pluginmanager.consider_pluginarg(x) + return config + + def _processopt(self, opt): + for name in opt._short_opts + opt._long_opts: + self._opt2dest[name] = opt.dest + + if hasattr(opt, 'default') and opt.dest: + if not hasattr(self.option, opt.dest): + setattr(self.option, opt.dest, opt.default) + + @hookimpl(trylast=True) + def pytest_load_initial_conftests(self, early_config): + self.pluginmanager._set_initial_conftests(early_config.known_args_namespace) + + def _initini(self, args): + ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=self.option.copy()) + r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args, warnfunc=self.warn) + self.rootdir, self.inifile, self.inicfg = r + self._parser.extra_info['rootdir'] = self.rootdir + self._parser.extra_info['inifile'] = self.inifile + self.invocation_dir = py.path.local() + self._parser.addini('addopts', 'extra command line options', 'args') + self._parser.addini('minversion', 'minimally required pytest version') + + def _consider_importhook(self, args, entrypoint_name): + """Install the PEP 302 import hook if using assertion re-writing. + + Needs to parse the --assert= option from the commandline + and find all the installed plugins to mark them for re-writing + by the importhook. + """ + ns, unknown_args = self._parser.parse_known_and_unknown_args(args) + mode = ns.assertmode + if mode == 'rewrite': + try: + hook = _pytest.assertion.install_importhook(self) + except SystemError: + mode = 'plain' + else: + import pkg_resources + self.pluginmanager.rewrite_hook = hook + for entrypoint in pkg_resources.iter_entry_points('pytest11'): + # 'RECORD' available for plugins installed normally (pip install) + # 'SOURCES.txt' available for plugins installed in dev mode (pip install -e) + # for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa + # so it shouldn't be an issue + for metadata in ('RECORD', 'SOURCES.txt'): + for entry in entrypoint.dist._get_metadata(metadata): + fn = entry.split(',')[0] + is_simple_module = os.sep not in fn and fn.endswith('.py') + is_package = fn.count(os.sep) == 1 and fn.endswith('__init__.py') + if is_simple_module: + module_name, ext = os.path.splitext(fn) + hook.mark_rewrite(module_name) + elif is_package: + package_name = os.path.dirname(fn) + hook.mark_rewrite(package_name) + self._warn_about_missing_assertion(mode) + + def _warn_about_missing_assertion(self, mode): + try: + assert False + except AssertionError: + pass + else: + if mode == 'plain': + sys.stderr.write("WARNING: ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. 
Are you" + " using python -O?") + else: + sys.stderr.write("WARNING: assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n") + + def _preparse(self, args, addopts=True): + self._initini(args) + if addopts: + args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args + args[:] = self.getini("addopts") + args + self._checkversion() + entrypoint_name = 'pytest11' + self._consider_importhook(args, entrypoint_name) + self.pluginmanager.consider_preparse(args) + self.pluginmanager.load_setuptools_entrypoints(entrypoint_name) + self.pluginmanager.consider_env() + self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy()) + confcutdir = self.known_args_namespace.confcutdir + if self.known_args_namespace.confcutdir is None and self.inifile: + confcutdir = py.path.local(self.inifile).dirname + self.known_args_namespace.confcutdir = confcutdir + try: + self.hook.pytest_load_initial_conftests(early_config=self, + args=args, parser=self._parser) + except ConftestImportFailure: + e = sys.exc_info()[1] + if ns.help or ns.version: + # we don't want to prevent --help/--version to work + # so just let is pass and print a warning at the end + self._warn("could not load initial conftests (%s)\n" % e.path) + else: + raise + + def _checkversion(self): + import pytest + minver = self.inicfg.get('minversion', None) + if minver: + ver = minver.split(".") + myver = pytest.__version__.split(".") + if myver < ver: + raise pytest.UsageError( + "%s:%d: requires pytest-%s, actual pytest-%s'" %( + self.inicfg.config.path, self.inicfg.lineof('minversion'), + minver, pytest.__version__)) + + def parse(self, args, addopts=True): + # parse given cmdline arguments into this config object. + assert not hasattr(self, 'args'), ( + "can only parse cmdline args at most once per Config object") + self._origargs = args + self.hook.pytest_addhooks.call_historic( + kwargs=dict(pluginmanager=self.pluginmanager)) + self._preparse(args, addopts=addopts) + # XXX deprecated hook: + self.hook.pytest_cmdline_preparse(config=self, args=args) + args = self._parser.parse_setoption(args, self.option, namespace=self.option) + if not args: + cwd = os.getcwd() + if cwd == self.rootdir: + args = self.getini('testpaths') + if not args: + args = [cwd] + self.args = args + + def addinivalue_line(self, name, line): + """ add a line to an ini-file option. The option must have been + declared but might not yet be set in which case the line becomes the + the first line in its value. """ + x = self.getini(name) + assert isinstance(x, list) + x.append(line) # modifies the cached list inline + + def getini(self, name): + """ return configuration value from an :ref:`ini file `. If the + specified name hasn't been registered through a prior + :py:func:`parser.addini ` + call (usually from a plugin), a ValueError is raised. 
""" + try: + return self._inicache[name] + except KeyError: + self._inicache[name] = val = self._getini(name) + return val + + def _getini(self, name): + try: + description, type, default = self._parser._inidict[name] + except KeyError: + raise ValueError("unknown configuration value: %r" %(name,)) + value = self._get_override_ini_value(name) + if value is None: + try: + value = self.inicfg[name] + except KeyError: + if default is not None: + return default + if type is None: + return '' + return [] + if type == "pathlist": + dp = py.path.local(self.inicfg.config.path).dirpath() + l = [] + for relpath in shlex.split(value): + l.append(dp.join(relpath, abs=True)) + return l + elif type == "args": + return shlex.split(value) + elif type == "linelist": + return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] + elif type == "bool": + return bool(_strtobool(value.strip())) + else: + assert type is None + return value + + def _getconftest_pathlist(self, name, path): + try: + mod, relroots = self.pluginmanager._rget_with_confmod(name, path) + except KeyError: + return None + modpath = py.path.local(mod.__file__).dirpath() + l = [] + for relroot in relroots: + if not isinstance(relroot, py.path.local): + relroot = relroot.replace("/", py.path.local.sep) + relroot = modpath.join(relroot, abs=True) + l.append(relroot) + return l + + def _get_override_ini_value(self, name): + value = None + # override_ini is a list of list, to support both -o foo1=bar1 foo2=bar2 and + # and -o foo1=bar1 -o foo2=bar2 options + # always use the last item if multiple value set for same ini-name, + # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2 + if self.getoption("override_ini", None): + for ini_config_list in self.option.override_ini: + for ini_config in ini_config_list: + try: + (key, user_ini_value) = ini_config.split("=", 1) + except ValueError: + raise UsageError("-o/--override-ini expects option=value style.") + if key == name: + value = user_ini_value + return value + + def getoption(self, name, default=notset, skip=False): + """ return command line option value. + + :arg name: name of the option. You may also specify + the literal ``--OPT`` option instead of the "dest" option name. + :arg default: default value if no option of that name exists. + :arg skip: if True raise pytest.skip if option does not exists + or has a None value. + """ + name = self._opt2dest.get(name, name) + try: + val = getattr(self.option, name) + if val is None and skip: + raise AttributeError(name) + return val + except AttributeError: + if default is not notset: + return default + if skip: + import pytest + pytest.skip("no %r option found" %(name,)) + raise ValueError("no option named %r" % (name,)) + + def getvalue(self, name, path=None): + """ (deprecated, use getoption()) """ + return self.getoption(name) + + def getvalueorskip(self, name, path=None): + """ (deprecated, use getoption(skip=True)) """ + return self.getoption(name, skip=True) + +def exists(path, ignore=EnvironmentError): + try: + return path.check() + except ignore: + return False + +def getcfg(args, warnfunc=None): + """ + Search the list of arguments for a valid ini-file for pytest, + and return a tuple of (rootdir, inifile, cfg-dict). + + note: warnfunc is an optional function used to warn + about ini-files that use deprecated features. + This parameter should be removed when pytest + adopts standard deprecation warnings (#1804). 
+ """ + from _pytest.deprecated import SETUP_CFG_PYTEST + inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"] + args = [x for x in args if not str(x).startswith("-")] + if not args: + args = [py.path.local()] + for arg in args: + arg = py.path.local(arg) + for base in arg.parts(reverse=True): + for inibasename in inibasenames: + p = base.join(inibasename) + if exists(p): + iniconfig = py.iniconfig.IniConfig(p) + if 'pytest' in iniconfig.sections: + if inibasename == 'setup.cfg' and warnfunc: + warnfunc('C1', SETUP_CFG_PYTEST) + return base, p, iniconfig['pytest'] + if inibasename == 'setup.cfg' and 'tool:pytest' in iniconfig.sections: + return base, p, iniconfig['tool:pytest'] + elif inibasename == "pytest.ini": + # allowed to be empty + return base, p, {} + return None, None, None + + +def get_common_ancestor(args): + # args are what we get after early command line parsing (usually + # strings, but can be py.path.local objects as well) + common_ancestor = None + for arg in args: + if str(arg)[0] == "-": + continue + p = py.path.local(arg) + if not p.exists(): + continue + if common_ancestor is None: + common_ancestor = p + else: + if p.relto(common_ancestor) or p == common_ancestor: + continue + elif common_ancestor.relto(p): + common_ancestor = p + else: + shared = p.common(common_ancestor) + if shared is not None: + common_ancestor = shared + if common_ancestor is None: + common_ancestor = py.path.local() + elif common_ancestor.isfile(): + common_ancestor = common_ancestor.dirpath() + return common_ancestor + + +def get_dirs_from_args(args): + return [d for d in (py.path.local(x) for x in args + if not str(x).startswith("-")) + if d.exists()] + + +def determine_setup(inifile, args, warnfunc=None): + dirs = get_dirs_from_args(args) + if inifile: + iniconfig = py.iniconfig.IniConfig(inifile) + try: + inicfg = iniconfig["pytest"] + except KeyError: + inicfg = None + rootdir = get_common_ancestor(dirs) + else: + ancestor = get_common_ancestor(dirs) + rootdir, inifile, inicfg = getcfg([ancestor], warnfunc=warnfunc) + if rootdir is None: + for rootdir in ancestor.parts(reverse=True): + if rootdir.join("setup.py").exists(): + break + else: + rootdir, inifile, inicfg = getcfg(dirs, warnfunc=warnfunc) + if rootdir is None: + rootdir = get_common_ancestor([py.path.local(), ancestor]) + is_fs_root = os.path.splitdrive(str(rootdir))[1] == os.sep + if is_fs_root: + rootdir = ancestor + return rootdir, inifile, inicfg or {} + + +def setns(obj, dic): + import pytest + for name, value in dic.items(): + if isinstance(value, dict): + mod = getattr(obj, name, None) + if mod is None: + modname = "pytest.%s" % name + mod = types.ModuleType(modname) + sys.modules[modname] = mod + mod.__all__ = [] + setattr(obj, name, mod) + obj.__all__.append(name) + setns(mod, value) + else: + setattr(obj, name, value) + obj.__all__.append(name) + #if obj != pytest: + # pytest.__all__.append(name) + setattr(pytest, name, value) + + +def create_terminal_writer(config, *args, **kwargs): + """Create a TerminalWriter instance configured according to the options + in the config object. Every code which requires a TerminalWriter object + and has access to a config object should use this function. + """ + tw = py.io.TerminalWriter(*args, **kwargs) + if config.option.color == 'yes': + tw.hasmarkup = True + if config.option.color == 'no': + tw.hasmarkup = False + return tw + + +def _strtobool(val): + """Convert a string representation of truth to true (1) or false (0). 
+ + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if + 'val' is anything else. + + .. note:: copied from distutils.util + """ + val = val.lower() + if val in ('y', 'yes', 't', 'true', 'on', '1'): + return 1 + elif val in ('n', 'no', 'f', 'false', 'off', '0'): + return 0 + else: + raise ValueError("invalid truth value %r" % (val,)) diff --git a/lib/spack/external/_pytest/debugging.py b/lib/spack/external/_pytest/debugging.py new file mode 100644 index 0000000000..d96170bd8b --- /dev/null +++ b/lib/spack/external/_pytest/debugging.py @@ -0,0 +1,124 @@ +""" interactive debugging with PDB, the Python Debugger. """ +from __future__ import absolute_import +import pdb +import sys + +import pytest + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + '--pdb', dest="usepdb", action="store_true", + help="start the interactive Python debugger on errors.") + group._addoption( + '--pdbcls', dest="usepdb_cls", metavar="modulename:classname", + help="start a custom interactive Python debugger on errors. " + "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb") + +def pytest_namespace(): + return {'set_trace': pytestPDB().set_trace} + +def pytest_configure(config): + if config.getvalue("usepdb") or config.getvalue("usepdb_cls"): + config.pluginmanager.register(PdbInvoke(), 'pdbinvoke') + if config.getvalue("usepdb_cls"): + modname, classname = config.getvalue("usepdb_cls").split(":") + __import__(modname) + pdb_cls = getattr(sys.modules[modname], classname) + else: + pdb_cls = pdb.Pdb + pytestPDB._pdb_cls = pdb_cls + + old = (pdb.set_trace, pytestPDB._pluginmanager) + + def fin(): + pdb.set_trace, pytestPDB._pluginmanager = old + pytestPDB._config = None + pytestPDB._pdb_cls = pdb.Pdb + + pdb.set_trace = pytest.set_trace + pytestPDB._pluginmanager = config.pluginmanager + pytestPDB._config = config + config._cleanup.append(fin) + +class pytestPDB: + """ Pseudo PDB that defers to the real pdb. """ + _pluginmanager = None + _config = None + _pdb_cls = pdb.Pdb + + def set_trace(self): + """ invoke PDB set_trace debugging, dropping any IO capturing. """ + import _pytest.config + frame = sys._getframe().f_back + if self._pluginmanager is not None: + capman = self._pluginmanager.getplugin("capturemanager") + if capman: + capman.suspendcapture(in_=True) + tw = _pytest.config.create_terminal_writer(self._config) + tw.line() + tw.sep(">", "PDB set_trace (IO-capturing turned off)") + self._pluginmanager.hook.pytest_enter_pdb(config=self._config) + self._pdb_cls().set_trace(frame) + + +class PdbInvoke: + def pytest_exception_interact(self, node, call, report): + capman = node.config.pluginmanager.getplugin("capturemanager") + if capman: + out, err = capman.suspendcapture(in_=True) + sys.stdout.write(out) + sys.stdout.write(err) + _enter_pdb(node, call.excinfo, report) + + def pytest_internalerror(self, excrepr, excinfo): + for line in str(excrepr).split("\n"): + sys.stderr.write("INTERNALERROR> %s\n" %line) + sys.stderr.flush() + tb = _postmortem_traceback(excinfo) + post_mortem(tb) + + +def _enter_pdb(node, excinfo, rep): + # XXX we re-use the TerminalReporter's terminalwriter + # because this seems to avoid some encoding related troubles + # for not completely clear reasons. 
+ tw = node.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + tw.sep(">", "traceback") + rep.toterminal(tw) + tw.sep(">", "entering PDB") + tb = _postmortem_traceback(excinfo) + post_mortem(tb) + rep._pdbshown = True + return rep + + +def _postmortem_traceback(excinfo): + # A doctest.UnexpectedException is not useful for post_mortem. + # Use the underlying exception instead: + from doctest import UnexpectedException + if isinstance(excinfo.value, UnexpectedException): + return excinfo.value.exc_info[2] + else: + return excinfo._excinfo[2] + + +def _find_last_non_hidden_frame(stack): + i = max(0, len(stack) - 1) + while i and stack[i][0].f_locals.get("__tracebackhide__", False): + i -= 1 + return i + + +def post_mortem(t): + class Pdb(pytestPDB._pdb_cls): + def get_stack(self, f, t): + stack, i = pdb.Pdb.get_stack(self, f, t) + if f is None: + i = _find_last_non_hidden_frame(stack) + return stack, i + p = Pdb() + p.reset() + p.interaction(None, t) diff --git a/lib/spack/external/_pytest/deprecated.py b/lib/spack/external/_pytest/deprecated.py new file mode 100644 index 0000000000..6edc475f6e --- /dev/null +++ b/lib/spack/external/_pytest/deprecated.py @@ -0,0 +1,24 @@ +""" +This module contains deprecation messages and bits of code used elsewhere in the codebase +that is planned to be removed in the next pytest release. + +Keeping it in a central location makes it easy to track what is deprecated and should +be removed when the time comes. +""" + + +MAIN_STR_ARGS = 'passing a string to pytest.main() is deprecated, ' \ + 'pass a list of arguments instead.' + +YIELD_TESTS = 'yield tests are deprecated, and scheduled to be removed in pytest 4.0' + +FUNCARG_PREFIX = ( + '{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated ' + 'and scheduled to be removed in pytest 4.0. ' + 'Please remove the prefix and use the @pytest.fixture decorator instead.') + +SETUP_CFG_PYTEST = '[pytest] section in setup.cfg files is deprecated, use [tool:pytest] instead.' 
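+
+# A sketch of how these messages are consumed elsewhere in the codebase
+# (see getcfg() in config.py); the constants themselves carry no logic:
+#
+#     if inibasename == 'setup.cfg' and warnfunc:
+#         warnfunc('C1', SETUP_CFG_PYTEST)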
+ +GETFUNCARGVALUE = "use of getfuncargvalue is deprecated, use getfixturevalue" + +RESULT_LOG = '--result-log is deprecated and scheduled for removal in pytest 4.0' diff --git a/lib/spack/external/_pytest/doctest.py b/lib/spack/external/_pytest/doctest.py new file mode 100644 index 0000000000..f4782dded5 --- /dev/null +++ b/lib/spack/external/_pytest/doctest.py @@ -0,0 +1,331 @@ +""" discover and run doctests in modules and test files.""" +from __future__ import absolute_import + +import traceback + +import pytest +from _pytest._code.code import ExceptionInfo, ReprFileLocation, TerminalRepr +from _pytest.fixtures import FixtureRequest + + +DOCTEST_REPORT_CHOICE_NONE = 'none' +DOCTEST_REPORT_CHOICE_CDIFF = 'cdiff' +DOCTEST_REPORT_CHOICE_NDIFF = 'ndiff' +DOCTEST_REPORT_CHOICE_UDIFF = 'udiff' +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = 'only_first_failure' + +DOCTEST_REPORT_CHOICES = ( + DOCTEST_REPORT_CHOICE_NONE, + DOCTEST_REPORT_CHOICE_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF, + DOCTEST_REPORT_CHOICE_UDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, +) + +def pytest_addoption(parser): + parser.addini('doctest_optionflags', 'option flags for doctests', + type="args", default=["ELLIPSIS"]) + group = parser.getgroup("collect") + group.addoption("--doctest-modules", + action="store_true", default=False, + help="run doctests in all .py modules", + dest="doctestmodules") + group.addoption("--doctest-report", + type=str.lower, default="udiff", + help="choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport") + group.addoption("--doctest-glob", + action="append", default=[], metavar="pat", + help="doctests file matching pattern, default: test*.txt", + dest="doctestglob") + group.addoption("--doctest-ignore-import-errors", + action="store_true", default=False, + help="ignore doctest ImportErrors", + dest="doctest_ignore_import_errors") + + +def pytest_collect_file(path, parent): + config = parent.config + if path.ext == ".py": + if config.option.doctestmodules: + return DoctestModule(path, parent) + elif _is_doctest(config, path, parent): + return DoctestTextfile(path, parent) + + +def _is_doctest(config, path, parent): + if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path): + return True + globs = config.getoption("doctestglob") or ['test*.txt'] + for glob in globs: + if path.check(fnmatch=glob): + return True + return False + + +class ReprFailDoctest(TerminalRepr): + + def __init__(self, reprlocation, lines): + self.reprlocation = reprlocation + self.lines = lines + + def toterminal(self, tw): + for line in self.lines: + tw.line(line) + self.reprlocation.toterminal(tw) + + +class DoctestItem(pytest.Item): + def __init__(self, name, parent, runner=None, dtest=None): + super(DoctestItem, self).__init__(name, parent) + self.runner = runner + self.dtest = dtest + self.obj = None + self.fixture_request = None + + def setup(self): + if self.dtest is not None: + self.fixture_request = _setup_fixtures(self) + globs = dict(getfixture=self.fixture_request.getfixturevalue) + for name, value in self.fixture_request.getfixturevalue('doctest_namespace').items(): + globs[name] = value + self.dtest.globs.update(globs) + + def runtest(self): + _check_all_skipped(self.dtest) + self.runner.run(self.dtest) + + def repr_failure(self, excinfo): + import doctest + if excinfo.errisinstance((doctest.DocTestFailure, + doctest.UnexpectedException)): + doctestfailure = excinfo.value + example = doctestfailure.example + test = doctestfailure.test + 
filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = excinfo.type.__name__ + reprlocation = ReprFileLocation(filename, lineno, message) + checker = _get_checker() + report_choice = _get_report_choice(self.config.getoption("doctestreport")) + if lineno is not None: + lines = doctestfailure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + lines = ["%03d %s" % (i + test.lineno + 1, x) + for (i, x) in enumerate(lines)] + # trim docstring error lines to 10 + lines = lines[example.lineno - 9:example.lineno + 1] + else: + lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example'] + indent = '>>>' + for line in example.source.splitlines(): + lines.append('??? %s %s' % (indent, line)) + indent = '...' + if excinfo.errisinstance(doctest.DocTestFailure): + lines += checker.output_difference(example, + doctestfailure.got, report_choice).split("\n") + else: + inner_excinfo = ExceptionInfo(excinfo.value.exc_info) + lines += ["UNEXPECTED EXCEPTION: %s" % + repr(inner_excinfo.value)] + lines += traceback.format_exception(*excinfo.value.exc_info) + return ReprFailDoctest(reprlocation, lines) + else: + return super(DoctestItem, self).repr_failure(excinfo) + + def reportinfo(self): + return self.fspath, None, "[doctest] %s" % self.name + + +def _get_flag_lookup(): + import doctest + return dict(DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, + DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, + NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, + ELLIPSIS=doctest.ELLIPSIS, + IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag(), + ALLOW_BYTES=_get_allow_bytes_flag(), + ) + + +def get_optionflags(parent): + optionflags_str = parent.config.getini("doctest_optionflags") + flag_lookup_table = _get_flag_lookup() + flag_acc = 0 + for flag in optionflags_str: + flag_acc |= flag_lookup_table[flag] + return flag_acc + + +class DoctestTextfile(pytest.Module): + obj = None + + def collect(self): + import doctest + + # inspired by doctest.testfile; ideally we would use it directly, + # but it doesn't support passing a custom checker + text = self.fspath.read() + filename = str(self.fspath) + name = self.fspath.basename + globs = {'__name__': '__main__'} + + + optionflags = get_optionflags(self) + runner = doctest.DebugRunner(verbose=0, optionflags=optionflags, + checker=_get_checker()) + + parser = doctest.DocTestParser() + test = parser.get_doctest(text, globs, name, filename, 0) + if test.examples: + yield DoctestItem(test.name, self, runner, test) + + +def _check_all_skipped(test): + """raises pytest.skip() if all examples in the given DocTest have the SKIP + option set. 
+ """ + import doctest + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) + if all_skipped: + pytest.skip('all tests skipped by +SKIP option') + + +class DoctestModule(pytest.Module): + def collect(self): + import doctest + if self.fspath.basename == "conftest.py": + module = self.config.pluginmanager._importconftest(self.fspath) + else: + try: + module = self.fspath.pyimport() + except ImportError: + if self.config.getvalue('doctest_ignore_import_errors'): + pytest.skip('unable to import module %r' % self.fspath) + else: + raise + # uses internal doctest module parsing mechanism + finder = doctest.DocTestFinder() + optionflags = get_optionflags(self) + runner = doctest.DebugRunner(verbose=0, optionflags=optionflags, + checker=_get_checker()) + for test in finder.find(module, module.__name__): + if test.examples: # skip empty doctests + yield DoctestItem(test.name, self, runner, test) + + +def _setup_fixtures(doctest_item): + """ + Used by DoctestTextfile and DoctestItem to setup fixture information. + """ + def func(): + pass + + doctest_item.funcargs = {} + fm = doctest_item.session._fixturemanager + doctest_item._fixtureinfo = fm.getfixtureinfo(node=doctest_item, func=func, + cls=None, funcargs=False) + fixture_request = FixtureRequest(doctest_item) + fixture_request._fillfixtures() + return fixture_request + + +def _get_checker(): + """ + Returns a doctest.OutputChecker subclass that takes in account the + ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES + to strip b'' prefixes. + Useful when the same doctest should run in Python 2 and Python 3. + + An inner class is used to avoid importing "doctest" at the module + level. + """ + if hasattr(_get_checker, 'LiteralsOutputChecker'): + return _get_checker.LiteralsOutputChecker() + + import doctest + import re + + class LiteralsOutputChecker(doctest.OutputChecker): + """ + Copied from doctest_nose_plugin.py from the nltk project: + https://github.com/nltk/nltk + + Further extended to also support byte literals. + """ + + _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) + _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE) + + def check_output(self, want, got, optionflags): + res = doctest.OutputChecker.check_output(self, want, got, + optionflags) + if res: + return True + + allow_unicode = optionflags & _get_allow_unicode_flag() + allow_bytes = optionflags & _get_allow_bytes_flag() + if not allow_unicode and not allow_bytes: + return False + + else: # pragma: no cover + def remove_prefixes(regex, txt): + return re.sub(regex, r'\1\2', txt) + + if allow_unicode: + want = remove_prefixes(self._unicode_literal_re, want) + got = remove_prefixes(self._unicode_literal_re, got) + if allow_bytes: + want = remove_prefixes(self._bytes_literal_re, want) + got = remove_prefixes(self._bytes_literal_re, got) + res = doctest.OutputChecker.check_output(self, want, got, + optionflags) + return res + + _get_checker.LiteralsOutputChecker = LiteralsOutputChecker + return _get_checker.LiteralsOutputChecker() + + +def _get_allow_unicode_flag(): + """ + Registers and returns the ALLOW_UNICODE flag. + """ + import doctest + return doctest.register_optionflag('ALLOW_UNICODE') + + +def _get_allow_bytes_flag(): + """ + Registers and returns the ALLOW_BYTES flag. 
+ """ + import doctest + return doctest.register_optionflag('ALLOW_BYTES') + + +def _get_report_choice(key): + """ + This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid + importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests. + """ + import doctest + + return { + DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, + DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, + DOCTEST_REPORT_CHOICE_NONE: 0, + }[key] + +@pytest.fixture(scope='session') +def doctest_namespace(): + """ + Inject names into the doctest namespace. + """ + return dict() diff --git a/lib/spack/external/_pytest/fixtures.py b/lib/spack/external/_pytest/fixtures.py new file mode 100644 index 0000000000..28bcd4d8d7 --- /dev/null +++ b/lib/spack/external/_pytest/fixtures.py @@ -0,0 +1,1134 @@ +import sys + +from py._code.code import FormattedExcinfo + +import py +import pytest +import warnings + +import inspect +import _pytest +from _pytest._code.code import TerminalRepr +from _pytest.compat import ( + NOTSET, exc_clear, _format_args, + getfslineno, get_real_func, + is_generator, isclass, getimfunc, + getlocation, getfuncargnames, +) + +def pytest_sessionstart(session): + session._fixturemanager = FixtureManager(session) + + +scopename2class = {} + + +scope2props = dict(session=()) +scope2props["module"] = ("fspath", "module") +scope2props["class"] = scope2props["module"] + ("cls",) +scope2props["instance"] = scope2props["class"] + ("instance", ) +scope2props["function"] = scope2props["instance"] + ("function", "keywords") + +def scopeproperty(name=None, doc=None): + def decoratescope(func): + scopename = name or func.__name__ + + def provide(self): + if func.__name__ in scope2props[self.scope]: + return func(self) + raise AttributeError("%s not available in %s-scoped context" % ( + scopename, self.scope)) + + return property(provide, None, None, func.__doc__) + return decoratescope + + +def pytest_namespace(): + scopename2class.update({ + 'class': pytest.Class, + 'module': pytest.Module, + 'function': pytest.Item, + }) + return { + 'fixture': fixture, + 'yield_fixture': yield_fixture, + 'collect': {'_fillfuncargs': fillfixtures} + } + + +def get_scope_node(node, scope): + cls = scopename2class.get(scope) + if cls is None: + if scope == "session": + return node.session + raise ValueError("unknown scope") + return node.getparent(cls) + + +def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): + # this function will transform all collected calls to a functions + # if they use direct funcargs (i.e. direct parametrization) + # because we want later test execution to be able to rely on + # an existing FixtureDef structure for all arguments. + # XXX we can probably avoid this algorithm if we modify CallSpec2 + # to directly care for creating the fixturedefs within its methods. 
+ if not metafunc._calls[0].funcargs: + return # this function call does not have direct parametrization + # collect funcargs of all callspecs into a list of values + arg2params = {} + arg2scope = {} + for callspec in metafunc._calls: + for argname, argvalue in callspec.funcargs.items(): + assert argname not in callspec.params + callspec.params[argname] = argvalue + arg2params_list = arg2params.setdefault(argname, []) + callspec.indices[argname] = len(arg2params_list) + arg2params_list.append(argvalue) + if argname not in arg2scope: + scopenum = callspec._arg2scopenum.get(argname, + scopenum_function) + arg2scope[argname] = scopes[scopenum] + callspec.funcargs.clear() + + # register artificial FixtureDef's so that later at test execution + # time we can rely on a proper FixtureDef to exist for fixture setup. + arg2fixturedefs = metafunc._arg2fixturedefs + for argname, valuelist in arg2params.items(): + # if we have a scope that is higher than function we need + # to make sure we only ever create an according fixturedef on + # a per-scope basis. We thus store and cache the fixturedef on the + # node related to the scope. + scope = arg2scope[argname] + node = None + if scope != "function": + node = get_scope_node(collector, scope) + if node is None: + assert scope == "class" and isinstance(collector, pytest.Module) + # use module-level collector for class-scope (for now) + node = collector + if node and argname in node._name2pseudofixturedef: + arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]] + else: + fixturedef = FixtureDef(fixturemanager, '', argname, + get_direct_param_fixture_func, + arg2scope[argname], + valuelist, False, False) + arg2fixturedefs[argname] = [fixturedef] + if node is not None: + node._name2pseudofixturedef[argname] = fixturedef + + + +def getfixturemarker(obj): + """ return fixturemarker or None if it doesn't exist or raised + exceptions.""" + try: + return getattr(obj, "_pytestfixturefunction", None) + except KeyboardInterrupt: + raise + except Exception: + # some objects raise errors like request (from flask import request) + # we don't expect them to be fixture functions + return None + + + +def get_parametrized_fixture_keys(item, scopenum): + """ return list of keys for all parametrized arguments which match + the specified scope. 
""" + assert scopenum < scopenum_function # function + try: + cs = item.callspec + except AttributeError: + pass + else: + # cs.indictes.items() is random order of argnames but + # then again different functions (items) can change order of + # arguments so it doesn't matter much probably + for argname, param_index in cs.indices.items(): + if cs._arg2scopenum[argname] != scopenum: + continue + if scopenum == 0: # session + key = (argname, param_index) + elif scopenum == 1: # module + key = (argname, param_index, item.fspath) + elif scopenum == 2: # class + key = (argname, param_index, item.fspath, item.cls) + yield key + + +# algorithm for sorting on a per-parametrized resource setup basis +# it is called for scopenum==0 (session) first and performs sorting +# down to the lower scopes such as to minimize number of "high scope" +# setups and teardowns + +def reorder_items(items): + argkeys_cache = {} + for scopenum in range(0, scopenum_function): + argkeys_cache[scopenum] = d = {} + for item in items: + keys = set(get_parametrized_fixture_keys(item, scopenum)) + if keys: + d[item] = keys + return reorder_items_atscope(items, set(), argkeys_cache, 0) + +def reorder_items_atscope(items, ignore, argkeys_cache, scopenum): + if scopenum >= scopenum_function or len(items) < 3: + return items + items_done = [] + while 1: + items_before, items_same, items_other, newignore = \ + slice_items(items, ignore, argkeys_cache[scopenum]) + items_before = reorder_items_atscope( + items_before, ignore, argkeys_cache,scopenum+1) + if items_same is None: + # nothing to reorder in this scope + assert items_other is None + return items_done + items_before + items_done.extend(items_before) + items = items_same + items_other + ignore = newignore + + +def slice_items(items, ignore, scoped_argkeys_cache): + # we pick the first item which uses a fixture instance in the + # requested scope and which we haven't seen yet. We slice the input + # items list into a list of items_nomatch, items_same and + # items_other + if scoped_argkeys_cache: # do we need to do work at all? + it = iter(items) + # first find a slicing key + for i, item in enumerate(it): + argkeys = scoped_argkeys_cache.get(item) + if argkeys is not None: + argkeys = argkeys.difference(ignore) + if argkeys: # found a slicing key + slicing_argkey = argkeys.pop() + items_before = items[:i] + items_same = [item] + items_other = [] + # now slice the remainder of the list + for item in it: + argkeys = scoped_argkeys_cache.get(item) + if argkeys and slicing_argkey in argkeys and \ + slicing_argkey not in ignore: + items_same.append(item) + else: + items_other.append(item) + newignore = ignore.copy() + newignore.add(slicing_argkey) + return (items_before, items_same, items_other, newignore) + return items, None, None, None + + + +class FuncargnamesCompatAttr: + """ helper class so that Metafunc, Function and FixtureRequest + don't need to each define the "funcargnames" compatibility attribute. + """ + @property + def funcargnames(self): + """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" + return self.fixturenames + + +def fillfixtures(function): + """ fill missing funcargs for a test function. """ + try: + request = function._request + except AttributeError: + # XXX this special code path is only expected to execute + # with the oejskit plugin. It uses classes with funcargs + # and we thus have to work a bit to allow this. 
+ fm = function.session._fixturemanager + fi = fm.getfixtureinfo(function.parent, function.obj, None) + function._fixtureinfo = fi + request = function._request = FixtureRequest(function) + request._fillfixtures() + # prune out funcargs for jstests + newfuncargs = {} + for name in fi.argnames: + newfuncargs[name] = function.funcargs[name] + function.funcargs = newfuncargs + else: + request._fillfixtures() + + + +def get_direct_param_fixture_func(request): + return request.param + +class FuncFixtureInfo: + def __init__(self, argnames, names_closure, name2fixturedefs): + self.argnames = argnames + self.names_closure = names_closure + self.name2fixturedefs = name2fixturedefs + + +class FixtureRequest(FuncargnamesCompatAttr): + """ A request for a fixture from a test or fixture function. + + A request object gives access to the requesting test context + and has an optional ``param`` attribute in case + the fixture is parametrized indirectly. + """ + + def __init__(self, pyfuncitem): + self._pyfuncitem = pyfuncitem + #: fixture for which this request is being performed + self.fixturename = None + #: Scope string, one of "function", "class", "module", "session" + self.scope = "function" + self._fixture_values = {} # argname -> fixture value + self._fixture_defs = {} # argname -> FixtureDef + fixtureinfo = pyfuncitem._fixtureinfo + self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() + self._arg2index = {} + self._fixturemanager = pyfuncitem.session._fixturemanager + + @property + def fixturenames(self): + # backward incompatible note: now a readonly property + return list(self._pyfuncitem._fixtureinfo.names_closure) + + @property + def node(self): + """ underlying collection node (depends on current request scope)""" + return self._getscopeitem(self.scope) + + + def _getnextfixturedef(self, argname): + fixturedefs = self._arg2fixturedefs.get(argname, None) + if fixturedefs is None: + # we arrive here because of a a dynamic call to + # getfixturevalue(argname) usage which was naturally + # not known at parsing/collection time + parentid = self._pyfuncitem.parent.nodeid + fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) + self._arg2fixturedefs[argname] = fixturedefs + # fixturedefs list is immutable so we maintain a decreasing index + index = self._arg2index.get(argname, 0) - 1 + if fixturedefs is None or (-index > len(fixturedefs)): + raise FixtureLookupError(argname, self) + self._arg2index[argname] = index + return fixturedefs[index] + + @property + def config(self): + """ the pytest config object associated with this request. """ + return self._pyfuncitem.config + + + @scopeproperty() + def function(self): + """ test function object if the request has a per-function scope. """ + return self._pyfuncitem.obj + + @scopeproperty("class") + def cls(self): + """ class (can be None) where the test function was collected. """ + clscol = self._pyfuncitem.getparent(pytest.Class) + if clscol: + return clscol.obj + + @property + def instance(self): + """ instance (can be None) on which test function was collected. """ + # unittest support hack, see _pytest.unittest.TestCaseFunction + try: + return self._pyfuncitem._testcase + except AttributeError: + function = getattr(self, "function", None) + if function is not None: + return py.builtin._getimself(function) + + @scopeproperty() + def module(self): + """ python module object where the test function was collected. 
""" + return self._pyfuncitem.getparent(pytest.Module).obj + + @scopeproperty() + def fspath(self): + """ the file system path of the test module which collected this test. """ + return self._pyfuncitem.fspath + + @property + def keywords(self): + """ keywords/markers dictionary for the underlying node. """ + return self.node.keywords + + @property + def session(self): + """ pytest session object. """ + return self._pyfuncitem.session + + def addfinalizer(self, finalizer): + """ add finalizer/teardown function to be called after the + last test within the requesting test context finished + execution. """ + # XXX usually this method is shadowed by fixturedef specific ones + self._addfinalizer(finalizer, scope=self.scope) + + def _addfinalizer(self, finalizer, scope): + colitem = self._getscopeitem(scope) + self._pyfuncitem.session._setupstate.addfinalizer( + finalizer=finalizer, colitem=colitem) + + def applymarker(self, marker): + """ Apply a marker to a single test function invocation. + This method is useful if you don't want to have a keyword/marker + on all function invocations. + + :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object + created by a call to ``pytest.mark.NAME(...)``. + """ + try: + self.node.keywords[marker.markname] = marker + except AttributeError: + raise ValueError(marker) + + def raiseerror(self, msg): + """ raise a FixtureLookupError with the given message. """ + raise self._fixturemanager.FixtureLookupError(None, self, msg) + + def _fillfixtures(self): + item = self._pyfuncitem + fixturenames = getattr(item, "fixturenames", self.fixturenames) + for argname in fixturenames: + if argname not in item.funcargs: + item.funcargs[argname] = self.getfixturevalue(argname) + + def cached_setup(self, setup, teardown=None, scope="module", extrakey=None): + """ (deprecated) Return a testing resource managed by ``setup`` & + ``teardown`` calls. ``scope`` and ``extrakey`` determine when the + ``teardown`` function will be called so that subsequent calls to + ``setup`` would recreate the resource. With pytest-2.3 you often + do not need ``cached_setup()`` as you can directly declare a scope + on a fixture function and register a finalizer through + ``request.addfinalizer()``. + + :arg teardown: function receiving a previously setup resource. + :arg setup: a no-argument function creating a resource. + :arg scope: a string value out of ``function``, ``class``, ``module`` + or ``session`` indicating the caching lifecycle of the resource. + :arg extrakey: added to internal caching key of (funcargname, scope). + """ + if not hasattr(self.config, '_setupcache'): + self.config._setupcache = {} # XXX weakref? + cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) + cache = self.config._setupcache + try: + val = cache[cachekey] + except KeyError: + self._check_scope(self.fixturename, self.scope, scope) + val = setup() + cache[cachekey] = val + if teardown is not None: + def finalizer(): + del cache[cachekey] + teardown(val) + self._addfinalizer(finalizer, scope=scope) + return val + + def getfixturevalue(self, argname): + """ Dynamically run a named fixture function. + + Declaring fixtures via function argument is recommended where possible. + But if you can only decide whether to use another fixture at test + setup time, you may use this function to retrieve it inside a fixture + or test function body. + """ + return self._get_active_fixturedef(argname).cached_result[0] + + def getfuncargvalue(self, argname): + """ Deprecated, use getfixturevalue. 
""" + from _pytest import deprecated + warnings.warn( + deprecated.GETFUNCARGVALUE, + DeprecationWarning) + return self.getfixturevalue(argname) + + def _get_active_fixturedef(self, argname): + try: + return self._fixture_defs[argname] + except KeyError: + try: + fixturedef = self._getnextfixturedef(argname) + except FixtureLookupError: + if argname == "request": + class PseudoFixtureDef: + cached_result = (self, [0], None) + scope = "function" + return PseudoFixtureDef + raise + # remove indent to prevent the python3 exception + # from leaking into the call + result = self._getfixturevalue(fixturedef) + self._fixture_values[argname] = result + self._fixture_defs[argname] = fixturedef + return fixturedef + + def _get_fixturestack(self): + current = self + l = [] + while 1: + fixturedef = getattr(current, "_fixturedef", None) + if fixturedef is None: + l.reverse() + return l + l.append(fixturedef) + current = current._parent_request + + def _getfixturevalue(self, fixturedef): + # prepare a subrequest object before calling fixture function + # (latter managed by fixturedef) + argname = fixturedef.argname + funcitem = self._pyfuncitem + scope = fixturedef.scope + try: + param = funcitem.callspec.getparam(argname) + except (AttributeError, ValueError): + param = NOTSET + param_index = 0 + if fixturedef.params is not None: + frame = inspect.stack()[3] + frameinfo = inspect.getframeinfo(frame[0]) + source_path = frameinfo.filename + source_lineno = frameinfo.lineno + source_path = py.path.local(source_path) + if source_path.relto(funcitem.config.rootdir): + source_path = source_path.relto(funcitem.config.rootdir) + msg = ( + "The requested fixture has no parameter defined for the " + "current test.\n\nRequested fixture '{0}' defined in:\n{1}" + "\n\nRequested here:\n{2}:{3}".format( + fixturedef.argname, + getlocation(fixturedef.func, funcitem.config.rootdir), + source_path, + source_lineno, + ) + ) + pytest.fail(msg) + else: + # indices might not be set if old-style metafunc.addcall() was used + param_index = funcitem.callspec.indices.get(argname, 0) + # if a parametrize invocation set a scope it will override + # the static scope defined with the fixture function + paramscopenum = funcitem.callspec._arg2scopenum.get(argname) + if paramscopenum is not None: + scope = scopes[paramscopenum] + + subrequest = SubRequest(self, scope, param, param_index, fixturedef) + + # check if a higher-level scoped fixture accesses a lower level one + subrequest._check_scope(argname, self.scope, scope) + + # clear sys.exc_info before invoking the fixture (python bug?) 
+ # if its not explicitly cleared it will leak into the call + exc_clear() + try: + # call the fixture function + val = fixturedef.execute(request=subrequest) + finally: + # if fixture function failed it might have registered finalizers + self.session._setupstate.addfinalizer(fixturedef.finish, + subrequest.node) + return val + + def _check_scope(self, argname, invoking_scope, requested_scope): + if argname == "request": + return + if scopemismatch(invoking_scope, requested_scope): + # try to report something helpful + lines = self._factorytraceback() + pytest.fail("ScopeMismatch: You tried to access the %r scoped " + "fixture %r with a %r scoped request object, " + "involved factories\n%s" %( + (requested_scope, argname, invoking_scope, "\n".join(lines))), + pytrace=False) + + def _factorytraceback(self): + lines = [] + for fixturedef in self._get_fixturestack(): + factory = fixturedef.func + fs, lineno = getfslineno(factory) + p = self._pyfuncitem.session.fspath.bestrelpath(fs) + args = _format_args(factory) + lines.append("%s:%d: def %s%s" %( + p, lineno, factory.__name__, args)) + return lines + + def _getscopeitem(self, scope): + if scope == "function": + # this might also be a non-function Item despite its attribute name + return self._pyfuncitem + node = get_scope_node(self._pyfuncitem, scope) + if node is None and scope == "class": + # fallback to function item itself + node = self._pyfuncitem + assert node + return node + + def __repr__(self): + return "" %(self.node) + + +class SubRequest(FixtureRequest): + """ a sub request for handling getting a fixture from a + test function/fixture. """ + def __init__(self, request, scope, param, param_index, fixturedef): + self._parent_request = request + self.fixturename = fixturedef.argname + if param is not NOTSET: + self.param = param + self.param_index = param_index + self.scope = scope + self._fixturedef = fixturedef + self.addfinalizer = fixturedef.addfinalizer + self._pyfuncitem = request._pyfuncitem + self._fixture_values = request._fixture_values + self._fixture_defs = request._fixture_defs + self._arg2fixturedefs = request._arg2fixturedefs + self._arg2index = request._arg2index + self._fixturemanager = request._fixturemanager + + def __repr__(self): + return "" % (self.fixturename, self._pyfuncitem) + + +class ScopeMismatchError(Exception): + """ A fixture function tries to use a different fixture function which + which has a lower scope (e.g. a Session one calls a function one) + """ + + +scopes = "session module class function".split() +scopenum_function = scopes.index("function") + + +def scopemismatch(currentscope, newscope): + return scopes.index(newscope) > scopes.index(currentscope) + + +def scope2index(scope, descr, where=None): + """Look up the index of ``scope`` and raise a descriptive value error + if not defined. + """ + try: + return scopes.index(scope) + except ValueError: + raise ValueError( + "{0} {1}has an unsupported scope value '{2}'".format( + descr, 'from {0} '.format(where) if where else '', + scope) + ) + + +class FixtureLookupError(LookupError): + """ could not return a requested Fixture (missing or invalid). 
""" + def __init__(self, argname, request, msg=None): + self.argname = argname + self.request = request + self.fixturestack = request._get_fixturestack() + self.msg = msg + + def formatrepr(self): + tblines = [] + addline = tblines.append + stack = [self.request._pyfuncitem.obj] + stack.extend(map(lambda x: x.func, self.fixturestack)) + msg = self.msg + if msg is not None: + # the last fixture raise an error, let's present + # it at the requesting side + stack = stack[:-1] + for function in stack: + fspath, lineno = getfslineno(function) + try: + lines, _ = inspect.getsourcelines(get_real_func(function)) + except (IOError, IndexError, TypeError): + error_msg = "file %s, line %s: source code not available" + addline(error_msg % (fspath, lineno+1)) + else: + addline("file %s, line %s" % (fspath, lineno+1)) + for i, line in enumerate(lines): + line = line.rstrip() + addline(" " + line) + if line.lstrip().startswith('def'): + break + + if msg is None: + fm = self.request._fixturemanager + available = [] + parentid = self.request._pyfuncitem.parent.nodeid + for name, fixturedefs in fm._arg2fixturedefs.items(): + faclist = list(fm._matchfactories(fixturedefs, parentid)) + if faclist and name not in available: + available.append(name) + msg = "fixture %r not found" % (self.argname,) + msg += "\n available fixtures: %s" %(", ".join(sorted(available)),) + msg += "\n use 'pytest --fixtures [testpath]' for help on them." + + return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) + + +class FixtureLookupErrorRepr(TerminalRepr): + def __init__(self, filename, firstlineno, tblines, errorstring, argname): + self.tblines = tblines + self.errorstring = errorstring + self.filename = filename + self.firstlineno = firstlineno + self.argname = argname + + def toterminal(self, tw): + # tw.line("FixtureLookupError: %s" %(self.argname), red=True) + for tbline in self.tblines: + tw.line(tbline.rstrip()) + lines = self.errorstring.split("\n") + if lines: + tw.line('{0} {1}'.format(FormattedExcinfo.fail_marker, + lines[0].strip()), red=True) + for line in lines[1:]: + tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker, + line.strip()), red=True) + tw.line() + tw.line("%s:%d" % (self.filename, self.firstlineno+1)) + + +def fail_fixturefunc(fixturefunc, msg): + fs, lineno = getfslineno(fixturefunc) + location = "%s:%s" % (fs, lineno+1) + source = _pytest._code.Source(fixturefunc) + pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, + pytrace=False) + +def call_fixture_func(fixturefunc, request, kwargs): + yieldctx = is_generator(fixturefunc) + if yieldctx: + it = fixturefunc(**kwargs) + res = next(it) + + def teardown(): + try: + next(it) + except StopIteration: + pass + else: + fail_fixturefunc(fixturefunc, + "yield_fixture function has more than one 'yield'") + + request.addfinalizer(teardown) + else: + res = fixturefunc(**kwargs) + return res + + +class FixtureDef: + """ A container for a factory definition. 
""" + def __init__(self, fixturemanager, baseid, argname, func, scope, params, + unittest=False, ids=None): + self._fixturemanager = fixturemanager + self.baseid = baseid or '' + self.has_location = baseid is not None + self.func = func + self.argname = argname + self.scope = scope + self.scopenum = scope2index( + scope or "function", + descr='fixture {0}'.format(func.__name__), + where=baseid + ) + self.params = params + startindex = unittest and 1 or None + self.argnames = getfuncargnames(func, startindex=startindex) + self.unittest = unittest + self.ids = ids + self._finalizer = [] + + def addfinalizer(self, finalizer): + self._finalizer.append(finalizer) + + def finish(self): + try: + while self._finalizer: + func = self._finalizer.pop() + func() + finally: + ihook = self._fixturemanager.session.ihook + ihook.pytest_fixture_post_finalizer(fixturedef=self) + # even if finalization fails, we invalidate + # the cached fixture value + if hasattr(self, "cached_result"): + del self.cached_result + + def execute(self, request): + # get required arguments and register our own finish() + # with their finalization + for argname in self.argnames: + fixturedef = request._get_active_fixturedef(argname) + if argname != "request": + fixturedef.addfinalizer(self.finish) + + my_cache_key = request.param_index + cached_result = getattr(self, "cached_result", None) + if cached_result is not None: + result, cache_key, err = cached_result + if my_cache_key == cache_key: + if err is not None: + py.builtin._reraise(*err) + else: + return result + # we have a previous but differently parametrized fixture instance + # so we need to tear it down before creating a new one + self.finish() + assert not hasattr(self, "cached_result") + + ihook = self._fixturemanager.session.ihook + return ihook.pytest_fixture_setup(fixturedef=self, request=request) + + def __repr__(self): + return ("" % + (self.argname, self.scope, self.baseid)) + +def pytest_fixture_setup(fixturedef, request): + """ Execution of fixture setup. """ + kwargs = {} + for argname in fixturedef.argnames: + fixdef = request._get_active_fixturedef(argname) + result, arg_cache_key, exc = fixdef.cached_result + request._check_scope(argname, request.scope, fixdef.scope) + kwargs[argname] = result + + fixturefunc = fixturedef.func + if fixturedef.unittest: + if request.instance is not None: + # bind the unbound method to the TestCase instance + fixturefunc = fixturedef.func.__get__(request.instance) + else: + # the fixture function needs to be bound to the actual + # request.instance so that code working with "fixturedef" behaves + # as expected. 
+ if request.instance is not None: + fixturefunc = getimfunc(fixturedef.func) + if fixturefunc != fixturedef.func: + fixturefunc = fixturefunc.__get__(request.instance) + my_cache_key = request.param_index + try: + result = call_fixture_func(fixturefunc, request, kwargs) + except Exception: + fixturedef.cached_result = (None, my_cache_key, sys.exc_info()) + raise + fixturedef.cached_result = (result, my_cache_key, None) + return result + + +class FixtureFunctionMarker: + def __init__(self, scope, params, autouse=False, ids=None, name=None): + self.scope = scope + self.params = params + self.autouse = autouse + self.ids = ids + self.name = name + + def __call__(self, function): + if isclass(function): + raise ValueError( + "class fixtures not supported (may be in the future)") + function._pytestfixturefunction = self + return function + + + +def fixture(scope="function", params=None, autouse=False, ids=None, name=None): + """ (return a) decorator to mark a fixture factory function. + + This decorator can be used (with or or without parameters) to define + a fixture function. The name of the fixture function can later be + referenced to cause its invocation ahead of running tests: test + modules or classes can use the pytest.mark.usefixtures(fixturename) + marker. Test functions can directly use fixture names as input + arguments in which case the fixture instance returned from the fixture + function will be injected. + + :arg scope: the scope for which this fixture is shared, one of + "function" (default), "class", "module" or "session". + + :arg params: an optional list of parameters which will cause multiple + invocations of the fixture function and all of the tests + using it. + + :arg autouse: if True, the fixture func is activated for all tests that + can see it. If False (the default) then an explicit + reference is needed to activate the fixture. + + :arg ids: list of string ids each corresponding to the params + so that they are part of the test id. If no ids are provided + they will be generated automatically from the params. + + :arg name: the name of the fixture. This defaults to the name of the + decorated function. If a fixture is used in the same module in + which it is defined, the function name of the fixture will be + shadowed by the function arg that requests the fixture; one way + to resolve this is to name the decorated function + ``fixture_`` and then use + ``@pytest.fixture(name='')``. + + Fixtures can optionally provide their values to test functions using a ``yield`` statement, + instead of ``return``. In this case, the code block after the ``yield`` statement is executed + as teardown code regardless of the test outcome. A fixture function must yield exactly once. + """ + if callable(scope) and params is None and autouse == False: + # direct decoration + return FixtureFunctionMarker( + "function", params, autouse, name=name)(scope) + if params is not None and not isinstance(params, (list, tuple)): + params = list(params) + return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) + + +def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None): + """ (return a) decorator to mark a yield-fixture factory function. + + .. deprecated:: 3.0 + Use :py:func:`pytest.fixture` directly instead. 
+ """ + if callable(scope) and params is None and not autouse: + # direct decoration + return FixtureFunctionMarker( + "function", params, autouse, ids=ids, name=name)(scope) + else: + return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) + + +defaultfuncargprefixmarker = fixture() + + +@fixture(scope="session") +def pytestconfig(request): + """ the pytest config object with access to command line opts.""" + return request.config + + +class FixtureManager: + """ + pytest fixtures definitions and information is stored and managed + from this class. + + During collection fm.parsefactories() is called multiple times to parse + fixture function definitions into FixtureDef objects and internal + data structures. + + During collection of test functions, metafunc-mechanics instantiate + a FuncFixtureInfo object which is cached per node/func-name. + This FuncFixtureInfo object is later retrieved by Function nodes + which themselves offer a fixturenames attribute. + + The FuncFixtureInfo object holds information about fixtures and FixtureDefs + relevant for a particular function. An initial list of fixtures is + assembled like this: + + - ini-defined usefixtures + - autouse-marked fixtures along the collection chain up from the function + - usefixtures markers at module/class/function level + - test function funcargs + + Subsequently the funcfixtureinfo.fixturenames attribute is computed + as the closure of the fixtures needed to setup the initial fixtures, + i. e. fixtures needed by fixture functions themselves are appended + to the fixturenames list. + + Upon the test-setup phases all fixturenames are instantiated, retrieved + by a lookup of their FuncFixtureInfo. + """ + + _argprefix = "pytest_funcarg__" + FixtureLookupError = FixtureLookupError + FixtureLookupErrorRepr = FixtureLookupErrorRepr + + def __init__(self, session): + self.session = session + self.config = session.config + self._arg2fixturedefs = {} + self._holderobjseen = set() + self._arg2finish = {} + self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))] + session.config.pluginmanager.register(self, "funcmanage") + + + def getfixtureinfo(self, node, func, cls, funcargs=True): + if funcargs and not hasattr(node, "nofuncargs"): + if cls is not None: + startindex = 1 + else: + startindex = None + argnames = getfuncargnames(func, startindex) + else: + argnames = () + usefixtures = getattr(func, "usefixtures", None) + initialnames = argnames + if usefixtures is not None: + initialnames = usefixtures.args + initialnames + fm = node.session._fixturemanager + names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames, + node) + return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs) + + def pytest_plugin_registered(self, plugin): + nodeid = None + try: + p = py.path.local(plugin.__file__) + except AttributeError: + pass + else: + # construct the base nodeid which is later used to check + # what fixtures are visible for particular tests (as denoted + # by their test id) + if p.basename.startswith("conftest.py"): + nodeid = p.dirpath().relto(self.config.rootdir) + if p.sep != "/": + nodeid = nodeid.replace(p.sep, "/") + self.parsefactories(plugin, nodeid) + + def _getautousenames(self, nodeid): + """ return a tuple of fixture names to be used. 
""" + autousenames = [] + for baseid, basenames in self._nodeid_and_autousenames: + if nodeid.startswith(baseid): + if baseid: + i = len(baseid) + nextchar = nodeid[i:i+1] + if nextchar and nextchar not in ":/": + continue + autousenames.extend(basenames) + # make sure autousenames are sorted by scope, scopenum 0 is session + autousenames.sort( + key=lambda x: self._arg2fixturedefs[x][-1].scopenum) + return autousenames + + def getfixtureclosure(self, fixturenames, parentnode): + # collect the closure of all fixtures , starting with the given + # fixturenames as the initial set. As we have to visit all + # factory definitions anyway, we also return a arg2fixturedefs + # mapping so that the caller can reuse it and does not have + # to re-discover fixturedefs again for each fixturename + # (discovering matching fixtures for a given name/node is expensive) + + parentid = parentnode.nodeid + fixturenames_closure = self._getautousenames(parentid) + + def merge(otherlist): + for arg in otherlist: + if arg not in fixturenames_closure: + fixturenames_closure.append(arg) + + merge(fixturenames) + arg2fixturedefs = {} + lastlen = -1 + while lastlen != len(fixturenames_closure): + lastlen = len(fixturenames_closure) + for argname in fixturenames_closure: + if argname in arg2fixturedefs: + continue + fixturedefs = self.getfixturedefs(argname, parentid) + if fixturedefs: + arg2fixturedefs[argname] = fixturedefs + merge(fixturedefs[-1].argnames) + return fixturenames_closure, arg2fixturedefs + + def pytest_generate_tests(self, metafunc): + for argname in metafunc.fixturenames: + faclist = metafunc._arg2fixturedefs.get(argname) + if faclist: + fixturedef = faclist[-1] + if fixturedef.params is not None: + func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]]) + # skip directly parametrized arguments + argnames = func_params[0] + if not isinstance(argnames, (tuple, list)): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + if argname not in func_params and argname not in argnames: + metafunc.parametrize(argname, fixturedef.params, + indirect=True, scope=fixturedef.scope, + ids=fixturedef.ids) + else: + continue # will raise FixtureLookupError at setup time + + def pytest_collection_modifyitems(self, items): + # separate parametrized setups + items[:] = reorder_items(items) + + def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False): + if nodeid is not NOTSET: + holderobj = node_or_obj + else: + holderobj = node_or_obj.obj + nodeid = node_or_obj.nodeid + if holderobj in self._holderobjseen: + return + self._holderobjseen.add(holderobj) + autousenames = [] + for name in dir(holderobj): + obj = getattr(holderobj, name, None) + # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style) + # or are "@pytest.fixture" marked + marker = getfixturemarker(obj) + if marker is None: + if not name.startswith(self._argprefix): + continue + if not callable(obj): + continue + marker = defaultfuncargprefixmarker + from _pytest import deprecated + self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name)) + name = name[len(self._argprefix):] + elif not isinstance(marker, FixtureFunctionMarker): + # magic globals with __getattr__ might have got us a wrong + # fixture attribute + continue + else: + if marker.name: + name = marker.name + msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \ + 'and be decorated with @pytest.fixture:\n%s' % name + assert not name.startswith(self._argprefix), msg + + fixture_def = FixtureDef(self, nodeid, name, 
obj, + marker.scope, marker.params, + unittest=unittest, ids=marker.ids) + + faclist = self._arg2fixturedefs.setdefault(name, []) + if fixture_def.has_location: + faclist.append(fixture_def) + else: + # fixturedefs with no location are at the front + # so this inserts the current fixturedef after the + # existing fixturedefs from external plugins but + # before the fixturedefs provided in conftests. + i = len([f for f in faclist if not f.has_location]) + faclist.insert(i, fixture_def) + if marker.autouse: + autousenames.append(name) + + if autousenames: + self._nodeid_and_autousenames.append((nodeid or '', autousenames)) + + def getfixturedefs(self, argname, nodeid): + """ + Gets a list of fixtures which are applicable to the given node id. + + :param str argname: name of the fixture to search for + :param str nodeid: full node id of the requesting test. + :return: list[FixtureDef] + """ + try: + fixturedefs = self._arg2fixturedefs[argname] + except KeyError: + return None + else: + return tuple(self._matchfactories(fixturedefs, nodeid)) + + def _matchfactories(self, fixturedefs, nodeid): + for fixturedef in fixturedefs: + if nodeid.startswith(fixturedef.baseid): + yield fixturedef + diff --git a/lib/spack/external/_pytest/freeze_support.py b/lib/spack/external/_pytest/freeze_support.py new file mode 100644 index 0000000000..f78ccd298e --- /dev/null +++ b/lib/spack/external/_pytest/freeze_support.py @@ -0,0 +1,45 @@ +""" +Provides a function to report all internal modules for using freezing tools +pytest +""" + +def pytest_namespace(): + return {'freeze_includes': freeze_includes} + + +def freeze_includes(): + """ + Returns a list of module names used by py.test that should be + included by cx_freeze. + """ + import py + import _pytest + result = list(_iter_all_modules(py)) + result += list(_iter_all_modules(_pytest)) + return result + + +def _iter_all_modules(package, prefix=''): + """ + Iterates over the names of all modules that can be found in the given + package, recursively. + Example: + _iter_all_modules(_pytest) -> + ['_pytest.assertion.newinterpret', + '_pytest.capture', + '_pytest.core', + ... + ] + """ + import os + import pkgutil + if type(package) is not str: + path, prefix = package.__path__[0], package.__name__ + '.' + else: + path = package + for _, name, is_package in pkgutil.iter_modules([path]): + if is_package: + for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'): + yield prefix + m + else: + yield prefix + name \ No newline at end of file diff --git a/lib/spack/external/_pytest/helpconfig.py b/lib/spack/external/_pytest/helpconfig.py new file mode 100644 index 0000000000..6e66b11c48 --- /dev/null +++ b/lib/spack/external/_pytest/helpconfig.py @@ -0,0 +1,144 @@ +""" version info, help messages, tracing configuration. """ +import py +import pytest +import os, sys + +def pytest_addoption(parser): + group = parser.getgroup('debugconfig') + group.addoption('--version', action="store_true", + help="display pytest lib version and import information.") + group._addoption("-h", "--help", action="store_true", dest="help", + help="show help message and configuration info") + group._addoption('-p', action="append", dest="plugins", default = [], + metavar="name", + help="early-load given plugin (multi-allowed). " + "To avoid loading of plugins, use the `no:` prefix, e.g. 
" + "`no:doctest`.") + group.addoption('--traceconfig', '--trace-config', + action="store_true", default=False, + help="trace considerations of conftest.py files."), + group.addoption('--debug', + action="store_true", dest="debug", default=False, + help="store internal tracing debug information in 'pytestdebug.log'.") + group._addoption( + '-o', '--override-ini', nargs='*', dest="override_ini", + action="append", + help="override config option with option=value style, e.g. `-o xfail_strict=True`.") + + +@pytest.hookimpl(hookwrapper=True) +def pytest_cmdline_parse(): + outcome = yield + config = outcome.get_result() + if config.option.debug: + path = os.path.abspath("pytestdebug.log") + debugfile = open(path, 'w') + debugfile.write("versions pytest-%s, py-%s, " + "python-%s\ncwd=%s\nargs=%s\n\n" %( + pytest.__version__, py.__version__, + ".".join(map(str, sys.version_info)), + os.getcwd(), config._origargs)) + config.trace.root.setwriter(debugfile.write) + undo_tracing = config.pluginmanager.enable_tracing() + sys.stderr.write("writing pytestdebug information to %s\n" % path) + + def unset_tracing(): + debugfile.close() + sys.stderr.write("wrote pytestdebug information to %s\n" % + debugfile.name) + config.trace.root.setwriter(None) + undo_tracing() + + config.add_cleanup(unset_tracing) + +def pytest_cmdline_main(config): + if config.option.version: + p = py.path.local(pytest.__file__) + sys.stderr.write("This is pytest version %s, imported from %s\n" % + (pytest.__version__, p)) + plugininfo = getpluginversioninfo(config) + if plugininfo: + for line in plugininfo: + sys.stderr.write(line + "\n") + return 0 + elif config.option.help: + config._do_configure() + showhelp(config) + config._ensure_unconfigure() + return 0 + +def showhelp(config): + reporter = config.pluginmanager.get_plugin('terminalreporter') + tw = reporter._tw + tw.write(config._parser.optparser.format_help()) + tw.line() + tw.line() + tw.line("[pytest] ini-options in the first " + "pytest.ini|tox.ini|setup.cfg file found:") + tw.line() + + for name in config._parser._ininames: + help, type, default = config._parser._inidict[name] + if type is None: + type = "string" + spec = "%s (%s)" % (name, type) + line = " %-24s %s" %(spec, help) + tw.line(line[:tw.fullwidth]) + + tw.line() + tw.line("environment variables:") + vars = [ + ("PYTEST_ADDOPTS", "extra command line options"), + ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"), + ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals") + ] + for name, help in vars: + tw.line(" %-24s %s" % (name, help)) + tw.line() + tw.line() + + tw.line("to see available markers type: pytest --markers") + tw.line("to see available fixtures type: pytest --fixtures") + tw.line("(shown according to specified file_or_dir or current dir " + "if not specified)") + + for warningreport in reporter.stats.get('warnings', []): + tw.line("warning : " + warningreport.message, red=True) + return + + +conftest_options = [ + ('pytest_plugins', 'list of plugin names to load'), +] + +def getpluginversioninfo(config): + lines = [] + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + lines.append("setuptools registered plugins:") + for plugin, dist in plugininfo: + loc = getattr(plugin, '__file__', repr(plugin)) + content = "%s-%s at %s" % (dist.project_name, dist.version, loc) + lines.append(" " + content) + return lines + +def pytest_report_header(config): + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append("using: 
pytest-%s pylib-%s" % + (pytest.__version__,py.__version__)) + + verinfo = getpluginversioninfo(config) + if verinfo: + lines.extend(verinfo) + + if config.option.traceconfig: + lines.append("active plugins:") + items = config.pluginmanager.list_name_plugin() + for name, plugin in items: + if hasattr(plugin, '__file__'): + r = plugin.__file__ + else: + r = repr(plugin) + lines.append(" %-20s: %s" %(name, r)) + return lines diff --git a/lib/spack/external/_pytest/hookspec.py b/lib/spack/external/_pytest/hookspec.py new file mode 100644 index 0000000000..b5f51eccf5 --- /dev/null +++ b/lib/spack/external/_pytest/hookspec.py @@ -0,0 +1,314 @@ +""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """ + +from _pytest._pluggy import HookspecMarker + +hookspec = HookspecMarker("pytest") + +# ------------------------------------------------------------------------- +# Initialization hooks called for every plugin +# ------------------------------------------------------------------------- + +@hookspec(historic=True) +def pytest_addhooks(pluginmanager): + """called at plugin registration time to allow adding new hooks via a call to + pluginmanager.add_hookspecs(module_or_class, prefix).""" + + +@hookspec(historic=True) +def pytest_namespace(): + """return dict of name->object to be made globally available in + the pytest namespace. This hook is called at plugin registration + time. + """ + +@hookspec(historic=True) +def pytest_plugin_registered(plugin, manager): + """ a new pytest plugin got registered. """ + + +@hookspec(historic=True) +def pytest_addoption(parser): + """register argparse-style options and ini-style config values, + called once at the beginning of a test run. + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. + + :arg parser: To add command line options, call + :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`. + To add ini-file values call :py:func:`parser.addini(...) + <_pytest.config.Parser.addini>`. + + Options can later be accessed through the + :py:class:`config <_pytest.config.Config>` object, respectively: + + - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to + retrieve the value of a command line option. + + - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve + a value read from an ini-style file. + + The config object is passed around on many internal objects via the ``.config`` + attribute or can be retrieved as the ``pytestconfig`` fixture or accessed + via (deprecated) ``pytest.config``. + """ + +@hookspec(historic=True) +def pytest_configure(config): + """ called after command line options have been parsed + and all plugins and initial conftest files been loaded. + This hook is called for every plugin. + """ + +# ------------------------------------------------------------------------- +# Bootstrapping hooks called for plugins registered early enough: +# internal and 3rd party plugins as well as directly +# discoverable conftest.py local plugins. +# ------------------------------------------------------------------------- + +@hookspec(firstresult=True) +def pytest_cmdline_parse(pluginmanager, args): + """return initialized config object, parsing the specified args. """ + +def pytest_cmdline_preparse(config, args): + """(deprecated) modify command line arguments before option parsing. 
""" + +@hookspec(firstresult=True) +def pytest_cmdline_main(config): + """ called for performing the main command line action. The default + implementation will invoke the configure hooks and runtest_mainloop. """ + +def pytest_load_initial_conftests(early_config, parser, args): + """ implements the loading of initial conftest files ahead + of command line option parsing. """ + + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + +@hookspec(firstresult=True) +def pytest_collection(session): + """ perform the collection protocol for the given session. """ + +def pytest_collection_modifyitems(session, config, items): + """ called after collection has been performed, may filter or re-order + the items in-place.""" + +def pytest_collection_finish(session): + """ called after collection has been performed and modified. """ + +@hookspec(firstresult=True) +def pytest_ignore_collect(path, config): + """ return True to prevent considering this path for collection. + This hook is consulted for all files and directories prior to calling + more specific hooks. + """ + +@hookspec(firstresult=True) +def pytest_collect_directory(path, parent): + """ called before traversing a directory for collection files. """ + +def pytest_collect_file(path, parent): + """ return collection Node or None for the given path. Any new node + needs to have the specified ``parent`` as a parent.""" + +# logging hooks for collection +def pytest_collectstart(collector): + """ collector starts collecting. """ + +def pytest_itemcollected(item): + """ we just collected a test item. """ + +def pytest_collectreport(report): + """ collector finished collecting. """ + +def pytest_deselected(items): + """ called for test items deselected by keyword. """ + +@hookspec(firstresult=True) +def pytest_make_collect_report(collector): + """ perform ``collector.collect()`` and return a CollectReport. """ + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + +@hookspec(firstresult=True) +def pytest_pycollect_makemodule(path, parent): + """ return a Module collector or None for the given path. + This hook will be called for each matching test module path. + The pytest_collect_file hook needs to be used if you want to + create test modules for files that do not match as a test module. + """ + +@hookspec(firstresult=True) +def pytest_pycollect_makeitem(collector, name, obj): + """ return custom item/collector for a python object in a module, or None. """ + +@hookspec(firstresult=True) +def pytest_pyfunc_call(pyfuncitem): + """ call underlying test function. """ + +def pytest_generate_tests(metafunc): + """ generate (multiple) parametrized calls to a test function.""" + +@hookspec(firstresult=True) +def pytest_make_parametrize_id(config, val): + """Return a user-friendly string representation of the given ``val`` that will be used + by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``. + """ + +# ------------------------------------------------------------------------- +# generic runtest related hooks +# ------------------------------------------------------------------------- + +@hookspec(firstresult=True) +def pytest_runtestloop(session): + """ called for performing the main runtest loop + (after collection finished). 
""" + +def pytest_itemstart(item, node): + """ (deprecated, use pytest_runtest_logstart). """ + +@hookspec(firstresult=True) +def pytest_runtest_protocol(item, nextitem): + """ implements the runtest_setup/call/teardown protocol for + the given test item, including capturing exceptions and calling + reporting hooks. + + :arg item: test item for which the runtest protocol is performed. + + :arg nextitem: the scheduled-to-be-next test item (or None if this + is the end my friend). This argument is passed on to + :py:func:`pytest_runtest_teardown`. + + :return boolean: True if no further hook implementations should be invoked. + """ + +def pytest_runtest_logstart(nodeid, location): + """ signal the start of running a single test item. """ + +def pytest_runtest_setup(item): + """ called before ``pytest_runtest_call(item)``. """ + +def pytest_runtest_call(item): + """ called to execute the test ``item``. """ + +def pytest_runtest_teardown(item, nextitem): + """ called after ``pytest_runtest_call``. + + :arg nextitem: the scheduled-to-be-next test item (None if no further + test item is scheduled). This argument can be used to + perform exact teardowns, i.e. calling just enough finalizers + so that nextitem only needs to call setup-functions. + """ + +@hookspec(firstresult=True) +def pytest_runtest_makereport(item, call): + """ return a :py:class:`_pytest.runner.TestReport` object + for the given :py:class:`pytest.Item` and + :py:class:`_pytest.runner.CallInfo`. + """ + +def pytest_runtest_logreport(report): + """ process a test setup/call/teardown report relating to + the respective phase of executing a test. """ + +# ------------------------------------------------------------------------- +# Fixture related hooks +# ------------------------------------------------------------------------- + +@hookspec(firstresult=True) +def pytest_fixture_setup(fixturedef, request): + """ performs fixture setup execution. """ + +def pytest_fixture_post_finalizer(fixturedef): + """ called after fixture teardown, but before the cache is cleared so + the fixture result cache ``fixturedef.cached_result`` can + still be accessed.""" + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + +def pytest_sessionstart(session): + """ before session.main() is called. """ + +def pytest_sessionfinish(session, exitstatus): + """ whole test run finishes. """ + +def pytest_unconfigure(config): + """ called before test process is exited. """ + + +# ------------------------------------------------------------------------- +# hooks for customising the assert methods +# ------------------------------------------------------------------------- + +def pytest_assertrepr_compare(config, op, left, right): + """return explanation for comparisons in failing assert expressions. + + Return None for no custom explanation, otherwise return a list + of strings. The strings will be joined by newlines but any newlines + *in* a string will be escaped. Note that all but the first line will + be indented sligthly, the intention is for the first line to be a summary. 
+ """ + +# ------------------------------------------------------------------------- +# hooks for influencing reporting (invoked from _pytest_terminal) +# ------------------------------------------------------------------------- + +def pytest_report_header(config, startdir): + """ return a string to be displayed as header info for terminal reporting.""" + +@hookspec(firstresult=True) +def pytest_report_teststatus(report): + """ return result-category, shortletter and verbose word for reporting.""" + +def pytest_terminal_summary(terminalreporter, exitstatus): + """ add additional section in terminal summary reporting. """ + + +@hookspec(historic=True) +def pytest_logwarning(message, code, nodeid, fslocation): + """ process a warning specified by a message, a code string, + a nodeid and fslocation (both of which may be None + if the warning is not tied to a partilar node/location).""" + +# ------------------------------------------------------------------------- +# doctest hooks +# ------------------------------------------------------------------------- + +@hookspec(firstresult=True) +def pytest_doctest_prepare_content(content): + """ return processed content for a given doctest""" + +# ------------------------------------------------------------------------- +# error handling and internal debugging hooks +# ------------------------------------------------------------------------- + +def pytest_internalerror(excrepr, excinfo): + """ called for internal errors. """ + +def pytest_keyboard_interrupt(excinfo): + """ called for keyboard interrupt. """ + +def pytest_exception_interact(node, call, report): + """called when an exception was raised which can potentially be + interactively handled. + + This hook is only called if an exception was raised + that is not an internal exception like ``skip.Exception``. + """ + +def pytest_enter_pdb(config): + """ called upon pdb.set_trace(), can be used by plugins to take special + action just before the python debugger enters in interactive mode. + + :arg config: pytest config object + :type config: _pytest.config.Config + """ diff --git a/lib/spack/external/_pytest/junitxml.py b/lib/spack/external/_pytest/junitxml.py new file mode 100644 index 0000000000..317382e637 --- /dev/null +++ b/lib/spack/external/_pytest/junitxml.py @@ -0,0 +1,413 @@ +""" + report test results in JUnit-XML format, + for use with Jenkins and build integration servers. + + +Based on initial code from Ross Lawley. +""" +# Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/ +# src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd + +import functools +import py +import os +import re +import sys +import time +import pytest +from _pytest.config import filename_arg + +# Python 2.X and 3.X compatibility +if sys.version_info[0] < 3: + from codecs import open +else: + unichr = chr + unicode = str + long = int + + +class Junit(py.xml.Namespace): + pass + + +# We need to get the subset of the invalid unicode ranges according to +# XML 1.0 which are valid in this python build. Hence we calculate +# this dynamically instead of hardcoding it. 
The spec range of valid +# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] +# | [#x10000-#x10FFFF] +_legal_chars = (0x09, 0x0A, 0x0d) +_legal_ranges = ( + (0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF), +) +_legal_xml_re = [ + unicode("%s-%s") % (unichr(low), unichr(high)) + for (low, high) in _legal_ranges if low < sys.maxunicode +] +_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re +illegal_xml_re = re.compile(unicode('[^%s]') % unicode('').join(_legal_xml_re)) +del _legal_chars +del _legal_ranges +del _legal_xml_re + +_py_ext_re = re.compile(r"\.py$") + + +def bin_xml_escape(arg): + def repl(matchobj): + i = ord(matchobj.group()) + if i <= 0xFF: + return unicode('#x%02X') % i + else: + return unicode('#x%04X') % i + + return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg))) + + +class _NodeReporter(object): + def __init__(self, nodeid, xml): + + self.id = nodeid + self.xml = xml + self.add_stats = self.xml.add_stats + self.duration = 0 + self.properties = [] + self.nodes = [] + self.testcase = None + self.attrs = {} + + def append(self, node): + self.xml.add_stats(type(node).__name__) + self.nodes.append(node) + + def add_property(self, name, value): + self.properties.append((str(name), bin_xml_escape(value))) + + def make_properties_node(self): + """Return a Junit node containing custom properties, if any. + """ + if self.properties: + return Junit.properties([ + Junit.property(name=name, value=value) + for name, value in self.properties + ]) + return '' + + def record_testreport(self, testreport): + assert not self.testcase + names = mangle_test_address(testreport.nodeid) + classnames = names[:-1] + if self.xml.prefix: + classnames.insert(0, self.xml.prefix) + attrs = { + "classname": ".".join(classnames), + "name": bin_xml_escape(names[-1]), + "file": testreport.location[0], + } + if testreport.location[1] is not None: + attrs["line"] = testreport.location[1] + self.attrs = attrs + + def to_xml(self): + testcase = Junit.testcase(time=self.duration, **self.attrs) + testcase.append(self.make_properties_node()) + for node in self.nodes: + testcase.append(node) + return testcase + + def _add_simple(self, kind, message, data=None): + data = bin_xml_escape(data) + node = kind(data, message=message) + self.append(node) + + def _write_captured_output(self, report): + for capname in ('out', 'err'): + content = getattr(report, 'capstd' + capname) + if content: + tag = getattr(Junit, 'system-' + capname) + self.append(tag(bin_xml_escape(content))) + + def append_pass(self, report): + self.add_stats('passed') + self._write_captured_output(report) + + def append_failure(self, report): + # msg = str(report.longrepr.reprtraceback.extraline) + if hasattr(report, "wasxfail"): + self._add_simple( + Junit.skipped, + "xfail-marked test passes unexpectedly") + else: + if hasattr(report.longrepr, "reprcrash"): + message = report.longrepr.reprcrash.message + elif isinstance(report.longrepr, (unicode, str)): + message = report.longrepr + else: + message = str(report.longrepr) + message = bin_xml_escape(message) + fail = Junit.failure(message=message) + fail.append(bin_xml_escape(report.longrepr)) + self.append(fail) + self._write_captured_output(report) + + def append_collect_error(self, report): + # msg = str(report.longrepr.reprtraceback.extraline) + self.append(Junit.error(bin_xml_escape(report.longrepr), + message="collection failure")) + + def append_collect_skipped(self, report): + self._add_simple( + Junit.skipped, "collection skipped", 
report.longrepr) + + def append_error(self, report): + if getattr(report, 'when', None) == 'teardown': + msg = "test teardown failure" + else: + msg = "test setup failure" + self._add_simple( + Junit.error, msg, report.longrepr) + self._write_captured_output(report) + + def append_skipped(self, report): + if hasattr(report, "wasxfail"): + self._add_simple( + Junit.skipped, "expected test failure", report.wasxfail + ) + else: + filename, lineno, skipreason = report.longrepr + if skipreason.startswith("Skipped: "): + skipreason = bin_xml_escape(skipreason[9:]) + self.append( + Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason), + type="pytest.skip", + message=skipreason)) + self._write_captured_output(report) + + def finalize(self): + data = self.to_xml().unicode(indent=0) + self.__dict__.clear() + self.to_xml = lambda: py.xml.raw(data) + + +@pytest.fixture +def record_xml_property(request): + """Add extra xml properties to the tag for the calling test. + The fixture is callable with ``(name, value)``, with value being automatically + xml-encoded. + """ + request.node.warn( + code='C3', + message='record_xml_property is an experimental feature', + ) + xml = getattr(request.config, "_xml", None) + if xml is not None: + node_reporter = xml.node_reporter(request.node.nodeid) + return node_reporter.add_property + else: + def add_property_noop(name, value): + pass + + return add_property_noop + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting") + group.addoption( + '--junitxml', '--junit-xml', + action="store", + dest="xmlpath", + metavar="path", + type=functools.partial(filename_arg, optname="--junitxml"), + default=None, + help="create junit-xml style report file at given path.") + group.addoption( + '--junitprefix', '--junit-prefix', + action="store", + metavar="str", + default=None, + help="prepend prefix to classnames in junit-xml output") + + +def pytest_configure(config): + xmlpath = config.option.xmlpath + # prevent opening xmllog on slave nodes (xdist) + if xmlpath and not hasattr(config, 'slaveinput'): + config._xml = LogXML(xmlpath, config.option.junitprefix) + config.pluginmanager.register(config._xml) + + +def pytest_unconfigure(config): + xml = getattr(config, '_xml', None) + if xml: + del config._xml + config.pluginmanager.unregister(xml) + + +def mangle_test_address(address): + path, possible_open_bracket, params = address.partition('[') + names = path.split("::") + try: + names.remove('()') + except ValueError: + pass + # convert file path to dotted path + names[0] = names[0].replace("/", '.') + names[0] = _py_ext_re.sub("", names[0]) + # put any params back + names[-1] += possible_open_bracket + params + return names + + +class LogXML(object): + def __init__(self, logfile, prefix): + logfile = os.path.expanduser(os.path.expandvars(logfile)) + self.logfile = os.path.normpath(os.path.abspath(logfile)) + self.prefix = prefix + self.stats = dict.fromkeys([ + 'error', + 'passed', + 'failure', + 'skipped', + ], 0) + self.node_reporters = {} # nodeid -> _NodeReporter + self.node_reporters_ordered = [] + self.global_properties = [] + + def finalize(self, report): + nodeid = getattr(report, 'nodeid', report) + # local hack to handle xdist report order + slavenode = getattr(report, 'node', None) + reporter = self.node_reporters.pop((nodeid, slavenode)) + if reporter is not None: + reporter.finalize() + + def node_reporter(self, report): + nodeid = getattr(report, 'nodeid', report) + # local hack to handle xdist report order + slavenode = 
getattr(report, 'node', None) + + key = nodeid, slavenode + + if key in self.node_reporters: + # TODO: breasks for --dist=each + return self.node_reporters[key] + + reporter = _NodeReporter(nodeid, self) + + self.node_reporters[key] = reporter + self.node_reporters_ordered.append(reporter) + + return reporter + + def add_stats(self, key): + if key in self.stats: + self.stats[key] += 1 + + def _opentestcase(self, report): + reporter = self.node_reporter(report) + reporter.record_testreport(report) + return reporter + + def pytest_runtest_logreport(self, report): + """handle a setup/call/teardown report, generating the appropriate + xml tags as necessary. + + note: due to plugins like xdist, this hook may be called in interlaced + order with reports from other nodes. for example: + + usual call order: + -> setup node1 + -> call node1 + -> teardown node1 + -> setup node2 + -> call node2 + -> teardown node2 + + possible call order in xdist: + -> setup node1 + -> call node1 + -> setup node2 + -> call node2 + -> teardown node2 + -> teardown node1 + """ + if report.passed: + if report.when == "call": # ignore setup/teardown + reporter = self._opentestcase(report) + reporter.append_pass(report) + elif report.failed: + reporter = self._opentestcase(report) + if report.when == "call": + reporter.append_failure(report) + else: + reporter.append_error(report) + elif report.skipped: + reporter = self._opentestcase(report) + reporter.append_skipped(report) + self.update_testcase_duration(report) + if report.when == "teardown": + self.finalize(report) + + def update_testcase_duration(self, report): + """accumulates total duration for nodeid from given report and updates + the Junit.testcase with the new total if already created. + """ + reporter = self.node_reporter(report) + reporter.duration += getattr(report, 'duration', 0.0) + + def pytest_collectreport(self, report): + if not report.passed: + reporter = self._opentestcase(report) + if report.failed: + reporter.append_collect_error(report) + else: + reporter.append_collect_skipped(report) + + def pytest_internalerror(self, excrepr): + reporter = self.node_reporter('internal') + reporter.attrs.update(classname="pytest", name='internal') + reporter._add_simple(Junit.error, 'internal error', excrepr) + + def pytest_sessionstart(self): + self.suite_start_time = time.time() + + def pytest_sessionfinish(self): + dirname = os.path.dirname(os.path.abspath(self.logfile)) + if not os.path.isdir(dirname): + os.makedirs(dirname) + logfile = open(self.logfile, 'w', encoding='utf-8') + suite_stop_time = time.time() + suite_time_delta = suite_stop_time - self.suite_start_time + + numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped'] + self.stats['error'] + + logfile.write('') + + logfile.write(Junit.testsuite( + self._get_global_properties_node(), + [x.to_xml() for x in self.node_reporters_ordered], + name="pytest", + errors=self.stats['error'], + failures=self.stats['failure'], + skips=self.stats['skipped'], + tests=numtests, + time="%.3f" % suite_time_delta, ).unicode(indent=0)) + logfile.close() + + def pytest_terminal_summary(self, terminalreporter): + terminalreporter.write_sep("-", + "generated xml file: %s" % (self.logfile)) + + def add_global_property(self, name, value): + self.global_properties.append((str(name), bin_xml_escape(value))) + + def _get_global_properties_node(self): + """Return a Junit node containing custom properties, if any. 
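+
+        Global properties are registered through ``add_global_property``
+        (defined above); a minimal usage sketch with a hypothetical value::
+
+            config._xml.add_global_property("build_id", "1234")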
+ """ + if self.global_properties: + return Junit.properties( + [ + Junit.property(name=name, value=value) + for name, value in self.global_properties + ] + ) + return '' diff --git a/lib/spack/external/_pytest/main.py b/lib/spack/external/_pytest/main.py new file mode 100644 index 0000000000..52876c12a4 --- /dev/null +++ b/lib/spack/external/_pytest/main.py @@ -0,0 +1,762 @@ +""" core implementation of testing process: init, session, runtest loop. """ +import functools +import os +import sys + +import _pytest +import _pytest._code +import py +import pytest +try: + from collections import MutableMapping as MappingMixin +except ImportError: + from UserDict import DictMixin as MappingMixin + +from _pytest.config import directory_arg +from _pytest.runner import collect_one_node + +tracebackcutdir = py.path.local(_pytest.__file__).dirpath() + +# exitcodes for the command line +EXIT_OK = 0 +EXIT_TESTSFAILED = 1 +EXIT_INTERRUPTED = 2 +EXIT_INTERNALERROR = 3 +EXIT_USAGEERROR = 4 +EXIT_NOTESTSCOLLECTED = 5 + +def pytest_addoption(parser): + parser.addini("norecursedirs", "directory patterns to avoid for recursion", + type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg']) + parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.", + type="args", default=[]) + #parser.addini("dirpatterns", + # "patterns specifying possible locations of test files", + # type="linelist", default=["**/test_*.txt", + # "**/test_*.py", "**/*_test.py"] + #) + group = parser.getgroup("general", "running and selection options") + group._addoption('-x', '--exitfirst', action="store_const", + dest="maxfail", const=1, + help="exit instantly on first error or failed test."), + group._addoption('--maxfail', metavar="num", + action="store", type=int, dest="maxfail", default=0, + help="exit after first num failures or errors.") + group._addoption('--strict', action="store_true", + help="run pytest in strict mode, warnings become errors.") + group._addoption("-c", metavar="file", type=str, dest="inifilename", + help="load configuration from `file` instead of trying to locate one of the implicit configuration files.") + group._addoption("--continue-on-collection-errors", action="store_true", + default=False, dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur.") + + group = parser.getgroup("collect", "collection") + group.addoption('--collectonly', '--collect-only', action="store_true", + help="only collect tests, don't execute them."), + group.addoption('--pyargs', action="store_true", + help="try to interpret all arguments as python packages.") + group.addoption("--ignore", action="append", metavar="path", + help="ignore path during collection (multi-allowed).") + # when changing this to --conf-cut-dir, config.py Conftest.setinitial + # needs upgrading as well + group.addoption('--confcutdir', dest="confcutdir", default=None, + metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"), + help="only load conftest.py's relative to specified dir.") + group.addoption('--noconftest', action="store_true", + dest="noconftest", default=False, + help="Don't load any conftest.py files.") + group.addoption('--keepduplicates', '--keep-duplicates', action="store_true", + dest="keepduplicates", default=False, + help="Keep duplicate tests.") + + group = parser.getgroup("debugconfig", + "test session debugging and configuration") + group.addoption('--basetemp', dest="basetemp", default=None, 
metavar="dir", + help="base temporary directory for this test run.") + + +def pytest_namespace(): + collect = dict(Item=Item, Collector=Collector, File=File, Session=Session) + return dict(collect=collect) + + +def pytest_configure(config): + pytest.config = config # compatibiltiy + + +def wrap_session(config, doit): + """Skeleton command line program""" + session = Session(config) + session.exitstatus = EXIT_OK + initstate = 0 + try: + try: + config._do_configure() + initstate = 1 + config.hook.pytest_sessionstart(session=session) + initstate = 2 + session.exitstatus = doit(config, session) or 0 + except pytest.UsageError: + raise + except KeyboardInterrupt: + excinfo = _pytest._code.ExceptionInfo() + if initstate < 2 and isinstance( + excinfo.value, pytest.exit.Exception): + sys.stderr.write('{0}: {1}\n'.format( + excinfo.typename, excinfo.value.msg)) + config.hook.pytest_keyboard_interrupt(excinfo=excinfo) + session.exitstatus = EXIT_INTERRUPTED + except: + excinfo = _pytest._code.ExceptionInfo() + config.notify_exception(excinfo, config.option) + session.exitstatus = EXIT_INTERNALERROR + if excinfo.errisinstance(SystemExit): + sys.stderr.write("mainloop: caught Spurious SystemExit!\n") + + finally: + excinfo = None # Explicitly break reference cycle. + session.startdir.chdir() + if initstate >= 2: + config.hook.pytest_sessionfinish( + session=session, + exitstatus=session.exitstatus) + config._ensure_unconfigure() + return session.exitstatus + +def pytest_cmdline_main(config): + return wrap_session(config, _main) + +def _main(config, session): + """ default command line protocol for initialization, session, + running tests and reporting. """ + config.hook.pytest_collection(session=session) + config.hook.pytest_runtestloop(session=session) + + if session.testsfailed: + return EXIT_TESTSFAILED + elif session.testscollected == 0: + return EXIT_NOTESTSCOLLECTED + +def pytest_collection(session): + return session.perform_collect() + +def pytest_runtestloop(session): + if (session.testsfailed and + not session.config.option.continue_on_collection_errors): + raise session.Interrupted( + "%d errors during collection" % session.testsfailed) + + if session.config.option.collectonly: + return True + + for i, item in enumerate(session.items): + nextitem = session.items[i+1] if i+1 < len(session.items) else None + item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) + if session.shouldstop: + raise session.Interrupted(session.shouldstop) + return True + +def pytest_ignore_collect(path, config): + p = path.dirpath() + ignore_paths = config._getconftest_pathlist("collect_ignore", path=p) + ignore_paths = ignore_paths or [] + excludeopt = config.getoption("ignore") + if excludeopt: + ignore_paths.extend([py.path.local(x) for x in excludeopt]) + + if path in ignore_paths: + return True + + # Skip duplicate paths. 
+ keepduplicates = config.getoption("keepduplicates") + duplicate_paths = config.pluginmanager._duplicatepaths + if not keepduplicates: + if path in duplicate_paths: + return True + else: + duplicate_paths.add(path) + + return False + + +class FSHookProxy: + def __init__(self, fspath, pm, remove_mods): + self.fspath = fspath + self.pm = pm + self.remove_mods = remove_mods + + def __getattr__(self, name): + x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) + self.__dict__[name] = x + return x + +def compatproperty(name): + def fget(self): + import warnings + warnings.warn("This usage is deprecated, please use pytest.{0} instead".format(name), + PendingDeprecationWarning, stacklevel=2) + return getattr(pytest, name) + + return property(fget) + +class NodeKeywords(MappingMixin): + def __init__(self, node): + self.node = node + self.parent = node.parent + self._markers = {node.name: True} + + def __getitem__(self, key): + try: + return self._markers[key] + except KeyError: + if self.parent is None: + raise + return self.parent.keywords[key] + + def __setitem__(self, key, value): + self._markers[key] = value + + def __delitem__(self, key): + raise ValueError("cannot delete key in keywords dict") + + def __iter__(self): + seen = set(self._markers) + if self.parent is not None: + seen.update(self.parent.keywords) + return iter(seen) + + def __len__(self): + return len(self.__iter__()) + + def keys(self): + return list(self) + + def __repr__(self): + return "" % (self.node, ) + + +class Node(object): + """ base class for Collector and Item the test collection tree. + Collector subclasses have children, Items are terminal nodes.""" + + def __init__(self, name, parent=None, config=None, session=None): + #: a unique name within the scope of the parent node + self.name = name + + #: the parent collector node. + self.parent = parent + + #: the pytest config object + self.config = config or parent.config + + #: the session this node is part of + self.session = session or parent.session + + #: filesystem path where this node was collected from (can be None) + self.fspath = getattr(parent, 'fspath', None) + + #: keywords/markers collected from all scopes + self.keywords = NodeKeywords(self) + + #: allow adding of extra keywords to use for matching + self.extra_keyword_matches = set() + + # used for storing artificial fixturedefs for direct parametrization + self._name2pseudofixturedef = {} + + @property + def ihook(self): + """ fspath sensitive hook proxy used to call pytest hooks""" + return self.session.gethookproxy(self.fspath) + + Module = compatproperty("Module") + Class = compatproperty("Class") + Instance = compatproperty("Instance") + Function = compatproperty("Function") + File = compatproperty("File") + Item = compatproperty("Item") + + def _getcustomclass(self, name): + cls = getattr(self, name) + if cls != getattr(pytest, name): + py.log._apiwarn("2.0", "use of node.%s is deprecated, " + "use pytest_pycollect_makeitem(...) to create custom " + "collection nodes" % name) + return cls + + def __repr__(self): + return "<%s %r>" %(self.__class__.__name__, + getattr(self, 'name', None)) + + def warn(self, code, message): + """ generate a warning with the given code and message for this + item. 
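+
+        A minimal usage sketch (the code and message are arbitrary examples)::
+
+            item.warn(code="C1", message="this test is known to be flaky")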
""" + assert isinstance(code, str) + fslocation = getattr(self, "location", None) + if fslocation is None: + fslocation = getattr(self, "fspath", None) + else: + fslocation = "%s:%s" % (fslocation[0], fslocation[1] + 1) + + self.ihook.pytest_logwarning.call_historic(kwargs=dict( + code=code, message=message, + nodeid=self.nodeid, fslocation=fslocation)) + + # methods for ordering nodes + @property + def nodeid(self): + """ a ::-separated string denoting its collection tree address. """ + try: + return self._nodeid + except AttributeError: + self._nodeid = x = self._makeid() + return x + + def _makeid(self): + return self.parent.nodeid + "::" + self.name + + def __hash__(self): + return hash(self.nodeid) + + def setup(self): + pass + + def teardown(self): + pass + + def _memoizedcall(self, attrname, function): + exattrname = "_ex_" + attrname + failure = getattr(self, exattrname, None) + if failure is not None: + py.builtin._reraise(failure[0], failure[1], failure[2]) + if hasattr(self, attrname): + return getattr(self, attrname) + try: + res = function() + except py.builtin._sysex: + raise + except: + failure = sys.exc_info() + setattr(self, exattrname, failure) + raise + setattr(self, attrname, res) + return res + + def listchain(self): + """ return list of all parent collectors up to self, + starting from root of collection tree. """ + chain = [] + item = self + while item is not None: + chain.append(item) + item = item.parent + chain.reverse() + return chain + + def add_marker(self, marker): + """ dynamically add a marker object to the node. + + ``marker`` can be a string or pytest.mark.* instance. + """ + from _pytest.mark import MarkDecorator + if isinstance(marker, py.builtin._basestring): + marker = MarkDecorator(marker) + elif not isinstance(marker, MarkDecorator): + raise ValueError("is not a string or pytest.mark.* Marker") + self.keywords[marker.name] = marker + + def get_marker(self, name): + """ get a marker object from this node or None if + the node doesn't have a marker with that name. """ + val = self.keywords.get(name, None) + if val is not None: + from _pytest.mark import MarkInfo, MarkDecorator + if isinstance(val, (MarkDecorator, MarkInfo)): + return val + + def listextrakeywords(self): + """ Return a set of all extra keywords in self and any parents.""" + extra_keywords = set() + item = self + for item in self.listchain(): + extra_keywords.update(item.extra_keyword_matches) + return extra_keywords + + def listnames(self): + return [x.name for x in self.listchain()] + + def addfinalizer(self, fin): + """ register a function to be called when this node is finalized. + + This method can only be called when this node is active + in a setup chain, for example during self.setup(). 
+ """ + self.session._setupstate.addfinalizer(fin, self) + + def getparent(self, cls): + """ get the next parent node (including ourself) + which is an instance of the given class""" + current = self + while current and not isinstance(current, cls): + current = current.parent + return current + + def _prunetraceback(self, excinfo): + pass + + def _repr_failure_py(self, excinfo, style=None): + fm = self.session._fixturemanager + if excinfo.errisinstance(fm.FixtureLookupError): + return excinfo.value.formatrepr() + tbfilter = True + if self.config.option.fulltrace: + style="long" + else: + tb = _pytest._code.Traceback([excinfo.traceback[-1]]) + self._prunetraceback(excinfo) + if len(excinfo.traceback) == 0: + excinfo.traceback = tb + tbfilter = False # prunetraceback already does it + if style == "auto": + style = "long" + # XXX should excinfo.getrepr record all data and toterminal() process it? + if style is None: + if self.config.option.tbstyle == "short": + style = "short" + else: + style = "long" + + try: + os.getcwd() + abspath = False + except OSError: + abspath = True + + return excinfo.getrepr(funcargs=True, abspath=abspath, + showlocals=self.config.option.showlocals, + style=style, tbfilter=tbfilter) + + repr_failure = _repr_failure_py + +class Collector(Node): + """ Collector instances create children through collect() + and thus iteratively build a tree. + """ + + class CollectError(Exception): + """ an error during collection, contains a custom message. """ + + def collect(self): + """ returns a list of children (items and collectors) + for this collection node. + """ + raise NotImplementedError("abstract") + + def repr_failure(self, excinfo): + """ represent a collection failure. """ + if excinfo.errisinstance(self.CollectError): + exc = excinfo.value + return str(exc.args[0]) + return self._repr_failure_py(excinfo, style="short") + + def _memocollect(self): + """ internal helper method to cache results of calling collect(). """ + return self._memoizedcall('_collected', lambda: list(self.collect())) + + def _prunetraceback(self, excinfo): + if hasattr(self, 'fspath'): + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.fspath) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + excinfo.traceback = ntraceback.filter() + +class FSCollector(Collector): + def __init__(self, fspath, parent=None, config=None, session=None): + fspath = py.path.local(fspath) # xxx only for test_resultlog.py? + name = fspath.basename + if parent is not None: + rel = fspath.relto(parent.fspath) + if rel: + name = rel + name = name.replace(os.sep, "/") + super(FSCollector, self).__init__(name, parent, config, session) + self.fspath = fspath + + def _makeid(self): + relpath = self.fspath.relto(self.config.rootdir) + if os.sep != "/": + relpath = relpath.replace(os.sep, "/") + return relpath + +class File(FSCollector): + """ base class for collecting tests from a file. """ + +class Item(Node): + """ a basic test invocation item. Note that for a single function + there might be multiple test invocation items. 
+ """ + nextitem = None + + def __init__(self, name, parent=None, config=None, session=None): + super(Item, self).__init__(name, parent, config, session) + self._report_sections = [] + + def add_report_section(self, when, key, content): + if content: + self._report_sections.append((when, key, content)) + + def reportinfo(self): + return self.fspath, None, "" + + @property + def location(self): + try: + return self._location + except AttributeError: + location = self.reportinfo() + # bestrelpath is a quite slow function + cache = self.config.__dict__.setdefault("_bestrelpathcache", {}) + try: + fspath = cache[location[0]] + except KeyError: + fspath = self.session.fspath.bestrelpath(location[0]) + cache[location[0]] = fspath + location = (fspath, location[1], str(location[2])) + self._location = location + return location + +class NoMatch(Exception): + """ raised if matching cannot locate a matching names. """ + +class Interrupted(KeyboardInterrupt): + """ signals an interrupted test run. """ + __module__ = 'builtins' # for py3 + +class Session(FSCollector): + Interrupted = Interrupted + + def __init__(self, config): + FSCollector.__init__(self, config.rootdir, parent=None, + config=config, session=self) + self.testsfailed = 0 + self.testscollected = 0 + self.shouldstop = False + self.trace = config.trace.root.get("collection") + self._norecursepatterns = config.getini("norecursedirs") + self.startdir = py.path.local() + self.config.pluginmanager.register(self, name="session") + + def _makeid(self): + return "" + + @pytest.hookimpl(tryfirst=True) + def pytest_collectstart(self): + if self.shouldstop: + raise self.Interrupted(self.shouldstop) + + @pytest.hookimpl(tryfirst=True) + def pytest_runtest_logreport(self, report): + if report.failed and not hasattr(report, 'wasxfail'): + self.testsfailed += 1 + maxfail = self.config.getvalue("maxfail") + if maxfail and self.testsfailed >= maxfail: + self.shouldstop = "stopping after %d failures" % ( + self.testsfailed) + pytest_collectreport = pytest_runtest_logreport + + def isinitpath(self, path): + return path in self._initialpaths + + def gethookproxy(self, fspath): + # check if we have the common case of running + # hooks with all conftest.py filesall conftest.py + pm = self.config.pluginmanager + my_conftestmodules = pm._getconftestmodules(fspath) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + if remove_mods: + # one or more conftests are not in use at this fspath + proxy = FSHookProxy(fspath, pm, remove_mods) + else: + # all plugis are active for this fspath + proxy = self.config.hook + return proxy + + def perform_collect(self, args=None, genitems=True): + hook = self.config.hook + try: + items = self._perform_collect(args, genitems) + hook.pytest_collection_modifyitems(session=self, + config=self.config, items=items) + finally: + hook.pytest_collection_finish(session=self) + self.testscollected = len(items) + return items + + def _perform_collect(self, args, genitems): + if args is None: + args = self.config.args + self.trace("perform_collect", self, args) + self.trace.root.indent += 1 + self._notfound = [] + self._initialpaths = set() + self._initialparts = [] + self.items = items = [] + for arg in args: + parts = self._parsearg(arg) + self._initialparts.append(parts) + self._initialpaths.add(parts[0]) + rep = collect_one_node(self) + self.ihook.pytest_collectreport(report=rep) + self.trace.root.indent -= 1 + if self._notfound: + errors = [] + for arg, exc in self._notfound: + line = "(no name %r in any of %r)" % 
(arg, exc.args[0]) + errors.append("not found: %s\n%s" % (arg, line)) + #XXX: test this + raise pytest.UsageError(*errors) + if not genitems: + return rep.result + else: + if rep.passed: + for node in rep.result: + self.items.extend(self.genitems(node)) + return items + + def collect(self): + for parts in self._initialparts: + arg = "::".join(map(str, parts)) + self.trace("processing argument", arg) + self.trace.root.indent += 1 + try: + for x in self._collect(arg): + yield x + except NoMatch: + # we are inside a make_report hook so + # we cannot directly pass through the exception + self._notfound.append((arg, sys.exc_info()[1])) + + self.trace.root.indent -= 1 + + def _collect(self, arg): + names = self._parsearg(arg) + path = names.pop(0) + if path.check(dir=1): + assert not names, "invalid arg %r" %(arg,) + for path in path.visit(fil=lambda x: x.check(file=1), + rec=self._recurse, bf=True, sort=True): + for x in self._collectfile(path): + yield x + else: + assert path.check(file=1) + for x in self.matchnodes(self._collectfile(path), names): + yield x + + def _collectfile(self, path): + ihook = self.gethookproxy(path) + if not self.isinitpath(path): + if ihook.pytest_ignore_collect(path=path, config=self.config): + return () + return ihook.pytest_collect_file(path=path, parent=self) + + def _recurse(self, path): + ihook = self.gethookproxy(path.dirpath()) + if ihook.pytest_ignore_collect(path=path, config=self.config): + return + for pat in self._norecursepatterns: + if path.check(fnmatch=pat): + return False + ihook = self.gethookproxy(path) + ihook.pytest_collect_directory(path=path, parent=self) + return True + + def _tryconvertpyarg(self, x): + """Convert a dotted module name to path. + + """ + import pkgutil + try: + loader = pkgutil.find_loader(x) + except ImportError: + return x + if loader is None: + return x + # This method is sometimes invoked when AssertionRewritingHook, which + # does not define a get_filename method, is already in place: + try: + path = loader.get_filename(x) + except AttributeError: + # Retrieve path from AssertionRewritingHook: + path = loader.modules[x][0].co_filename + if loader.is_package(x): + path = os.path.dirname(path) + return path + + def _parsearg(self, arg): + """ return (fspath, names) tuple after checking the file exists. 
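+
+        For example, ``"tests/test_foo.py::TestBar::test_baz"`` is split on
+        ``::`` into the local path object for ``tests/test_foo.py`` followed
+        by the remaining names ``['TestBar', 'test_baz']``.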
""" + parts = str(arg).split("::") + if self.config.option.pyargs: + parts[0] = self._tryconvertpyarg(parts[0]) + relpath = parts[0].replace("/", os.sep) + path = self.config.invocation_dir.join(relpath, abs=True) + if not path.check(): + if self.config.option.pyargs: + raise pytest.UsageError("file or package not found: " + arg + " (missing __init__.py?)") + else: + raise pytest.UsageError("file not found: " + arg) + parts[0] = path + return parts + + def matchnodes(self, matching, names): + self.trace("matchnodes", matching, names) + self.trace.root.indent += 1 + nodes = self._matchnodes(matching, names) + num = len(nodes) + self.trace("matchnodes finished -> ", num, "nodes") + self.trace.root.indent -= 1 + if num == 0: + raise NoMatch(matching, names[:1]) + return nodes + + def _matchnodes(self, matching, names): + if not matching or not names: + return matching + name = names[0] + assert name + nextnames = names[1:] + resultnodes = [] + for node in matching: + if isinstance(node, pytest.Item): + if not names: + resultnodes.append(node) + continue + assert isinstance(node, pytest.Collector) + rep = collect_one_node(node) + if rep.passed: + has_matched = False + for x in rep.result: + # TODO: remove parametrized workaround once collection structure contains parametrization + if x.name == name or x.name.split("[")[0] == name: + resultnodes.extend(self.matchnodes([x], nextnames)) + has_matched = True + # XXX accept IDs that don't have "()" for class instances + if not has_matched and len(rep.result) == 1 and x.name == "()": + nextnames.insert(0, name) + resultnodes.extend(self.matchnodes([x], nextnames)) + node.ihook.pytest_collectreport(report=rep) + return resultnodes + + def genitems(self, node): + self.trace("genitems", node) + if isinstance(node, pytest.Item): + node.ihook.pytest_itemcollected(item=node) + yield node + else: + assert isinstance(node, pytest.Collector) + rep = collect_one_node(node) + if rep.passed: + for subnode in rep.result: + for x in self.genitems(subnode): + yield x + node.ihook.pytest_collectreport(report=rep) diff --git a/lib/spack/external/_pytest/mark.py b/lib/spack/external/_pytest/mark.py new file mode 100644 index 0000000000..357a60492e --- /dev/null +++ b/lib/spack/external/_pytest/mark.py @@ -0,0 +1,328 @@ +""" generic mechanism for marking and selecting python functions. """ +import inspect + + +class MarkerError(Exception): + + """Error in use of a pytest marker/attribute.""" + + +def pytest_namespace(): + return {'mark': MarkGenerator()} + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + '-k', + action="store", dest="keyword", default='', metavar="EXPRESSION", + help="only run tests which match the given substring expression. " + "An expression is a python evaluatable expression " + "where all names are substring-matched against test names " + "and their parent classes. Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other'. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them." + ) + + group._addoption( + "-m", + action="store", dest="markexpr", default="", metavar="MARKEXPR", + help="only run tests matching given mark expression. " + "example: -m 'mark1 and not mark2'." 
+ ) + + group.addoption( + "--markers", action="store_true", + help="show markers (builtin, plugin and per-project ones)." + ) + + parser.addini("markers", "markers for test functions", 'linelist') + + +def pytest_cmdline_main(config): + import _pytest.config + if config.option.markers: + config._do_configure() + tw = _pytest.config.create_terminal_writer(config) + for line in config.getini("markers"): + name, rest = line.split(":", 1) + tw.write("@pytest.mark.%s:" % name, bold=True) + tw.line(rest) + tw.line() + config._ensure_unconfigure() + return 0 + + +pytest_cmdline_main.tryfirst = True + + +def pytest_collection_modifyitems(items, config): + keywordexpr = config.option.keyword.lstrip() + matchexpr = config.option.markexpr + if not keywordexpr and not matchexpr: + return + # pytest used to allow "-" for negating + # but today we just allow "-" at the beginning, use "not" instead + # we probably remove "-" alltogether soon + if keywordexpr.startswith("-"): + keywordexpr = "not " + keywordexpr[1:] + selectuntil = False + if keywordexpr[-1:] == ":": + selectuntil = True + keywordexpr = keywordexpr[:-1] + + remaining = [] + deselected = [] + for colitem in items: + if keywordexpr and not matchkeyword(colitem, keywordexpr): + deselected.append(colitem) + else: + if selectuntil: + keywordexpr = None + if matchexpr: + if not matchmark(colitem, matchexpr): + deselected.append(colitem) + continue + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +class MarkMapping: + """Provides a local mapping for markers where item access + resolves to True if the marker is present. """ + def __init__(self, keywords): + mymarks = set() + for key, value in keywords.items(): + if isinstance(value, MarkInfo) or isinstance(value, MarkDecorator): + mymarks.add(key) + self._mymarks = mymarks + + def __getitem__(self, name): + return name in self._mymarks + + +class KeywordMapping: + """Provides a local mapping for keywords. + Given a list of names, map any substring of one of these names to True. + """ + def __init__(self, names): + self._names = names + + def __getitem__(self, subname): + for name in self._names: + if subname in name: + return True + return False + + +def matchmark(colitem, markexpr): + """Tries to match on any marker names, attached to the given colitem.""" + return eval(markexpr, {}, MarkMapping(colitem.keywords)) + + +def matchkeyword(colitem, keywordexpr): + """Tries to match given keyword expression to given collector item. + + Will match on the name of colitem, including the names of its parents. + Only matches names of items which are either a :class:`Class` or a + :class:`Function`. + Additionally, matches on names in the 'extra_keyword_matches' set of + any item, as well as names directly assigned to test functions. 
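+
+    For example, ``-k "http and not slow"`` selects items whose collected
+    names (including class, module and extra keyword names) contain "http"
+    but not "slow"; a single word such as ``-k pass`` is substring-matched
+    directly.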
+ """ + mapped_names = set() + + # Add the names of the current item and any parent items + import pytest + for item in colitem.listchain(): + if not isinstance(item, pytest.Instance): + mapped_names.add(item.name) + + # Add the names added as extra keywords to current or parent items + for name in colitem.listextrakeywords(): + mapped_names.add(name) + + # Add the names attached to the current function through direct assignment + if hasattr(colitem, 'function'): + for name in colitem.function.__dict__: + mapped_names.add(name) + + mapping = KeywordMapping(mapped_names) + if " " not in keywordexpr: + # special case to allow for simple "-k pass" and "-k 1.3" + return mapping[keywordexpr] + elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]: + return not mapping[keywordexpr[4:]] + return eval(keywordexpr, {}, mapping) + + +def pytest_configure(config): + import pytest + if config.option.strict: + pytest.mark._config = config + + +class MarkGenerator: + """ Factory for :class:`MarkDecorator` objects - exposed as + a ``pytest.mark`` singleton instance. Example:: + + import pytest + @pytest.mark.slowtest + def test_function(): + pass + + will set a 'slowtest' :class:`MarkInfo` object + on the ``test_function`` object. """ + + def __getattr__(self, name): + if name[0] == "_": + raise AttributeError("Marker name must NOT start with underscore") + if hasattr(self, '_config'): + self._check(name) + return MarkDecorator(name) + + def _check(self, name): + try: + if name in self._markers: + return + except AttributeError: + pass + self._markers = l = set() + for line in self._config.getini("markers"): + beginning = line.split(":", 1) + x = beginning[0].split("(", 1)[0] + l.add(x) + if name not in self._markers: + raise AttributeError("%r not a registered marker" % (name,)) + +def istestfunc(func): + return hasattr(func, "__call__") and \ + getattr(func, "__name__", "") != "" + +class MarkDecorator: + """ A decorator for test functions and test classes. When applied + it will create :class:`MarkInfo` objects which may be + :ref:`retrieved by hooks as item keywords `. + MarkDecorator instances are often created like this:: + + mark1 = pytest.mark.NAME # simple MarkDecorator + mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator + + and can then be applied as decorators to test functions:: + + @mark2 + def test_function(): + pass + + When a MarkDecorator instance is called it does the following: + 1. If called with a single class as its only positional argument and no + additional keyword arguments, it attaches itself to the class so it + gets applied automatically to all test cases found in that class. + 2. If called with a single function as its only positional argument and + no additional keyword arguments, it attaches a MarkInfo object to the + function, containing all the arguments already stored internally in + the MarkDecorator. + 3. When called in any other case, it performs a 'fake construction' call, + i.e. it returns a new MarkDecorator instance with the original + MarkDecorator's content updated with the arguments passed to this + call. + + Note: The rules above prevent MarkDecorator objects from storing only a + single function or class reference as their positional argument with no + additional keyword or positional arguments. 
+ + """ + def __init__(self, name, args=None, kwargs=None): + self.name = name + self.args = args or () + self.kwargs = kwargs or {} + + @property + def markname(self): + return self.name # for backward-compat (2.4.1 had this attr) + + def __repr__(self): + d = self.__dict__.copy() + name = d.pop('name') + return "" % (name, d) + + def __call__(self, *args, **kwargs): + """ if passed a single callable argument: decorate it with mark info. + otherwise add *args/**kwargs in-place to mark information. """ + if args and not kwargs: + func = args[0] + is_class = inspect.isclass(func) + if len(args) == 1 and (istestfunc(func) or is_class): + if is_class: + if hasattr(func, 'pytestmark'): + mark_list = func.pytestmark + if not isinstance(mark_list, list): + mark_list = [mark_list] + # always work on a copy to avoid updating pytestmark + # from a superclass by accident + mark_list = mark_list + [self] + func.pytestmark = mark_list + else: + func.pytestmark = [self] + else: + holder = getattr(func, self.name, None) + if holder is None: + holder = MarkInfo( + self.name, self.args, self.kwargs + ) + setattr(func, self.name, holder) + else: + holder.add(self.args, self.kwargs) + return func + kw = self.kwargs.copy() + kw.update(kwargs) + args = self.args + args + return self.__class__(self.name, args=args, kwargs=kw) + + +def extract_argvalue(maybe_marked_args): + # TODO: incorrect mark data, the old code wanst able to collect lists + # individual parametrized argument sets can be wrapped in a series + # of markers in which case we unwrap the values and apply the mark + # at Function init + newmarks = {} + argval = maybe_marked_args + while isinstance(argval, MarkDecorator): + newmark = MarkDecorator(argval.markname, + argval.args[:-1], argval.kwargs) + newmarks[newmark.markname] = newmark + argval = argval.args[-1] + return argval, newmarks + + +class MarkInfo: + """ Marking object created by :class:`MarkDecorator` instances. """ + def __init__(self, name, args, kwargs): + #: name of attribute + self.name = name + #: positional argument list, empty if none specified + self.args = args + #: keyword argument dictionary, empty if nothing specified + self.kwargs = kwargs.copy() + self._arglist = [(args, kwargs.copy())] + + def __repr__(self): + return "" % ( + self.name, self.args, self.kwargs + ) + + def add(self, args, kwargs): + """ add a MarkInfo with the given args and kwargs. """ + self._arglist.append((args, kwargs)) + self.args += args + self.kwargs.update(kwargs) + + def __iter__(self): + """ yield MarkInfo objects each relating to a marking-call. """ + for args, kwargs in self._arglist: + yield MarkInfo(self.name, args, kwargs) diff --git a/lib/spack/external/_pytest/monkeypatch.py b/lib/spack/external/_pytest/monkeypatch.py new file mode 100644 index 0000000000..852e72beda --- /dev/null +++ b/lib/spack/external/_pytest/monkeypatch.py @@ -0,0 +1,258 @@ +""" monkeypatching and mocking functionality. 
""" + +import os, sys +import re + +from py.builtin import _basestring + +import pytest + +RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$") + + +@pytest.fixture +def monkeypatch(request): + """The returned ``monkeypatch`` fixture provides these + helper methods to modify objects, dictionaries or os.environ:: + + monkeypatch.setattr(obj, name, value, raising=True) + monkeypatch.delattr(obj, name, raising=True) + monkeypatch.setitem(mapping, name, value) + monkeypatch.delitem(obj, name, raising=True) + monkeypatch.setenv(name, value, prepend=False) + monkeypatch.delenv(name, value, raising=True) + monkeypatch.syspath_prepend(path) + monkeypatch.chdir(path) + + All modifications will be undone after the requesting + test function or fixture has finished. The ``raising`` + parameter determines if a KeyError or AttributeError + will be raised if the set/deletion operation has no target. + """ + mpatch = MonkeyPatch() + request.addfinalizer(mpatch.undo) + return mpatch + + +def resolve(name): + # simplified from zope.dottedname + parts = name.split('.') + + used = parts.pop(0) + found = __import__(used) + for part in parts: + used += '.' + part + try: + found = getattr(found, part) + except AttributeError: + pass + else: + continue + # we use explicit un-nesting of the handling block in order + # to avoid nested exceptions on python 3 + try: + __import__(used) + except ImportError as ex: + # str is used for py2 vs py3 + expected = str(ex).split()[-1] + if expected == used: + raise + else: + raise ImportError( + 'import error in %s: %s' % (used, ex) + ) + found = annotated_getattr(found, part, used) + return found + + +def annotated_getattr(obj, name, ann): + try: + obj = getattr(obj, name) + except AttributeError: + raise AttributeError( + '%r object at %s has no attribute %r' % ( + type(obj).__name__, ann, name + ) + ) + return obj + + +def derive_importpath(import_path, raising): + if not isinstance(import_path, _basestring) or "." not in import_path: + raise TypeError("must be absolute import path string, not %r" % + (import_path,)) + module, attr = import_path.rsplit('.', 1) + target = resolve(module) + if raising: + annotated_getattr(target, attr, ann=module) + return attr, target + + +class Notset: + def __repr__(self): + return "" + + +notset = Notset() + + +class MonkeyPatch: + """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes. + """ + + def __init__(self): + self._setattr = [] + self._setitem = [] + self._cwd = None + self._savesyspath = None + + def setattr(self, target, name, value=notset, raising=True): + """ Set attribute value on target, memorizing the old value. + By default raise AttributeError if the attribute did not exist. + + For convenience you can specify a string as ``target`` which + will be interpreted as a dotted import path, with the last part + being the attribute name. Example: + ``monkeypatch.setattr("os.getcwd", lambda x: "/")`` + would set the ``getcwd`` function of the ``os`` module. + + The ``raising`` value determines if the setattr should fail + if the attribute is not already present (defaults to True + which means it will raise). 
+ """ + __tracebackhide__ = True + import inspect + + if value is notset: + if not isinstance(target, _basestring): + raise TypeError("use setattr(target, name, value) or " + "setattr(target, value) with target being a dotted " + "import string") + value = name + name, target = derive_importpath(target, raising) + + oldval = getattr(target, name, notset) + if raising and oldval is notset: + raise AttributeError("%r has no attribute %r" % (target, name)) + + # avoid class descriptors like staticmethod/classmethod + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + setattr(target, name, value) + + def delattr(self, target, name=notset, raising=True): + """ Delete attribute ``name`` from ``target``, by default raise + AttributeError it the attribute did not previously exist. + + If no ``name`` is specified and ``target`` is a string + it will be interpreted as a dotted import path with the + last part being the attribute name. + + If ``raising`` is set to False, no exception will be raised if the + attribute is missing. + """ + __tracebackhide__ = True + if name is notset: + if not isinstance(target, _basestring): + raise TypeError("use delattr(target, name) or " + "delattr(target) with target being a dotted " + "import string") + name, target = derive_importpath(target, raising) + + if not hasattr(target, name): + if raising: + raise AttributeError(name) + else: + self._setattr.append((target, name, getattr(target, name, notset))) + delattr(target, name) + + def setitem(self, dic, name, value): + """ Set dictionary entry ``name`` to value. """ + self._setitem.append((dic, name, dic.get(name, notset))) + dic[name] = value + + def delitem(self, dic, name, raising=True): + """ Delete ``name`` from dict. Raise KeyError if it doesn't exist. + + If ``raising`` is set to False, no exception will be raised if the + key is missing. + """ + if name not in dic: + if raising: + raise KeyError(name) + else: + self._setitem.append((dic, name, dic.get(name, notset))) + del dic[name] + + def setenv(self, name, value, prepend=None): + """ Set environment variable ``name`` to ``value``. If ``prepend`` + is a character, read the current environment variable value + and prepend the ``value`` adjoined with the ``prepend`` character.""" + value = str(value) + if prepend and name in os.environ: + value = value + prepend + os.environ[name] + self.setitem(os.environ, name, value) + + def delenv(self, name, raising=True): + """ Delete ``name`` from the environment. Raise KeyError it does not + exist. + + If ``raising`` is set to False, no exception will be raised if the + environment variable is missing. + """ + self.delitem(os.environ, name, raising=raising) + + def syspath_prepend(self, path): + """ Prepend ``path`` to ``sys.path`` list of import locations. """ + if self._savesyspath is None: + self._savesyspath = sys.path[:] + sys.path.insert(0, str(path)) + + def chdir(self, path): + """ Change the current working directory to the specified path. + Path can be a string or a py.path.local object. + """ + if self._cwd is None: + self._cwd = os.getcwd() + if hasattr(path, "chdir"): + path.chdir() + else: + os.chdir(path) + + def undo(self): + """ Undo previous changes. This call consumes the + undo stack. Calling it a second time has no effect unless + you do more monkeypatching after the undo call. + + There is generally no need to call `undo()`, since it is + called automatically during tear-down. 
+ + Note that the same `monkeypatch` fixture is used across a + single test function invocation. If `monkeypatch` is used both by + the test function itself and one of the test fixtures, + calling `undo()` will undo all of the changes made in + both functions. + """ + for obj, name, value in reversed(self._setattr): + if value is not notset: + setattr(obj, name, value) + else: + delattr(obj, name) + self._setattr[:] = [] + for dictionary, name, value in reversed(self._setitem): + if value is notset: + try: + del dictionary[name] + except KeyError: + pass # was already deleted, so we have the desired state + else: + dictionary[name] = value + self._setitem[:] = [] + if self._savesyspath is not None: + sys.path[:] = self._savesyspath + self._savesyspath = None + + if self._cwd is not None: + os.chdir(self._cwd) + self._cwd = None diff --git a/lib/spack/external/_pytest/nose.py b/lib/spack/external/_pytest/nose.py new file mode 100644 index 0000000000..0387468686 --- /dev/null +++ b/lib/spack/external/_pytest/nose.py @@ -0,0 +1,71 @@ +""" run test suites written for nose. """ + +import sys + +import py +import pytest +from _pytest import unittest + + +def get_skip_exceptions(): + skip_classes = set() + for module_name in ('unittest', 'unittest2', 'nose'): + mod = sys.modules.get(module_name) + if hasattr(mod, 'SkipTest'): + skip_classes.add(mod.SkipTest) + return tuple(skip_classes) + + +def pytest_runtest_makereport(item, call): + if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()): + # let's substitute the excinfo with a pytest.skip one + call2 = call.__class__(lambda: + pytest.skip(str(call.excinfo.value)), call.when) + call.excinfo = call2.excinfo + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_setup(item): + if is_potential_nosetest(item): + if isinstance(item.parent, pytest.Generator): + gen = item.parent + if not hasattr(gen, '_nosegensetup'): + call_optional(gen.obj, 'setup') + if isinstance(gen.parent, pytest.Instance): + call_optional(gen.parent.obj, 'setup') + gen._nosegensetup = True + if not call_optional(item.obj, 'setup'): + # call module level setup if there is no object level one + call_optional(item.parent.obj, 'setup') + #XXX this implies we only call teardown when setup worked + item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item) + +def teardown_nose(item): + if is_potential_nosetest(item): + if not call_optional(item.obj, 'teardown'): + call_optional(item.parent.obj, 'teardown') + #if hasattr(item.parent, '_nosegensetup'): + # #call_optional(item._nosegensetup, 'teardown') + # del item.parent._nosegensetup + + +def pytest_make_collect_report(collector): + if isinstance(collector, pytest.Generator): + call_optional(collector.obj, 'setup') + + +def is_potential_nosetest(item): + # extra check needed since we do not do nose style setup/teardown + # on direct unittest style classes + return isinstance(item, pytest.Function) and \ + not isinstance(item, unittest.TestCaseFunction) + + +def call_optional(obj, name): + method = getattr(obj, name, None) + isfixture = hasattr(method, "_pytestfixturefunction") + if method is not None and not isfixture and py.builtin.callable(method): + # If there's any problems allow the exception to raise rather than + # silently ignoring them + method() + return True diff --git a/lib/spack/external/_pytest/pastebin.py b/lib/spack/external/_pytest/pastebin.py new file mode 100644 index 0000000000..9f1cf90637 --- /dev/null +++ b/lib/spack/external/_pytest/pastebin.py @@ -0,0 +1,98 @@ +""" submit 
failure or test session information to a pastebin service. """ +import pytest +import sys +import tempfile + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting") + group._addoption('--pastebin', metavar="mode", + action='store', dest="pastebin", default=None, + choices=['failed', 'all'], + help="send failed|all info to bpaste.net pastebin service.") + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config): + import py + if config.option.pastebin == "all": + tr = config.pluginmanager.getplugin('terminalreporter') + # if no terminal reporter plugin is present, nothing we can do here; + # this can happen when this function executes in a slave node + # when using pytest-xdist, for example + if tr is not None: + # pastebin file will be utf-8 encoded binary file + config._pastebinfile = tempfile.TemporaryFile('w+b') + oldwrite = tr._tw.write + + def tee_write(s, **kwargs): + oldwrite(s, **kwargs) + if py.builtin._istext(s): + s = s.encode('utf-8') + config._pastebinfile.write(s) + + tr._tw.write = tee_write + + +def pytest_unconfigure(config): + if hasattr(config, '_pastebinfile'): + # get terminal contents and delete file + config._pastebinfile.seek(0) + sessionlog = config._pastebinfile.read() + config._pastebinfile.close() + del config._pastebinfile + # undo our patching in the terminal reporter + tr = config.pluginmanager.getplugin('terminalreporter') + del tr._tw.__dict__['write'] + # write summary + tr.write_sep("=", "Sending information to Paste Service") + pastebinurl = create_new_paste(sessionlog) + tr.write_line("pastebin session-log: %s\n" % pastebinurl) + + +def create_new_paste(contents): + """ + Creates a new paste using bpaste.net service. + + :contents: paste contents as utf-8 encoded bytes + :returns: url to the pasted contents + """ + import re + if sys.version_info < (3, 0): + from urllib import urlopen, urlencode + else: + from urllib.request import urlopen + from urllib.parse import urlencode + + params = { + 'code': contents, + 'lexer': 'python3' if sys.version_info[0] == 3 else 'python', + 'expiry': '1week', + } + url = 'https://bpaste.net' + response = urlopen(url, data=urlencode(params).encode('ascii')).read() + m = re.search(r'href="/raw/(\w+)"', response.decode('utf-8')) + if m: + return '%s/show/%s' % (url, m.group(1)) + else: + return 'bad response: ' + response + + +def pytest_terminal_summary(terminalreporter): + import _pytest.config + if terminalreporter.config.option.pastebin != "failed": + return + tr = terminalreporter + if 'failed' in tr.stats: + terminalreporter.write_sep("=", "Sending information to Paste Service") + for rep in terminalreporter.stats.get('failed'): + try: + msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc + except AttributeError: + msg = tr._getfailureheadline(rep) + tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True) + rep.toterminal(tw) + s = tw.stringio.getvalue() + assert len(s) + pastebinurl = create_new_paste(s) + tr.write_line("%s --> %s" %(msg, pastebinurl)) diff --git a/lib/spack/external/_pytest/pytester.py b/lib/spack/external/_pytest/pytester.py new file mode 100644 index 0000000000..17ff529a6c --- /dev/null +++ b/lib/spack/external/_pytest/pytester.py @@ -0,0 +1,1139 @@ +""" (disabled by default) support for testing pytest and pytest plugins. 
""" +import codecs +import gc +import os +import platform +import re +import subprocess +import sys +import time +import traceback +from fnmatch import fnmatch + +from py.builtin import print_ + +from _pytest._code import Source +import py +import pytest +from _pytest.main import Session, EXIT_OK +from _pytest.assertion.rewrite import AssertionRewritingHook + + +def pytest_addoption(parser): + # group = parser.getgroup("pytester", "pytester (self-tests) options") + parser.addoption('--lsof', + action="store_true", dest="lsof", default=False, + help=("run FD checks if lsof is available")) + + parser.addoption('--runpytest', default="inprocess", dest="runpytest", + choices=("inprocess", "subprocess", ), + help=("run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method")) + + +def pytest_configure(config): + # This might be called multiple times. Only take the first. + global _pytest_fullpath + try: + _pytest_fullpath + except NameError: + _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc")) + _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py") + + if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + +class LsofFdLeakChecker(object): + def get_open_files(self): + out = self._exec_lsof() + open_files = self._parse_lsof_output(out) + return open_files + + def _exec_lsof(self): + pid = os.getpid() + return py.process.cmdexec("lsof -Ffn0 -p %d" % pid) + + def _parse_lsof_output(self, out): + def isopen(line): + return line.startswith('f') and ("deleted" not in line and + 'mem' not in line and "txt" not in line and 'cwd' not in line) + + open_files = [] + + for line in out.split("\n"): + if isopen(line): + fields = line.split('\0') + fd = fields[0][1:] + filename = fields[1][1:] + if filename.startswith('/'): + open_files.append((fd, filename)) + + return open_files + + def matching_platform(self): + try: + py.process.cmdexec("lsof -v") + except (py.process.cmdexec.Error, UnicodeDecodeError): + # cmdexec may raise UnicodeDecodeError on Windows systems + # with locale other than english: + # https://bitbucket.org/pytest-dev/py/issues/66 + return False + else: + return True + + @pytest.hookimpl(hookwrapper=True, tryfirst=True) + def pytest_runtest_item(self, item): + lines1 = self.get_open_files() + yield + if hasattr(sys, "pypy_version_info"): + gc.collect() + lines2 = self.get_open_files() + + new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1]) + leaked_files = [t for t in lines2 if t[0] in new_fds] + if leaked_files: + error = [] + error.append("***** %s FD leakage detected" % len(leaked_files)) + error.extend([str(f) for f in leaked_files]) + error.append("*** Before:") + error.extend([str(f) for f in lines1]) + error.append("*** After:") + error.extend([str(f) for f in lines2]) + error.append(error[0]) + error.append("*** function %s:%s: %s " % item.location) + pytest.fail("\n".join(error), pytrace=False) + + +# XXX copied from execnet's conftest.py - needs to be merged +winpymap = { + 'python2.7': r'C:\Python27\python.exe', + 'python2.6': r'C:\Python26\python.exe', + 'python3.1': r'C:\Python31\python.exe', + 'python3.2': r'C:\Python32\python.exe', + 'python3.3': r'C:\Python33\python.exe', + 'python3.4': r'C:\Python34\python.exe', + 'python3.5': r'C:\Python35\python.exe', +} + +def getexecutable(name, cache={}): + try: + return cache[name] + except KeyError: + executable = py.path.local.sysfind(name) + if executable: + import 
subprocess + popen = subprocess.Popen([str(executable), "--version"], + universal_newlines=True, stderr=subprocess.PIPE) + out, err = popen.communicate() + if name == "jython": + if not err or "2.5" not in err: + executable = None + if "2.5.2" in err: + executable = None # http://bugs.jython.org/issue1790 + elif popen.returncode != 0: + # Handle pyenv's 127. + executable = None + cache[name] = executable + return executable + +@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4", + 'pypy', 'pypy3']) +def anypython(request): + name = request.param + executable = getexecutable(name) + if executable is None: + if sys.platform == "win32": + executable = winpymap.get(name, None) + if executable: + executable = py.path.local(executable) + if executable.check(): + return executable + pytest.skip("no suitable %s found" % (name,)) + return executable + +# used at least by pytest-xdist plugin +@pytest.fixture +def _pytest(request): + """ Return a helper which offers a gethookrecorder(hook) + method which returns a HookRecorder instance which helps + to make assertions about called hooks. + """ + return PytestArg(request) + +class PytestArg: + def __init__(self, request): + self.request = request + + def gethookrecorder(self, hook): + hookrecorder = HookRecorder(hook._pm) + self.request.addfinalizer(hookrecorder.finish_recording) + return hookrecorder + + +def get_public_names(l): + """Only return names from iterator l without a leading underscore.""" + return [x for x in l if x[0] != "_"] + + +class ParsedCall: + def __init__(self, name, kwargs): + self.__dict__.update(kwargs) + self._name = name + + def __repr__(self): + d = self.__dict__.copy() + del d['_name'] + return "" %(self._name, d) + + +class HookRecorder: + """Record all hooks called in a plugin manager. + + This wraps all the hook calls in the plugin manager, recording + each call before propagating the normal calls. 
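A minimal sketch of how a HookRecorder is typically consumed, assuming the `testdir` fixture defined later in this module; the test body is illustrative:

    def test_reports_are_recorded(testdir):
        testdir.makepyfile("def test_ok(): pass")
        reprec = testdir.inline_run()     # HookRecorder wrapping the run
        reprec.assertoutcome(passed=1)
        reports = reprec.getreports("pytest_runtest_logreport")
        assert any(rep.passed for rep in reports)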
+ + """ + + def __init__(self, pluginmanager): + self._pluginmanager = pluginmanager + self.calls = [] + + def before(hook_name, hook_impls, kwargs): + self.calls.append(ParsedCall(hook_name, kwargs)) + + def after(outcome, hook_name, hook_impls, kwargs): + pass + + self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) + + def finish_recording(self): + self._undo_wrapping() + + def getcalls(self, names): + if isinstance(names, str): + names = names.split() + return [call for call in self.calls if call._name in names] + + def assert_contains(self, entries): + __tracebackhide__ = True + i = 0 + entries = list(entries) + backlocals = sys._getframe(1).f_locals + while entries: + name, check = entries.pop(0) + for ind, call in enumerate(self.calls[i:]): + if call._name == name: + print_("NAMEMATCH", name, call) + if eval(check, backlocals, call.__dict__): + print_("CHECKERMATCH", repr(check), "->", call) + else: + print_("NOCHECKERMATCH", repr(check), "-", call) + continue + i += ind + 1 + break + print_("NONAMEMATCH", name, "with", call) + else: + pytest.fail("could not find %r check %r" % (name, check)) + + def popcall(self, name): + __tracebackhide__ = True + for i, call in enumerate(self.calls): + if call._name == name: + del self.calls[i] + return call + lines = ["could not find call %r, in:" % (name,)] + lines.extend([" %s" % str(x) for x in self.calls]) + pytest.fail("\n".join(lines)) + + def getcall(self, name): + l = self.getcalls(name) + assert len(l) == 1, (name, l) + return l[0] + + # functionality for test reports + + def getreports(self, + names="pytest_runtest_logreport pytest_collectreport"): + return [x.report for x in self.getcalls(names)] + + def matchreport(self, inamepart="", + names="pytest_runtest_logreport pytest_collectreport", when=None): + """ return a testreport whose dotted import path matches """ + l = [] + for rep in self.getreports(names=names): + try: + if not when and rep.when != "call" and rep.passed: + # setup/teardown passing reports - let's ignore those + continue + except AttributeError: + pass + if when and getattr(rep, 'when', None) != when: + continue + if not inamepart or inamepart in rep.nodeid.split("::"): + l.append(rep) + if not l: + raise ValueError("could not find test report matching %r: " + "no test reports at all!" 
% (inamepart,)) + if len(l) > 1: + raise ValueError( + "found 2 or more testreports matching %r: %s" %(inamepart, l)) + return l[0] + + def getfailures(self, + names='pytest_runtest_logreport pytest_collectreport'): + return [rep for rep in self.getreports(names) if rep.failed] + + def getfailedcollections(self): + return self.getfailures('pytest_collectreport') + + def listoutcomes(self): + passed = [] + skipped = [] + failed = [] + for rep in self.getreports( + "pytest_collectreport pytest_runtest_logreport"): + if rep.passed: + if getattr(rep, "when", None) == "call": + passed.append(rep) + elif rep.skipped: + skipped.append(rep) + elif rep.failed: + failed.append(rep) + return passed, skipped, failed + + def countoutcomes(self): + return [len(x) for x in self.listoutcomes()] + + def assertoutcome(self, passed=0, skipped=0, failed=0): + realpassed, realskipped, realfailed = self.listoutcomes() + assert passed == len(realpassed) + assert skipped == len(realskipped) + assert failed == len(realfailed) + + def clear(self): + self.calls[:] = [] + + +@pytest.fixture +def linecomp(request): + return LineComp() + + +@pytest.fixture(name='LineMatcher') +def LineMatcher_fixture(request): + return LineMatcher + + +@pytest.fixture +def testdir(request, tmpdir_factory): + return Testdir(request, tmpdir_factory) + + +rex_outcome = re.compile("(\d+) ([\w-]+)") +class RunResult: + """The result of running a command. + + Attributes: + + :ret: The return value. + :outlines: List of lines captured from stdout. + :errlines: List of lines captures from stderr. + :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to + reconstruct stdout or the commonly used + ``stdout.fnmatch_lines()`` method. + :stderrr: :py:class:`LineMatcher` of stderr. + :duration: Duration in seconds. + + """ + def __init__(self, ret, outlines, errlines, duration): + self.ret = ret + self.outlines = outlines + self.errlines = errlines + self.stdout = LineMatcher(outlines) + self.stderr = LineMatcher(errlines) + self.duration = duration + + def parseoutcomes(self): + """ Return a dictionary of outcomestring->num from parsing + the terminal output that the test process produced.""" + for line in reversed(self.outlines): + if 'seconds' in line: + outcomes = rex_outcome.findall(line) + if outcomes: + d = {} + for num, cat in outcomes: + d[cat] = int(num) + return d + + def assert_outcomes(self, passed=0, skipped=0, failed=0): + """ assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run.""" + d = self.parseoutcomes() + assert passed == d.get("passed", 0) + assert skipped == d.get("skipped", 0) + assert failed == d.get("failed", 0) + + + +class Testdir: + """Temporary test directory with tools to test/run pytest itself. + + This is based on the ``tmpdir`` fixture but provides a number of + methods which aid with testing pytest itself. Unless + :py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as + current working directory. + + Attributes: + + :tmpdir: The :py:class:`py.path.local` instance of the temporary + directory. + + :plugins: A list of plugins to use with :py:meth:`parseconfig` and + :py:meth:`runpytest`. Initially this is an empty list but + plugins can be added to the list. The type of items to add to + the list depend on the method which uses them so refer to them + for details. 
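A minimal sketch of the usual workflow with this fixture; the file contents and match patterns are illustrative:

    def test_my_plugin(testdir):
        testdir.makepyfile("""
            def test_one():
                assert True
            def test_two():
                assert False
        """)
        result = testdir.runpytest("-v")            # returns a RunResult
        result.assert_outcomes(passed=1, failed=1)
        result.stdout.fnmatch_lines(["*test_two*FAILED*"])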
+ + """ + + def __init__(self, request, tmpdir_factory): + self.request = request + # XXX remove duplication with tmpdir plugin + basetmp = tmpdir_factory.ensuretemp("testdir") + name = request.function.__name__ + for i in range(100): + try: + tmpdir = basetmp.mkdir(name + str(i)) + except py.error.EEXIST: + continue + break + self.tmpdir = tmpdir + self.plugins = [] + self._savesyspath = (list(sys.path), list(sys.meta_path)) + self._savemodulekeys = set(sys.modules) + self.chdir() # always chdir + self.request.addfinalizer(self.finalize) + method = self.request.config.getoption("--runpytest") + if method == "inprocess": + self._runpytest_method = self.runpytest_inprocess + elif method == "subprocess": + self._runpytest_method = self.runpytest_subprocess + + def __repr__(self): + return "" % (self.tmpdir,) + + def finalize(self): + """Clean up global state artifacts. + + Some methods modify the global interpreter state and this + tries to clean this up. It does not remove the temporary + directory however so it can be looked at after the test run + has finished. + + """ + sys.path[:], sys.meta_path[:] = self._savesyspath + if hasattr(self, '_olddir'): + self._olddir.chdir() + self.delete_loaded_modules() + + def delete_loaded_modules(self): + """Delete modules that have been loaded during a test. + + This allows the interpreter to catch module changes in case + the module is re-imported. + """ + for name in set(sys.modules).difference(self._savemodulekeys): + # it seems zope.interfaces is keeping some state + # (used by twisted related tests) + if name != "zope.interface": + del sys.modules[name] + + def make_hook_recorder(self, pluginmanager): + """Create a new :py:class:`HookRecorder` for a PluginManager.""" + assert not hasattr(pluginmanager, "reprec") + pluginmanager.reprec = reprec = HookRecorder(pluginmanager) + self.request.addfinalizer(reprec.finish_recording) + return reprec + + def chdir(self): + """Cd into the temporary directory. + + This is done automatically upon instantiation. + + """ + old = self.tmpdir.chdir() + if not hasattr(self, '_olddir'): + self._olddir = old + + def _makefile(self, ext, args, kwargs): + items = list(kwargs.items()) + if args: + source = py.builtin._totext("\n").join( + map(py.builtin._totext, args)) + py.builtin._totext("\n") + basename = self.request.function.__name__ + items.insert(0, (basename, source)) + ret = None + for name, value in items: + p = self.tmpdir.join(name).new(ext=ext) + p.dirpath().ensure_dir() + source = Source(value) + + def my_totext(s, encoding="utf-8"): + if py.builtin._isbytes(s): + s = py.builtin._totext(s, encoding=encoding) + return s + + source_unicode = "\n".join([my_totext(line) for line in source.lines]) + source = py.builtin._totext(source_unicode) + content = source.strip().encode("utf-8") # + "\n" + #content = content.rstrip() + "\n" + p.write(content, "wb") + if ret is None: + ret = p + return ret + + def makefile(self, ext, *args, **kwargs): + """Create a new file in the testdir. + + ext: The extension the file should use, including the dot. + E.g. ".py". + + args: All args will be treated as strings and joined using + newlines. The result will be written as contents to the + file. The name of the file will be based on the test + function requesting this fixture. + E.g. "testdir.makefile('.txt', 'line1', 'line2')" + + kwargs: Each keyword is the name of a file, while the value of + it will be written as contents of the file. + E.g. 
"testdir.makefile('.ini', pytest='[pytest]\naddopts=-rs\n')" + + """ + return self._makefile(ext, args, kwargs) + + def makeconftest(self, source): + """Write a contest.py file with 'source' as contents.""" + return self.makepyfile(conftest=source) + + def makeini(self, source): + """Write a tox.ini file with 'source' as contents.""" + return self.makefile('.ini', tox=source) + + def getinicfg(self, source): + """Return the pytest section from the tox.ini config file.""" + p = self.makeini(source) + return py.iniconfig.IniConfig(p)['pytest'] + + def makepyfile(self, *args, **kwargs): + """Shortcut for .makefile() with a .py extension.""" + return self._makefile('.py', args, kwargs) + + def maketxtfile(self, *args, **kwargs): + """Shortcut for .makefile() with a .txt extension.""" + return self._makefile('.txt', args, kwargs) + + def syspathinsert(self, path=None): + """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`. + + This is undone automatically after the test. + """ + if path is None: + path = self.tmpdir + sys.path.insert(0, str(path)) + # a call to syspathinsert() usually means that the caller + # wants to import some dynamically created files. + # with python3 we thus invalidate import caches. + self._possibly_invalidate_import_caches() + + def _possibly_invalidate_import_caches(self): + # invalidate caches if we can (py33 and above) + try: + import importlib + except ImportError: + pass + else: + if hasattr(importlib, "invalidate_caches"): + importlib.invalidate_caches() + + def mkdir(self, name): + """Create a new (sub)directory.""" + return self.tmpdir.mkdir(name) + + def mkpydir(self, name): + """Create a new python package. + + This creates a (sub)direcotry with an empty ``__init__.py`` + file so that is recognised as a python package. + + """ + p = self.mkdir(name) + p.ensure("__init__.py") + return p + + Session = Session + def getnode(self, config, arg): + """Return the collection node of a file. + + :param config: :py:class:`_pytest.config.Config` instance, see + :py:meth:`parseconfig` and :py:meth:`parseconfigure` to + create the configuration. + + :param arg: A :py:class:`py.path.local` instance of the file. + + """ + session = Session(config) + assert '::' not in str(arg) + p = py.path.local(arg) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([str(p)], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) + return res + + def getpathnode(self, path): + """Return the collection node of a file. + + This is like :py:meth:`getnode` but uses + :py:meth:`parseconfigure` to create the (configured) pytest + Config instance. + + :param path: A :py:class:`py.path.local` instance of the file. + + """ + config = self.parseconfigure(path) + session = Session(config) + x = session.fspath.bestrelpath(path) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) + return res + + def genitems(self, colitems): + """Generate all test items from a collection node. + + This recurses into the collection node and returns a list of + all the test items contained within. + + """ + session = colitems[0].session + result = [] + for colitem in colitems: + result.extend(session.genitems(colitem)) + return result + + def runitem(self, source): + """Run the "test_func" Item. 
+ + The calling test instance (the class which contains the test + method) must provide a ``.getrunner()`` method which should + return a runner which can run the test protocol for a single + item, like e.g. :py:func:`_pytest.runner.runtestprotocol`. + + """ + # used from runner functional tests + item = self.getitem(source) + # the test class where we are called from wants to provide the runner + testclassinstance = self.request.instance + runner = testclassinstance.getrunner() + return runner(item) + + def inline_runsource(self, source, *cmdlineargs): + """Run a test module in process using ``pytest.main()``. + + This run writes "source" into a temporary file and runs + ``pytest.main()`` on it, returning a :py:class:`HookRecorder` + instance for the result. + + :param source: The source code of the test module. + + :param cmdlineargs: Any extra command line arguments to use. + + :return: :py:class:`HookRecorder` instance of the result. + + """ + p = self.makepyfile(source) + l = list(cmdlineargs) + [p] + return self.inline_run(*l) + + def inline_genitems(self, *args): + """Run ``pytest.main(['--collectonly'])`` in-process. + + Retuns a tuple of the collected items and a + :py:class:`HookRecorder` instance. + + This runs the :py:func:`pytest.main` function to run all of + pytest inside the test process itself like + :py:meth:`inline_run`. However the return value is a tuple of + the collection items and a :py:class:`HookRecorder` instance. + + """ + rec = self.inline_run("--collect-only", *args) + items = [x.item for x in rec.getcalls("pytest_itemcollected")] + return items, rec + + def inline_run(self, *args, **kwargs): + """Run ``pytest.main()`` in-process, returning a HookRecorder. + + This runs the :py:func:`pytest.main` function to run all of + pytest inside the test process itself. This means it can + return a :py:class:`HookRecorder` instance which gives more + detailed results from then run then can be done by matching + stdout/stderr from :py:meth:`runpytest`. + + :param args: Any command line arguments to pass to + :py:func:`pytest.main`. + + :param plugin: (keyword-only) Extra plugin instances the + ``pytest.main()`` instance should use. + + :return: A :py:class:`HookRecorder` instance. + """ + # When running py.test inline any plugins active in the main + # test process are already imported. So this disables the + # warning which will trigger to say they can no longer be + # re-written, which is fine as they are already re-written. 
+ orig_warn = AssertionRewritingHook._warn_already_imported + + def revert(): + AssertionRewritingHook._warn_already_imported = orig_warn + + self.request.addfinalizer(revert) + AssertionRewritingHook._warn_already_imported = lambda *a: None + + rec = [] + + class Collect: + def pytest_configure(x, config): + rec.append(self.make_hook_recorder(config.pluginmanager)) + + plugins = kwargs.get("plugins") or [] + plugins.append(Collect()) + ret = pytest.main(list(args), plugins=plugins) + self.delete_loaded_modules() + if len(rec) == 1: + reprec = rec.pop() + else: + class reprec: + pass + reprec.ret = ret + + # typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing + if ret == 2 and not kwargs.get("no_reraise_ctrlc"): + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() + return reprec + + def runpytest_inprocess(self, *args, **kwargs): + """ Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides. """ + if kwargs.get("syspathinsert"): + self.syspathinsert() + now = time.time() + capture = py.io.StdCapture() + try: + try: + reprec = self.inline_run(*args, **kwargs) + except SystemExit as e: + + class reprec: + ret = e.args[0] + + except Exception: + traceback.print_exc() + + class reprec: + ret = 3 + finally: + out, err = capture.reset() + sys.stdout.write(out) + sys.stderr.write(err) + + res = RunResult(reprec.ret, + out.split("\n"), err.split("\n"), + time.time()-now) + res.reprec = reprec + return res + + def runpytest(self, *args, **kwargs): + """ Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`RunResult`. + + """ + args = self._ensure_basetemp(args) + return self._runpytest_method(*args, **kwargs) + + def _ensure_basetemp(self, args): + args = [str(x) for x in args] + for x in args: + if str(x).startswith('--basetemp'): + #print ("basedtemp exists: %s" %(args,)) + break + else: + args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp')) + #print ("added basetemp: %s" %(args,)) + return args + + def parseconfig(self, *args): + """Return a new pytest Config instance from given commandline args. + + This invokes the pytest bootstrapping code in _pytest.config + to create a new :py:class:`_pytest.core.PluginManager` and + call the pytest_cmdline_parse hook to create new + :py:class:`_pytest.config.Config` instance. + + If :py:attr:`plugins` has been populated they should be plugin + modules which will be registered with the PluginManager. + + """ + args = self._ensure_basetemp(args) + + import _pytest.config + config = _pytest.config._prepareconfig(args, self.plugins) + # we don't know what the test will do with this half-setup config + # object and thus we make sure it gets unconfigured properly in any + # case (otherwise capturing could still be active, for example) + self.request.addfinalizer(config._ensure_unconfigure) + return config + + def parseconfigure(self, *args): + """Return a new pytest configured Config instance. + + This returns a new :py:class:`_pytest.config.Config` instance + like :py:meth:`parseconfig`, but also calls the + pytest_configure hook. + + """ + config = self.parseconfig(*args) + config._do_configure() + self.request.addfinalizer(config._ensure_unconfigure) + return config + + def getitem(self, source, funcname="test_func"): + """Return the test item for a test function. 
+ + This writes the source to a python file and runs pytest's + collection on the resulting module, returning the test item + for the requested function name. + + :param source: The module source. + + :param funcname: The name of the test function for which the + Item must be returned. + + """ + items = self.getitems(source) + for item in items: + if item.name == funcname: + return item + assert 0, "%r item not found in module:\n%s\nitems: %s" %( + funcname, source, items) + + def getitems(self, source): + """Return all test items collected from the module. + + This writes the source to a python file and runs pytest's + collection on the resulting module, returning all test items + contained within. + + """ + modcol = self.getmodulecol(source) + return self.genitems([modcol]) + + def getmodulecol(self, source, configargs=(), withinit=False): + """Return the module collection node for ``source``. + + This writes ``source`` to a file using :py:meth:`makepyfile` + and then runs the pytest collection on it, returning the + collection node for the test module. + + :param source: The source code of the module to collect. + + :param configargs: Any extra arguments to pass to + :py:meth:`parseconfigure`. + + :param withinit: Whether to also write a ``__init__.py`` file + to the temporarly directory to ensure it is a package. + + """ + kw = {self.request.function.__name__: Source(source).strip()} + path = self.makepyfile(**kw) + if withinit: + self.makepyfile(__init__ = "#") + self.config = config = self.parseconfigure(path, *configargs) + node = self.getnode(config, path) + return node + + def collect_by_name(self, modcol, name): + """Return the collection node for name from the module collection. + + This will search a module collection node for a collection + node matching the given name. + + :param modcol: A module collection node, see + :py:meth:`getmodulecol`. + + :param name: The name of the node to return. + + """ + for colitem in modcol._memocollect(): + if colitem.name == name: + return colitem + + def popen(self, cmdargs, stdout, stderr, **kw): + """Invoke subprocess.Popen. + + This calls subprocess.Popen making sure the current working + directory is the PYTHONPATH. + + You probably want to use :py:meth:`run` instead. + + """ + env = os.environ.copy() + env['PYTHONPATH'] = os.pathsep.join(filter(None, [ + str(os.getcwd()), env.get('PYTHONPATH', '')])) + kw['env'] = env + return subprocess.Popen(cmdargs, + stdout=stdout, stderr=stderr, **kw) + + def run(self, *cmdargs): + """Run a command with arguments. + + Run a process using subprocess.Popen saving the stdout and + stderr. + + Returns a :py:class:`RunResult`. 
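A minimal sketch of running an arbitrary command through this helper; the script contents are illustrative:

    import sys

    def test_run_external_command(testdir):
        script = testdir.makepyfile('print("hello from a subprocess")')
        result = testdir.run(sys.executable, script)   # returns a RunResult
        assert result.ret == 0
        result.stdout.fnmatch_lines(["hello from a subprocess"])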
+ + """ + return self._run(*cmdargs) + + def _run(self, *cmdargs): + cmdargs = [str(x) for x in cmdargs] + p1 = self.tmpdir.join("stdout") + p2 = self.tmpdir.join("stderr") + print_("running:", ' '.join(cmdargs)) + print_(" in:", str(py.path.local())) + f1 = codecs.open(str(p1), "w", encoding="utf8") + f2 = codecs.open(str(p2), "w", encoding="utf8") + try: + now = time.time() + popen = self.popen(cmdargs, stdout=f1, stderr=f2, + close_fds=(sys.platform != "win32")) + ret = popen.wait() + finally: + f1.close() + f2.close() + f1 = codecs.open(str(p1), "r", encoding="utf8") + f2 = codecs.open(str(p2), "r", encoding="utf8") + try: + out = f1.read().splitlines() + err = f2.read().splitlines() + finally: + f1.close() + f2.close() + self._dump_lines(out, sys.stdout) + self._dump_lines(err, sys.stderr) + return RunResult(ret, out, err, time.time()-now) + + def _dump_lines(self, lines, fp): + try: + for line in lines: + py.builtin.print_(line, file=fp) + except UnicodeEncodeError: + print("couldn't print to %s because of encoding" % (fp,)) + + def _getpytestargs(self): + # we cannot use "(sys.executable,script)" + # because on windows the script is e.g. a pytest.exe + return (sys.executable, _pytest_fullpath,) # noqa + + def runpython(self, script): + """Run a python script using sys.executable as interpreter. + + Returns a :py:class:`RunResult`. + """ + return self.run(sys.executable, script) + + def runpython_c(self, command): + """Run python -c "command", return a :py:class:`RunResult`.""" + return self.run(sys.executable, "-c", command) + + def runpytest_subprocess(self, *args, **kwargs): + """Run pytest as a subprocess with given arguments. + + Any plugins added to the :py:attr:`plugins` list will added + using the ``-p`` command line option. Addtionally + ``--basetemp`` is used put any temporary files and directories + in a numbered directory prefixed with "runpytest-" so they do + not conflict with the normal numberd pytest location for + temporary files and directories. + + Returns a :py:class:`RunResult`. + + """ + p = py.path.local.make_numbered_dir(prefix="runpytest-", + keep=None, rootdir=self.tmpdir) + args = ('--basetemp=%s' % p, ) + args + #for x in args: + # if '--confcutdir' in str(x): + # break + #else: + # pass + # args = ('--confcutdir=.',) + args + plugins = [x for x in self.plugins if isinstance(x, str)] + if plugins: + args = ('-p', plugins[0]) + args + args = self._getpytestargs() + args + return self.run(*args) + + def spawn_pytest(self, string, expect_timeout=10.0): + """Run pytest using pexpect. + + This makes sure to use the right pytest and sets up the + temporary directory locations. + + The pexpect child is returned. + + """ + basetemp = self.tmpdir.mkdir("pexpect") + invoke = " ".join(map(str, self._getpytestargs())) + cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) + return self.spawn(cmd, expect_timeout=expect_timeout) + + def spawn(self, cmd, expect_timeout=10.0): + """Run a command using pexpect. + + The pexpect child is returned. 
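A minimal sketch of driving an interactive pytest run through pexpect, assuming pexpect is installed; the test file and expected prompt are illustrative:

    def test_pdb_prompt(testdir):
        p = testdir.makepyfile("def test_fail(): assert 0")
        child = testdir.spawn_pytest("--pdb %s" % p)
        child.expect("Pdb")          # wait for the debugger prompt
        child.sendline("c")          # continue, letting the run finish
        child.wait()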
+ """ + pexpect = pytest.importorskip("pexpect", "3.0") + if hasattr(sys, 'pypy_version_info') and '64' in platform.machine(): + pytest.skip("pypy-64 bit not supported") + if sys.platform.startswith("freebsd"): + pytest.xfail("pexpect does not work reliably on freebsd") + logfile = self.tmpdir.join("spawn.out").open("wb") + child = pexpect.spawn(cmd, logfile=logfile) + self.request.addfinalizer(logfile.close) + child.timeout = expect_timeout + return child + +def getdecoded(out): + try: + return out.decode("utf-8") + except UnicodeDecodeError: + return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % ( + py.io.saferepr(out),) + + +class LineComp: + def __init__(self): + self.stringio = py.io.TextIO() + + def assert_contains_lines(self, lines2): + """ assert that lines2 are contained (linearly) in lines1. + return a list of extralines found. + """ + __tracebackhide__ = True + val = self.stringio.getvalue() + self.stringio.truncate(0) + self.stringio.seek(0) + lines1 = val.split("\n") + return LineMatcher(lines1).fnmatch_lines(lines2) + + +class LineMatcher: + """Flexible matching of text. + + This is a convenience class to test large texts like the output of + commands. + + The constructor takes a list of lines without their trailing + newlines, i.e. ``text.splitlines()``. + + """ + + def __init__(self, lines): + self.lines = lines + self._log_output = [] + + def str(self): + """Return the entire original text.""" + return "\n".join(self.lines) + + def _getlines(self, lines2): + if isinstance(lines2, str): + lines2 = Source(lines2) + if isinstance(lines2, Source): + lines2 = lines2.strip().lines + return lines2 + + def fnmatch_lines_random(self, lines2): + """Check lines exist in the output. + + The argument is a list of lines which have to occur in the + output, in any order. Each line can contain glob whildcards. + + """ + lines2 = self._getlines(lines2) + for line in lines2: + for x in self.lines: + if line == x or fnmatch(x, line): + self._log("matched: ", repr(line)) + break + else: + self._log("line %r not found in output" % line) + raise ValueError(self._log_text) + + def get_lines_after(self, fnline): + """Return all lines following the given line in the text. + + The given line can contain glob wildcards. + """ + for i, line in enumerate(self.lines): + if fnline == line or fnmatch(line, fnline): + return self.lines[i+1:] + raise ValueError("line %r not found in output" % fnline) + + def _log(self, *args): + self._log_output.append(' '.join((str(x) for x in args))) + + @property + def _log_text(self): + return '\n'.join(self._log_output) + + def fnmatch_lines(self, lines2): + """Search the text for matching lines. + + The argument is a list of lines which have to match and can + use glob wildcards. If they do not match an pytest.fail() is + called. The matches and non-matches are also printed on + stdout. 
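A minimal sketch of matching captured output with this class, using the `LineMatcher` fixture registered above; the sample lines are illustrative:

    def test_match_output(LineMatcher):
        matcher = LineMatcher([
            "collected 3 items",
            "...",
            "2 passed, 1 failed in 0.12 seconds",
        ])
        # glob-style wildcards, matched in order
        matcher.fnmatch_lines(["*collected 3 items*", "*2 passed*"])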
+ + """ + lines2 = self._getlines(lines2) + lines1 = self.lines[:] + nextline = None + extralines = [] + __tracebackhide__ = True + for line in lines2: + nomatchprinted = False + while lines1: + nextline = lines1.pop(0) + if line == nextline: + self._log("exact match:", repr(line)) + break + elif fnmatch(nextline, line): + self._log("fnmatch:", repr(line)) + self._log(" with:", repr(nextline)) + break + else: + if not nomatchprinted: + self._log("nomatch:", repr(line)) + nomatchprinted = True + self._log(" and:", repr(nextline)) + extralines.append(nextline) + else: + self._log("remains unmatched: %r" % (line,)) + pytest.fail(self._log_text) diff --git a/lib/spack/external/_pytest/python.py b/lib/spack/external/_pytest/python.py new file mode 100644 index 0000000000..53815da2f0 --- /dev/null +++ b/lib/spack/external/_pytest/python.py @@ -0,0 +1,1578 @@ +""" Python test discovery, setup and run of test functions. """ + +import fnmatch +import inspect +import sys +import collections +import math +from itertools import count + +import py +import pytest +from _pytest.mark import MarkerError + + +import _pytest +import _pytest._pluggy as pluggy +from _pytest import fixtures +from _pytest.compat import ( + isclass, isfunction, is_generator, _escape_strings, + REGEX_TYPE, STRING_TYPES, NoneType, NOTSET, + get_real_func, getfslineno, safe_getattr, + getlocation, enum, +) + +cutdir1 = py.path.local(pluggy.__file__.rstrip("oc")) +cutdir2 = py.path.local(_pytest.__file__).dirpath() +cutdir3 = py.path.local(py.__file__).dirpath() + + +def filter_traceback(entry): + """Return True if a TracebackEntry instance should be removed from tracebacks: + * dynamically generated code (no code to show up for it); + * internal traceback from pytest or its internal libraries, py and pluggy. + """ + # entry.path might sometimes return a str object when the entry + # points to dynamically generated code + # see https://bitbucket.org/pytest-dev/py/issues/71 + raw_filename = entry.frame.code.raw.co_filename + is_generated = '<' in raw_filename and '>' in raw_filename + if is_generated: + return False + # entry.path might point to an inexisting file, in which case it will + # alsso return a str object. see #1133 + p = py.path.local(entry.path) + return p != cutdir1 and not p.relto(cutdir2) and not p.relto(cutdir3) + + + +def pyobj_property(name): + def get(self): + node = self.getparent(getattr(pytest, name)) + if node is not None: + return node.obj + doc = "python %s object this node was collected from (can be None)." 
% ( + name.lower(),) + return property(get, None, None, doc) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption('--fixtures', '--funcargs', + action="store_true", dest="showfixtures", default=False, + help="show available fixtures, sorted by plugin appearance") + group.addoption( + '--fixtures-per-test', + action="store_true", + dest="show_fixtures_per_test", + default=False, + help="show fixtures per test", + ) + parser.addini("usefixtures", type="args", default=[], + help="list of default fixtures to be used with this project") + parser.addini("python_files", type="args", + default=['test_*.py', '*_test.py'], + help="glob-style file patterns for Python test module discovery") + parser.addini("python_classes", type="args", default=["Test",], + help="prefixes or glob names for Python test class discovery") + parser.addini("python_functions", type="args", default=["test",], + help="prefixes or glob names for Python test function and " + "method discovery") + + group.addoption("--import-mode", default="prepend", + choices=["prepend", "append"], dest="importmode", + help="prepend/append to sys.path when importing test modules, " + "default is to prepend.") + + +def pytest_cmdline_main(config): + if config.option.showfixtures: + showfixtures(config) + return 0 + if config.option.show_fixtures_per_test: + show_fixtures_per_test(config) + return 0 + + +def pytest_generate_tests(metafunc): + # those alternative spellings are common - raise a specific error to alert + # the user + alt_spellings = ['parameterize', 'parametrise', 'parameterise'] + for attr in alt_spellings: + if hasattr(metafunc.function, attr): + msg = "{0} has '{1}', spelling should be 'parametrize'" + raise MarkerError(msg.format(metafunc.function.__name__, attr)) + try: + markers = metafunc.function.parametrize + except AttributeError: + return + for marker in markers: + metafunc.parametrize(*marker.args, **marker.kwargs) + +def pytest_configure(config): + config.addinivalue_line("markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. " + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see http://pytest.org/latest/parametrize.html for more info and " + "examples." + ) + config.addinivalue_line("markers", + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. 
see http://pytest.org/latest/fixture.html#usefixtures " + ) + +@pytest.hookimpl(trylast=True) +def pytest_namespace(): + raises.Exception = pytest.fail.Exception + return { + 'raises': raises, + 'approx': approx, + 'collect': { + 'Module': Module, + 'Class': Class, + 'Instance': Instance, + 'Function': Function, + 'Generator': Generator, + } + } + + +@pytest.hookimpl(trylast=True) +def pytest_pyfunc_call(pyfuncitem): + testfunction = pyfuncitem.obj + if pyfuncitem._isyieldedfunction(): + testfunction(*pyfuncitem._args) + else: + funcargs = pyfuncitem.funcargs + testargs = {} + for arg in pyfuncitem._fixtureinfo.argnames: + testargs[arg] = funcargs[arg] + testfunction(**testargs) + return True + +def pytest_collect_file(path, parent): + ext = path.ext + if ext == ".py": + if not parent.session.isinitpath(path): + for pat in parent.config.getini('python_files'): + if path.fnmatch(pat): + break + else: + return + ihook = parent.session.gethookproxy(path) + return ihook.pytest_pycollect_makemodule(path=path, parent=parent) + +def pytest_pycollect_makemodule(path, parent): + return Module(path, parent) + +@pytest.hookimpl(hookwrapper=True) +def pytest_pycollect_makeitem(collector, name, obj): + outcome = yield + res = outcome.get_result() + if res is not None: + raise StopIteration + # nothing was collected elsewhere, let's do it here + if isclass(obj): + if collector.istestclass(obj, name): + Class = collector._getcustomclass("Class") + outcome.force_result(Class(name, parent=collector)) + elif collector.istestfunction(obj, name): + # mock seems to store unbound methods (issue473), normalize it + obj = getattr(obj, "__func__", obj) + # We need to try and unwrap the function if it's a functools.partial + # or a funtools.wrapped. + # We musn't if it's been wrapped with mock.patch (python 2 only) + if not (isfunction(obj) or isfunction(get_real_func(obj))): + collector.warn(code="C2", message= + "cannot collect %r because it is not a function." + % name, ) + elif getattr(obj, "__test__", True): + if is_generator(obj): + res = Generator(name, parent=collector) + else: + res = list(collector._genfunctions(name, obj)) + outcome.force_result(res) + +def pytest_make_parametrize_id(config, val): + return None + + + +class PyobjContext(object): + module = pyobj_property("Module") + cls = pyobj_property("Class") + instance = pyobj_property("Instance") + +class PyobjMixin(PyobjContext): + def obj(): + def fget(self): + obj = getattr(self, '_obj', None) + if obj is None: + self._obj = obj = self._getobj() + return obj + + def fset(self, value): + self._obj = value + + return property(fget, fset, None, "underlying python object") + + obj = obj() + + def _getobj(self): + return getattr(self.parent.obj, self.name) + + def getmodpath(self, stopatmodule=True, includemodule=False): + """ return python path relative to the containing module. """ + chain = self.listchain() + chain.reverse() + parts = [] + for node in chain: + if isinstance(node, Instance): + continue + name = node.name + if isinstance(node, Module): + assert name.endswith(".py") + name = name[:-3] + if stopatmodule: + if includemodule: + parts.append(name) + break + parts.append(name) + parts.reverse() + s = ".".join(parts) + return s.replace(".[", "[") + + def _getfslineno(self): + return getfslineno(self.obj) + + def reportinfo(self): + # XXX caching? 
+ obj = self.obj + compat_co_firstlineno = getattr(obj, 'compat_co_firstlineno', None) + if isinstance(compat_co_firstlineno, int): + # nose compatibility + fspath = sys.modules[obj.__module__].__file__ + if fspath.endswith(".pyc"): + fspath = fspath[:-1] + lineno = compat_co_firstlineno + else: + fspath, lineno = getfslineno(obj) + modpath = self.getmodpath() + assert isinstance(lineno, int) + return fspath, lineno, modpath + +class PyCollector(PyobjMixin, pytest.Collector): + + def funcnamefilter(self, name): + return self._matches_prefix_or_glob_option('python_functions', name) + + def isnosetest(self, obj): + """ Look for the __test__ attribute, which is applied by the + @nose.tools.istest decorator + """ + # We explicitly check for "is True" here to not mistakenly treat + # classes with a custom __getattr__ returning something truthy (like a + # function) as test classes. + return safe_getattr(obj, '__test__', False) is True + + def classnamefilter(self, name): + return self._matches_prefix_or_glob_option('python_classes', name) + + def istestfunction(self, obj, name): + return ( + (self.funcnamefilter(name) or self.isnosetest(obj)) and + safe_getattr(obj, "__call__", False) and fixtures.getfixturemarker(obj) is None + ) + + def istestclass(self, obj, name): + return self.classnamefilter(name) or self.isnosetest(obj) + + def _matches_prefix_or_glob_option(self, option_name, name): + """ + checks if the given name matches the prefix or glob-pattern defined + in ini configuration. + """ + for option in self.config.getini(option_name): + if name.startswith(option): + return True + # check that name looks like a glob-string before calling fnmatch + # because this is called for every name in each collected module, + # and fnmatch is somewhat expensive to call + elif ('*' in option or '?' in option or '[' in option) and \ + fnmatch.fnmatch(name, option): + return True + return False + + def collect(self): + if not getattr(self.obj, "__test__", True): + return [] + + # NB. we avoid random getattrs and peek in the __dict__ instead + # (XXX originally introduced from a PyPy need, still true?) 
+ dicts = [getattr(self.obj, '__dict__', {})] + for basecls in inspect.getmro(self.obj.__class__): + dicts.append(basecls.__dict__) + seen = {} + l = [] + for dic in dicts: + for name, obj in list(dic.items()): + if name in seen: + continue + seen[name] = True + res = self.makeitem(name, obj) + if res is None: + continue + if not isinstance(res, list): + res = [res] + l.extend(res) + l.sort(key=lambda item: item.reportinfo()[:2]) + return l + + def makeitem(self, name, obj): + #assert self.ihook.fspath == self.fspath, self + return self.ihook.pytest_pycollect_makeitem( + collector=self, name=name, obj=obj) + + def _genfunctions(self, name, funcobj): + module = self.getparent(Module).obj + clscol = self.getparent(Class) + cls = clscol and clscol.obj or None + transfer_markers(funcobj, cls, module) + fm = self.session._fixturemanager + fixtureinfo = fm.getfixtureinfo(self, funcobj, cls) + metafunc = Metafunc(funcobj, fixtureinfo, self.config, + cls=cls, module=module) + methods = [] + if hasattr(module, "pytest_generate_tests"): + methods.append(module.pytest_generate_tests) + if hasattr(cls, "pytest_generate_tests"): + methods.append(cls().pytest_generate_tests) + if methods: + self.ihook.pytest_generate_tests.call_extra(methods, + dict(metafunc=metafunc)) + else: + self.ihook.pytest_generate_tests(metafunc=metafunc) + + Function = self._getcustomclass("Function") + if not metafunc._calls: + yield Function(name, parent=self, fixtureinfo=fixtureinfo) + else: + # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs + fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm) + + for callspec in metafunc._calls: + subname = "%s[%s]" % (name, callspec.id) + yield Function(name=subname, parent=self, + callspec=callspec, callobj=funcobj, + fixtureinfo=fixtureinfo, + keywords={callspec.id:True}, + originalname=name, + ) + + +def _marked(func, mark): + """ Returns True if :func: is already marked with :mark:, False otherwise. + This can happen if marker is applied to class and the test file is + invoked more than once. + """ + try: + func_mark = getattr(func, mark.name) + except AttributeError: + return False + return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs + + +def transfer_markers(funcobj, cls, mod): + # XXX this should rather be code in the mark plugin or the mark + # plugin should merge with the python plugin. + for holder in (cls, mod): + try: + pytestmark = holder.pytestmark + except AttributeError: + continue + if isinstance(pytestmark, list): + for mark in pytestmark: + if not _marked(funcobj, mark): + mark(funcobj) + else: + if not _marked(funcobj, pytestmark): + pytestmark(funcobj) + +class Module(pytest.File, PyCollector): + """ Collector for test classes and functions. 
""" + def _getobj(self): + return self._memoizedcall('_obj', self._importtestmodule) + + def collect(self): + self.session._fixturemanager.parsefactories(self) + return super(Module, self).collect() + + def _importtestmodule(self): + # we assume we are only called once per module + importmode = self.config.getoption("--import-mode") + try: + mod = self.fspath.pyimport(ensuresyspath=importmode) + except SyntaxError: + raise self.CollectError( + _pytest._code.ExceptionInfo().getrepr(style="short")) + except self.fspath.ImportMismatchError: + e = sys.exc_info()[1] + raise self.CollectError( + "import file mismatch:\n" + "imported module %r has this __file__ attribute:\n" + " %s\n" + "which is not the same as the test file we want to collect:\n" + " %s\n" + "HINT: remove __pycache__ / .pyc files and/or use a " + "unique basename for your test file modules" + % e.args + ) + except ImportError: + from _pytest._code.code import ExceptionInfo + exc_info = ExceptionInfo() + if self.config.getoption('verbose') < 2: + exc_info.traceback = exc_info.traceback.filter(filter_traceback) + exc_repr = exc_info.getrepr(style='short') if exc_info.traceback else exc_info.exconly() + formatted_tb = py._builtin._totext(exc_repr) + raise self.CollectError( + "ImportError while importing test module '{fspath}'.\n" + "Hint: make sure your test modules/packages have valid Python names.\n" + "Traceback:\n" + "{traceback}".format(fspath=self.fspath, traceback=formatted_tb) + ) + except _pytest.runner.Skipped as e: + if e.allow_module_level: + raise + raise self.CollectError( + "Using pytest.skip outside of a test is not allowed. If you are " + "trying to decorate a test function, use the @pytest.mark.skip " + "or @pytest.mark.skipif decorators instead." + ) + self.config.pluginmanager.consider_module(mod) + return mod + + def setup(self): + setup_module = _get_xunit_setup_teardown(self.obj, "setUpModule") + if setup_module is None: + setup_module = _get_xunit_setup_teardown(self.obj, "setup_module") + if setup_module is not None: + setup_module() + + teardown_module = _get_xunit_setup_teardown(self.obj, 'tearDownModule') + if teardown_module is None: + teardown_module = _get_xunit_setup_teardown(self.obj, 'teardown_module') + if teardown_module is not None: + self.addfinalizer(teardown_module) + + +def _get_xunit_setup_teardown(holder, attr_name, param_obj=None): + """ + Return a callable to perform xunit-style setup or teardown if + the function exists in the ``holder`` object. + The ``param_obj`` parameter is the parameter which will be passed to the function + when the callable is called without arguments, defaults to the ``holder`` object. + Return ``None`` if a suitable callable is not found. + """ + param_obj = param_obj if param_obj is not None else holder + result = _get_xunit_func(holder, attr_name) + if result is not None: + arg_count = result.__code__.co_argcount + if inspect.ismethod(result): + arg_count -= 1 + if arg_count: + return lambda: result(param_obj) + else: + return result + + +def _get_xunit_func(obj, name): + """Return the attribute from the given object to be used as a setup/teardown + xunit-style function, but only if not marked as a fixture to + avoid calling it twice. + """ + meth = getattr(obj, name, None) + if fixtures.getfixturemarker(meth) is None: + return meth + + +class Class(PyCollector): + """ Collector for test methods. 
""" + def collect(self): + if hasinit(self.obj): + self.warn("C1", "cannot collect test class %r because it has a " + "__init__ constructor" % self.obj.__name__) + return [] + elif hasnew(self.obj): + self.warn("C1", "cannot collect test class %r because it has a " + "__new__ constructor" % self.obj.__name__) + return [] + return [self._getcustomclass("Instance")(name="()", parent=self)] + + def setup(self): + setup_class = _get_xunit_func(self.obj, 'setup_class') + if setup_class is not None: + setup_class = getattr(setup_class, 'im_func', setup_class) + setup_class = getattr(setup_class, '__func__', setup_class) + setup_class(self.obj) + + fin_class = getattr(self.obj, 'teardown_class', None) + if fin_class is not None: + fin_class = getattr(fin_class, 'im_func', fin_class) + fin_class = getattr(fin_class, '__func__', fin_class) + self.addfinalizer(lambda: fin_class(self.obj)) + +class Instance(PyCollector): + def _getobj(self): + return self.parent.obj() + + def collect(self): + self.session._fixturemanager.parsefactories(self) + return super(Instance, self).collect() + + def newinstance(self): + self.obj = self._getobj() + return self.obj + +class FunctionMixin(PyobjMixin): + """ mixin for the code common to Function and Generator. + """ + + def setup(self): + """ perform setup for this test function. """ + if hasattr(self, '_preservedparent'): + obj = self._preservedparent + elif isinstance(self.parent, Instance): + obj = self.parent.newinstance() + self.obj = self._getobj() + else: + obj = self.parent.obj + if inspect.ismethod(self.obj): + setup_name = 'setup_method' + teardown_name = 'teardown_method' + else: + setup_name = 'setup_function' + teardown_name = 'teardown_function' + setup_func_or_method = _get_xunit_setup_teardown(obj, setup_name, param_obj=self.obj) + if setup_func_or_method is not None: + setup_func_or_method() + teardown_func_or_method = _get_xunit_setup_teardown(obj, teardown_name, param_obj=self.obj) + if teardown_func_or_method is not None: + self.addfinalizer(teardown_func_or_method) + + def _prunetraceback(self, excinfo): + if hasattr(self, '_obj') and not self.config.option.fulltrace: + code = _pytest._code.Code(get_real_func(self.obj)) + path, firstlineno = code.path, code.firstlineno + traceback = excinfo.traceback + ntraceback = traceback.cut(path=path, firstlineno=firstlineno) + if ntraceback == traceback: + ntraceback = ntraceback.cut(path=path) + if ntraceback == traceback: + #ntraceback = ntraceback.cut(excludepath=cutdir2) + ntraceback = ntraceback.filter(filter_traceback) + if not ntraceback: + ntraceback = traceback + + excinfo.traceback = ntraceback.filter() + # issue364: mark all but first and last frames to + # only show a single-line message for each frame + if self.config.option.tbstyle == "auto": + if len(excinfo.traceback) > 2: + for entry in excinfo.traceback[1:-1]: + entry.set_repr_style('short') + + def _repr_failure_py(self, excinfo, style="long"): + if excinfo.errisinstance(pytest.fail.Exception): + if not excinfo.value.pytrace: + return py._builtin._totext(excinfo.value) + return super(FunctionMixin, self)._repr_failure_py(excinfo, + style=style) + + def repr_failure(self, excinfo, outerr=None): + assert outerr is None, "XXX outerr usage is deprecated" + style = self.config.option.tbstyle + if style == "auto": + style = "long" + return self._repr_failure_py(excinfo, style=style) + + +class Generator(FunctionMixin, PyCollector): + def collect(self): + # test generators are seen as collectors but they also + # invoke setup/teardown on 
popular request + # (induced by the common "test_*" naming shared with normal tests) + from _pytest import deprecated + self.session._setupstate.prepare(self) + # see FunctionMixin.setup and test_setupstate_is_preserved_134 + self._preservedparent = self.parent.obj + l = [] + seen = {} + for i, x in enumerate(self.obj()): + name, call, args = self.getcallargs(x) + if not callable(call): + raise TypeError("%r yielded non callable test %r" %(self.obj, call,)) + if name is None: + name = "[%d]" % i + else: + name = "['%s']" % name + if name in seen: + raise ValueError("%r generated tests with non-unique name %r" %(self, name)) + seen[name] = True + l.append(self.Function(name, self, args=args, callobj=call)) + self.config.warn('C1', deprecated.YIELD_TESTS, fslocation=self.fspath) + return l + + def getcallargs(self, obj): + if not isinstance(obj, (tuple, list)): + obj = (obj,) + # explict naming + if isinstance(obj[0], py.builtin._basestring): + name = obj[0] + obj = obj[1:] + else: + name = None + call, args = obj[0], obj[1:] + return name, call, args + + +def hasinit(obj): + init = getattr(obj, '__init__', None) + if init: + return init != object.__init__ + + +def hasnew(obj): + new = getattr(obj, '__new__', None) + if new: + return new != object.__new__ + + +class CallSpec2(object): + def __init__(self, metafunc): + self.metafunc = metafunc + self.funcargs = {} + self._idlist = [] + self.params = {} + self._globalid = NOTSET + self._globalid_args = set() + self._globalparam = NOTSET + self._arg2scopenum = {} # used for sorting parametrized resources + self.keywords = {} + self.indices = {} + + def copy(self, metafunc): + cs = CallSpec2(self.metafunc) + cs.funcargs.update(self.funcargs) + cs.params.update(self.params) + cs.keywords.update(self.keywords) + cs.indices.update(self.indices) + cs._arg2scopenum.update(self._arg2scopenum) + cs._idlist = list(self._idlist) + cs._globalid = self._globalid + cs._globalid_args = self._globalid_args + cs._globalparam = self._globalparam + return cs + + def _checkargnotcontained(self, arg): + if arg in self.params or arg in self.funcargs: + raise ValueError("duplicate %r" %(arg,)) + + def getparam(self, name): + try: + return self.params[name] + except KeyError: + if self._globalparam is NOTSET: + raise ValueError(name) + return self._globalparam + + @property + def id(self): + return "-".join(map(str, filter(None, self._idlist))) + + def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum, + param_index): + for arg,val in zip(argnames, valset): + self._checkargnotcontained(arg) + valtype_for_arg = valtypes[arg] + getattr(self, valtype_for_arg)[arg] = val + self.indices[arg] = param_index + self._arg2scopenum[arg] = scopenum + self._idlist.append(id) + self.keywords.update(keywords) + + def setall(self, funcargs, id, param): + for x in funcargs: + self._checkargnotcontained(x) + self.funcargs.update(funcargs) + if id is not NOTSET: + self._idlist.append(id) + if param is not NOTSET: + assert self._globalparam is NOTSET + self._globalparam = param + for arg in funcargs: + self._arg2scopenum[arg] = fixtures.scopenum_function + + +class Metafunc(fixtures.FuncargnamesCompatAttr): + """ + Metafunc objects are passed to the ``pytest_generate_tests`` hook. + They help to inspect a test function and to generate tests according to + test configuration or values specified in the class or module where a + test function is defined. 
+ """ + def __init__(self, function, fixtureinfo, config, cls=None, module=None): + #: access to the :class:`_pytest.config.Config` object for the test session + self.config = config + + #: the module object where the test function is defined in. + self.module = module + + #: underlying python test function + self.function = function + + #: set of fixture names required by the test function + self.fixturenames = fixtureinfo.names_closure + + #: class object where the test function is defined in or ``None``. + self.cls = cls + + self._calls = [] + self._ids = py.builtin.set() + self._arg2fixturedefs = fixtureinfo.name2fixturedefs + + def parametrize(self, argnames, argvalues, indirect=False, ids=None, + scope=None): + """ Add new invocations to the underlying test function using the list + of argvalues for the given argnames. Parametrization is performed + during the collection phase. If you need to setup expensive resources + see about setting indirect to do it rather at test setup time. + + :arg argnames: a comma-separated string denoting one or more argument + names, or a list/tuple of argument strings. + + :arg argvalues: The list of argvalues determines how often a + test is invoked with different argument values. If only one + argname was specified argvalues is a list of values. If N + argnames were specified, argvalues must be a list of N-tuples, + where each tuple-element specifies a value for its respective + argname. + + :arg indirect: The list of argnames or boolean. A list of arguments' + names (subset of argnames). If True the list contains all names from + the argnames. Each argvalue corresponding to an argname in this list will + be passed as request.param to its respective argname fixture + function so that it can perform more expensive setups during the + setup phase of a test rather than at collection time. + + :arg ids: list of string ids, or a callable. + If strings, each is corresponding to the argvalues so that they are + part of the test id. If None is given as id of specific test, the + automatically generated id for that argument will be used. + If callable, it should take one argument (a single argvalue) and return + a string or return None. If None, the automatically generated id for that + argument will be used. + If no ids are provided they will be generated automatically from + the argvalues. + + :arg scope: if specified it denotes the scope of the parameters. + The scope is used for grouping tests by parameter instances. + It will also override any fixture-function defined scope, allowing + to set a dynamic scope using test context or configuration. 
+ """ + from _pytest.fixtures import scope2index + from _pytest.mark import extract_argvalue + from py.io import saferepr + + unwrapped_argvalues = [] + newkeywords = [] + for maybe_marked_args in argvalues: + argval, newmarks = extract_argvalue(maybe_marked_args) + unwrapped_argvalues.append(argval) + newkeywords.append(newmarks) + argvalues = unwrapped_argvalues + + if not isinstance(argnames, (tuple, list)): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + if len(argnames) == 1: + argvalues = [(val,) for val in argvalues] + if not argvalues: + argvalues = [(NOTSET,) * len(argnames)] + # we passed a empty list to parameterize, skip that test + # + fs, lineno = getfslineno(self.function) + newmark = pytest.mark.skip( + reason="got empty parameter set %r, function %s at %s:%d" % ( + argnames, self.function.__name__, fs, lineno)) + newkeywords = [{newmark.markname: newmark}] + + if scope is None: + scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) + + scopenum = scope2index( + scope, descr='call to {0}'.format(self.parametrize)) + valtypes = {} + for arg in argnames: + if arg not in self.fixturenames: + if isinstance(indirect, (tuple, list)): + name = 'fixture' if arg in indirect else 'argument' + else: + name = 'fixture' if indirect else 'argument' + raise ValueError( + "%r uses no %s %r" % ( + self.function, name, arg)) + + if indirect is True: + valtypes = dict.fromkeys(argnames, "params") + elif indirect is False: + valtypes = dict.fromkeys(argnames, "funcargs") + elif isinstance(indirect, (tuple, list)): + valtypes = dict.fromkeys(argnames, "funcargs") + for arg in indirect: + if arg not in argnames: + raise ValueError("indirect given to %r: fixture %r doesn't exist" % ( + self.function, arg)) + valtypes[arg] = "params" + idfn = None + if callable(ids): + idfn = ids + ids = None + if ids: + if len(ids) != len(argvalues): + raise ValueError('%d tests specified with %d ids' %( + len(argvalues), len(ids))) + for id_value in ids: + if id_value is not None and not isinstance(id_value, py.builtin._basestring): + msg = 'ids must be list of strings, found: %s (type: %s)' + raise ValueError(msg % (saferepr(id_value), type(id_value).__name__)) + ids = idmaker(argnames, argvalues, idfn, ids, self.config) + newcalls = [] + for callspec in self._calls or [CallSpec2(self)]: + elements = zip(ids, argvalues, newkeywords, count()) + for a_id, valset, keywords, param_index in elements: + assert len(valset) == len(argnames) + newcallspec = callspec.copy(self) + newcallspec.setmulti(valtypes, argnames, valset, a_id, + keywords, scopenum, param_index) + newcalls.append(newcallspec) + self._calls = newcalls + + def addcall(self, funcargs=None, id=NOTSET, param=NOTSET): + """ (deprecated, use parametrize) Add a new call to the underlying + test function during the collection phase of a test run. Note that + request.addcall() is called during the test collection phase prior and + independently to actual test execution. You should only use addcall() + if you need to specify multiple arguments of a test function. + + :arg funcargs: argument keyword dictionary used when invoking + the test function. + + :arg id: used for reporting and identification purposes. If you + don't supply an `id` an automatic unique id will be generated. + + :arg param: a parameter which will be exposed to a later fixture function + invocation through the ``request.param`` attribute. 
+ """ + assert funcargs is None or isinstance(funcargs, dict) + if funcargs is not None: + for name in funcargs: + if name not in self.fixturenames: + pytest.fail("funcarg %r not used in this function." % name) + else: + funcargs = {} + if id is None: + raise ValueError("id=None not allowed") + if id is NOTSET: + id = len(self._calls) + id = str(id) + if id in self._ids: + raise ValueError("duplicate id %r" % id) + self._ids.add(id) + + cs = CallSpec2(self) + cs.setall(funcargs, id, param) + self._calls.append(cs) + + +def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): + """Find the most appropriate scope for a parametrized call based on its arguments. + + When there's at least one direct argument, always use "function" scope. + + When a test function is parametrized and all its arguments are indirect + (e.g. fixtures), return the most narrow scope based on the fixtures used. + + Related to issue #1832, based on code posted by @Kingdread. + """ + from _pytest.fixtures import scopes + indirect_as_list = isinstance(indirect, (list, tuple)) + all_arguments_are_fixtures = indirect is True or \ + indirect_as_list and len(indirect) == argnames + if all_arguments_are_fixtures: + fixturedefs = arg2fixturedefs or {} + used_scopes = [fixturedef[0].scope for name, fixturedef in fixturedefs.items()] + if used_scopes: + # Takes the most narrow scope from used fixtures + for scope in reversed(scopes): + if scope in used_scopes: + return scope + + return 'function' + + +def _idval(val, argname, idx, idfn, config=None): + if idfn: + try: + s = idfn(val) + if s: + return _escape_strings(s) + except Exception: + pass + + if config: + hook_id = config.hook.pytest_make_parametrize_id(config=config, val=val) + if hook_id: + return hook_id + + if isinstance(val, STRING_TYPES): + return _escape_strings(val) + elif isinstance(val, (float, int, bool, NoneType)): + return str(val) + elif isinstance(val, REGEX_TYPE): + return _escape_strings(val.pattern) + elif enum is not None and isinstance(val, enum.Enum): + return str(val) + elif isclass(val) and hasattr(val, '__name__'): + return val.__name__ + return str(argname)+str(idx) + +def _idvalset(idx, valset, argnames, idfn, ids, config=None): + if ids is None or (idx >= len(ids) or ids[idx] is None): + this_id = [_idval(val, argname, idx, idfn, config) + for val, argname in zip(valset, argnames)] + return "-".join(this_id) + else: + return _escape_strings(ids[idx]) + +def idmaker(argnames, argvalues, idfn=None, ids=None, config=None): + ids = [_idvalset(valindex, valset, argnames, idfn, ids, config) + for valindex, valset in enumerate(argvalues)] + if len(set(ids)) != len(ids): + # The ids are not unique + duplicates = [testid for testid in ids if ids.count(testid) > 1] + counters = collections.defaultdict(lambda: 0) + for index, testid in enumerate(ids): + if testid in duplicates: + ids[index] = testid + str(counters[testid]) + counters[testid] += 1 + return ids + + +def show_fixtures_per_test(config): + from _pytest.main import wrap_session + return wrap_session(config, _show_fixtures_per_test) + + +def _show_fixtures_per_test(config, session): + import _pytest.config + session.perform_collect() + curdir = py.path.local() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + def get_best_rel(func): + loc = getlocation(func, curdir) + return curdir.bestrelpath(loc) + + def write_fixture(fixture_def): + argname = fixture_def.argname + + if verbose <= 0 and argname.startswith("_"): + return + if verbose > 0: + 
bestrel = get_best_rel(fixture_def.func) + funcargspec = "{0} -- {1}".format(argname, bestrel) + else: + funcargspec = argname + tw.line(funcargspec, green=True) + + INDENT = ' {0}' + fixture_doc = fixture_def.func.__doc__ + + if fixture_doc: + for line in fixture_doc.strip().split('\n'): + tw.line(INDENT.format(line.strip())) + else: + tw.line(INDENT.format('no docstring available'), red=True) + + def write_item(item): + name2fixturedefs = item._fixtureinfo.name2fixturedefs + + if not name2fixturedefs: + # The given test item does not use any fixtures + return + bestrel = get_best_rel(item.function) + + tw.line() + tw.sep('-', 'fixtures used by {0}'.format(item.name)) + tw.sep('-', '({0})'.format(bestrel)) + for argname, fixture_defs in sorted(name2fixturedefs.items()): + assert fixture_defs is not None + if not fixture_defs: + continue + # The last fixture def item in the list is expected + # to be the one used by the test item + write_fixture(fixture_defs[-1]) + + for item in session.items: + write_item(item) + + +def showfixtures(config): + from _pytest.main import wrap_session + return wrap_session(config, _showfixtures_main) + +def _showfixtures_main(config, session): + import _pytest.config + session.perform_collect() + curdir = py.path.local() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + fm = session._fixturemanager + + available = [] + seen = set() + + for argname, fixturedefs in fm._arg2fixturedefs.items(): + assert fixturedefs is not None + if not fixturedefs: + continue + for fixturedef in fixturedefs: + loc = getlocation(fixturedef.func, curdir) + if (fixturedef.argname, loc) in seen: + continue + seen.add((fixturedef.argname, loc)) + available.append((len(fixturedef.baseid), + fixturedef.func.__module__, + curdir.bestrelpath(loc), + fixturedef.argname, fixturedef)) + + available.sort() + currentmodule = None + for baseid, module, bestrel, argname, fixturedef in available: + if currentmodule != module: + if not module.startswith("_pytest."): + tw.line() + tw.sep("-", "fixtures defined from %s" %(module,)) + currentmodule = module + if verbose <= 0 and argname[0] == "_": + continue + if verbose > 0: + funcargspec = "%s -- %s" %(argname, bestrel,) + else: + funcargspec = argname + tw.line(funcargspec, green=True) + loc = getlocation(fixturedef.func, curdir) + doc = fixturedef.func.__doc__ or "" + if doc: + for line in doc.strip().split("\n"): + tw.line(" " + line.strip()) + else: + tw.line(" %s: no docstring available" %(loc,), + red=True) + + +# builtin pytest.raises helper + +def raises(expected_exception, *args, **kwargs): + """ + Assert that a code block/function call raises ``expected_exception`` + and raise a failure exception otherwise. + + This helper produces a ``ExceptionInfo()`` object (see below). + + If using Python 2.5 or above, you may use this function as a + context manager:: + + >>> with raises(ZeroDivisionError): + ... 1/0 + + .. versionchanged:: 2.10 + + In the context manager form you may use the keyword argument + ``message`` to specify a custom failure message:: + + >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"): + ... pass + Traceback (most recent call last): + ... + Failed: Expecting ZeroDivisionError + + + .. note:: + + When using ``pytest.raises`` as a context manager, it's worthwhile to + note that normal context manager rules apply and that the exception + raised *must* be the final line in the scope of the context manager. 
+ Lines of code after that, within the scope of the context manager will + not be executed. For example:: + + >>> value = 15 + >>> with raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... assert str(exc_info.value) == "value must be <= 10" # this will not execute + + Instead, the following approach must be taken (note the difference in + scope):: + + >>> with raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... + >>> assert str(exc_info.value) == "value must be <= 10" + + + Or you can specify a callable by passing a to-be-called lambda:: + + >>> raises(ZeroDivisionError, lambda: 1/0) + + + or you can specify an arbitrary callable with arguments:: + + >>> def f(x): return 1/x + ... + >>> raises(ZeroDivisionError, f, 0) + + >>> raises(ZeroDivisionError, f, x=0) + + + A third possibility is to use a string to be executed:: + + >>> raises(ZeroDivisionError, "f(0)") + + + .. autoclass:: _pytest._code.ExceptionInfo + :members: + + .. note:: + Similar to caught exception objects in Python, explicitly clearing + local references to returned ``ExceptionInfo`` objects can + help the Python interpreter speed up its garbage collection. + + Clearing those references breaks a reference cycle + (``ExceptionInfo`` --> caught exception --> frame stack raising + the exception --> current frame stack --> local variables --> + ``ExceptionInfo``) which makes Python keep all objects referenced + from that cycle (including all local variables in the current + frame) alive until the next cyclic garbage collection run. See the + official Python ``try`` statement documentation for more detailed + information. + + """ + __tracebackhide__ = True + if expected_exception is AssertionError: + # we want to catch a AssertionError + # replace our subclass with the builtin one + # see https://github.com/pytest-dev/pytest/issues/176 + from _pytest.assertion.util import BuiltinAssertionError \ + as expected_exception + msg = ("exceptions must be old-style classes or" + " derived from BaseException, not %s") + if isinstance(expected_exception, tuple): + for exc in expected_exception: + if not isclass(exc): + raise TypeError(msg % type(exc)) + elif not isclass(expected_exception): + raise TypeError(msg % type(expected_exception)) + + message = "DID NOT RAISE {0}".format(expected_exception) + + if not args: + if "message" in kwargs: + message = kwargs.pop("message") + return RaisesContext(expected_exception, message) + elif isinstance(args[0], str): + code, = args + assert isinstance(code, str) + frame = sys._getframe(1) + loc = frame.f_locals.copy() + loc.update(kwargs) + #print "raises frame scope: %r" % frame.f_locals + try: + code = _pytest._code.Source(code).compile() + py.builtin.exec_(code, frame.f_globals, loc) + # XXX didn'T mean f_globals == f_locals something special? + # this is destroyed here ... 
+ except expected_exception: + return _pytest._code.ExceptionInfo() + else: + func = args[0] + try: + func(*args[1:], **kwargs) + except expected_exception: + return _pytest._code.ExceptionInfo() + pytest.fail(message) + +class RaisesContext(object): + def __init__(self, expected_exception, message): + self.expected_exception = expected_exception + self.message = message + self.excinfo = None + + def __enter__(self): + self.excinfo = object.__new__(_pytest._code.ExceptionInfo) + return self.excinfo + + def __exit__(self, *tp): + __tracebackhide__ = True + if tp[0] is None: + pytest.fail(self.message) + if sys.version_info < (2, 7): + # py26: on __exit__() exc_value often does not contain the + # exception value. + # http://bugs.python.org/issue7853 + if not isinstance(tp[1], BaseException): + exc_type, value, traceback = tp + tp = exc_type, exc_type(value), traceback + self.excinfo.__init__(tp) + suppress_exception = issubclass(self.excinfo.type, self.expected_exception) + if sys.version_info[0] == 2 and suppress_exception: + sys.exc_clear() + return suppress_exception + + +# builtin pytest.approx helper + +class approx(object): + """ + Assert that two numbers (or two sets of numbers) are equal to each other + within some tolerance. + + Due to the `intricacies of floating-point arithmetic`__, numbers that we + would intuitively expect to be equal are not always so:: + + >>> 0.1 + 0.2 == 0.3 + False + + __ https://docs.python.org/3/tutorial/floatingpoint.html + + This problem is commonly encountered when writing tests, e.g. when making + sure that floating-point values are what you expect them to be. One way to + deal with this problem is to assert that two floating-point numbers are + equal to within some appropriate tolerance:: + + >>> abs((0.1 + 0.2) - 0.3) < 1e-6 + True + + However, comparisons like this are tedious to write and difficult to + understand. Furthermore, absolute comparisons like the one above are + usually discouraged because there's no tolerance that works well for all + situations. ``1e-6`` is good for numbers around ``1``, but too small for + very big numbers and too big for very small ones. It's better to express + the tolerance as a fraction of the expected value, but relative comparisons + like that are even more difficult to write correctly and concisely. + + The ``approx`` class performs floating-point comparisons using a syntax + that's as intuitive as possible:: + + >>> from pytest import approx + >>> 0.1 + 0.2 == approx(0.3) + True + + The same syntax also works on sequences of numbers:: + + >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) + True + + By default, ``approx`` considers numbers within a relative tolerance of + ``1e-6`` (i.e. one part in a million) of its expected value to be equal. + This treatment would lead to surprising results if the expected value was + ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. + To handle this case less surprisingly, ``approx`` also considers numbers + within an absolute tolerance of ``1e-12`` of its expected value to be + equal. Infinite numbers are another special case. They are only + considered equal to themselves, regardless of the relative tolerance. 
Both + the relative and absolute tolerances can be changed by passing arguments to + the ``approx`` constructor:: + + >>> 1.0001 == approx(1) + False + >>> 1.0001 == approx(1, rel=1e-3) + True + >>> 1.0001 == approx(1, abs=1e-3) + True + + If you specify ``abs`` but not ``rel``, the comparison will not consider + the relative tolerance at all. In other words, two numbers that are within + the default relative tolerance of ``1e-6`` will still be considered unequal + if they exceed the specified absolute tolerance. If you specify both + ``abs`` and ``rel``, the numbers will be considered equal if either + tolerance is met:: + + >>> 1 + 1e-8 == approx(1) + True + >>> 1 + 1e-8 == approx(1, abs=1e-12) + False + >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) + True + + If you're thinking about using ``approx``, then you might want to know how + it compares to other good ways of comparing floating-point numbers. All of + these algorithms are based on relative and absolute tolerances and should + agree for the most part, but they do have meaningful differences: + + - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative + tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute + tolerance is met. Because the relative tolerance is calculated w.r.t. + both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor + ``b`` is a "reference value"). You have to specify an absolute tolerance + if you want to compare to ``0.0`` because there is no tolerance by + default. Only available in python>=3.5. `More information...`__ + + __ https://docs.python.org/3/library/math.html#math.isclose + + - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference + between ``a`` and ``b`` is less that the sum of the relative tolerance + w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance + is only calculated w.r.t. ``b``, this test is asymmetric and you can + think of ``b`` as the reference value. Support for comparing sequences + is provided by ``numpy.allclose``. `More information...`__ + + __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html + + - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b`` + are within an absolute tolerance of ``1e-7``. No relative tolerance is + considered and the absolute tolerance cannot be changed, so this function + is not appropriate for very large or very small numbers. Also, it's only + available in subclasses of ``unittest.TestCase`` and it's ugly because it + doesn't follow PEP8. `More information...`__ + + __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual + + - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative + tolerance is met w.r.t. ``b`` or if the absolute tolerance is met. + Because the relative tolerance is only calculated w.r.t. ``b``, this test + is asymmetric and you can think of ``b`` as the reference value. In the + special case that you explicitly specify an absolute tolerance but not a + relative tolerance, only the absolute tolerance is considered. 
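(Editor's note, not part of the patch: a few concrete comparisons that follow directly from the tolerance rules described in this docstring.)

    from pytest import approx

    assert 0.1 + 0.2 == approx(0.3)                       # default rel=1e-6, abs=1e-12
    assert 1.0001 == approx(1, rel=1e-3)                  # loosen the relative tolerance
    assert (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))   # sequences compare element-wise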
+ """ + + def __init__(self, expected, rel=None, abs=None): + self.expected = expected + self.abs = abs + self.rel = rel + + def __repr__(self): + return ', '.join(repr(x) for x in self.expected) + + def __eq__(self, actual): + from collections import Iterable + if not isinstance(actual, Iterable): + actual = [actual] + if len(actual) != len(self.expected): + return False + return all(a == x for a, x in zip(actual, self.expected)) + + __hash__ = None + + def __ne__(self, actual): + return not (actual == self) + + @property + def expected(self): + # Regardless of whether the user-specified expected value is a number + # or a sequence of numbers, return a list of ApproxNotIterable objects + # that can be compared against. + from collections import Iterable + approx_non_iter = lambda x: ApproxNonIterable(x, self.rel, self.abs) + if isinstance(self._expected, Iterable): + return [approx_non_iter(x) for x in self._expected] + else: + return [approx_non_iter(self._expected)] + + @expected.setter + def expected(self, expected): + self._expected = expected + + +class ApproxNonIterable(object): + """ + Perform approximate comparisons for single numbers only. + + In other words, the ``expected`` attribute for objects of this class must + be some sort of number. This is in contrast to the ``approx`` class, where + the ``expected`` attribute can either be a number of a sequence of numbers. + This class is responsible for making comparisons, while ``approx`` is + responsible for abstracting the difference between numbers and sequences of + numbers. Although this class can stand on its own, it's only meant to be + used within ``approx``. + """ + + def __init__(self, expected, rel=None, abs=None): + self.expected = expected + self.abs = abs + self.rel = rel + + def __repr__(self): + if isinstance(self.expected, complex): + return str(self.expected) + + # Infinities aren't compared using tolerances, so don't show a + # tolerance. + if math.isinf(self.expected): + return str(self.expected) + + # If a sensible tolerance can't be calculated, self.tolerance will + # raise a ValueError. In this case, display '???'. + try: + vetted_tolerance = '{:.1e}'.format(self.tolerance) + except ValueError: + vetted_tolerance = '???' + + if sys.version_info[0] == 2: + return '{0} +- {1}'.format(self.expected, vetted_tolerance) + else: + return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance) + + def __eq__(self, actual): + # Short-circuit exact equality. + if actual == self.expected: + return True + + # Infinity shouldn't be approximately equal to anything but itself, but + # if there's a relative tolerance, it will be infinite and infinity + # will seem approximately equal to everything. The equal-to-itself + # case would have been short circuited above, so here we can just + # return false if the expected value is infinite. The abs() call is + # for compatibility with complex numbers. + if math.isinf(abs(self.expected)): + return False + + # Return true if the two numbers are within the tolerance. + return abs(self.expected - actual) <= self.tolerance + + __hash__ = None + + def __ne__(self, actual): + return not (actual == self) + + @property + def tolerance(self): + set_default = lambda x, default: x if x is not None else default + + # Figure out what the absolute tolerance should be. ``self.abs`` is + # either None or a value specified by the user. 
+ absolute_tolerance = set_default(self.abs, 1e-12) + + if absolute_tolerance < 0: + raise ValueError("absolute tolerance can't be negative: {0}".format(absolute_tolerance)) + if math.isnan(absolute_tolerance): + raise ValueError("absolute tolerance can't be NaN.") + + # If the user specified an absolute tolerance but not a relative one, + # just return the absolute tolerance. + if self.rel is None: + if self.abs is not None: + return absolute_tolerance + + # Figure out what the relative tolerance should be. ``self.rel`` is + # either None or a value specified by the user. This is done after + # we've made sure the user didn't ask for an absolute tolerance only, + # because we don't want to raise errors about the relative tolerance if + # we aren't even going to use it. + relative_tolerance = set_default(self.rel, 1e-6) * abs(self.expected) + + if relative_tolerance < 0: + raise ValueError("relative tolerance can't be negative: {0}".format(absolute_tolerance)) + if math.isnan(relative_tolerance): + raise ValueError("relative tolerance can't be NaN.") + + # Return the larger of the relative and absolute tolerances. + return max(relative_tolerance, absolute_tolerance) + + +# +# the basic pytest Function item +# + +class Function(FunctionMixin, pytest.Item, fixtures.FuncargnamesCompatAttr): + """ a Function Item is responsible for setting up and executing a + Python test function. + """ + _genid = None + def __init__(self, name, parent, args=None, config=None, + callspec=None, callobj=NOTSET, keywords=None, session=None, + fixtureinfo=None, originalname=None): + super(Function, self).__init__(name, parent, config=config, + session=session) + self._args = args + if callobj is not NOTSET: + self.obj = callobj + + self.keywords.update(self.obj.__dict__) + if callspec: + self.callspec = callspec + self.keywords.update(callspec.keywords) + if keywords: + self.keywords.update(keywords) + + if fixtureinfo is None: + fixtureinfo = self.session._fixturemanager.getfixtureinfo( + self.parent, self.obj, self.cls, + funcargs=not self._isyieldedfunction()) + self._fixtureinfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + #: original function name, without any decorations (for example + #: parametrization adds a ``"[...]"`` suffix to function names). + #: + #: .. versionadded:: 3.0 + self.originalname = originalname + + def _initrequest(self): + self.funcargs = {} + if self._isyieldedfunction(): + assert not hasattr(self, "callspec"), ( + "yielded functions (deprecated) cannot have funcargs") + else: + if hasattr(self, "callspec"): + callspec = self.callspec + assert not callspec.funcargs + self._genid = callspec.id + if hasattr(callspec, "param"): + self.param = callspec.param + self._request = fixtures.FixtureRequest(self) + + @property + def function(self): + "underlying python 'function' object" + return getattr(self.obj, 'im_func', self.obj) + + def _getobj(self): + name = self.name + i = name.find("[") # parametrization + if i != -1: + name = name[:i] + return getattr(self.parent.obj, name) + + @property + def _pyfuncitem(self): + "(compatonly) for code expecting pytest-2.2 style request objects" + return self + + def _isyieldedfunction(self): + return getattr(self, "_args", None) is not None + + def runtest(self): + """ execute the underlying test function. 
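(Editor's note, not part of the patch: ``Function.runtest`` below delegates the actual call to the ``pytest_pyfunc_call`` hook, so a plugin can observe the call phase with a hook wrapper. A sketch under that assumption; the timing logic is purely illustrative:)

    # conftest.py -- illustrative plugin code, not from this patch
    import time
    import pytest

    @pytest.hookimpl(hookwrapper=True)
    def pytest_pyfunc_call(pyfuncitem):
        start = time.time()
        yield                      # the underlying test function runs here
        print("%s took %.3fs" % (pyfuncitem.name, time.time() - start))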
""" + self.ihook.pytest_pyfunc_call(pyfuncitem=self) + + def setup(self): + super(Function, self).setup() + fixtures.fillfixtures(self) diff --git a/lib/spack/external/_pytest/recwarn.py b/lib/spack/external/_pytest/recwarn.py new file mode 100644 index 0000000000..87823bfbc6 --- /dev/null +++ b/lib/spack/external/_pytest/recwarn.py @@ -0,0 +1,226 @@ +""" recording warnings during test function execution. """ + +import inspect + +import _pytest._code +import py +import sys +import warnings +import pytest + + +@pytest.yield_fixture +def recwarn(request): + """Return a WarningsRecorder instance that provides these methods: + + * ``pop(category=None)``: return last warning matching the category. + * ``clear()``: clear list of warnings + + See http://docs.python.org/library/warnings.html for information + on warning categories. + """ + wrec = WarningsRecorder() + with wrec: + warnings.simplefilter('default') + yield wrec + + +def pytest_namespace(): + return {'deprecated_call': deprecated_call, + 'warns': warns} + + +def deprecated_call(func=None, *args, **kwargs): + """ assert that calling ``func(*args, **kwargs)`` triggers a + ``DeprecationWarning`` or ``PendingDeprecationWarning``. + + This function can be used as a context manager:: + + >>> import warnings + >>> def api_call_v2(): + ... warnings.warn('use v3 of this api', DeprecationWarning) + ... return 200 + + >>> with deprecated_call(): + ... assert api_call_v2() == 200 + + Note: we cannot use WarningsRecorder here because it is still subject + to the mechanism that prevents warnings of the same type from being + triggered twice for the same module. See #1190. + """ + if not func: + return WarningsChecker(expected_warning=DeprecationWarning) + + categories = [] + + def warn_explicit(message, category, *args, **kwargs): + categories.append(category) + old_warn_explicit(message, category, *args, **kwargs) + + def warn(message, category=None, *args, **kwargs): + if isinstance(message, Warning): + categories.append(message.__class__) + else: + categories.append(category) + old_warn(message, category, *args, **kwargs) + + old_warn = warnings.warn + old_warn_explicit = warnings.warn_explicit + warnings.warn_explicit = warn_explicit + warnings.warn = warn + try: + ret = func(*args, **kwargs) + finally: + warnings.warn_explicit = old_warn_explicit + warnings.warn = old_warn + deprecation_categories = (DeprecationWarning, PendingDeprecationWarning) + if not any(issubclass(c, deprecation_categories) for c in categories): + __tracebackhide__ = True + raise AssertionError("%r did not produce DeprecationWarning" % (func,)) + return ret + + +def warns(expected_warning, *args, **kwargs): + """Assert that code raises a particular class of warning. + + Specifically, the input @expected_warning can be a warning class or + tuple of warning classes, and the code must return that warning + (if a single class) or one of those warnings (if a tuple). + + This helper produces a list of ``warnings.WarningMessage`` objects, + one for each warning raised. + + This function can be used as a context manager, or any of the other ways + ``pytest.raises`` can be used:: + + >>> with warns(RuntimeWarning): + ... 
warnings.warn("my warning", RuntimeWarning) + """ + wcheck = WarningsChecker(expected_warning) + if not args: + return wcheck + elif isinstance(args[0], str): + code, = args + assert isinstance(code, str) + frame = sys._getframe(1) + loc = frame.f_locals.copy() + loc.update(kwargs) + + with wcheck: + code = _pytest._code.Source(code).compile() + py.builtin.exec_(code, frame.f_globals, loc) + else: + func = args[0] + with wcheck: + return func(*args[1:], **kwargs) + + +class RecordedWarning(object): + def __init__(self, message, category, filename, lineno, file, line): + self.message = message + self.category = category + self.filename = filename + self.lineno = lineno + self.file = file + self.line = line + + +class WarningsRecorder(object): + """A context manager to record raised warnings. + + Adapted from `warnings.catch_warnings`. + """ + + def __init__(self, module=None): + self._module = sys.modules['warnings'] if module is None else module + self._entered = False + self._list = [] + + @property + def list(self): + """The list of recorded warnings.""" + return self._list + + def __getitem__(self, i): + """Get a recorded warning by index.""" + return self._list[i] + + def __iter__(self): + """Iterate through the recorded warnings.""" + return iter(self._list) + + def __len__(self): + """The number of recorded warnings.""" + return len(self._list) + + def pop(self, cls=Warning): + """Pop the first recorded warning, raise exception if not exists.""" + for i, w in enumerate(self._list): + if issubclass(w.category, cls): + return self._list.pop(i) + __tracebackhide__ = True + raise AssertionError("%r not found in warning list" % cls) + + def clear(self): + """Clear the list of recorded warnings.""" + self._list[:] = [] + + def __enter__(self): + if self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot enter %r twice" % self) + self._entered = True + self._filters = self._module.filters + self._module.filters = self._filters[:] + self._showwarning = self._module.showwarning + + def showwarning(message, category, filename, lineno, + file=None, line=None): + self._list.append(RecordedWarning( + message, category, filename, lineno, file, line)) + + # still perform old showwarning functionality + self._showwarning( + message, category, filename, lineno, file=file, line=line) + + self._module.showwarning = showwarning + + # allow the same warning to be raised more than once + + self._module.simplefilter('always') + return self + + def __exit__(self, *exc_info): + if not self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot exit %r without entering first" % self) + self._module.filters = self._filters + self._module.showwarning = self._showwarning + + +class WarningsChecker(WarningsRecorder): + def __init__(self, expected_warning=None, module=None): + super(WarningsChecker, self).__init__(module=module) + + msg = ("exceptions must be old-style classes or " + "derived from Warning, not %s") + if isinstance(expected_warning, tuple): + for exc in expected_warning: + if not inspect.isclass(exc): + raise TypeError(msg % type(exc)) + elif inspect.isclass(expected_warning): + expected_warning = (expected_warning,) + elif expected_warning is not None: + raise TypeError(msg % type(expected_warning)) + + self.expected_warning = expected_warning + + def __exit__(self, *exc_info): + super(WarningsChecker, self).__exit__(*exc_info) + + # only check if we're not currently handling an exception + if all(a is None for a in exc_info): + if self.expected_warning is not None: + if 
not any(r.category in self.expected_warning for r in self): + __tracebackhide__ = True + pytest.fail("DID NOT WARN") diff --git a/lib/spack/external/_pytest/resultlog.py b/lib/spack/external/_pytest/resultlog.py new file mode 100644 index 0000000000..fc00259834 --- /dev/null +++ b/lib/spack/external/_pytest/resultlog.py @@ -0,0 +1,107 @@ +""" log machine-parseable test session result information in a plain +text file. +""" + +import py +import os + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "resultlog plugin options") + group.addoption('--resultlog', '--result-log', action="store", + metavar="path", default=None, + help="DEPRECATED path for machine-readable result log.") + +def pytest_configure(config): + resultlog = config.option.resultlog + # prevent opening resultlog on slave nodes (xdist) + if resultlog and not hasattr(config, 'slaveinput'): + dirname = os.path.dirname(os.path.abspath(resultlog)) + if not os.path.isdir(dirname): + os.makedirs(dirname) + logfile = open(resultlog, 'w', 1) # line buffered + config._resultlog = ResultLog(config, logfile) + config.pluginmanager.register(config._resultlog) + + from _pytest.deprecated import RESULT_LOG + config.warn('C1', RESULT_LOG) + +def pytest_unconfigure(config): + resultlog = getattr(config, '_resultlog', None) + if resultlog: + resultlog.logfile.close() + del config._resultlog + config.pluginmanager.unregister(resultlog) + +def generic_path(item): + chain = item.listchain() + gpath = [chain[0].name] + fspath = chain[0].fspath + fspart = False + for node in chain[1:]: + newfspath = node.fspath + if newfspath == fspath: + if fspart: + gpath.append(':') + fspart = False + else: + gpath.append('.') + else: + gpath.append('/') + fspart = True + name = node.name + if name[0] in '([': + gpath.pop() + gpath.append(name) + fspath = newfspath + return ''.join(gpath) + +class ResultLog(object): + def __init__(self, config, logfile): + self.config = config + self.logfile = logfile # preferably line buffered + + def write_log_entry(self, testpath, lettercode, longrepr): + py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile) + for line in longrepr.splitlines(): + py.builtin.print_(" %s" % line, file=self.logfile) + + def log_outcome(self, report, lettercode, longrepr): + testpath = getattr(report, 'nodeid', None) + if testpath is None: + testpath = report.fspath + self.write_log_entry(testpath, lettercode, longrepr) + + def pytest_runtest_logreport(self, report): + if report.when != "call" and report.passed: + return + res = self.config.hook.pytest_report_teststatus(report=report) + code = res[1] + if code == 'x': + longrepr = str(report.longrepr) + elif code == 'X': + longrepr = '' + elif report.passed: + longrepr = "" + elif report.failed: + longrepr = str(report.longrepr) + elif report.skipped: + longrepr = str(report.longrepr[2]) + self.log_outcome(report, code, longrepr) + + def pytest_collectreport(self, report): + if not report.passed: + if report.failed: + code = "F" + longrepr = str(report.longrepr) + else: + assert report.skipped + code = "S" + longrepr = "%s:%d: %s" % report.longrepr + self.log_outcome(report, code, longrepr) + + def pytest_internalerror(self, excrepr): + reprcrash = getattr(excrepr, 'reprcrash', None) + path = getattr(reprcrash, "path", None) + if path is None: + path = "cwd:%s" % py.path.local() + self.write_log_entry(path, '!', str(excrepr)) diff --git a/lib/spack/external/_pytest/runner.py b/lib/spack/external/_pytest/runner.py new file mode 100644 index 
0000000000..eb29e7370c --- /dev/null +++ b/lib/spack/external/_pytest/runner.py @@ -0,0 +1,578 @@ +""" basic collect and runtest protocol implementations """ +import bdb +import sys +from time import time + +import py +import pytest +from _pytest._code.code import TerminalRepr, ExceptionInfo + + +def pytest_namespace(): + return { + 'fail' : fail, + 'skip' : skip, + 'importorskip' : importorskip, + 'exit' : exit, + } + +# +# pytest plugin hooks + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "reporting", after="general") + group.addoption('--durations', + action="store", type=int, default=None, metavar="N", + help="show N slowest setup/test durations (N=0 for all)."), + +def pytest_terminal_summary(terminalreporter): + durations = terminalreporter.config.option.durations + if durations is None: + return + tr = terminalreporter + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, 'duration'): + dlist.append(rep) + if not dlist: + return + dlist.sort(key=lambda x: x.duration) + dlist.reverse() + if not durations: + tr.write_sep("=", "slowest test durations") + else: + tr.write_sep("=", "slowest %s test durations" % durations) + dlist = dlist[:durations] + + for rep in dlist: + nodeid = rep.nodeid.replace("::()::", "::") + tr.write_line("%02.2fs %-8s %s" % + (rep.duration, rep.when, nodeid)) + +def pytest_sessionstart(session): + session._setupstate = SetupState() +def pytest_sessionfinish(session): + session._setupstate.teardown_all() + +class NodeInfo: + def __init__(self, location): + self.location = location + +def pytest_runtest_protocol(item, nextitem): + item.ihook.pytest_runtest_logstart( + nodeid=item.nodeid, location=item.location, + ) + runtestprotocol(item, nextitem=nextitem) + return True + +def runtestprotocol(item, log=True, nextitem=None): + hasrequest = hasattr(item, "_request") + if hasrequest and not item._request: + item._initrequest() + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + if item.config.option.setupshow: + show_test_item(item) + if not item.config.option.setuponly: + reports.append(call_and_report(item, "call", log)) + reports.append(call_and_report(item, "teardown", log, + nextitem=nextitem)) + # after all teardown hooks have been called + # want funcargs and request info to go away + if hasrequest: + item._request = False + item.funcargs = None + return reports + +def show_test_item(item): + """Show test function, parameters and the fixtures of the test item.""" + tw = item.config.get_terminal_writer() + tw.line() + tw.write(' ' * 8) + tw.write(item._nodeid) + used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys()) + if used_fixtures: + tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures))) + +def pytest_runtest_setup(item): + item.session._setupstate.prepare(item) + +def pytest_runtest_call(item): + try: + item.runtest() + except Exception: + # Store trace info to allow postmortem debugging + type, value, tb = sys.exc_info() + tb = tb.tb_next # Skip *this* frame + sys.last_type = type + sys.last_value = value + sys.last_traceback = tb + del tb # Get rid of it in this namespace + raise + +def pytest_runtest_teardown(item, nextitem): + item.session._setupstate.teardown_exact(item, nextitem) + +def pytest_report_teststatus(report): + if report.when in ("setup", "teardown"): + if report.failed: + # category, shortletter, verbose-word + return "error", "E", "ERROR" + elif report.skipped: + return "skipped", "s", "SKIPPED" + else: + return "", 
"", "" + + +# +# Implementation + +def call_and_report(item, when, log=True, **kwds): + call = call_runtest_hook(item, when, **kwds) + hook = item.ihook + report = hook.pytest_runtest_makereport(item=item, call=call) + if log: + hook.pytest_runtest_logreport(report=report) + if check_interactive_exception(call, report): + hook.pytest_exception_interact(node=item, call=call, report=report) + return report + +def check_interactive_exception(call, report): + return call.excinfo and not ( + hasattr(report, "wasxfail") or + call.excinfo.errisinstance(skip.Exception) or + call.excinfo.errisinstance(bdb.BdbQuit)) + +def call_runtest_hook(item, when, **kwds): + hookname = "pytest_runtest_" + when + ihook = getattr(item.ihook, hookname) + return CallInfo(lambda: ihook(item=item, **kwds), when=when) + +class CallInfo: + """ Result/Exception info a function invocation. """ + #: None or ExceptionInfo object. + excinfo = None + def __init__(self, func, when): + #: context of invocation: one of "setup", "call", + #: "teardown", "memocollect" + self.when = when + self.start = time() + try: + self.result = func() + except KeyboardInterrupt: + self.stop = time() + raise + except: + self.excinfo = ExceptionInfo() + self.stop = time() + + def __repr__(self): + if self.excinfo: + status = "exception: %s" % str(self.excinfo.value) + else: + status = "result: %r" % (self.result,) + return "" % (self.when, status) + +def getslaveinfoline(node): + try: + return node._slaveinfocache + except AttributeError: + d = node.slaveinfo + ver = "%s.%s.%s" % d['version_info'][:3] + node._slaveinfocache = s = "[%s] %s -- Python %s %s" % ( + d['id'], d['sysplatform'], ver, d['executable']) + return s + +class BaseReport(object): + + def __init__(self, **kw): + self.__dict__.update(kw) + + def toterminal(self, out): + if hasattr(self, 'node'): + out.line(getslaveinfoline(self.node)) + + longrepr = self.longrepr + if longrepr is None: + return + + if hasattr(longrepr, 'toterminal'): + longrepr.toterminal(out) + else: + try: + out.line(longrepr) + except UnicodeEncodeError: + out.line("") + + def get_sections(self, prefix): + for name, content in self.sections: + if name.startswith(prefix): + yield prefix, content + + @property + def longreprtext(self): + """ + Read-only property that returns the full string representation + of ``longrepr``. + + .. versionadded:: 3.0 + """ + tw = py.io.TerminalWriter(stringio=True) + tw.hasmarkup = False + self.toterminal(tw) + exc = tw.stringio.getvalue() + return exc.strip() + + @property + def capstdout(self): + """Return captured text from stdout, if capturing is enabled + + .. versionadded:: 3.0 + """ + return ''.join(content for (prefix, content) in self.get_sections('Captured stdout')) + + @property + def capstderr(self): + """Return captured text from stderr, if capturing is enabled + + .. 
versionadded:: 3.0 + """ + return ''.join(content for (prefix, content) in self.get_sections('Captured stderr')) + + passed = property(lambda x: x.outcome == "passed") + failed = property(lambda x: x.outcome == "failed") + skipped = property(lambda x: x.outcome == "skipped") + + @property + def fspath(self): + return self.nodeid.split("::")[0] + +def pytest_runtest_makereport(item, call): + when = call.when + duration = call.stop-call.start + keywords = dict([(x,1) for x in item.keywords]) + excinfo = call.excinfo + sections = [] + if not call.excinfo: + outcome = "passed" + longrepr = None + else: + if not isinstance(excinfo, ExceptionInfo): + outcome = "failed" + longrepr = excinfo + elif excinfo.errisinstance(pytest.skip.Exception): + outcome = "skipped" + r = excinfo._getreprcrash() + longrepr = (str(r.path), r.lineno, r.message) + else: + outcome = "failed" + if call.when == "call": + longrepr = item.repr_failure(excinfo) + else: # exception in setup or teardown + longrepr = item._repr_failure_py(excinfo, + style=item.config.option.tbstyle) + for rwhen, key, content in item._report_sections: + sections.append(("Captured %s %s" %(key, rwhen), content)) + return TestReport(item.nodeid, item.location, + keywords, outcome, longrepr, when, + sections, duration) + +class TestReport(BaseReport): + """ Basic test report object (also used for setup and teardown calls if + they fail). + """ + def __init__(self, nodeid, location, keywords, outcome, + longrepr, when, sections=(), duration=0, **extra): + #: normalized collection node id + self.nodeid = nodeid + + #: a (filesystempath, lineno, domaininfo) tuple indicating the + #: actual location of a test item - it might be different from the + #: collected one e.g. if a method is inherited from a different module. + self.location = location + + #: a name -> value dictionary containing all keywords and + #: markers associated with a test invocation. + self.keywords = keywords + + #: test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: one of 'setup', 'call', 'teardown' to indicate runtest phase. + self.when = when + + #: list of pairs ``(str, str)`` of extra information which needs to + #: marshallable. Used by pytest to add captured text + #: from ``stdout`` and ``stderr``, but may be used by other plugins + #: to add arbitrary information to reports. 
+ self.sections = list(sections) + + #: time it took to run just the test + self.duration = duration + + self.__dict__.update(extra) + + def __repr__(self): + return "" % ( + self.nodeid, self.when, self.outcome) + +class TeardownErrorReport(BaseReport): + outcome = "failed" + when = "teardown" + def __init__(self, longrepr, **extra): + self.longrepr = longrepr + self.sections = [] + self.__dict__.update(extra) + +def pytest_make_collect_report(collector): + call = CallInfo(collector._memocollect, "memocollect") + longrepr = None + if not call.excinfo: + outcome = "passed" + else: + from _pytest import nose + skip_exceptions = (Skipped,) + nose.get_skip_exceptions() + if call.excinfo.errisinstance(skip_exceptions): + outcome = "skipped" + r = collector._repr_failure_py(call.excinfo, "line").reprcrash + longrepr = (str(r.path), r.lineno, r.message) + else: + outcome = "failed" + errorinfo = collector.repr_failure(call.excinfo) + if not hasattr(errorinfo, "toterminal"): + errorinfo = CollectErrorRepr(errorinfo) + longrepr = errorinfo + rep = CollectReport(collector.nodeid, outcome, longrepr, + getattr(call, 'result', None)) + rep.call = call # see collect_one_node + return rep + + +class CollectReport(BaseReport): + def __init__(self, nodeid, outcome, longrepr, result, + sections=(), **extra): + self.nodeid = nodeid + self.outcome = outcome + self.longrepr = longrepr + self.result = result or [] + self.sections = list(sections) + self.__dict__.update(extra) + + @property + def location(self): + return (self.fspath, None, self.fspath) + + def __repr__(self): + return "" % ( + self.nodeid, len(self.result), self.outcome) + +class CollectErrorRepr(TerminalRepr): + def __init__(self, msg): + self.longrepr = msg + def toterminal(self, out): + out.line(self.longrepr, red=True) + +class SetupState(object): + """ shared state for setting up/tearing down test items or collectors. """ + def __init__(self): + self.stack = [] + self._finalizers = {} + + def addfinalizer(self, finalizer, colitem): + """ attach a finalizer to the given colitem. + if colitem is None, this will add a finalizer that + is called at the end of teardown_all(). + """ + assert colitem and not isinstance(colitem, tuple) + assert py.builtin.callable(finalizer) + #assert colitem in self.stack # some unit tests don't setup stack :/ + self._finalizers.setdefault(colitem, []).append(finalizer) + + def _pop_and_teardown(self): + colitem = self.stack.pop() + self._teardown_with_finalization(colitem) + + def _callfinalizers(self, colitem): + finalizers = self._finalizers.pop(colitem, None) + exc = None + while finalizers: + fin = finalizers.pop() + try: + fin() + except Exception: + # XXX Only first exception will be seen by user, + # ideally all should be reported. 
+ if exc is None: + exc = sys.exc_info() + if exc: + py.builtin._reraise(*exc) + + def _teardown_with_finalization(self, colitem): + self._callfinalizers(colitem) + if hasattr(colitem, "teardown"): + colitem.teardown() + for colitem in self._finalizers: + assert colitem is None or colitem in self.stack \ + or isinstance(colitem, tuple) + + def teardown_all(self): + while self.stack: + self._pop_and_teardown() + for key in list(self._finalizers): + self._teardown_with_finalization(key) + assert not self._finalizers + + def teardown_exact(self, item, nextitem): + needed_collectors = nextitem and nextitem.listchain() or [] + self._teardown_towards(needed_collectors) + + def _teardown_towards(self, needed_collectors): + while self.stack: + if self.stack == needed_collectors[:len(self.stack)]: + break + self._pop_and_teardown() + + def prepare(self, colitem): + """ setup objects along the collector chain to the test-method + and teardown previously setup objects.""" + needed_collectors = colitem.listchain() + self._teardown_towards(needed_collectors) + + # check if the last collection node has raised an error + for col in self.stack: + if hasattr(col, '_prepare_exc'): + py.builtin._reraise(*col._prepare_exc) + for col in needed_collectors[len(self.stack):]: + self.stack.append(col) + try: + col.setup() + except Exception: + col._prepare_exc = sys.exc_info() + raise + +def collect_one_node(collector): + ihook = collector.ihook + ihook.pytest_collectstart(collector=collector) + rep = ihook.pytest_make_collect_report(collector=collector) + call = rep.__dict__.pop("call", None) + if call and check_interactive_exception(call, rep): + ihook.pytest_exception_interact(node=collector, call=call, report=rep) + return rep + + +# ============================================================= +# Test OutcomeExceptions and helpers for creating them. + + +class OutcomeException(Exception): + """ OutcomeException and its subclass instances indicate and + contain info about test and collection outcomes. + """ + def __init__(self, msg=None, pytrace=True): + Exception.__init__(self, msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self): + if self.msg: + val = self.msg + if isinstance(val, bytes): + val = py._builtin._totext(val, errors='replace') + return val + return "<%s instance>" %(self.__class__.__name__,) + __str__ = __repr__ + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = 'builtins' + + def __init__(self, msg=None, pytrace=True, allow_module_level=False): + OutcomeException.__init__(self, msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + + +class Failed(OutcomeException): + """ raised from an explicit call to pytest.fail() """ + __module__ = 'builtins' + + +class Exit(KeyboardInterrupt): + """ raised for immediate program exits (no tracebacks/summaries)""" + def __init__(self, msg="unknown reason"): + self.msg = msg + KeyboardInterrupt.__init__(self, msg) + +# exposed helper methods + +def exit(msg): + """ exit testing process as if KeyboardInterrupt was triggered. """ + __tracebackhide__ = True + raise Exit(msg) + + +exit.Exception = Exit + + +def skip(msg=""): + """ skip an executing test with the given message. Note: it's usually + better to use the pytest.mark.skipif marker to declare a test to be + skipped under certain conditions like mismatching platforms or + dependencies. See the pytest_skipping plugin for details. 
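(Editor's note, not part of the patch: the ``skip``/``fail`` outcome helpers defined in this block are intended to be called from inside a running test. A minimal sketch; ``has_feature`` and ``run_feature`` are hypothetical helpers, not part of this patch:)

    import pytest

    def test_feature():
        if not has_feature():
            pytest.skip("feature not built in this configuration")
        if run_feature() != "ok":
            pytest.fail("unexpected status from run_feature()", pytrace=False)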
+ """ + __tracebackhide__ = True + raise Skipped(msg=msg) + + +skip.Exception = Skipped + + +def fail(msg="", pytrace=True): + """ explicitly fail an currently-executing test with the given Message. + + :arg pytrace: if false the msg represents the full failure information + and no python traceback will be reported. + """ + __tracebackhide__ = True + raise Failed(msg=msg, pytrace=pytrace) + + +fail.Exception = Failed + + +def importorskip(modname, minversion=None): + """ return imported module if it has at least "minversion" as its + __version__ attribute. If no minversion is specified the a skip + is only triggered if the module can not be imported. + """ + __tracebackhide__ = True + compile(modname, '', 'eval') # to catch syntaxerrors + should_skip = False + try: + __import__(modname) + except ImportError: + # Do not raise chained exception here(#1485) + should_skip = True + if should_skip: + raise Skipped("could not import %r" %(modname,), allow_module_level=True) + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, '__version__', None) + if minversion is not None: + try: + from pkg_resources import parse_version as pv + except ImportError: + raise Skipped("we have a required version for %r but can not import " + "pkg_resources to parse version strings." % (modname,), + allow_module_level=True) + if verattr is None or pv(verattr) < pv(minversion): + raise Skipped("module %r has __version__ %r, required is: %r" %( + modname, verattr, minversion), allow_module_level=True) + return mod + diff --git a/lib/spack/external/_pytest/setuponly.py b/lib/spack/external/_pytest/setuponly.py new file mode 100644 index 0000000000..1752c575f5 --- /dev/null +++ b/lib/spack/external/_pytest/setuponly.py @@ -0,0 +1,72 @@ +import pytest +import sys + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption('--setuponly', '--setup-only', action="store_true", + help="only setup fixtures, do not execute tests.") + group.addoption('--setupshow', '--setup-show', action="store_true", + help="show setup of fixtures while executing tests.") + + +@pytest.hookimpl(hookwrapper=True) +def pytest_fixture_setup(fixturedef, request): + yield + config = request.config + if config.option.setupshow: + if hasattr(request, 'param'): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). 
+ if fixturedef.ids: + if callable(fixturedef.ids): + fixturedef.cached_param = fixturedef.ids(request.param) + else: + fixturedef.cached_param = fixturedef.ids[ + request.param_index] + else: + fixturedef.cached_param = request.param + _show_fixture_action(fixturedef, 'SETUP') + + +def pytest_fixture_post_finalizer(fixturedef): + if hasattr(fixturedef, "cached_result"): + config = fixturedef._fixturemanager.config + if config.option.setupshow: + _show_fixture_action(fixturedef, 'TEARDOWN') + if hasattr(fixturedef, "cached_param"): + del fixturedef.cached_param + + +def _show_fixture_action(fixturedef, msg): + config = fixturedef._fixturemanager.config + capman = config.pluginmanager.getplugin('capturemanager') + if capman: + out, err = capman.suspendcapture() + + tw = config.get_terminal_writer() + tw.line() + tw.write(' ' * 2 * fixturedef.scopenum) + tw.write('{step} {scope} {fixture}'.format( + step=msg.ljust(8), # align the output to TEARDOWN + scope=fixturedef.scope[0].upper(), + fixture=fixturedef.argname)) + + if msg == 'SETUP': + deps = sorted(arg for arg in fixturedef.argnames if arg != 'request') + if deps: + tw.write(' (fixtures used: {0})'.format(', '.join(deps))) + + if hasattr(fixturedef, 'cached_param'): + tw.write('[{0}]'.format(fixturedef.cached_param)) + + if capman: + capman.resumecapture() + sys.stdout.write(out) + sys.stderr.write(err) + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config): + if config.option.setuponly: + config.option.setupshow = True diff --git a/lib/spack/external/_pytest/setupplan.py b/lib/spack/external/_pytest/setupplan.py new file mode 100644 index 0000000000..f0853dee54 --- /dev/null +++ b/lib/spack/external/_pytest/setupplan.py @@ -0,0 +1,23 @@ +import pytest + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption('--setupplan', '--setup-plan', action="store_true", + help="show what fixtures and tests would be executed but " + "don't execute anything.") + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup(fixturedef, request): + # Will return a dummy fixture if the setuponly option is provided. + if request.config.option.setupplan: + fixturedef.cached_result = (None, None, None) + return fixturedef.cached_result + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config): + if config.option.setupplan: + config.option.setuponly = True + config.option.setupshow = True diff --git a/lib/spack/external/_pytest/skipping.py b/lib/spack/external/_pytest/skipping.py new file mode 100644 index 0000000000..a8eaea98aa --- /dev/null +++ b/lib/spack/external/_pytest/skipping.py @@ -0,0 +1,375 @@ +""" support for skip/xfail functions and markers. 
""" +import os +import sys +import traceback + +import py +import pytest +from _pytest.mark import MarkInfo, MarkDecorator + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption('--runxfail', + action="store_true", dest="runxfail", default=False, + help="run tests even if they are marked xfail") + + parser.addini("xfail_strict", "default for the strict parameter of xfail " + "markers when not given explicitly (default: " + "False)", + default=False, + type="bool") + + +def pytest_configure(config): + if config.option.runxfail: + old = pytest.xfail + config._cleanup.append(lambda: setattr(pytest, "xfail", old)) + + def nop(*args, **kwargs): + pass + + nop.Exception = XFailed + setattr(pytest, "xfail", nop) + + config.addinivalue_line("markers", + "skip(reason=None): skip the given test function with an optional reason. " + "Example: skip(reason=\"no way of currently testing this\") skips the " + "test." + ) + config.addinivalue_line("markers", + "skipif(condition): skip the given test function if eval(condition) " + "results in a True value. Evaluation happens within the " + "module global context. Example: skipif('sys.platform == \"win32\"') " + "skips the test if we are on the win32 platform. see " + "http://pytest.org/latest/skipping.html" + ) + config.addinivalue_line("markers", + "xfail(condition, reason=None, run=True, raises=None, strict=False): " + "mark the the test function as an expected failure if eval(condition) " + "has a True value. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. 
See http://pytest.org/latest/skipping.html" + ) + + +def pytest_namespace(): + return dict(xfail=xfail) + + +class XFailed(pytest.fail.Exception): + """ raised from an explicit call to pytest.xfail() """ + + +def xfail(reason=""): + """ xfail an executing test or setup functions with the given reason.""" + __tracebackhide__ = True + raise XFailed(reason) + + +xfail.Exception = XFailed + + +class MarkEvaluator: + def __init__(self, item, name): + self.item = item + self.name = name + + @property + def holder(self): + return self.item.keywords.get(self.name) + + def __bool__(self): + return bool(self.holder) + __nonzero__ = __bool__ + + def wasvalid(self): + return not hasattr(self, 'exc') + + def invalidraise(self, exc): + raises = self.get('raises') + if not raises: + return + return not isinstance(exc, raises) + + def istrue(self): + try: + return self._istrue() + except Exception: + self.exc = sys.exc_info() + if isinstance(self.exc[1], SyntaxError): + msg = [" " * (self.exc[1].offset + 4) + "^",] + msg.append("SyntaxError: invalid syntax") + else: + msg = traceback.format_exception_only(*self.exc[:2]) + pytest.fail("Error evaluating %r expression\n" + " %s\n" + "%s" + %(self.name, self.expr, "\n".join(msg)), + pytrace=False) + + def _getglobals(self): + d = {'os': os, 'sys': sys, 'config': self.item.config} + d.update(self.item.obj.__globals__) + return d + + def _istrue(self): + if hasattr(self, 'result'): + return self.result + if self.holder: + d = self._getglobals() + if self.holder.args or 'condition' in self.holder.kwargs: + self.result = False + # "holder" might be a MarkInfo or a MarkDecorator; only + # MarkInfo keeps track of all parameters it received in an + # _arglist attribute + if hasattr(self.holder, '_arglist'): + arglist = self.holder._arglist + else: + arglist = [(self.holder.args, self.holder.kwargs)] + for args, kwargs in arglist: + if 'condition' in kwargs: + args = (kwargs['condition'],) + for expr in args: + self.expr = expr + if isinstance(expr, py.builtin._basestring): + result = cached_eval(self.item.config, expr, d) + else: + if "reason" not in kwargs: + # XXX better be checked at collection time + msg = "you need to specify reason=STRING " \ + "when using booleans as conditions." 
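[Editor's note] The skip/skipif/xfail marker behaviour documented in pytest_configure() above, and evaluated by MarkEvaluator, looks roughly like this from a test author's point of view; the conditions, reasons and test bodies are illustrative assumptions only.

```
import sys
import pytest


@pytest.mark.skip(reason="no way of currently testing this")
def test_always_skipped():
    assert False  # never executed


@pytest.mark.skipif('sys.platform == "win32"', reason="POSIX-only check")
def test_posix_only():
    # the condition string is eval()'d in the module's global context
    assert not sys.platform.startswith("win")


@pytest.mark.xfail(reason="known bug in the example code", strict=False)
def test_known_bug():
    # a failure is reported as xfail; a pass would be reported as XPASS
    assert 1 / 3 == 0.3
```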
+ pytest.fail(msg) + result = bool(expr) + if result: + self.result = True + self.reason = kwargs.get('reason', None) + self.expr = expr + return self.result + else: + self.result = True + return getattr(self, 'result', False) + + def get(self, attr, default=None): + return self.holder.kwargs.get(attr, default) + + def getexplanation(self): + expl = getattr(self, 'reason', None) or self.get('reason', None) + if not expl: + if not hasattr(self, 'expr'): + return "" + else: + return "condition: " + str(self.expr) + return expl + + +@pytest.hookimpl(tryfirst=True) +def pytest_runtest_setup(item): + # Check if skip or skipif are specified as pytest marks + + skipif_info = item.keywords.get('skipif') + if isinstance(skipif_info, (MarkInfo, MarkDecorator)): + eval_skipif = MarkEvaluator(item, 'skipif') + if eval_skipif.istrue(): + item._evalskip = eval_skipif + pytest.skip(eval_skipif.getexplanation()) + + skip_info = item.keywords.get('skip') + if isinstance(skip_info, (MarkInfo, MarkDecorator)): + item._evalskip = True + if 'reason' in skip_info.kwargs: + pytest.skip(skip_info.kwargs['reason']) + elif skip_info.args: + pytest.skip(skip_info.args[0]) + else: + pytest.skip("unconditional skip") + + item._evalxfail = MarkEvaluator(item, 'xfail') + check_xfail_no_run(item) + + +@pytest.mark.hookwrapper +def pytest_pyfunc_call(pyfuncitem): + check_xfail_no_run(pyfuncitem) + outcome = yield + passed = outcome.excinfo is None + if passed: + check_strict_xfail(pyfuncitem) + + +def check_xfail_no_run(item): + """check xfail(run=False)""" + if not item.config.option.runxfail: + evalxfail = item._evalxfail + if evalxfail.istrue(): + if not evalxfail.get('run', True): + pytest.xfail("[NOTRUN] " + evalxfail.getexplanation()) + + +def check_strict_xfail(pyfuncitem): + """check xfail(strict=True) for the given PASSING test""" + evalxfail = pyfuncitem._evalxfail + if evalxfail.istrue(): + strict_default = pyfuncitem.config.getini('xfail_strict') + is_strict_xfail = evalxfail.get('strict', strict_default) + if is_strict_xfail: + del pyfuncitem._evalxfail + explanation = evalxfail.getexplanation() + pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_runtest_makereport(item, call): + outcome = yield + rep = outcome.get_result() + evalxfail = getattr(item, '_evalxfail', None) + evalskip = getattr(item, '_evalskip', None) + # unitttest special case, see setting of _unexpectedsuccess + if hasattr(item, '_unexpectedsuccess') and rep.when == "call": + from _pytest.compat import _is_unittest_unexpected_success_a_failure + if item._unexpectedsuccess: + rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess) + else: + rep.longrepr = "Unexpected success" + if _is_unittest_unexpected_success_a_failure(): + rep.outcome = "failed" + else: + rep.outcome = "passed" + rep.wasxfail = rep.longrepr + elif item.config.option.runxfail: + pass # don't interefere + elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception): + rep.wasxfail = "reason: " + call.excinfo.value.msg + rep.outcome = "skipped" + elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \ + evalxfail.istrue(): + if call.excinfo: + if evalxfail.invalidraise(call.excinfo.value): + rep.outcome = "failed" + else: + rep.outcome = "skipped" + rep.wasxfail = evalxfail.getexplanation() + elif call.when == "call": + strict_default = item.config.getini('xfail_strict') + is_strict_xfail = evalxfail.get('strict', strict_default) + explanation = evalxfail.getexplanation() + 
if is_strict_xfail: + rep.outcome = "failed" + rep.longrepr = "[XPASS(strict)] {0}".format(explanation) + else: + rep.outcome = "passed" + rep.wasxfail = explanation + elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple: + # skipped by mark.skipif; change the location of the failure + # to point to the item definition, otherwise it will display + # the location of where the skip exception was raised within pytest + filename, line, reason = rep.longrepr + filename, line = item.location[:2] + rep.longrepr = filename, line, reason + +# called by terminalreporter progress reporting +def pytest_report_teststatus(report): + if hasattr(report, "wasxfail"): + if report.skipped: + return "xfailed", "x", "xfail" + elif report.passed: + return "xpassed", "X", ("XPASS", {'yellow': True}) + +# called by the terminalreporter instance/plugin +def pytest_terminal_summary(terminalreporter): + tr = terminalreporter + if not tr.reportchars: + #for name in "xfailed skipped failed xpassed": + # if not tr.stats.get(name, 0): + # tr.write_line("HINT: use '-r' option to see extra " + # "summary info about tests") + # break + return + + lines = [] + for char in tr.reportchars: + if char == "x": + show_xfailed(terminalreporter, lines) + elif char == "X": + show_xpassed(terminalreporter, lines) + elif char in "fF": + show_simple(terminalreporter, lines, 'failed', "FAIL %s") + elif char in "sS": + show_skipped(terminalreporter, lines) + elif char == "E": + show_simple(terminalreporter, lines, 'error', "ERROR %s") + elif char == 'p': + show_simple(terminalreporter, lines, 'passed', "PASSED %s") + + if lines: + tr._tw.sep("=", "short test summary info") + for line in lines: + tr._tw.line(line) + +def show_simple(terminalreporter, lines, stat, format): + failed = terminalreporter.stats.get(stat) + if failed: + for rep in failed: + pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) + lines.append(format %(pos,)) + +def show_xfailed(terminalreporter, lines): + xfailed = terminalreporter.stats.get("xfailed") + if xfailed: + for rep in xfailed: + pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) + reason = rep.wasxfail + lines.append("XFAIL %s" % (pos,)) + if reason: + lines.append(" " + str(reason)) + +def show_xpassed(terminalreporter, lines): + xpassed = terminalreporter.stats.get("xpassed") + if xpassed: + for rep in xpassed: + pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) + reason = rep.wasxfail + lines.append("XPASS %s %s" %(pos, reason)) + +def cached_eval(config, expr, d): + if not hasattr(config, '_evalcache'): + config._evalcache = {} + try: + return config._evalcache[expr] + except KeyError: + import _pytest._code + exprcode = _pytest._code.compile(expr, mode="eval") + config._evalcache[expr] = x = eval(exprcode, d) + return x + + +def folded_skips(skipped): + d = {} + for event in skipped: + key = event.longrepr + assert len(key) == 3, (event, key) + d.setdefault(key, []).append(event) + l = [] + for key, events in d.items(): + l.append((len(events),) + key) + return l + +def show_skipped(terminalreporter, lines): + tr = terminalreporter + skipped = tr.stats.get('skipped', []) + if skipped: + #if not tr.hasopt('skipped'): + # tr.write_line( + # "%d skipped tests, specify -rs for more info" % + # len(skipped)) + # return + fskips = folded_skips(skipped) + if fskips: + #tr.write_sep("_", "skipped test summary") + for num, fspath, lineno, reason in fskips: + if reason.startswith("Skipped: "): + reason = reason[9:] + lines.append("SKIP [%d] %s:%d: %s" % + 
(num, fspath, lineno, reason)) diff --git a/lib/spack/external/_pytest/terminal.py b/lib/spack/external/_pytest/terminal.py new file mode 100644 index 0000000000..16bf757338 --- /dev/null +++ b/lib/spack/external/_pytest/terminal.py @@ -0,0 +1,593 @@ +""" terminal reporting of the full testing process. + +This is a good source for looking at the various reporting hooks. +""" +from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \ + EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED +import pytest +import py +import sys +import time +import platform + +import _pytest._pluggy as pluggy + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "reporting", after="general") + group._addoption('-v', '--verbose', action="count", + dest="verbose", default=0, help="increase verbosity."), + group._addoption('-q', '--quiet', action="count", + dest="quiet", default=0, help="decrease verbosity."), + group._addoption('-r', + action="store", dest="reportchars", default='', metavar="chars", + help="show extra test summary info as specified by chars (f)ailed, " + "(E)error, (s)skipped, (x)failed, (X)passed, " + "(p)passed, (P)passed with output, (a)all except pP. " + "The pytest warnings are displayed at all times except when " + "--disable-pytest-warnings is set") + group._addoption('--disable-pytest-warnings', default=False, + dest='disablepytestwarnings', action='store_true', + help='disable warnings summary, overrides -r w flag') + group._addoption('-l', '--showlocals', + action="store_true", dest="showlocals", default=False, + help="show locals in tracebacks (disabled by default).") + group._addoption('--tb', metavar="style", + action="store", dest="tbstyle", default='auto', + choices=['auto', 'long', 'short', 'no', 'line', 'native'], + help="traceback print mode (auto/long/short/line/native/no).") + group._addoption('--fulltrace', '--full-trace', + action="store_true", default=False, + help="don't cut any tracebacks (default is to cut).") + group._addoption('--color', metavar="color", + action="store", dest="color", default='auto', + choices=['yes', 'no', 'auto'], + help="color terminal output (yes/no/auto).") + +def pytest_configure(config): + config.option.verbose -= config.option.quiet + reporter = TerminalReporter(config, sys.stdout) + config.pluginmanager.register(reporter, 'terminalreporter') + if config.option.debug or config.option.traceconfig: + def mywriter(tags, args): + msg = " ".join(map(str, args)) + reporter.write_line("[traceconfig] " + msg) + config.trace.root.setprocessor("pytest:config", mywriter) + +def getreportopt(config): + reportopts = "" + reportchars = config.option.reportchars + if not config.option.disablepytestwarnings and 'w' not in reportchars: + reportchars += 'w' + elif config.option.disablepytestwarnings and 'w' in reportchars: + reportchars = reportchars.replace('w', '') + if reportchars: + for char in reportchars: + if char not in reportopts and char != 'a': + reportopts += char + elif char == 'a': + reportopts = 'fEsxXw' + return reportopts + +def pytest_report_teststatus(report): + if report.passed: + letter = "." 
+ elif report.skipped: + letter = "s" + elif report.failed: + letter = "F" + if report.when != "call": + letter = "f" + return report.outcome, letter, report.outcome.upper() + +class WarningReport: + def __init__(self, code, message, nodeid=None, fslocation=None): + self.code = code + self.message = message + self.nodeid = nodeid + self.fslocation = fslocation + + +class TerminalReporter: + def __init__(self, config, file=None): + import _pytest.config + self.config = config + self.verbosity = self.config.option.verbose + self.showheader = self.verbosity >= 0 + self.showfspath = self.verbosity >= 0 + self.showlongtestinfo = self.verbosity > 0 + self._numcollected = 0 + + self.stats = {} + self.startdir = py.path.local() + if file is None: + file = sys.stdout + self._tw = self.writer = _pytest.config.create_terminal_writer(config, + file) + self.currentfspath = None + self.reportchars = getreportopt(config) + self.hasmarkup = self._tw.hasmarkup + self.isatty = file.isatty() + + def hasopt(self, char): + char = {'xfailed': 'x', 'skipped': 's'}.get(char, char) + return char in self.reportchars + + def write_fspath_result(self, nodeid, res): + fspath = self.config.rootdir.join(nodeid.split("::")[0]) + if fspath != self.currentfspath: + self.currentfspath = fspath + fspath = self.startdir.bestrelpath(fspath) + self._tw.line() + self._tw.write(fspath + " ") + self._tw.write(res) + + def write_ensure_prefix(self, prefix, extra="", **kwargs): + if self.currentfspath != prefix: + self._tw.line() + self.currentfspath = prefix + self._tw.write(prefix) + if extra: + self._tw.write(extra, **kwargs) + self.currentfspath = -2 + + def ensure_newline(self): + if self.currentfspath: + self._tw.line() + self.currentfspath = None + + def write(self, content, **markup): + self._tw.write(content, **markup) + + def write_line(self, line, **markup): + if not py.builtin._istext(line): + line = py.builtin.text(line, errors="replace") + self.ensure_newline() + self._tw.line(line, **markup) + + def rewrite(self, line, **markup): + line = str(line) + self._tw.write("\r" + line, **markup) + + def write_sep(self, sep, title=None, **markup): + self.ensure_newline() + self._tw.sep(sep, title, **markup) + + def section(self, title, sep="=", **kw): + self._tw.sep(sep, title, **kw) + + def line(self, msg, **kw): + self._tw.line(msg, **kw) + + def pytest_internalerror(self, excrepr): + for line in py.builtin.text(excrepr).split("\n"): + self.write_line("INTERNALERROR> " + line) + return 1 + + def pytest_logwarning(self, code, fslocation, message, nodeid): + warnings = self.stats.setdefault("warnings", []) + if isinstance(fslocation, tuple): + fslocation = "%s:%d" % fslocation + warning = WarningReport(code=code, fslocation=fslocation, + message=message, nodeid=nodeid) + warnings.append(warning) + + def pytest_plugin_registered(self, plugin): + if self.config.option.traceconfig: + msg = "PLUGIN registered: %s" % (plugin,) + # XXX this event may happen during setup/teardown time + # which unfortunately captures our output here + # which garbles our output if we use self.write_line + self.write_line(msg) + + def pytest_deselected(self, items): + self.stats.setdefault('deselected', []).extend(items) + + def pytest_runtest_logstart(self, nodeid, location): + # ensure that the path is printed before the + # 1st test of a module starts running + if self.showlongtestinfo: + line = self._locationline(nodeid, *location) + self.write_ensure_prefix(line, "") + elif self.showfspath: + fsid = nodeid.split("::")[0] + 
self.write_fspath_result(fsid, "") + + def pytest_runtest_logreport(self, report): + rep = report + res = self.config.hook.pytest_report_teststatus(report=rep) + cat, letter, word = res + self.stats.setdefault(cat, []).append(rep) + self._tests_ran = True + if not letter and not word: + # probably passed setup/teardown + return + if self.verbosity <= 0: + if not hasattr(rep, 'node') and self.showfspath: + self.write_fspath_result(rep.nodeid, letter) + else: + self._tw.write(letter) + else: + if isinstance(word, tuple): + word, markup = word + else: + if rep.passed: + markup = {'green':True} + elif rep.failed: + markup = {'red':True} + elif rep.skipped: + markup = {'yellow':True} + line = self._locationline(rep.nodeid, *rep.location) + if not hasattr(rep, 'node'): + self.write_ensure_prefix(line, word, **markup) + #self._tw.write(word, **markup) + else: + self.ensure_newline() + if hasattr(rep, 'node'): + self._tw.write("[%s] " % rep.node.gateway.id) + self._tw.write(word, **markup) + self._tw.write(" " + line) + self.currentfspath = -2 + + def pytest_collection(self): + if not self.isatty and self.config.option.verbose >= 1: + self.write("collecting ... ", bold=True) + + def pytest_collectreport(self, report): + if report.failed: + self.stats.setdefault("error", []).append(report) + elif report.skipped: + self.stats.setdefault("skipped", []).append(report) + items = [x for x in report.result if isinstance(x, pytest.Item)] + self._numcollected += len(items) + if self.isatty: + #self.write_fspath_result(report.nodeid, 'E') + self.report_collect() + + def report_collect(self, final=False): + if self.config.option.verbose < 0: + return + + errors = len(self.stats.get('error', [])) + skipped = len(self.stats.get('skipped', [])) + if final: + line = "collected " + else: + line = "collecting " + line += str(self._numcollected) + " items" + if errors: + line += " / %d errors" % errors + if skipped: + line += " / %d skipped" % skipped + if self.isatty: + if final: + line += " \n" + self.rewrite(line, bold=True) + else: + self.write_line(line) + + def pytest_collection_modifyitems(self): + self.report_collect(True) + + @pytest.hookimpl(trylast=True) + def pytest_sessionstart(self, session): + self._sessionstarttime = time.time() + if not self.showheader: + return + self.write_sep("=", "test session starts", bold=True) + verinfo = platform.python_version() + msg = "platform %s -- Python %s" % (sys.platform, verinfo) + if hasattr(sys, 'pypy_version_info'): + verinfo = ".".join(map(str, sys.pypy_version_info[:3])) + msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3]) + msg += ", pytest-%s, py-%s, pluggy-%s" % ( + pytest.__version__, py.__version__, pluggy.__version__) + if self.verbosity > 0 or self.config.option.debug or \ + getattr(self.config.option, 'pastebin', None): + msg += " -- " + str(sys.executable) + self.write_line(msg) + lines = self.config.hook.pytest_report_header( + config=self.config, startdir=self.startdir) + lines.reverse() + for line in flatten(lines): + self.write_line(line) + + def pytest_report_header(self, config): + inifile = "" + if config.inifile: + inifile = config.rootdir.bestrelpath(config.inifile) + lines = ["rootdir: %s, inifile: %s" %(config.rootdir, inifile)] + + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + + lines.append( + "plugins: %s" % ", ".join(_plugin_nameversions(plugininfo))) + return lines + + def pytest_collection_finish(self, session): + if self.config.option.collectonly: + self._printcollecteditems(session.items) + if 
self.stats.get('failed'): + self._tw.sep("!", "collection failures") + for rep in self.stats.get('failed'): + rep.toterminal(self._tw) + return 1 + return 0 + if not self.showheader: + return + #for i, testarg in enumerate(self.config.args): + # self.write_line("test path %d: %s" %(i+1, testarg)) + + def _printcollecteditems(self, items): + # to print out items and their parent collectors + # we take care to leave out Instances aka () + # because later versions are going to get rid of them anyway + if self.config.option.verbose < 0: + if self.config.option.verbose < -1: + counts = {} + for item in items: + name = item.nodeid.split('::', 1)[0] + counts[name] = counts.get(name, 0) + 1 + for name, count in sorted(counts.items()): + self._tw.line("%s: %d" % (name, count)) + else: + for item in items: + nodeid = item.nodeid + nodeid = nodeid.replace("::()::", "::") + self._tw.line(nodeid) + return + stack = [] + indent = "" + for item in items: + needed_collectors = item.listchain()[1:] # strip root node + while stack: + if stack == needed_collectors[:len(stack)]: + break + stack.pop() + for col in needed_collectors[len(stack):]: + stack.append(col) + #if col.name == "()": + # continue + indent = (len(stack) - 1) * " " + self._tw.line("%s%s" % (indent, col)) + + @pytest.hookimpl(hookwrapper=True) + def pytest_sessionfinish(self, exitstatus): + outcome = yield + outcome.get_result() + self._tw.line("") + summary_exit_codes = ( + EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR, + EXIT_NOTESTSCOLLECTED) + if exitstatus in summary_exit_codes: + self.config.hook.pytest_terminal_summary(terminalreporter=self, + exitstatus=exitstatus) + self.summary_errors() + self.summary_failures() + self.summary_warnings() + self.summary_passes() + if exitstatus == EXIT_INTERRUPTED: + self._report_keyboardinterrupt() + del self._keyboardinterrupt_memo + self.summary_deselected() + self.summary_stats() + + def pytest_keyboard_interrupt(self, excinfo): + self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) + + def pytest_unconfigure(self): + if hasattr(self, '_keyboardinterrupt_memo'): + self._report_keyboardinterrupt() + + def _report_keyboardinterrupt(self): + excrepr = self._keyboardinterrupt_memo + msg = excrepr.reprcrash.message + self.write_sep("!", msg) + if "KeyboardInterrupt" in msg: + if self.config.option.fulltrace: + excrepr.toterminal(self._tw) + else: + self._tw.line("to show a full traceback on KeyboardInterrupt use --fulltrace", yellow=True) + excrepr.reprcrash.toterminal(self._tw) + + def _locationline(self, nodeid, fspath, lineno, domain): + def mkrel(nodeid): + line = self.config.cwd_relative_nodeid(nodeid) + if domain and line.endswith(domain): + line = line[:-len(domain)] + l = domain.split("[") + l[0] = l[0].replace('.', '::') # don't replace '.' in params + line += "[".join(l) + return line + # collect_fspath comes from testid which has a "/"-normalized path + + if fspath: + res = mkrel(nodeid).replace("::()", "") # parens-normalization + if nodeid.split("::")[0] != fspath.replace("\\", "/"): + res += " <- " + self.startdir.bestrelpath(fspath) + else: + res = "[location]" + return res + " " + + def _getfailureheadline(self, rep): + if hasattr(rep, 'location'): + fspath, lineno, domain = rep.location + return domain + else: + return "test session" # XXX? 
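[Editor's note] pytest_sessionstart() above gathers extra header lines through the pytest_report_header hook (TerminalReporter itself contributes the rootdir/plugins lines). As a hedged sketch, a project conftest.py could add its own line like this; the text is invented for the example.

```
# Hypothetical conftest.py: contribute one extra line to the header that
# TerminalReporter.pytest_sessionstart() prints.
def pytest_report_header(config, startdir):
    return ["example-project: rootdir is %s" % config.rootdir]
```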
+ + def _getcrashline(self, rep): + try: + return str(rep.longrepr.reprcrash) + except AttributeError: + try: + return str(rep.longrepr)[:50] + except AttributeError: + return "" + + # + # summaries for sessionfinish + # + def getreports(self, name): + l = [] + for x in self.stats.get(name, []): + if not hasattr(x, '_pdbshown'): + l.append(x) + return l + + def summary_warnings(self): + if self.hasopt("w"): + warnings = self.stats.get("warnings") + if not warnings: + return + self.write_sep("=", "pytest-warning summary") + for w in warnings: + self._tw.line("W%s %s %s" % (w.code, + w.fslocation, w.message)) + + def summary_passes(self): + if self.config.option.tbstyle != "no": + if self.hasopt("P"): + reports = self.getreports('passed') + if not reports: + return + self.write_sep("=", "PASSES") + for rep in reports: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg) + self._outrep_summary(rep) + + def print_teardown_sections(self, rep): + for secname, content in rep.sections: + if 'teardown' in secname: + self._tw.sep('-', secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + + def summary_failures(self): + if self.config.option.tbstyle != "no": + reports = self.getreports('failed') + if not reports: + return + self.write_sep("=", "FAILURES") + for rep in reports: + if self.config.option.tbstyle == "line": + line = self._getcrashline(rep) + self.write_line(line) + else: + msg = self._getfailureheadline(rep) + markup = {'red': True, 'bold': True} + self.write_sep("_", msg, **markup) + self._outrep_summary(rep) + for report in self.getreports(''): + if report.nodeid == rep.nodeid and report.when == 'teardown': + self.print_teardown_sections(report) + + def summary_errors(self): + if self.config.option.tbstyle != "no": + reports = self.getreports('error') + if not reports: + return + self.write_sep("=", "ERRORS") + for rep in self.stats['error']: + msg = self._getfailureheadline(rep) + if not hasattr(rep, 'when'): + # collect + msg = "ERROR collecting " + msg + elif rep.when == "setup": + msg = "ERROR at setup of " + msg + elif rep.when == "teardown": + msg = "ERROR at teardown of " + msg + self.write_sep("_", msg) + self._outrep_summary(rep) + + def _outrep_summary(self, rep): + rep.toterminal(self._tw) + for secname, content in rep.sections: + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_stats(self): + session_duration = time.time() - self._sessionstarttime + (line, color) = build_summary_stats_line(self.stats) + msg = "%s in %.2f seconds" % (line, session_duration) + markup = {color: True, 'bold': True} + + if self.verbosity >= 0: + self.write_sep("=", msg, **markup) + if self.verbosity == -1: + self.write_line(msg, **markup) + + def summary_deselected(self): + if 'deselected' in self.stats: + self.write_sep("=", "%d tests deselected" % ( + len(self.stats['deselected'])), bold=True) + +def repr_pythonversion(v=None): + if v is None: + v = sys.version_info + try: + return "%s.%s.%s-%s-%s" % v + except (TypeError, ValueError): + return str(v) + +def flatten(l): + for x in l: + if isinstance(x, (list, tuple)): + for y in flatten(x): + yield y + else: + yield x + +def build_summary_stats_line(stats): + keys = ("failed passed skipped deselected " + "xfailed xpassed warnings error").split() + key_translation = {'warnings': 'pytest-warnings'} + unknown_key_seen = False + for key in stats.keys(): + if key not in keys: + if key: # setup/teardown reports have an empty key, 
ignore them + keys.append(key) + unknown_key_seen = True + parts = [] + for key in keys: + val = stats.get(key, None) + if val: + key_name = key_translation.get(key, key) + parts.append("%d %s" % (len(val), key_name)) + + if parts: + line = ", ".join(parts) + else: + line = "no tests ran" + + if 'failed' in stats or 'error' in stats: + color = 'red' + elif 'warnings' in stats or unknown_key_seen: + color = 'yellow' + elif 'passed' in stats: + color = 'green' + else: + color = 'yellow' + + return (line, color) + + +def _plugin_nameversions(plugininfo): + l = [] + for plugin, dist in plugininfo: + # gets us name and version! + name = '{dist.project_name}-{dist.version}'.format(dist=dist) + # questionable convenience, but it keeps things short + if name.startswith("pytest-"): + name = name[7:] + # we decided to print python package names + # they can have more than one plugin + if name not in l: + l.append(name) + return l diff --git a/lib/spack/external/_pytest/tmpdir.py b/lib/spack/external/_pytest/tmpdir.py new file mode 100644 index 0000000000..28a6b06366 --- /dev/null +++ b/lib/spack/external/_pytest/tmpdir.py @@ -0,0 +1,124 @@ +""" support for providing temporary directories to test functions. """ +import re + +import pytest +import py +from _pytest.monkeypatch import MonkeyPatch + + +class TempdirFactory: + """Factory for temporary directories under the common base temp directory. + + The base directory can be configured using the ``--basetemp`` option. + """ + + def __init__(self, config): + self.config = config + self.trace = config.trace.get("tmpdir") + + def ensuretemp(self, string, dir=1): + """ (deprecated) return temporary directory path with + the given string as the trailing part. It is usually + better to use the 'tmpdir' function argument which + provides an empty unique-per-test-invocation directory + and is guaranteed to be empty. + """ + #py.log._apiwarn(">1.1", "use tmpdir function argument") + return self.getbasetemp().ensure(string, dir=dir) + + def mktemp(self, basename, numbered=True): + """Create a subdirectory of the base temporary directory and return it. + If ``numbered``, ensure the directory is unique by adding a number + prefix greater than any existing one. + """ + basetemp = self.getbasetemp() + if not numbered: + p = basetemp.mkdir(basename) + else: + p = py.path.local.make_numbered_dir(prefix=basename, + keep=0, rootdir=basetemp, lock_timeout=None) + self.trace("mktemp", p) + return p + + def getbasetemp(self): + """ return base temporary directory. """ + try: + return self._basetemp + except AttributeError: + basetemp = self.config.option.basetemp + if basetemp: + basetemp = py.path.local(basetemp) + if basetemp.check(): + basetemp.remove() + basetemp.mkdir() + else: + temproot = py.path.local.get_temproot() + user = get_user() + if user: + # use a sub-directory in the temproot to speed-up + # make_numbered_dir() call + rootdir = temproot.join('pytest-of-%s' % user) + else: + rootdir = temproot + rootdir.ensure(dir=1) + basetemp = py.path.local.make_numbered_dir(prefix='pytest-', + rootdir=rootdir) + self._basetemp = t = basetemp.realpath() + self.trace("new basetemp", t) + return t + + def finish(self): + self.trace("finish") + + +def get_user(): + """Return the current user name, or None if getuser() does not work + in the current environment (see #1010). 
+ """ + import getpass + try: + return getpass.getuser() + except (ImportError, KeyError): + return None + + +# backward compatibility +TempdirHandler = TempdirFactory + + +def pytest_configure(config): + """Create a TempdirFactory and attach it to the config object. + + This is to comply with existing plugins which expect the handler to be + available at pytest_configure time, but ideally should be moved entirely + to the tmpdir_factory session fixture. + """ + mp = MonkeyPatch() + t = TempdirFactory(config) + config._cleanup.extend([mp.undo, t.finish]) + mp.setattr(config, '_tmpdirhandler', t, raising=False) + mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False) + + +@pytest.fixture(scope='session') +def tmpdir_factory(request): + """Return a TempdirFactory instance for the test session. + """ + return request.config._tmpdirhandler + + +@pytest.fixture +def tmpdir(request, tmpdir_factory): + """Return a temporary directory path object + which is unique to each test function invocation, + created as a sub directory of the base temporary + directory. The returned object is a `py.path.local`_ + path object. + """ + name = request.node.name + name = re.sub("[\W]", "_", name) + MAXVAL = 30 + if len(name) > MAXVAL: + name = name[:MAXVAL] + x = tmpdir_factory.mktemp(name, numbered=True) + return x diff --git a/lib/spack/external/_pytest/unittest.py b/lib/spack/external/_pytest/unittest.py new file mode 100644 index 0000000000..73224010b2 --- /dev/null +++ b/lib/spack/external/_pytest/unittest.py @@ -0,0 +1,217 @@ +""" discovery and running of std-library "unittest" style tests. """ +from __future__ import absolute_import + +import sys +import traceback + +import pytest +# for transfering markers +import _pytest._code +from _pytest.python import transfer_markers +from _pytest.skipping import MarkEvaluator + + +def pytest_pycollect_makeitem(collector, name, obj): + # has unittest been imported and is obj a subclass of its TestCase? 
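[Editor's note] The tmpdir and tmpdir_factory fixtures defined in tmpdir.py above are used as in the following sketch; the file and directory names are arbitrary examples.

```
def test_write_and_read(tmpdir):
    # tmpdir is a py.path.local unique to this test invocation
    p = tmpdir.join("hello.txt")
    p.write("content")
    assert p.read() == "content"
    assert len(tmpdir.listdir()) == 1


def test_shared_directory(tmpdir_factory):
    # tmpdir_factory is session-scoped; mktemp() makes a numbered
    # subdirectory under the base temp dir (see --basetemp)
    d = tmpdir_factory.mktemp("data")
    f = d.join("records.csv")
    f.write("a,b\n1,2\n")
    assert f.check(file=1)
```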
+ try: + if not issubclass(obj, sys.modules["unittest"].TestCase): + return + except Exception: + return + # yes, so let's collect it + return UnitTestCase(name, parent=collector) + + +class UnitTestCase(pytest.Class): + # marker for fixturemanger.getfixtureinfo() + # to declare that our children do not support funcargs + nofuncargs = True + + def setup(self): + cls = self.obj + if getattr(cls, '__unittest_skip__', False): + return # skipped + setup = getattr(cls, 'setUpClass', None) + if setup is not None: + setup() + teardown = getattr(cls, 'tearDownClass', None) + if teardown is not None: + self.addfinalizer(teardown) + super(UnitTestCase, self).setup() + + def collect(self): + from unittest import TestLoader + cls = self.obj + if not getattr(cls, "__test__", True): + return + self.session._fixturemanager.parsefactories(self, unittest=True) + loader = TestLoader() + module = self.getparent(pytest.Module).obj + foundsomething = False + for name in loader.getTestCaseNames(self.obj): + x = getattr(self.obj, name) + if not getattr(x, '__test__', True): + continue + funcobj = getattr(x, 'im_func', x) + transfer_markers(funcobj, cls, module) + yield TestCaseFunction(name, parent=self) + foundsomething = True + + if not foundsomething: + runtest = getattr(self.obj, 'runTest', None) + if runtest is not None: + ut = sys.modules.get("twisted.trial.unittest", None) + if ut is None or runtest != ut.TestCase.runTest: + yield TestCaseFunction('runTest', parent=self) + + + +class TestCaseFunction(pytest.Function): + _excinfo = None + + def setup(self): + self._testcase = self.parent.obj(self.name) + self._fix_unittest_skip_decorator() + self._obj = getattr(self._testcase, self.name) + if hasattr(self._testcase, 'setup_method'): + self._testcase.setup_method(self._obj) + if hasattr(self, "_request"): + self._request._fillfixtures() + + def _fix_unittest_skip_decorator(self): + """ + The @unittest.skip decorator calls functools.wraps(self._testcase) + The call to functools.wraps() fails unless self._testcase + has a __name__ attribute. This is usually automatically supplied + if the test is a function or method, but we need to add manually + here. + + See issue #1169 + """ + if sys.version_info[0] == 2: + setattr(self._testcase, "__name__", self.name) + + def teardown(self): + if hasattr(self._testcase, 'teardown_method'): + self._testcase.teardown_method(self._obj) + # Allow garbage collection on TestCase instance attributes. 
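[Editor's note] A plain unittest.TestCase such as the one below (class and method names invented) is what UnitTestCase above discovers via TestLoader.getTestCaseNames(); its setUpClass runs through UnitTestCase.setup(), and @unittest.skip feeds the addSkip() path.

```
import unittest


class TestNumbers(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # run once by UnitTestCase.setup() before the first test method
        cls.values = [1, 2, 3]

    def test_sum(self):
        self.assertEqual(sum(self.values), 6)

    @unittest.skip("demonstrates the addSkip() reporting path")
    def test_skipped(self):
        self.fail("never runs")
```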
+ self._testcase = None + self._obj = None + + def startTest(self, testcase): + pass + + def _addexcinfo(self, rawexcinfo): + # unwrap potential exception info (see twisted trial support below) + rawexcinfo = getattr(rawexcinfo, '_rawexcinfo', rawexcinfo) + try: + excinfo = _pytest._code.ExceptionInfo(rawexcinfo) + except TypeError: + try: + try: + l = traceback.format_exception(*rawexcinfo) + l.insert(0, "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n") + pytest.fail("".join(l), pytrace=False) + except (pytest.fail.Exception, KeyboardInterrupt): + raise + except: + pytest.fail("ERROR: Unknown Incompatible Exception " + "representation:\n%r" %(rawexcinfo,), pytrace=False) + except KeyboardInterrupt: + raise + except pytest.fail.Exception: + excinfo = _pytest._code.ExceptionInfo() + self.__dict__.setdefault('_excinfo', []).append(excinfo) + + def addError(self, testcase, rawexcinfo): + self._addexcinfo(rawexcinfo) + def addFailure(self, testcase, rawexcinfo): + self._addexcinfo(rawexcinfo) + + def addSkip(self, testcase, reason): + try: + pytest.skip(reason) + except pytest.skip.Exception: + self._evalskip = MarkEvaluator(self, 'SkipTest') + self._evalskip.result = True + self._addexcinfo(sys.exc_info()) + + def addExpectedFailure(self, testcase, rawexcinfo, reason=""): + try: + pytest.xfail(str(reason)) + except pytest.xfail.Exception: + self._addexcinfo(sys.exc_info()) + + def addUnexpectedSuccess(self, testcase, reason=""): + self._unexpectedsuccess = reason + + def addSuccess(self, testcase): + pass + + def stopTest(self, testcase): + pass + + def runtest(self): + if self.config.pluginmanager.get_plugin("pdbinvoke") is None: + self._testcase(result=self) + else: + # disables tearDown and cleanups for post mortem debugging (see #1890) + self._testcase.debug() + + + def _prunetraceback(self, excinfo): + pytest.Function._prunetraceback(self, excinfo) + traceback = excinfo.traceback.filter( + lambda x:not x.frame.f_globals.get('__unittest')) + if traceback: + excinfo.traceback = traceback + +@pytest.hookimpl(tryfirst=True) +def pytest_runtest_makereport(item, call): + if isinstance(item, TestCaseFunction): + if item._excinfo: + call.excinfo = item._excinfo.pop(0) + try: + del call.result + except AttributeError: + pass + +# twisted trial support + +@pytest.hookimpl(hookwrapper=True) +def pytest_runtest_protocol(item): + if isinstance(item, TestCaseFunction) and \ + 'twisted.trial.unittest' in sys.modules: + ut = sys.modules['twisted.python.failure'] + Failure__init__ = ut.Failure.__init__ + check_testcase_implements_trial_reporter() + + def excstore(self, exc_value=None, exc_type=None, exc_tb=None, + captureVars=None): + if exc_value is None: + self._rawexcinfo = sys.exc_info() + else: + if exc_type is None: + exc_type = type(exc_value) + self._rawexcinfo = (exc_type, exc_value, exc_tb) + try: + Failure__init__(self, exc_value, exc_type, exc_tb, + captureVars=captureVars) + except TypeError: + Failure__init__(self, exc_value, exc_type, exc_tb) + + ut.Failure.__init__ = excstore + yield + ut.Failure.__init__ = Failure__init__ + else: + yield + + +def check_testcase_implements_trial_reporter(done=[]): + if done: + return + from zope.interface import classImplements + from twisted.trial.itrial import IReporter + classImplements(TestCaseFunction, IReporter) + done.append(1) diff --git a/lib/spack/external/_pytest/vendored_packages/README.md b/lib/spack/external/_pytest/vendored_packages/README.md new file mode 100644 index 0000000000..b5fe6febb0 --- /dev/null +++ 
b/lib/spack/external/_pytest/vendored_packages/README.md @@ -0,0 +1,13 @@ +This directory vendors the `pluggy` module. + +For a more detailed discussion for the reasons to vendoring this +package, please see [this issue](https://github.com/pytest-dev/pytest/issues/944). + +To update the current version, execute: + +``` +$ pip install -U pluggy== --no-compile --target=_pytest/vendored_packages +``` + +And commit the modified files. The `pluggy-.dist-info` directory +created by `pip` should be added as well. diff --git a/lib/spack/external/_pytest/vendored_packages/__init__.py b/lib/spack/external/_pytest/vendored_packages/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000000..da0e7a6ed7 --- /dev/null +++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst @@ -0,0 +1,11 @@ + +Plugin registration and hook calling for Python +=============================================== + +This is the plugin manager as used by pytest but stripped +of pytest specific details. + +During the 0.x series this plugin does not have much documentation +except extensive docstrings in the pluggy.py module. + + diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER new file mode 100644 index 0000000000..a1b589e38a --- /dev/null +++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt new file mode 100644 index 0000000000..121017d086 --- /dev/null +++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 holger krekel (rather uses bitbucket/hpk42) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA new file mode 100644 index 0000000000..bd88517c94 --- /dev/null +++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA @@ -0,0 +1,40 @@ +Metadata-Version: 2.0 +Name: pluggy +Version: 0.4.0 +Summary: plugin and hook calling mechanisms for python +Home-page: https://github.com/pytest-dev/pluggy +Author: Holger Krekel +Author-email: holger at merlinux.eu +License: MIT license +Platform: unix +Platform: linux +Platform: osx +Platform: win32 +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: POSIX +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Topic :: Software Development :: Testing +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 + + +Plugin registration and hook calling for Python +=============================================== + +This is the plugin manager as used by pytest but stripped +of pytest specific details. + +During the 0.x series this plugin does not have much documentation +except extensive docstrings in the pluggy.py module. + + diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD new file mode 100644 index 0000000000..3003a3bf2b --- /dev/null +++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD @@ -0,0 +1,9 @@ +pluggy.py,sha256=u0oG9cv-oLOkNvEBlwnnu8pp1AyxpoERgUO00S3rvpQ,31543 +pluggy-0.4.0.dist-info/DESCRIPTION.rst,sha256=ltvjkFd40LW_xShthp6RRVM6OB_uACYDFR3kTpKw7o4,307 +pluggy-0.4.0.dist-info/LICENSE.txt,sha256=ruwhUOyV1HgE9F35JVL9BCZ9vMSALx369I4xq9rhpkM,1134 +pluggy-0.4.0.dist-info/METADATA,sha256=pe2hbsqKFaLHC6wAQPpFPn0KlpcPfLBe_BnS4O70bfk,1364 +pluggy-0.4.0.dist-info/RECORD,, +pluggy-0.4.0.dist-info/WHEEL,sha256=9Z5Xm-eel1bTS7e6ogYiKz0zmPEqDwIypurdHN1hR40,116 +pluggy-0.4.0.dist-info/metadata.json,sha256=T3go5L2qOa_-H-HpCZi3EoVKb8sZ3R-fOssbkWo2nvM,1119 +pluggy-0.4.0.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7 +pluggy-0.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL new file mode 100644 index 0000000000..8b6dd1b5a8 --- /dev/null +++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json new file mode 100644 index 0000000000..cde22aff02 --- /dev/null +++ 
b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X", "Topic :: Software Development :: Testing", "Topic :: Software Development :: Libraries", "Topic :: Utilities", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"], "extensions": {"python.details": {"contacts": [{"email": "holger at merlinux.eu", "name": "Holger Krekel", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "https://github.com/pytest-dev/pluggy"}}}, "generator": "bdist_wheel (0.29.0)", "license": "MIT license", "metadata_version": "2.0", "name": "pluggy", "platform": "unix", "summary": "plugin and hook calling mechanisms for python", "version": "0.4.0"} \ No newline at end of file diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt new file mode 100644 index 0000000000..11bdb5c1f5 --- /dev/null +++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt @@ -0,0 +1 @@ +pluggy diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy.py b/lib/spack/external/_pytest/vendored_packages/pluggy.py new file mode 100644 index 0000000000..9c13932b36 --- /dev/null +++ b/lib/spack/external/_pytest/vendored_packages/pluggy.py @@ -0,0 +1,802 @@ +""" +PluginManager, basic initialization and tracing. + +pluggy is the cristallized core of plugin management as used +by some 150 plugins for pytest. + +Pluggy uses semantic versioning. Breaking changes are only foreseen for +Major releases (incremented X in "X.Y.Z"). If you want to use pluggy in +your project you should thus use a dependency restriction like +"pluggy>=0.1.0,<1.0" to avoid surprises. + +pluggy is concerned with hook specification, hook implementations and hook +calling. For any given hook specification a hook call invokes up to N implementations. +A hook implementation can influence its position and type of execution: +if attributed "tryfirst" or "trylast" it will be tried to execute +first or last. However, if attributed "hookwrapper" an implementation +can wrap all calls to non-hookwrapper implementations. A hookwrapper +can thus execute some code ahead and after the execution of other hooks. + +Hook specification is done by way of a regular python function where +both the function name and the names of all its arguments are significant. +Each hook implementation function is verified against the original specification +function, including the names of all its arguments. To allow for hook specifications +to evolve over the livetime of a project, hook implementations can +accept less arguments. One can thus add new arguments and semantics to +a hook specification by adding another argument typically without breaking +existing hook implementations. + +The chosen approach is meant to let a hook designer think carefuly about +which objects are needed by an extension writer. 
By contrast, subclass-based +extension mechanisms often expose a lot more state and behaviour than needed, +thus restricting future developments. + +Pluggy currently consists of functionality for: + +- a way to register new hook specifications. Without a hook + specification no hook calling can be performed. + +- a registry of plugins which contain hook implementation functions. It + is possible to register plugins for which a hook specification is not yet + known and validate all hooks when the system is in a more referentially + consistent state. Setting an "optionalhook" attribution to a hook + implementation will avoid PluginValidationError's if a specification + is missing. This allows to have optional integration between plugins. + +- a "hook" relay object from which you can launch 1:N calls to + registered hook implementation functions + +- a mechanism for ordering hook implementation functions + +- mechanisms for two different type of 1:N calls: "firstresult" for when + the call should stop when the first implementation returns a non-None result. + And the other (default) way of guaranteeing that all hook implementations + will be called and their non-None result collected. + +- mechanisms for "historic" extension points such that all newly + registered functions will receive all hook calls that happened + before their registration. + +- a mechanism for discovering plugin objects which are based on + setuptools based entry points. + +- a simple tracing mechanism, including tracing of plugin calls and + their arguments. + +""" +import sys +import inspect + +__version__ = '0.4.0' + +__all__ = ["PluginManager", "PluginValidationError", "HookCallError", + "HookspecMarker", "HookimplMarker"] + +_py3 = sys.version_info > (3, 0) + + +class HookspecMarker: + """ Decorator helper class for marking functions as hook specifications. + + You can instantiate it with a project_name to get a decorator. + Calling PluginManager.add_hookspecs later will discover all marked functions + if the PluginManager uses the same project_name. + """ + + def __init__(self, project_name): + self.project_name = project_name + + def __call__(self, function=None, firstresult=False, historic=False): + """ if passed a function, directly sets attributes on the function + which will make it discoverable to add_hookspecs(). If passed no + function, returns a decorator which can be applied to a function + later using the attributes supplied. + + If firstresult is True the 1:N hook call (N being the number of registered + hook implementation functions) will stop at I<=N when the I'th function + returns a non-None result. + + If historic is True calls to a hook will be memorized and replayed + on later registered plugins. + + """ + def setattr_hookspec_opts(func): + if historic and firstresult: + raise ValueError("cannot have a historic firstresult hook") + setattr(func, self.project_name + "_spec", + dict(firstresult=firstresult, historic=historic)) + return func + + if function is not None: + return setattr_hookspec_opts(function) + else: + return setattr_hookspec_opts + + +class HookimplMarker: + """ Decorator helper class for marking functions as hook implementations. + + You can instantiate with a project_name to get a decorator. + Calling PluginManager.register later will discover all marked functions + if the PluginManager uses the same project_name. 
+ """ + def __init__(self, project_name): + self.project_name = project_name + + def __call__(self, function=None, hookwrapper=False, optionalhook=False, + tryfirst=False, trylast=False): + + """ if passed a function, directly sets attributes on the function + which will make it discoverable to register(). If passed no function, + returns a decorator which can be applied to a function later using + the attributes supplied. + + If optionalhook is True a missing matching hook specification will not result + in an error (by default it is an error if no matching spec is found). + + If tryfirst is True this hook implementation will run as early as possible + in the chain of N hook implementations for a specfication. + + If trylast is True this hook implementation will run as late as possible + in the chain of N hook implementations. + + If hookwrapper is True the hook implementations needs to execute exactly + one "yield". The code before the yield is run early before any non-hookwrapper + function is run. The code after the yield is run after all non-hookwrapper + function have run. The yield receives an ``_CallOutcome`` object representing + the exception or result outcome of the inner calls (including other hookwrapper + calls). + + """ + def setattr_hookimpl_opts(func): + setattr(func, self.project_name + "_impl", + dict(hookwrapper=hookwrapper, optionalhook=optionalhook, + tryfirst=tryfirst, trylast=trylast)) + return func + + if function is None: + return setattr_hookimpl_opts + else: + return setattr_hookimpl_opts(function) + + +def normalize_hookimpl_opts(opts): + opts.setdefault("tryfirst", False) + opts.setdefault("trylast", False) + opts.setdefault("hookwrapper", False) + opts.setdefault("optionalhook", False) + + +class _TagTracer: + def __init__(self): + self._tag2proc = {} + self.writer = None + self.indent = 0 + + def get(self, name): + return _TagTracerSub(self, (name,)) + + def format_message(self, tags, args): + if isinstance(args[-1], dict): + extra = args[-1] + args = args[:-1] + else: + extra = {} + + content = " ".join(map(str, args)) + indent = " " * self.indent + + lines = [ + "%s%s [%s]\n" % (indent, content, ":".join(tags)) + ] + + for name, value in extra.items(): + lines.append("%s %s: %s\n" % (indent, name, value)) + return lines + + def processmessage(self, tags, args): + if self.writer is not None and args: + lines = self.format_message(tags, args) + self.writer(''.join(lines)) + try: + self._tag2proc[tags](tags, args) + except KeyError: + pass + + def setwriter(self, writer): + self.writer = writer + + def setprocessor(self, tags, processor): + if isinstance(tags, str): + tags = tuple(tags.split(":")) + else: + assert isinstance(tags, tuple) + self._tag2proc[tags] = processor + + +class _TagTracerSub: + def __init__(self, root, tags): + self.root = root + self.tags = tags + + def __call__(self, *args): + self.root.processmessage(self.tags, args) + + def setmyprocessor(self, processor): + self.root.setprocessor(self.tags, processor) + + def get(self, name): + return self.__class__(self.root, self.tags + (name,)) + + +def _raise_wrapfail(wrap_controller, msg): + co = wrap_controller.gi_code + raise RuntimeError("wrap_controller at %r %s:%d %s" % + (co.co_name, co.co_filename, co.co_firstlineno, msg)) + + +def _wrapped_call(wrap_controller, func): + """ Wrap calling to a function with a generator which needs to yield + exactly once. The yield point will trigger calling the wrapped function + and return its _CallOutcome to the yield point. 
The generator then needs + to finish (raise StopIteration) in order for the wrapped call to complete. + """ + try: + next(wrap_controller) # first yield + except StopIteration: + _raise_wrapfail(wrap_controller, "did not yield") + call_outcome = _CallOutcome(func) + try: + wrap_controller.send(call_outcome) + _raise_wrapfail(wrap_controller, "has second yield") + except StopIteration: + pass + return call_outcome.get_result() + + +class _CallOutcome: + """ Outcome of a function call, either an exception or a proper result. + Calling the ``get_result`` method will return the result or reraise + the exception raised when the function was called. """ + excinfo = None + + def __init__(self, func): + try: + self.result = func() + except BaseException: + self.excinfo = sys.exc_info() + + def force_result(self, result): + self.result = result + self.excinfo = None + + def get_result(self): + if self.excinfo is None: + return self.result + else: + ex = self.excinfo + if _py3: + raise ex[1].with_traceback(ex[2]) + _reraise(*ex) # noqa + +if not _py3: + exec(""" +def _reraise(cls, val, tb): + raise cls, val, tb +""") + + +class _TracedHookExecution: + def __init__(self, pluginmanager, before, after): + self.pluginmanager = pluginmanager + self.before = before + self.after = after + self.oldcall = pluginmanager._inner_hookexec + assert not isinstance(self.oldcall, _TracedHookExecution) + self.pluginmanager._inner_hookexec = self + + def __call__(self, hook, hook_impls, kwargs): + self.before(hook.name, hook_impls, kwargs) + outcome = _CallOutcome(lambda: self.oldcall(hook, hook_impls, kwargs)) + self.after(outcome, hook.name, hook_impls, kwargs) + return outcome.get_result() + + def undo(self): + self.pluginmanager._inner_hookexec = self.oldcall + + +class PluginManager(object): + """ Core Pluginmanager class which manages registration + of plugin objects and 1:N hook calling. + + You can register new hooks by calling ``add_hookspec(module_or_class)``. + You can register plugin objects (which contain hooks) by calling + ``register(plugin)``. The Pluginmanager is initialized with a + prefix that is searched for in the names of the dict of registered + plugin objects. An optional excludefunc allows to blacklist names which + are not considered as hooks despite a matching prefix. + + For debugging purposes you can call ``enable_tracing()`` + which will subsequently send debug information to the trace helper. + """ + + def __init__(self, project_name, implprefix=None): + """ if implprefix is given implementation functions + will be recognized if their name matches the implprefix. """ + self.project_name = project_name + self._name2plugin = {} + self._plugin2hookcallers = {} + self._plugin_distinfo = [] + self.trace = _TagTracer().get("pluginmanage") + self.hook = _HookRelay(self.trace.root.get("hook")) + self._implprefix = implprefix + self._inner_hookexec = lambda hook, methods, kwargs: \ + _MultiCall(methods, kwargs, hook.spec_opts).execute() + + def _hookexec(self, hook, methods, kwargs): + # called from all hookcaller instances. + # enable_tracing will set its own wrapping function at self._inner_hookexec + return self._inner_hookexec(hook, methods, kwargs) + + def register(self, plugin, name=None): + """ Register a plugin and return its canonical name or None if the name + is blocked from registering. Raise a ValueError if the plugin is already + registered. 
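    A minimal end-to-end sketch, reusing the hypothetical MySpecs/MyPlugin
    classes from above (none of these names come from this patch):

        pm = PluginManager("myproject")
        pm.add_hookspecs(MySpecs)                 # make the specifications known
        pm.register(MyPlugin(), name="myplugin")  # returns the canonical name
        results = pm.hook.myhook(arg=41)          # 1:N call -> [42]
        pm.set_blocked("other")                   # a later register(..., name="other") returns None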
""" + plugin_name = name or self.get_canonical_name(plugin) + + if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers: + if self._name2plugin.get(plugin_name, -1) is None: + return # blocked plugin, return None to indicate no registration + raise ValueError("Plugin already registered: %s=%s\n%s" % + (plugin_name, plugin, self._name2plugin)) + + # XXX if an error happens we should make sure no state has been + # changed at point of return + self._name2plugin[plugin_name] = plugin + + # register matching hook implementations of the plugin + self._plugin2hookcallers[plugin] = hookcallers = [] + for name in dir(plugin): + hookimpl_opts = self.parse_hookimpl_opts(plugin, name) + if hookimpl_opts is not None: + normalize_hookimpl_opts(hookimpl_opts) + method = getattr(plugin, name) + hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts) + hook = getattr(self.hook, name, None) + if hook is None: + hook = _HookCaller(name, self._hookexec) + setattr(self.hook, name, hook) + elif hook.has_spec(): + self._verify_hook(hook, hookimpl) + hook._maybe_apply_history(hookimpl) + hook._add_hookimpl(hookimpl) + hookcallers.append(hook) + return plugin_name + + def parse_hookimpl_opts(self, plugin, name): + method = getattr(plugin, name) + try: + res = getattr(method, self.project_name + "_impl", None) + except Exception: + res = {} + if res is not None and not isinstance(res, dict): + # false positive + res = None + elif res is None and self._implprefix and name.startswith(self._implprefix): + res = {} + return res + + def unregister(self, plugin=None, name=None): + """ unregister a plugin object and all its contained hook implementations + from internal data structures. """ + if name is None: + assert plugin is not None, "one of name or plugin needs to be specified" + name = self.get_name(plugin) + + if plugin is None: + plugin = self.get_plugin(name) + + # if self._name2plugin[name] == None registration was blocked: ignore + if self._name2plugin.get(name): + del self._name2plugin[name] + + for hookcaller in self._plugin2hookcallers.pop(plugin, []): + hookcaller._remove_plugin(plugin) + + return plugin + + def set_blocked(self, name): + """ block registrations of the given name, unregister if already registered. """ + self.unregister(name=name) + self._name2plugin[name] = None + + def is_blocked(self, name): + """ return True if the name blogs registering plugins of that name. """ + return name in self._name2plugin and self._name2plugin[name] is None + + def add_hookspecs(self, module_or_class): + """ add new hook specifications defined in the given module_or_class. + Functions are recognized if they have been decorated accordingly. 
""" + names = [] + for name in dir(module_or_class): + spec_opts = self.parse_hookspec_opts(module_or_class, name) + if spec_opts is not None: + hc = getattr(self.hook, name, None) + if hc is None: + hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts) + setattr(self.hook, name, hc) + else: + # plugins registered this hook without knowing the spec + hc.set_specification(module_or_class, spec_opts) + for hookfunction in (hc._wrappers + hc._nonwrappers): + self._verify_hook(hc, hookfunction) + names.append(name) + + if not names: + raise ValueError("did not find any %r hooks in %r" % + (self.project_name, module_or_class)) + + def parse_hookspec_opts(self, module_or_class, name): + method = getattr(module_or_class, name) + return getattr(method, self.project_name + "_spec", None) + + def get_plugins(self): + """ return the set of registered plugins. """ + return set(self._plugin2hookcallers) + + def is_registered(self, plugin): + """ Return True if the plugin is already registered. """ + return plugin in self._plugin2hookcallers + + def get_canonical_name(self, plugin): + """ Return canonical name for a plugin object. Note that a plugin + may be registered under a different name which was specified + by the caller of register(plugin, name). To obtain the name + of an registered plugin use ``get_name(plugin)`` instead.""" + return getattr(plugin, "__name__", None) or str(id(plugin)) + + def get_plugin(self, name): + """ Return a plugin or None for the given name. """ + return self._name2plugin.get(name) + + def has_plugin(self, name): + """ Return True if a plugin with the given name is registered. """ + return self.get_plugin(name) is not None + + def get_name(self, plugin): + """ Return name for registered plugin or None if not registered. """ + for name, val in self._name2plugin.items(): + if plugin == val: + return name + + def _verify_hook(self, hook, hookimpl): + if hook.is_historic() and hookimpl.hookwrapper: + raise PluginValidationError( + "Plugin %r\nhook %r\nhistoric incompatible to hookwrapper" % + (hookimpl.plugin_name, hook.name)) + + for arg in hookimpl.argnames: + if arg not in hook.argnames: + raise PluginValidationError( + "Plugin %r\nhook %r\nargument %r not available\n" + "plugin definition: %s\n" + "available hookargs: %s" % + (hookimpl.plugin_name, hook.name, arg, + _formatdef(hookimpl.function), ", ".join(hook.argnames))) + + def check_pending(self): + """ Verify that all hooks which have not been verified against + a hook specification are optional, otherwise raise PluginValidationError""" + for name in self.hook.__dict__: + if name[0] != "_": + hook = getattr(self.hook, name) + if not hook.has_spec(): + for hookimpl in (hook._wrappers + hook._nonwrappers): + if not hookimpl.optionalhook: + raise PluginValidationError( + "unknown hook %r in plugin %r" % + (name, hookimpl.plugin)) + + def load_setuptools_entrypoints(self, entrypoint_name): + """ Load modules from querying the specified setuptools entrypoint name. + Return the number of loaded plugins. """ + from pkg_resources import (iter_entry_points, DistributionNotFound, + VersionConflict) + for ep in iter_entry_points(entrypoint_name): + # is the plugin registered or blocked? + if self.get_plugin(ep.name) or self.is_blocked(ep.name): + continue + try: + plugin = ep.load() + except DistributionNotFound: + continue + except VersionConflict as e: + raise PluginValidationError( + "Plugin %r could not be loaded: %s!" 
% (ep.name, e)) + self.register(plugin, name=ep.name) + self._plugin_distinfo.append((plugin, ep.dist)) + return len(self._plugin_distinfo) + + def list_plugin_distinfo(self): + """ return list of distinfo/plugin tuples for all setuptools registered + plugins. """ + return list(self._plugin_distinfo) + + def list_name_plugin(self): + """ return list of name/plugin pairs. """ + return list(self._name2plugin.items()) + + def get_hookcallers(self, plugin): + """ get all hook callers for the specified plugin. """ + return self._plugin2hookcallers.get(plugin) + + def add_hookcall_monitoring(self, before, after): + """ add before/after tracing functions for all hooks + and return an undo function which, when called, + will remove the added tracers. + + ``before(hook_name, hook_impls, kwargs)`` will be called ahead + of all hook calls and receive a hookcaller instance, a list + of HookImpl instances and the keyword arguments for the hook call. + + ``after(outcome, hook_name, hook_impls, kwargs)`` receives the + same arguments as ``before`` but also a :py:class:`_CallOutcome`` object + which represents the result of the overall hook call. + """ + return _TracedHookExecution(self, before, after).undo + + def enable_tracing(self): + """ enable tracing of hook calls and return an undo function. """ + hooktrace = self.hook._trace + + def before(hook_name, methods, kwargs): + hooktrace.root.indent += 1 + hooktrace(hook_name, kwargs) + + def after(outcome, hook_name, methods, kwargs): + if outcome.excinfo is None: + hooktrace("finish", hook_name, "-->", outcome.result) + hooktrace.root.indent -= 1 + + return self.add_hookcall_monitoring(before, after) + + def subset_hook_caller(self, name, remove_plugins): + """ Return a new _HookCaller instance for the named method + which manages calls to all registered plugins except the + ones from remove_plugins. """ + orig = getattr(self.hook, name) + plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)] + if plugins_to_remove: + hc = _HookCaller(orig.name, orig._hookexec, orig._specmodule_or_class, + orig.spec_opts) + for hookimpl in (orig._wrappers + orig._nonwrappers): + plugin = hookimpl.plugin + if plugin not in plugins_to_remove: + hc._add_hookimpl(hookimpl) + # we also keep track of this hook caller so it + # gets properly removed on plugin unregistration + self._plugin2hookcallers.setdefault(plugin, []).append(hc) + return hc + return orig + + +class _MultiCall: + """ execute a call into multiple python functions/methods. """ + + # XXX note that the __multicall__ argument is supported only + # for pytest compatibility reasons. It was never officially + # supported there and is explicitely deprecated since 2.8 + # so we can remove it soon, allowing to avoid the below recursion + # in execute() and simplify/speed up the execute loop. 
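    # Illustration only, not part of this patch; "myproject" is hypothetical.
    # Entry-point discovery and call tracing are typically driven like this:
    #
    #     pm.load_setuptools_entrypoints("myproject")   # returns number of plugins loaded
    #     pm.trace.root.setwriter(sys.stderr.write)     # send trace output somewhere
    #     undo = pm.enable_tracing()                    # or pm.add_hookcall_monitoring(before, after)
    #     pm.hook.myhook(arg=1)
    #     undo()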
+ + def __init__(self, hook_impls, kwargs, specopts={}): + self.hook_impls = hook_impls + self.kwargs = kwargs + self.kwargs["__multicall__"] = self + self.specopts = specopts + + def execute(self): + all_kwargs = self.kwargs + self.results = results = [] + firstresult = self.specopts.get("firstresult") + + while self.hook_impls: + hook_impl = self.hook_impls.pop() + try: + args = [all_kwargs[argname] for argname in hook_impl.argnames] + except KeyError: + for argname in hook_impl.argnames: + if argname not in all_kwargs: + raise HookCallError( + "hook call must provide argument %r" % (argname,)) + if hook_impl.hookwrapper: + return _wrapped_call(hook_impl.function(*args), self.execute) + res = hook_impl.function(*args) + if res is not None: + if firstresult: + return res + results.append(res) + + if not firstresult: + return results + + def __repr__(self): + status = "%d meths" % (len(self.hook_impls),) + if hasattr(self, "results"): + status = ("%d results, " % len(self.results)) + status + return "<_MultiCall %s, kwargs=%r>" % (status, self.kwargs) + + +def varnames(func, startindex=None): + """ return argument name tuple for a function, method, class or callable. + + In case of a class, its "__init__" method is considered. + For methods the "self" parameter is not included unless you are passing + an unbound method with Python3 (which has no supports for unbound methods) + """ + cache = getattr(func, "__dict__", {}) + try: + return cache["_varnames"] + except KeyError: + pass + if inspect.isclass(func): + try: + func = func.__init__ + except AttributeError: + return () + startindex = 1 + else: + if not inspect.isfunction(func) and not inspect.ismethod(func): + try: + func = getattr(func, '__call__', func) + except Exception: + return () + if startindex is None: + startindex = int(inspect.ismethod(func)) + + try: + rawcode = func.__code__ + except AttributeError: + return () + try: + x = rawcode.co_varnames[startindex:rawcode.co_argcount] + except AttributeError: + x = () + else: + defaults = func.__defaults__ + if defaults: + x = x[:-len(defaults)] + try: + cache["_varnames"] = x + except TypeError: + pass + return x + + +class _HookRelay: + """ hook holder object for performing 1:N hook calls where N is the number + of registered plugins. 
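    For example (hypothetical names, continuing the sketch above), the relay
    exposes one attribute per hook name, and the call semantics follow the
    options given on the specification:

        pm.hook.myhook(arg=1)     # default: list of all non-None results, e.g. [2]
        pm.hook.pick_one(arg=1)   # firstresult=True spec: first non-None result, or None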
+ + """ + + def __init__(self, trace): + self._trace = trace + + +class _HookCaller(object): + def __init__(self, name, hook_execute, specmodule_or_class=None, spec_opts=None): + self.name = name + self._wrappers = [] + self._nonwrappers = [] + self._hookexec = hook_execute + if specmodule_or_class is not None: + assert spec_opts is not None + self.set_specification(specmodule_or_class, spec_opts) + + def has_spec(self): + return hasattr(self, "_specmodule_or_class") + + def set_specification(self, specmodule_or_class, spec_opts): + assert not self.has_spec() + self._specmodule_or_class = specmodule_or_class + specfunc = getattr(specmodule_or_class, self.name) + argnames = varnames(specfunc, startindex=inspect.isclass(specmodule_or_class)) + assert "self" not in argnames # sanity check + self.argnames = ["__multicall__"] + list(argnames) + self.spec_opts = spec_opts + if spec_opts.get("historic"): + self._call_history = [] + + def is_historic(self): + return hasattr(self, "_call_history") + + def _remove_plugin(self, plugin): + def remove(wrappers): + for i, method in enumerate(wrappers): + if method.plugin == plugin: + del wrappers[i] + return True + if remove(self._wrappers) is None: + if remove(self._nonwrappers) is None: + raise ValueError("plugin %r not found" % (plugin,)) + + def _add_hookimpl(self, hookimpl): + if hookimpl.hookwrapper: + methods = self._wrappers + else: + methods = self._nonwrappers + + if hookimpl.trylast: + methods.insert(0, hookimpl) + elif hookimpl.tryfirst: + methods.append(hookimpl) + else: + # find last non-tryfirst method + i = len(methods) - 1 + while i >= 0 and methods[i].tryfirst: + i -= 1 + methods.insert(i + 1, hookimpl) + + def __repr__(self): + return "<_HookCaller %r>" % (self.name,) + + def __call__(self, **kwargs): + assert not self.is_historic() + return self._hookexec(self, self._nonwrappers + self._wrappers, kwargs) + + def call_historic(self, proc=None, kwargs=None): + self._call_history.append((kwargs or {}, proc)) + # historizing hooks don't return results + self._hookexec(self, self._nonwrappers + self._wrappers, kwargs) + + def call_extra(self, methods, kwargs): + """ Call the hook with some additional temporarily participating + methods using the specified kwargs as call parameters. """ + old = list(self._nonwrappers), list(self._wrappers) + for method in methods: + opts = dict(hookwrapper=False, trylast=False, tryfirst=False) + hookimpl = HookImpl(None, "", method, opts) + self._add_hookimpl(hookimpl) + try: + return self(**kwargs) + finally: + self._nonwrappers, self._wrappers = old + + def _maybe_apply_history(self, method): + if self.is_historic(): + for kwargs, proc in self._call_history: + res = self._hookexec(self, [method], kwargs) + if res and proc is not None: + proc(res[0]) + + +class HookImpl: + def __init__(self, plugin, plugin_name, function, hook_impl_opts): + self.function = function + self.argnames = varnames(self.function) + self.plugin = plugin + self.opts = hook_impl_opts + self.plugin_name = plugin_name + self.__dict__.update(hook_impl_opts) + + +class PluginValidationError(Exception): + """ plugin failed validation. """ + + +class HookCallError(Exception): + """ Hook was called wrongly. 
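    A final hedged sketch (hypothetical names, not from this patch): a
    historic hook memorizes its calls and replays them to plugins that are
    registered later:

        class MySpecs2:
            @hookspec(historic=True)
            def configure(self, config):
                "historic: replayed to late-registered plugins"

        class LatePlugin:
            @hookimpl
            def configure(self, config):
                print(config["answer"])

        pm.add_hookspecs(MySpecs2)
        pm.hook.configure.call_historic(kwargs=dict(config={"answer": 42}))
        pm.register(LatePlugin(), name="late")   # its configure() impl runs immediately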
""" + + +if hasattr(inspect, 'signature'): + def _formatdef(func): + return "%s%s" % ( + func.__name__, + str(inspect.signature(func)) + ) +else: + def _formatdef(func): + return "%s%s" % ( + func.__name__, + inspect.formatargspec(*inspect.getargspec(func)) + ) diff --git a/lib/spack/external/nose/LICENSE b/lib/spack/external/nose/LICENSE deleted file mode 100644 index 9f6e791624..0000000000 --- a/lib/spack/external/nose/LICENSE +++ /dev/null @@ -1,502 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 - - Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - -[This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -Licenses are intended to guarantee your freedom to share and change -free software--to make sure the software is free for all its users. - - This license, the Lesser General Public License, applies to some -specially designated software packages--typically libraries--of the -Free Software Foundation and other authors who decide to use it. You -can use it too, but we suggest you first think carefully about whether -this license or the ordinary General Public License is the better -strategy to use in any particular case, based on the explanations below. - - When we speak of free software, we are referring to freedom of use, -not price. Our General Public Licenses are designed to make sure that -you have the freedom to distribute copies of free software (and charge -for this service if you wish); that you receive source code or can get -it if you want it; that you can change the software and use pieces of -it in new free programs; and that you are informed that you can do -these things. - - To protect your rights, we need to make restrictions that forbid -distributors to deny you these rights or to ask you to surrender these -rights. These restrictions translate to certain responsibilities for -you if you distribute copies of the library or if you modify it. - - For example, if you distribute copies of the library, whether gratis -or for a fee, you must give the recipients all the rights that we gave -you. You must make sure that they, too, receive or can get the source -code. If you link other code with the library, you must provide -complete object files to the recipients, so that they can relink them -with the library after making changes to the library and recompiling -it. And you must show them these terms so they know their rights. - - We protect your rights with a two-step method: (1) we copyright the -library, and (2) we offer you this license, which gives you legal -permission to copy, distribute and/or modify the library. - - To protect each distributor, we want to make it very clear that -there is no warranty for the free library. Also, if the library is -modified by someone else and passed on, the recipients should know -that what they have is not the original version, so that the original -author's reputation will not be affected by problems that might be -introduced by others. - - Finally, software patents pose a constant threat to the existence of -any free program. 
We wish to make sure that a company cannot -effectively restrict the users of a free program by obtaining a -restrictive license from a patent holder. Therefore, we insist that -any patent license obtained for a version of the library must be -consistent with the full freedom of use specified in this license. - - Most GNU software, including some libraries, is covered by the -ordinary GNU General Public License. This license, the GNU Lesser -General Public License, applies to certain designated libraries, and -is quite different from the ordinary General Public License. We use -this license for certain libraries in order to permit linking those -libraries into non-free programs. - - When a program is linked with a library, whether statically or using -a shared library, the combination of the two is legally speaking a -combined work, a derivative of the original library. The ordinary -General Public License therefore permits such linking only if the -entire combination fits its criteria of freedom. The Lesser General -Public License permits more lax criteria for linking other code with -the library. - - We call this license the "Lesser" General Public License because it -does Less to protect the user's freedom than the ordinary General -Public License. It also provides other free software developers Less -of an advantage over competing non-free programs. These disadvantages -are the reason we use the ordinary General Public License for many -libraries. However, the Lesser license provides advantages in certain -special circumstances. - - For example, on rare occasions, there may be a special need to -encourage the widest possible use of a certain library, so that it becomes -a de-facto standard. To achieve this, non-free programs must be -allowed to use the library. A more frequent case is that a free -library does the same job as widely used non-free libraries. In this -case, there is little to gain by limiting the free library to free -software only, so we use the Lesser General Public License. - - In other cases, permission to use a particular library in non-free -programs enables a greater number of people to use a large body of -free software. For example, permission to use the GNU C Library in -non-free programs enables many more people to use the whole GNU -operating system, as well as its variant, the GNU/Linux operating -system. - - Although the Lesser General Public License is Less protective of the -users' freedom, it does ensure that the user of a program that is -linked with the Library has the freedom and the wherewithal to run -that program using a modified version of the Library. - - The precise terms and conditions for copying, distribution and -modification follow. Pay close attention to the difference between a -"work based on the library" and a "work that uses the library". The -former contains code derived from the library, whereas the latter must -be combined with the library in order to run. - - GNU LESSER GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License Agreement applies to any software library or other -program which contains a notice placed by the copyright holder or -other authorized party saying it may be distributed under the terms of -this Lesser General Public License (also called "this License"). -Each licensee is addressed as "you". 
- - A "library" means a collection of software functions and/or data -prepared so as to be conveniently linked with application programs -(which use some of those functions and data) to form executables. - - The "Library", below, refers to any such software library or work -which has been distributed under these terms. A "work based on the -Library" means either the Library or any derivative work under -copyright law: that is to say, a work containing the Library or a -portion of it, either verbatim or with modifications and/or translated -straightforwardly into another language. (Hereinafter, translation is -included without limitation in the term "modification".) - - "Source code" for a work means the preferred form of the work for -making modifications to it. For a library, complete source code means -all the source code for all modules it contains, plus any associated -interface definition files, plus the scripts used to control compilation -and installation of the library. - - Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running a program using the Library is not restricted, and output from -such a program is covered only if its contents constitute a work based -on the Library (independent of the use of the Library in a tool for -writing it). Whether that is true depends on what the Library does -and what the program that uses the Library does. - - 1. You may copy and distribute verbatim copies of the Library's -complete source code as you receive it, in any medium, provided that -you conspicuously and appropriately publish on each copy an -appropriate copyright notice and disclaimer of warranty; keep intact -all the notices that refer to this License and to the absence of any -warranty; and distribute a copy of this License along with the -Library. - - You may charge a fee for the physical act of transferring a copy, -and you may at your option offer warranty protection in exchange for a -fee. - - 2. You may modify your copy or copies of the Library or any portion -of it, thus forming a work based on the Library, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) The modified work must itself be a software library. - - b) You must cause the files modified to carry prominent notices - stating that you changed the files and the date of any change. - - c) You must cause the whole of the work to be licensed at no - charge to all third parties under the terms of this License. - - d) If a facility in the modified Library refers to a function or a - table of data to be supplied by an application program that uses - the facility, other than as an argument passed when the facility - is invoked, then you must make a good faith effort to ensure that, - in the event an application does not supply such function or - table, the facility still operates, and performs whatever part of - its purpose remains meaningful. - - (For example, a function in a library to compute square roots has - a purpose that is entirely well-defined independent of the - application. Therefore, Subsection 2d requires that any - application-supplied function or table used by this function must - be optional: if the application does not supply it, the square - root function must still compute square roots.) - -These requirements apply to the modified work as a whole. 
If -identifiable sections of that work are not derived from the Library, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Library, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote -it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Library. - -In addition, mere aggregation of another work not based on the Library -with the Library (or with a work based on the Library) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may opt to apply the terms of the ordinary GNU General Public -License instead of this License to a given copy of the Library. To do -this, you must alter all the notices that refer to this License, so -that they refer to the ordinary GNU General Public License, version 2, -instead of to this License. (If a newer version than version 2 of the -ordinary GNU General Public License has appeared, then you can specify -that version instead if you wish.) Do not make any other change in -these notices. - - Once this change is made in a given copy, it is irreversible for -that copy, so the ordinary GNU General Public License applies to all -subsequent copies and derivative works made from that copy. - - This option is useful when you wish to copy part of the code of -the Library into a program that is not a library. - - 4. You may copy and distribute the Library (or a portion or -derivative of it, under Section 2) in object code or executable form -under the terms of Sections 1 and 2 above provided that you accompany -it with the complete corresponding machine-readable source code, which -must be distributed under the terms of Sections 1 and 2 above on a -medium customarily used for software interchange. - - If distribution of object code is made by offering access to copy -from a designated place, then offering equivalent access to copy the -source code from the same place satisfies the requirement to -distribute the source code, even though third parties are not -compelled to copy the source along with the object code. - - 5. A program that contains no derivative of any portion of the -Library, but is designed to work with the Library by being compiled or -linked with it, is called a "work that uses the Library". Such a -work, in isolation, is not a derivative work of the Library, and -therefore falls outside the scope of this License. - - However, linking a "work that uses the Library" with the Library -creates an executable that is a derivative of the Library (because it -contains portions of the Library), rather than a "work that uses the -library". The executable is therefore covered by this License. -Section 6 states terms for distribution of such executables. - - When a "work that uses the Library" uses material from a header file -that is part of the Library, the object code for the work may be a -derivative work of the Library even though the source code is not. 
-Whether this is true is especially significant if the work can be -linked without the Library, or if the work is itself a library. The -threshold for this to be true is not precisely defined by law. - - If such an object file uses only numerical parameters, data -structure layouts and accessors, and small macros and small inline -functions (ten lines or less in length), then the use of the object -file is unrestricted, regardless of whether it is legally a derivative -work. (Executables containing this object code plus portions of the -Library will still fall under Section 6.) - - Otherwise, if the work is a derivative of the Library, you may -distribute the object code for the work under the terms of Section 6. -Any executables containing that work also fall under Section 6, -whether or not they are linked directly with the Library itself. - - 6. As an exception to the Sections above, you may also combine or -link a "work that uses the Library" with the Library to produce a -work containing portions of the Library, and distribute that work -under terms of your choice, provided that the terms permit -modification of the work for the customer's own use and reverse -engineering for debugging such modifications. - - You must give prominent notice with each copy of the work that the -Library is used in it and that the Library and its use are covered by -this License. You must supply a copy of this License. If the work -during execution displays copyright notices, you must include the -copyright notice for the Library among them, as well as a reference -directing the user to the copy of this License. Also, you must do one -of these things: - - a) Accompany the work with the complete corresponding - machine-readable source code for the Library including whatever - changes were used in the work (which must be distributed under - Sections 1 and 2 above); and, if the work is an executable linked - with the Library, with the complete machine-readable "work that - uses the Library", as object code and/or source code, so that the - user can modify the Library and then relink to produce a modified - executable containing the modified Library. (It is understood - that the user who changes the contents of definitions files in the - Library will not necessarily be able to recompile the application - to use the modified definitions.) - - b) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (1) uses at run time a - copy of the library already present on the user's computer system, - rather than copying library functions into the executable, and (2) - will operate properly with a modified version of the library, if - the user installs one, as long as the modified version is - interface-compatible with the version that the work was made with. - - c) Accompany the work with a written offer, valid for at - least three years, to give the same user the materials - specified in Subsection 6a, above, for a charge no more - than the cost of performing this distribution. - - d) If distribution of the work is made by offering access to copy - from a designated place, offer equivalent access to copy the above - specified materials from the same place. - - e) Verify that the user has already received a copy of these - materials or that you have already sent this user a copy. - - For an executable, the required form of the "work that uses the -Library" must include any data and utility programs needed for -reproducing the executable from it. 
However, as a special exception, -the materials to be distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies -the executable. - - It may happen that this requirement contradicts the license -restrictions of other proprietary libraries that do not normally -accompany the operating system. Such a contradiction means you cannot -use both them and the Library together in an executable that you -distribute. - - 7. You may place library facilities that are a work based on the -Library side-by-side in a single library together with other library -facilities not covered by this License, and distribute such a combined -library, provided that the separate distribution of the work based on -the Library and of the other library facilities is otherwise -permitted, and provided that you do these two things: - - a) Accompany the combined library with a copy of the same work - based on the Library, uncombined with any other library - facilities. This must be distributed under the terms of the - Sections above. - - b) Give prominent notice with the combined library of the fact - that part of it is a work based on the Library, and explaining - where to find the accompanying uncombined form of the same work. - - 8. You may not copy, modify, sublicense, link with, or distribute -the Library except as expressly provided under this License. Any -attempt otherwise to copy, modify, sublicense, link with, or -distribute the Library is void, and will automatically terminate your -rights under this License. However, parties who have received copies, -or rights, from you under this License will not have their licenses -terminated so long as such parties remain in full compliance. - - 9. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Library or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Library (or any work based on the -Library), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Library or works based on it. - - 10. Each time you redistribute the Library (or any work based on the -Library), the recipient automatically receives a license from the -original licensor to copy, distribute, link with or modify the Library -subject to these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties with -this License. - - 11. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Library at all. 
For example, if a patent -license would not permit royalty-free redistribution of the Library by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Library. - -If any portion of this section is held invalid or unenforceable under any -particular circumstance, the balance of the section is intended to apply, -and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 12. If the distribution and/or use of the Library is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Library under this License may add -an explicit geographical distribution limitation excluding those countries, -so that distribution is permitted only in or among countries not thus -excluded. In such case, this License incorporates the limitation as if -written in the body of this License. - - 13. The Free Software Foundation may publish revised and/or new -versions of the Lesser General Public License from time to time. -Such new versions will be similar in spirit to the present version, -but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Library -specifies a version number of this License which applies to it and -"any later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Library does not specify a -license version number, you may choose any version ever published by -the Free Software Foundation. - - 14. If you wish to incorporate parts of the Library into other free -programs whose distribution conditions are incompatible with these, -write to the author to ask for permission. For software which is -copyrighted by the Free Software Foundation, write to the Free -Software Foundation; we sometimes make exceptions for this. Our -decision will be guided by the two goals of preserving the free status -of all derivatives of our free software and of promoting the sharing -and reuse of software generally. - - NO WARRANTY - - 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY -KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE -LIBRARY IS WITH YOU. 
SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME -THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU -FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR -CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE -LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A -FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF -SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH -DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Libraries - - If you develop a new library, and you want it to be of the greatest -possible use to the public, we recommend making it free software that -everyone can redistribute and change. You can do so by permitting -redistribution under these terms (or, alternatively, under the terms of the -ordinary General Public License). - - To apply these terms, attach the following notices to the library. It is -safest to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -Also add information on how to contact you by electronic and paper mail. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the library, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - library `Frob' (a library for tweaking knobs) written by James Random Hacker. - - , 1 April 1990 - Ty Coon, President of Vice - -That's all there is to it! 
diff --git a/lib/spack/external/nose/__init__.py b/lib/spack/external/nose/__init__.py deleted file mode 100644 index 1ae1362b7a..0000000000 --- a/lib/spack/external/nose/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from nose.core import collector, main, run, run_exit, runmodule -# backwards compatibility -from nose.exc import SkipTest, DeprecatedTest -from nose.tools import with_setup - -__author__ = 'Jason Pellerin' -__versioninfo__ = (1, 3, 7) -__version__ = '.'.join(map(str, __versioninfo__)) - -__all__ = [ - 'main', 'run', 'run_exit', 'runmodule', 'with_setup', - 'SkipTest', 'DeprecatedTest', 'collector' - ] - - diff --git a/lib/spack/external/nose/__main__.py b/lib/spack/external/nose/__main__.py deleted file mode 100644 index b402d9df12..0000000000 --- a/lib/spack/external/nose/__main__.py +++ /dev/null @@ -1,8 +0,0 @@ -import sys - -from nose.core import run_exit - -if sys.argv[0].endswith('__main__.py'): - sys.argv[0] = '%s -m nose' % sys.executable - -run_exit() diff --git a/lib/spack/external/nose/case.py b/lib/spack/external/nose/case.py deleted file mode 100644 index cffa4ab4c9..0000000000 --- a/lib/spack/external/nose/case.py +++ /dev/null @@ -1,397 +0,0 @@ -"""nose unittest.TestCase subclasses. It is not necessary to subclass these -classes when writing tests; they are used internally by nose.loader.TestLoader -to create test cases from test functions and methods in test classes. -""" -import logging -import sys -import unittest -from inspect import isfunction -from nose.config import Config -from nose.failure import Failure # for backwards compatibility -from nose.util import resolve_name, test_address, try_run - -log = logging.getLogger(__name__) - - -__all__ = ['Test'] - - -class Test(unittest.TestCase): - """The universal test case wrapper. - - When a plugin sees a test, it will always see an instance of this - class. To access the actual test case that will be run, access the - test property of the nose.case.Test instance. - """ - __test__ = False # do not collect - def __init__(self, test, config=None, resultProxy=None): - # sanity check - if not callable(test): - raise TypeError("nose.case.Test called with argument %r that " - "is not callable. A callable is required." - % test) - self.test = test - if config is None: - config = Config() - self.config = config - self.tbinfo = None - self.capturedOutput = None - self.resultProxy = resultProxy - self.plugins = config.plugins - self.passed = None - unittest.TestCase.__init__(self) - - def __call__(self, *arg, **kwarg): - return self.run(*arg, **kwarg) - - def __str__(self): - name = self.plugins.testName(self) - if name is not None: - return name - return str(self.test) - - def __repr__(self): - return "Test(%r)" % self.test - - def afterTest(self, result): - """Called after test is complete (after result.stopTest) - """ - try: - afterTest = result.afterTest - except AttributeError: - pass - else: - afterTest(self.test) - - def beforeTest(self, result): - """Called before test is run (before result.startTest) - """ - try: - beforeTest = result.beforeTest - except AttributeError: - pass - else: - beforeTest(self.test) - - def exc_info(self): - """Extract exception info. 
- """ - exc, exv, tb = sys.exc_info() - return (exc, exv, tb) - - def id(self): - """Get a short(er) description of the test - """ - return self.test.id() - - def address(self): - """Return a round-trip name for this test, a name that can be - fed back as input to loadTestByName and (assuming the same - plugin configuration) result in the loading of this test. - """ - if hasattr(self.test, 'address'): - return self.test.address() - else: - # not a nose case - return test_address(self.test) - - def _context(self): - try: - return self.test.context - except AttributeError: - pass - try: - return self.test.__class__ - except AttributeError: - pass - try: - return resolve_name(self.test.__module__) - except AttributeError: - pass - return None - context = property(_context, None, None, - """Get the context object of this test (if any).""") - - def run(self, result): - """Modified run for the test wrapper. - - From here we don't call result.startTest or stopTest or - addSuccess. The wrapper calls addError/addFailure only if its - own setup or teardown fails, or running the wrapped test fails - (eg, if the wrapped "test" is not callable). - - Two additional methods are called, beforeTest and - afterTest. These give plugins a chance to modify the wrapped - test before it is called and do cleanup after it is - called. They are called unconditionally. - """ - if self.resultProxy: - result = self.resultProxy(result, self) - try: - try: - self.beforeTest(result) - self.runTest(result) - except KeyboardInterrupt: - raise - except: - err = sys.exc_info() - result.addError(self, err) - finally: - self.afterTest(result) - - def runTest(self, result): - """Run the test. Plugins may alter the test by returning a - value from prepareTestCase. The value must be callable and - must accept one argument, the result instance. - """ - test = self.test - plug_test = self.config.plugins.prepareTestCase(self) - if plug_test is not None: - test = plug_test - test(result) - - def shortDescription(self): - desc = self.plugins.describeTest(self) - if desc is not None: - return desc - # work around bug in unittest.TestCase.shortDescription - # with multiline docstrings. - test = self.test - try: - test._testMethodDoc = test._testMethodDoc.strip()# 2.5 - except AttributeError: - try: - # 2.4 and earlier - test._TestCase__testMethodDoc = \ - test._TestCase__testMethodDoc.strip() - except AttributeError: - pass - # 2.7 compat: shortDescription() always returns something - # which is a change from 2.6 and below, and breaks the - # testName plugin call. - try: - desc = self.test.shortDescription() - except Exception: - # this is probably caused by a problem in test.__str__() and is - # only triggered by python 3.1's unittest! - pass - try: - if desc == str(self.test): - return - except Exception: - # If str() triggers an exception then ignore it. - # see issue 422 - pass - return desc - - -class TestBase(unittest.TestCase): - """Common functionality for FunctionTestCase and MethodTestCase. - """ - __test__ = False # do not collect - - def id(self): - return str(self) - - def runTest(self): - self.test(*self.arg) - - def shortDescription(self): - if hasattr(self.test, 'description'): - return self.test.description - func, arg = self._descriptors() - doc = getattr(func, '__doc__', None) - if not doc: - doc = str(self) - return doc.strip().split("\n")[0].strip() - - -class FunctionTestCase(TestBase): - """TestCase wrapper for test functions. 
- - Don't use this class directly; it is used internally in nose to - create test cases for test functions. - """ - __test__ = False # do not collect - - def __init__(self, test, setUp=None, tearDown=None, arg=tuple(), - descriptor=None): - """Initialize the MethodTestCase. - - Required argument: - - * test -- the test function to call. - - Optional arguments: - - * setUp -- function to run at setup. - - * tearDown -- function to run at teardown. - - * arg -- arguments to pass to the test function. This is to support - generator functions that yield arguments. - - * descriptor -- the function, other than the test, that should be used - to construct the test name. This is to support generator functions. - """ - - self.test = test - self.setUpFunc = setUp - self.tearDownFunc = tearDown - self.arg = arg - self.descriptor = descriptor - TestBase.__init__(self) - - def address(self): - """Return a round-trip name for this test, a name that can be - fed back as input to loadTestByName and (assuming the same - plugin configuration) result in the loading of this test. - """ - if self.descriptor is not None: - return test_address(self.descriptor) - else: - return test_address(self.test) - - def _context(self): - return resolve_name(self.test.__module__) - context = property(_context, None, None, - """Get context (module) of this test""") - - def setUp(self): - """Run any setup function attached to the test function - """ - if self.setUpFunc: - self.setUpFunc() - else: - names = ('setup', 'setUp', 'setUpFunc') - try_run(self.test, names) - - def tearDown(self): - """Run any teardown function attached to the test function - """ - if self.tearDownFunc: - self.tearDownFunc() - else: - names = ('teardown', 'tearDown', 'tearDownFunc') - try_run(self.test, names) - - def __str__(self): - func, arg = self._descriptors() - if hasattr(func, 'compat_func_name'): - name = func.compat_func_name - else: - name = func.__name__ - name = "%s.%s" % (func.__module__, name) - if arg: - name = "%s%s" % (name, arg) - # FIXME need to include the full dir path to disambiguate - # in cases where test module of the same name was seen in - # another directory (old fromDirectory) - return name - __repr__ = __str__ - - def _descriptors(self): - """Get the descriptors of the test function: the function and - arguments that will be used to construct the test name. In - most cases, this is the function itself and no arguments. For - tests generated by generator functions, the original - (generator) function and args passed to the generated function - are returned. - """ - if self.descriptor: - return self.descriptor, self.arg - else: - return self.test, self.arg - - -class MethodTestCase(TestBase): - """Test case wrapper for test methods. - - Don't use this class directly; it is used internally in nose to - create test cases for test methods. - """ - __test__ = False # do not collect - - def __init__(self, method, test=None, arg=tuple(), descriptor=None): - """Initialize the MethodTestCase. - - Required argument: - - * method -- the method to call, may be bound or unbound. In either - case, a new instance of the method's class will be instantiated to - make the call. Note: In Python 3.x, if using an unbound method, you - must wrap it using pyversion.unbound_method. - - Optional arguments: - - * test -- the test function to call. If this is passed, it will be - called instead of getting a new bound method of the same name as the - desired method from the test instance. 
This is to support generator - methods that yield inline functions. - - * arg -- arguments to pass to the test function. This is to support - generator methods that yield arguments. - - * descriptor -- the function, other than the test, that should be used - to construct the test name. This is to support generator methods. - """ - self.method = method - self.test = test - self.arg = arg - self.descriptor = descriptor - if isfunction(method): - raise ValueError("Unbound methods must be wrapped using pyversion.unbound_method before passing to MethodTestCase") - self.cls = method.im_class - self.inst = self.cls() - if self.test is None: - method_name = self.method.__name__ - self.test = getattr(self.inst, method_name) - TestBase.__init__(self) - - def __str__(self): - func, arg = self._descriptors() - if hasattr(func, 'compat_func_name'): - name = func.compat_func_name - else: - name = func.__name__ - name = "%s.%s.%s" % (self.cls.__module__, - self.cls.__name__, - name) - if arg: - name = "%s%s" % (name, arg) - return name - __repr__ = __str__ - - def address(self): - """Return a round-trip name for this test, a name that can be - fed back as input to loadTestByName and (assuming the same - plugin configuration) result in the loading of this test. - """ - if self.descriptor is not None: - return test_address(self.descriptor) - else: - return test_address(self.method) - - def _context(self): - return self.cls - context = property(_context, None, None, - """Get context (class) of this test""") - - def setUp(self): - try_run(self.inst, ('setup', 'setUp')) - - def tearDown(self): - try_run(self.inst, ('teardown', 'tearDown')) - - def _descriptors(self): - """Get the descriptors of the test method: the method and - arguments that will be used to construct the test name. In - most cases, this is the method itself and no arguments. For - tests generated by generator methods, the original - (generator) method and args passed to the generated method - or function are returned. - """ - if self.descriptor: - return self.descriptor, self.arg - else: - return self.method, self.arg diff --git a/lib/spack/external/nose/commands.py b/lib/spack/external/nose/commands.py deleted file mode 100644 index ef0e9caed4..0000000000 --- a/lib/spack/external/nose/commands.py +++ /dev/null @@ -1,172 +0,0 @@ -""" -nosetests setuptools command ----------------------------- - -The easiest way to run tests with nose is to use the `nosetests` setuptools -command:: - - python setup.py nosetests - -This command has one *major* benefit over the standard `test` command: *all -nose plugins are supported*. - -To configure the `nosetests` command, add a [nosetests] section to your -setup.cfg. The [nosetests] section can contain any command line arguments that -nosetests supports. The differences between issuing an option on the command -line and adding it to setup.cfg are: - -* In setup.cfg, the -- prefix must be excluded -* In setup.cfg, command line flags that take no arguments must be given an - argument flag (1, T or TRUE for active, 0, F or FALSE for inactive) - -Here's an example [nosetests] setup.cfg section:: - - [nosetests] - verbosity=1 - detailed-errors=1 - with-coverage=1 - cover-package=nose - debug=nose.loader - pdb=1 - pdb-failures=1 - -If you commonly run nosetests with a large number of options, using -the nosetests setuptools command and configuring with setup.cfg can -make running your tests much less tedious. 
(Note that the same options -and format supported in setup.cfg are supported in all other config -files, and the nosetests script will also load config files.) - -Another reason to run tests with the command is that the command will -install packages listed in your `tests_require`, as well as doing a -complete build of your package before running tests. For packages with -dependencies or that build C extensions, using the setuptools command -can be more convenient than building by hand and running the nosetests -script. - -Bootstrapping -------------- - -If you are distributing your project and want users to be able to run tests -without having to install nose themselves, add nose to the setup_requires -section of your setup():: - - setup( - # ... - setup_requires=['nose>=1.0'] - ) - -This will direct setuptools to download and activate nose during the setup -process, making the ``nosetests`` command available. - -""" -try: - from setuptools import Command -except ImportError: - Command = nosetests = None -else: - from nose.config import Config, option_blacklist, user_config_files, \ - flag, _bool - from nose.core import TestProgram - from nose.plugins import DefaultPluginManager - - - def get_user_options(parser): - """convert a optparse option list into a distutils option tuple list""" - opt_list = [] - for opt in parser.option_list: - if opt._long_opts[0][2:] in option_blacklist: - continue - long_name = opt._long_opts[0][2:] - if opt.action not in ('store_true', 'store_false'): - long_name = long_name + "=" - short_name = None - if opt._short_opts: - short_name = opt._short_opts[0][1:] - opt_list.append((long_name, short_name, opt.help or "")) - return opt_list - - - class nosetests(Command): - description = "Run unit tests using nosetests" - __config = Config(files=user_config_files(), - plugins=DefaultPluginManager()) - __parser = __config.getParser() - user_options = get_user_options(__parser) - - def initialize_options(self): - """create the member variables, but change hyphens to - underscores - """ - - self.option_to_cmds = {} - for opt in self.__parser.option_list: - cmd_name = opt._long_opts[0][2:] - option_name = cmd_name.replace('-', '_') - self.option_to_cmds[option_name] = cmd_name - setattr(self, option_name, None) - self.attr = None - - def finalize_options(self): - """nothing to do here""" - pass - - def run(self): - """ensure tests are capable of being run, then - run nose.main with a reconstructed argument list""" - if getattr(self.distribution, 'use_2to3', False): - # If we run 2to3 we can not do this inplace: - - # Ensure metadata is up-to-date - build_py = self.get_finalized_command('build_py') - build_py.inplace = 0 - build_py.run() - bpy_cmd = self.get_finalized_command("build_py") - build_path = bpy_cmd.build_lib - - # Build extensions - egg_info = self.get_finalized_command('egg_info') - egg_info.egg_base = build_path - egg_info.run() - - build_ext = self.get_finalized_command('build_ext') - build_ext.inplace = 0 - build_ext.run() - else: - self.run_command('egg_info') - - # Build extensions in-place - build_ext = self.get_finalized_command('build_ext') - build_ext.inplace = 1 - build_ext.run() - - if self.distribution.install_requires: - self.distribution.fetch_build_eggs( - self.distribution.install_requires) - if self.distribution.tests_require: - self.distribution.fetch_build_eggs( - self.distribution.tests_require) - - ei_cmd = self.get_finalized_command("egg_info") - argv = ['nosetests', '--where', ei_cmd.egg_base] - for (option_name, cmd_name) in 
self.option_to_cmds.items(): - if option_name in option_blacklist: - continue - value = getattr(self, option_name) - if value is not None: - argv.extend( - self.cfgToArg(option_name.replace('_', '-'), value)) - TestProgram(argv=argv, config=self.__config) - - def cfgToArg(self, optname, value): - argv = [] - long_optname = '--' + optname - opt = self.__parser.get_option(long_optname) - if opt.action in ('store_true', 'store_false'): - if not flag(value): - raise ValueError("Invalid value '%s' for '%s'" % ( - value, optname)) - if _bool(value): - argv.append(long_optname) - else: - argv.extend([long_optname, value]) - return argv diff --git a/lib/spack/external/nose/config.py b/lib/spack/external/nose/config.py deleted file mode 100644 index 125eb5579d..0000000000 --- a/lib/spack/external/nose/config.py +++ /dev/null @@ -1,661 +0,0 @@ -import logging -import optparse -import os -import re -import sys -import ConfigParser -from optparse import OptionParser -from nose.util import absdir, tolist -from nose.plugins.manager import NoPlugins -from warnings import warn, filterwarnings - -log = logging.getLogger(__name__) - -# not allowed in config files -option_blacklist = ['help', 'verbose'] - -config_files = [ - # Linux users will prefer this - "~/.noserc", - # Windows users will prefer this - "~/nose.cfg" - ] - -# plaforms on which the exe check defaults to off -# Windows and IronPython -exe_allowed_platforms = ('win32', 'cli') - -filterwarnings("always", category=DeprecationWarning, - module=r'(.*\.)?nose\.config') - -class NoSuchOptionError(Exception): - def __init__(self, name): - Exception.__init__(self, name) - self.name = name - - -class ConfigError(Exception): - pass - - -class ConfiguredDefaultsOptionParser(object): - """ - Handler for options from commandline and config files. 
- """ - def __init__(self, parser, config_section, error=None, file_error=None): - self._parser = parser - self._config_section = config_section - if error is None: - error = self._parser.error - self._error = error - if file_error is None: - file_error = lambda msg, **kw: error(msg) - self._file_error = file_error - - def _configTuples(self, cfg, filename): - config = [] - if self._config_section in cfg.sections(): - for name, value in cfg.items(self._config_section): - config.append((name, value, filename)) - return config - - def _readFromFilenames(self, filenames): - config = [] - for filename in filenames: - cfg = ConfigParser.RawConfigParser() - try: - cfg.read(filename) - except ConfigParser.Error, exc: - raise ConfigError("Error reading config file %r: %s" % - (filename, str(exc))) - config.extend(self._configTuples(cfg, filename)) - return config - - def _readFromFileObject(self, fh): - cfg = ConfigParser.RawConfigParser() - try: - filename = fh.name - except AttributeError: - filename = '' - try: - cfg.readfp(fh) - except ConfigParser.Error, exc: - raise ConfigError("Error reading config file %r: %s" % - (filename, str(exc))) - return self._configTuples(cfg, filename) - - def _readConfiguration(self, config_files): - try: - config_files.readline - except AttributeError: - filename_or_filenames = config_files - if isinstance(filename_or_filenames, basestring): - filenames = [filename_or_filenames] - else: - filenames = filename_or_filenames - config = self._readFromFilenames(filenames) - else: - fh = config_files - config = self._readFromFileObject(fh) - return config - - def _processConfigValue(self, name, value, values, parser): - opt_str = '--' + name - option = parser.get_option(opt_str) - if option is None: - raise NoSuchOptionError(name) - else: - option.process(opt_str, value, values, parser) - - def _applyConfigurationToValues(self, parser, config, values): - for name, value, filename in config: - if name in option_blacklist: - continue - try: - self._processConfigValue(name, value, values, parser) - except NoSuchOptionError, exc: - self._file_error( - "Error reading config file %r: " - "no such option %r" % (filename, exc.name), - name=name, filename=filename) - except optparse.OptionValueError, exc: - msg = str(exc).replace('--' + name, repr(name), 1) - self._file_error("Error reading config file %r: " - "%s" % (filename, msg), - name=name, filename=filename) - - def parseArgsAndConfigFiles(self, args, config_files): - values = self._parser.get_default_values() - try: - config = self._readConfiguration(config_files) - except ConfigError, exc: - self._error(str(exc)) - else: - try: - self._applyConfigurationToValues(self._parser, config, values) - except ConfigError, exc: - self._error(str(exc)) - return self._parser.parse_args(args, values) - - -class Config(object): - """nose configuration. - - Instances of Config are used throughout nose to configure - behavior, including plugin lists. 
Here are the default values for - all config keys:: - - self.env = env = kw.pop('env', {}) - self.args = () - self.testMatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) - self.addPaths = not env.get('NOSE_NOPATH', False) - self.configSection = 'nosetests' - self.debug = env.get('NOSE_DEBUG') - self.debugLog = env.get('NOSE_DEBUG_LOG') - self.exclude = None - self.getTestCaseNamesCompat = False - self.includeExe = env.get('NOSE_INCLUDE_EXE', - sys.platform in exe_allowed_platforms) - self.ignoreFiles = (re.compile(r'^\.'), - re.compile(r'^_'), - re.compile(r'^setup\.py$') - ) - self.include = None - self.loggingConfig = None - self.logStream = sys.stderr - self.options = NoOptions() - self.parser = None - self.plugins = NoPlugins() - self.srcDirs = ('lib', 'src') - self.runOnInit = True - self.stopOnError = env.get('NOSE_STOP', False) - self.stream = sys.stderr - self.testNames = () - self.verbosity = int(env.get('NOSE_VERBOSE', 1)) - self.where = () - self.py3where = () - self.workingDir = None - """ - - def __init__(self, **kw): - self.env = env = kw.pop('env', {}) - self.args = () - self.testMatchPat = env.get('NOSE_TESTMATCH', - r'(?:^|[\b_\.%s-])[Tt]est' % os.sep) - self.testMatch = re.compile(self.testMatchPat) - self.addPaths = not env.get('NOSE_NOPATH', False) - self.configSection = 'nosetests' - self.debug = env.get('NOSE_DEBUG') - self.debugLog = env.get('NOSE_DEBUG_LOG') - self.exclude = None - self.getTestCaseNamesCompat = False - self.includeExe = env.get('NOSE_INCLUDE_EXE', - sys.platform in exe_allowed_platforms) - self.ignoreFilesDefaultStrings = [r'^\.', - r'^_', - r'^setup\.py$', - ] - self.ignoreFiles = map(re.compile, self.ignoreFilesDefaultStrings) - self.include = None - self.loggingConfig = None - self.logStream = sys.stderr - self.options = NoOptions() - self.parser = None - self.plugins = NoPlugins() - self.srcDirs = ('lib', 'src') - self.runOnInit = True - self.stopOnError = env.get('NOSE_STOP', False) - self.stream = sys.stderr - self.testNames = [] - self.verbosity = int(env.get('NOSE_VERBOSE', 1)) - self.where = () - self.py3where = () - self.workingDir = os.getcwd() - self.traverseNamespace = False - self.firstPackageWins = False - self.parserClass = OptionParser - self.worker = False - - self._default = self.__dict__.copy() - self.update(kw) - self._orig = self.__dict__.copy() - - def __getstate__(self): - state = self.__dict__.copy() - del state['stream'] - del state['_orig'] - del state['_default'] - del state['env'] - del state['logStream'] - # FIXME remove plugins, have only plugin manager class - state['plugins'] = self.plugins.__class__ - return state - - def __setstate__(self, state): - plugincls = state.pop('plugins') - self.update(state) - self.worker = True - # FIXME won't work for static plugin lists - self.plugins = plugincls() - self.plugins.loadPlugins() - # needed so .can_configure gets set appropriately - dummy_parser = self.parserClass() - self.plugins.addOptions(dummy_parser, {}) - self.plugins.configure(self.options, self) - - def __repr__(self): - d = self.__dict__.copy() - # don't expose env, could include sensitive info - d['env'] = {} - keys = [ k for k in d.keys() - if not k.startswith('_') ] - keys.sort() - return "Config(%s)" % ', '.join([ '%s=%r' % (k, d[k]) - for k in keys ]) - __str__ = __repr__ - - def _parseArgs(self, argv, cfg_files): - def warn_sometimes(msg, name=None, filename=None): - if (hasattr(self.plugins, 'excludedOption') and - self.plugins.excludedOption(name)): - msg = ("Option %r in config file %r ignored: " 
- "excluded by runtime environment" % - (name, filename)) - warn(msg, RuntimeWarning) - else: - raise ConfigError(msg) - parser = ConfiguredDefaultsOptionParser( - self.getParser(), self.configSection, file_error=warn_sometimes) - return parser.parseArgsAndConfigFiles(argv[1:], cfg_files) - - def configure(self, argv=None, doc=None): - """Configure the nose running environment. Execute configure before - collecting tests with nose.TestCollector to enable output capture and - other features. - """ - env = self.env - if argv is None: - argv = sys.argv - - cfg_files = getattr(self, 'files', []) - options, args = self._parseArgs(argv, cfg_files) - # If -c --config has been specified on command line, - # load those config files and reparse - if getattr(options, 'files', []): - options, args = self._parseArgs(argv, options.files) - - self.options = options - if args: - self.testNames = args - if options.testNames is not None: - self.testNames.extend(tolist(options.testNames)) - - if options.py3where is not None: - if sys.version_info >= (3,): - options.where = options.py3where - - # `where` is an append action, so it can't have a default value - # in the parser, or that default will always be in the list - if not options.where: - options.where = env.get('NOSE_WHERE', None) - - # include and exclude also - if not options.ignoreFiles: - options.ignoreFiles = env.get('NOSE_IGNORE_FILES', []) - if not options.include: - options.include = env.get('NOSE_INCLUDE', []) - if not options.exclude: - options.exclude = env.get('NOSE_EXCLUDE', []) - - self.addPaths = options.addPaths - self.stopOnError = options.stopOnError - self.verbosity = options.verbosity - self.includeExe = options.includeExe - self.traverseNamespace = options.traverseNamespace - self.debug = options.debug - self.debugLog = options.debugLog - self.loggingConfig = options.loggingConfig - self.firstPackageWins = options.firstPackageWins - self.configureLogging() - - if not options.byteCompile: - sys.dont_write_bytecode = True - - if options.where is not None: - self.configureWhere(options.where) - - if options.testMatch: - self.testMatch = re.compile(options.testMatch) - - if options.ignoreFiles: - self.ignoreFiles = map(re.compile, tolist(options.ignoreFiles)) - log.info("Ignoring files matching %s", options.ignoreFiles) - else: - log.info("Ignoring files matching %s", self.ignoreFilesDefaultStrings) - - if options.include: - self.include = map(re.compile, tolist(options.include)) - log.info("Including tests matching %s", options.include) - - if options.exclude: - self.exclude = map(re.compile, tolist(options.exclude)) - log.info("Excluding tests matching %s", options.exclude) - - # When listing plugins we don't want to run them - if not options.showPlugins: - self.plugins.configure(options, self) - self.plugins.begin() - - def configureLogging(self): - """Configure logging for nose, or optionally other packages. Any logger - name may be set with the debug option, and that logger will be set to - debug level and be assigned the same handler as the nose loggers, unless - it already has a handler. 
- """ - if self.loggingConfig: - from logging.config import fileConfig - fileConfig(self.loggingConfig) - return - - format = logging.Formatter('%(name)s: %(levelname)s: %(message)s') - if self.debugLog: - handler = logging.FileHandler(self.debugLog) - else: - handler = logging.StreamHandler(self.logStream) - handler.setFormatter(format) - - logger = logging.getLogger('nose') - logger.propagate = 0 - - # only add our default handler if there isn't already one there - # this avoids annoying duplicate log messages. - found = False - if self.debugLog: - debugLogAbsPath = os.path.abspath(self.debugLog) - for h in logger.handlers: - if type(h) == logging.FileHandler and \ - h.baseFilename == debugLogAbsPath: - found = True - else: - for h in logger.handlers: - if type(h) == logging.StreamHandler and \ - h.stream == self.logStream: - found = True - if not found: - logger.addHandler(handler) - - # default level - lvl = logging.WARNING - if self.verbosity >= 5: - lvl = 0 - elif self.verbosity >= 4: - lvl = logging.DEBUG - elif self.verbosity >= 3: - lvl = logging.INFO - logger.setLevel(lvl) - - # individual overrides - if self.debug: - # no blanks - debug_loggers = [ name for name in self.debug.split(',') - if name ] - for logger_name in debug_loggers: - l = logging.getLogger(logger_name) - l.setLevel(logging.DEBUG) - if not l.handlers and not logger_name.startswith('nose'): - l.addHandler(handler) - - def configureWhere(self, where): - """Configure the working directory or directories for the test run. - """ - from nose.importer import add_path - self.workingDir = None - where = tolist(where) - warned = False - for path in where: - if not self.workingDir: - abs_path = absdir(path) - if abs_path is None: - raise ValueError("Working directory '%s' not found, or " - "not a directory" % path) - log.info("Set working dir to %s", abs_path) - self.workingDir = abs_path - if self.addPaths and \ - os.path.exists(os.path.join(abs_path, '__init__.py')): - log.info("Working directory %s is a package; " - "adding to sys.path" % abs_path) - add_path(abs_path) - continue - if not warned: - warn("Use of multiple -w arguments is deprecated and " - "support may be removed in a future release. You can " - "get the same behavior by passing directories without " - "the -w argument on the command line, or by using the " - "--tests argument in a configuration file.", - DeprecationWarning) - warned = True - self.testNames.append(path) - - def default(self): - """Reset all config values to defaults. - """ - self.__dict__.update(self._default) - - def getParser(self, doc=None): - """Get the command line option parser. - """ - if self.parser: - return self.parser - env = self.env - parser = self.parserClass(doc) - parser.add_option( - "-V","--version", action="store_true", - dest="version", default=False, - help="Output nose version and exit") - parser.add_option( - "-p", "--plugins", action="store_true", - dest="showPlugins", default=False, - help="Output list of available plugins and exit. Combine with " - "higher verbosity for greater detail") - parser.add_option( - "-v", "--verbose", - action="count", dest="verbosity", - default=self.verbosity, - help="Be more verbose. 
[NOSE_VERBOSE]") - parser.add_option( - "--verbosity", action="store", dest="verbosity", - metavar='VERBOSITY', - type="int", help="Set verbosity; --verbosity=2 is " - "the same as -v") - parser.add_option( - "-q", "--quiet", action="store_const", const=0, dest="verbosity", - help="Be less verbose") - parser.add_option( - "-c", "--config", action="append", dest="files", - metavar="FILES", - help="Load configuration from config file(s). May be specified " - "multiple times; in that case, all config files will be " - "loaded and combined") - parser.add_option( - "-w", "--where", action="append", dest="where", - metavar="WHERE", - help="Look for tests in this directory. " - "May be specified multiple times. The first directory passed " - "will be used as the working directory, in place of the current " - "working directory, which is the default. Others will be added " - "to the list of tests to execute. [NOSE_WHERE]" - ) - parser.add_option( - "--py3where", action="append", dest="py3where", - metavar="PY3WHERE", - help="Look for tests in this directory under Python 3.x. " - "Functions the same as 'where', but only applies if running under " - "Python 3.x or above. Note that, if present under 3.x, this " - "option completely replaces any directories specified with " - "'where', so the 'where' option becomes ineffective. " - "[NOSE_PY3WHERE]" - ) - parser.add_option( - "-m", "--match", "--testmatch", action="store", - dest="testMatch", metavar="REGEX", - help="Files, directories, function names, and class names " - "that match this regular expression are considered tests. " - "Default: %s [NOSE_TESTMATCH]" % self.testMatchPat, - default=self.testMatchPat) - parser.add_option( - "--tests", action="store", dest="testNames", default=None, - metavar='NAMES', - help="Run these tests (comma-separated list). This argument is " - "useful mainly from configuration files; on the command line, " - "just pass the tests to run as additional arguments with no " - "switch.") - parser.add_option( - "-l", "--debug", action="store", - dest="debug", default=self.debug, - help="Activate debug logging for one or more systems. " - "Available debug loggers: nose, nose.importer, " - "nose.inspector, nose.plugins, nose.result and " - "nose.selector. Separate multiple names with a comma.") - parser.add_option( - "--debug-log", dest="debugLog", action="store", - default=self.debugLog, metavar="FILE", - help="Log debug messages to this file " - "(default: sys.stderr)") - parser.add_option( - "--logging-config", "--log-config", - dest="loggingConfig", action="store", - default=self.loggingConfig, metavar="FILE", - help="Load logging config from this file -- bypasses all other" - " logging config settings.") - parser.add_option( - "-I", "--ignore-files", action="append", dest="ignoreFiles", - metavar="REGEX", - help="Completely ignore any file that matches this regular " - "expression. Takes precedence over any other settings or " - "plugins. " - "Specifying this option will replace the default setting. 
" - "Specify this option multiple times " - "to add more regular expressions [NOSE_IGNORE_FILES]") - parser.add_option( - "-e", "--exclude", action="append", dest="exclude", - metavar="REGEX", - help="Don't run tests that match regular " - "expression [NOSE_EXCLUDE]") - parser.add_option( - "-i", "--include", action="append", dest="include", - metavar="REGEX", - help="This regular expression will be applied to files, " - "directories, function names, and class names for a chance " - "to include additional tests that do not match TESTMATCH. " - "Specify this option multiple times " - "to add more regular expressions [NOSE_INCLUDE]") - parser.add_option( - "-x", "--stop", action="store_true", dest="stopOnError", - default=self.stopOnError, - help="Stop running tests after the first error or failure") - parser.add_option( - "-P", "--no-path-adjustment", action="store_false", - dest="addPaths", - default=self.addPaths, - help="Don't make any changes to sys.path when " - "loading tests [NOSE_NOPATH]") - parser.add_option( - "--exe", action="store_true", dest="includeExe", - default=self.includeExe, - help="Look for tests in python modules that are " - "executable. Normal behavior is to exclude executable " - "modules, since they may not be import-safe " - "[NOSE_INCLUDE_EXE]") - parser.add_option( - "--noexe", action="store_false", dest="includeExe", - help="DO NOT look for tests in python modules that are " - "executable. (The default on the windows platform is to " - "do so.)") - parser.add_option( - "--traverse-namespace", action="store_true", - default=self.traverseNamespace, dest="traverseNamespace", - help="Traverse through all path entries of a namespace package") - parser.add_option( - "--first-package-wins", "--first-pkg-wins", "--1st-pkg-wins", - action="store_true", default=False, dest="firstPackageWins", - help="nose's importer will normally evict a package from sys." - "modules if it sees a package with the same name in a different " - "location. Set this option to disable that behavior.") - parser.add_option( - "--no-byte-compile", - action="store_false", default=True, dest="byteCompile", - help="Prevent nose from byte-compiling the source into .pyc files " - "while nose is scanning for and running tests.") - - self.plugins.loadPlugins() - self.pluginOpts(parser) - - self.parser = parser - return parser - - def help(self, doc=None): - """Return the generated help message - """ - return self.getParser(doc).format_help() - - def pluginOpts(self, parser): - self.plugins.addOptions(parser, self.env) - - def reset(self): - self.__dict__.update(self._orig) - - def todict(self): - return self.__dict__.copy() - - def update(self, d): - self.__dict__.update(d) - - -class NoOptions(object): - """Options container that returns None for all options. - """ - def __getstate__(self): - return {} - - def __setstate__(self, state): - pass - - def __getnewargs__(self): - return () - - def __nonzero__(self): - return False - - -def user_config_files(): - """Return path to any existing user config files - """ - return filter(os.path.exists, - map(os.path.expanduser, config_files)) - - -def all_config_files(): - """Return path to any existing user config files, plus any setup.cfg - in the current working directory. 
- """ - user = user_config_files() - if os.path.exists('setup.cfg'): - return user + ['setup.cfg'] - return user - - -# used when parsing config files -def flag(val): - """Does the value look like an on/off flag?""" - if val == 1: - return True - elif val == 0: - return False - val = str(val) - if len(val) > 5: - return False - return val.upper() in ('1', '0', 'F', 'T', 'TRUE', 'FALSE', 'ON', 'OFF') - - -def _bool(val): - return str(val).upper() in ('1', 'T', 'TRUE', 'ON') diff --git a/lib/spack/external/nose/core.py b/lib/spack/external/nose/core.py deleted file mode 100644 index 49e7939b98..0000000000 --- a/lib/spack/external/nose/core.py +++ /dev/null @@ -1,341 +0,0 @@ -"""Implements nose test program and collector. -""" -from __future__ import generators - -import logging -import os -import sys -import time -import unittest - -from nose.config import Config, all_config_files -from nose.loader import defaultTestLoader -from nose.plugins.manager import PluginManager, DefaultPluginManager, \ - RestrictedPluginManager -from nose.result import TextTestResult -from nose.suite import FinalizingSuiteWrapper -from nose.util import isclass, tolist - - -log = logging.getLogger('nose.core') -compat_24 = sys.version_info >= (2, 4) - -__all__ = ['TestProgram', 'main', 'run', 'run_exit', 'runmodule', 'collector', - 'TextTestRunner'] - - -class TextTestRunner(unittest.TextTestRunner): - """Test runner that uses nose's TextTestResult to enable errorClasses, - as well as providing hooks for plugins to override or replace the test - output stream, results, and the test case itself. - """ - def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1, - config=None): - if config is None: - config = Config() - self.config = config - unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity) - - - def _makeResult(self): - return TextTestResult(self.stream, - self.descriptions, - self.verbosity, - self.config) - - def run(self, test): - """Overrides to provide plugin hooks and defer all output to - the test result class. - """ - wrapper = self.config.plugins.prepareTest(test) - if wrapper is not None: - test = wrapper - - # plugins can decorate or capture the output stream - wrapped = self.config.plugins.setOutputStream(self.stream) - if wrapped is not None: - self.stream = wrapped - - result = self._makeResult() - start = time.time() - try: - test(result) - except KeyboardInterrupt: - pass - stop = time.time() - result.printErrors() - result.printSummary(start, stop) - self.config.plugins.finalize(result) - return result - - -class TestProgram(unittest.TestProgram): - """Collect and run tests, returning success or failure. - - The arguments to TestProgram() are the same as to - :func:`main()` and :func:`run()`: - - * module: All tests are in this module (default: None) - * defaultTest: Tests to load (default: '.') - * argv: Command line arguments (default: None; sys.argv is read) - * testRunner: Test runner instance (default: None) - * testLoader: Test loader instance (default: None) - * env: Environment; ignored if config is provided (default: None; - os.environ is read) - * config: :class:`nose.config.Config` instance (default: None) - * suite: Suite or list of tests to run (default: None). Passing a - suite or lists of tests will bypass all test discovery and - loading. *ALSO NOTE* that if you pass a unittest.TestSuite - instance as the suite, context fixtures at the class, module and - package level will not be used, and many plugin hooks will not - be called. 
If you want normal nose behavior, either pass a list - of tests, or a fully-configured :class:`nose.suite.ContextSuite`. - * exit: Exit after running tests and printing report (default: True) - * plugins: List of plugins to use; ignored if config is provided - (default: load plugins with DefaultPluginManager) - * addplugins: List of **extra** plugins to use. Pass a list of plugin - instances in this argument to make custom plugins available while - still using the DefaultPluginManager. - """ - verbosity = 1 - - def __init__(self, module=None, defaultTest='.', argv=None, - testRunner=None, testLoader=None, env=None, config=None, - suite=None, exit=True, plugins=None, addplugins=None): - if env is None: - env = os.environ - if config is None: - config = self.makeConfig(env, plugins) - if addplugins: - config.plugins.addPlugins(extraplugins=addplugins) - self.config = config - self.suite = suite - self.exit = exit - extra_args = {} - version = sys.version_info[0:2] - if version >= (2,7) and version != (3,0): - extra_args['exit'] = exit - unittest.TestProgram.__init__( - self, module=module, defaultTest=defaultTest, - argv=argv, testRunner=testRunner, testLoader=testLoader, - **extra_args) - - def getAllConfigFiles(self, env=None): - env = env or {} - if env.get('NOSE_IGNORE_CONFIG_FILES', False): - return [] - else: - return all_config_files() - - def makeConfig(self, env, plugins=None): - """Load a Config, pre-filled with user config files if any are - found. - """ - cfg_files = self.getAllConfigFiles(env) - if plugins: - manager = PluginManager(plugins=plugins) - else: - manager = DefaultPluginManager() - return Config( - env=env, files=cfg_files, plugins=manager) - - def parseArgs(self, argv): - """Parse argv and env and configure running environment. - """ - self.config.configure(argv, doc=self.usage()) - log.debug("configured %s", self.config) - - # quick outs: version, plugins (optparse would have already - # caught and exited on help) - if self.config.options.version: - from nose import __version__ - sys.stdout = sys.__stdout__ - print "%s version %s" % (os.path.basename(sys.argv[0]), __version__) - sys.exit(0) - - if self.config.options.showPlugins: - self.showPlugins() - sys.exit(0) - - if self.testLoader is None: - self.testLoader = defaultTestLoader(config=self.config) - elif isclass(self.testLoader): - self.testLoader = self.testLoader(config=self.config) - plug_loader = self.config.plugins.prepareTestLoader(self.testLoader) - if plug_loader is not None: - self.testLoader = plug_loader - log.debug("test loader is %s", self.testLoader) - - # FIXME if self.module is a string, add it to self.testNames? not sure - - if self.config.testNames: - self.testNames = self.config.testNames - else: - self.testNames = tolist(self.defaultTest) - log.debug('defaultTest %s', self.defaultTest) - log.debug('Test names are %s', self.testNames) - if self.config.workingDir is not None: - os.chdir(self.config.workingDir) - self.createTests() - - def createTests(self): - """Create the tests to run. If a self.suite - is set, then that suite will be used. Otherwise, tests will be - loaded from the given test names (self.testNames) using the - test loader. - """ - log.debug("createTests called with %s", self.suite) - if self.suite is not None: - # We were given an explicit suite to run. Make sure it's - # loaded and wrapped correctly. - self.test = self.testLoader.suiteClass(self.suite) - else: - self.test = self.testLoader.loadTestsFromNames(self.testNames) - - def runTests(self): - """Run Tests. 
Returns true on success, false on failure, and sets - self.success to the same value. - """ - log.debug("runTests called") - if self.testRunner is None: - self.testRunner = TextTestRunner(stream=self.config.stream, - verbosity=self.config.verbosity, - config=self.config) - plug_runner = self.config.plugins.prepareTestRunner(self.testRunner) - if plug_runner is not None: - self.testRunner = plug_runner - result = self.testRunner.run(self.test) - self.success = result.wasSuccessful() - if self.exit: - sys.exit(not self.success) - return self.success - - def showPlugins(self): - """Print list of available plugins. - """ - import textwrap - - class DummyParser: - def __init__(self): - self.options = [] - def add_option(self, *arg, **kw): - self.options.append((arg, kw.pop('help', ''))) - - v = self.config.verbosity - self.config.plugins.sort() - for p in self.config.plugins: - print "Plugin %s" % p.name - if v >= 2: - print " score: %s" % p.score - print '\n'.join(textwrap.wrap(p.help().strip(), - initial_indent=' ', - subsequent_indent=' ')) - if v >= 3: - parser = DummyParser() - p.addOptions(parser) - if len(parser.options): - print - print " Options:" - for opts, help in parser.options: - print ' %s' % (', '.join(opts)) - if help: - print '\n'.join( - textwrap.wrap(help.strip(), - initial_indent=' ', - subsequent_indent=' ')) - print - - def usage(cls): - import nose - try: - ld = nose.__loader__ - text = ld.get_data(os.path.join( - os.path.dirname(__file__), 'usage.txt')) - except AttributeError: - f = open(os.path.join( - os.path.dirname(__file__), 'usage.txt'), 'r') - try: - text = f.read() - finally: - f.close() - # Ensure that we return str, not bytes. - if not isinstance(text, str): - text = text.decode('utf-8') - return text - usage = classmethod(usage) - -# backwards compatibility -run_exit = main = TestProgram - - -def run(*arg, **kw): - """Collect and run tests, returning success or failure. - - The arguments to `run()` are the same as to `main()`: - - * module: All tests are in this module (default: None) - * defaultTest: Tests to load (default: '.') - * argv: Command line arguments (default: None; sys.argv is read) - * testRunner: Test runner instance (default: None) - * testLoader: Test loader instance (default: None) - * env: Environment; ignored if config is provided (default: None; - os.environ is read) - * config: :class:`nose.config.Config` instance (default: None) - * suite: Suite or list of tests to run (default: None). Passing a - suite or lists of tests will bypass all test discovery and - loading. *ALSO NOTE* that if you pass a unittest.TestSuite - instance as the suite, context fixtures at the class, module and - package level will not be used, and many plugin hooks will not - be called. If you want normal nose behavior, either pass a list - of tests, or a fully-configured :class:`nose.suite.ContextSuite`. - * plugins: List of plugins to use; ignored if config is provided - (default: load plugins with DefaultPluginManager) - * addplugins: List of **extra** plugins to use. Pass a list of plugin - instances in this argument to make custom plugins available while - still using the DefaultPluginManager. - - With the exception that the ``exit`` argument is always set - to False. - """ - kw['exit'] = False - return TestProgram(*arg, **kw).success - - -def runmodule(name='__main__', **kw): - """Collect and run tests in a single module only. Defaults to running - tests in __main__. Additional arguments to TestProgram may be passed - as keyword arguments. 
- """ - main(defaultTest=name, **kw) - - -def collector(): - """TestSuite replacement entry point. Use anywhere you might use a - unittest.TestSuite. The collector will, by default, load options from - all config files and execute loader.loadTestsFromNames() on the - configured testNames, or '.' if no testNames are configured. - """ - # plugins that implement any of these methods are disabled, since - # we don't control the test runner and won't be able to run them - # finalize() is also not called, but plugins that use it aren't disabled, - # because capture needs it. - setuptools_incompat = ('report', 'prepareTest', - 'prepareTestLoader', 'prepareTestRunner', - 'setOutputStream') - - plugins = RestrictedPluginManager(exclude=setuptools_incompat) - conf = Config(files=all_config_files(), - plugins=plugins) - conf.configure(argv=['collector']) - loader = defaultTestLoader(conf) - - if conf.testNames: - suite = loader.loadTestsFromNames(conf.testNames) - else: - suite = loader.loadTestsFromNames(('.',)) - return FinalizingSuiteWrapper(suite, plugins.finalize) - - - -if __name__ == '__main__': - main() diff --git a/lib/spack/external/nose/exc.py b/lib/spack/external/nose/exc.py deleted file mode 100644 index 8b780db0d4..0000000000 --- a/lib/spack/external/nose/exc.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Exceptions for marking tests as skipped or deprecated. - -This module exists to provide backwards compatibility with previous -versions of nose where skipped and deprecated tests were core -functionality, rather than being provided by plugins. It may be -removed in a future release. -""" -from nose.plugins.skip import SkipTest -from nose.plugins.deprecated import DeprecatedTest diff --git a/lib/spack/external/nose/ext/__init__.py b/lib/spack/external/nose/ext/__init__.py deleted file mode 100644 index 5fd1516a09..0000000000 --- a/lib/spack/external/nose/ext/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -""" -External or vendor files -""" diff --git a/lib/spack/external/nose/ext/dtcompat.py b/lib/spack/external/nose/ext/dtcompat.py deleted file mode 100644 index 332cf08c12..0000000000 --- a/lib/spack/external/nose/ext/dtcompat.py +++ /dev/null @@ -1,2272 +0,0 @@ -# Module doctest. -# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org). -# Major enhancements and refactoring by: -# Jim Fulton -# Edward Loper - -# Provided as-is; use at your own risk; no warranty; no promises; enjoy! -# -# Modified for inclusion in nose to provide support for DocFileTest in -# python 2.3: -# -# - all doctests removed from module (they fail under 2.3 and 2.5) -# - now handles the $py.class extension when ran under Jython - -r"""Module doctest -- a framework for running examples in docstrings. - -In simplest use, end each module M to be tested with: - -def _test(): - import doctest - doctest.testmod() - -if __name__ == "__main__": - _test() - -Then running the module as a script will cause the examples in the -docstrings to get executed and verified: - -python M.py - -This won't display anything unless an example fails, in which case the -failing example(s) and the cause(s) of the failure(s) are printed to stdout -(why not stderr? because stderr is a lame hack <0.2 wink>), and the final -line of output is "Test failed.". - -Run it with the -v switch instead: - -python M.py -v - -and a detailed report of all examples tried is printed to stdout, along -with assorted summaries at the end. - -You can force verbose mode by passing "verbose=True" to testmod, or prohibit -it by passing "verbose=False". 
In either of those cases, sys.argv is not -examined by testmod. - -There are a variety of other ways to run doctests, including integration -with the unittest framework, and support for running non-Python text -files containing doctests. There are also many ways to override parts -of doctest's default behaviors. See the Library Reference Manual for -details. -""" - -__docformat__ = 'reStructuredText en' - -__all__ = [ - # 0, Option Flags - 'register_optionflag', - 'DONT_ACCEPT_TRUE_FOR_1', - 'DONT_ACCEPT_BLANKLINE', - 'NORMALIZE_WHITESPACE', - 'ELLIPSIS', - 'IGNORE_EXCEPTION_DETAIL', - 'COMPARISON_FLAGS', - 'REPORT_UDIFF', - 'REPORT_CDIFF', - 'REPORT_NDIFF', - 'REPORT_ONLY_FIRST_FAILURE', - 'REPORTING_FLAGS', - # 1. Utility Functions - 'is_private', - # 2. Example & DocTest - 'Example', - 'DocTest', - # 3. Doctest Parser - 'DocTestParser', - # 4. Doctest Finder - 'DocTestFinder', - # 5. Doctest Runner - 'DocTestRunner', - 'OutputChecker', - 'DocTestFailure', - 'UnexpectedException', - 'DebugRunner', - # 6. Test Functions - 'testmod', - 'testfile', - 'run_docstring_examples', - # 7. Tester - 'Tester', - # 8. Unittest Support - 'DocTestSuite', - 'DocFileSuite', - 'set_unittest_reportflags', - # 9. Debugging Support - 'script_from_examples', - 'testsource', - 'debug_src', - 'debug', -] - -import __future__ - -import sys, traceback, inspect, linecache, os, re -import unittest, difflib, pdb, tempfile -import warnings -from StringIO import StringIO - -# Don't whine about the deprecated is_private function in this -# module's tests. -warnings.filterwarnings("ignore", "is_private", DeprecationWarning, - __name__, 0) - -# There are 4 basic classes: -# - Example: a pair, plus an intra-docstring line number. -# - DocTest: a collection of examples, parsed from a docstring, plus -# info about where the docstring came from (name, filename, lineno). -# - DocTestFinder: extracts DocTests from a given object's docstring and -# its contained objects' docstrings. -# - DocTestRunner: runs DocTest cases, and accumulates statistics. -# -# So the basic picture is: -# -# list of: -# +------+ +---------+ +-------+ -# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results| -# +------+ +---------+ +-------+ -# | Example | -# | ... | -# | Example | -# +---------+ - -# Option constants. - -OPTIONFLAGS_BY_NAME = {} -def register_optionflag(name): - # Create a new flag unless `name` is already known. - return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME)) - -DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1') -DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE') -NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE') -ELLIPSIS = register_optionflag('ELLIPSIS') -IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL') - -COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 | - DONT_ACCEPT_BLANKLINE | - NORMALIZE_WHITESPACE | - ELLIPSIS | - IGNORE_EXCEPTION_DETAIL) - -REPORT_UDIFF = register_optionflag('REPORT_UDIFF') -REPORT_CDIFF = register_optionflag('REPORT_CDIFF') -REPORT_NDIFF = register_optionflag('REPORT_NDIFF') -REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE') - -REPORTING_FLAGS = (REPORT_UDIFF | - REPORT_CDIFF | - REPORT_NDIFF | - REPORT_ONLY_FIRST_FAILURE) - -# Special string markers for use in `want` strings: -BLANKLINE_MARKER = '' -ELLIPSIS_MARKER = '...' 
- -###################################################################### -## Table of Contents -###################################################################### -# 1. Utility Functions -# 2. Example & DocTest -- store test cases -# 3. DocTest Parser -- extracts examples from strings -# 4. DocTest Finder -- extracts test cases from objects -# 5. DocTest Runner -- runs test cases -# 6. Test Functions -- convenient wrappers for testing -# 7. Tester Class -- for backwards compatibility -# 8. Unittest Support -# 9. Debugging Support -# 10. Example Usage - -###################################################################### -## 1. Utility Functions -###################################################################### - -def is_private(prefix, base): - """prefix, base -> true iff name prefix + "." + base is "private". - - Prefix may be an empty string, and base does not contain a period. - Prefix is ignored (although functions you write conforming to this - protocol may make use of it). - Return true iff base begins with an (at least one) underscore, but - does not both begin and end with (at least) two underscores. - """ - warnings.warn("is_private is deprecated; it wasn't useful; " - "examine DocTestFinder.find() lists instead", - DeprecationWarning, stacklevel=2) - return base[:1] == "_" and not base[:2] == "__" == base[-2:] - -def _extract_future_flags(globs): - """ - Return the compiler-flags associated with the future features that - have been imported into the given namespace (globs). - """ - flags = 0 - for fname in __future__.all_feature_names: - feature = globs.get(fname, None) - if feature is getattr(__future__, fname): - flags |= feature.compiler_flag - return flags - -def _normalize_module(module, depth=2): - """ - Return the module specified by `module`. In particular: - - If `module` is a module, then return module. - - If `module` is a string, then import and return the - module with that name. - - If `module` is None, then return the calling module. - The calling module is assumed to be the module of - the stack frame at the given depth in the call stack. - """ - if inspect.ismodule(module): - return module - elif isinstance(module, (str, unicode)): - return __import__(module, globals(), locals(), ["*"]) - elif module is None: - return sys.modules[sys._getframe(depth).f_globals['__name__']] - else: - raise TypeError("Expected a module, string, or None") - -def _indent(s, indent=4): - """ - Add the given number of space characters to the beginning every - non-blank line in `s`, and return the result. - """ - # This regexp matches the start of non-blank lines: - return re.sub('(?m)^(?!$)', indent*' ', s) - -def _exception_traceback(exc_info): - """ - Return a string containing a traceback message for the given - exc_info tuple (as returned by sys.exc_info()). - """ - # Get a traceback message. - excout = StringIO() - exc_type, exc_val, exc_tb = exc_info - traceback.print_exception(exc_type, exc_val, exc_tb, file=excout) - return excout.getvalue() - -# Override some StringIO methods. -class _SpoofOut(StringIO): - def getvalue(self): - result = StringIO.getvalue(self) - # If anything at all was written, make sure there's a trailing - # newline. There's no way for the expected output to indicate - # that a trailing newline is missing. - if result and not result.endswith("\n"): - result += "\n" - # Prevent softspace from screwing up the next test case, in - # case they used print with a trailing comma in an example. 
- if hasattr(self, "softspace"): - del self.softspace - return result - - def truncate(self, size=None): - StringIO.truncate(self, size) - if hasattr(self, "softspace"): - del self.softspace - -# Worst-case linear-time ellipsis matching. -def _ellipsis_match(want, got): - if ELLIPSIS_MARKER not in want: - return want == got - - # Find "the real" strings. - ws = want.split(ELLIPSIS_MARKER) - assert len(ws) >= 2 - - # Deal with exact matches possibly needed at one or both ends. - startpos, endpos = 0, len(got) - w = ws[0] - if w: # starts with exact match - if got.startswith(w): - startpos = len(w) - del ws[0] - else: - return False - w = ws[-1] - if w: # ends with exact match - if got.endswith(w): - endpos -= len(w) - del ws[-1] - else: - return False - - if startpos > endpos: - # Exact end matches required more characters than we have, as in - # _ellipsis_match('aa...aa', 'aaa') - return False - - # For the rest, we only need to find the leftmost non-overlapping - # match for each piece. If there's no overall match that way alone, - # there's no overall match period. - for w in ws: - # w may be '' at times, if there are consecutive ellipses, or - # due to an ellipsis at the start or end of `want`. That's OK. - # Search for an empty string succeeds, and doesn't change startpos. - startpos = got.find(w, startpos, endpos) - if startpos < 0: - return False - startpos += len(w) - - return True - -def _comment_line(line): - "Return a commented form of the given line" - line = line.rstrip() - if line: - return '# '+line - else: - return '#' - -class _OutputRedirectingPdb(pdb.Pdb): - """ - A specialized version of the python debugger that redirects stdout - to a given stream when interacting with the user. Stdout is *not* - redirected when traced code is executed. - """ - def __init__(self, out): - self.__out = out - pdb.Pdb.__init__(self) - - def trace_dispatch(self, *args): - # Redirect stdout to the given stream. - save_stdout = sys.stdout - sys.stdout = self.__out - # Call Pdb's trace dispatch method. - try: - return pdb.Pdb.trace_dispatch(self, *args) - finally: - sys.stdout = save_stdout - -# [XX] Normalize with respect to os.path.pardir? -def _module_relative_path(module, path): - if not inspect.ismodule(module): - raise TypeError, 'Expected a module: %r' % module - if path.startswith('/'): - raise ValueError, 'Module-relative files may not have absolute paths' - - # Find the base directory for the path. - if hasattr(module, '__file__'): - # A normal module/package - basedir = os.path.split(module.__file__)[0] - elif module.__name__ == '__main__': - # An interactive session. - if len(sys.argv)>0 and sys.argv[0] != '': - basedir = os.path.split(sys.argv[0])[0] - else: - basedir = os.curdir - else: - # A module w/o __file__ (this includes builtins) - raise ValueError("Can't resolve paths relative to the module " + - module + " (it has no __file__)") - - # Combine the base directory and the path. - return os.path.join(basedir, *(path.split('/'))) - -###################################################################### -## 2. Example & DocTest -###################################################################### -## - An "example" is a pair, where "source" is a -## fragment of source code, and "want" is the expected output for -## "source." The Example class also includes information about -## where the example was extracted from. -## -## - A "doctest" is a collection of examples, typically extracted from -## a string (such as an object's docstring). 
The DocTest class also -## includes information about where the string was extracted from. - -class Example: - """ - A single doctest example, consisting of source code and expected - output. `Example` defines the following attributes: - - - source: A single Python statement, always ending with a newline. - The constructor adds a newline if needed. - - - want: The expected output from running the source code (either - from stdout, or a traceback in case of exception). `want` ends - with a newline unless it's empty, in which case it's an empty - string. The constructor adds a newline if needed. - - - exc_msg: The exception message generated by the example, if - the example is expected to generate an exception; or `None` if - it is not expected to generate an exception. This exception - message is compared against the return value of - `traceback.format_exception_only()`. `exc_msg` ends with a - newline unless it's `None`. The constructor adds a newline - if needed. - - - lineno: The line number within the DocTest string containing - this Example where the Example begins. This line number is - zero-based, with respect to the beginning of the DocTest. - - - indent: The example's indentation in the DocTest string. - I.e., the number of space characters that preceed the - example's first prompt. - - - options: A dictionary mapping from option flags to True or - False, which is used to override default options for this - example. Any option flags not contained in this dictionary - are left at their default value (as specified by the - DocTestRunner's optionflags). By default, no options are set. - """ - def __init__(self, source, want, exc_msg=None, lineno=0, indent=0, - options=None): - # Normalize inputs. - if not source.endswith('\n'): - source += '\n' - if want and not want.endswith('\n'): - want += '\n' - if exc_msg is not None and not exc_msg.endswith('\n'): - exc_msg += '\n' - # Store properties. - self.source = source - self.want = want - self.lineno = lineno - self.indent = indent - if options is None: options = {} - self.options = options - self.exc_msg = exc_msg - -class DocTest: - """ - A collection of doctest examples that should be run in a single - namespace. Each `DocTest` defines the following attributes: - - - examples: the list of examples. - - - globs: The namespace (aka globals) that the examples should - be run in. - - - name: A name identifying the DocTest (typically, the name of - the object whose docstring this DocTest was extracted from). - - - filename: The name of the file that this DocTest was extracted - from, or `None` if the filename is unknown. - - - lineno: The line number within filename where this DocTest - begins, or `None` if the line number is unavailable. This - line number is zero-based, with respect to the beginning of - the file. - - - docstring: The string that the examples were extracted from, - or `None` if the string is unavailable. - """ - def __init__(self, examples, globs, name, filename, lineno, docstring): - """ - Create a new DocTest containing the given examples. The - DocTest's globals are initialized with a copy of `globs`. 
- """ - assert not isinstance(examples, basestring), \ - "DocTest no longer accepts str; use DocTestParser instead" - self.examples = examples - self.docstring = docstring - self.globs = globs.copy() - self.name = name - self.filename = filename - self.lineno = lineno - - def __repr__(self): - if len(self.examples) == 0: - examples = 'no examples' - elif len(self.examples) == 1: - examples = '1 example' - else: - examples = '%d examples' % len(self.examples) - return ('' % - (self.name, self.filename, self.lineno, examples)) - - - # This lets us sort tests by name: - def __cmp__(self, other): - if not isinstance(other, DocTest): - return -1 - return cmp((self.name, self.filename, self.lineno, id(self)), - (other.name, other.filename, other.lineno, id(other))) - -###################################################################### -## 3. DocTestParser -###################################################################### - -class DocTestParser: - """ - A class used to parse strings containing doctest examples. - """ - # This regular expression is used to find doctest examples in a - # string. It defines three groups: `source` is the source code - # (including leading indentation and prompts); `indent` is the - # indentation of the first (PS1) line of the source code; and - # `want` is the expected output (including leading indentation). - _EXAMPLE_RE = re.compile(r''' - # Source consists of a PS1 line followed by zero or more PS2 lines. - (?P - (?:^(?P [ ]*) >>> .*) # PS1 line - (?:\n [ ]* \.\.\. .*)*) # PS2 lines - \n? - # Want consists of any non-blank lines that do not start with PS1. - (?P (?:(?![ ]*$) # Not a blank line - (?![ ]*>>>) # Not a line starting with PS1 - .*$\n? # But any other line - )*) - ''', re.MULTILINE | re.VERBOSE) - - # A regular expression for handling `want` strings that contain - # expected exceptions. It divides `want` into three pieces: - # - the traceback header line (`hdr`) - # - the traceback stack (`stack`) - # - the exception message (`msg`), as generated by - # traceback.format_exception_only() - # `msg` may have multiple lines. We assume/require that the - # exception message is the first non-indented line starting with a word - # character following the traceback header line. - _EXCEPTION_RE = re.compile(r""" - # Grab the traceback header. Different versions of Python have - # said different things on the first traceback line. - ^(?P Traceback\ \( - (?: most\ recent\ call\ last - | innermost\ last - ) \) : - ) - \s* $ # toss trailing whitespace on the header. - (?P .*?) # don't blink: absorb stuff until... - ^ (?P \w+ .*) # a line *starts* with alphanum. - """, re.VERBOSE | re.MULTILINE | re.DOTALL) - - # A callable returning a true value iff its argument is a blank line - # or contains a single comment. - _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match - - def parse(self, string, name=''): - """ - Divide the given string into examples and intervening text, - and return them as a list of alternating Examples and strings. - Line numbers for the Examples are 0-based. The optional - argument `name` is a name identifying this string, and is only - used for error messages. - """ - string = string.expandtabs() - # If all lines begin with the same indentation, then strip it. 
- min_indent = self._min_indent(string) - if min_indent > 0: - string = '\n'.join([l[min_indent:] for l in string.split('\n')]) - - output = [] - charno, lineno = 0, 0 - # Find all doctest examples in the string: - for m in self._EXAMPLE_RE.finditer(string): - # Add the pre-example text to `output`. - output.append(string[charno:m.start()]) - # Update lineno (lines before this example) - lineno += string.count('\n', charno, m.start()) - # Extract info from the regexp match. - (source, options, want, exc_msg) = \ - self._parse_example(m, name, lineno) - # Create an Example, and add it to the list. - if not self._IS_BLANK_OR_COMMENT(source): - output.append( Example(source, want, exc_msg, - lineno=lineno, - indent=min_indent+len(m.group('indent')), - options=options) ) - # Update lineno (lines inside this example) - lineno += string.count('\n', m.start(), m.end()) - # Update charno. - charno = m.end() - # Add any remaining post-example text to `output`. - output.append(string[charno:]) - return output - - def get_doctest(self, string, globs, name, filename, lineno): - """ - Extract all doctest examples from the given string, and - collect them into a `DocTest` object. - - `globs`, `name`, `filename`, and `lineno` are attributes for - the new `DocTest` object. See the documentation for `DocTest` - for more information. - """ - return DocTest(self.get_examples(string, name), globs, - name, filename, lineno, string) - - def get_examples(self, string, name=''): - """ - Extract all doctest examples from the given string, and return - them as a list of `Example` objects. Line numbers are - 0-based, because it's most common in doctests that nothing - interesting appears on the same line as opening triple-quote, - and so the first interesting line is called \"line 1\" then. - - The optional argument `name` is a name identifying this - string, and is only used for error messages. - """ - return [x for x in self.parse(string, name) - if isinstance(x, Example)] - - def _parse_example(self, m, name, lineno): - """ - Given a regular expression match from `_EXAMPLE_RE` (`m`), - return a pair `(source, want)`, where `source` is the matched - example's source code (with prompts and indentation stripped); - and `want` is the example's expected output (with indentation - stripped). - - `name` is the string's name, and `lineno` is the line number - where the example starts; both are used for error messages. - """ - # Get the example's indentation level. - indent = len(m.group('indent')) - - # Divide source into lines; check that they're properly - # indented; and then strip their indentation & prompts. - source_lines = m.group('source').split('\n') - self._check_prompt_blank(source_lines, indent, name, lineno) - self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno) - source = '\n'.join([sl[indent+4:] for sl in source_lines]) - - # Divide want into lines; check that it's properly indented; and - # then strip the indentation. Spaces before the last newline should - # be preserved, so plain rstrip() isn't good enough. - want = m.group('want') - want_lines = want.split('\n') - if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]): - del want_lines[-1] # forget final newline & spaces after it - self._check_prefix(want_lines, ' '*indent, name, - lineno + len(source_lines)) - want = '\n'.join([wl[indent:] for wl in want_lines]) - - # If `want` contains a traceback message, then extract it. 
- m = self._EXCEPTION_RE.match(want) - if m: - exc_msg = m.group('msg') - else: - exc_msg = None - - # Extract options from the source. - options = self._find_options(source, name, lineno) - - return source, options, want, exc_msg - - # This regular expression looks for option directives in the - # source code of an example. Option directives are comments - # starting with "doctest:". Warning: this may give false - # positives for string-literals that contain the string - # "#doctest:". Eliminating these false positives would require - # actually parsing the string; but we limit them by ignoring any - # line containing "#doctest:" that is *followed* by a quote mark. - _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$', - re.MULTILINE) - - def _find_options(self, source, name, lineno): - """ - Return a dictionary containing option overrides extracted from - option directives in the given source string. - - `name` is the string's name, and `lineno` is the line number - where the example starts; both are used for error messages. - """ - options = {} - # (note: with the current regexp, this will match at most once:) - for m in self._OPTION_DIRECTIVE_RE.finditer(source): - option_strings = m.group(1).replace(',', ' ').split() - for option in option_strings: - if (option[0] not in '+-' or - option[1:] not in OPTIONFLAGS_BY_NAME): - raise ValueError('line %r of the doctest for %s ' - 'has an invalid option: %r' % - (lineno+1, name, option)) - flag = OPTIONFLAGS_BY_NAME[option[1:]] - options[flag] = (option[0] == '+') - if options and self._IS_BLANK_OR_COMMENT(source): - raise ValueError('line %r of the doctest for %s has an option ' - 'directive on a line with no example: %r' % - (lineno, name, source)) - return options - - # This regular expression finds the indentation of every non-blank - # line in a string. - _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE) - - def _min_indent(self, s): - "Return the minimum indentation of any non-blank line in `s`" - indents = [len(indent) for indent in self._INDENT_RE.findall(s)] - if len(indents) > 0: - return min(indents) - else: - return 0 - - def _check_prompt_blank(self, lines, indent, name, lineno): - """ - Given the lines of a source string (including prompts and - leading indentation), check to make sure that every prompt is - followed by a space character. If any line is not followed by - a space character, then raise ValueError. - """ - for i, line in enumerate(lines): - if len(line) >= indent+4 and line[indent+3] != ' ': - raise ValueError('line %r of the docstring for %s ' - 'lacks blank after %s: %r' % - (lineno+i+1, name, - line[indent:indent+3], line)) - - def _check_prefix(self, lines, prefix, name, lineno): - """ - Check that every line in the given list starts with the given - prefix; if any line does not, then raise a ValueError. - """ - for i, line in enumerate(lines): - if line and not line.startswith(prefix): - raise ValueError('line %r of the docstring for %s has ' - 'inconsistent leading whitespace: %r' % - (lineno+i+1, name, line)) - - -###################################################################### -## 4. DocTest Finder -###################################################################### - -class DocTestFinder: - """ - A class used to extract the DocTests that are relevant to a given - object, from its docstring and the docstrings of its contained - objects. 
Doctests can currently be extracted from the following - object types: modules, functions, classes, methods, staticmethods, - classmethods, and properties. - """ - - def __init__(self, verbose=False, parser=DocTestParser(), - recurse=True, _namefilter=None, exclude_empty=True): - """ - Create a new doctest finder. - - The optional argument `parser` specifies a class or - function that should be used to create new DocTest objects (or - objects that implement the same interface as DocTest). The - signature for this factory function should match the signature - of the DocTest constructor. - - If the optional argument `recurse` is false, then `find` will - only examine the given object, and not any contained objects. - - If the optional argument `exclude_empty` is false, then `find` - will include tests for objects with empty docstrings. - """ - self._parser = parser - self._verbose = verbose - self._recurse = recurse - self._exclude_empty = exclude_empty - # _namefilter is undocumented, and exists only for temporary backward- - # compatibility support of testmod's deprecated isprivate mess. - self._namefilter = _namefilter - - def find(self, obj, name=None, module=None, globs=None, - extraglobs=None): - """ - Return a list of the DocTests that are defined by the given - object's docstring, or by any of its contained objects' - docstrings. - - The optional parameter `module` is the module that contains - the given object. If the module is not specified or is None, then - the test finder will attempt to automatically determine the - correct module. The object's module is used: - - - As a default namespace, if `globs` is not specified. - - To prevent the DocTestFinder from extracting DocTests - from objects that are imported from other modules. - - To find the name of the file containing the object. - - To help find the line number of the object within its - file. - - Contained objects whose module does not match `module` are ignored. - - If `module` is False, no attempt to find the module will be made. - This is obscure, of use mostly in tests: if `module` is False, or - is None but cannot be found automatically, then all objects are - considered to belong to the (non-existent) module, so all contained - objects will (recursively) be searched for doctests. - - The globals for each DocTest is formed by combining `globs` - and `extraglobs` (bindings in `extraglobs` override bindings - in `globs`). A new copy of the globals dictionary is created - for each DocTest. If `globs` is not specified, then it - defaults to the module's `__dict__`, if specified, or {} - otherwise. If `extraglobs` is not specified, then it defaults - to {}. - - """ - # If name was not specified, then extract it from the object. - if name is None: - name = getattr(obj, '__name__', None) - if name is None: - raise ValueError("DocTestFinder.find: name must be given " - "when obj.__name__ doesn't exist: %r" % - (type(obj),)) - - # Find the module that contains the given object (if obj is - # a module, then module=obj.). Note: this may fail, in which - # case module will be None. - if module is False: - module = None - elif module is None: - module = inspect.getmodule(obj) - - # Read the module's source code. This is used by - # DocTestFinder._find_lineno to find the line number for a - # given object's docstring. 
- try: - file = inspect.getsourcefile(obj) or inspect.getfile(obj) - source_lines = linecache.getlines(file) - if not source_lines: - source_lines = None - except TypeError: - source_lines = None - - # Initialize globals, and merge in extraglobs. - if globs is None: - if module is None: - globs = {} - else: - globs = module.__dict__.copy() - else: - globs = globs.copy() - if extraglobs is not None: - globs.update(extraglobs) - - # Recursively expore `obj`, extracting DocTests. - tests = [] - self._find(tests, obj, name, module, source_lines, globs, {}) - # Sort the tests by alpha order of names, for consistency in - # verbose-mode output. This was a feature of doctest in Pythons - # <= 2.3 that got lost by accident in 2.4. It was repaired in - # 2.4.4 and 2.5. - tests.sort() - return tests - - def _filter(self, obj, prefix, base): - """ - Return true if the given object should not be examined. - """ - return (self._namefilter is not None and - self._namefilter(prefix, base)) - - def _from_module(self, module, object): - """ - Return true if the given object is defined in the given - module. - """ - if module is None: - return True - elif inspect.isfunction(object): - return module.__dict__ is object.func_globals - elif inspect.isclass(object): - # Some jython classes don't set __module__ - return module.__name__ == getattr(object, '__module__', None) - elif inspect.getmodule(object) is not None: - return module is inspect.getmodule(object) - elif hasattr(object, '__module__'): - return module.__name__ == object.__module__ - elif isinstance(object, property): - return True # [XX] no way not be sure. - else: - raise ValueError("object must be a class or function") - - def _find(self, tests, obj, name, module, source_lines, globs, seen): - """ - Find tests for the given object and any contained objects, and - add them to `tests`. - """ - if self._verbose: - print 'Finding tests in %s' % name - - # If we've already processed this object, then ignore it. - if id(obj) in seen: - return - seen[id(obj)] = 1 - - # Find a test for this object, and add it to the list of tests. - test = self._get_test(obj, name, module, globs, source_lines) - if test is not None: - tests.append(test) - - # Look for tests in a module's contained objects. - if inspect.ismodule(obj) and self._recurse: - for valname, val in obj.__dict__.items(): - # Check if this contained object should be ignored. - if self._filter(val, name, valname): - continue - valname = '%s.%s' % (name, valname) - # Recurse to functions & classes. - if ((inspect.isfunction(val) or inspect.isclass(val)) and - self._from_module(module, val)): - self._find(tests, val, valname, module, source_lines, - globs, seen) - - # Look for tests in a module's __test__ dictionary. - if inspect.ismodule(obj) and self._recurse: - for valname, val in getattr(obj, '__test__', {}).items(): - if not isinstance(valname, basestring): - raise ValueError("DocTestFinder.find: __test__ keys " - "must be strings: %r" % - (type(valname),)) - if not (inspect.isfunction(val) or inspect.isclass(val) or - inspect.ismethod(val) or inspect.ismodule(val) or - isinstance(val, basestring)): - raise ValueError("DocTestFinder.find: __test__ values " - "must be strings, functions, methods, " - "classes, or modules: %r" % - (type(val),)) - valname = '%s.__test__.%s' % (name, valname) - self._find(tests, val, valname, module, source_lines, - globs, seen) - - # Look for tests in a class's contained objects. 
- if inspect.isclass(obj) and self._recurse: - for valname, val in obj.__dict__.items(): - # Check if this contained object should be ignored. - if self._filter(val, name, valname): - continue - # Special handling for staticmethod/classmethod. - if isinstance(val, staticmethod): - val = getattr(obj, valname) - if isinstance(val, classmethod): - val = getattr(obj, valname).im_func - - # Recurse to methods, properties, and nested classes. - if ((inspect.isfunction(val) or inspect.isclass(val) or - isinstance(val, property)) and - self._from_module(module, val)): - valname = '%s.%s' % (name, valname) - self._find(tests, val, valname, module, source_lines, - globs, seen) - - def _get_test(self, obj, name, module, globs, source_lines): - """ - Return a DocTest for the given object, if it defines a docstring; - otherwise, return None. - """ - # Extract the object's docstring. If it doesn't have one, - # then return None (no test for this object). - if isinstance(obj, basestring): - docstring = obj - else: - try: - if obj.__doc__ is None: - docstring = '' - else: - docstring = obj.__doc__ - if not isinstance(docstring, basestring): - docstring = str(docstring) - except (TypeError, AttributeError): - docstring = '' - - # Find the docstring's location in the file. - lineno = self._find_lineno(obj, source_lines) - - # Don't bother if the docstring is empty. - if self._exclude_empty and not docstring: - return None - - # Return a DocTest for this object. - if module is None: - filename = None - else: - filename = getattr(module, '__file__', module.__name__) - if filename[-4:] in (".pyc", ".pyo"): - filename = filename[:-1] - elif sys.platform.startswith('java') and \ - filename.endswith('$py.class'): - filename = '%s.py' % filename[:-9] - return self._parser.get_doctest(docstring, globs, name, - filename, lineno) - - def _find_lineno(self, obj, source_lines): - """ - Return a line number of the given object's docstring. Note: - this method assumes that the object has a docstring. - """ - lineno = None - - # Find the line number for modules. - if inspect.ismodule(obj): - lineno = 0 - - # Find the line number for classes. - # Note: this could be fooled if a class is defined multiple - # times in a single file. - if inspect.isclass(obj): - if source_lines is None: - return None - pat = re.compile(r'^\s*class\s*%s\b' % - getattr(obj, '__name__', '-')) - for i, line in enumerate(source_lines): - if pat.match(line): - lineno = i - break - - # Find the line number for functions & methods. - if inspect.ismethod(obj): obj = obj.im_func - if inspect.isfunction(obj): obj = obj.func_code - if inspect.istraceback(obj): obj = obj.tb_frame - if inspect.isframe(obj): obj = obj.f_code - if inspect.iscode(obj): - lineno = getattr(obj, 'co_firstlineno', None)-1 - - # Find the line number where the docstring starts. Assume - # that it's the first line that begins with a quote mark. - # Note: this could be fooled by a multiline function - # signature, where a continuation line begins with a quote - # mark. - if lineno is not None: - if source_lines is None: - return lineno+1 - pat = re.compile('(^|.*:)\s*\w*("|\')') - for lineno in range(lineno, len(source_lines)): - if pat.match(source_lines[lineno]): - return lineno - - # We couldn't find the line number. - return None - -###################################################################### -## 5. 
DocTest Runner -###################################################################### - -class DocTestRunner: - # This divider string is used to separate failure messages, and to - # separate sections of the summary. - DIVIDER = "*" * 70 - - def __init__(self, checker=None, verbose=None, optionflags=0): - """ - Create a new test runner. - - Optional keyword arg `checker` is the `OutputChecker` that - should be used to compare the expected outputs and actual - outputs of doctest examples. - - Optional keyword arg 'verbose' prints lots of stuff if true, - only failures if false; by default, it's true iff '-v' is in - sys.argv. - - Optional argument `optionflags` can be used to control how the - test runner compares expected output to actual output, and how - it displays failures. See the documentation for `testmod` for - more information. - """ - self._checker = checker or OutputChecker() - if verbose is None: - verbose = '-v' in sys.argv - self._verbose = verbose - self.optionflags = optionflags - self.original_optionflags = optionflags - - # Keep track of the examples we've run. - self.tries = 0 - self.failures = 0 - self._name2ft = {} - - # Create a fake output target for capturing doctest output. - self._fakeout = _SpoofOut() - - #///////////////////////////////////////////////////////////////// - # Reporting methods - #///////////////////////////////////////////////////////////////// - - def report_start(self, out, test, example): - """ - Report that the test runner is about to process the given - example. (Only displays a message if verbose=True) - """ - if self._verbose: - if example.want: - out('Trying:\n' + _indent(example.source) + - 'Expecting:\n' + _indent(example.want)) - else: - out('Trying:\n' + _indent(example.source) + - 'Expecting nothing\n') - - def report_success(self, out, test, example, got): - """ - Report that the given example ran successfully. (Only - displays a message if verbose=True) - """ - if self._verbose: - out("ok\n") - - def report_failure(self, out, test, example, got): - """ - Report that the given example failed. - """ - out(self._failure_header(test, example) + - self._checker.output_difference(example, got, self.optionflags)) - - def report_unexpected_exception(self, out, test, example, exc_info): - """ - Report that the given example raised an unexpected exception. - """ - out(self._failure_header(test, example) + - 'Exception raised:\n' + _indent(_exception_traceback(exc_info))) - - def _failure_header(self, test, example): - out = [self.DIVIDER] - if test.filename: - if test.lineno is not None and example.lineno is not None: - lineno = test.lineno + example.lineno + 1 - else: - lineno = '?' - out.append('File "%s", line %s, in %s' % - (test.filename, lineno, test.name)) - else: - out.append('Line %s, in %s' % (example.lineno+1, test.name)) - out.append('Failed example:') - source = example.source - out.append(_indent(source)) - return '\n'.join(out) - - #///////////////////////////////////////////////////////////////// - # DocTest Running - #///////////////////////////////////////////////////////////////// - - def __run(self, test, compileflags, out): - """ - Run the examples in `test`. Write the outcome of each example - with one of the `DocTestRunner.report_*` methods, using the - writer function `out`. `compileflags` is the set of compiler - flags that should be used to execute examples. Return a tuple - `(f, t)`, where `t` is the number of examples tried, and `f` - is the number of examples that failed. 
The examples are run - in the namespace `test.globs`. - """ - # Keep track of the number of failures and tries. - failures = tries = 0 - - # Save the option flags (since option directives can be used - # to modify them). - original_optionflags = self.optionflags - - SUCCESS, FAILURE, BOOM = range(3) # `outcome` state - - check = self._checker.check_output - - # Process each example. - for examplenum, example in enumerate(test.examples): - - # If REPORT_ONLY_FIRST_FAILURE is set, then supress - # reporting after the first failure. - quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and - failures > 0) - - # Merge in the example's options. - self.optionflags = original_optionflags - if example.options: - for (optionflag, val) in example.options.items(): - if val: - self.optionflags |= optionflag - else: - self.optionflags &= ~optionflag - - # Record that we started this example. - tries += 1 - if not quiet: - self.report_start(out, test, example) - - # Use a special filename for compile(), so we can retrieve - # the source code during interactive debugging (see - # __patched_linecache_getlines). - filename = '' % (test.name, examplenum) - - # Run the example in the given context (globs), and record - # any exception that gets raised. (But don't intercept - # keyboard interrupts.) - try: - # Don't blink! This is where the user's code gets run. - exec compile(example.source, filename, "single", - compileflags, 1) in test.globs - self.debugger.set_continue() # ==== Example Finished ==== - exception = None - except KeyboardInterrupt: - raise - except: - exception = sys.exc_info() - self.debugger.set_continue() # ==== Example Finished ==== - - got = self._fakeout.getvalue() # the actual output - self._fakeout.truncate(0) - outcome = FAILURE # guilty until proved innocent or insane - - # If the example executed without raising any exceptions, - # verify its output. - if exception is None: - if check(example.want, got, self.optionflags): - outcome = SUCCESS - - # The example raised an exception: check if it was expected. - else: - exc_info = sys.exc_info() - exc_msg = traceback.format_exception_only(*exc_info[:2])[-1] - if not quiet: - got += _exception_traceback(exc_info) - - # If `example.exc_msg` is None, then we weren't expecting - # an exception. - if example.exc_msg is None: - outcome = BOOM - - # We expected an exception: see whether it matches. - elif check(example.exc_msg, exc_msg, self.optionflags): - outcome = SUCCESS - - # Another chance if they didn't care about the detail. - elif self.optionflags & IGNORE_EXCEPTION_DETAIL: - m1 = re.match(r'[^:]*:', example.exc_msg) - m2 = re.match(r'[^:]*:', exc_msg) - if m1 and m2 and check(m1.group(0), m2.group(0), - self.optionflags): - outcome = SUCCESS - - # Report the outcome. - if outcome is SUCCESS: - if not quiet: - self.report_success(out, test, example, got) - elif outcome is FAILURE: - if not quiet: - self.report_failure(out, test, example, got) - failures += 1 - elif outcome is BOOM: - if not quiet: - self.report_unexpected_exception(out, test, example, - exc_info) - failures += 1 - else: - assert False, ("unknown outcome", outcome) - - # Restore the option flags (in case they were modified) - self.optionflags = original_optionflags - - # Record and return the number of failures and tries. - self.__record_outcome(test, failures, tries) - return failures, tries - - def __record_outcome(self, test, f, t): - """ - Record the fact that the given DocTest (`test`) generated `f` - failures out of `t` tried examples. 
- """ - f2, t2 = self._name2ft.get(test.name, (0,0)) - self._name2ft[test.name] = (f+f2, t+t2) - self.failures += f - self.tries += t - - __LINECACHE_FILENAME_RE = re.compile(r'[\w\.]+)' - r'\[(?P\d+)\]>$') - def __patched_linecache_getlines(self, filename): - m = self.__LINECACHE_FILENAME_RE.match(filename) - if m and m.group('name') == self.test.name: - example = self.test.examples[int(m.group('examplenum'))] - return example.source.splitlines(True) - else: - return self.save_linecache_getlines(filename) - - def run(self, test, compileflags=None, out=None, clear_globs=True): - """ - Run the examples in `test`, and display the results using the - writer function `out`. - - The examples are run in the namespace `test.globs`. If - `clear_globs` is true (the default), then this namespace will - be cleared after the test runs, to help with garbage - collection. If you would like to examine the namespace after - the test completes, then use `clear_globs=False`. - - `compileflags` gives the set of flags that should be used by - the Python compiler when running the examples. If not - specified, then it will default to the set of future-import - flags that apply to `globs`. - - The output of each example is checked using - `DocTestRunner.check_output`, and the results are formatted by - the `DocTestRunner.report_*` methods. - """ - self.test = test - - if compileflags is None: - compileflags = _extract_future_flags(test.globs) - - save_stdout = sys.stdout - if out is None: - out = save_stdout.write - sys.stdout = self._fakeout - - # Patch pdb.set_trace to restore sys.stdout during interactive - # debugging (so it's not still redirected to self._fakeout). - # Note that the interactive output will go to *our* - # save_stdout, even if that's not the real sys.stdout; this - # allows us to write test cases for the set_trace behavior. - save_set_trace = pdb.set_trace - self.debugger = _OutputRedirectingPdb(save_stdout) - self.debugger.reset() - pdb.set_trace = self.debugger.set_trace - - # Patch linecache.getlines, so we can see the example's source - # when we're inside the debugger. - self.save_linecache_getlines = linecache.getlines - linecache.getlines = self.__patched_linecache_getlines - - try: - return self.__run(test, compileflags, out) - finally: - sys.stdout = save_stdout - pdb.set_trace = save_set_trace - linecache.getlines = self.save_linecache_getlines - if clear_globs: - test.globs.clear() - - #///////////////////////////////////////////////////////////////// - # Summarization - #///////////////////////////////////////////////////////////////// - def summarize(self, verbose=None): - """ - Print a summary of all the test cases that have been run by - this DocTestRunner, and return a tuple `(f, t)`, where `f` is - the total number of failed examples, and `t` is the total - number of tried examples. - - The optional `verbose` argument controls how detailed the - summary is. If the verbosity is not specified, then the - DocTestRunner's verbosity is used. 
- """ - if verbose is None: - verbose = self._verbose - notests = [] - passed = [] - failed = [] - totalt = totalf = 0 - for x in self._name2ft.items(): - name, (f, t) = x - assert f <= t - totalt += t - totalf += f - if t == 0: - notests.append(name) - elif f == 0: - passed.append( (name, t) ) - else: - failed.append(x) - if verbose: - if notests: - print len(notests), "items had no tests:" - notests.sort() - for thing in notests: - print " ", thing - if passed: - print len(passed), "items passed all tests:" - passed.sort() - for thing, count in passed: - print " %3d tests in %s" % (count, thing) - if failed: - print self.DIVIDER - print len(failed), "items had failures:" - failed.sort() - for thing, (f, t) in failed: - print " %3d of %3d in %s" % (f, t, thing) - if verbose: - print totalt, "tests in", len(self._name2ft), "items." - print totalt - totalf, "passed and", totalf, "failed." - if totalf: - print "***Test Failed***", totalf, "failures." - elif verbose: - print "Test passed." - return totalf, totalt - - #///////////////////////////////////////////////////////////////// - # Backward compatibility cruft to maintain doctest.master. - #///////////////////////////////////////////////////////////////// - def merge(self, other): - d = self._name2ft - for name, (f, t) in other._name2ft.items(): - if name in d: - print "*** DocTestRunner.merge: '" + name + "' in both" \ - " testers; summing outcomes." - f2, t2 = d[name] - f = f + f2 - t = t + t2 - d[name] = f, t - -class OutputChecker: - """ - A class used to check the whether the actual output from a doctest - example matches the expected output. `OutputChecker` defines two - methods: `check_output`, which compares a given pair of outputs, - and returns true if they match; and `output_difference`, which - returns a string describing the differences between two outputs. - """ - def check_output(self, want, got, optionflags): - """ - Return True iff the actual output from an example (`got`) - matches the expected output (`want`). These strings are - always considered to match if they are identical; but - depending on what option flags the test runner is using, - several non-exact match types are also possible. See the - documentation for `TestRunner` for more information about - option flags. - """ - # Handle the common case first, for efficiency: - # if they're string-identical, always return true. - if got == want: - return True - - # The values True and False replaced 1 and 0 as the return - # value for boolean comparisons in Python 2.3. - if not (optionflags & DONT_ACCEPT_TRUE_FOR_1): - if (got,want) == ("True\n", "1\n"): - return True - if (got,want) == ("False\n", "0\n"): - return True - - # can be used as a special sequence to signify a - # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. - if not (optionflags & DONT_ACCEPT_BLANKLINE): - # Replace in want with a blank line. - want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER), - '', want) - # If a line in got contains only spaces, then remove the - # spaces. - got = re.sub('(?m)^\s*?$', '', got) - if got == want: - return True - - # This flag causes doctest to ignore any differences in the - # contents of whitespace strings. Note that this can be used - # in conjunction with the ELLIPSIS flag. - if optionflags & NORMALIZE_WHITESPACE: - got = ' '.join(got.split()) - want = ' '.join(want.split()) - if got == want: - return True - - # The ELLIPSIS flag says to let the sequence "..." in `want` - # match any substring in `got`. 
- if optionflags & ELLIPSIS: - if _ellipsis_match(want, got): - return True - - # We didn't find any match; return false. - return False - - # Should we do a fancy diff? - def _do_a_fancy_diff(self, want, got, optionflags): - # Not unless they asked for a fancy diff. - if not optionflags & (REPORT_UDIFF | - REPORT_CDIFF | - REPORT_NDIFF): - return False - - # If expected output uses ellipsis, a meaningful fancy diff is - # too hard ... or maybe not. In two real-life failures Tim saw, - # a diff was a major help anyway, so this is commented out. - # [todo] _ellipsis_match() knows which pieces do and don't match, - # and could be the basis for a kick-ass diff in this case. - ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want: - ## return False - - # ndiff does intraline difference marking, so can be useful even - # for 1-line differences. - if optionflags & REPORT_NDIFF: - return True - - # The other diff types need at least a few lines to be helpful. - return want.count('\n') > 2 and got.count('\n') > 2 - - def output_difference(self, example, got, optionflags): - """ - Return a string describing the differences between the - expected output for a given example (`example`) and the actual - output (`got`). `optionflags` is the set of option flags used - to compare `want` and `got`. - """ - want = example.want - # If s are being used, then replace blank lines - # with in the actual output string. - if not (optionflags & DONT_ACCEPT_BLANKLINE): - got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got) - - # Check if we should use diff. - if self._do_a_fancy_diff(want, got, optionflags): - # Split want & got into lines. - want_lines = want.splitlines(True) # True == keep line ends - got_lines = got.splitlines(True) - # Use difflib to find their differences. - if optionflags & REPORT_UDIFF: - diff = difflib.unified_diff(want_lines, got_lines, n=2) - diff = list(diff)[2:] # strip the diff header - kind = 'unified diff with -expected +actual' - elif optionflags & REPORT_CDIFF: - diff = difflib.context_diff(want_lines, got_lines, n=2) - diff = list(diff)[2:] # strip the diff header - kind = 'context diff with expected followed by actual' - elif optionflags & REPORT_NDIFF: - engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK) - diff = list(engine.compare(want_lines, got_lines)) - kind = 'ndiff with -expected +actual' - else: - assert 0, 'Bad diff option' - # Remove trailing whitespace on diff output. - diff = [line.rstrip() + '\n' for line in diff] - return 'Differences (%s):\n' % kind + _indent(''.join(diff)) - - # If we're not using diff, then simply list the expected - # output followed by the actual output. - if want and got: - return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got)) - elif want: - return 'Expected:\n%sGot nothing\n' % _indent(want) - elif got: - return 'Expected nothing\nGot:\n%s' % _indent(got) - else: - return 'Expected nothing\nGot nothing\n' - -class DocTestFailure(Exception): - """A DocTest example has failed in debugging mode. 
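The option-flag comparisons above can be observed through the checker's public entry point. A small sketch, again assuming the standard-library doctest module:

import doctest

checker = doctest.OutputChecker()

# NORMALIZE_WHITESPACE treats any run of whitespace as equivalent.
print(checker.check_output("1  2\n", "1 2\n", doctest.NORMALIZE_WHITESPACE))  # True

# ELLIPSIS lets "..." in the expected output match any substring of the actual output.
print(checker.check_output("hello ... world\n", "hello brave new world\n", doctest.ELLIPSIS))  # True

# With no flags, only an exact match succeeds.
print(checker.check_output("4\n", "5\n", 0))  # False
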
- - The exception instance has variables: - - - test: the DocTest object being run - - - excample: the Example object that failed - - - got: the actual output - """ - def __init__(self, test, example, got): - self.test = test - self.example = example - self.got = got - - def __str__(self): - return str(self.test) - -class UnexpectedException(Exception): - """A DocTest example has encountered an unexpected exception - - The exception instance has variables: - - - test: the DocTest object being run - - - excample: the Example object that failed - - - exc_info: the exception info - """ - def __init__(self, test, example, exc_info): - self.test = test - self.example = example - self.exc_info = exc_info - - def __str__(self): - return str(self.test) - -class DebugRunner(DocTestRunner): - - def run(self, test, compileflags=None, out=None, clear_globs=True): - r = DocTestRunner.run(self, test, compileflags, out, False) - if clear_globs: - test.globs.clear() - return r - - def report_unexpected_exception(self, out, test, example, exc_info): - raise UnexpectedException(test, example, exc_info) - - def report_failure(self, out, test, example, got): - raise DocTestFailure(test, example, got) - -###################################################################### -## 6. Test Functions -###################################################################### -# These should be backwards compatible. - -# For backward compatibility, a global instance of a DocTestRunner -# class, updated by testmod. -master = None - -def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None, - report=True, optionflags=0, extraglobs=None, - raise_on_error=False, exclude_empty=False): - """m=None, name=None, globs=None, verbose=None, isprivate=None, - report=True, optionflags=0, extraglobs=None, raise_on_error=False, - exclude_empty=False - - Test examples in docstrings in functions and classes reachable - from module m (or the current module if m is not supplied), starting - with m.__doc__. Unless isprivate is specified, private names - are not skipped. - - Also test examples reachable from dict m.__test__ if it exists and is - not None. m.__test__ maps names to functions, classes and strings; - function and class docstrings are tested even if the name is private; - strings are tested directly, as if they were docstrings. - - Return (#failures, #tests). - - See doctest.__doc__ for an overview. - - Optional keyword arg "name" gives the name of the module; by default - use m.__name__. - - Optional keyword arg "globs" gives a dict to be used as the globals - when executing examples; by default, use m.__dict__. A copy of this - dict is actually used for each docstring, so that each docstring's - examples start with a clean slate. - - Optional keyword arg "extraglobs" gives a dictionary that should be - merged into the globals that are used to execute examples. By - default, no extra globals are used. This is new in 2.4. - - Optional keyword arg "verbose" prints lots of stuff if true, prints - only failures if false; by default, it's true iff "-v" is in sys.argv. - - Optional keyword arg "report" prints a summary at the end when true, - else prints nothing at the end. In verbose mode, the summary is - detailed, else very brief (in fact, empty if all tests passed). - - Optional keyword arg "optionflags" or's together module constants, - and defaults to 0. This is new in 2.3. 
Possible values (see the - docs for details): - - DONT_ACCEPT_TRUE_FOR_1 - DONT_ACCEPT_BLANKLINE - NORMALIZE_WHITESPACE - ELLIPSIS - IGNORE_EXCEPTION_DETAIL - REPORT_UDIFF - REPORT_CDIFF - REPORT_NDIFF - REPORT_ONLY_FIRST_FAILURE - - Optional keyword arg "raise_on_error" raises an exception on the - first unexpected exception or failure. This allows failures to be - post-mortem debugged. - - Deprecated in Python 2.4: - Optional keyword arg "isprivate" specifies a function used to - determine whether a name is private. The default function is - treat all functions as public. Optionally, "isprivate" can be - set to doctest.is_private to skip over functions marked as private - using the underscore naming convention; see its docs for details. - - Advanced tomfoolery: testmod runs methods of a local instance of - class doctest.Tester, then merges the results into (or creates) - global Tester instance doctest.master. Methods of doctest.master - can be called directly too, if you want to do something unusual. - Passing report=0 to testmod is especially useful then, to delay - displaying a summary. Invoke doctest.master.summarize(verbose) - when you're done fiddling. - """ - global master - - if isprivate is not None: - warnings.warn("the isprivate argument is deprecated; " - "examine DocTestFinder.find() lists instead", - DeprecationWarning) - - # If no module was given, then use __main__. - if m is None: - # DWA - m will still be None if this wasn't invoked from the command - # line, in which case the following TypeError is about as good an error - # as we should expect - m = sys.modules.get('__main__') - - # Check that we were actually given a module. - if not inspect.ismodule(m): - raise TypeError("testmod: module required; %r" % (m,)) - - # If no name was given, then use the module's name. - if name is None: - name = m.__name__ - - # Find, parse, and run all tests in the given module. - finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty) - - if raise_on_error: - runner = DebugRunner(verbose=verbose, optionflags=optionflags) - else: - runner = DocTestRunner(verbose=verbose, optionflags=optionflags) - - for test in finder.find(m, name, globs=globs, extraglobs=extraglobs): - runner.run(test) - - if report: - runner.summarize() - - if master is None: - master = runner - else: - master.merge(runner) - - return runner.failures, runner.tries - -def testfile(filename, module_relative=True, name=None, package=None, - globs=None, verbose=None, report=True, optionflags=0, - extraglobs=None, raise_on_error=False, parser=DocTestParser()): - """ - Test examples in the given file. Return (#failures, #tests). - - Optional keyword arg "module_relative" specifies how filenames - should be interpreted: - - - If "module_relative" is True (the default), then "filename" - specifies a module-relative path. By default, this path is - relative to the calling module's directory; but if the - "package" argument is specified, then it is relative to that - package. To ensure os-independence, "filename" should use - "/" characters to separate path segments, and should not - be an absolute path (i.e., it may not begin with "/"). - - - If "module_relative" is False, then "filename" specifies an - os-specific path. The path may be absolute or relative (to - the current working directory). - - Optional keyword arg "name" gives the name of the test; by default - use the file's basename. 
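For reference, the usual way the testmod() entry point documented above is invoked from a module under test; the add() function is invented for the example:

import doctest

def add(a, b):
    """
    >>> add(2, 2)
    4
    """
    return a + b

if __name__ == "__main__":
    # Returns (number of failures, number of examples tried).
    print(doctest.testmod(optionflags=doctest.ELLIPSIS))
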
- - Optional keyword argument "package" is a Python package or the - name of a Python package whose directory should be used as the - base directory for a module relative filename. If no package is - specified, then the calling module's directory is used as the base - directory for module relative filenames. It is an error to - specify "package" if "module_relative" is False. - - Optional keyword arg "globs" gives a dict to be used as the globals - when executing examples; by default, use {}. A copy of this dict - is actually used for each docstring, so that each docstring's - examples start with a clean slate. - - Optional keyword arg "extraglobs" gives a dictionary that should be - merged into the globals that are used to execute examples. By - default, no extra globals are used. - - Optional keyword arg "verbose" prints lots of stuff if true, prints - only failures if false; by default, it's true iff "-v" is in sys.argv. - - Optional keyword arg "report" prints a summary at the end when true, - else prints nothing at the end. In verbose mode, the summary is - detailed, else very brief (in fact, empty if all tests passed). - - Optional keyword arg "optionflags" or's together module constants, - and defaults to 0. Possible values (see the docs for details): - - DONT_ACCEPT_TRUE_FOR_1 - DONT_ACCEPT_BLANKLINE - NORMALIZE_WHITESPACE - ELLIPSIS - IGNORE_EXCEPTION_DETAIL - REPORT_UDIFF - REPORT_CDIFF - REPORT_NDIFF - REPORT_ONLY_FIRST_FAILURE - - Optional keyword arg "raise_on_error" raises an exception on the - first unexpected exception or failure. This allows failures to be - post-mortem debugged. - - Optional keyword arg "parser" specifies a DocTestParser (or - subclass) that should be used to extract tests from the files. - - Advanced tomfoolery: testmod runs methods of a local instance of - class doctest.Tester, then merges the results into (or creates) - global Tester instance doctest.master. Methods of doctest.master - can be called directly too, if you want to do something unusual. - Passing report=0 to testmod is especially useful then, to delay - displaying a summary. Invoke doctest.master.summarize(verbose) - when you're done fiddling. - """ - global master - - if package and not module_relative: - raise ValueError("Package may only be specified for module-" - "relative paths.") - - # Relativize the path - if module_relative: - package = _normalize_module(package) - filename = _module_relative_path(package, filename) - - # If no name was given, then use the file's name. - if name is None: - name = os.path.basename(filename) - - # Assemble the globals. - if globs is None: - globs = {} - else: - globs = globs.copy() - if extraglobs is not None: - globs.update(extraglobs) - - if raise_on_error: - runner = DebugRunner(verbose=verbose, optionflags=optionflags) - else: - runner = DocTestRunner(verbose=verbose, optionflags=optionflags) - - # Read the file, convert it to a test, and run it. - s = open(filename).read() - test = parser.get_doctest(s, globs, name, filename, 0) - runner.run(test) - - if report: - runner.summarize() - - if master is None: - master = runner - else: - master.merge(runner) - - return runner.failures, runner.tries - -def run_docstring_examples(f, globs, verbose=False, name="NoName", - compileflags=None, optionflags=0): - """ - Test examples in the given object's docstring (`f`), using `globs` - as globals. Optional argument `name` is used in failure messages. - If the optional argument `verbose` is true, then generate output - even if there are no failures. 
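testfile() plays the same role for examples kept in a plain text file. A self-contained sketch; the file name is a throwaway invented for the example:

import doctest

with open("example_session.txt", "w") as f:
    f.write(">>> 3 * 7\n21\n")

# module_relative=False makes the path relative to the current working directory.
print(doctest.testfile("example_session.txt", module_relative=False))
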
- - `compileflags` gives the set of flags that should be used by the - Python compiler when running the examples. If not specified, then - it will default to the set of future-import flags that apply to - `globs`. - - Optional keyword arg `optionflags` specifies options for the - testing and output. See the documentation for `testmod` for more - information. - """ - # Find, parse, and run all tests in the given module. - finder = DocTestFinder(verbose=verbose, recurse=False) - runner = DocTestRunner(verbose=verbose, optionflags=optionflags) - for test in finder.find(f, name, globs=globs): - runner.run(test, compileflags=compileflags) - -###################################################################### -## 7. Tester -###################################################################### -# This is provided only for backwards compatibility. It's not -# actually used in any way. - -class Tester: - def __init__(self, mod=None, globs=None, verbose=None, - isprivate=None, optionflags=0): - - warnings.warn("class Tester is deprecated; " - "use class doctest.DocTestRunner instead", - DeprecationWarning, stacklevel=2) - if mod is None and globs is None: - raise TypeError("Tester.__init__: must specify mod or globs") - if mod is not None and not inspect.ismodule(mod): - raise TypeError("Tester.__init__: mod must be a module; %r" % - (mod,)) - if globs is None: - globs = mod.__dict__ - self.globs = globs - - self.verbose = verbose - self.isprivate = isprivate - self.optionflags = optionflags - self.testfinder = DocTestFinder(_namefilter=isprivate) - self.testrunner = DocTestRunner(verbose=verbose, - optionflags=optionflags) - - def runstring(self, s, name): - test = DocTestParser().get_doctest(s, self.globs, name, None, None) - if self.verbose: - print "Running string", name - (f,t) = self.testrunner.run(test) - if self.verbose: - print f, "of", t, "examples failed in string", name - return (f,t) - - def rundoc(self, object, name=None, module=None): - f = t = 0 - tests = self.testfinder.find(object, name, module=module, - globs=self.globs) - for test in tests: - (f2, t2) = self.testrunner.run(test) - (f,t) = (f+f2, t+t2) - return (f,t) - - def rundict(self, d, name, module=None): - import new - m = new.module(name) - m.__dict__.update(d) - if module is None: - module = False - return self.rundoc(m, name, module) - - def run__test__(self, d, name): - import new - m = new.module(name) - m.__test__ = d - return self.rundoc(m, name) - - def summarize(self, verbose=None): - return self.testrunner.summarize(verbose) - - def merge(self, other): - self.testrunner.merge(other.testrunner) - -###################################################################### -## 8. 
Unittest Support -###################################################################### - -_unittest_reportflags = 0 - -def set_unittest_reportflags(flags): - global _unittest_reportflags - - if (flags & REPORTING_FLAGS) != flags: - raise ValueError("Only reporting flags allowed", flags) - old = _unittest_reportflags - _unittest_reportflags = flags - return old - - -class DocTestCase(unittest.TestCase): - - def __init__(self, test, optionflags=0, setUp=None, tearDown=None, - checker=None): - - unittest.TestCase.__init__(self) - self._dt_optionflags = optionflags - self._dt_checker = checker - self._dt_test = test - self._dt_setUp = setUp - self._dt_tearDown = tearDown - - def setUp(self): - test = self._dt_test - - if self._dt_setUp is not None: - self._dt_setUp(test) - - def tearDown(self): - test = self._dt_test - - if self._dt_tearDown is not None: - self._dt_tearDown(test) - - test.globs.clear() - - def runTest(self): - test = self._dt_test - old = sys.stdout - new = StringIO() - optionflags = self._dt_optionflags - - if not (optionflags & REPORTING_FLAGS): - # The option flags don't include any reporting flags, - # so add the default reporting flags - optionflags |= _unittest_reportflags - - runner = DocTestRunner(optionflags=optionflags, - checker=self._dt_checker, verbose=False) - - try: - runner.DIVIDER = "-"*70 - failures, tries = runner.run( - test, out=new.write, clear_globs=False) - finally: - sys.stdout = old - - if failures: - raise self.failureException(self.format_failure(new.getvalue())) - - def format_failure(self, err): - test = self._dt_test - if test.lineno is None: - lineno = 'unknown line number' - else: - lineno = '%s' % test.lineno - lname = '.'.join(test.name.split('.')[-1:]) - return ('Failed doctest test for %s\n' - ' File "%s", line %s, in %s\n\n%s' - % (test.name, test.filename, lineno, lname, err) - ) - - def debug(self): - self.setUp() - runner = DebugRunner(optionflags=self._dt_optionflags, - checker=self._dt_checker, verbose=False) - runner.run(self._dt_test) - self.tearDown() - - def id(self): - return self._dt_test.name - - def __repr__(self): - name = self._dt_test.name.split('.') - return "%s (%s)" % (name[-1], '.'.join(name[:-1])) - - __str__ = __repr__ - - def shortDescription(self): - return "Doctest: " + self._dt_test.name - -def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, - **options): - """ - Convert doctest tests for a module to a unittest test suite. - - This converts each documentation string in a module that - contains doctest tests to a unittest test case. If any of the - tests in a doc string fail, then the test case fails. An exception - is raised showing the name of the file containing the test and a - (sometimes approximate) line number. - - The `module` argument provides the module to be tested. The argument - can be either a module or a module name. - - If no argument is given, the calling module is used. - - A number of options may be provided as keyword arguments: - - setUp - A set-up function. This is called before running the - tests in each file. The setUp function will be passed a DocTest - object. The setUp function can access the test globals as the - globs attribute of the test passed. - - tearDown - A tear-down function. This is called after running the - tests in each file. The tearDown function will be passed a DocTest - object. The tearDown function can access the test globals as the - globs attribute of the test passed. 
- - globs - A dictionary containing initial global variables for the tests. - - optionflags - A set of doctest option flags expressed as an integer. - """ - - if test_finder is None: - test_finder = DocTestFinder() - - module = _normalize_module(module) - tests = test_finder.find(module, globs=globs, extraglobs=extraglobs) - if globs is None: - globs = module.__dict__ - if not tests: - # Why do we want to do this? Because it reveals a bug that might - # otherwise be hidden. - raise ValueError(module, "has no tests") - - tests.sort() - suite = unittest.TestSuite() - for test in tests: - if len(test.examples) == 0: - continue - if not test.filename: - filename = module.__file__ - if filename[-4:] in (".pyc", ".pyo"): - filename = filename[:-1] - elif sys.platform.startswith('java') and \ - filename.endswith('$py.class'): - filename = '%s.py' % filename[:-9] - test.filename = filename - suite.addTest(DocTestCase(test, **options)) - - return suite - -class DocFileCase(DocTestCase): - - def id(self): - return '_'.join(self._dt_test.name.split('.')) - - def __repr__(self): - return self._dt_test.filename - __str__ = __repr__ - - def format_failure(self, err): - return ('Failed doctest test for %s\n File "%s", line 0\n\n%s' - % (self._dt_test.name, self._dt_test.filename, err) - ) - -def DocFileTest(path, module_relative=True, package=None, - globs=None, parser=DocTestParser(), **options): - if globs is None: - globs = {} - - if package and not module_relative: - raise ValueError("Package may only be specified for module-" - "relative paths.") - - # Relativize the path. - if module_relative: - package = _normalize_module(package) - path = _module_relative_path(package, path) - - # Find the file and read it. - name = os.path.basename(path) - doc = open(path).read() - - # Convert it to a test, and wrap it in a DocFileCase. - test = parser.get_doctest(doc, globs, name, path, 0) - return DocFileCase(test, **options) - -def DocFileSuite(*paths, **kw): - """A unittest suite for one or more doctest files. - - The path to each doctest file is given as a string; the - interpretation of that string depends on the keyword argument - "module_relative". - - A number of options may be provided as keyword arguments: - - module_relative - If "module_relative" is True, then the given file paths are - interpreted as os-independent module-relative paths. By - default, these paths are relative to the calling module's - directory; but if the "package" argument is specified, then - they are relative to that package. To ensure os-independence, - "filename" should use "/" characters to separate path - segments, and may not be an absolute path (i.e., it may not - begin with "/"). - - If "module_relative" is False, then the given file paths are - interpreted as os-specific paths. These paths may be absolute - or relative (to the current working directory). - - package - A Python package or the name of a Python package whose directory - should be used as the base directory for module relative paths. - If "package" is not specified, then the calling module's - directory is used as the base directory for module relative - filenames. It is an error to specify "package" if - "module_relative" is False. - - setUp - A set-up function. This is called before running the - tests in each file. The setUp function will be passed a DocTest - object. The setUp function can access the test globals as the - globs attribute of the test passed. - - tearDown - A tear-down function. This is called after running the - tests in each file. 
The tearDown function will be passed a DocTest - object. The tearDown function can access the test globals as the - globs attribute of the test passed. - - globs - A dictionary containing initial global variables for the tests. - - optionflags - A set of doctest option flags expressed as an integer. - - parser - A DocTestParser (or subclass) that should be used to extract - tests from the files. - """ - suite = unittest.TestSuite() - - # We do this here so that _normalize_module is called at the right - # level. If it were called in DocFileTest, then this function - # would be the caller and we might guess the package incorrectly. - if kw.get('module_relative', True): - kw['package'] = _normalize_module(kw.get('package')) - - for path in paths: - suite.addTest(DocFileTest(path, **kw)) - - return suite - -###################################################################### -## 9. Debugging Support -###################################################################### - -def script_from_examples(s): - output = [] - for piece in DocTestParser().parse(s): - if isinstance(piece, Example): - # Add the example's source code (strip trailing NL) - output.append(piece.source[:-1]) - # Add the expected output: - want = piece.want - if want: - output.append('# Expected:') - output += ['## '+l for l in want.split('\n')[:-1]] - else: - # Add non-example text. - output += [_comment_line(l) - for l in piece.split('\n')[:-1]] - - # Trim junk on both ends. - while output and output[-1] == '#': - output.pop() - while output and output[0] == '#': - output.pop(0) - # Combine the output, and return it. - # Add a courtesy newline to prevent exec from choking (see bug #1172785) - return '\n'.join(output) + '\n' - -def testsource(module, name): - """Extract the test sources from a doctest docstring as a script. - - Provide the module (or dotted name of the module) containing the - test to be debugged and the name (within the module) of the object - with the doc string with tests to be debugged. - """ - module = _normalize_module(module) - tests = DocTestFinder().find(module) - test = [t for t in tests if t.name == name] - if not test: - raise ValueError(name, "not found in tests") - test = test[0] - testsrc = script_from_examples(test.docstring) - return testsrc - -def debug_src(src, pm=False, globs=None): - """Debug a single doctest docstring, in argument `src`'""" - testsrc = script_from_examples(src) - debug_script(testsrc, pm, globs) - -def debug_script(src, pm=False, globs=None): - "Debug a test script. `src` is the script, as a string." - import pdb - - # Note that tempfile.NameTemporaryFile() cannot be used. As the - # docs say, a file so created cannot be opened by name a second time - # on modern Windows boxes, and execfile() needs to open it. - srcfilename = tempfile.mktemp(".py", "doctestdebug") - f = open(srcfilename, 'w') - f.write(src) - f.close() - - try: - if globs: - globs = globs.copy() - else: - globs = {} - - if pm: - try: - execfile(srcfilename, globs, globs) - except: - print sys.exc_info()[1] - pdb.post_mortem(sys.exc_info()[2]) - else: - # Note that %r is vital here. '%s' instead can, e.g., cause - # backslashes to get treated as metacharacters on Windows. - pdb.run("execfile(%r)" % srcfilename, globs, globs) - - finally: - os.remove(srcfilename) - -def debug(module, name, pm=False): - """Debug a single doctest docstring. 
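The suite builders above (DocTestSuite and DocFileSuite) are what the standard unittest load_tests protocol typically calls. A sketch assuming a hypothetical my_module whose docstrings contain examples and a hypothetical howto.txt doctest file next to it:

import doctest
import unittest

import my_module  # hypothetical: any module whose docstrings contain >>> examples

def load_tests(loader, tests, ignore):
    tests.addTests(doctest.DocTestSuite(my_module))
    tests.addTests(doctest.DocFileSuite("howto.txt", package=my_module))
    return tests

if __name__ == "__main__":
    unittest.main()
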
- - Provide the module (or dotted name of the module) containing the - test to be debugged and the name (within the module) of the object - with the docstring with tests to be debugged. - """ - module = _normalize_module(module) - testsrc = testsource(module, name) - debug_script(testsrc, pm, module.__dict__) - - -__test__ = {} diff --git a/lib/spack/external/nose/failure.py b/lib/spack/external/nose/failure.py deleted file mode 100644 index c5fabfda5e..0000000000 --- a/lib/spack/external/nose/failure.py +++ /dev/null @@ -1,42 +0,0 @@ -import logging -import unittest -from traceback import format_tb -from nose.pyversion import is_base_exception - -log = logging.getLogger(__name__) - - -__all__ = ['Failure'] - - -class Failure(unittest.TestCase): - """Unloadable or unexecutable test. - - A Failure case is placed in a test suite to indicate the presence of a - test that could not be loaded or executed. A common example is a test - module that fails to import. - - """ - __test__ = False # do not collect - def __init__(self, exc_class, exc_val, tb=None, address=None): - log.debug("A failure! %s %s %s", exc_class, exc_val, format_tb(tb)) - self.exc_class = exc_class - self.exc_val = exc_val - self.tb = tb - self._address = address - unittest.TestCase.__init__(self) - - def __str__(self): - return "Failure: %s (%s)" % ( - getattr(self.exc_class, '__name__', self.exc_class), self.exc_val) - - def address(self): - return self._address - - def runTest(self): - if self.tb is not None: - if is_base_exception(self.exc_val): - raise self.exc_val, None, self.tb - raise self.exc_class, self.exc_val, self.tb - else: - raise self.exc_class(self.exc_val) diff --git a/lib/spack/external/nose/importer.py b/lib/spack/external/nose/importer.py deleted file mode 100644 index e677658ce6..0000000000 --- a/lib/spack/external/nose/importer.py +++ /dev/null @@ -1,167 +0,0 @@ -"""Implements an importer that looks only in specific path (ignoring -sys.path), and uses a per-path cache in addition to sys.modules. This is -necessary because test modules in different directories frequently have the -same names, which means that the first loaded would mask the rest when using -the builtin importer. -""" -import logging -import os -import sys -from nose.config import Config - -from imp import find_module, load_module, acquire_lock, release_lock - -log = logging.getLogger(__name__) - -try: - _samefile = os.path.samefile -except AttributeError: - def _samefile(src, dst): - return (os.path.normcase(os.path.realpath(src)) == - os.path.normcase(os.path.realpath(dst))) - - -class Importer(object): - """An importer class that does only path-specific imports. That - is, the given module is not searched for on sys.path, but only at - the path or in the directory specified. - """ - def __init__(self, config=None): - if config is None: - config = Config() - self.config = config - - def importFromPath(self, path, fqname): - """Import a dotted-name package whose tail is at path. In other words, - given foo.bar and path/to/foo/bar.py, import foo from path/to/foo then - bar from path/to/foo/bar, returning bar. 
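The Importer class above loads modules from an explicit path using the long-deprecated imp module. A rough modern equivalent of the single-module case, using importlib and omitting the parent-package handling nose performs; names and paths here are purely illustrative:

import importlib.util

def import_from_path(fqname, path):
    # Load the file at `path` as module `fqname`, without consulting sys.path.
    spec = importlib.util.spec_from_file_location(fqname, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# e.g. mod = import_from_path("bar", "path/to/foo/bar.py")
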
- """ - # find the base dir of the package - path_parts = os.path.normpath(os.path.abspath(path)).split(os.sep) - name_parts = fqname.split('.') - if path_parts[-1] == '__init__.py': - path_parts.pop() - path_parts = path_parts[:-(len(name_parts))] - dir_path = os.sep.join(path_parts) - # then import fqname starting from that dir - return self.importFromDir(dir_path, fqname) - - def importFromDir(self, dir, fqname): - """Import a module *only* from path, ignoring sys.path and - reloading if the version in sys.modules is not the one we want. - """ - dir = os.path.normpath(os.path.abspath(dir)) - log.debug("Import %s from %s", fqname, dir) - - # FIXME reimplement local per-dir cache? - - # special case for __main__ - if fqname == '__main__': - return sys.modules[fqname] - - if self.config.addPaths: - add_path(dir, self.config) - - path = [dir] - parts = fqname.split('.') - part_fqname = '' - mod = parent = fh = None - - for part in parts: - if part_fqname == '': - part_fqname = part - else: - part_fqname = "%s.%s" % (part_fqname, part) - try: - acquire_lock() - log.debug("find module part %s (%s) in %s", - part, part_fqname, path) - fh, filename, desc = find_module(part, path) - old = sys.modules.get(part_fqname) - if old is not None: - # test modules frequently have name overlap; make sure - # we get a fresh copy of anything we are trying to load - # from a new path - log.debug("sys.modules has %s as %s", part_fqname, old) - if (self.sameModule(old, filename) - or (self.config.firstPackageWins and - getattr(old, '__path__', None))): - mod = old - else: - del sys.modules[part_fqname] - mod = load_module(part_fqname, fh, filename, desc) - else: - mod = load_module(part_fqname, fh, filename, desc) - finally: - if fh: - fh.close() - release_lock() - if parent: - setattr(parent, part, mod) - if hasattr(mod, '__path__'): - path = mod.__path__ - parent = mod - return mod - - def _dirname_if_file(self, filename): - # We only take the dirname if we have a path to a non-dir, - # because taking the dirname of a symlink to a directory does not - # give the actual directory parent. - if os.path.isdir(filename): - return filename - else: - return os.path.dirname(filename) - - def sameModule(self, mod, filename): - mod_paths = [] - if hasattr(mod, '__path__'): - for path in mod.__path__: - mod_paths.append(self._dirname_if_file(path)) - elif hasattr(mod, '__file__'): - mod_paths.append(self._dirname_if_file(mod.__file__)) - else: - # builtin or other module-like object that - # doesn't have __file__; must be new - return False - new_path = self._dirname_if_file(filename) - for mod_path in mod_paths: - log.debug( - "module already loaded? mod: %s new: %s", - mod_path, new_path) - if _samefile(mod_path, new_path): - return True - return False - - -def add_path(path, config=None): - """Ensure that the path, or the root of the current package (if - path is in a package), is in sys.path. - """ - - # FIXME add any src-looking dirs seen too... 
need to get config for that - - log.debug('Add path %s' % path) - if not path: - return [] - added = [] - parent = os.path.dirname(path) - if (parent - and os.path.exists(os.path.join(path, '__init__.py'))): - added.extend(add_path(parent, config)) - elif not path in sys.path: - log.debug("insert %s into sys.path", path) - sys.path.insert(0, path) - added.append(path) - if config and config.srcDirs: - for dirname in config.srcDirs: - dirpath = os.path.join(path, dirname) - if os.path.isdir(dirpath): - sys.path.insert(0, dirpath) - added.append(dirpath) - return added - - -def remove_path(path): - log.debug('Remove path %s' % path) - if path in sys.path: - sys.path.remove(path) diff --git a/lib/spack/external/nose/inspector.py b/lib/spack/external/nose/inspector.py deleted file mode 100644 index a6c4a3e3b6..0000000000 --- a/lib/spack/external/nose/inspector.py +++ /dev/null @@ -1,207 +0,0 @@ -"""Simple traceback introspection. Used to add additional information to -AssertionErrors in tests, so that failure messages may be more informative. -""" -import inspect -import logging -import re -import sys -import textwrap -import tokenize - -try: - from cStringIO import StringIO -except ImportError: - from StringIO import StringIO - -log = logging.getLogger(__name__) - -def inspect_traceback(tb): - """Inspect a traceback and its frame, returning source for the expression - where the exception was raised, with simple variable replacement performed - and the line on which the exception was raised marked with '>>' - """ - log.debug('inspect traceback %s', tb) - - # we only want the innermost frame, where the exception was raised - while tb.tb_next: - tb = tb.tb_next - - frame = tb.tb_frame - lines, exc_line = tbsource(tb) - - # figure out the set of lines to grab. - inspect_lines, mark_line = find_inspectable_lines(lines, exc_line) - src = StringIO(textwrap.dedent(''.join(inspect_lines))) - exp = Expander(frame.f_locals, frame.f_globals) - - while inspect_lines: - try: - for tok in tokenize.generate_tokens(src.readline): - exp(*tok) - except tokenize.TokenError, e: - # this can happen if our inspectable region happens to butt up - # against the end of a construct like a docstring with the closing - # """ on separate line - log.debug("Tokenizer error: %s", e) - inspect_lines.pop(0) - mark_line -= 1 - src = StringIO(textwrap.dedent(''.join(inspect_lines))) - exp = Expander(frame.f_locals, frame.f_globals) - continue - break - padded = [] - if exp.expanded_source: - exp_lines = exp.expanded_source.split('\n') - ep = 0 - for line in exp_lines: - if ep == mark_line: - padded.append('>> ' + line) - else: - padded.append(' ' + line) - ep += 1 - return '\n'.join(padded) - - -def tbsource(tb, context=6): - """Get source from a traceback object. - - A tuple of two things is returned: a list of lines of context from - the source code, and the index of the current line within that list. - The optional second argument specifies the number of lines of context - to return, which are centered around the current line. - - .. Note :: - This is adapted from inspect.py in the python 2.4 standard library, - since a bug in the 2.3 version of inspect prevents it from correctly - locating source lines in a traceback frame. 
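The tbsource docstring above states the goal plainly: return a window of source lines centred on the line that raised, together with that line's index within the window. A rough sketch of the same idea on a modern interpreter (not taken from the patch; traceback_context is an illustrative name, and the 2.3/2.4 compatibility handling below is deliberately omitted) could be:

    import inspect

    def traceback_context(tb, context=6):
        while tb.tb_next:              # walk to the innermost frame
            tb = tb.tb_next
        lineno = tb.tb_lineno
        try:
            lines, _ = inspect.findsource(tb.tb_frame)
        except (IOError, OSError):
            return [''], 0
        start = max(0, min(lineno - 1 - context // 2, len(lines) - context))
        window = lines[start:start + context]
        return window, lineno - 1 - start   # (context lines, index of the raising line)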
- """ - - lineno = tb.tb_lineno - frame = tb.tb_frame - - if context > 0: - start = lineno - 1 - context//2 - log.debug("lineno: %s start: %s", lineno, start) - - try: - lines, dummy = inspect.findsource(frame) - except IOError: - lines, index = [''], 0 - else: - all_lines = lines - start = max(start, 1) - start = max(0, min(start, len(lines) - context)) - lines = lines[start:start+context] - index = lineno - 1 - start - - # python 2.5 compat: if previous line ends in a continuation, - # decrement start by 1 to match 2.4 behavior - if sys.version_info >= (2, 5) and index > 0: - while lines[index-1].strip().endswith('\\'): - start -= 1 - lines = all_lines[start:start+context] - else: - lines, index = [''], 0 - log.debug("tbsource lines '''%s''' around index %s", lines, index) - return (lines, index) - - -def find_inspectable_lines(lines, pos): - """Find lines in home that are inspectable. - - Walk back from the err line up to 3 lines, but don't walk back over - changes in indent level. - - Walk forward up to 3 lines, counting \ separated lines as 1. Don't walk - over changes in indent level (unless part of an extended line) - """ - cnt = re.compile(r'\\[\s\n]*$') - df = re.compile(r':[\s\n]*$') - ind = re.compile(r'^(\s*)') - toinspect = [] - home = lines[pos] - home_indent = ind.match(home).groups()[0] - - before = lines[max(pos-3, 0):pos] - before.reverse() - after = lines[pos+1:min(pos+4, len(lines))] - - for line in before: - if ind.match(line).groups()[0] == home_indent: - toinspect.append(line) - else: - break - toinspect.reverse() - toinspect.append(home) - home_pos = len(toinspect)-1 - continued = cnt.search(home) - for line in after: - if ((continued or ind.match(line).groups()[0] == home_indent) - and not df.search(line)): - toinspect.append(line) - continued = cnt.search(line) - else: - break - log.debug("Inspecting lines '''%s''' around %s", toinspect, home_pos) - return toinspect, home_pos - - -class Expander: - """Simple expression expander. Uses tokenize to find the names and - expands any that can be looked up in the frame. - """ - def __init__(self, locals, globals): - self.locals = locals - self.globals = globals - self.lpos = None - self.expanded_source = '' - - def __call__(self, ttype, tok, start, end, line): - # TODO - # deal with unicode properly - - # TODO - # Dealing with instance members - # always keep the last thing seen - # if the current token is a dot, - # get ready to getattr(lastthing, this thing) on the - # next call. - - if self.lpos is not None: - if start[1] >= self.lpos: - self.expanded_source += ' ' * (start[1]-self.lpos) - elif start[1] < self.lpos: - # newline, indent correctly - self.expanded_source += ' ' * start[1] - self.lpos = end[1] - - if ttype == tokenize.INDENT: - pass - elif ttype == tokenize.NAME: - # Clean this junk up - try: - val = self.locals[tok] - if callable(val): - val = tok - else: - val = repr(val) - except KeyError: - try: - val = self.globals[tok] - if callable(val): - val = tok - else: - val = repr(val) - - except KeyError: - val = tok - # FIXME... 
not sure how to handle things like funcs, classes - # FIXME this is broken for some unicode strings - self.expanded_source += val - else: - self.expanded_source += tok - # if this is the end of the line and the line ends with - # \, then tack a \ and newline onto the output - # print line[end[1]:] - if re.match(r'\s+\\\n', line[end[1]:]): - self.expanded_source += ' \\\n' diff --git a/lib/spack/external/nose/loader.py b/lib/spack/external/nose/loader.py deleted file mode 100644 index 3744e54ff6..0000000000 --- a/lib/spack/external/nose/loader.py +++ /dev/null @@ -1,623 +0,0 @@ -""" -Test Loader ------------ - -nose's test loader implements the same basic functionality as its -superclass, unittest.TestLoader, but extends it by more liberal -interpretations of what may be a test and how a test may be named. -""" -from __future__ import generators - -import logging -import os -import sys -import unittest -import types -from inspect import isfunction -from nose.pyversion import unbound_method, ismethod -from nose.case import FunctionTestCase, MethodTestCase -from nose.failure import Failure -from nose.config import Config -from nose.importer import Importer, add_path, remove_path -from nose.selector import defaultSelector, TestAddress -from nose.util import func_lineno, getpackage, isclass, isgenerator, \ - ispackage, regex_last_key, resolve_name, transplant_func, \ - transplant_class, test_address -from nose.suite import ContextSuiteFactory, ContextList, LazySuite -from nose.pyversion import sort_list, cmp_to_key - - -log = logging.getLogger(__name__) -#log.setLevel(logging.DEBUG) - -# for efficiency and easier mocking -op_normpath = os.path.normpath -op_abspath = os.path.abspath -op_join = os.path.join -op_isdir = os.path.isdir -op_isfile = os.path.isfile - - -__all__ = ['TestLoader', 'defaultTestLoader'] - - -class TestLoader(unittest.TestLoader): - """Test loader that extends unittest.TestLoader to: - - * Load tests from test-like functions and classes that are not - unittest.TestCase subclasses - * Find and load test modules in a directory - * Support tests that are generators - * Support easy extensions of or changes to that behavior through plugins - """ - config = None - importer = None - workingDir = None - selector = None - suiteClass = None - - def __init__(self, config=None, importer=None, workingDir=None, - selector=None): - """Initialize a test loader. - - Parameters (all optional): - - * config: provide a `nose.config.Config`_ or other config class - instance; if not provided a `nose.config.Config`_ with - default values is used. - * importer: provide an importer instance that implements - `importFromPath`. If not provided, a - `nose.importer.Importer`_ is used. - * workingDir: the directory to which file and module names are - relative. If not provided, assumed to be the current working - directory. - * selector: a selector class or instance. If a class is - provided, it will be instantiated with one argument, the - current config. If not provided, a `nose.selector.Selector`_ - is used. 
- """ - if config is None: - config = Config() - if importer is None: - importer = Importer(config=config) - if workingDir is None: - workingDir = config.workingDir - if selector is None: - selector = defaultSelector(config) - elif isclass(selector): - selector = selector(config) - self.config = config - self.importer = importer - self.workingDir = op_normpath(op_abspath(workingDir)) - self.selector = selector - if config.addPaths: - add_path(workingDir, config) - self.suiteClass = ContextSuiteFactory(config=config) - - self._visitedPaths = set([]) - - unittest.TestLoader.__init__(self) - - def getTestCaseNames(self, testCaseClass): - """Override to select with selector, unless - config.getTestCaseNamesCompat is True - """ - if self.config.getTestCaseNamesCompat: - return unittest.TestLoader.getTestCaseNames(self, testCaseClass) - - def wanted(attr, cls=testCaseClass, sel=self.selector): - item = getattr(cls, attr, None) - if isfunction(item): - item = unbound_method(cls, item) - elif not ismethod(item): - return False - return sel.wantMethod(item) - - cases = filter(wanted, dir(testCaseClass)) - - # add runTest if nothing else picked - if not cases and hasattr(testCaseClass, 'runTest'): - cases = ['runTest'] - if self.sortTestMethodsUsing: - sort_list(cases, cmp_to_key(self.sortTestMethodsUsing)) - return cases - - def _haveVisited(self, path): - # For cases where path is None, we always pretend we haven't visited - # them. - if path is None: - return False - - return path in self._visitedPaths - - def _addVisitedPath(self, path): - if path is not None: - self._visitedPaths.add(path) - - def loadTestsFromDir(self, path): - """Load tests from the directory at path. This is a generator - -- each suite of tests from a module or other file is yielded - and is expected to be executed before the next file is - examined. - """ - log.debug("load from dir %s", path) - plugins = self.config.plugins - plugins.beforeDirectory(path) - if self.config.addPaths: - paths_added = add_path(path, self.config) - - entries = os.listdir(path) - sort_list(entries, regex_last_key(self.config.testMatch)) - for entry in entries: - # this hard-coded initial-dot test will be removed: - # http://code.google.com/p/python-nose/issues/detail?id=82 - if entry.startswith('.'): - continue - entry_path = op_abspath(op_join(path, entry)) - is_file = op_isfile(entry_path) - wanted = False - if is_file: - is_dir = False - wanted = self.selector.wantFile(entry_path) - else: - is_dir = op_isdir(entry_path) - if is_dir: - # this hard-coded initial-underscore test will be removed: - # http://code.google.com/p/python-nose/issues/detail?id=82 - if entry.startswith('_'): - continue - wanted = self.selector.wantDirectory(entry_path) - is_package = ispackage(entry_path) - - # Python 3.3 now implements PEP 420: Implicit Namespace Packages. - # As a result, it's now possible that parent paths that have a - # segment with the same basename as our package ends up - # in module.__path__. So we have to keep track of what we've - # visited, and not-revisit them again. 
- if wanted and not self._haveVisited(entry_path): - self._addVisitedPath(entry_path) - if is_file: - plugins.beforeContext() - if entry.endswith('.py'): - yield self.loadTestsFromName( - entry_path, discovered=True) - else: - yield self.loadTestsFromFile(entry_path) - plugins.afterContext() - elif is_package: - # Load the entry as a package: given the full path, - # loadTestsFromName() will figure it out - yield self.loadTestsFromName( - entry_path, discovered=True) - else: - # Another test dir in this one: recurse lazily - yield self.suiteClass( - lambda: self.loadTestsFromDir(entry_path)) - tests = [] - for test in plugins.loadTestsFromDir(path): - tests.append(test) - # TODO: is this try/except needed? - try: - if tests: - yield self.suiteClass(tests) - except (KeyboardInterrupt, SystemExit): - raise - except: - yield self.suiteClass([Failure(*sys.exc_info())]) - - # pop paths - if self.config.addPaths: - for p in paths_added: - remove_path(p) - plugins.afterDirectory(path) - - def loadTestsFromFile(self, filename): - """Load tests from a non-module file. Default is to raise a - ValueError; plugins may implement `loadTestsFromFile` to - provide a list of tests loaded from the file. - """ - log.debug("Load from non-module file %s", filename) - try: - tests = [test for test in - self.config.plugins.loadTestsFromFile(filename)] - if tests: - # Plugins can yield False to indicate that they were - # unable to load tests from a file, but it was not an - # error -- the file just had no tests to load. - tests = filter(None, tests) - return self.suiteClass(tests) - else: - # Nothing was able to even try to load from this file - open(filename, 'r').close() # trigger os error - raise ValueError("Unable to load tests from file %s" - % filename) - except (KeyboardInterrupt, SystemExit): - raise - except: - exc = sys.exc_info() - return self.suiteClass( - [Failure(exc[0], exc[1], exc[2], - address=(filename, None, None))]) - - def loadTestsFromGenerator(self, generator, module): - """Lazy-load tests from a generator function. The generator function - may yield either: - - * a callable, or - * a function name resolvable within the same module - """ - def generate(g=generator, m=module): - try: - for test in g(): - test_func, arg = self.parseGeneratedTest(test) - if not callable(test_func): - test_func = getattr(m, test_func) - yield FunctionTestCase(test_func, arg=arg, descriptor=g) - except KeyboardInterrupt: - raise - except: - exc = sys.exc_info() - yield Failure(exc[0], exc[1], exc[2], - address=test_address(generator)) - return self.suiteClass(generate, context=generator, can_split=False) - - def loadTestsFromGeneratorMethod(self, generator, cls): - """Lazy-load tests from a generator method. 
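Both generator loaders here expand each yielded value into an individual test case via parseGeneratedTest(), which splits a yield into (test_func, args). For orientation only (this sketch is not part of the patch), a nose-style generator test looks like:

    def check_even(n):
        assert n % 2 == 0

    def test_evens():
        # nose turns each yielded (callable, arg) pair into its own FunctionTestCase
        for n in (2, 4, 6):
            yield check_even, n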
- - This is more complicated than loading from a generator function, - since a generator method may yield: - - * a function - * a bound or unbound method, or - * a method name - """ - # convert the unbound generator method - # into a bound method so it can be called below - if hasattr(generator, 'im_class'): - cls = generator.im_class - inst = cls() - method = generator.__name__ - generator = getattr(inst, method) - - def generate(g=generator, c=cls): - try: - for test in g(): - test_func, arg = self.parseGeneratedTest(test) - if not callable(test_func): - test_func = unbound_method(c, getattr(c, test_func)) - if ismethod(test_func): - yield MethodTestCase(test_func, arg=arg, descriptor=g) - elif callable(test_func): - # In this case we're forcing the 'MethodTestCase' - # to run the inline function as its test call, - # but using the generator method as the 'method of - # record' (so no need to pass it as the descriptor) - yield MethodTestCase(g, test=test_func, arg=arg) - else: - yield Failure( - TypeError, - "%s is not a callable or method" % test_func) - except KeyboardInterrupt: - raise - except: - exc = sys.exc_info() - yield Failure(exc[0], exc[1], exc[2], - address=test_address(generator)) - return self.suiteClass(generate, context=generator, can_split=False) - - def loadTestsFromModule(self, module, path=None, discovered=False): - """Load all tests from module and return a suite containing - them. If the module has been discovered and is not test-like, - the suite will be empty by default, though plugins may add - their own tests. - """ - log.debug("Load from module %s", module) - tests = [] - test_classes = [] - test_funcs = [] - # For *discovered* modules, we only load tests when the module looks - # testlike. For modules we've been directed to load, we always - # look for tests. (discovered is set to True by loadTestsFromDir) - if not discovered or self.selector.wantModule(module): - for item in dir(module): - test = getattr(module, item, None) - # print "Check %s (%s) in %s" % (item, test, module.__name__) - if isclass(test): - if self.selector.wantClass(test): - test_classes.append(test) - elif isfunction(test) and self.selector.wantFunction(test): - test_funcs.append(test) - sort_list(test_classes, lambda x: x.__name__) - sort_list(test_funcs, func_lineno) - tests = map(lambda t: self.makeTest(t, parent=module), - test_classes + test_funcs) - - # Now, descend into packages - # FIXME can or should this be lazy? - # is this syntax 2.2 compatible? - module_paths = getattr(module, '__path__', []) - - if path: - path = os.path.normcase(os.path.realpath(path)) - - for module_path in module_paths: - log.debug("Load tests from module path %s?", module_path) - log.debug("path: %s os.path.realpath(%s): %s", - path, os.path.normcase(module_path), - os.path.realpath(os.path.normcase(module_path))) - if (self.config.traverseNamespace or not path) or \ - os.path.realpath( - os.path.normcase(module_path)).startswith(path): - # Egg files can be on sys.path, so make sure the path is a - # directory before trying to load from it. - if os.path.isdir(module_path): - tests.extend(self.loadTestsFromDir(module_path)) - - for test in self.config.plugins.loadTestsFromModule(module, path): - tests.append(test) - - return self.suiteClass(ContextList(tests, context=module)) - - def loadTestsFromName(self, name, module=None, discovered=False): - """Load tests from the entity with the given name. - - The name may indicate a file, directory, module, or any object - within a module. 
See `nose.util.split_test_name` for details on - test name parsing. - """ - # FIXME refactor this method into little bites? - log.debug("load from %s (%s)", name, module) - - suite = self.suiteClass - - # give plugins first crack - plug_tests = self.config.plugins.loadTestsFromName(name, module) - if plug_tests: - return suite(plug_tests) - - addr = TestAddress(name, workingDir=self.workingDir) - if module: - # Two cases: - # name is class.foo - # The addr will be incorrect, since it thinks class.foo is - # a dotted module name. It's actually a dotted attribute - # name. In this case we want to use the full submitted - # name as the name to load from the module. - # name is module:class.foo - # The addr will be correct. The part we want is the part after - # the :, which is in addr.call. - if addr.call: - name = addr.call - parent, obj = self.resolve(name, module) - if (isclass(parent) - and getattr(parent, '__module__', None) != module.__name__ - and not isinstance(obj, Failure)): - parent = transplant_class(parent, module.__name__) - obj = getattr(parent, obj.__name__) - log.debug("parent %s obj %s module %s", parent, obj, module) - if isinstance(obj, Failure): - return suite([obj]) - else: - return suite(ContextList([self.makeTest(obj, parent)], - context=parent)) - else: - if addr.module: - try: - if addr.filename is None: - module = resolve_name(addr.module) - else: - self.config.plugins.beforeImport( - addr.filename, addr.module) - # FIXME: to support module.name names, - # do what resolve-name does and keep trying to - # import, popping tail of module into addr.call, - # until we either get an import or run out of - # module parts - try: - module = self.importer.importFromPath( - addr.filename, addr.module) - finally: - self.config.plugins.afterImport( - addr.filename, addr.module) - except (KeyboardInterrupt, SystemExit): - raise - except: - exc = sys.exc_info() - return suite([Failure(exc[0], exc[1], exc[2], - address=addr.totuple())]) - if addr.call: - return self.loadTestsFromName(addr.call, module) - else: - return self.loadTestsFromModule( - module, addr.filename, - discovered=discovered) - elif addr.filename: - path = addr.filename - if addr.call: - package = getpackage(path) - if package is None: - return suite([ - Failure(ValueError, - "Can't find callable %s in file %s: " - "file is not a python module" % - (addr.call, path), - address=addr.totuple())]) - return self.loadTestsFromName(addr.call, module=package) - else: - if op_isdir(path): - # In this case we *can* be lazy since we know - # that each module in the dir will be fully - # loaded before its tests are executed; we - # also know that we're not going to be asked - # to load from . and ./some_module.py *as part - # of this named test load* - return LazySuite( - lambda: self.loadTestsFromDir(path)) - elif op_isfile(path): - return self.loadTestsFromFile(path) - else: - return suite([ - Failure(OSError, "No such file %s" % path, - address=addr.totuple())]) - else: - # just a function? what to do? I think it can only be - # handled when module is not None - return suite([ - Failure(ValueError, "Unresolvable test name %s" % name, - address=addr.totuple())]) - - def loadTestsFromNames(self, names, module=None): - """Load tests from all names, returning a suite containing all - tests. 
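loadTestsFromName() above accepts several name shapes, which TestAddress splits into file, module, and callable parts. As a rough illustration (the paths and names below are made up, not taken from the patch), the accepted forms are:

    # nosetests pkg.module                      -> a dotted module name
    # nosetests path/to/test_file.py            -> a file
    # nosetests pkg.module:TestClass.test_meth  -> "module:callable"
    # nosetests path/to/test_file.py:test_func  -> "file:callable"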
- """ - plug_res = self.config.plugins.loadTestsFromNames(names, module) - if plug_res: - suite, names = plug_res - if suite: - return self.suiteClass([ - self.suiteClass(suite), - unittest.TestLoader.loadTestsFromNames(self, names, module) - ]) - return unittest.TestLoader.loadTestsFromNames(self, names, module) - - def loadTestsFromTestCase(self, testCaseClass): - """Load tests from a unittest.TestCase subclass. - """ - cases = [] - plugins = self.config.plugins - for case in plugins.loadTestsFromTestCase(testCaseClass): - cases.append(case) - # For efficiency in the most common case, just call and return from - # super. This avoids having to extract cases and rebuild a context - # suite when there are no plugin-contributed cases. - if not cases: - return super(TestLoader, self).loadTestsFromTestCase(testCaseClass) - cases.extend( - [case for case in - super(TestLoader, self).loadTestsFromTestCase(testCaseClass)]) - return self.suiteClass(cases) - - def loadTestsFromTestClass(self, cls): - """Load tests from a test class that is *not* a unittest.TestCase - subclass. - - In this case, we can't depend on the class's `__init__` taking method - name arguments, so we have to compose a MethodTestCase for each - method in the class that looks testlike. - """ - def wanted(attr, cls=cls, sel=self.selector): - item = getattr(cls, attr, None) - if isfunction(item): - item = unbound_method(cls, item) - elif not ismethod(item): - return False - return sel.wantMethod(item) - cases = [self.makeTest(getattr(cls, case), cls) - for case in filter(wanted, dir(cls))] - for test in self.config.plugins.loadTestsFromTestClass(cls): - cases.append(test) - return self.suiteClass(ContextList(cases, context=cls)) - - def makeTest(self, obj, parent=None): - try: - return self._makeTest(obj, parent) - except (KeyboardInterrupt, SystemExit): - raise - except: - exc = sys.exc_info() - try: - addr = test_address(obj) - except KeyboardInterrupt: - raise - except: - addr = None - return Failure(exc[0], exc[1], exc[2], address=addr) - - def _makeTest(self, obj, parent=None): - """Given a test object and its parent, return a test case - or test suite. - """ - plug_tests = [] - try: - addr = test_address(obj) - except KeyboardInterrupt: - raise - except: - addr = None - for test in self.config.plugins.makeTest(obj, parent): - plug_tests.append(test) - # TODO: is this try/except needed? - try: - if plug_tests: - return self.suiteClass(plug_tests) - except (KeyboardInterrupt, SystemExit): - raise - except: - exc = sys.exc_info() - return Failure(exc[0], exc[1], exc[2], address=addr) - - if isfunction(obj) and parent and not isinstance(parent, types.ModuleType): - # This is a Python 3.x 'unbound method'. Wrap it with its - # associated class.. 
- obj = unbound_method(parent, obj) - - if isinstance(obj, unittest.TestCase): - return obj - elif isclass(obj): - if parent and obj.__module__ != parent.__name__: - obj = transplant_class(obj, parent.__name__) - if issubclass(obj, unittest.TestCase): - return self.loadTestsFromTestCase(obj) - else: - return self.loadTestsFromTestClass(obj) - elif ismethod(obj): - if parent is None: - parent = obj.__class__ - if issubclass(parent, unittest.TestCase): - return parent(obj.__name__) - else: - if isgenerator(obj): - return self.loadTestsFromGeneratorMethod(obj, parent) - else: - return MethodTestCase(obj) - elif isfunction(obj): - if parent and obj.__module__ != parent.__name__: - obj = transplant_func(obj, parent.__name__) - if isgenerator(obj): - return self.loadTestsFromGenerator(obj, parent) - else: - return FunctionTestCase(obj) - else: - return Failure(TypeError, - "Can't make a test from %s" % obj, - address=addr) - - def resolve(self, name, module): - """Resolve name within module - """ - obj = module - parts = name.split('.') - for part in parts: - parent, obj = obj, getattr(obj, part, None) - if obj is None: - # no such test - obj = Failure(ValueError, "No such test %s" % name) - return parent, obj - - def parseGeneratedTest(self, test): - """Given the yield value of a test generator, return a func and args. - - This is used in the two loadTestsFromGenerator* methods. - - """ - if not isinstance(test, tuple): # yield test - test_func, arg = (test, tuple()) - elif len(test) == 1: # yield (test,) - test_func, arg = (test[0], tuple()) - else: # yield test, foo, bar, ... - assert len(test) > 1 # sanity check - test_func, arg = (test[0], test[1:]) - return test_func, arg - -defaultTestLoader = TestLoader - diff --git a/lib/spack/external/nose/plugins/__init__.py b/lib/spack/external/nose/plugins/__init__.py deleted file mode 100644 index 08ee8f3230..0000000000 --- a/lib/spack/external/nose/plugins/__init__.py +++ /dev/null @@ -1,190 +0,0 @@ -""" -Writing Plugins ---------------- - -nose supports plugins for test collection, selection, observation and -reporting. There are two basic rules for plugins: - -* Plugin classes should subclass :class:`nose.plugins.Plugin`. - -* Plugins may implement any of the methods described in the class - :doc:`IPluginInterface ` in nose.plugins.base. Please note that - this class is for documentary purposes only; plugins may not subclass - IPluginInterface. - -Hello World -=========== - -Here's a basic plugin. It doesn't do much so read on for more ideas or dive -into the :doc:`IPluginInterface ` to see all available hooks. - -.. code-block:: python - - import logging - import os - - from nose.plugins import Plugin - - log = logging.getLogger('nose.plugins.helloworld') - - class HelloWorld(Plugin): - name = 'helloworld' - - def options(self, parser, env=os.environ): - super(HelloWorld, self).options(parser, env=env) - - def configure(self, options, conf): - super(HelloWorld, self).configure(options, conf) - if not self.enabled: - return - - def finalize(self, result): - log.info('Hello pluginized world!') - -Registering -=========== - -.. Note:: - Important note: the following applies only to the default - plugin manager. Other plugin managers may use different means to - locate and load plugins. - -For nose to find a plugin, it must be part of a package that uses -setuptools_, and the plugin must be included in the entry points defined -in the setup.py for the package: - -.. code-block:: python - - setup(name='Some plugin', - # ... 
- entry_points = { - 'nose.plugins.0.10': [ - 'someplugin = someplugin:SomePlugin' - ] - }, - # ... - ) - -Once the package is installed with install or develop, nose will be able -to load the plugin. - -.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools - -Registering a plugin without setuptools -======================================= - -It is currently possible to register a plugin programmatically by -creating a custom nose runner like this : - -.. code-block:: python - - import nose - from yourplugin import YourPlugin - - if __name__ == '__main__': - nose.main(addplugins=[YourPlugin()]) - -Defining options -================ - -All plugins must implement the methods ``options(self, parser, env)`` -and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin -that want the standard options should call the superclass methods. - -nose uses optparse.OptionParser from the standard library to parse -arguments. A plugin's ``options()`` method receives a parser -instance. It's good form for a plugin to use that instance only to add -additional arguments that take only long arguments (--like-this). Most -of nose's built-in arguments get their default value from an environment -variable. - -A plugin's ``configure()`` method receives the parsed ``OptionParser`` options -object, as well as the current config object. Plugins should configure their -behavior based on the user-selected settings, and may raise exceptions -if the configured behavior is nonsensical. - -Logging -======= - -nose uses the logging classes from the standard library. To enable users -to view debug messages easily, plugins should use ``logging.getLogger()`` to -acquire a logger in the ``nose.plugins`` namespace. - -Recipes -======= - -* Writing a plugin that monitors or controls test result output - - Implement any or all of ``addError``, ``addFailure``, etc., to monitor test - results. If you also want to monitor output, implement - ``setOutputStream`` and keep a reference to the output stream. If you - want to prevent the builtin ``TextTestResult`` output, implement - ``setOutputSteam`` and *return a dummy stream*. The default output will go - to the dummy stream, while you send your desired output to the real stream. - - Example: `examples/html_plugin/htmlplug.py`_ - -* Writing a plugin that handles exceptions - - Subclass :doc:`ErrorClassPlugin `. - - Examples: :doc:`nose.plugins.deprecated `, - :doc:`nose.plugins.skip ` - -* Writing a plugin that adds detail to error reports - - Implement ``formatError`` and/or ``formatFailure``. The error tuple - you return (error class, error message, traceback) will replace the - original error tuple. - - Examples: :doc:`nose.plugins.capture `, - :doc:`nose.plugins.failuredetail ` - -* Writing a plugin that loads tests from files other than python modules - - Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``, - return True for files that you want to examine for tests. In - ``loadTestsFromFile``, for those files, return an iterable - containing TestCases (or yield them as you find them; - ``loadTestsFromFile`` may also be a generator). - - Example: :doc:`nose.plugins.doctests ` - -* Writing a plugin that prints a report - - Implement ``begin`` if you need to perform setup before testing - begins. Implement ``report`` and output your report to the provided stream. - - Examples: :doc:`nose.plugins.cover `, :doc:`nose.plugins.prof ` - -* Writing a plugin that selects or rejects tests - - Implement any or all ``want*`` methods. 
Return False to reject the test - candidate, True to accept it -- which means that the test candidate - will pass through the rest of the system, so you must be prepared to - load tests from it if tests can't be loaded by the core loader or - another plugin -- and None if you don't care. - - Examples: :doc:`nose.plugins.attrib `, - :doc:`nose.plugins.doctests `, :doc:`nose.plugins.testid ` - - -More Examples -============= - -See any builtin plugin or example plugin in the examples_ directory in -the nose source distribution. There is a list of third-party plugins -`on jottit`_. - -.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py -.. _examples: http://python-nose.googlecode.com/svn/trunk/examples -.. _on jottit: http://nose-plugins.jottit.com/ - -""" -from nose.plugins.base import Plugin -from nose.plugins.manager import * -from nose.plugins.plugintest import PluginTester - -if __name__ == '__main__': - import doctest - doctest.testmod() diff --git a/lib/spack/external/nose/plugins/allmodules.py b/lib/spack/external/nose/plugins/allmodules.py deleted file mode 100644 index 1ccd7773a7..0000000000 --- a/lib/spack/external/nose/plugins/allmodules.py +++ /dev/null @@ -1,45 +0,0 @@ -"""Use the AllModules plugin by passing ``--all-modules`` or setting the -NOSE_ALL_MODULES environment variable to enable collection and execution of -tests in all python modules. Normal nose behavior is to look for tests only in -modules that match testMatch. - -More information: :doc:`../doc_tests/test_allmodules/test_allmodules` - -.. warning :: - - This plugin can have surprising interactions with plugins that load tests - from what nose normally considers non-test modules, such as - the :doc:`doctest plugin `. This is because any given - object in a module can't be loaded both by a plugin and the normal nose - :class:`test loader `. Also, if you have functions - or classes in non-test modules that look like tests but aren't, you will - likely see errors as nose attempts to run them as tests. - -""" - -import os -from nose.plugins.base import Plugin - -class AllModules(Plugin): - """Collect tests from all python modules. - """ - def options(self, parser, env): - """Register commandline options. - """ - env_opt = 'NOSE_ALL_MODULES' - parser.add_option('--all-modules', - action="store_true", - dest=self.enableOpt, - default=env.get(env_opt), - help="Enable plugin %s: %s [%s]" % - (self.__class__.__name__, self.help(), env_opt)) - - def wantFile(self, file): - """Override to return True for all files ending with .py""" - # always want .py files - if file.endswith('.py'): - return True - - def wantModule(self, module): - """Override return True for all modules""" - return True diff --git a/lib/spack/external/nose/plugins/attrib.py b/lib/spack/external/nose/plugins/attrib.py deleted file mode 100644 index 3d4422a23a..0000000000 --- a/lib/spack/external/nose/plugins/attrib.py +++ /dev/null @@ -1,286 +0,0 @@ -"""Attribute selector plugin. - -Oftentimes when testing you will want to select tests based on -criteria rather then simply by filename. For example, you might want -to run all tests except for the slow ones. You can do this with the -Attribute selector plugin by setting attributes on your test methods. -Here is an example: - -.. code-block:: python - - def test_big_download(): - import urllib - # commence slowness... 
- - test_big_download.slow = 1 - -Once you've assigned an attribute ``slow = 1`` you can exclude that -test and all other tests having the slow attribute by running :: - - $ nosetests -a '!slow' - -There is also a decorator available for you that will set attributes. -Here's how to set ``slow=1`` like above with the decorator: - -.. code-block:: python - - from nose.plugins.attrib import attr - @attr('slow') - def test_big_download(): - import urllib - # commence slowness... - -And here's how to set an attribute with a specific value: - -.. code-block:: python - - from nose.plugins.attrib import attr - @attr(speed='slow') - def test_big_download(): - import urllib - # commence slowness... - -This test could be run with :: - - $ nosetests -a speed=slow - -In Python 2.6 and higher, ``@attr`` can be used on a class to set attributes -on all its test methods at once. For example: - -.. code-block:: python - - from nose.plugins.attrib import attr - @attr(speed='slow') - class MyTestCase: - def test_long_integration(self): - pass - def test_end_to_end_something(self): - pass - -Below is a reference to the different syntaxes available. - -Simple syntax -------------- - -Examples of using the ``-a`` and ``--attr`` options: - -* ``nosetests -a status=stable`` - Only runs tests with attribute "status" having value "stable" - -* ``nosetests -a priority=2,status=stable`` - Runs tests having both attributes and values - -* ``nosetests -a priority=2 -a slow`` - Runs tests that match either attribute - -* ``nosetests -a tags=http`` - If a test's ``tags`` attribute was a list and it contained the value - ``http`` then it would be run - -* ``nosetests -a slow`` - Runs tests with the attribute ``slow`` if its value does not equal False - (False, [], "", etc...) - -* ``nosetests -a '!slow'`` - Runs tests that do NOT have the attribute ``slow`` or have a ``slow`` - attribute that is equal to False - **NOTE**: - if your shell (like bash) interprets '!' as a special character make sure to - put single quotes around it. - -Expression Evaluation ---------------------- - -Examples using the ``-A`` and ``--eval-attr`` options: - -* ``nosetests -A "not slow"`` - Evaluates the Python expression "not slow" and runs the test if True - -* ``nosetests -A "(priority > 5) and not slow"`` - Evaluates a complex Python expression and runs the test if True - -""" -import inspect -import logging -import os -import sys -from inspect import isfunction -from nose.plugins.base import Plugin -from nose.util import tolist - -log = logging.getLogger('nose.plugins.attrib') -compat_24 = sys.version_info >= (2, 4) - -def attr(*args, **kwargs): - """Decorator that adds attributes to classes or functions - for use with the Attribute (-a) plugin. - """ - def wrap_ob(ob): - for name in args: - setattr(ob, name, True) - for name, value in kwargs.iteritems(): - setattr(ob, name, value) - return ob - return wrap_ob - -def get_method_attr(method, cls, attr_name, default = False): - """Look up an attribute on a method/ function. - If the attribute isn't found there, looking it up in the - method's class, if any. - """ - Missing = object() - value = getattr(method, attr_name, Missing) - if value is Missing and cls is not None: - value = getattr(cls, attr_name, Missing) - if value is Missing: - return default - return value - - -class ContextHelper: - """Object that can act as context dictionary for eval and looks up - names as attributes on a method/ function and its class. 
- """ - def __init__(self, method, cls): - self.method = method - self.cls = cls - - def __getitem__(self, name): - return get_method_attr(self.method, self.cls, name) - - -class AttributeSelector(Plugin): - """Selects test cases to be run based on their attributes. - """ - - def __init__(self): - Plugin.__init__(self) - self.attribs = [] - - def options(self, parser, env): - """Register command line options""" - parser.add_option("-a", "--attr", - dest="attr", action="append", - default=env.get('NOSE_ATTR'), - metavar="ATTR", - help="Run only tests that have attributes " - "specified by ATTR [NOSE_ATTR]") - # disable in < 2.4: eval can't take needed args - if compat_24: - parser.add_option("-A", "--eval-attr", - dest="eval_attr", metavar="EXPR", action="append", - default=env.get('NOSE_EVAL_ATTR'), - help="Run only tests for whose attributes " - "the Python expression EXPR evaluates " - "to True [NOSE_EVAL_ATTR]") - - def configure(self, options, config): - """Configure the plugin and system, based on selected options. - - attr and eval_attr may each be lists. - - self.attribs will be a list of lists of tuples. In that list, each - list is a group of attributes, all of which must match for the rule to - match. - """ - self.attribs = [] - - # handle python eval-expression parameter - if compat_24 and options.eval_attr: - eval_attr = tolist(options.eval_attr) - for attr in eval_attr: - # "" - # -> eval(expr) in attribute context must be True - def eval_in_context(expr, obj, cls): - return eval(expr, None, ContextHelper(obj, cls)) - self.attribs.append([(attr, eval_in_context)]) - - # attribute requirements are a comma separated list of - # 'key=value' pairs - if options.attr: - std_attr = tolist(options.attr) - for attr in std_attr: - # all attributes within an attribute group must match - attr_group = [] - for attrib in attr.strip().split(","): - # don't die on trailing comma - if not attrib: - continue - items = attrib.split("=", 1) - if len(items) > 1: - # "name=value" - # -> 'str(obj.name) == value' must be True - key, value = items - else: - key = items[0] - if key[0] == "!": - # "!name" - # 'bool(obj.name)' must be False - key = key[1:] - value = False - else: - # "name" - # -> 'bool(obj.name)' must be True - value = True - attr_group.append((key, value)) - self.attribs.append(attr_group) - if self.attribs: - self.enabled = True - - def validateAttrib(self, method, cls = None): - """Verify whether a method has the required attributes - The method is considered a match if it matches all attributes - for any attribute group. - .""" - # TODO: is there a need for case-sensitive value comparison? 
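The -A/--eval-attr machinery above evaluates the user's expression with a ContextHelper as the locals mapping, so bare names in the expression resolve through get_method_attr() to attributes on the test method or, failing that, on its class. A small hedged example (the SlowTests class is invented for illustration and is not part of the patch):

    class SlowTests:
        speed = 'slow'
        def test_download(self):
            pass

    helper = ContextHelper(SlowTests.test_download, SlowTests)
    # 'speed' is not set on the function, so the lookup falls back to the class
    print(eval("speed == 'slow'", None, helper))   # -> True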
- any = False - for group in self.attribs: - match = True - for key, value in group: - attr = get_method_attr(method, cls, key) - if callable(value): - if not value(key, method, cls): - match = False - break - elif value is True: - # value must exist and be True - if not bool(attr): - match = False - break - elif value is False: - # value must not exist or be False - if bool(attr): - match = False - break - elif type(attr) in (list, tuple): - # value must be found in the list attribute - if not str(value).lower() in [str(x).lower() - for x in attr]: - match = False - break - else: - # value must match, convert to string and compare - if (value != attr - and str(value).lower() != str(attr).lower()): - match = False - break - any = any or match - if any: - # not True because we don't want to FORCE the selection of the - # item, only say that it is acceptable - return None - return False - - def wantFunction(self, function): - """Accept the function if its attributes match. - """ - return self.validateAttrib(function) - - def wantMethod(self, method): - """Accept the method if its attributes match. - """ - try: - cls = method.im_class - except AttributeError: - return False - return self.validateAttrib(method, cls) diff --git a/lib/spack/external/nose/plugins/base.py b/lib/spack/external/nose/plugins/base.py deleted file mode 100644 index f09beb696f..0000000000 --- a/lib/spack/external/nose/plugins/base.py +++ /dev/null @@ -1,725 +0,0 @@ -import os -import textwrap -from optparse import OptionConflictError -from warnings import warn -from nose.util import tolist - -class Plugin(object): - """Base class for nose plugins. It's recommended but not *necessary* to - subclass this class to create a plugin, but all plugins *must* implement - `options(self, parser, env)` and `configure(self, options, conf)`, and - must have the attributes `enabled`, `name` and `score`. The `name` - attribute may contain hyphens ('-'). - - Plugins should not be enabled by default. - - Subclassing Plugin (and calling the superclass methods in - __init__, configure, and options, if you override them) will give - your plugin some friendly default behavior: - - * A --with-$name option will be added to the command line interface - to enable the plugin, and a corresponding environment variable - will be used as the default value. The plugin class's docstring - will be used as the help for this option. - * The plugin will not be enabled unless this option is selected by - the user. - """ - can_configure = False - enabled = False - enableOpt = None - name = None - score = 100 - - def __init__(self): - if self.name is None: - self.name = self.__class__.__name__.lower() - if self.enableOpt is None: - self.enableOpt = "enable_plugin_%s" % self.name.replace('-', '_') - - def addOptions(self, parser, env=None): - """Add command-line options for this plugin. - - The base plugin class adds --with-$name by default, used to enable the - plugin. - - .. warning :: Don't implement addOptions unless you want to override - all default option handling behavior, including - warnings for conflicting options. Implement - :meth:`options - ` - instead. - """ - self.add_options(parser, env) - - def add_options(self, parser, env=None): - """Non-camel-case version of func name for backwards compatibility. - - .. warning :: - - DEPRECATED: Do not use this method, - use :meth:`options ` - instead. 
- - """ - # FIXME raise deprecation warning if wasn't called by wrapper - if env is None: - env = os.environ - try: - self.options(parser, env) - self.can_configure = True - except OptionConflictError, e: - warn("Plugin %s has conflicting option string: %s and will " - "be disabled" % (self, e), RuntimeWarning) - self.enabled = False - self.can_configure = False - - def options(self, parser, env): - """Register commandline options. - - Implement this method for normal options behavior with protection from - OptionConflictErrors. If you override this method and want the default - --with-$name option to be registered, be sure to call super(). - """ - env_opt = 'NOSE_WITH_%s' % self.name.upper() - env_opt = env_opt.replace('-', '_') - parser.add_option("--with-%s" % self.name, - action="store_true", - dest=self.enableOpt, - default=env.get(env_opt), - help="Enable plugin %s: %s [%s]" % - (self.__class__.__name__, self.help(), env_opt)) - - def configure(self, options, conf): - """Configure the plugin and system, based on selected options. - - The base plugin class sets the plugin to enabled if the enable option - for the plugin (self.enableOpt) is true. - """ - if not self.can_configure: - return - self.conf = conf - if hasattr(options, self.enableOpt): - self.enabled = getattr(options, self.enableOpt) - - def help(self): - """Return help for this plugin. This will be output as the help - section of the --with-$name option that enables the plugin. - """ - if self.__class__.__doc__: - # doc sections are often indented; compress the spaces - return textwrap.dedent(self.__class__.__doc__) - return "(no help available)" - - # Compatiblity shim - def tolist(self, val): - warn("Plugin.tolist is deprecated. Use nose.util.tolist instead", - DeprecationWarning) - return tolist(val) - - -class IPluginInterface(object): - """ - IPluginInterface describes the plugin API. Do not subclass or use this - class directly. - """ - def __new__(cls, *arg, **kw): - raise TypeError("IPluginInterface class is for documentation only") - - def addOptions(self, parser, env): - """Called to allow plugin to register command-line options with the - parser. DO NOT return a value from this method unless you want to stop - all other plugins from setting their options. - - .. warning :: - - DEPRECATED -- implement - :meth:`options ` instead. - """ - pass - add_options = addOptions - add_options.deprecated = True - - def addDeprecated(self, test): - """Called when a deprecated test is seen. DO NOT return a value - unless you want to stop other plugins from seeing the deprecated - test. - - .. warning :: DEPRECATED -- check error class in addError instead - """ - pass - addDeprecated.deprecated = True - - def addError(self, test, err): - """Called when a test raises an uncaught exception. DO NOT return a - value unless you want to stop other plugins from seeing that the - test has raised an error. - - :param test: the test case - :type test: :class:`nose.case.Test` - :param err: sys.exc_info() tuple - :type err: 3-tuple - """ - pass - addError.changed = True - - def addFailure(self, test, err): - """Called when a test fails. DO NOT return a value unless you - want to stop other plugins from seeing that the test has failed. - - :param test: the test case - :type test: :class:`nose.case.Test` - :param err: 3-tuple - :type err: sys.exc_info() tuple - """ - pass - addFailure.changed = True - - def addSkip(self, test): - """Called when a test is skipped. 
DO NOT return a value unless - you want to stop other plugins from seeing the skipped test. - - .. warning:: DEPRECATED -- check error class in addError instead - """ - pass - addSkip.deprecated = True - - def addSuccess(self, test): - """Called when a test passes. DO NOT return a value unless you - want to stop other plugins from seeing the passing test. - - :param test: the test case - :type test: :class:`nose.case.Test` - """ - pass - addSuccess.changed = True - - def afterContext(self): - """Called after a context (generally a module) has been - lazy-loaded, imported, setup, had its tests loaded and - executed, and torn down. - """ - pass - afterContext._new = True - - def afterDirectory(self, path): - """Called after all tests have been loaded from directory at path - and run. - - :param path: the directory that has finished processing - :type path: string - """ - pass - afterDirectory._new = True - - def afterImport(self, filename, module): - """Called after module is imported from filename. afterImport - is called even if the import failed. - - :param filename: The file that was loaded - :type filename: string - :param module: The name of the module - :type module: string - """ - pass - afterImport._new = True - - def afterTest(self, test): - """Called after the test has been run and the result recorded - (after stopTest). - - :param test: the test case - :type test: :class:`nose.case.Test` - """ - pass - afterTest._new = True - - def beforeContext(self): - """Called before a context (generally a module) is - examined. Because the context is not yet loaded, plugins don't - get to know what the context is; so any context operations - should use a stack that is pushed in `beforeContext` and popped - in `afterContext` to ensure they operate symmetrically. - - `beforeContext` and `afterContext` are mainly useful for tracking - and restoring global state around possible changes from within a - context, whatever the context may be. If you need to operate on - contexts themselves, see `startContext` and `stopContext`, which - are passed the context in question, but are called after - it has been loaded (imported in the module case). - """ - pass - beforeContext._new = True - - def beforeDirectory(self, path): - """Called before tests are loaded from directory at path. - - :param path: the directory that is about to be processed - """ - pass - beforeDirectory._new = True - - def beforeImport(self, filename, module): - """Called before module is imported from filename. - - :param filename: The file that will be loaded - :param module: The name of the module found in file - :type module: string - """ - beforeImport._new = True - - def beforeTest(self, test): - """Called before the test is run (before startTest). - - :param test: the test case - :type test: :class:`nose.case.Test` - """ - pass - beforeTest._new = True - - def begin(self): - """Called before any tests are collected or run. Use this to - perform any setup needed before testing begins. - """ - pass - - def configure(self, options, conf): - """Called after the command line has been parsed, with the - parsed options and the config container. Here, implement any - config storage or changes to state or operation that are set - by command line options. - - DO NOT return a value from this method unless you want to - stop all other plugins from being configured. - """ - pass - - def finalize(self, result): - """Called after all report output, including output from all - plugins, has been sent to the stream. 
Use this to print final - test results or perform final cleanup. Return None to allow - other plugins to continue printing, or any other value to stop - them. - - :param result: test result object - - .. Note:: When tests are run under a test runner other than - :class:`nose.core.TextTestRunner`, such as - via ``python setup.py test``, this method may be called - **before** the default report output is sent. - """ - pass - - def describeTest(self, test): - """Return a test description. - - Called by :meth:`nose.case.Test.shortDescription`. - - :param test: the test case - :type test: :class:`nose.case.Test` - """ - pass - describeTest._new = True - - def formatError(self, test, err): - """Called in result.addError, before plugin.addError. If you - want to replace or modify the error tuple, return a new error - tuple, otherwise return err, the original error tuple. - - :param test: the test case - :type test: :class:`nose.case.Test` - :param err: sys.exc_info() tuple - :type err: 3-tuple - """ - pass - formatError._new = True - formatError.chainable = True - # test arg is not chainable - formatError.static_args = (True, False) - - def formatFailure(self, test, err): - """Called in result.addFailure, before plugin.addFailure. If you - want to replace or modify the error tuple, return a new error - tuple, otherwise return err, the original error tuple. - - :param test: the test case - :type test: :class:`nose.case.Test` - :param err: sys.exc_info() tuple - :type err: 3-tuple - """ - pass - formatFailure._new = True - formatFailure.chainable = True - # test arg is not chainable - formatFailure.static_args = (True, False) - - def handleError(self, test, err): - """Called on addError. To handle the error yourself and prevent normal - error processing, return a true value. - - :param test: the test case - :type test: :class:`nose.case.Test` - :param err: sys.exc_info() tuple - :type err: 3-tuple - """ - pass - handleError._new = True - - def handleFailure(self, test, err): - """Called on addFailure. To handle the failure yourself and - prevent normal failure processing, return a true value. - - :param test: the test case - :type test: :class:`nose.case.Test` - :param err: sys.exc_info() tuple - :type err: 3-tuple - """ - pass - handleFailure._new = True - - def loadTestsFromDir(self, path): - """Return iterable of tests from a directory. May be a - generator. Each item returned must be a runnable - unittest.TestCase (or subclass) instance or suite instance. - Return None if your plugin cannot collect any tests from - directory. - - :param path: The path to the directory. - """ - pass - loadTestsFromDir.generative = True - loadTestsFromDir._new = True - - def loadTestsFromModule(self, module, path=None): - """Return iterable of tests in a module. May be a - generator. Each item returned must be a runnable - unittest.TestCase (or subclass) instance. - Return None if your plugin cannot - collect any tests from module. - - :param module: The module object - :type module: python module - :param path: the path of the module to search, to distinguish from - namespace package modules - - .. note:: - - NEW. The ``path`` parameter will only be passed by nose 0.11 - or above. - """ - pass - loadTestsFromModule.generative = True - - def loadTestsFromName(self, name, module=None, importPath=None): - """Return tests in this file or module. Return None if you are not able - to load any tests, or an iterable if you are. May be a - generator. - - :param name: The test name. 
May be a file or module name plus a test - callable. Use split_test_name to split into parts. Or it might - be some crazy name of your own devising, in which case, do - whatever you want. - :param module: Module from which the name is to be loaded - :param importPath: Path from which file (must be a python module) was - found - - .. warning:: DEPRECATED: this argument will NOT be passed. - """ - pass - loadTestsFromName.generative = True - - def loadTestsFromNames(self, names, module=None): - """Return a tuple of (tests loaded, remaining names). Return - None if you are not able to load any tests. Multiple plugins - may implement loadTestsFromNames; the remaining name list from - each will be passed to the next as input. - - :param names: List of test names. - :type names: iterable - :param module: Module from which the names are to be loaded - """ - pass - loadTestsFromNames._new = True - loadTestsFromNames.chainable = True - - def loadTestsFromFile(self, filename): - """Return tests in this file. Return None if you are not - interested in loading any tests, or an iterable if you are and - can load some. May be a generator. *If you are interested in - loading tests from the file and encounter no errors, but find - no tests, yield False or return [False].* - - .. Note:: This method replaces loadTestsFromPath from the 0.9 - API. - - :param filename: The full path to the file or directory. - """ - pass - loadTestsFromFile.generative = True - loadTestsFromFile._new = True - - def loadTestsFromPath(self, path): - """ - .. warning:: DEPRECATED -- use loadTestsFromFile instead - """ - pass - loadTestsFromPath.deprecated = True - - def loadTestsFromTestCase(self, cls): - """Return tests in this test case class. Return None if you are - not able to load any tests, or an iterable if you are. May be a - generator. - - :param cls: The test case class. Must be subclass of - :class:`unittest.TestCase`. - """ - pass - loadTestsFromTestCase.generative = True - - def loadTestsFromTestClass(self, cls): - """Return tests in this test class. Class will *not* be a - unittest.TestCase subclass. Return None if you are not able to - load any tests, an iterable if you are. May be a generator. - - :param cls: The test case class. Must be **not** be subclass of - :class:`unittest.TestCase`. - """ - pass - loadTestsFromTestClass._new = True - loadTestsFromTestClass.generative = True - - def makeTest(self, obj, parent): - """Given an object and its parent, return or yield one or more - test cases. Each test must be a unittest.TestCase (or subclass) - instance. This is called before default test loading to allow - plugins to load an alternate test case or cases for an - object. May be a generator. - - :param obj: The object to be made into a test - :param parent: The parent of obj (eg, for a method, the class) - """ - pass - makeTest._new = True - makeTest.generative = True - - def options(self, parser, env): - """Called to allow plugin to register command line - options with the parser. - - DO NOT return a value from this method unless you want to stop - all other plugins from setting their options. - - :param parser: options parser instance - :type parser: :class:`ConfigParser.ConfigParser` - :param env: environment, default is os.environ - """ - pass - options._new = True - - def prepareTest(self, test): - """Called before the test is run by the test runner. 
Please - note the article *the* in the previous sentence: prepareTest - is called *only once*, and is passed the test case or test - suite that the test runner will execute. It is *not* called - for each individual test case. If you return a non-None value, - that return value will be run as the test. Use this hook to - wrap or decorate the test with another function. If you need - to modify or wrap individual test cases, use `prepareTestCase` - instead. - - :param test: the test case - :type test: :class:`nose.case.Test` - """ - pass - - def prepareTestCase(self, test): - """Prepare or wrap an individual test case. Called before - execution of the test. The test passed here is a - nose.case.Test instance; the case to be executed is in the - test attribute of the passed case. To modify the test to be - run, you should return a callable that takes one argument (the - test result object) -- it is recommended that you *do not* - side-effect the nose.case.Test instance you have been passed. - - Keep in mind that when you replace the test callable you are - replacing the run() method of the test case -- including the - exception handling and result calls, etc. - - :param test: the test case - :type test: :class:`nose.case.Test` - """ - pass - prepareTestCase._new = True - - def prepareTestLoader(self, loader): - """Called before tests are loaded. To replace the test loader, - return a test loader. To allow other plugins to process the - test loader, return None. Only one plugin may replace the test - loader. Only valid when using nose.TestProgram. - - :param loader: :class:`nose.loader.TestLoader` - (or other loader) instance - """ - pass - prepareTestLoader._new = True - - def prepareTestResult(self, result): - """Called before the first test is run. To use a different - test result handler for all tests than the given result, - return a test result handler. NOTE however that this handler - will only be seen by tests, that is, inside of the result - proxy system. The TestRunner and TestProgram -- whether nose's - or other -- will continue to see the original result - handler. For this reason, it is usually better to monkeypatch - the result (for instance, if you want to handle some - exceptions in a unique way). Only one plugin may replace the - result, but many may monkeypatch it. If you want to - monkeypatch and stop other plugins from doing so, monkeypatch - and return the patched result. - - :param result: :class:`nose.result.TextTestResult` - (or other result) instance - """ - pass - prepareTestResult._new = True - - def prepareTestRunner(self, runner): - """Called before tests are run. To replace the test runner, - return a test runner. To allow other plugins to process the - test runner, return None. Only valid when using nose.TestProgram. - - :param runner: :class:`nose.core.TextTestRunner` - (or other runner) instance - """ - pass - prepareTestRunner._new = True - - def report(self, stream): - """Called after all error output has been printed. Print your - plugin's report to the provided stream. Return None to allow - other plugins to print reports, any other value to stop them. - - :param stream: stream object; send your output here - :type stream: file-like object - """ - pass - - def setOutputStream(self, stream): - """Called before test output begins. To direct test output to a - new stream, return a stream object, which must implement a - `write(msg)` method. If you only want to note the stream, not - capture or redirect it, then return None. 
- - :param stream: stream object; send your output here - :type stream: file-like object - """ - - def startContext(self, context): - """Called before context setup and the running of tests in the - context. Note that tests have already been *loaded* from the - context before this call. - - :param context: the context about to be setup. May be a module or - class, or any other object that contains tests. - """ - pass - startContext._new = True - - def startTest(self, test): - """Called before each test is run. DO NOT return a value unless - you want to stop other plugins from seeing the test start. - - :param test: the test case - :type test: :class:`nose.case.Test` - """ - pass - - def stopContext(self, context): - """Called after the tests in a context have run and the - context has been torn down. - - :param context: the context that has been torn down. May be a module or - class, or any other object that contains tests. - """ - pass - stopContext._new = True - - def stopTest(self, test): - """Called after each test is run. DO NOT return a value unless - you want to stop other plugins from seeing that the test has stopped. - - :param test: the test case - :type test: :class:`nose.case.Test` - """ - pass - - def testName(self, test): - """Return a short test name. Called by `nose.case.Test.__str__`. - - :param test: the test case - :type test: :class:`nose.case.Test` - """ - pass - testName._new = True - - def wantClass(self, cls): - """Return true if you want the main test selector to collect - tests from this class, false if you don't, and None if you don't - care. - - :param cls: The class being examined by the selector - """ - pass - - def wantDirectory(self, dirname): - """Return true if you want test collection to descend into this - directory, false if you do not, and None if you don't care. - - :param dirname: Full path to directory being examined by the selector - """ - pass - - def wantFile(self, file): - """Return true if you want to collect tests from this file, - false if you do not and None if you don't care. - - Change from 0.9: The optional package parameter is no longer passed. - - :param file: Full path to file being examined by the selector - """ - pass - - def wantFunction(self, function): - """Return true to collect this function as a test, false to - prevent it from being collected, and None if you don't care. - - :param function: The function object being examined by the selector - """ - pass - - def wantMethod(self, method): - """Return true to collect this method as a test, false to - prevent it from being collected, and None if you don't care. - - :param method: The method object being examined by the selector - :type method: unbound method - """ - pass - - def wantModule(self, module): - """Return true if you want to collection to descend into this - module, false to prevent the collector from descending into the - module, and None if you don't care. - - :param module: The module object being examined by the selector - :type module: python module - """ - pass - - def wantModuleTests(self, module): - """ - .. warning:: DEPRECATED -- this method will not be called, it has - been folded into wantModule. - """ - pass - wantModuleTests.deprecated = True - diff --git a/lib/spack/external/nose/plugins/builtin.py b/lib/spack/external/nose/plugins/builtin.py deleted file mode 100644 index 4fcc0018ad..0000000000 --- a/lib/spack/external/nose/plugins/builtin.py +++ /dev/null @@ -1,34 +0,0 @@ -""" -Lists builtin plugins. 
-""" -plugins = [] -builtins = ( - ('nose.plugins.attrib', 'AttributeSelector'), - ('nose.plugins.capture', 'Capture'), - ('nose.plugins.logcapture', 'LogCapture'), - ('nose.plugins.cover', 'Coverage'), - ('nose.plugins.debug', 'Pdb'), - ('nose.plugins.deprecated', 'Deprecated'), - ('nose.plugins.doctests', 'Doctest'), - ('nose.plugins.isolate', 'IsolationPlugin'), - ('nose.plugins.failuredetail', 'FailureDetail'), - ('nose.plugins.prof', 'Profile'), - ('nose.plugins.skip', 'Skip'), - ('nose.plugins.testid', 'TestId'), - ('nose.plugins.multiprocess', 'MultiProcess'), - ('nose.plugins.xunit', 'Xunit'), - ('nose.plugins.allmodules', 'AllModules'), - ('nose.plugins.collect', 'CollectOnly'), - ) - -for module, cls in builtins: - try: - plugmod = __import__(module, globals(), locals(), [cls]) - except KeyboardInterrupt: - raise - except: - continue - plug = getattr(plugmod, cls) - plugins.append(plug) - globals()[cls] = plug - diff --git a/lib/spack/external/nose/plugins/capture.py b/lib/spack/external/nose/plugins/capture.py deleted file mode 100644 index fa4e5dcaaf..0000000000 --- a/lib/spack/external/nose/plugins/capture.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -This plugin captures stdout during test execution. If the test fails -or raises an error, the captured output will be appended to the error -or failure output. It is enabled by default but can be disabled with -the options ``-s`` or ``--nocapture``. - -:Options: - ``--nocapture`` - Don't capture stdout (any stdout output will be printed immediately) - -""" -import logging -import os -import sys -from nose.plugins.base import Plugin -from nose.pyversion import exc_to_unicode, force_unicode -from nose.util import ln -from StringIO import StringIO - - -log = logging.getLogger(__name__) - -class Capture(Plugin): - """ - Output capture plugin. Enabled by default. Disable with ``-s`` or - ``--nocapture``. This plugin captures stdout during test execution, - appending any output captured to the error or failure output, - should the test fail or raise an error. - """ - enabled = True - env_opt = 'NOSE_NOCAPTURE' - name = 'capture' - score = 1600 - - def __init__(self): - self.stdout = [] - self._buf = None - - def options(self, parser, env): - """Register commandline options - """ - parser.add_option( - "-s", "--nocapture", action="store_false", - default=not env.get(self.env_opt), dest="capture", - help="Don't capture stdout (any stdout output " - "will be printed immediately) [NOSE_NOCAPTURE]") - - def configure(self, options, conf): - """Configure plugin. Plugin is enabled by default. - """ - self.conf = conf - if not options.capture: - self.enabled = False - - def afterTest(self, test): - """Clear capture buffer. - """ - self.end() - self._buf = None - - def begin(self): - """Replace sys.stdout with capture buffer. - """ - self.start() # get an early handle on sys.stdout - - def beforeTest(self, test): - """Flush capture buffer. - """ - self.start() - - def formatError(self, test, err): - """Add captured output to error report. - """ - test.capturedOutput = output = self.buffer - self._buf = None - if not output: - # Don't return None as that will prevent other - # formatters from formatting and remove earlier formatters - # formats, instead return the err we got - return err - ec, ev, tb = err - return (ec, self.addCaptureToErr(ev, output), tb) - - def formatFailure(self, test, err): - """Add captured output to failure report. 
- """ - return self.formatError(test, err) - - def addCaptureToErr(self, ev, output): - ev = exc_to_unicode(ev) - output = force_unicode(output) - return u'\n'.join([ev, ln(u'>> begin captured stdout <<'), - output, ln(u'>> end captured stdout <<')]) - - def start(self): - self.stdout.append(sys.stdout) - self._buf = StringIO() - sys.stdout = self._buf - - def end(self): - if self.stdout: - sys.stdout = self.stdout.pop() - - def finalize(self, result): - """Restore stdout. - """ - while self.stdout: - self.end() - - def _get_buffer(self): - if self._buf is not None: - return self._buf.getvalue() - - buffer = property(_get_buffer, None, None, - """Captured stdout output.""") diff --git a/lib/spack/external/nose/plugins/collect.py b/lib/spack/external/nose/plugins/collect.py deleted file mode 100644 index 6f9f0faa77..0000000000 --- a/lib/spack/external/nose/plugins/collect.py +++ /dev/null @@ -1,94 +0,0 @@ -""" -This plugin bypasses the actual execution of tests, and instead just collects -test names. Fixtures are also bypassed, so running nosetests with the -collection plugin enabled should be very quick. - -This plugin is useful in combination with the testid plugin (``--with-id``). -Run both together to get an indexed list of all tests, which will enable you to -run individual tests by index number. - -This plugin is also useful for counting tests in a test suite, and making -people watching your demo think all of your tests pass. -""" -from nose.plugins.base import Plugin -from nose.case import Test -import logging -import unittest - -log = logging.getLogger(__name__) - - -class CollectOnly(Plugin): - """ - Collect and output test names only, don't run any tests. - """ - name = "collect-only" - enableOpt = 'collect_only' - - def options(self, parser, env): - """Register commandline options. - """ - parser.add_option('--collect-only', - action='store_true', - dest=self.enableOpt, - default=env.get('NOSE_COLLECT_ONLY'), - help="Enable collect-only: %s [COLLECT_ONLY]" % - (self.help())) - - def prepareTestLoader(self, loader): - """Install collect-only suite class in TestLoader. - """ - # Disable context awareness - log.debug("Preparing test loader") - loader.suiteClass = TestSuiteFactory(self.conf) - - def prepareTestCase(self, test): - """Replace actual test with dummy that always passes. - """ - # Return something that always passes - log.debug("Preparing test case %s", test) - if not isinstance(test, Test): - return - def run(result): - # We need to make these plugin calls because there won't be - # a result proxy, due to using a stripped-down test suite - self.conf.plugins.startTest(test) - result.startTest(test) - self.conf.plugins.addSuccess(test) - result.addSuccess(test) - self.conf.plugins.stopTest(test) - result.stopTest(test) - return run - - -class TestSuiteFactory: - """ - Factory for producing configured test suites. - """ - def __init__(self, conf): - self.conf = conf - - def __call__(self, tests=(), **kw): - return TestSuite(tests, conf=self.conf) - - -class TestSuite(unittest.TestSuite): - """ - Basic test suite that bypasses most proxy and plugin calls, but does - wrap tests in a nose.case.Test so prepareTestCase will be called. 
- """ - def __init__(self, tests=(), conf=None): - self.conf = conf - # Exec lazy suites: makes discovery depth-first - if callable(tests): - tests = tests() - log.debug("TestSuite(%r)", tests) - unittest.TestSuite.__init__(self, tests) - - def addTest(self, test): - log.debug("Add test %s", test) - if isinstance(test, unittest.TestSuite): - self._tests.append(test) - else: - self._tests.append(Test(test, config=self.conf)) - diff --git a/lib/spack/external/nose/plugins/cover.py b/lib/spack/external/nose/plugins/cover.py deleted file mode 100644 index fbe2e30dcd..0000000000 --- a/lib/spack/external/nose/plugins/cover.py +++ /dev/null @@ -1,271 +0,0 @@ -"""If you have Ned Batchelder's coverage_ module installed, you may activate a -coverage report with the ``--with-coverage`` switch or NOSE_WITH_COVERAGE -environment variable. The coverage report will cover any python source module -imported after the start of the test run, excluding modules that match -testMatch. If you want to include those modules too, use the ``--cover-tests`` -switch, or set the NOSE_COVER_TESTS environment variable to a true value. To -restrict the coverage report to modules from a particular package or packages, -use the ``--cover-package`` switch or the NOSE_COVER_PACKAGE environment -variable. - -.. _coverage: http://www.nedbatchelder.com/code/modules/coverage.html -""" -import logging -import re -import sys -import StringIO -from nose.plugins.base import Plugin -from nose.util import src, tolist - -log = logging.getLogger(__name__) - - -class Coverage(Plugin): - """ - Activate a coverage report using Ned Batchelder's coverage module. - """ - coverTests = False - coverPackages = None - coverInstance = None - coverErase = False - coverMinPercentage = None - score = 200 - status = {} - - def options(self, parser, env): - """ - Add options to command line. - """ - super(Coverage, self).options(parser, env) - parser.add_option("--cover-package", action="append", - default=env.get('NOSE_COVER_PACKAGE'), - metavar="PACKAGE", - dest="cover_packages", - help="Restrict coverage output to selected packages " - "[NOSE_COVER_PACKAGE]") - parser.add_option("--cover-erase", action="store_true", - default=env.get('NOSE_COVER_ERASE'), - dest="cover_erase", - help="Erase previously collected coverage " - "statistics before run") - parser.add_option("--cover-tests", action="store_true", - dest="cover_tests", - default=env.get('NOSE_COVER_TESTS'), - help="Include test modules in coverage report " - "[NOSE_COVER_TESTS]") - parser.add_option("--cover-min-percentage", action="store", - dest="cover_min_percentage", - default=env.get('NOSE_COVER_MIN_PERCENTAGE'), - help="Minimum percentage of coverage for tests " - "to pass [NOSE_COVER_MIN_PERCENTAGE]") - parser.add_option("--cover-inclusive", action="store_true", - dest="cover_inclusive", - default=env.get('NOSE_COVER_INCLUSIVE'), - help="Include all python files under working " - "directory in coverage report. Useful for " - "discovering holes in test coverage if not all " - "files are imported by the test suite. 
" - "[NOSE_COVER_INCLUSIVE]") - parser.add_option("--cover-html", action="store_true", - default=env.get('NOSE_COVER_HTML'), - dest='cover_html', - help="Produce HTML coverage information") - parser.add_option('--cover-html-dir', action='store', - default=env.get('NOSE_COVER_HTML_DIR', 'cover'), - dest='cover_html_dir', - metavar='DIR', - help='Produce HTML coverage information in dir') - parser.add_option("--cover-branches", action="store_true", - default=env.get('NOSE_COVER_BRANCHES'), - dest="cover_branches", - help="Include branch coverage in coverage report " - "[NOSE_COVER_BRANCHES]") - parser.add_option("--cover-xml", action="store_true", - default=env.get('NOSE_COVER_XML'), - dest="cover_xml", - help="Produce XML coverage information") - parser.add_option("--cover-xml-file", action="store", - default=env.get('NOSE_COVER_XML_FILE', 'coverage.xml'), - dest="cover_xml_file", - metavar="FILE", - help="Produce XML coverage information in file") - - def configure(self, options, conf): - """ - Configure plugin. - """ - try: - self.status.pop('active') - except KeyError: - pass - super(Coverage, self).configure(options, conf) - if self.enabled: - try: - import coverage - if not hasattr(coverage, 'coverage'): - raise ImportError("Unable to import coverage module") - except ImportError: - log.error("Coverage not available: " - "unable to import coverage module") - self.enabled = False - return - self.conf = conf - self.coverErase = options.cover_erase - self.coverTests = options.cover_tests - self.coverPackages = [] - if options.cover_packages: - if isinstance(options.cover_packages, (list, tuple)): - cover_packages = options.cover_packages - else: - cover_packages = [options.cover_packages] - for pkgs in [tolist(x) for x in cover_packages]: - self.coverPackages.extend(pkgs) - self.coverInclusive = options.cover_inclusive - if self.coverPackages: - log.info("Coverage report will include only packages: %s", - self.coverPackages) - self.coverHtmlDir = None - if options.cover_html: - self.coverHtmlDir = options.cover_html_dir - log.debug('Will put HTML coverage report in %s', self.coverHtmlDir) - self.coverBranches = options.cover_branches - self.coverXmlFile = None - if options.cover_min_percentage: - self.coverMinPercentage = int(options.cover_min_percentage.rstrip('%')) - if options.cover_xml: - self.coverXmlFile = options.cover_xml_file - log.debug('Will put XML coverage report in %s', self.coverXmlFile) - if self.enabled: - self.status['active'] = True - self.coverInstance = coverage.coverage(auto_data=False, - branch=self.coverBranches, data_suffix=conf.worker, - source=self.coverPackages) - self.coverInstance._warn_no_data = False - self.coverInstance.is_worker = conf.worker - self.coverInstance.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]') - - log.debug("Coverage begin") - self.skipModules = sys.modules.keys()[:] - if self.coverErase: - log.debug("Clearing previously collected coverage statistics") - self.coverInstance.combine() - self.coverInstance.erase() - - if not self.coverInstance.is_worker: - self.coverInstance.load() - self.coverInstance.start() - - - def beforeTest(self, *args, **kwargs): - """ - Begin recording coverage information. - """ - - if self.coverInstance.is_worker: - self.coverInstance.load() - self.coverInstance.start() - - def afterTest(self, *args, **kwargs): - """ - Stop recording coverage information. 
- """ - - if self.coverInstance.is_worker: - self.coverInstance.stop() - self.coverInstance.save() - - - def report(self, stream): - """ - Output code coverage report. - """ - log.debug("Coverage report") - self.coverInstance.stop() - self.coverInstance.combine() - self.coverInstance.save() - modules = [module - for name, module in sys.modules.items() - if self.wantModuleCoverage(name, module)] - log.debug("Coverage report will cover modules: %s", modules) - self.coverInstance.report(modules, file=stream) - - import coverage - if self.coverHtmlDir: - log.debug("Generating HTML coverage report") - try: - self.coverInstance.html_report(modules, self.coverHtmlDir) - except coverage.misc.CoverageException, e: - log.warning("Failed to generate HTML report: %s" % str(e)) - - if self.coverXmlFile: - log.debug("Generating XML coverage report") - try: - self.coverInstance.xml_report(modules, self.coverXmlFile) - except coverage.misc.CoverageException, e: - log.warning("Failed to generate XML report: %s" % str(e)) - - # make sure we have minimum required coverage - if self.coverMinPercentage: - f = StringIO.StringIO() - self.coverInstance.report(modules, file=f) - - multiPackageRe = (r'-------\s\w+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?' - r'\s+(\d+)%\s+\d*\s{0,1}$') - singlePackageRe = (r'-------\s[\w./]+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?' - r'\s+(\d+)%(?:\s+[-\d, ]+)\s{0,1}$') - - m = re.search(multiPackageRe, f.getvalue()) - if m is None: - m = re.search(singlePackageRe, f.getvalue()) - - if m: - percentage = int(m.groups()[0]) - if percentage < self.coverMinPercentage: - log.error('TOTAL Coverage did not reach minimum ' - 'required: %d%%' % self.coverMinPercentage) - sys.exit(1) - else: - log.error("No total percentage was found in coverage output, " - "something went wrong.") - - - def wantModuleCoverage(self, name, module): - if not hasattr(module, '__file__'): - log.debug("no coverage of %s: no __file__", name) - return False - module_file = src(module.__file__) - if not module_file or not module_file.endswith('.py'): - log.debug("no coverage of %s: not a python file", name) - return False - if self.coverPackages: - for package in self.coverPackages: - if (re.findall(r'^%s\b' % re.escape(package), name) - and (self.coverTests - or not self.conf.testMatch.search(name))): - log.debug("coverage for %s", name) - return True - if name in self.skipModules: - log.debug("no coverage for %s: loaded before coverage start", - name) - return False - if self.conf.testMatch.search(name) and not self.coverTests: - log.debug("no coverage for %s: is a test", name) - return False - # accept any package that passed the previous tests, unless - # coverPackages is on -- in that case, if we wanted this - # module, we would have already returned True - return not self.coverPackages - - def wantFile(self, file, package=None): - """If inclusive coverage enabled, return true for all source files - in wanted packages. - """ - if self.coverInclusive: - if file.endswith(".py"): - if package and self.coverPackages: - for want in self.coverPackages: - if package.startswith(want): - return True - else: - return True - return None diff --git a/lib/spack/external/nose/plugins/debug.py b/lib/spack/external/nose/plugins/debug.py deleted file mode 100644 index 78243e60d0..0000000000 --- a/lib/spack/external/nose/plugins/debug.py +++ /dev/null @@ -1,67 +0,0 @@ -""" -This plugin provides ``--pdb`` and ``--pdb-failures`` options. The ``--pdb`` -option will drop the test runner into pdb when it encounters an error. 
To -drop into pdb on failure, use ``--pdb-failures``. -""" - -import pdb -from nose.plugins.base import Plugin - -class Pdb(Plugin): - """ - Provides --pdb and --pdb-failures options that cause the test runner to - drop into pdb if it encounters an error or failure, respectively. - """ - enabled_for_errors = False - enabled_for_failures = False - score = 5 # run last, among builtins - - def options(self, parser, env): - """Register commandline options. - """ - parser.add_option( - "--pdb", action="store_true", dest="debugBoth", - default=env.get('NOSE_PDB', False), - help="Drop into debugger on failures or errors") - parser.add_option( - "--pdb-failures", action="store_true", - dest="debugFailures", - default=env.get('NOSE_PDB_FAILURES', False), - help="Drop into debugger on failures") - parser.add_option( - "--pdb-errors", action="store_true", - dest="debugErrors", - default=env.get('NOSE_PDB_ERRORS', False), - help="Drop into debugger on errors") - - def configure(self, options, conf): - """Configure which kinds of exceptions trigger plugin. - """ - self.conf = conf - self.enabled_for_errors = options.debugErrors or options.debugBoth - self.enabled_for_failures = options.debugFailures or options.debugBoth - self.enabled = self.enabled_for_failures or self.enabled_for_errors - - def addError(self, test, err): - """Enter pdb if configured to debug errors. - """ - if not self.enabled_for_errors: - return - self.debug(err) - - def addFailure(self, test, err): - """Enter pdb if configured to debug failures. - """ - if not self.enabled_for_failures: - return - self.debug(err) - - def debug(self, err): - import sys # FIXME why is this import here? - ec, ev, tb = err - stdout = sys.stdout - sys.stdout = sys.__stdout__ - try: - pdb.post_mortem(tb) - finally: - sys.stdout = stdout diff --git a/lib/spack/external/nose/plugins/deprecated.py b/lib/spack/external/nose/plugins/deprecated.py deleted file mode 100644 index 461a26be63..0000000000 --- a/lib/spack/external/nose/plugins/deprecated.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -This plugin installs a DEPRECATED error class for the :class:`DeprecatedTest` -exception. When :class:`DeprecatedTest` is raised, the exception will be logged -in the deprecated attribute of the result, ``D`` or ``DEPRECATED`` (verbose) -will be output, and the exception will not be counted as an error or failure. -It is enabled by default, but can be turned off by using ``--no-deprecated``. -""" - -from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin - - -class DeprecatedTest(Exception): - """Raise this exception to mark a test as deprecated. - """ - pass - - -class Deprecated(ErrorClassPlugin): - """ - Installs a DEPRECATED error class for the DeprecatedTest exception. Enabled - by default. - """ - enabled = True - deprecated = ErrorClass(DeprecatedTest, - label='DEPRECATED', - isfailure=False) - - def options(self, parser, env): - """Register commandline options. - """ - env_opt = 'NOSE_WITHOUT_DEPRECATED' - parser.add_option('--no-deprecated', action='store_true', - dest='noDeprecated', default=env.get(env_opt, False), - help="Disable special handling of DeprecatedTest " - "exceptions.") - - def configure(self, options, conf): - """Configure plugin. 
- """ - if not self.can_configure: - return - self.conf = conf - disable = getattr(options, 'noDeprecated', False) - if disable: - self.enabled = False diff --git a/lib/spack/external/nose/plugins/doctests.py b/lib/spack/external/nose/plugins/doctests.py deleted file mode 100644 index 5ef65799f3..0000000000 --- a/lib/spack/external/nose/plugins/doctests.py +++ /dev/null @@ -1,455 +0,0 @@ -"""Use the Doctest plugin with ``--with-doctest`` or the NOSE_WITH_DOCTEST -environment variable to enable collection and execution of :mod:`doctests -`. Because doctests are usually included in the tested package -(instead of being grouped into packages or modules of their own), nose only -looks for them in the non-test packages it discovers in the working directory. - -Doctests may also be placed into files other than python modules, in which -case they can be collected and executed by using the ``--doctest-extension`` -switch or NOSE_DOCTEST_EXTENSION environment variable to indicate which file -extension(s) to load. - -When loading doctests from non-module files, use the ``--doctest-fixtures`` -switch to specify how to find modules containing fixtures for the tests. A -module name will be produced by appending the value of that switch to the base -name of each doctest file loaded. For example, a doctest file "widgets.rst" -with the switch ``--doctest_fixtures=_fixt`` will load fixtures from the module -``widgets_fixt.py``. - -A fixtures module may define any or all of the following functions: - -* setup([module]) or setup_module([module]) - - Called before the test runs. You may raise SkipTest to skip all tests. - -* teardown([module]) or teardown_module([module]) - - Called after the test runs, if setup/setup_module did not raise an - unhandled exception. - -* setup_test(test) - - Called before the test. NOTE: the argument passed is a - doctest.DocTest instance, *not* a unittest.TestCase. - -* teardown_test(test) - - Called after the test, if setup_test did not raise an exception. NOTE: the - argument passed is a doctest.DocTest instance, *not* a unittest.TestCase. - -Doctests are run like any other test, with the exception that output -capture does not work; doctest does its own output capture while running a -test. - -.. note :: - - See :doc:`../doc_tests/test_doctest_fixtures/doctest_fixtures` for - additional documentation and examples. - -""" -from __future__ import generators - -import logging -import os -import sys -import unittest -from inspect import getmodule -from nose.plugins.base import Plugin -from nose.suite import ContextList -from nose.util import anyp, getpackage, test_address, resolve_name, \ - src, tolist, isproperty -try: - from cStringIO import StringIO -except ImportError: - from StringIO import StringIO -import sys -import __builtin__ as builtin_mod - -log = logging.getLogger(__name__) - -try: - import doctest - doctest.DocTestCase - # system version of doctest is acceptable, but needs a monkeypatch -except (ImportError, AttributeError): - # system version is too old - import nose.ext.dtcompat as doctest - - -# -# Doctest and coverage don't get along, so we need to create -# a monkeypatch that will replace the part of doctest that -# interferes with coverage reports. 
-# -# The monkeypatch is based on this zope patch: -# http://svn.zope.org/Zope3/trunk/src/zope/testing/doctest.py?rev=28679&r1=28703&r2=28705 -# -_orp = doctest._OutputRedirectingPdb - -class NoseOutputRedirectingPdb(_orp): - def __init__(self, out): - self.__debugger_used = False - _orp.__init__(self, out) - - def set_trace(self): - self.__debugger_used = True - _orp.set_trace(self, sys._getframe().f_back) - - def set_continue(self): - # Calling set_continue unconditionally would break unit test - # coverage reporting, as Bdb.set_continue calls sys.settrace(None). - if self.__debugger_used: - _orp.set_continue(self) -doctest._OutputRedirectingPdb = NoseOutputRedirectingPdb - - -class DoctestSuite(unittest.TestSuite): - """ - Doctest suites are parallelizable at the module or file level only, - since they may be attached to objects that are not individually - addressable (like properties). This suite subclass is used when - loading doctests from a module to ensure that behavior. - - This class is used only if the plugin is not fully prepared; - in normal use, the loader's suiteClass is used. - - """ - can_split = False - - def __init__(self, tests=(), context=None, can_split=False): - self.context = context - self.can_split = can_split - unittest.TestSuite.__init__(self, tests=tests) - - def address(self): - return test_address(self.context) - - def __iter__(self): - # 2.3 compat - return iter(self._tests) - - def __str__(self): - return str(self._tests) - - -class Doctest(Plugin): - """ - Activate doctest plugin to find and run doctests in non-test modules. - """ - extension = None - suiteClass = DoctestSuite - - def options(self, parser, env): - """Register commmandline options. - """ - Plugin.options(self, parser, env) - parser.add_option('--doctest-tests', action='store_true', - dest='doctest_tests', - default=env.get('NOSE_DOCTEST_TESTS'), - help="Also look for doctests in test modules. " - "Note that classes, methods and functions should " - "have either doctests or non-doctest tests, " - "not both. [NOSE_DOCTEST_TESTS]") - parser.add_option('--doctest-extension', action="append", - dest="doctestExtension", - metavar="EXT", - help="Also look for doctests in files with " - "this extension [NOSE_DOCTEST_EXTENSION]") - parser.add_option('--doctest-result-variable', - dest='doctest_result_var', - default=env.get('NOSE_DOCTEST_RESULT_VAR'), - metavar="VAR", - help="Change the variable name set to the result of " - "the last interpreter command from the default '_'. " - "Can be used to avoid conflicts with the _() " - "function used for text translation. " - "[NOSE_DOCTEST_RESULT_VAR]") - parser.add_option('--doctest-fixtures', action="store", - dest="doctestFixtures", - metavar="SUFFIX", - help="Find fixtures for a doctest file in module " - "with this name appended to the base name " - "of the doctest file") - parser.add_option('--doctest-options', action="append", - dest="doctestOptions", - metavar="OPTIONS", - help="Specify options to pass to doctest. " + - "Eg. '+ELLIPSIS,+NORMALIZE_WHITESPACE'") - # Set the default as a list, if given in env; otherwise - # an additional value set on the command line will cause - # an error. - env_setting = env.get('NOSE_DOCTEST_EXTENSION') - if env_setting is not None: - parser.set_defaults(doctestExtension=tolist(env_setting)) - - def configure(self, options, config): - """Configure plugin. 
- """ - Plugin.configure(self, options, config) - self.doctest_result_var = options.doctest_result_var - self.doctest_tests = options.doctest_tests - self.extension = tolist(options.doctestExtension) - self.fixtures = options.doctestFixtures - self.finder = doctest.DocTestFinder() - self.optionflags = 0 - if options.doctestOptions: - flags = ",".join(options.doctestOptions).split(',') - for flag in flags: - if not flag or flag[0] not in '+-': - raise ValueError( - "Must specify doctest options with starting " + - "'+' or '-'. Got %s" % (flag,)) - mode, option_name = flag[0], flag[1:] - option_flag = doctest.OPTIONFLAGS_BY_NAME.get(option_name) - if not option_flag: - raise ValueError("Unknown doctest option %s" % - (option_name,)) - if mode == '+': - self.optionflags |= option_flag - elif mode == '-': - self.optionflags &= ~option_flag - - def prepareTestLoader(self, loader): - """Capture loader's suiteClass. - - This is used to create test suites from doctest files. - - """ - self.suiteClass = loader.suiteClass - - def loadTestsFromModule(self, module): - """Load doctests from the module. - """ - log.debug("loading from %s", module) - if not self.matches(module.__name__): - log.debug("Doctest doesn't want module %s", module) - return - try: - tests = self.finder.find(module) - except AttributeError: - log.exception("Attribute error loading from %s", module) - # nose allows module.__test__ = False; doctest does not and throws - # AttributeError - return - if not tests: - log.debug("No tests found in %s", module) - return - tests.sort() - module_file = src(module.__file__) - # FIXME this breaks the id plugin somehow (tests probably don't - # get wrapped in result proxy or something) - cases = [] - for test in tests: - if not test.examples: - continue - if not test.filename: - test.filename = module_file - cases.append(DocTestCase(test, - optionflags=self.optionflags, - result_var=self.doctest_result_var)) - if cases: - yield self.suiteClass(cases, context=module, can_split=False) - - def loadTestsFromFile(self, filename): - """Load doctests from the file. - - Tests are loaded only if filename's extension matches - configured doctest extension. - - """ - if self.extension and anyp(filename.endswith, self.extension): - name = os.path.basename(filename) - dh = open(filename) - try: - doc = dh.read() - finally: - dh.close() - - fixture_context = None - globs = {'__file__': filename} - if self.fixtures: - base, ext = os.path.splitext(name) - dirname = os.path.dirname(filename) - sys.path.append(dirname) - fixt_mod = base + self.fixtures - try: - fixture_context = __import__( - fixt_mod, globals(), locals(), ["nop"]) - except ImportError, e: - log.debug( - "Could not import %s: %s (%s)", fixt_mod, e, sys.path) - log.debug("Fixture module %s resolved to %s", - fixt_mod, fixture_context) - if hasattr(fixture_context, 'globs'): - globs = fixture_context.globs(globs) - parser = doctest.DocTestParser() - test = parser.get_doctest( - doc, globs=globs, name=name, - filename=filename, lineno=0) - if test.examples: - case = DocFileCase( - test, - optionflags=self.optionflags, - setUp=getattr(fixture_context, 'setup_test', None), - tearDown=getattr(fixture_context, 'teardown_test', None), - result_var=self.doctest_result_var) - if fixture_context: - yield ContextList((case,), context=fixture_context) - else: - yield case - else: - yield False # no tests to load - - def makeTest(self, obj, parent): - """Look for doctests in the given object, which will be a - function, method or class. 
- """ - name = getattr(obj, '__name__', 'Unnammed %s' % type(obj)) - doctests = self.finder.find(obj, module=getmodule(parent), name=name) - if doctests: - for test in doctests: - if len(test.examples) == 0: - continue - yield DocTestCase(test, obj=obj, optionflags=self.optionflags, - result_var=self.doctest_result_var) - - def matches(self, name): - # FIXME this seems wrong -- nothing is ever going to - # fail this test, since we're given a module NAME not FILE - if name == '__init__.py': - return False - # FIXME don't think we need include/exclude checks here? - return ((self.doctest_tests or not self.conf.testMatch.search(name) - or (self.conf.include - and filter(None, - [inc.search(name) - for inc in self.conf.include]))) - and (not self.conf.exclude - or not filter(None, - [exc.search(name) - for exc in self.conf.exclude]))) - - def wantFile(self, file): - """Override to select all modules and any file ending with - configured doctest extension. - """ - # always want .py files - if file.endswith('.py'): - return True - # also want files that match my extension - if (self.extension - and anyp(file.endswith, self.extension) - and (not self.conf.exclude - or not filter(None, - [exc.search(file) - for exc in self.conf.exclude]))): - return True - return None - - -class DocTestCase(doctest.DocTestCase): - """Overrides DocTestCase to - provide an address() method that returns the correct address for - the doctest case. To provide hints for address(), an obj may also - be passed -- this will be used as the test object for purposes of - determining the test address, if it is provided. - """ - def __init__(self, test, optionflags=0, setUp=None, tearDown=None, - checker=None, obj=None, result_var='_'): - self._result_var = result_var - self._nose_obj = obj - super(DocTestCase, self).__init__( - test, optionflags=optionflags, setUp=setUp, tearDown=tearDown, - checker=checker) - - def address(self): - if self._nose_obj is not None: - return test_address(self._nose_obj) - obj = resolve_name(self._dt_test.name) - - if isproperty(obj): - # properties have no connection to the class they are in - # so we can't just look 'em up, we have to first look up - # the class, then stick the prop on the end - parts = self._dt_test.name.split('.') - class_name = '.'.join(parts[:-1]) - cls = resolve_name(class_name) - base_addr = test_address(cls) - return (base_addr[0], base_addr[1], - '.'.join([base_addr[2], parts[-1]])) - else: - return test_address(obj) - - # doctests loaded via find(obj) omit the module name - # so we need to override id, __repr__ and shortDescription - # bonus: this will squash a 2.3 vs 2.4 incompatiblity - def id(self): - name = self._dt_test.name - filename = self._dt_test.filename - if filename is not None: - pk = getpackage(filename) - if pk is None: - return name - if not name.startswith(pk): - name = "%s.%s" % (pk, name) - return name - - def __repr__(self): - name = self.id() - name = name.split('.') - return "%s (%s)" % (name[-1], '.'.join(name[:-1])) - __str__ = __repr__ - - def shortDescription(self): - return 'Doctest: %s' % self.id() - - def setUp(self): - if self._result_var is not None: - self._old_displayhook = sys.displayhook - sys.displayhook = self._displayhook - super(DocTestCase, self).setUp() - - def _displayhook(self, value): - if value is None: - return - setattr(builtin_mod, self._result_var, value) - print repr(value) - - def tearDown(self): - super(DocTestCase, self).tearDown() - if self._result_var is not None: - sys.displayhook = self._old_displayhook - 
delattr(builtin_mod, self._result_var) - - -class DocFileCase(doctest.DocFileCase): - """Overrides to provide address() method that returns the correct - address for the doc file case. - """ - def __init__(self, test, optionflags=0, setUp=None, tearDown=None, - checker=None, result_var='_'): - self._result_var = result_var - super(DocFileCase, self).__init__( - test, optionflags=optionflags, setUp=setUp, tearDown=tearDown, - checker=None) - - def address(self): - return (self._dt_test.filename, None, None) - - def setUp(self): - if self._result_var is not None: - self._old_displayhook = sys.displayhook - sys.displayhook = self._displayhook - super(DocFileCase, self).setUp() - - def _displayhook(self, value): - if value is None: - return - setattr(builtin_mod, self._result_var, value) - print repr(value) - - def tearDown(self): - super(DocFileCase, self).tearDown() - if self._result_var is not None: - sys.displayhook = self._old_displayhook - delattr(builtin_mod, self._result_var) diff --git a/lib/spack/external/nose/plugins/errorclass.py b/lib/spack/external/nose/plugins/errorclass.py deleted file mode 100644 index d1540e0070..0000000000 --- a/lib/spack/external/nose/plugins/errorclass.py +++ /dev/null @@ -1,210 +0,0 @@ -""" -ErrorClass Plugins ------------------- - -ErrorClass plugins provide an easy way to add support for custom -handling of particular classes of exceptions. - -An ErrorClass plugin defines one or more ErrorClasses and how each is -handled and reported on. Each error class is stored in a different -attribute on the result, and reported separately. Each error class must -indicate the exceptions that fall under that class, the label to use -for reporting, and whether exceptions of the class should be -considered as failures for the whole test run. - -ErrorClasses use a declarative syntax. Assign an ErrorClass to the -attribute you wish to add to the result object, defining the -exceptions, label and isfailure attributes. For example, to declare an -ErrorClassPlugin that defines TodoErrors (and subclasses of TodoError) -as an error class with the label 'TODO' that is considered a failure, -do this: - - >>> class Todo(Exception): - ... pass - >>> class TodoError(ErrorClassPlugin): - ... todo = ErrorClass(Todo, label='TODO', isfailure=True) - -The MetaErrorClass metaclass translates the ErrorClass declarations -into the tuples used by the error handling and reporting functions in -the result. This is an internal format and subject to change; you -should always use the declarative syntax for attaching ErrorClasses to -an ErrorClass plugin. - - >>> TodoError.errorClasses # doctest: +ELLIPSIS - ((, ('todo', 'TODO', True)),) - -Let's see the plugin in action. First some boilerplate. - - >>> import sys - >>> import unittest - >>> try: - ... # 2.7+ - ... from unittest.runner import _WritelnDecorator - ... except ImportError: - ... from unittest import _WritelnDecorator - ... - >>> buf = _WritelnDecorator(sys.stdout) - -Now define a test case that raises a Todo. - - >>> class TestTodo(unittest.TestCase): - ... def runTest(self): - ... raise Todo("I need to test something") - >>> case = TestTodo() - -Prepare the result using our plugin. Normally this happens during the -course of test execution within nose -- you won't be doing this -yourself. For the purposes of this testing document, I'm stepping -through the internal process of nose so you can see what happens at -each step. 
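The same declarative pattern works for any exception you want reported under its own label; a small sketch, where the KnownFailure name and the KNOWNFAIL label are placeholders:

    # Hypothetical plugin reporting KnownFailure exceptions separately instead
    # of counting them as errors or failures.
    from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin

    class KnownFailure(Exception):
        """Raise inside a test to mark it as a known failure."""

    class KnownFailurePlugin(ErrorClassPlugin):
        enabled = True
        known = ErrorClass(KnownFailure, label='KNOWNFAIL', isfailure=False)

    # Register it with the runner, e.g. nose.main(addplugins=[KnownFailurePlugin()]).
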
- - >>> plugin = TodoError() - >>> from nose.result import _TextTestResult - >>> result = _TextTestResult(stream=buf, descriptions=0, verbosity=2) - >>> plugin.prepareTestResult(result) - -Now run the test. TODO is printed. - - >>> _ = case(result) # doctest: +ELLIPSIS - runTest (....TestTodo) ... TODO: I need to test something - -Errors and failures are empty, but todo has our test: - - >>> result.errors - [] - >>> result.failures - [] - >>> result.todo # doctest: +ELLIPSIS - [(<....TestTodo testMethod=runTest>, '...Todo: I need to test something\\n')] - >>> result.printErrors() # doctest: +ELLIPSIS - - ====================================================================== - TODO: runTest (....TestTodo) - ---------------------------------------------------------------------- - Traceback (most recent call last): - ... - ...Todo: I need to test something - - -Since we defined a Todo as a failure, the run was not successful. - - >>> result.wasSuccessful() - False -""" - -from nose.pyversion import make_instancemethod -from nose.plugins.base import Plugin -from nose.result import TextTestResult -from nose.util import isclass - -class MetaErrorClass(type): - """Metaclass for ErrorClassPlugins that allows error classes to be - set up in a declarative manner. - """ - def __init__(self, name, bases, attr): - errorClasses = [] - for name, detail in attr.items(): - if isinstance(detail, ErrorClass): - attr.pop(name) - for cls in detail: - errorClasses.append( - (cls, (name, detail.label, detail.isfailure))) - super(MetaErrorClass, self).__init__(name, bases, attr) - self.errorClasses = tuple(errorClasses) - - -class ErrorClass(object): - def __init__(self, *errorClasses, **kw): - self.errorClasses = errorClasses - try: - for key in ('label', 'isfailure'): - setattr(self, key, kw.pop(key)) - except KeyError: - raise TypeError("%r is a required named argument for ErrorClass" - % key) - - def __iter__(self): - return iter(self.errorClasses) - - -class ErrorClassPlugin(Plugin): - """ - Base class for ErrorClass plugins. Subclass this class and declare the - exceptions that you wish to handle as attributes of the subclass. - """ - __metaclass__ = MetaErrorClass - score = 1000 - errorClasses = () - - def addError(self, test, err): - err_cls, a, b = err - if not isclass(err_cls): - return - classes = [e[0] for e in self.errorClasses] - if filter(lambda c: issubclass(err_cls, c), classes): - return True - - def prepareTestResult(self, result): - if not hasattr(result, 'errorClasses'): - self.patchResult(result) - for cls, (storage_attr, label, isfail) in self.errorClasses: - if cls not in result.errorClasses: - storage = getattr(result, storage_attr, []) - setattr(result, storage_attr, storage) - result.errorClasses[cls] = (storage, label, isfail) - - def patchResult(self, result): - result.printLabel = print_label_patch(result) - result._orig_addError, result.addError = \ - result.addError, add_error_patch(result) - result._orig_wasSuccessful, result.wasSuccessful = \ - result.wasSuccessful, wassuccessful_patch(result) - if hasattr(result, 'printErrors'): - result._orig_printErrors, result.printErrors = \ - result.printErrors, print_errors_patch(result) - if hasattr(result, 'addSkip'): - result._orig_addSkip, result.addSkip = \ - result.addSkip, add_skip_patch(result) - result.errorClasses = {} - - -def add_error_patch(result): - """Create a new addError method to patch into a result instance - that recognizes the errorClasses attribute and deals with - errorclasses correctly. 
- """ - return make_instancemethod(TextTestResult.addError, result) - - -def print_errors_patch(result): - """Create a new printErrors method that prints errorClasses items - as well. - """ - return make_instancemethod(TextTestResult.printErrors, result) - - -def print_label_patch(result): - """Create a new printLabel method that prints errorClasses items - as well. - """ - return make_instancemethod(TextTestResult.printLabel, result) - - -def wassuccessful_patch(result): - """Create a new wasSuccessful method that checks errorClasses for - exceptions that were put into other slots than error or failure - but that still count as not success. - """ - return make_instancemethod(TextTestResult.wasSuccessful, result) - - -def add_skip_patch(result): - """Create a new addSkip method to patch into a result instance - that delegates to addError. - """ - return make_instancemethod(TextTestResult.addSkip, result) - - -if __name__ == '__main__': - import doctest - doctest.testmod() diff --git a/lib/spack/external/nose/plugins/failuredetail.py b/lib/spack/external/nose/plugins/failuredetail.py deleted file mode 100644 index 6462865dd0..0000000000 --- a/lib/spack/external/nose/plugins/failuredetail.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -This plugin provides assert introspection. When the plugin is enabled -and a test failure occurs, the traceback is displayed with extra context -around the line in which the exception was raised. Simple variable -substitution is also performed in the context output to provide more -debugging information. -""" - -from nose.plugins import Plugin -from nose.pyversion import exc_to_unicode, force_unicode -from nose.inspector import inspect_traceback - -class FailureDetail(Plugin): - """ - Plugin that provides extra information in tracebacks of test failures. - """ - score = 1600 # before capture - - def options(self, parser, env): - """Register commmandline options. - """ - parser.add_option( - "-d", "--detailed-errors", "--failure-detail", - action="store_true", - default=env.get('NOSE_DETAILED_ERRORS'), - dest="detailedErrors", help="Add detail to error" - " output by attempting to evaluate failed" - " asserts [NOSE_DETAILED_ERRORS]") - - def configure(self, options, conf): - """Configure plugin. - """ - if not self.can_configure: - return - self.enabled = options.detailedErrors - self.conf = conf - - def formatFailure(self, test, err): - """Add detail from traceback inspection to error message of a failure. - """ - ec, ev, tb = err - tbinfo, str_ev = None, exc_to_unicode(ev) - - if tb: - tbinfo = force_unicode(inspect_traceback(tb)) - str_ev = '\n'.join([str_ev, tbinfo]) - test.tbinfo = tbinfo - return (ec, str_ev, tb) - diff --git a/lib/spack/external/nose/plugins/isolate.py b/lib/spack/external/nose/plugins/isolate.py deleted file mode 100644 index 13235dfbd1..0000000000 --- a/lib/spack/external/nose/plugins/isolate.py +++ /dev/null @@ -1,103 +0,0 @@ -"""The isolation plugin resets the contents of sys.modules after running -each test module or package. Use it by setting ``--with-isolation`` or the -NOSE_WITH_ISOLATION environment variable. - -The effects are similar to wrapping the following functions around the -import and execution of each test module:: - - def setup(module): - module._mods = sys.modules.copy() - - def teardown(module): - to_del = [ m for m in sys.modules.keys() if m not in - module._mods ] - for mod in to_del: - del sys.modules[mod] - sys.modules.update(module._mods) - -Isolation works only during lazy loading. 
In normal use, this is only -during discovery of modules within a directory, where the process of -importing, loading tests and running tests from each module is -encapsulated in a single loadTestsFromName call. This plugin -implements loadTestsFromNames to force the same lazy-loading there, -which allows isolation to work in directed mode as well as discovery, -at the cost of some efficiency: lazy-loading names forces full context -setup and teardown to run for each name, defeating the grouping that -is normally used to ensure that context setup and teardown are run the -fewest possible times for a given set of names. - -.. warning :: - - This plugin should not be used in conjunction with other plugins - that assume that modules, once imported, will stay imported; for - instance, it may cause very odd results when used with the coverage - plugin. - -""" - -import logging -import sys - -from nose.plugins import Plugin - - -log = logging.getLogger('nose.plugins.isolation') - -class IsolationPlugin(Plugin): - """ - Activate the isolation plugin to isolate changes to external - modules to a single test module or package. The isolation plugin - resets the contents of sys.modules after each test module or - package runs to its state before the test. PLEASE NOTE that this - plugin should not be used with the coverage plugin, or in any other case - where module reloading may produce undesirable side-effects. - """ - score = 10 # I want to be last - name = 'isolation' - - def configure(self, options, conf): - """Configure plugin. - """ - Plugin.configure(self, options, conf) - self._mod_stack = [] - - def beforeContext(self): - """Copy sys.modules onto my mod stack - """ - mods = sys.modules.copy() - self._mod_stack.append(mods) - - def afterContext(self): - """Pop my mod stack and restore sys.modules to the state - it was in when mod stack was pushed. - """ - mods = self._mod_stack.pop() - to_del = [ m for m in sys.modules.keys() if m not in mods ] - if to_del: - log.debug('removing sys modules entries: %s', to_del) - for mod in to_del: - del sys.modules[mod] - sys.modules.update(mods) - - def loadTestsFromNames(self, names, module=None): - """Create a lazy suite that calls beforeContext and afterContext - around each name. The side-effect of this is that full context - fixtures will be set up and torn down around each test named. - """ - # Fast path for when we don't care - if not names or len(names) == 1: - return - loader = self.loader - plugins = self.conf.plugins - def lazy(): - for name in names: - plugins.beforeContext() - yield loader.loadTestsFromName(name, module=module) - plugins.afterContext() - return (loader.suiteClass(lazy), []) - - def prepareTestLoader(self, loader): - """Get handle on test loader so we can use it in loadTestsFromNames. - """ - self.loader = loader - diff --git a/lib/spack/external/nose/plugins/logcapture.py b/lib/spack/external/nose/plugins/logcapture.py deleted file mode 100644 index 4c9a79f6fd..0000000000 --- a/lib/spack/external/nose/plugins/logcapture.py +++ /dev/null @@ -1,245 +0,0 @@ -""" -This plugin captures logging statements issued during test execution. When an -error or failure occurs, the captured log messages are attached to the running -test in the test.capturedLogging attribute, and displayed with the error failure -output. It is enabled by default but can be turned off with the option -``--nologcapture``. - -You can filter captured logging statements with the ``--logging-filter`` option. 
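The snapshot-and-restore of sys.modules performed by beforeContext and afterContext above is easy to reproduce on its own; a standalone sketch of the same idea as a context manager, not part of the plugin itself:

    # Anything imported inside the block is forgotten again afterwards,
    # mirroring what the isolation plugin does per test module or package.
    import sys
    from contextlib import contextmanager

    @contextmanager
    def isolated_modules():
        snapshot = sys.modules.copy()
        try:
            yield
        finally:
            for name in list(sys.modules):
                if name not in snapshot:
                    del sys.modules[name]
            sys.modules.update(snapshot)

    # Example: `json` imported here does not stay imported after the block,
    # unless it was already loaded beforehand.
    with isolated_modules():
        import json  # noqa: F401
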
-If set, it specifies which logger(s) will be captured; loggers that do not match -will be passed. Example: specifying ``--logging-filter=sqlalchemy,myapp`` -will ensure that only statements logged via sqlalchemy.engine, myapp -or myapp.foo.bar logger will be logged. - -You can remove other installed logging handlers with the -``--logging-clear-handlers`` option. -""" - -import logging -from logging import Handler -import threading - -from nose.plugins.base import Plugin -from nose.util import anyp, ln, safe_str - -try: - from cStringIO import StringIO -except ImportError: - from StringIO import StringIO - -log = logging.getLogger(__name__) - -class FilterSet(object): - def __init__(self, filter_components): - self.inclusive, self.exclusive = self._partition(filter_components) - - # @staticmethod - def _partition(components): - inclusive, exclusive = [], [] - for component in components: - if component.startswith('-'): - exclusive.append(component[1:]) - else: - inclusive.append(component) - return inclusive, exclusive - _partition = staticmethod(_partition) - - def allow(self, record): - """returns whether this record should be printed""" - if not self: - # nothing to filter - return True - return self._allow(record) and not self._deny(record) - - # @staticmethod - def _any_match(matchers, record): - """return the bool of whether `record` starts with - any item in `matchers`""" - def record_matches_key(key): - return record == key or record.startswith(key + '.') - return anyp(bool, map(record_matches_key, matchers)) - _any_match = staticmethod(_any_match) - - def _allow(self, record): - if not self.inclusive: - return True - return self._any_match(self.inclusive, record) - - def _deny(self, record): - if not self.exclusive: - return False - return self._any_match(self.exclusive, record) - - -class MyMemoryHandler(Handler): - def __init__(self, logformat, logdatefmt, filters): - Handler.__init__(self) - fmt = logging.Formatter(logformat, logdatefmt) - self.setFormatter(fmt) - self.filterset = FilterSet(filters) - self.buffer = [] - def emit(self, record): - self.buffer.append(self.format(record)) - def flush(self): - pass # do nothing - def truncate(self): - self.buffer = [] - def filter(self, record): - if self.filterset.allow(record.name): - return Handler.filter(self, record) - def __getstate__(self): - state = self.__dict__.copy() - del state['lock'] - return state - def __setstate__(self, state): - self.__dict__.update(state) - self.lock = threading.RLock() - - -class LogCapture(Plugin): - """ - Log capture plugin. Enabled by default. Disable with --nologcapture. - This plugin captures logging statements issued during test execution, - appending any output captured to the error or failure output, - should the test fail or raise an error. - """ - enabled = True - env_opt = 'NOSE_NOLOGCAPTURE' - name = 'logcapture' - score = 500 - logformat = '%(name)s: %(levelname)s: %(message)s' - logdatefmt = None - clear = False - filters = ['-nose'] - - def options(self, parser, env): - """Register commandline options. - """ - parser.add_option( - "--nologcapture", action="store_false", - default=not env.get(self.env_opt), dest="logcapture", - help="Disable logging capture plugin. " - "Logging configuration will be left intact." - " [NOSE_NOLOGCAPTURE]") - parser.add_option( - "--logging-format", action="store", dest="logcapture_format", - default=env.get('NOSE_LOGFORMAT') or self.logformat, - metavar="FORMAT", - help="Specify custom format to print statements. 
" - "Uses the same format as used by standard logging handlers." - " [NOSE_LOGFORMAT]") - parser.add_option( - "--logging-datefmt", action="store", dest="logcapture_datefmt", - default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt, - metavar="FORMAT", - help="Specify custom date/time format to print statements. " - "Uses the same format as used by standard logging handlers." - " [NOSE_LOGDATEFMT]") - parser.add_option( - "--logging-filter", action="store", dest="logcapture_filters", - default=env.get('NOSE_LOGFILTER'), - metavar="FILTER", - help="Specify which statements to filter in/out. " - "By default, everything is captured. If the output is too" - " verbose,\nuse this option to filter out needless output.\n" - "Example: filter=foo will capture statements issued ONLY to\n" - " foo or foo.what.ever.sub but not foobar or other logger.\n" - "Specify multiple loggers with comma: filter=foo,bar,baz.\n" - "If any logger name is prefixed with a minus, eg filter=-foo,\n" - "it will be excluded rather than included. Default: " - "exclude logging messages from nose itself (-nose)." - " [NOSE_LOGFILTER]\n") - parser.add_option( - "--logging-clear-handlers", action="store_true", - default=False, dest="logcapture_clear", - help="Clear all other logging handlers") - parser.add_option( - "--logging-level", action="store", - default='NOTSET', dest="logcapture_level", - help="Set the log level to capture") - - def configure(self, options, conf): - """Configure plugin. - """ - self.conf = conf - # Disable if explicitly disabled, or if logging is - # configured via logging config file - if not options.logcapture or conf.loggingConfig: - self.enabled = False - self.logformat = options.logcapture_format - self.logdatefmt = options.logcapture_datefmt - self.clear = options.logcapture_clear - self.loglevel = options.logcapture_level - if options.logcapture_filters: - self.filters = options.logcapture_filters.split(',') - - def setupLoghandler(self): - # setup our handler with root logger - root_logger = logging.getLogger() - if self.clear: - if hasattr(root_logger, "handlers"): - for handler in root_logger.handlers: - root_logger.removeHandler(handler) - for logger in logging.Logger.manager.loggerDict.values(): - if hasattr(logger, "handlers"): - for handler in logger.handlers: - logger.removeHandler(handler) - # make sure there isn't one already - # you can't simply use "if self.handler not in root_logger.handlers" - # since at least in unit tests this doesn't work -- - # LogCapture() is instantiated for each test case while root_logger - # is module global - # so we always add new MyMemoryHandler instance - for handler in root_logger.handlers[:]: - if isinstance(handler, MyMemoryHandler): - root_logger.handlers.remove(handler) - root_logger.addHandler(self.handler) - # to make sure everything gets captured - loglevel = getattr(self, "loglevel", "NOTSET") - root_logger.setLevel(getattr(logging, loglevel)) - - def begin(self): - """Set up logging handler before test run begins. - """ - self.start() - - def start(self): - self.handler = MyMemoryHandler(self.logformat, self.logdatefmt, - self.filters) - self.setupLoghandler() - - def end(self): - pass - - def beforeTest(self, test): - """Clear buffers and handlers before test. - """ - self.setupLoghandler() - - def afterTest(self, test): - """Clear buffers after test. - """ - self.handler.truncate() - - def formatFailure(self, test, err): - """Add captured log messages to failure output. 
- """ - return self.formatError(test, err) - - def formatError(self, test, err): - """Add captured log messages to error output. - """ - # logic flow copied from Capture.formatError - test.capturedLogging = records = self.formatLogRecords() - if not records: - return err - ec, ev, tb = err - return (ec, self.addCaptureToErr(ev, records), tb) - - def formatLogRecords(self): - return map(safe_str, self.handler.buffer) - - def addCaptureToErr(self, ev, records): - return '\n'.join([safe_str(ev), ln('>> begin captured logging <<')] + \ - records + \ - [ln('>> end captured logging <<')]) diff --git a/lib/spack/external/nose/plugins/manager.py b/lib/spack/external/nose/plugins/manager.py deleted file mode 100644 index 4d2ed22b6f..0000000000 --- a/lib/spack/external/nose/plugins/manager.py +++ /dev/null @@ -1,460 +0,0 @@ -""" -Plugin Manager --------------- - -A plugin manager class is used to load plugins, manage the list of -loaded plugins, and proxy calls to those plugins. - -The plugin managers provided with nose are: - -:class:`PluginManager` - This manager doesn't implement loadPlugins, so it can only work - with a static list of plugins. - -:class:`BuiltinPluginManager` - This manager loads plugins referenced in ``nose.plugins.builtin``. - -:class:`EntryPointPluginManager` - This manager uses setuptools entrypoints to load plugins. - -:class:`ExtraPluginsPluginManager` - This manager loads extra plugins specified with the keyword - `addplugins`. - -:class:`DefaultPluginMananger` - This is the manager class that will be used by default. If - setuptools is installed, it is a subclass of - :class:`EntryPointPluginManager` and :class:`BuiltinPluginManager`; - otherwise, an alias to :class:`BuiltinPluginManager`. - -:class:`RestrictedPluginManager` - This manager is for use in test runs where some plugin calls are - not available, such as runs started with ``python setup.py test``, - where the test runner is the default unittest :class:`TextTestRunner`. It - is a subclass of :class:`DefaultPluginManager`. - -Writing a plugin manager -======================== - -If you want to load plugins via some other means, you can write a -plugin manager and pass an instance of your plugin manager class when -instantiating the :class:`nose.config.Config` instance that you pass to -:class:`TestProgram` (or :func:`main` or :func:`run`). - -To implement your plugin loading scheme, implement ``loadPlugins()``, -and in that method, call ``addPlugin()`` with an instance of each plugin -you wish to make available. Make sure to call -``super(self).loadPlugins()`` as well if have subclassed a manager -other than ``PluginManager``. - -""" -import inspect -import logging -import os -import sys -from itertools import chain as iterchain -from warnings import warn -import nose.config -from nose.failure import Failure -from nose.plugins.base import IPluginInterface -from nose.pyversion import sort_list - -try: - import cPickle as pickle -except: - import pickle -try: - from cStringIO import StringIO -except: - from StringIO import StringIO - - -__all__ = ['DefaultPluginManager', 'PluginManager', 'EntryPointPluginManager', - 'BuiltinPluginManager', 'RestrictedPluginManager'] - -log = logging.getLogger(__name__) - - -class PluginProxy(object): - """Proxy for plugin calls. Essentially a closure bound to the - given call and plugin list. - - The plugin proxy also must be bound to a particular plugin - interface specification, so that it knows what calls are available - and any special handling that is required for each call. 
- """ - interface = IPluginInterface - def __init__(self, call, plugins): - try: - self.method = getattr(self.interface, call) - except AttributeError: - raise AttributeError("%s is not a valid %s method" - % (call, self.interface.__name__)) - self.call = self.makeCall(call) - self.plugins = [] - for p in plugins: - self.addPlugin(p, call) - - def __call__(self, *arg, **kw): - return self.call(*arg, **kw) - - def addPlugin(self, plugin, call): - """Add plugin to my list of plugins to call, if it has the attribute - I'm bound to. - """ - meth = getattr(plugin, call, None) - if meth is not None: - if call == 'loadTestsFromModule' and \ - len(inspect.getargspec(meth)[0]) == 2: - orig_meth = meth - meth = lambda module, path, **kwargs: orig_meth(module) - self.plugins.append((plugin, meth)) - - def makeCall(self, call): - if call == 'loadTestsFromNames': - # special case -- load tests from names behaves somewhat differently - # from other chainable calls, because plugins return a tuple, only - # part of which can be chained to the next plugin. - return self._loadTestsFromNames - - meth = self.method - if getattr(meth, 'generative', False): - # call all plugins and yield a flattened iterator of their results - return lambda *arg, **kw: list(self.generate(*arg, **kw)) - elif getattr(meth, 'chainable', False): - return self.chain - else: - # return a value from the first plugin that returns non-None - return self.simple - - def chain(self, *arg, **kw): - """Call plugins in a chain, where the result of each plugin call is - sent to the next plugin as input. The final output result is returned. - """ - result = None - # extract the static arguments (if any) from arg so they can - # be passed to each plugin call in the chain - static = [a for (static, a) - in zip(getattr(self.method, 'static_args', []), arg) - if static] - for p, meth in self.plugins: - result = meth(*arg, **kw) - arg = static[:] - arg.append(result) - return result - - def generate(self, *arg, **kw): - """Call all plugins, yielding each item in each non-None result. - """ - for p, meth in self.plugins: - result = None - try: - result = meth(*arg, **kw) - if result is not None: - for r in result: - yield r - except (KeyboardInterrupt, SystemExit): - raise - except: - exc = sys.exc_info() - yield Failure(*exc) - continue - - def simple(self, *arg, **kw): - """Call all plugins, returning the first non-None result. - """ - for p, meth in self.plugins: - result = meth(*arg, **kw) - if result is not None: - return result - - def _loadTestsFromNames(self, names, module=None): - """Chainable but not quite normal. Plugins return a tuple of - (tests, names) after processing the names. The tests are added - to a suite that is accumulated throughout the full call, while - names are input for the next plugin in the chain. 
- """ - suite = [] - for p, meth in self.plugins: - result = meth(names, module=module) - if result is not None: - suite_part, names = result - if suite_part: - suite.extend(suite_part) - return suite, names - - -class NoPlugins(object): - """Null Plugin manager that has no plugins.""" - interface = IPluginInterface - def __init__(self): - self._plugins = self.plugins = () - - def __iter__(self): - return () - - def _doNothing(self, *args, **kwds): - pass - - def _emptyIterator(self, *args, **kwds): - return () - - def __getattr__(self, call): - method = getattr(self.interface, call) - if getattr(method, "generative", False): - return self._emptyIterator - else: - return self._doNothing - - def addPlugin(self, plug): - raise NotImplementedError() - - def addPlugins(self, plugins): - raise NotImplementedError() - - def configure(self, options, config): - pass - - def loadPlugins(self): - pass - - def sort(self): - pass - - -class PluginManager(object): - """Base class for plugin managers. PluginManager is intended to be - used only with a static list of plugins. The loadPlugins() implementation - only reloads plugins from _extraplugins to prevent those from being - overridden by a subclass. - - The basic functionality of a plugin manager is to proxy all unknown - attributes through a ``PluginProxy`` to a list of plugins. - - Note that the list of plugins *may not* be changed after the first plugin - call. - """ - proxyClass = PluginProxy - - def __init__(self, plugins=(), proxyClass=None): - self._plugins = [] - self._extraplugins = () - self._proxies = {} - if plugins: - self.addPlugins(plugins) - if proxyClass is not None: - self.proxyClass = proxyClass - - def __getattr__(self, call): - try: - return self._proxies[call] - except KeyError: - proxy = self.proxyClass(call, self._plugins) - self._proxies[call] = proxy - return proxy - - def __iter__(self): - return iter(self.plugins) - - def addPlugin(self, plug): - # allow, for instance, plugins loaded via entry points to - # supplant builtin plugins. - new_name = getattr(plug, 'name', object()) - self._plugins[:] = [p for p in self._plugins - if getattr(p, 'name', None) != new_name] - self._plugins.append(plug) - - def addPlugins(self, plugins=(), extraplugins=()): - """extraplugins are maintained in a separate list and - re-added by loadPlugins() to prevent their being overwritten - by plugins added by a subclass of PluginManager - """ - self._extraplugins = extraplugins - for plug in iterchain(plugins, extraplugins): - self.addPlugin(plug) - - def configure(self, options, config): - """Configure the set of plugins with the given options - and config instance. After configuration, disabled plugins - are removed from the plugins list. - """ - log.debug("Configuring plugins") - self.config = config - cfg = PluginProxy('configure', self._plugins) - cfg(options, config) - enabled = [plug for plug in self._plugins if plug.enabled] - self.plugins = enabled - self.sort() - log.debug("Plugins enabled: %s", enabled) - - def loadPlugins(self): - for plug in self._extraplugins: - self.addPlugin(plug) - - def sort(self): - return sort_list(self._plugins, lambda x: getattr(x, 'score', 1), reverse=True) - - def _get_plugins(self): - return self._plugins - - def _set_plugins(self, plugins): - self._plugins = [] - self.addPlugins(plugins) - - plugins = property(_get_plugins, _set_plugins, None, - """Access the list of plugins managed by - this plugin manager""") - - -class ZeroNinePlugin: - """Proxy for 0.9 plugins, adapts 0.10 calls to 0.9 standard. 
- """ - def __init__(self, plugin): - self.plugin = plugin - - def options(self, parser, env=os.environ): - self.plugin.add_options(parser, env) - - def addError(self, test, err): - if not hasattr(self.plugin, 'addError'): - return - # switch off to addSkip, addDeprecated if those types - from nose.exc import SkipTest, DeprecatedTest - ec, ev, tb = err - if issubclass(ec, SkipTest): - if not hasattr(self.plugin, 'addSkip'): - return - return self.plugin.addSkip(test.test) - elif issubclass(ec, DeprecatedTest): - if not hasattr(self.plugin, 'addDeprecated'): - return - return self.plugin.addDeprecated(test.test) - # add capt - capt = test.capturedOutput - return self.plugin.addError(test.test, err, capt) - - def loadTestsFromFile(self, filename): - if hasattr(self.plugin, 'loadTestsFromPath'): - return self.plugin.loadTestsFromPath(filename) - - def addFailure(self, test, err): - if not hasattr(self.plugin, 'addFailure'): - return - # add capt and tbinfo - capt = test.capturedOutput - tbinfo = test.tbinfo - return self.plugin.addFailure(test.test, err, capt, tbinfo) - - def addSuccess(self, test): - if not hasattr(self.plugin, 'addSuccess'): - return - capt = test.capturedOutput - self.plugin.addSuccess(test.test, capt) - - def startTest(self, test): - if not hasattr(self.plugin, 'startTest'): - return - return self.plugin.startTest(test.test) - - def stopTest(self, test): - if not hasattr(self.plugin, 'stopTest'): - return - return self.plugin.stopTest(test.test) - - def __getattr__(self, val): - return getattr(self.plugin, val) - - -class EntryPointPluginManager(PluginManager): - """Plugin manager that loads plugins from the `nose.plugins` and - `nose.plugins.0.10` entry points. - """ - entry_points = (('nose.plugins.0.10', None), - ('nose.plugins', ZeroNinePlugin)) - - def loadPlugins(self): - """Load plugins by iterating the `nose.plugins` entry point. - """ - from pkg_resources import iter_entry_points - loaded = {} - for entry_point, adapt in self.entry_points: - for ep in iter_entry_points(entry_point): - if ep.name in loaded: - continue - loaded[ep.name] = True - log.debug('%s load plugin %s', self.__class__.__name__, ep) - try: - plugcls = ep.load() - except KeyboardInterrupt: - raise - except Exception, e: - # never want a plugin load to kill the test run - # but we can't log here because the logger is not yet - # configured - warn("Unable to load plugin %s: %s" % (ep, e), - RuntimeWarning) - continue - if adapt: - plug = adapt(plugcls()) - else: - plug = plugcls() - self.addPlugin(plug) - super(EntryPointPluginManager, self).loadPlugins() - - -class BuiltinPluginManager(PluginManager): - """Plugin manager that loads plugins from the list in - `nose.plugins.builtin`. - """ - def loadPlugins(self): - """Load plugins in nose.plugins.builtin - """ - from nose.plugins import builtin - for plug in builtin.plugins: - self.addPlugin(plug()) - super(BuiltinPluginManager, self).loadPlugins() - -try: - import pkg_resources - class DefaultPluginManager(EntryPointPluginManager, BuiltinPluginManager): - pass - -except ImportError: - class DefaultPluginManager(BuiltinPluginManager): - pass - -class RestrictedPluginManager(DefaultPluginManager): - """Plugin manager that restricts the plugin list to those not - excluded by a list of exclude methods. Any plugin that implements - an excluded method will be removed from the manager's plugin list - after plugins are loaded. 
- """ - def __init__(self, plugins=(), exclude=(), load=True): - DefaultPluginManager.__init__(self, plugins) - self.load = load - self.exclude = exclude - self.excluded = [] - self._excludedOpts = None - - def excludedOption(self, name): - if self._excludedOpts is None: - from optparse import OptionParser - self._excludedOpts = OptionParser(add_help_option=False) - for plugin in self.excluded: - plugin.options(self._excludedOpts, env={}) - return self._excludedOpts.get_option('--' + name) - - def loadPlugins(self): - if self.load: - DefaultPluginManager.loadPlugins(self) - allow = [] - for plugin in self.plugins: - ok = True - for method in self.exclude: - if hasattr(plugin, method): - ok = False - self.excluded.append(plugin) - break - if ok: - allow.append(plugin) - self.plugins = allow diff --git a/lib/spack/external/nose/plugins/multiprocess.py b/lib/spack/external/nose/plugins/multiprocess.py deleted file mode 100644 index 2cae744a11..0000000000 --- a/lib/spack/external/nose/plugins/multiprocess.py +++ /dev/null @@ -1,835 +0,0 @@ -""" -Overview -======== - -The multiprocess plugin enables you to distribute your test run among a set of -worker processes that run tests in parallel. This can speed up CPU-bound test -runs (as long as the number of work processeses is around the number of -processors or cores available), but is mainly useful for IO-bound tests that -spend most of their time waiting for data to arrive from someplace else. - -.. note :: - - See :doc:`../doc_tests/test_multiprocess/multiprocess` for - additional documentation and examples. Use of this plugin on python - 2.5 or earlier requires the multiprocessing_ module, also available - from PyPI. - -.. _multiprocessing : http://code.google.com/p/python-multiprocessing/ - -How tests are distributed -========================= - -The ideal case would be to dispatch each test to a worker process -separately. This ideal is not attainable in all cases, however, because many -test suites depend on context (class, module or package) fixtures. - -The plugin can't know (unless you tell it -- see below!) if a context fixture -can be called many times concurrently (is re-entrant), or if it can be shared -among tests running in different processes. Therefore, if a context has -fixtures, the default behavior is to dispatch the entire suite to a worker as -a unit. - -Controlling distribution -^^^^^^^^^^^^^^^^^^^^^^^^ - -There are two context-level variables that you can use to control this default -behavior. - -If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True`` -in the context, and the plugin will dispatch tests in suites bound to that -context as if the context had no fixtures. This means that the fixtures will -execute concurrently and multiple times, typically once per test. - -If a context's fixtures can be shared by tests running in different processes --- such as a package-level fixture that starts an external http server or -initializes a shared database -- then set ``_multiprocess_shared_ = True`` in -the context. These fixtures will then execute in the primary nose process, and -tests in those contexts will be individually dispatched to run in parallel. - -How results are collected and reported -====================================== - -As each test or suite executes in a worker process, results (failures, errors, -and specially handled exceptions like SkipTest) are collected in that -process. When the worker process finishes, it returns results to the main -nose process. 
There, any progress output is printed (dots!), and the -results from the test run are combined into a consolidated result -set. When results have been received for all dispatched tests, or all -workers have died, the result summary is output as normal. - -Beware! -======= - -Not all test suites will benefit from, or even operate correctly using, this -plugin. For example, CPU-bound tests will run more slowly if you don't have -multiple processors. There are also some differences in plugin -interactions and behaviors due to the way in which tests are dispatched and -loaded. In general, test loading under this plugin operates as if it were -always in directed mode instead of discovered mode. For instance, doctests -in test modules will always be found when using this plugin with the doctest -plugin. - -But the biggest issue you will face is probably concurrency. Unless you -have kept your tests as religiously pure unit tests, with no side-effects, no -ordering issues, and no external dependencies, chances are you will experience -odd, intermittent and unexplainable failures and errors when using this -plugin. This doesn't necessarily mean the plugin is broken; it may mean that -your test suite is not safe for concurrency. - -New Features in 1.1.0 -===================== - -* functions generated by test generators are now added to the worker queue - making them multi-threaded. -* fixed timeout functionality, now functions will be terminated with a - TimedOutException exception when they exceed their execution time. The - worker processes are not terminated. -* added ``--process-restartworker`` option to restart workers once they are - done, this helps control memory usage. Sometimes memory leaks can accumulate - making long runs very difficult. -* added global _instantiate_plugins to configure which plugins are started - on the worker processes. - -""" - -import logging -import os -import sys -import time -import traceback -import unittest -import pickle -import signal -import nose.case -from nose.core import TextTestRunner -from nose import failure -from nose import loader -from nose.plugins.base import Plugin -from nose.pyversion import bytes_ -from nose.result import TextTestResult -from nose.suite import ContextSuite -from nose.util import test_address -try: - # 2.7+ - from unittest.runner import _WritelnDecorator -except ImportError: - from unittest import _WritelnDecorator -from Queue import Empty -from warnings import warn -try: - from cStringIO import StringIO -except ImportError: - import StringIO - -# this is a list of plugin classes that will be checked for and created inside -# each worker process -_instantiate_plugins = None - -log = logging.getLogger(__name__) - -Process = Queue = Pool = Event = Value = Array = None - -# have to inherit KeyboardInterrupt to it will interrupt process properly -class TimedOutException(KeyboardInterrupt): - def __init__(self, value = "Timed Out"): - self.value = value - def __str__(self): - return repr(self.value) - -def _import_mp(): - global Process, Queue, Pool, Event, Value, Array - try: - from multiprocessing import Manager, Process - #prevent the server process created in the manager which holds Python - #objects and allows other processes to manipulate them using proxies - #to interrupt on SIGINT (keyboardinterrupt) so that the communication - #channel between subprocesses and main process is still usable after - #ctrl+C is received in the main process. 
- old=signal.signal(signal.SIGINT, signal.SIG_IGN) - m = Manager() - #reset it back so main process will receive a KeyboardInterrupt - #exception on ctrl+c - signal.signal(signal.SIGINT, old) - Queue, Pool, Event, Value, Array = ( - m.Queue, m.Pool, m.Event, m.Value, m.Array - ) - except ImportError: - warn("multiprocessing module is not available, multiprocess plugin " - "cannot be used", RuntimeWarning) - - -class TestLet: - def __init__(self, case): - try: - self._id = case.id() - except AttributeError: - pass - self._short_description = case.shortDescription() - self._str = str(case) - - def id(self): - return self._id - - def shortDescription(self): - return self._short_description - - def __str__(self): - return self._str - -class MultiProcess(Plugin): - """ - Run tests in multiple processes. Requires processing module. - """ - score = 1000 - status = {} - - def options(self, parser, env): - """ - Register command-line options. - """ - parser.add_option("--processes", action="store", - default=env.get('NOSE_PROCESSES', 0), - dest="multiprocess_workers", - metavar="NUM", - help="Spread test run among this many processes. " - "Set a number equal to the number of processors " - "or cores in your machine for best results. " - "Pass a negative number to have the number of " - "processes automatically set to the number of " - "cores. Passing 0 means to disable parallel " - "testing. Default is 0 unless NOSE_PROCESSES is " - "set. " - "[NOSE_PROCESSES]") - parser.add_option("--process-timeout", action="store", - default=env.get('NOSE_PROCESS_TIMEOUT', 10), - dest="multiprocess_timeout", - metavar="SECONDS", - help="Set timeout for return of results from each " - "test runner process. Default is 10. " - "[NOSE_PROCESS_TIMEOUT]") - parser.add_option("--process-restartworker", action="store_true", - default=env.get('NOSE_PROCESS_RESTARTWORKER', False), - dest="multiprocess_restartworker", - help="If set, will restart each worker process once" - " their tests are done, this helps control memory " - "leaks from killing the system. " - "[NOSE_PROCESS_RESTARTWORKER]") - - def configure(self, options, config): - """ - Configure plugin. - """ - try: - self.status.pop('active') - except KeyError: - pass - if not hasattr(options, 'multiprocess_workers'): - self.enabled = False - return - # don't start inside of a worker process - if config.worker: - return - self.config = config - try: - workers = int(options.multiprocess_workers) - except (TypeError, ValueError): - workers = 0 - if workers: - _import_mp() - if Process is None: - self.enabled = False - return - # Negative number of workers will cause multiprocessing to hang. - # Set the number of workers to the CPU count to avoid this. - if workers < 0: - try: - import multiprocessing - workers = multiprocessing.cpu_count() - except NotImplementedError: - self.enabled = False - return - self.enabled = True - self.config.multiprocess_workers = workers - t = float(options.multiprocess_timeout) - self.config.multiprocess_timeout = t - r = int(options.multiprocess_restartworker) - self.config.multiprocess_restartworker = r - self.status['active'] = True - - def prepareTestLoader(self, loader): - """Remember loader class so MultiProcessTestRunner can instantiate - the right loader. - """ - self.loaderClass = loader.__class__ - - def prepareTestRunner(self, runner): - """Replace test runner with MultiProcessTestRunner. 
- """ - # replace with our runner class - return MultiProcessTestRunner(stream=runner.stream, - verbosity=self.config.verbosity, - config=self.config, - loaderClass=self.loaderClass) - -def signalhandler(sig, frame): - raise TimedOutException() - -class MultiProcessTestRunner(TextTestRunner): - waitkilltime = 5.0 # max time to wait to terminate a process that does not - # respond to SIGILL - def __init__(self, **kw): - self.loaderClass = kw.pop('loaderClass', loader.defaultTestLoader) - super(MultiProcessTestRunner, self).__init__(**kw) - - def collect(self, test, testQueue, tasks, to_teardown, result): - # dispatch and collect results - # put indexes only on queue because tests aren't picklable - for case in self.nextBatch(test): - log.debug("Next batch %s (%s)", case, type(case)) - if (isinstance(case, nose.case.Test) and - isinstance(case.test, failure.Failure)): - log.debug("Case is a Failure") - case(result) # run here to capture the failure - continue - # handle shared fixtures - if isinstance(case, ContextSuite) and case.context is failure.Failure: - log.debug("Case is a Failure") - case(result) # run here to capture the failure - continue - elif isinstance(case, ContextSuite) and self.sharedFixtures(case): - log.debug("%s has shared fixtures", case) - try: - case.setUp() - except (KeyboardInterrupt, SystemExit): - raise - except: - log.debug("%s setup failed", sys.exc_info()) - result.addError(case, sys.exc_info()) - else: - to_teardown.append(case) - if case.factory: - ancestors=case.factory.context.get(case, []) - for an in ancestors[:2]: - #log.debug('reset ancestor %s', an) - if getattr(an, '_multiprocess_shared_', False): - an._multiprocess_can_split_=True - #an._multiprocess_shared_=False - self.collect(case, testQueue, tasks, to_teardown, result) - - else: - test_addr = self.addtask(testQueue,tasks,case) - log.debug("Queued test %s (%s) to %s", - len(tasks), test_addr, testQueue) - - def startProcess(self, iworker, testQueue, resultQueue, shouldStop, result): - currentaddr = Value('c',bytes_('')) - currentstart = Value('d',time.time()) - keyboardCaught = Event() - p = Process(target=runner, - args=(iworker, testQueue, - resultQueue, - currentaddr, - currentstart, - keyboardCaught, - shouldStop, - self.loaderClass, - result.__class__, - pickle.dumps(self.config))) - p.currentaddr = currentaddr - p.currentstart = currentstart - p.keyboardCaught = keyboardCaught - old = signal.signal(signal.SIGILL, signalhandler) - p.start() - signal.signal(signal.SIGILL, old) - return p - - def run(self, test): - """ - Execute the test (which may be a test suite). If the test is a suite, - distribute it out among as many processes as have been configured, at - as fine a level as is possible given the context fixtures defined in - the suite or any sub-suites. 
- - """ - log.debug("%s.run(%s) (%s)", self, test, os.getpid()) - wrapper = self.config.plugins.prepareTest(test) - if wrapper is not None: - test = wrapper - - # plugins can decorate or capture the output stream - wrapped = self.config.plugins.setOutputStream(self.stream) - if wrapped is not None: - self.stream = wrapped - - testQueue = Queue() - resultQueue = Queue() - tasks = [] - completed = [] - workers = [] - to_teardown = [] - shouldStop = Event() - - result = self._makeResult() - start = time.time() - - self.collect(test, testQueue, tasks, to_teardown, result) - - log.debug("Starting %s workers", self.config.multiprocess_workers) - for i in range(self.config.multiprocess_workers): - p = self.startProcess(i, testQueue, resultQueue, shouldStop, result) - workers.append(p) - log.debug("Started worker process %s", i+1) - - total_tasks = len(tasks) - # need to keep track of the next time to check for timeouts in case - # more than one process times out at the same time. - nexttimeout=self.config.multiprocess_timeout - thrownError = None - - try: - while tasks: - log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs", - len(completed), total_tasks,nexttimeout) - try: - iworker, addr, newtask_addrs, batch_result = resultQueue.get( - timeout=nexttimeout) - log.debug('Results received for worker %d, %s, new tasks: %d', - iworker,addr,len(newtask_addrs)) - try: - try: - tasks.remove(addr) - except ValueError: - log.warn('worker %s failed to remove from tasks: %s', - iworker,addr) - total_tasks += len(newtask_addrs) - tasks.extend(newtask_addrs) - except KeyError: - log.debug("Got result for unknown task? %s", addr) - log.debug("current: %s",str(list(tasks)[0])) - else: - completed.append([addr,batch_result]) - self.consolidate(result, batch_result) - if (self.config.stopOnError - and not result.wasSuccessful()): - # set the stop condition - shouldStop.set() - break - if self.config.multiprocess_restartworker: - log.debug('joining worker %s',iworker) - # wait for working, but not that important if worker - # cannot be joined in fact, for workers that add to - # testQueue, they will not terminate until all their - # items are read - workers[iworker].join(timeout=1) - if not shouldStop.is_set() and not testQueue.empty(): - log.debug('starting new process on worker %s',iworker) - workers[iworker] = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result) - except Empty: - log.debug("Timed out with %s tasks pending " - "(empty testQueue=%r): %s", - len(tasks),testQueue.empty(),str(tasks)) - any_alive = False - for iworker, w in enumerate(workers): - if w.is_alive(): - worker_addr = bytes_(w.currentaddr.value,'ascii') - timeprocessing = time.time() - w.currentstart.value - if ( len(worker_addr) == 0 - and timeprocessing > self.config.multiprocess_timeout-0.1): - log.debug('worker %d has finished its work item, ' - 'but is not exiting? do we wait for it?', - iworker) - else: - any_alive = True - if (len(worker_addr) > 0 - and timeprocessing > self.config.multiprocess_timeout-0.1): - log.debug('timed out worker %s: %s', - iworker,worker_addr) - w.currentaddr.value = bytes_('') - # If the process is in C++ code, sending a SIGILL - # might not send a python KeybordInterrupt exception - # therefore, send multiple signals until an - # exception is caught. 
If this takes too long, then - # terminate the process - w.keyboardCaught.clear() - startkilltime = time.time() - while not w.keyboardCaught.is_set() and w.is_alive(): - if time.time()-startkilltime > self.waitkilltime: - # have to terminate... - log.error("terminating worker %s",iworker) - w.terminate() - # there is a small probability that the - # terminated process might send a result, - # which has to be specially handled or - # else processes might get orphaned. - workers[iworker] = w = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result) - break - os.kill(w.pid, signal.SIGILL) - time.sleep(0.1) - if not any_alive and testQueue.empty(): - log.debug("All workers dead") - break - nexttimeout=self.config.multiprocess_timeout - for w in workers: - if w.is_alive() and len(w.currentaddr.value) > 0: - timeprocessing = time.time()-w.currentstart.value - if timeprocessing <= self.config.multiprocess_timeout: - nexttimeout = min(nexttimeout, - self.config.multiprocess_timeout-timeprocessing) - log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks)) - - except (KeyboardInterrupt, SystemExit), e: - log.info('parent received ctrl-c when waiting for test results') - thrownError = e - #resultQueue.get(False) - - result.addError(test, sys.exc_info()) - - try: - for case in to_teardown: - log.debug("Tearing down shared fixtures for %s", case) - try: - case.tearDown() - except (KeyboardInterrupt, SystemExit): - raise - except: - result.addError(case, sys.exc_info()) - - stop = time.time() - - # first write since can freeze on shutting down processes - result.printErrors() - result.printSummary(start, stop) - self.config.plugins.finalize(result) - - if thrownError is None: - log.debug("Tell all workers to stop") - for w in workers: - if w.is_alive(): - testQueue.put('STOP', block=False) - - # wait for the workers to end - for iworker,worker in enumerate(workers): - if worker.is_alive(): - log.debug('joining worker %s',iworker) - worker.join() - if worker.is_alive(): - log.debug('failed to join worker %s',iworker) - except (KeyboardInterrupt, SystemExit): - log.info('parent received ctrl-c when shutting down: stop all processes') - for worker in workers: - if worker.is_alive(): - worker.terminate() - - if thrownError: raise thrownError - else: raise - - return result - - def addtask(testQueue,tasks,case): - arg = None - if isinstance(case,nose.case.Test) and hasattr(case.test,'arg'): - # this removes the top level descriptor and allows real function - # name to be returned - case.test.descriptor = None - arg = case.test.arg - test_addr = MultiProcessTestRunner.address(case) - testQueue.put((test_addr,arg), block=False) - if arg is not None: - test_addr += str(arg) - if tasks is not None: - tasks.append(test_addr) - return test_addr - addtask = staticmethod(addtask) - - def address(case): - if hasattr(case, 'address'): - file, mod, call = case.address() - elif hasattr(case, 'context'): - file, mod, call = test_address(case.context) - else: - raise Exception("Unable to convert %s to address" % case) - parts = [] - if file is None: - if mod is None: - raise Exception("Unaddressable case %s" % case) - else: - parts.append(mod) - else: - # strip __init__.py(c) from end of file part - # if present, having it there confuses loader - dirname, basename = os.path.split(file) - if basename.startswith('__init__'): - file = dirname - parts.append(file) - if call is not None: - parts.append(call) - return ':'.join(map(str, parts)) - address = staticmethod(address) - - def 
nextBatch(self, test): - # allows tests or suites to mark themselves as not safe - # for multiprocess execution - if hasattr(test, 'context'): - if not getattr(test.context, '_multiprocess_', True): - return - - if ((isinstance(test, ContextSuite) - and test.hasFixtures(self.checkCanSplit)) - or not getattr(test, 'can_split', True) - or not isinstance(test, unittest.TestSuite)): - # regular test case, or a suite with context fixtures - - # special case: when run like nosetests path/to/module.py - # the top-level suite has only one item, and it shares - # the same context as that item. In that case, we want the - # item, not the top-level suite - if isinstance(test, ContextSuite): - contained = list(test) - if (len(contained) == 1 - and getattr(contained[0], - 'context', None) == test.context): - test = contained[0] - yield test - else: - # Suite is without fixtures at this level; but it may have - # fixtures at any deeper level, so we need to examine it all - # the way down to the case level - for case in test: - for batch in self.nextBatch(case): - yield batch - - def checkCanSplit(context, fixt): - """ - Callback that we use to check whether the fixtures found in a - context or ancestor are ones we care about. - - Contexts can tell us that their fixtures are reentrant by setting - _multiprocess_can_split_. So if we see that, we return False to - disregard those fixtures. - """ - if not fixt: - return False - if getattr(context, '_multiprocess_can_split_', False): - return False - return True - checkCanSplit = staticmethod(checkCanSplit) - - def sharedFixtures(self, case): - context = getattr(case, 'context', None) - if not context: - return False - return getattr(context, '_multiprocess_shared_', False) - - def consolidate(self, result, batch_result): - log.debug("batch result is %s" , batch_result) - try: - output, testsRun, failures, errors, errorClasses = batch_result - except ValueError: - log.debug("result in unexpected format %s", batch_result) - failure.Failure(*sys.exc_info())(result) - return - self.stream.write(output) - result.testsRun += testsRun - result.failures.extend(failures) - result.errors.extend(errors) - for key, (storage, label, isfail) in errorClasses.items(): - if key not in result.errorClasses: - # Ordinarily storage is result attribute - # but it's only processed through the errorClasses - # dict, so it's ok to fake it here - result.errorClasses[key] = ([], label, isfail) - mystorage, _junk, _junk = result.errorClasses[key] - mystorage.extend(storage) - log.debug("Ran %s tests (total: %s)", testsRun, result.testsRun) - - -def runner(ix, testQueue, resultQueue, currentaddr, currentstart, - keyboardCaught, shouldStop, loaderClass, resultClass, config): - try: - try: - return __runner(ix, testQueue, resultQueue, currentaddr, currentstart, - keyboardCaught, shouldStop, loaderClass, resultClass, config) - except KeyboardInterrupt: - log.debug('Worker %s keyboard interrupt, stopping',ix) - except Empty: - log.debug("Worker %s timed out waiting for tasks", ix) - -def __runner(ix, testQueue, resultQueue, currentaddr, currentstart, - keyboardCaught, shouldStop, loaderClass, resultClass, config): - - config = pickle.loads(config) - dummy_parser = config.parserClass() - if _instantiate_plugins is not None: - for pluginclass in _instantiate_plugins: - plugin = pluginclass() - plugin.addOptions(dummy_parser,{}) - config.plugins.addPlugin(plugin) - config.plugins.configure(config.options,config) - config.plugins.begin() - log.debug("Worker %s executing, pid=%d", 
ix,os.getpid()) - loader = loaderClass(config=config) - loader.suiteClass.suiteClass = NoSharedFixtureContextSuite - - def get(): - return testQueue.get(timeout=config.multiprocess_timeout) - - def makeResult(): - stream = _WritelnDecorator(StringIO()) - result = resultClass(stream, descriptions=1, - verbosity=config.verbosity, - config=config) - plug_result = config.plugins.prepareTestResult(result) - if plug_result: - return plug_result - return result - - def batch(result): - failures = [(TestLet(c), err) for c, err in result.failures] - errors = [(TestLet(c), err) for c, err in result.errors] - errorClasses = {} - for key, (storage, label, isfail) in result.errorClasses.items(): - errorClasses[key] = ([(TestLet(c), err) for c, err in storage], - label, isfail) - return ( - result.stream.getvalue(), - result.testsRun, - failures, - errors, - errorClasses) - for test_addr, arg in iter(get, 'STOP'): - if shouldStop.is_set(): - log.exception('Worker %d STOPPED',ix) - break - result = makeResult() - test = loader.loadTestsFromNames([test_addr]) - test.testQueue = testQueue - test.tasks = [] - test.arg = arg - log.debug("Worker %s Test is %s (%s)", ix, test_addr, test) - try: - if arg is not None: - test_addr = test_addr + str(arg) - currentaddr.value = bytes_(test_addr) - currentstart.value = time.time() - test(result) - currentaddr.value = bytes_('') - resultQueue.put((ix, test_addr, test.tasks, batch(result))) - except KeyboardInterrupt, e: #TimedOutException: - timeout = isinstance(e, TimedOutException) - if timeout: - keyboardCaught.set() - if len(currentaddr.value): - if timeout: - msg = 'Worker %s timed out, failing current test %s' - else: - msg = 'Worker %s keyboard interrupt, failing current test %s' - log.exception(msg,ix,test_addr) - currentaddr.value = bytes_('') - failure.Failure(*sys.exc_info())(result) - resultQueue.put((ix, test_addr, test.tasks, batch(result))) - else: - if timeout: - msg = 'Worker %s test %s timed out' - else: - msg = 'Worker %s test %s keyboard interrupt' - log.debug(msg,ix,test_addr) - resultQueue.put((ix, test_addr, test.tasks, batch(result))) - if not timeout: - raise - except SystemExit: - currentaddr.value = bytes_('') - log.exception('Worker %s system exit',ix) - raise - except: - currentaddr.value = bytes_('') - log.exception("Worker %s error running test or returning " - "results",ix) - failure.Failure(*sys.exc_info())(result) - resultQueue.put((ix, test_addr, test.tasks, batch(result))) - if config.multiprocess_restartworker: - break - log.debug("Worker %s ending", ix) - - -class NoSharedFixtureContextSuite(ContextSuite): - """ - Context suite that never fires shared fixtures. - - When a context sets _multiprocess_shared_, fixtures in that context - are executed by the main process. Using this suite class prevents them - from executing in the runner process as well. - - """ - testQueue = None - tasks = None - arg = None - def setupContext(self, context): - if getattr(context, '_multiprocess_shared_', False): - return - super(NoSharedFixtureContextSuite, self).setupContext(context) - - def teardownContext(self, context): - if getattr(context, '_multiprocess_shared_', False): - return - super(NoSharedFixtureContextSuite, self).teardownContext(context) - def run(self, result): - """Run tests in suite inside of suite fixtures. 
- """ - # proxy the result for myself - log.debug("suite %s (%s) run called, tests: %s", - id(self), self, self._tests) - if self.resultProxy: - result, orig = self.resultProxy(result, self), result - else: - result, orig = result, result - try: - #log.debug('setUp for %s', id(self)); - self.setUp() - except KeyboardInterrupt: - raise - except: - self.error_context = 'setup' - result.addError(self, self._exc_info()) - return - try: - for test in self._tests: - if (isinstance(test,nose.case.Test) - and self.arg is not None): - test.test.arg = self.arg - else: - test.arg = self.arg - test.testQueue = self.testQueue - test.tasks = self.tasks - if result.shouldStop: - log.debug("stopping") - break - # each nose.case.Test will create its own result proxy - # so the cases need the original result, to avoid proxy - # chains - #log.debug('running test %s in suite %s', test, self); - try: - test(orig) - except KeyboardInterrupt, e: - timeout = isinstance(e, TimedOutException) - if timeout: - msg = 'Timeout when running test %s in suite %s' - else: - msg = 'KeyboardInterrupt when running test %s in suite %s' - log.debug(msg, test, self) - err = (TimedOutException,TimedOutException(str(test)), - sys.exc_info()[2]) - test.config.plugins.addError(test,err) - orig.addError(test,err) - if not timeout: - raise - finally: - self.has_run = True - try: - #log.debug('tearDown for %s', id(self)); - self.tearDown() - except KeyboardInterrupt: - raise - except: - self.error_context = 'teardown' - result.addError(self, self._exc_info()) diff --git a/lib/spack/external/nose/plugins/plugintest.py b/lib/spack/external/nose/plugins/plugintest.py deleted file mode 100644 index 76d0d2c48c..0000000000 --- a/lib/spack/external/nose/plugins/plugintest.py +++ /dev/null @@ -1,416 +0,0 @@ -""" -Testing Plugins -=============== - -The plugin interface is well-tested enough to safely unit test your -use of its hooks with some level of confidence. However, there is also -a mixin for unittest.TestCase called PluginTester that's designed to -test plugins in their native runtime environment. - -Here's a simple example with a do-nothing plugin and a composed suite. - - >>> import unittest - >>> from nose.plugins import Plugin, PluginTester - >>> class FooPlugin(Plugin): - ... pass - >>> class TestPluginFoo(PluginTester, unittest.TestCase): - ... activate = '--with-foo' - ... plugins = [FooPlugin()] - ... def test_foo(self): - ... for line in self.output: - ... # i.e. check for patterns - ... pass - ... - ... # or check for a line containing ... - ... assert "ValueError" in self.output - ... def makeSuite(self): - ... class TC(unittest.TestCase): - ... def runTest(self): - ... raise ValueError("I hate foo") - ... return [TC('runTest')] - ... - >>> res = unittest.TestResult() - >>> case = TestPluginFoo('test_foo') - >>> _ = case(res) - >>> res.errors - [] - >>> res.failures - [] - >>> res.wasSuccessful() - True - >>> res.testsRun - 1 - -And here is a more complex example of testing a plugin that has extra -arguments and reads environment variables. - - >>> import unittest, os - >>> from nose.plugins import Plugin, PluginTester - >>> class FancyOutputter(Plugin): - ... name = "fancy" - ... def configure(self, options, conf): - ... Plugin.configure(self, options, conf) - ... if not self.enabled: - ... return - ... self.fanciness = 1 - ... if options.more_fancy: - ... self.fanciness = 2 - ... if 'EVEN_FANCIER' in self.env: - ... self.fanciness = 3 - ... - ... def options(self, parser, env=os.environ): - ... self.env = env - ... 
parser.add_option('--more-fancy', action='store_true') - ... Plugin.options(self, parser, env=env) - ... - ... def report(self, stream): - ... stream.write("FANCY " * self.fanciness) - ... - >>> class TestFancyOutputter(PluginTester, unittest.TestCase): - ... activate = '--with-fancy' # enables the plugin - ... plugins = [FancyOutputter()] - ... args = ['--more-fancy'] - ... env = {'EVEN_FANCIER': '1'} - ... - ... def test_fancy_output(self): - ... assert "FANCY FANCY FANCY" in self.output, ( - ... "got: %s" % self.output) - ... def makeSuite(self): - ... class TC(unittest.TestCase): - ... def runTest(self): - ... raise ValueError("I hate fancy stuff") - ... return [TC('runTest')] - ... - >>> res = unittest.TestResult() - >>> case = TestFancyOutputter('test_fancy_output') - >>> _ = case(res) - >>> res.errors - [] - >>> res.failures - [] - >>> res.wasSuccessful() - True - >>> res.testsRun - 1 - -""" - -import re -import sys -from warnings import warn - -try: - from cStringIO import StringIO -except ImportError: - from StringIO import StringIO - -__all__ = ['PluginTester', 'run'] - -from os import getpid -class MultiProcessFile(object): - """ - helper for testing multiprocessing - - multiprocessing poses a problem for doctests, since the strategy - of replacing sys.stdout/stderr with file-like objects then - inspecting the results won't work: the child processes will - write to the objects, but the data will not be reflected - in the parent doctest-ing process. - - The solution is to create file-like objects which will interact with - multiprocessing in a more desirable way. - - All processes can write to this object, but only the creator can read. - This allows the testing system to see a unified picture of I/O. - """ - def __init__(self): - # per advice at: - # http://docs.python.org/library/multiprocessing.html#all-platforms - self.__master = getpid() - self.__queue = Manager().Queue() - self.__buffer = StringIO() - self.softspace = 0 - - def buffer(self): - if getpid() != self.__master: - return - - from Queue import Empty - from collections import defaultdict - cache = defaultdict(str) - while True: - try: - pid, data = self.__queue.get_nowait() - except Empty: - break - if pid == (): - #show parent output after children - #this is what users see, usually - pid = ( 1e100, ) # googol! - cache[pid] += data - for pid in sorted(cache): - #self.__buffer.write( '%s wrote: %r\n' % (pid, cache[pid]) ) #DEBUG - self.__buffer.write( cache[pid] ) - def write(self, data): - # note that these pids are in the form of current_process()._identity - # rather than OS pids - from multiprocessing import current_process - pid = current_process()._identity - self.__queue.put((pid, data)) - def __iter__(self): - "getattr doesn't work for iter()" - self.buffer() - return self.__buffer - def seek(self, offset, whence=0): - self.buffer() - return self.__buffer.seek(offset, whence) - def getvalue(self): - self.buffer() - return self.__buffer.getvalue() - def __getattr__(self, attr): - return getattr(self.__buffer, attr) - -try: - from multiprocessing import Manager - Buffer = MultiProcessFile -except ImportError: - Buffer = StringIO - -class PluginTester(object): - """A mixin for testing nose plugins in their runtime environment. - - Subclass this and mix in unittest.TestCase to run integration/functional - tests on your plugin. 
When setUp() is called, the stub test suite is - executed with your plugin so that during an actual test you can inspect the - artifacts of how your plugin interacted with the stub test suite. - - - activate - - - the argument to send nosetests to activate the plugin - - - suitepath - - - if set, this is the path of the suite to test. Otherwise, you - will need to use the hook, makeSuite() - - - plugins - - - the list of plugins to make available during the run. Note - that this does not mean these plugins will be *enabled* during - the run -- only the plugins enabled by the activate argument - or other settings in argv or env will be enabled. - - - args - - - a list of arguments to add to the nosetests command, in addition to - the activate argument - - - env - - - optional dict of environment variables to send nosetests - - """ - activate = None - suitepath = None - args = None - env = {} - argv = None - plugins = [] - ignoreFiles = None - - def makeSuite(self): - """returns a suite object of tests to run (unittest.TestSuite()) - - If self.suitepath is None, this must be implemented. The returned suite - object will be executed with all plugins activated. It may return - None. - - Here is an example of a basic suite object you can return :: - - >>> import unittest - >>> class SomeTest(unittest.TestCase): - ... def runTest(self): - ... raise ValueError("Now do something, plugin!") - ... - >>> unittest.TestSuite([SomeTest()]) # doctest: +ELLIPSIS - ]> - - """ - raise NotImplementedError - - def _execPlugin(self): - """execute the plugin on the internal test suite. - """ - from nose.config import Config - from nose.core import TestProgram - from nose.plugins.manager import PluginManager - - suite = None - stream = Buffer() - conf = Config(env=self.env, - stream=stream, - plugins=PluginManager(plugins=self.plugins)) - if self.ignoreFiles is not None: - conf.ignoreFiles = self.ignoreFiles - if not self.suitepath: - suite = self.makeSuite() - - self.nose = TestProgram(argv=self.argv, config=conf, suite=suite, - exit=False) - self.output = AccessDecorator(stream) - - def setUp(self): - """runs nosetests with the specified test suite, all plugins - activated. - """ - self.argv = ['nosetests', self.activate] - if self.args: - self.argv.extend(self.args) - if self.suitepath: - self.argv.append(self.suitepath) - - self._execPlugin() - - -class AccessDecorator(object): - stream = None - _buf = None - def __init__(self, stream): - self.stream = stream - stream.seek(0) - self._buf = stream.read() - stream.seek(0) - def __contains__(self, val): - return val in self._buf - def __iter__(self): - return iter(self.stream) - def __str__(self): - return self._buf - - -def blankline_separated_blocks(text): - "a bunch of === characters is also considered a blank line" - block = [] - for line in text.splitlines(True): - block.append(line) - line = line.strip() - if not line or line.startswith('===') and not line.strip('='): - yield "".join(block) - block = [] - if block: - yield "".join(block) - - -def remove_stack_traces(out): - # this regexp taken from Python 2.5's doctest - traceback_re = re.compile(r""" - # Grab the traceback header. Different versions of Python have - # said different things on the first traceback line. - ^(?P Traceback\ \( - (?: most\ recent\ call\ last - | innermost\ last - ) \) : - ) - \s* $ # toss trailing whitespace on the header. - (?P .*?) # don't blink: absorb stuff until... - ^(?=\w) # a line *starts* with alphanum. 
- .*?(?P \w+ ) # exception name - (?P [:\n] .*) # the rest - """, re.VERBOSE | re.MULTILINE | re.DOTALL) - blocks = [] - for block in blankline_separated_blocks(out): - blocks.append(traceback_re.sub(r"\g\n...\n\g\g", block)) - return "".join(blocks) - - -def simplify_warnings(out): - warn_re = re.compile(r""" - # Cut the file and line no, up to the warning name - ^.*:\d+:\s - (?P\w+): \s+ # warning category - (?P.+) $ \n? # warning message - ^ .* $ # stack frame - """, re.VERBOSE | re.MULTILINE) - return warn_re.sub(r"\g: \g", out) - - -def remove_timings(out): - return re.sub( - r"Ran (\d+ tests?) in [0-9.]+s", r"Ran \1 in ...s", out) - - -def munge_nose_output_for_doctest(out): - """Modify nose output to make it easy to use in doctests.""" - out = remove_stack_traces(out) - out = simplify_warnings(out) - out = remove_timings(out) - return out.strip() - - -def run(*arg, **kw): - """ - Specialized version of nose.run for use inside of doctests that - test test runs. - - This version of run() prints the result output to stdout. Before - printing, the output is processed by replacing the timing - information with an ellipsis (...), removing traceback stacks, and - removing trailing whitespace. - - Use this version of run wherever you are writing a doctest that - tests nose (or unittest) test result output. - - Note: do not use doctest: +ELLIPSIS when testing nose output, - since ellipses ("test_foo ... ok") in your expected test runner - output may match multiple lines of output, causing spurious test - passes! - """ - from nose import run - from nose.config import Config - from nose.plugins.manager import PluginManager - - buffer = Buffer() - if 'config' not in kw: - plugins = kw.pop('plugins', []) - if isinstance(plugins, list): - plugins = PluginManager(plugins=plugins) - env = kw.pop('env', {}) - kw['config'] = Config(env=env, plugins=plugins) - if 'argv' not in kw: - kw['argv'] = ['nosetests', '-v'] - kw['config'].stream = buffer - - # Set up buffering so that all output goes to our buffer, - # or warn user if deprecated behavior is active. If this is not - # done, prints and warnings will either be out of place or - # disappear. - stderr = sys.stderr - stdout = sys.stdout - if kw.pop('buffer_all', False): - sys.stdout = sys.stderr = buffer - restore = True - else: - restore = False - warn("The behavior of nose.plugins.plugintest.run() will change in " - "the next release of nose. The current behavior does not " - "correctly account for output to stdout and stderr. To enable " - "correct behavior, use run_buffered() instead, or pass " - "the keyword argument buffer_all=True to run().", - DeprecationWarning, stacklevel=2) - try: - run(*arg, **kw) - finally: - if restore: - sys.stderr = stderr - sys.stdout = stdout - out = buffer.getvalue() - print munge_nose_output_for_doctest(out) - - -def run_buffered(*arg, **kw): - kw['buffer_all'] = True - run(*arg, **kw) - -if __name__ == '__main__': - import doctest - doctest.testmod() diff --git a/lib/spack/external/nose/plugins/prof.py b/lib/spack/external/nose/plugins/prof.py deleted file mode 100644 index 4d304a934b..0000000000 --- a/lib/spack/external/nose/plugins/prof.py +++ /dev/null @@ -1,154 +0,0 @@ -"""This plugin will run tests using the hotshot profiler, which is part -of the standard library. To turn it on, use the ``--with-profile`` option -or set the NOSE_WITH_PROFILE environment variable. 
Profiler output can be -controlled with the ``--profile-sort`` and ``--profile-restrict`` options, -and the profiler output file may be changed with ``--profile-stats-file``. - -See the `hotshot documentation`_ in the standard library documentation for -more details on the various output options. - -.. _hotshot documentation: http://docs.python.org/library/hotshot.html -""" - -try: - import hotshot - from hotshot import stats -except ImportError: - hotshot, stats = None, None -import logging -import os -import sys -import tempfile -from nose.plugins.base import Plugin -from nose.util import tolist - -log = logging.getLogger('nose.plugins') - -class Profile(Plugin): - """ - Use this plugin to run tests using the hotshot profiler. - """ - pfile = None - clean_stats_file = False - def options(self, parser, env): - """Register commandline options. - """ - if not self.available(): - return - Plugin.options(self, parser, env) - parser.add_option('--profile-sort', action='store', dest='profile_sort', - default=env.get('NOSE_PROFILE_SORT', 'cumulative'), - metavar="SORT", - help="Set sort order for profiler output") - parser.add_option('--profile-stats-file', action='store', - dest='profile_stats_file', - metavar="FILE", - default=env.get('NOSE_PROFILE_STATS_FILE'), - help='Profiler stats file; default is a new ' - 'temp file on each run') - parser.add_option('--profile-restrict', action='append', - dest='profile_restrict', - metavar="RESTRICT", - default=env.get('NOSE_PROFILE_RESTRICT'), - help="Restrict profiler output. See help for " - "pstats.Stats for details") - - def available(cls): - return hotshot is not None - available = classmethod(available) - - def begin(self): - """Create profile stats file and load profiler. - """ - if not self.available(): - return - self._create_pfile() - self.prof = hotshot.Profile(self.pfile) - - def configure(self, options, conf): - """Configure plugin. - """ - if not self.available(): - self.enabled = False - return - Plugin.configure(self, options, conf) - self.conf = conf - if options.profile_stats_file: - self.pfile = options.profile_stats_file - self.clean_stats_file = False - else: - self.pfile = None - self.clean_stats_file = True - self.fileno = None - self.sort = options.profile_sort - self.restrict = tolist(options.profile_restrict) - - def prepareTest(self, test): - """Wrap entire test run in :func:`prof.runcall`. - """ - if not self.available(): - return - log.debug('preparing test %s' % test) - def run_and_profile(result, prof=self.prof, test=test): - self._create_pfile() - prof.runcall(test, result) - return run_and_profile - - def report(self, stream): - """Output profiler report. - """ - log.debug('printing profiler report') - self.prof.close() - prof_stats = stats.load(self.pfile) - prof_stats.sort_stats(self.sort) - - # 2.5 has completely different stream handling from 2.4 and earlier. - # Before 2.5, stats objects have no stream attribute; in 2.5 and later - # a reference sys.stdout is stored before we can tweak it. - compat_25 = hasattr(prof_stats, 'stream') - if compat_25: - tmp = prof_stats.stream - prof_stats.stream = stream - else: - tmp = sys.stdout - sys.stdout = stream - try: - if self.restrict: - log.debug('setting profiler restriction to %s', self.restrict) - prof_stats.print_stats(*self.restrict) - else: - prof_stats.print_stats() - finally: - if compat_25: - prof_stats.stream = tmp - else: - sys.stdout = tmp - - def finalize(self, result): - """Clean up stats file, if configured to do so. 
- """ - if not self.available(): - return - try: - self.prof.close() - except AttributeError: - # TODO: is this trying to catch just the case where not - # hasattr(self.prof, "close")? If so, the function call should be - # moved out of the try: suite. - pass - if self.clean_stats_file: - if self.fileno: - try: - os.close(self.fileno) - except OSError: - pass - try: - os.unlink(self.pfile) - except OSError: - pass - return None - - def _create_pfile(self): - if not self.pfile: - self.fileno, self.pfile = tempfile.mkstemp() - self.clean_stats_file = True diff --git a/lib/spack/external/nose/plugins/skip.py b/lib/spack/external/nose/plugins/skip.py deleted file mode 100644 index 9d1ac8f604..0000000000 --- a/lib/spack/external/nose/plugins/skip.py +++ /dev/null @@ -1,63 +0,0 @@ -""" -This plugin installs a SKIP error class for the SkipTest exception. -When SkipTest is raised, the exception will be logged in the skipped -attribute of the result, 'S' or 'SKIP' (verbose) will be output, and -the exception will not be counted as an error or failure. This plugin -is enabled by default but may be disabled with the ``--no-skip`` option. -""" - -from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin - - -# on SkipTest: -# - unittest SkipTest is first preference, but it's only available -# for >= 2.7 -# - unittest2 SkipTest is second preference for older pythons. This -# mirrors logic for choosing SkipTest exception in testtools -# - if none of the above, provide custom class -try: - from unittest.case import SkipTest -except ImportError: - try: - from unittest2.case import SkipTest - except ImportError: - class SkipTest(Exception): - """Raise this exception to mark a test as skipped. - """ - pass - - -class Skip(ErrorClassPlugin): - """ - Plugin that installs a SKIP error class for the SkipTest - exception. When SkipTest is raised, the exception will be logged - in the skipped attribute of the result, 'S' or 'SKIP' (verbose) - will be output, and the exception will not be counted as an error - or failure. - """ - enabled = True - skipped = ErrorClass(SkipTest, - label='SKIP', - isfailure=False) - - def options(self, parser, env): - """ - Add my options to command line. - """ - env_opt = 'NOSE_WITHOUT_SKIP' - parser.add_option('--no-skip', action='store_true', - dest='noSkip', default=env.get(env_opt, False), - help="Disable special handling of SkipTest " - "exceptions.") - - def configure(self, options, conf): - """ - Configure plugin. Skip plugin is enabled by default. - """ - if not self.can_configure: - return - self.conf = conf - disable = getattr(options, 'noSkip', False) - if disable: - self.enabled = False - diff --git a/lib/spack/external/nose/plugins/testid.py b/lib/spack/external/nose/plugins/testid.py deleted file mode 100644 index ae8119bd01..0000000000 --- a/lib/spack/external/nose/plugins/testid.py +++ /dev/null @@ -1,311 +0,0 @@ -""" -This plugin adds a test id (like #1) to each test name output. After -you've run once to generate test ids, you can re-run individual -tests by activating the plugin and passing the ids (with or -without the # prefix) instead of test names. - -For example, if your normal test run looks like:: - - % nosetests -v - tests.test_a ... ok - tests.test_b ... ok - tests.test_c ... ok - -When adding ``--with-id`` you'll see:: - - % nosetests -v --with-id - #1 tests.test_a ... ok - #2 tests.test_b ... ok - #3 tests.test_c ... ok - -Then you can re-run individual tests by supplying just an id number:: - - % nosetests -v --with-id 2 - #2 tests.test_b ... 
ok - -You can also pass multiple id numbers:: - - % nosetests -v --with-id 2 3 - #2 tests.test_b ... ok - #3 tests.test_c ... ok - -Since most shells consider '#' a special character, you can leave it out when -specifying a test id. - -Note that when run without the -v switch, no special output is displayed, but -the ids file is still written. - -Looping over failed tests -------------------------- - -This plugin also adds a mode that will direct the test runner to record -failed tests. Subsequent test runs will then run only the tests that failed -last time. Activate this mode with the ``--failed`` switch:: - - % nosetests -v --failed - #1 test.test_a ... ok - #2 test.test_b ... ERROR - #3 test.test_c ... FAILED - #4 test.test_d ... ok - -On the second run, only tests #2 and #3 will run:: - - % nosetests -v --failed - #2 test.test_b ... ERROR - #3 test.test_c ... FAILED - -As you correct errors and tests pass, they'll drop out of subsequent runs. - -First:: - - % nosetests -v --failed - #2 test.test_b ... ok - #3 test.test_c ... FAILED - -Second:: - - % nosetests -v --failed - #3 test.test_c ... FAILED - -When all tests pass, the full set will run on the next invocation. - -First:: - - % nosetests -v --failed - #3 test.test_c ... ok - -Second:: - - % nosetests -v --failed - #1 test.test_a ... ok - #2 test.test_b ... ok - #3 test.test_c ... ok - #4 test.test_d ... ok - -.. note :: - - If you expect to use ``--failed`` regularly, it's a good idea to always run - using the ``--with-id`` option. This will ensure that an id file is always - created, allowing you to add ``--failed`` to the command line as soon as - you have failing tests. Otherwise, your first run using ``--failed`` will - (perhaps surprisingly) run *all* tests, because there won't be an id file - containing the record of failed tests from your previous run. - -""" -__test__ = False - -import logging -import os -from nose.plugins import Plugin -from nose.util import src, set - -try: - from cPickle import dump, load -except ImportError: - from pickle import dump, load - -log = logging.getLogger(__name__) - - -class TestId(Plugin): - """ - Activate to add a test id (like #1) to each test name output. Activate - with --failed to rerun failing tests only. - """ - name = 'id' - idfile = None - collecting = True - loopOnFailed = False - - def options(self, parser, env): - """Register commandline options. - """ - Plugin.options(self, parser, env) - parser.add_option('--id-file', action='store', dest='testIdFile', - default='.noseids', metavar="FILE", - help="Store test ids found in test runs in this " - "file. Default is the file .noseids in the " - "working directory.") - parser.add_option('--failed', action='store_true', - dest='failed', default=False, - help="Run the tests that failed in the last " - "test run.") - - def configure(self, options, conf): - """Configure plugin. 
- """ - Plugin.configure(self, options, conf) - if options.failed: - self.enabled = True - self.loopOnFailed = True - log.debug("Looping on failed tests") - self.idfile = os.path.expanduser(options.testIdFile) - if not os.path.isabs(self.idfile): - self.idfile = os.path.join(conf.workingDir, self.idfile) - self.id = 1 - # Ids and tests are mirror images: ids are {id: test address} and - # tests are {test address: id} - self.ids = {} - self.tests = {} - self.failed = [] - self.source_names = [] - # used to track ids seen when tests is filled from - # loaded ids file - self._seen = {} - self._write_hashes = conf.verbosity >= 2 - - def finalize(self, result): - """Save new ids file, if needed. - """ - if result.wasSuccessful(): - self.failed = [] - if self.collecting: - ids = dict(list(zip(list(self.tests.values()), list(self.tests.keys())))) - else: - ids = self.ids - fh = open(self.idfile, 'wb') - dump({'ids': ids, - 'failed': self.failed, - 'source_names': self.source_names}, fh) - fh.close() - log.debug('Saved test ids: %s, failed %s to %s', - ids, self.failed, self.idfile) - - def loadTestsFromNames(self, names, module=None): - """Translate ids in the list of requested names into their - test addresses, if they are found in my dict of tests. - """ - log.debug('ltfn %s %s', names, module) - try: - fh = open(self.idfile, 'rb') - data = load(fh) - if 'ids' in data: - self.ids = data['ids'] - self.failed = data['failed'] - self.source_names = data['source_names'] - else: - # old ids field - self.ids = data - self.failed = [] - self.source_names = names - if self.ids: - self.id = max(self.ids) + 1 - self.tests = dict(list(zip(list(self.ids.values()), list(self.ids.keys())))) - else: - self.id = 1 - log.debug( - 'Loaded test ids %s tests %s failed %s sources %s from %s', - self.ids, self.tests, self.failed, self.source_names, - self.idfile) - fh.close() - except ValueError, e: - # load() may throw a ValueError when reading the ids file, if it - # was generated with a newer version of Python than we are currently - # running. 
- log.debug('Error loading %s : %s', self.idfile, str(e)) - except IOError: - log.debug('IO error reading %s', self.idfile) - - if self.loopOnFailed and self.failed: - self.collecting = False - names = self.failed - self.failed = [] - # I don't load any tests myself, only translate names like '#2' - # into the associated test addresses - translated = [] - new_source = [] - really_new = [] - for name in names: - trans = self.tr(name) - if trans != name: - translated.append(trans) - else: - new_source.append(name) - # names that are not ids and that are not in the current - # list of source names go into the list for next time - if new_source: - new_set = set(new_source) - old_set = set(self.source_names) - log.debug("old: %s new: %s", old_set, new_set) - really_new = [s for s in new_source - if not s in old_set] - if really_new: - # remember new sources - self.source_names.extend(really_new) - if not translated: - # new set of source names, no translations - # means "run the requested tests" - names = new_source - else: - # no new names to translate and add to id set - self.collecting = False - log.debug("translated: %s new sources %s names %s", - translated, really_new, names) - return (None, translated + really_new or names) - - def makeName(self, addr): - log.debug("Make name %s", addr) - filename, module, call = addr - if filename is not None: - head = src(filename) - else: - head = module - if call is not None: - return "%s:%s" % (head, call) - return head - - def setOutputStream(self, stream): - """Get handle on output stream so the plugin can print id #s - """ - self.stream = stream - - def startTest(self, test): - """Maybe output an id # before the test name. - - Example output:: - - #1 test.test ... ok - #2 test.test_two ... ok - - """ - adr = test.address() - log.debug('start test %s (%s)', adr, adr in self.tests) - if adr in self.tests: - if adr in self._seen: - self.write(' ') - else: - self.write('#%s ' % self.tests[adr]) - self._seen[adr] = 1 - return - self.tests[adr] = self.id - self.write('#%s ' % self.id) - self.id += 1 - - def afterTest(self, test): - # None means test never ran, False means failed/err - if test.passed is False: - try: - key = str(self.tests[test.address()]) - except KeyError: - # never saw this test -- startTest didn't run - pass - else: - if key not in self.failed: - self.failed.append(key) - - def tr(self, name): - log.debug("tr '%s'", name) - try: - key = int(name.replace('#', '')) - except ValueError: - return name - log.debug("Got key %s", key) - # I'm running tests mapped from the ids file, - # not collecting new ones - if key in self.ids: - return self.makeName(self.ids[key]) - return name - - def write(self, output): - if self._write_hashes: - self.stream.write(output) diff --git a/lib/spack/external/nose/plugins/xunit.py b/lib/spack/external/nose/plugins/xunit.py deleted file mode 100644 index 90b52f5f61..0000000000 --- a/lib/spack/external/nose/plugins/xunit.py +++ /dev/null @@ -1,341 +0,0 @@ -"""This plugin provides test results in the standard XUnit XML format. - -It's designed for the `Jenkins`_ (previously Hudson) continuous build -system, but will probably work for anything else that understands an -XUnit-formatted XML representation of test results. - -Add this shell command to your builder :: - - nosetests --with-xunit - -And by default a file named nosetests.xml will be written to the -working directory. 
- -In a Jenkins builder, tick the box named "Publish JUnit test result report" -under the Post-build Actions and enter this value for Test report XMLs:: - - **/nosetests.xml - -If you need to change the name or location of the file, you can set the -``--xunit-file`` option. - -If you need to change the name of the test suite, you can set the -``--xunit-testsuite-name`` option. - -Here is an abbreviated version of what an XML test report might look like:: - - - - - - Traceback (most recent call last): - ... - TypeError: oops, wrong type - - - - -.. _Jenkins: http://jenkins-ci.org/ - -""" -import codecs -import doctest -import os -import sys -import traceback -import re -import inspect -from StringIO import StringIO -from time import time -from xml.sax import saxutils - -from nose.plugins.base import Plugin -from nose.exc import SkipTest -from nose.pyversion import force_unicode, format_exception - -# Invalid XML characters, control characters 0-31 sans \t, \n and \r -CONTROL_CHARACTERS = re.compile(r"[\000-\010\013\014\016-\037]") - -TEST_ID = re.compile(r'^(.*?)(\(.*\))$') - -def xml_safe(value): - """Replaces invalid XML characters with '?'.""" - return CONTROL_CHARACTERS.sub('?', value) - -def escape_cdata(cdata): - """Escape a string for an XML CDATA section.""" - return xml_safe(cdata).replace(']]>', ']]>]]>>> nice_classname(Exception()) # doctest: +ELLIPSIS - '...Exception' - >>> nice_classname(Exception) # doctest: +ELLIPSIS - '...Exception' - - """ - if inspect.isclass(obj): - cls_name = obj.__name__ - else: - cls_name = obj.__class__.__name__ - mod = inspect.getmodule(obj) - if mod: - name = mod.__name__ - # jython - if name.startswith('org.python.core.'): - name = name[len('org.python.core.'):] - return "%s.%s" % (name, cls_name) - else: - return cls_name - -def exc_message(exc_info): - """Return the exception's message.""" - exc = exc_info[1] - if exc is None: - # str exception - result = exc_info[0] - else: - try: - result = str(exc) - except UnicodeEncodeError: - try: - result = unicode(exc) - except UnicodeError: - # Fallback to args as neither str nor - # unicode(Exception(u'\xe6')) work in Python < 2.6 - result = exc.args[0] - result = force_unicode(result, 'UTF-8') - return xml_safe(result) - -class Tee(object): - def __init__(self, encoding, *args): - self._encoding = encoding - self._streams = args - - def write(self, data): - data = force_unicode(data, self._encoding) - for s in self._streams: - s.write(data) - - def writelines(self, lines): - for line in lines: - self.write(line) - - def flush(self): - for s in self._streams: - s.flush() - - def isatty(self): - return False - - -class Xunit(Plugin): - """This plugin provides test results in the standard XUnit XML format.""" - name = 'xunit' - score = 1500 - encoding = 'UTF-8' - error_report_file = None - - def __init__(self): - super(Xunit, self).__init__() - self._capture_stack = [] - self._currentStdout = None - self._currentStderr = None - - def _timeTaken(self): - if hasattr(self, '_timer'): - taken = time() - self._timer - else: - # test died before it ran (probably error in setup()) - # or success/failure added before test started probably - # due to custom TestResult munging - taken = 0.0 - return taken - - def _quoteattr(self, attr): - """Escape an XML attribute. 
Value can be unicode.""" - attr = xml_safe(attr) - return saxutils.quoteattr(attr) - - def options(self, parser, env): - """Sets additional command line options.""" - Plugin.options(self, parser, env) - parser.add_option( - '--xunit-file', action='store', - dest='xunit_file', metavar="FILE", - default=env.get('NOSE_XUNIT_FILE', 'nosetests.xml'), - help=("Path to xml file to store the xunit report in. " - "Default is nosetests.xml in the working directory " - "[NOSE_XUNIT_FILE]")) - - parser.add_option( - '--xunit-testsuite-name', action='store', - dest='xunit_testsuite_name', metavar="PACKAGE", - default=env.get('NOSE_XUNIT_TESTSUITE_NAME', 'nosetests'), - help=("Name of the testsuite in the xunit xml, generated by plugin. " - "Default test suite name is nosetests.")) - - def configure(self, options, config): - """Configures the xunit plugin.""" - Plugin.configure(self, options, config) - self.config = config - if self.enabled: - self.stats = {'errors': 0, - 'failures': 0, - 'passes': 0, - 'skipped': 0 - } - self.errorlist = [] - self.error_report_file_name = os.path.realpath(options.xunit_file) - self.xunit_testsuite_name = options.xunit_testsuite_name - - def report(self, stream): - """Writes an Xunit-formatted XML file - - The file includes a report of test errors and failures. - - """ - self.error_report_file = codecs.open(self.error_report_file_name, 'w', - self.encoding, 'replace') - self.stats['encoding'] = self.encoding - self.stats['testsuite_name'] = self.xunit_testsuite_name - self.stats['total'] = (self.stats['errors'] + self.stats['failures'] - + self.stats['passes'] + self.stats['skipped']) - self.error_report_file.write( - u'' - u'' % self.stats) - self.error_report_file.write(u''.join([force_unicode(e, self.encoding) - for e in self.errorlist])) - self.error_report_file.write(u'') - self.error_report_file.close() - if self.config.verbosity > 1: - stream.writeln("-" * 70) - stream.writeln("XML: %s" % self.error_report_file.name) - - def _startCapture(self): - self._capture_stack.append((sys.stdout, sys.stderr)) - self._currentStdout = StringIO() - self._currentStderr = StringIO() - sys.stdout = Tee(self.encoding, self._currentStdout, sys.stdout) - sys.stderr = Tee(self.encoding, self._currentStderr, sys.stderr) - - def startContext(self, context): - self._startCapture() - - def stopContext(self, context): - self._endCapture() - - def beforeTest(self, test): - """Initializes a timer before starting a test.""" - self._timer = time() - self._startCapture() - - def _endCapture(self): - if self._capture_stack: - sys.stdout, sys.stderr = self._capture_stack.pop() - - def afterTest(self, test): - self._endCapture() - self._currentStdout = None - self._currentStderr = None - - def finalize(self, test): - while self._capture_stack: - self._endCapture() - - def _getCapturedStdout(self): - if self._currentStdout: - value = self._currentStdout.getvalue() - if value: - return '' % escape_cdata( - value) - return '' - - def _getCapturedStderr(self): - if self._currentStderr: - value = self._currentStderr.getvalue() - if value: - return '' % escape_cdata( - value) - return '' - - def addError(self, test, err, capt=None): - """Add error output to Xunit report. 
- """ - taken = self._timeTaken() - - if issubclass(err[0], SkipTest): - type = 'skipped' - self.stats['skipped'] += 1 - else: - type = 'error' - self.stats['errors'] += 1 - - tb = format_exception(err, self.encoding) - id = test.id() - - self.errorlist.append( - u'' - u'<%(type)s type=%(errtype)s message=%(message)s>' - u'%(systemout)s%(systemerr)s' % - {'cls': self._quoteattr(id_split(id)[0]), - 'name': self._quoteattr(id_split(id)[-1]), - 'taken': taken, - 'type': type, - 'errtype': self._quoteattr(nice_classname(err[0])), - 'message': self._quoteattr(exc_message(err)), - 'tb': escape_cdata(tb), - 'systemout': self._getCapturedStdout(), - 'systemerr': self._getCapturedStderr(), - }) - - def addFailure(self, test, err, capt=None, tb_info=None): - """Add failure output to Xunit report. - """ - taken = self._timeTaken() - tb = format_exception(err, self.encoding) - self.stats['failures'] += 1 - id = test.id() - - self.errorlist.append( - u'' - u'' - u'%(systemout)s%(systemerr)s' % - {'cls': self._quoteattr(id_split(id)[0]), - 'name': self._quoteattr(id_split(id)[-1]), - 'taken': taken, - 'errtype': self._quoteattr(nice_classname(err[0])), - 'message': self._quoteattr(exc_message(err)), - 'tb': escape_cdata(tb), - 'systemout': self._getCapturedStdout(), - 'systemerr': self._getCapturedStderr(), - }) - - def addSuccess(self, test, capt=None): - """Add success output to Xunit report. - """ - taken = self._timeTaken() - self.stats['passes'] += 1 - id = test.id() - self.errorlist.append( - '%(systemout)s%(systemerr)s' % - {'cls': self._quoteattr(id_split(id)[0]), - 'name': self._quoteattr(id_split(id)[-1]), - 'taken': taken, - 'systemout': self._getCapturedStdout(), - 'systemerr': self._getCapturedStderr(), - }) diff --git a/lib/spack/external/nose/proxy.py b/lib/spack/external/nose/proxy.py deleted file mode 100644 index c2676cb195..0000000000 --- a/lib/spack/external/nose/proxy.py +++ /dev/null @@ -1,188 +0,0 @@ -""" -Result Proxy ------------- - -The result proxy wraps the result instance given to each test. It -performs two functions: enabling extended error/failure reporting -and calling plugins. - -As each result event is fired, plugins are called with the same event; -however, plugins are called with the nose.case.Test instance that -wraps the actual test. So when a test fails and calls -result.addFailure(self, err), the result proxy calls -addFailure(self.test, err) for each plugin. This allows plugins to -have a single stable interface for all test types, and also to -manipulate the test object itself by setting the `test` attribute of -the nose.case.Test that they receive. -""" -import logging -from nose.config import Config - - -log = logging.getLogger(__name__) - - -def proxied_attribute(local_attr, proxied_attr, doc): - """Create a property that proxies attribute ``proxied_attr`` through - the local attribute ``local_attr``. - """ - def fget(self): - return getattr(getattr(self, local_attr), proxied_attr) - def fset(self, value): - setattr(getattr(self, local_attr), proxied_attr, value) - def fdel(self): - delattr(getattr(self, local_attr), proxied_attr) - return property(fget, fset, fdel, doc) - - -class ResultProxyFactory(object): - """Factory for result proxies. Generates a ResultProxy bound to each test - and the result passed to the test. - """ - def __init__(self, config=None): - if config is None: - config = Config() - self.config = config - self.__prepared = False - self.__result = None - - def __call__(self, result, test): - """Return a ResultProxy for the current test. 
- - On first call, plugins are given a chance to replace the - result used for the remaining tests. If a plugin returns a - value from prepareTestResult, that object will be used as the - result for all tests. - """ - if not self.__prepared: - self.__prepared = True - plug_result = self.config.plugins.prepareTestResult(result) - if plug_result is not None: - self.__result = result = plug_result - if self.__result is not None: - result = self.__result - return ResultProxy(result, test, config=self.config) - - -class ResultProxy(object): - """Proxy to TestResults (or other results handler). - - One ResultProxy is created for each nose.case.Test. The result - proxy calls plugins with the nose.case.Test instance (instead of - the wrapped test case) as each result call is made. Finally, the - real result method is called, also with the nose.case.Test - instance as the test parameter. - - """ - def __init__(self, result, test, config=None): - if config is None: - config = Config() - self.config = config - self.plugins = config.plugins - self.result = result - self.test = test - - def __repr__(self): - return repr(self.result) - - def _prepareErr(self, err): - if not isinstance(err[1], Exception) and isinstance(err[0], type): - # Turn value back into an Exception (required in Python 3.x). - # Plugins do all sorts of crazy things with exception values. - # Convert it to a custom subclass of Exception with the same - # name as the actual exception to make it print correctly. - value = type(err[0].__name__, (Exception,), {})(err[1]) - err = (err[0], value, err[2]) - return err - - def assertMyTest(self, test): - # The test I was called with must be my .test or my - # .test's .test. or my .test.test's .case - - case = getattr(self.test, 'test', None) - assert (test is self.test - or test is case - or test is getattr(case, '_nose_case', None)), ( - "ResultProxy for %r (%s) was called with test %r (%s)" - % (self.test, id(self.test), test, id(test))) - - def afterTest(self, test): - self.assertMyTest(test) - self.plugins.afterTest(self.test) - if hasattr(self.result, "afterTest"): - self.result.afterTest(self.test) - - def beforeTest(self, test): - self.assertMyTest(test) - self.plugins.beforeTest(self.test) - if hasattr(self.result, "beforeTest"): - self.result.beforeTest(self.test) - - def addError(self, test, err): - self.assertMyTest(test) - plugins = self.plugins - plugin_handled = plugins.handleError(self.test, err) - if plugin_handled: - return - # test.passed is set in result, to account for error classes - formatted = plugins.formatError(self.test, err) - if formatted is not None: - err = formatted - plugins.addError(self.test, err) - self.result.addError(self.test, self._prepareErr(err)) - if not self.result.wasSuccessful() and self.config.stopOnError: - self.shouldStop = True - - def addFailure(self, test, err): - self.assertMyTest(test) - plugins = self.plugins - plugin_handled = plugins.handleFailure(self.test, err) - if plugin_handled: - return - self.test.passed = False - formatted = plugins.formatFailure(self.test, err) - if formatted is not None: - err = formatted - plugins.addFailure(self.test, err) - self.result.addFailure(self.test, self._prepareErr(err)) - if self.config.stopOnError: - self.shouldStop = True - - def addSkip(self, test, reason): - # 2.7 compat shim - from nose.plugins.skip import SkipTest - self.assertMyTest(test) - plugins = self.plugins - if not isinstance(reason, Exception): - # for Python 3.2+ - reason = Exception(reason) - plugins.addError(self.test, (SkipTest, 
reason, None)) - self.result.addSkip(self.test, reason) - - def addSuccess(self, test): - self.assertMyTest(test) - self.plugins.addSuccess(self.test) - self.result.addSuccess(self.test) - - def startTest(self, test): - self.assertMyTest(test) - self.plugins.startTest(self.test) - self.result.startTest(self.test) - - def stop(self): - self.result.stop() - - def stopTest(self, test): - self.assertMyTest(test) - self.plugins.stopTest(self.test) - self.result.stopTest(self.test) - - # proxied attributes - shouldStop = proxied_attribute('result', 'shouldStop', - """Should the test run stop?""") - errors = proxied_attribute('result', 'errors', - """Tests that raised an exception""") - failures = proxied_attribute('result', 'failures', - """Tests that failed""") - testsRun = proxied_attribute('result', 'testsRun', - """Number of tests run""") diff --git a/lib/spack/external/nose/pyversion.py b/lib/spack/external/nose/pyversion.py deleted file mode 100644 index 091238da75..0000000000 --- a/lib/spack/external/nose/pyversion.py +++ /dev/null @@ -1,215 +0,0 @@ -""" -This module contains fixups for using nose under different versions of Python. -""" -import sys -import os -import traceback -import types -import inspect -import nose.util - -__all__ = ['make_instancemethod', 'cmp_to_key', 'sort_list', 'ClassType', - 'TypeType', 'UNICODE_STRINGS', 'unbound_method', 'ismethod', - 'bytes_', 'is_base_exception', 'force_unicode', 'exc_to_unicode', - 'format_exception'] - -# In Python 3.x, all strings are unicode (the call to 'unicode()' in the 2.x -# source will be replaced with 'str()' when running 2to3, so this test will -# then become true) -UNICODE_STRINGS = (type(unicode()) == type(str())) - -if sys.version_info[:2] < (3, 0): - def force_unicode(s, encoding='UTF-8'): - try: - s = unicode(s) - except UnicodeDecodeError: - s = str(s).decode(encoding, 'replace') - - return s -else: - def force_unicode(s, encoding='UTF-8'): - return str(s) - -# new.instancemethod() is obsolete for new-style classes (Python 3.x) -# We need to use descriptor methods instead. -try: - import new - def make_instancemethod(function, instance): - return new.instancemethod(function.im_func, instance, - instance.__class__) -except ImportError: - def make_instancemethod(function, instance): - return function.__get__(instance, instance.__class__) - -# To be forward-compatible, we do all list sorts using keys instead of cmp -# functions. However, part of the unittest.TestLoader API involves a -# user-provideable cmp function, so we need some way to convert that. -def cmp_to_key(mycmp): - 'Convert a cmp= function into a key= function' - class Key(object): - def __init__(self, obj): - self.obj = obj - def __lt__(self, other): - return mycmp(self.obj, other.obj) < 0 - def __gt__(self, other): - return mycmp(self.obj, other.obj) > 0 - def __eq__(self, other): - return mycmp(self.obj, other.obj) == 0 - return Key - -# Python 2.3 also does not support list-sorting by key, so we need to convert -# keys to cmp functions if we're running on old Python.. -if sys.version_info < (2, 4): - def sort_list(l, key, reverse=False): - if reverse: - return l.sort(lambda a, b: cmp(key(b), key(a))) - else: - return l.sort(lambda a, b: cmp(key(a), key(b))) -else: - def sort_list(l, key, reverse=False): - return l.sort(key=key, reverse=reverse) - -# In Python 3.x, all objects are "new style" objects descended from 'type', and -# thus types.ClassType and types.TypeType don't exist anymore. For -# compatibility, we make sure they still work. 
-if hasattr(types, 'ClassType'): - ClassType = types.ClassType - TypeType = types.TypeType -else: - ClassType = type - TypeType = type - -# The following emulates the behavior (we need) of an 'unbound method' under -# Python 3.x (namely, the ability to have a class associated with a function -# definition so that things can do stuff based on its associated class) -class UnboundMethod: - def __init__(self, cls, func): - # Make sure we have all the same attributes as the original function, - # so that the AttributeSelector plugin will work correctly... - self.__dict__ = func.__dict__.copy() - self._func = func - self.__self__ = UnboundSelf(cls) - if sys.version_info < (3, 0): - self.im_class = cls - self.__doc__ = getattr(func, '__doc__', None) - - def address(self): - cls = self.__self__.cls - modname = cls.__module__ - module = sys.modules[modname] - filename = getattr(module, '__file__', None) - if filename is not None: - filename = os.path.abspath(filename) - return (nose.util.src(filename), modname, "%s.%s" % (cls.__name__, - self._func.__name__)) - - def __call__(self, *args, **kwargs): - return self._func(*args, **kwargs) - - def __getattr__(self, attr): - return getattr(self._func, attr) - - def __repr__(self): - return '' % (self.__self__.cls.__name__, - self._func.__name__) - -class UnboundSelf: - def __init__(self, cls): - self.cls = cls - - # We have to do this hackery because Python won't let us override the - # __class__ attribute... - def __getattribute__(self, attr): - if attr == '__class__': - return self.cls - else: - return object.__getattribute__(self, attr) - -def unbound_method(cls, func): - if inspect.ismethod(func): - return func - if not inspect.isfunction(func): - raise TypeError('%s is not a function' % (repr(func),)) - return UnboundMethod(cls, func) - -def ismethod(obj): - return inspect.ismethod(obj) or isinstance(obj, UnboundMethod) - - -# Make a pseudo-bytes function that can be called without the encoding arg: -if sys.version_info >= (3, 0): - def bytes_(s, encoding='utf8'): - if isinstance(s, bytes): - return s - return bytes(s, encoding) -else: - def bytes_(s, encoding=None): - return str(s) - - -if sys.version_info[:2] >= (2, 6): - def isgenerator(o): - if isinstance(o, UnboundMethod): - o = o._func - return inspect.isgeneratorfunction(o) or inspect.isgenerator(o) -else: - try: - from compiler.consts import CO_GENERATOR - except ImportError: - # IronPython doesn't have a complier module - CO_GENERATOR=0x20 - - def isgenerator(func): - try: - return func.func_code.co_flags & CO_GENERATOR != 0 - except AttributeError: - return False - -# Make a function to help check if an exception is derived from BaseException. -# In Python 2.4, we just use Exception instead. 
-if sys.version_info[:2] < (2, 5): - def is_base_exception(exc): - return isinstance(exc, Exception) -else: - def is_base_exception(exc): - return isinstance(exc, BaseException) - -if sys.version_info[:2] < (3, 0): - def exc_to_unicode(ev, encoding='utf-8'): - if is_base_exception(ev): - if not hasattr(ev, '__unicode__'): - # 2.5- - if not hasattr(ev, 'message'): - # 2.4 - msg = len(ev.args) and ev.args[0] or '' - else: - msg = ev.message - msg = force_unicode(msg, encoding=encoding) - clsname = force_unicode(ev.__class__.__name__, - encoding=encoding) - ev = u'%s: %s' % (clsname, msg) - elif not isinstance(ev, unicode): - ev = repr(ev) - - return force_unicode(ev, encoding=encoding) -else: - def exc_to_unicode(ev, encoding='utf-8'): - return str(ev) - -def format_exception(exc_info, encoding='UTF-8'): - ec, ev, tb = exc_info - - # Our exception object may have been turned into a string, and Python 3's - # traceback.format_exception() doesn't take kindly to that (it expects an - # actual exception object). So we work around it, by doing the work - # ourselves if ev is not an exception object. - if not is_base_exception(ev): - tb_data = force_unicode( - ''.join(traceback.format_tb(tb)), - encoding) - ev = exc_to_unicode(ev) - return tb_data + ev - else: - return force_unicode( - ''.join(traceback.format_exception(*exc_info)), - encoding) diff --git a/lib/spack/external/nose/result.py b/lib/spack/external/nose/result.py deleted file mode 100644 index f974a14ae2..0000000000 --- a/lib/spack/external/nose/result.py +++ /dev/null @@ -1,200 +0,0 @@ -""" -Test Result ------------ - -Provides a TextTestResult that extends unittest's _TextTestResult to -provide support for error classes (such as the builtin skip and -deprecated classes), and hooks for plugins to take over or extend -reporting. -""" - -import logging -try: - # 2.7+ - from unittest.runner import _TextTestResult -except ImportError: - from unittest import _TextTestResult -from nose.config import Config -from nose.util import isclass, ln as _ln # backwards compat - -log = logging.getLogger('nose.result') - - -def _exception_detail(exc): - # this is what stdlib module traceback does - try: - return str(exc) - except: - return '' % type(exc).__name__ - - -class TextTestResult(_TextTestResult): - """Text test result that extends unittest's default test result - support for a configurable set of errorClasses (eg, Skip, - Deprecated, TODO) that extend the errors/failures/success triad. - """ - def __init__(self, stream, descriptions, verbosity, config=None, - errorClasses=None): - if errorClasses is None: - errorClasses = {} - self.errorClasses = errorClasses - if config is None: - config = Config() - self.config = config - _TextTestResult.__init__(self, stream, descriptions, verbosity) - - def addSkip(self, test, reason): - # 2.7 skip compat - from nose.plugins.skip import SkipTest - if SkipTest in self.errorClasses: - storage, label, isfail = self.errorClasses[SkipTest] - storage.append((test, reason)) - self.printLabel(label, (SkipTest, reason, None)) - - def addError(self, test, err): - """Overrides normal addError to add support for - errorClasses. If the exception is a registered class, the - error will be added to the list for that class, not errors. 
- """ - ec, ev, tb = err - try: - exc_info = self._exc_info_to_string(err, test) - except TypeError: - # 2.3 compat - exc_info = self._exc_info_to_string(err) - for cls, (storage, label, isfail) in self.errorClasses.items(): - #if 'Skip' in cls.__name__ or 'Skip' in ec.__name__: - # from nose.tools import set_trace - # set_trace() - if isclass(ec) and issubclass(ec, cls): - if isfail: - test.passed = False - storage.append((test, exc_info)) - self.printLabel(label, err) - return - self.errors.append((test, exc_info)) - test.passed = False - self.printLabel('ERROR') - - # override to bypass changes in 2.7 - def getDescription(self, test): - if self.descriptions: - return test.shortDescription() or str(test) - else: - return str(test) - - def printLabel(self, label, err=None): - # Might get patched into a streamless result - stream = getattr(self, 'stream', None) - if stream is not None: - if self.showAll: - message = [label] - if err: - detail = _exception_detail(err[1]) - if detail: - message.append(detail) - stream.writeln(": ".join(message)) - elif self.dots: - stream.write(label[:1]) - - def printErrors(self): - """Overrides to print all errorClasses errors as well. - """ - _TextTestResult.printErrors(self) - for cls in self.errorClasses.keys(): - storage, label, isfail = self.errorClasses[cls] - if isfail: - self.printErrorList(label, storage) - # Might get patched into a result with no config - if hasattr(self, 'config'): - self.config.plugins.report(self.stream) - - def printSummary(self, start, stop): - """Called by the test runner to print the final summary of test - run results. - """ - write = self.stream.write - writeln = self.stream.writeln - taken = float(stop - start) - run = self.testsRun - plural = run != 1 and "s" or "" - - writeln(self.separator2) - writeln("Ran %s test%s in %.3fs" % (run, plural, taken)) - writeln() - - summary = {} - eckeys = self.errorClasses.keys() - for cls in eckeys: - storage, label, isfail = self.errorClasses[cls] - count = len(storage) - if not count: - continue - summary[label] = count - if len(self.failures): - summary['failures'] = len(self.failures) - if len(self.errors): - summary['errors'] = len(self.errors) - - if not self.wasSuccessful(): - write("FAILED") - else: - write("OK") - items = summary.items() - if items: - items.sort() - write(" (") - write(", ".join(["%s=%s" % (label, count) for - label, count in items])) - writeln(")") - else: - writeln() - - def wasSuccessful(self): - """Overrides to check that there are no errors in errorClasses - lists that are marked as errors and should cause a run to - fail. 
- """ - if self.errors or self.failures: - return False - for cls in self.errorClasses.keys(): - storage, label, isfail = self.errorClasses[cls] - if not isfail: - continue - if storage: - return False - return True - - def _addError(self, test, err): - try: - exc_info = self._exc_info_to_string(err, test) - except TypeError: - # 2.3: does not take test arg - exc_info = self._exc_info_to_string(err) - self.errors.append((test, exc_info)) - if self.showAll: - self.stream.write('ERROR') - elif self.dots: - self.stream.write('E') - - def _exc_info_to_string(self, err, test=None): - # 2.7 skip compat - from nose.plugins.skip import SkipTest - if isclass(err[0]) and issubclass(err[0], SkipTest): - return str(err[1]) - # 2.3/2.4 -- 2.4 passes test, 2.3 does not - try: - return _TextTestResult._exc_info_to_string(self, err, test) - except TypeError: - # 2.3: does not take test arg - return _TextTestResult._exc_info_to_string(self, err) - - -def ln(*arg, **kw): - from warnings import warn - warn("ln() has moved to nose.util from nose.result and will be removed " - "from nose.result in a future release. Please update your imports ", - DeprecationWarning) - return _ln(*arg, **kw) - - diff --git a/lib/spack/external/nose/selector.py b/lib/spack/external/nose/selector.py deleted file mode 100644 index b63f7af0b1..0000000000 --- a/lib/spack/external/nose/selector.py +++ /dev/null @@ -1,251 +0,0 @@ -""" -Test Selection --------------- - -Test selection is handled by a Selector. The test loader calls the -appropriate selector method for each object it encounters that it -thinks may be a test. -""" -import logging -import os -import unittest -from nose.config import Config -from nose.util import split_test_name, src, getfilename, getpackage, ispackage, is_executable - -log = logging.getLogger(__name__) - -__all__ = ['Selector', 'defaultSelector', 'TestAddress'] - - -# for efficiency and easier mocking -op_join = os.path.join -op_basename = os.path.basename -op_exists = os.path.exists -op_splitext = os.path.splitext -op_isabs = os.path.isabs -op_abspath = os.path.abspath - - -class Selector(object): - """Core test selector. Examines test candidates and determines whether, - given the specified configuration, the test candidate should be selected - as a test. - """ - def __init__(self, config): - if config is None: - config = Config() - self.configure(config) - - def configure(self, config): - self.config = config - self.exclude = config.exclude - self.ignoreFiles = config.ignoreFiles - self.include = config.include - self.plugins = config.plugins - self.match = config.testMatch - - def matches(self, name): - """Does the name match my requirements? - - To match, a name must match config.testMatch OR config.include - and it must not match config.exclude - """ - return ((self.match.search(name) - or (self.include and - filter(None, - [inc.search(name) for inc in self.include]))) - and ((not self.exclude) - or not filter(None, - [exc.search(name) for exc in self.exclude]) - )) - - def wantClass(self, cls): - """Is the class a wanted test class? - - A class must be a unittest.TestCase subclass, or match test name - requirements. Classes that start with _ are always excluded. 
- """ - declared = getattr(cls, '__test__', None) - if declared is not None: - wanted = declared - else: - wanted = (not cls.__name__.startswith('_') - and (issubclass(cls, unittest.TestCase) - or self.matches(cls.__name__))) - - plug_wants = self.plugins.wantClass(cls) - if plug_wants is not None: - log.debug("Plugin setting selection of %s to %s", cls, plug_wants) - wanted = plug_wants - log.debug("wantClass %s? %s", cls, wanted) - return wanted - - def wantDirectory(self, dirname): - """Is the directory a wanted test directory? - - All package directories match, so long as they do not match exclude. - All other directories must match test requirements. - """ - tail = op_basename(dirname) - if ispackage(dirname): - wanted = (not self.exclude - or not filter(None, - [exc.search(tail) for exc in self.exclude] - )) - else: - wanted = (self.matches(tail) - or (self.config.srcDirs - and tail in self.config.srcDirs)) - plug_wants = self.plugins.wantDirectory(dirname) - if plug_wants is not None: - log.debug("Plugin setting selection of %s to %s", - dirname, plug_wants) - wanted = plug_wants - log.debug("wantDirectory %s? %s", dirname, wanted) - return wanted - - def wantFile(self, file): - """Is the file a wanted test file? - - The file must be a python source file and match testMatch or - include, and not match exclude. Files that match ignore are *never* - wanted, regardless of plugin, testMatch, include or exclude settings. - """ - # never, ever load files that match anything in ignore - # (.* _* and *setup*.py by default) - base = op_basename(file) - ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles - if ignore_this.search(base) ] - if ignore_matches: - log.debug('%s matches ignoreFiles pattern; skipped', - base) - return False - if not self.config.includeExe and is_executable(file): - log.info('%s is executable; skipped', file) - return False - dummy, ext = op_splitext(base) - pysrc = ext == '.py' - - wanted = pysrc and self.matches(base) - plug_wants = self.plugins.wantFile(file) - if plug_wants is not None: - log.debug("plugin setting want %s to %s", file, plug_wants) - wanted = plug_wants - log.debug("wantFile %s? %s", file, wanted) - return wanted - - def wantFunction(self, function): - """Is the function a test function? - """ - try: - if hasattr(function, 'compat_func_name'): - funcname = function.compat_func_name - else: - funcname = function.__name__ - except AttributeError: - # not a function - return False - declared = getattr(function, '__test__', None) - if declared is not None: - wanted = declared - else: - wanted = not funcname.startswith('_') and self.matches(funcname) - plug_wants = self.plugins.wantFunction(function) - if plug_wants is not None: - wanted = plug_wants - log.debug("wantFunction %s? %s", function, wanted) - return wanted - - def wantMethod(self, method): - """Is the method a test method? - """ - try: - method_name = method.__name__ - except AttributeError: - # not a method - return False - if method_name.startswith('_'): - # never collect 'private' methods - return False - declared = getattr(method, '__test__', None) - if declared is not None: - wanted = declared - else: - wanted = self.matches(method_name) - plug_wants = self.plugins.wantMethod(method) - if plug_wants is not None: - wanted = plug_wants - log.debug("wantMethod %s? %s", method, wanted) - return wanted - - def wantModule(self, module): - """Is the module a test module? - - The tail of the module name must match test requirements. One exception: - we always want __main__. 
- """ - declared = getattr(module, '__test__', None) - if declared is not None: - wanted = declared - else: - wanted = self.matches(module.__name__.split('.')[-1]) \ - or module.__name__ == '__main__' - plug_wants = self.plugins.wantModule(module) - if plug_wants is not None: - wanted = plug_wants - log.debug("wantModule %s? %s", module, wanted) - return wanted - -defaultSelector = Selector - - -class TestAddress(object): - """A test address represents a user's request to run a particular - test. The user may specify a filename or module (or neither), - and/or a callable (a class, function, or method). The naming - format for test addresses is: - - filename_or_module:callable - - Filenames that are not absolute will be made absolute relative to - the working dir. - - The filename or module part will be considered a module name if it - doesn't look like a file, that is, if it doesn't exist on the file - system and it doesn't contain any directory separators and it - doesn't end in .py. - - Callables may be a class name, function name, method name, or - class.method specification. - """ - def __init__(self, name, workingDir=None): - if workingDir is None: - workingDir = os.getcwd() - self.name = name - self.workingDir = workingDir - self.filename, self.module, self.call = split_test_name(name) - log.debug('Test name %s resolved to file %s, module %s, call %s', - name, self.filename, self.module, self.call) - if self.filename is None: - if self.module is not None: - self.filename = getfilename(self.module, self.workingDir) - if self.filename: - self.filename = src(self.filename) - if not op_isabs(self.filename): - self.filename = op_abspath(op_join(workingDir, - self.filename)) - if self.module is None: - self.module = getpackage(self.filename) - log.debug( - 'Final resolution of test name %s: file %s module %s call %s', - name, self.filename, self.module, self.call) - - def totuple(self): - return (self.filename, self.module, self.call) - - def __str__(self): - return self.name - - def __repr__(self): - return "%s: (%s, %s, %s)" % (self.name, self.filename, - self.module, self.call) diff --git a/lib/spack/external/nose/sphinx/__init__.py b/lib/spack/external/nose/sphinx/__init__.py deleted file mode 100644 index 2ae28399f5..0000000000 --- a/lib/spack/external/nose/sphinx/__init__.py +++ /dev/null @@ -1 +0,0 @@ -pass diff --git a/lib/spack/external/nose/sphinx/pluginopts.py b/lib/spack/external/nose/sphinx/pluginopts.py deleted file mode 100644 index d2b284ab27..0000000000 --- a/lib/spack/external/nose/sphinx/pluginopts.py +++ /dev/null @@ -1,189 +0,0 @@ -""" -Adds a sphinx directive that can be used to automatically document a plugin. - -this:: - - .. autoplugin :: nose.plugins.foo - :plugin: Pluggy - -produces:: - - .. automodule :: nose.plugins.foo - - Options - ------- - - .. cmdoption :: --foo=BAR, --fooble=BAR - - Do the foo thing to the new thing. - - Plugin - ------ - - .. autoclass :: nose.plugins.foo.Pluggy - :members: - - Source - ------ - - .. 
include :: path/to/nose/plugins/foo.py - :literal: - -""" -import os -try: - from docutils import nodes, utils - from docutils.statemachine import ViewList - from docutils.parsers.rst import directives -except ImportError: - pass # won't run anyway - -from nose.util import resolve_name -from nose.plugins.base import Plugin -from nose.plugins.manager import BuiltinPluginManager -from nose.config import Config -from nose.core import TestProgram -from inspect import isclass - - -def autoplugin_directive(dirname, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - mod_name = arguments[0] - mod = resolve_name(mod_name) - plug_name = options.get('plugin', None) - if plug_name: - obj = getattr(mod, plug_name) - else: - for entry in dir(mod): - obj = getattr(mod, entry) - if isclass(obj) and issubclass(obj, Plugin) and obj is not Plugin: - plug_name = '%s.%s' % (mod_name, entry) - break - - # mod docstring - rst = ViewList() - rst.append('.. automodule :: %s\n' % mod_name, '') - rst.append('', '') - - # options - rst.append('Options', '') - rst.append('-------', '') - rst.append('', '') - - plug = obj() - opts = OptBucket() - plug.options(opts, {}) - for opt in opts: - rst.append(opt.options(), '') - rst.append(' \n', '') - rst.append(' ' + opt.help + '\n', '') - rst.append('\n', '') - - # plugin class - rst.append('Plugin', '') - rst.append('------', '') - rst.append('', '') - - rst.append('.. autoclass :: %s\n' % plug_name, '') - rst.append(' :members:\n', '') - rst.append(' :show-inheritance:\n', '') - rst.append('', '') - - # source - rst.append('Source', '') - rst.append('------', '') - rst.append( - '.. include :: %s\n' % utils.relative_path( - state_machine.document['source'], - os.path.abspath(mod.__file__.replace('.pyc', '.py'))), - '') - rst.append(' :literal:\n', '') - rst.append('', '') - - node = nodes.section() - node.document = state.document - surrounding_title_styles = state.memo.title_styles - surrounding_section_level = state.memo.section_level - state.memo.title_styles = [] - state.memo.section_level = 0 - state.nested_parse(rst, 0, node, match_titles=1) - state.memo.title_styles = surrounding_title_styles - state.memo.section_level = surrounding_section_level - - return node.children - - -def autohelp_directive(dirname, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - """produces rst from nose help""" - config = Config(parserClass=OptBucket, - plugins=BuiltinPluginManager()) - parser = config.getParser(TestProgram.usage()) - rst = ViewList() - for line in parser.format_help().split('\n'): - rst.append(line, '') - - rst.append('Options', '') - rst.append('-------', '') - rst.append('', '') - for opt in parser: - rst.append(opt.options(), '') - rst.append(' \n', '') - rst.append(' ' + opt.help + '\n', '') - rst.append('\n', '') - node = nodes.section() - node.document = state.document - surrounding_title_styles = state.memo.title_styles - surrounding_section_level = state.memo.section_level - state.memo.title_styles = [] - state.memo.section_level = 0 - state.nested_parse(rst, 0, node, match_titles=1) - state.memo.title_styles = surrounding_title_styles - state.memo.section_level = surrounding_section_level - - return node.children - - -class OptBucket(object): - def __init__(self, doc=None, prog='nosetests'): - self.opts = [] - self.doc = doc - self.prog = prog - - def __iter__(self): - return iter(self.opts) - - def format_help(self): - return self.doc.replace('%prog', self.prog).replace(':\n', 
'::\n') - - def add_option(self, *arg, **kw): - self.opts.append(Opt(*arg, **kw)) - - -class Opt(object): - def __init__(self, *arg, **kw): - self.opts = arg - self.action = kw.pop('action', None) - self.default = kw.pop('default', None) - self.metavar = kw.pop('metavar', None) - self.help = kw.pop('help', None) - - def options(self): - buf = [] - for optstring in self.opts: - desc = optstring - if self.action not in ('store_true', 'store_false'): - desc += '=%s' % self.meta(optstring) - buf.append(desc) - return '.. cmdoption :: ' + ', '.join(buf) - - def meta(self, optstring): - # FIXME optparser default metavar? - return self.metavar or 'DEFAULT' - - -def setup(app): - app.add_directive('autoplugin', - autoplugin_directive, 1, (1, 0, 1), - plugin=directives.unchanged) - app.add_directive('autohelp', autohelp_directive, 0, (0, 0, 1)) diff --git a/lib/spack/external/nose/suite.py b/lib/spack/external/nose/suite.py deleted file mode 100644 index a831105e34..0000000000 --- a/lib/spack/external/nose/suite.py +++ /dev/null @@ -1,609 +0,0 @@ -""" -Test Suites ------------ - -Provides a LazySuite, which is a suite whose test list is a generator -function, and ContextSuite,which can run fixtures (setup/teardown -functions or methods) for the context that contains its tests. - -""" -from __future__ import generators - -import logging -import sys -import unittest -from nose.case import Test -from nose.config import Config -from nose.proxy import ResultProxyFactory -from nose.util import isclass, resolve_name, try_run - -if sys.platform == 'cli': - if sys.version_info[:2] < (2, 6): - import clr - clr.AddReference("IronPython") - from IronPython.Runtime.Exceptions import StringException - else: - class StringException(Exception): - pass - -log = logging.getLogger(__name__) -#log.setLevel(logging.DEBUG) - -# Singleton for default value -- see ContextSuite.__init__ below -_def = object() - - -def _strclass(cls): - return "%s.%s" % (cls.__module__, cls.__name__) - -class MixedContextError(Exception): - """Error raised when a context suite sees tests from more than - one context. - """ - pass - - -class LazySuite(unittest.TestSuite): - """A suite that may use a generator as its list of tests - """ - def __init__(self, tests=()): - """Initialize the suite. 
tests may be an iterable or a generator - """ - super(LazySuite, self).__init__() - self._set_tests(tests) - - def __iter__(self): - return iter(self._tests) - - def __repr__(self): - return "<%s tests=generator (%s)>" % ( - _strclass(self.__class__), id(self)) - - def __hash__(self): - return object.__hash__(self) - - __str__ = __repr__ - - def addTest(self, test): - self._precache.append(test) - - # added to bypass run changes in 2.7's unittest - def run(self, result): - for test in self._tests: - if result.shouldStop: - break - test(result) - return result - - def __nonzero__(self): - log.debug("tests in %s?", id(self)) - if self._precache: - return True - if self.test_generator is None: - return False - try: - test = self.test_generator.next() - if test is not None: - self._precache.append(test) - return True - except StopIteration: - pass - return False - - def _get_tests(self): - log.debug("precache is %s", self._precache) - for test in self._precache: - yield test - if self.test_generator is None: - return - for test in self.test_generator: - yield test - - def _set_tests(self, tests): - self._precache = [] - is_suite = isinstance(tests, unittest.TestSuite) - if callable(tests) and not is_suite: - self.test_generator = tests() - elif is_suite: - # Suites need special treatment: they must be called like - # tests for their setup/teardown to run (if any) - self.addTests([tests]) - self.test_generator = None - else: - self.addTests(tests) - self.test_generator = None - - _tests = property(_get_tests, _set_tests, None, - "Access the tests in this suite. Access is through a " - "generator, so iteration may not be repeatable.") - - -class ContextSuite(LazySuite): - """A suite with context. - - A ContextSuite executes fixtures (setup and teardown functions or - methods) for the context containing its tests. - - The context may be explicitly passed. If it is not, a context (or - nested set of contexts) will be constructed by examining the tests - in the suite. 
- """ - failureException = unittest.TestCase.failureException - was_setup = False - was_torndown = False - classSetup = ('setup_class', 'setup_all', 'setupClass', 'setupAll', - 'setUpClass', 'setUpAll') - classTeardown = ('teardown_class', 'teardown_all', 'teardownClass', - 'teardownAll', 'tearDownClass', 'tearDownAll') - moduleSetup = ('setup_module', 'setupModule', 'setUpModule', 'setup', - 'setUp') - moduleTeardown = ('teardown_module', 'teardownModule', 'tearDownModule', - 'teardown', 'tearDown') - packageSetup = ('setup_package', 'setupPackage', 'setUpPackage') - packageTeardown = ('teardown_package', 'teardownPackage', - 'tearDownPackage') - - def __init__(self, tests=(), context=None, factory=None, - config=None, resultProxy=None, can_split=True): - log.debug("Context suite for %s (%s) (%s)", tests, context, id(self)) - self.context = context - self.factory = factory - if config is None: - config = Config() - self.config = config - self.resultProxy = resultProxy - self.has_run = False - self.can_split = can_split - self.error_context = None - super(ContextSuite, self).__init__(tests) - - def __repr__(self): - return "<%s context=%s>" % ( - _strclass(self.__class__), - getattr(self.context, '__name__', self.context)) - __str__ = __repr__ - - def id(self): - if self.error_context: - return '%s:%s' % (repr(self), self.error_context) - else: - return repr(self) - - def __hash__(self): - return object.__hash__(self) - - # 2.3 compat -- force 2.4 call sequence - def __call__(self, *arg, **kw): - return self.run(*arg, **kw) - - def exc_info(self): - """Hook for replacing error tuple output - """ - return sys.exc_info() - - def _exc_info(self): - """Bottleneck to fix up IronPython string exceptions - """ - e = self.exc_info() - if sys.platform == 'cli': - if isinstance(e[0], StringException): - # IronPython throws these StringExceptions, but - # traceback checks type(etype) == str. Make a real - # string here. - e = (str(e[0]), e[1], e[2]) - - return e - - def run(self, result): - """Run tests in suite inside of suite fixtures. 
- """ - # proxy the result for myself - log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests) - #import pdb - #pdb.set_trace() - if self.resultProxy: - result, orig = self.resultProxy(result, self), result - else: - result, orig = result, result - try: - self.setUp() - except KeyboardInterrupt: - raise - except: - self.error_context = 'setup' - result.addError(self, self._exc_info()) - return - try: - for test in self._tests: - if result.shouldStop: - log.debug("stopping") - break - # each nose.case.Test will create its own result proxy - # so the cases need the original result, to avoid proxy - # chains - test(orig) - finally: - self.has_run = True - try: - self.tearDown() - except KeyboardInterrupt: - raise - except: - self.error_context = 'teardown' - result.addError(self, self._exc_info()) - - def hasFixtures(self, ctx_callback=None): - context = self.context - if context is None: - return False - if self.implementsAnyFixture(context, ctx_callback=ctx_callback): - return True - # My context doesn't have any, but its ancestors might - factory = self.factory - if factory: - ancestors = factory.context.get(self, []) - for ancestor in ancestors: - if self.implementsAnyFixture( - ancestor, ctx_callback=ctx_callback): - return True - return False - - def implementsAnyFixture(self, context, ctx_callback): - if isclass(context): - names = self.classSetup + self.classTeardown - else: - names = self.moduleSetup + self.moduleTeardown - if hasattr(context, '__path__'): - names += self.packageSetup + self.packageTeardown - # If my context has any fixture attribute, I have fixtures - fixt = False - for m in names: - if hasattr(context, m): - fixt = True - break - if ctx_callback is None: - return fixt - return ctx_callback(context, fixt) - - def setUp(self): - log.debug("suite %s setUp called, tests: %s", id(self), self._tests) - if not self: - # I have no tests - log.debug("suite %s has no tests", id(self)) - return - if self.was_setup: - log.debug("suite %s already set up", id(self)) - return - context = self.context - if context is None: - return - # before running my own context's setup, I need to - # ask the factory if my context's contexts' setups have been run - factory = self.factory - if factory: - # get a copy, since we'll be destroying it as we go - ancestors = factory.context.get(self, [])[:] - while ancestors: - ancestor = ancestors.pop() - log.debug("ancestor %s may need setup", ancestor) - if ancestor in factory.was_setup: - continue - log.debug("ancestor %s does need setup", ancestor) - self.setupContext(ancestor) - if not context in factory.was_setup: - self.setupContext(context) - else: - self.setupContext(context) - self.was_setup = True - log.debug("completed suite setup") - - def setupContext(self, context): - self.config.plugins.startContext(context) - log.debug("%s setup context %s", self, context) - if self.factory: - if context in self.factory.was_setup: - return - # note that I ran the setup for this context, so that I'll run - # the teardown in my teardown - self.factory.was_setup[context] = self - if isclass(context): - names = self.classSetup - else: - names = self.moduleSetup - if hasattr(context, '__path__'): - names = self.packageSetup + names - try_run(context, names) - - def shortDescription(self): - if self.context is None: - return "test suite" - return "test suite for %s" % self.context - - def tearDown(self): - log.debug('context teardown') - if not self.was_setup or self.was_torndown: - log.debug( - "No reason to teardown (was_setup? 
%s was_torndown? %s)" - % (self.was_setup, self.was_torndown)) - return - self.was_torndown = True - context = self.context - if context is None: - log.debug("No context to tear down") - return - - # for each ancestor... if the ancestor was setup - # and I did the setup, I can do teardown - factory = self.factory - if factory: - ancestors = factory.context.get(self, []) + [context] - for ancestor in ancestors: - log.debug('ancestor %s may need teardown', ancestor) - if not ancestor in factory.was_setup: - log.debug('ancestor %s was not setup', ancestor) - continue - if ancestor in factory.was_torndown: - log.debug('ancestor %s already torn down', ancestor) - continue - setup = factory.was_setup[ancestor] - log.debug("%s setup ancestor %s", setup, ancestor) - if setup is self: - self.teardownContext(ancestor) - else: - self.teardownContext(context) - - def teardownContext(self, context): - log.debug("%s teardown context %s", self, context) - if self.factory: - if context in self.factory.was_torndown: - return - self.factory.was_torndown[context] = self - if isclass(context): - names = self.classTeardown - else: - names = self.moduleTeardown - if hasattr(context, '__path__'): - names = self.packageTeardown + names - try_run(context, names) - self.config.plugins.stopContext(context) - - # FIXME the wrapping has to move to the factory? - def _get_wrapped_tests(self): - for test in self._get_tests(): - if isinstance(test, Test) or isinstance(test, unittest.TestSuite): - yield test - else: - yield Test(test, - config=self.config, - resultProxy=self.resultProxy) - - _tests = property(_get_wrapped_tests, LazySuite._set_tests, None, - "Access the tests in this suite. Tests are returned " - "inside of a context wrapper.") - - -class ContextSuiteFactory(object): - """Factory for ContextSuites. Called with a collection of tests, - the factory decides on a hierarchy of contexts by introspecting - the collection or the tests themselves to find the objects - containing the test objects. It always returns one suite, but that - suite may consist of a hierarchy of nested suites. - """ - suiteClass = ContextSuite - def __init__(self, config=None, suiteClass=None, resultProxy=_def): - if config is None: - config = Config() - self.config = config - if suiteClass is not None: - self.suiteClass = suiteClass - # Using a singleton to represent default instead of None allows - # passing resultProxy=None to turn proxying off. - if resultProxy is _def: - resultProxy = ResultProxyFactory(config=config) - self.resultProxy = resultProxy - self.suites = {} - self.context = {} - self.was_setup = {} - self.was_torndown = {} - - def __call__(self, tests, **kw): - """Return ``ContextSuite`` for tests. ``tests`` may either - be a callable (in which case the resulting ContextSuite will - have no parent context and be evaluated lazily) or an - iterable. In that case the tests will wrapped in - nose.case.Test, be examined and the context of each found and a - suite of suites returned, organized into a stack with the - outermost suites belonging to the outermost contexts. 
- """ - log.debug("Create suite for %s", tests) - context = kw.pop('context', getattr(tests, 'context', None)) - log.debug("tests %s context %s", tests, context) - if context is None: - tests = self.wrapTests(tests) - try: - context = self.findContext(tests) - except MixedContextError: - return self.makeSuite(self.mixedSuites(tests), None, **kw) - return self.makeSuite(tests, context, **kw) - - def ancestry(self, context): - """Return the ancestry of the context (that is, all of the - packages and modules containing the context), in order of - descent with the outermost ancestor last. - This method is a generator. - """ - log.debug("get ancestry %s", context) - if context is None: - return - # Methods include reference to module they are defined in, we - # don't want that, instead want the module the class is in now - # (classes are re-ancestored elsewhere). - if hasattr(context, 'im_class'): - context = context.im_class - elif hasattr(context, '__self__'): - context = context.__self__.__class__ - if hasattr(context, '__module__'): - ancestors = context.__module__.split('.') - elif hasattr(context, '__name__'): - ancestors = context.__name__.split('.')[:-1] - else: - raise TypeError("%s has no ancestors?" % context) - while ancestors: - log.debug(" %s ancestors %s", context, ancestors) - yield resolve_name('.'.join(ancestors)) - ancestors.pop() - - def findContext(self, tests): - if callable(tests) or isinstance(tests, unittest.TestSuite): - return None - context = None - for test in tests: - # Don't look at suites for contexts, only tests - ctx = getattr(test, 'context', None) - if ctx is None: - continue - if context is None: - context = ctx - elif context != ctx: - raise MixedContextError( - "Tests with different contexts in same suite! %s != %s" - % (context, ctx)) - return context - - def makeSuite(self, tests, context, **kw): - suite = self.suiteClass( - tests, context=context, config=self.config, factory=self, - resultProxy=self.resultProxy, **kw) - if context is not None: - self.suites.setdefault(context, []).append(suite) - self.context.setdefault(suite, []).append(context) - log.debug("suite %s has context %s", suite, - getattr(context, '__name__', None)) - for ancestor in self.ancestry(context): - self.suites.setdefault(ancestor, []).append(suite) - self.context[suite].append(ancestor) - log.debug("suite %s has ancestor %s", suite, ancestor.__name__) - return suite - - def mixedSuites(self, tests): - """The complex case where there are tests that don't all share - the same context. Groups tests into suites with common ancestors, - according to the following (essentially tail-recursive) procedure: - - Starting with the context of the first test, if it is not - None, look for tests in the remaining tests that share that - ancestor. If any are found, group into a suite with that - ancestor as the context, and replace the current suite with - that suite. Continue this process for each ancestor of the - first test, until all ancestors have been processed. At this - point if any tests remain, recurse with those tests as the - input, returning a list of the common suite (which may be the - suite or test we started with, if no common tests were found) - plus the results of recursion. 
- """ - if not tests: - return [] - head = tests.pop(0) - if not tests: - return [head] # short circuit when none are left to combine - suite = head # the common ancestry suite, so far - tail = tests[:] - context = getattr(head, 'context', None) - if context is not None: - ancestors = [context] + [a for a in self.ancestry(context)] - for ancestor in ancestors: - common = [suite] # tests with ancestor in common, so far - remain = [] # tests that remain to be processed - for test in tail: - found_common = False - test_ctx = getattr(test, 'context', None) - if test_ctx is None: - remain.append(test) - continue - if test_ctx is ancestor: - common.append(test) - continue - for test_ancestor in self.ancestry(test_ctx): - if test_ancestor is ancestor: - common.append(test) - found_common = True - break - if not found_common: - remain.append(test) - if common: - suite = self.makeSuite(common, ancestor) - tail = self.mixedSuites(remain) - return [suite] + tail - - def wrapTests(self, tests): - log.debug("wrap %s", tests) - if callable(tests) or isinstance(tests, unittest.TestSuite): - log.debug("I won't wrap") - return tests - wrapped = [] - for test in tests: - log.debug("wrapping %s", test) - if isinstance(test, Test) or isinstance(test, unittest.TestSuite): - wrapped.append(test) - elif isinstance(test, ContextList): - wrapped.append(self.makeSuite(test, context=test.context)) - else: - wrapped.append( - Test(test, config=self.config, resultProxy=self.resultProxy) - ) - return wrapped - - -class ContextList(object): - """Not quite a suite -- a group of tests in a context. This is used - to hint the ContextSuiteFactory about what context the tests - belong to, in cases where it may be ambiguous or missing. - """ - def __init__(self, tests, context=None): - self.tests = tests - self.context = context - - def __iter__(self): - return iter(self.tests) - - -class FinalizingSuiteWrapper(unittest.TestSuite): - """Wraps suite and calls final function after suite has - executed. Used to call final functions in cases (like running in - the standard test runner) where test running is not under nose's - control. - """ - def __init__(self, suite, finalize): - super(FinalizingSuiteWrapper, self).__init__() - self.suite = suite - self.finalize = finalize - - def __call__(self, *arg, **kw): - return self.run(*arg, **kw) - - # 2.7 compat - def __iter__(self): - return iter(self.suite) - - def run(self, *arg, **kw): - try: - return self.suite(*arg, **kw) - finally: - self.finalize(*arg, **kw) - - -# backwards compat -- sort of -class TestDir: - def __init__(*arg, **kw): - raise NotImplementedError( - "TestDir is not usable with nose 0.10. The class is present " - "in nose.suite for backwards compatibility purposes but it " - "may not be used.") - - -class TestModule: - def __init__(*arg, **kw): - raise NotImplementedError( - "TestModule is not usable with nose 0.10. The class is present " - "in nose.suite for backwards compatibility purposes but it " - "may not be used.") diff --git a/lib/spack/external/nose/tools/__init__.py b/lib/spack/external/nose/tools/__init__.py deleted file mode 100644 index 74dab16a74..0000000000 --- a/lib/spack/external/nose/tools/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -""" -Tools for testing ------------------ - -nose.tools provides a few convenience functions to make writing tests -easier. You don't have to use them; nothing in the rest of nose depends -on any of these methods. 
- -""" -from nose.tools.nontrivial import * -from nose.tools.nontrivial import __all__ as nontrivial_all -from nose.tools.trivial import * -from nose.tools.trivial import __all__ as trivial_all - -__all__ = trivial_all + nontrivial_all diff --git a/lib/spack/external/nose/tools/nontrivial.py b/lib/spack/external/nose/tools/nontrivial.py deleted file mode 100644 index 283973245b..0000000000 --- a/lib/spack/external/nose/tools/nontrivial.py +++ /dev/null @@ -1,151 +0,0 @@ -"""Tools not exempt from being descended into in tracebacks""" - -import time - - -__all__ = ['make_decorator', 'raises', 'set_trace', 'timed', 'with_setup', - 'TimeExpired', 'istest', 'nottest'] - - -class TimeExpired(AssertionError): - pass - - -def make_decorator(func): - """ - Wraps a test decorator so as to properly replicate metadata - of the decorated function, including nose's additional stuff - (namely, setup and teardown). - """ - def decorate(newfunc): - if hasattr(func, 'compat_func_name'): - name = func.compat_func_name - else: - name = func.__name__ - newfunc.__dict__ = func.__dict__ - newfunc.__doc__ = func.__doc__ - newfunc.__module__ = func.__module__ - if not hasattr(newfunc, 'compat_co_firstlineno'): - newfunc.compat_co_firstlineno = func.func_code.co_firstlineno - try: - newfunc.__name__ = name - except TypeError: - # can't set func name in 2.3 - newfunc.compat_func_name = name - return newfunc - return decorate - - -def raises(*exceptions): - """Test must raise one of expected exceptions to pass. - - Example use:: - - @raises(TypeError, ValueError) - def test_raises_type_error(): - raise TypeError("This test passes") - - @raises(Exception) - def test_that_fails_by_passing(): - pass - - If you want to test many assertions about exceptions in a single test, - you may want to use `assert_raises` instead. - """ - valid = ' or '.join([e.__name__ for e in exceptions]) - def decorate(func): - name = func.__name__ - def newfunc(*arg, **kw): - try: - func(*arg, **kw) - except exceptions: - pass - except: - raise - else: - message = "%s() did not raise %s" % (name, valid) - raise AssertionError(message) - newfunc = make_decorator(func)(newfunc) - return newfunc - return decorate - - -def set_trace(): - """Call pdb.set_trace in the calling frame, first restoring - sys.stdout to the real output stream. Note that sys.stdout is NOT - reset to whatever it was before the call once pdb is done! - """ - import pdb - import sys - stdout = sys.stdout - sys.stdout = sys.__stdout__ - pdb.Pdb().set_trace(sys._getframe().f_back) - - -def timed(limit): - """Test must finish within specified time limit to pass. - - Example use:: - - @timed(.1) - def test_that_fails(): - time.sleep(.2) - """ - def decorate(func): - def newfunc(*arg, **kw): - start = time.time() - result = func(*arg, **kw) - end = time.time() - if end - start > limit: - raise TimeExpired("Time limit (%s) exceeded" % limit) - return result - newfunc = make_decorator(func)(newfunc) - return newfunc - return decorate - - -def with_setup(setup=None, teardown=None): - """Decorator to add setup and/or teardown methods to a test function:: - - @with_setup(setup, teardown) - def test_something(): - " ... " - - Note that `with_setup` is useful *only* for test functions, not for test - methods or inside of TestCase subclasses. 
- """ - def decorate(func, setup=setup, teardown=teardown): - if setup: - if hasattr(func, 'setup'): - _old_s = func.setup - def _s(): - setup() - _old_s() - func.setup = _s - else: - func.setup = setup - if teardown: - if hasattr(func, 'teardown'): - _old_t = func.teardown - def _t(): - _old_t() - teardown() - func.teardown = _t - else: - func.teardown = teardown - return func - return decorate - - -def istest(func): - """Decorator to mark a function or method as a test - """ - func.__test__ = True - return func - - -def nottest(func): - """Decorator to mark a function or method as *not* a test - """ - func.__test__ = False - return func diff --git a/lib/spack/external/nose/tools/trivial.py b/lib/spack/external/nose/tools/trivial.py deleted file mode 100644 index cf83efeda5..0000000000 --- a/lib/spack/external/nose/tools/trivial.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Tools so trivial that tracebacks should not descend into them - -We define the ``__unittest`` symbol in their module namespace so unittest will -skip them when printing tracebacks, just as it does for their corresponding -methods in ``unittest`` proper. - -""" -import re -import unittest - - -__all__ = ['ok_', 'eq_'] - -# Use the same flag as unittest itself to prevent descent into these functions: -__unittest = 1 - - -def ok_(expr, msg=None): - """Shorthand for assert. Saves 3 whole characters! - """ - if not expr: - raise AssertionError(msg) - - -def eq_(a, b, msg=None): - """Shorthand for 'assert a == b, "%r != %r" % (a, b) - """ - if not a == b: - raise AssertionError(msg or "%r != %r" % (a, b)) - - -# -# Expose assert* from unittest.TestCase -# - give them pep8 style names -# -caps = re.compile('([A-Z])') - -def pep8(name): - return caps.sub(lambda m: '_' + m.groups()[0].lower(), name) - -class Dummy(unittest.TestCase): - def nop(): - pass -_t = Dummy('nop') - -for at in [ at for at in dir(_t) - if at.startswith('assert') and not '_' in at ]: - pepd = pep8(at) - vars()[pepd] = getattr(_t, at) - __all__.append(pepd) - -del Dummy -del _t -del pep8 diff --git a/lib/spack/external/nose/twistedtools.py b/lib/spack/external/nose/twistedtools.py deleted file mode 100644 index 8d9c6ffe9b..0000000000 --- a/lib/spack/external/nose/twistedtools.py +++ /dev/null @@ -1,173 +0,0 @@ -""" -Twisted integration -------------------- - -This module provides a very simple way to integrate your tests with the -Twisted_ event loop. - -You must import this module *before* importing anything from Twisted itself! - -Example:: - - from nose.twistedtools import reactor, deferred - - @deferred() - def test_resolve(): - return reactor.resolve("www.python.org") - -Or, more realistically:: - - @deferred(timeout=5.0) - def test_resolve(): - d = reactor.resolve("www.python.org") - def check_ip(ip): - assert ip == "67.15.36.43" - d.addCallback(check_ip) - return d - -.. _Twisted: http://twistedmatrix.com/trac/ -""" - -import sys -from Queue import Queue, Empty -from nose.tools import make_decorator, TimeExpired - -__all__ = [ - 'threaded_reactor', 'reactor', 'deferred', 'TimeExpired', - 'stop_reactor' -] - -_twisted_thread = None - -def threaded_reactor(): - """ - Start the Twisted reactor in a separate thread, if not already done. - Returns the reactor. - The thread will automatically be destroyed when all the tests are done. 
- """ - global _twisted_thread - try: - from twisted.internet import reactor - except ImportError: - return None, None - if not _twisted_thread: - from twisted.python import threadable - from threading import Thread - _twisted_thread = Thread(target=lambda: reactor.run( \ - installSignalHandlers=False)) - _twisted_thread.setDaemon(True) - _twisted_thread.start() - return reactor, _twisted_thread - -# Export global reactor variable, as Twisted does -reactor, reactor_thread = threaded_reactor() - - -def stop_reactor(): - """Stop the reactor and join the reactor thread until it stops. - Call this function in teardown at the module or package level to - reset the twisted system after your tests. You *must* do this if - you mix tests using these tools and tests using twisted.trial. - """ - global _twisted_thread - - def stop_reactor(): - '''Helper for calling stop from withing the thread.''' - reactor.stop() - - reactor.callFromThread(stop_reactor) - reactor_thread.join() - for p in reactor.getDelayedCalls(): - if p.active(): - p.cancel() - _twisted_thread = None - - -def deferred(timeout=None): - """ - By wrapping a test function with this decorator, you can return a - twisted Deferred and the test will wait for the deferred to be triggered. - The whole test function will run inside the Twisted event loop. - - The optional timeout parameter specifies the maximum duration of the test. - The difference with timed() is that timed() will still wait for the test - to end, while deferred() will stop the test when its timeout has expired. - The latter is more desireable when dealing with network tests, because - the result may actually never arrive. - - If the callback is triggered, the test has passed. - If the errback is triggered or the timeout expires, the test has failed. - - Example:: - - @deferred(timeout=5.0) - def test_resolve(): - return reactor.resolve("www.python.org") - - Attention! If you combine this decorator with other decorators (like - "raises"), deferred() must be called *first*! 
- - In other words, this is good:: - - @raises(DNSLookupError) - @deferred() - def test_error(): - return reactor.resolve("xxxjhjhj.biz") - - and this is bad:: - - @deferred() - @raises(DNSLookupError) - def test_error(): - return reactor.resolve("xxxjhjhj.biz") - """ - reactor, reactor_thread = threaded_reactor() - if reactor is None: - raise ImportError("twisted is not available or could not be imported") - # Check for common syntax mistake - # (otherwise, tests can be silently ignored - # if one writes "@deferred" instead of "@deferred()") - try: - timeout is None or timeout + 0 - except TypeError: - raise TypeError("'timeout' argument must be a number or None") - - def decorate(func): - def wrapper(*args, **kargs): - q = Queue() - def callback(value): - q.put(None) - def errback(failure): - # Retrieve and save full exception info - try: - failure.raiseException() - except: - q.put(sys.exc_info()) - def g(): - try: - d = func(*args, **kargs) - try: - d.addCallbacks(callback, errback) - # Check for a common mistake and display a nice error - # message - except AttributeError: - raise TypeError("you must return a twisted Deferred " - "from your test case!") - # Catch exceptions raised in the test body (from the - # Twisted thread) - except: - q.put(sys.exc_info()) - reactor.callFromThread(g) - try: - error = q.get(timeout=timeout) - except Empty: - raise TimeExpired("timeout expired before end of test (%f s.)" - % timeout) - # Re-raise all exceptions - if error is not None: - exc_type, exc_value, tb = error - raise exc_type, exc_value, tb - wrapper = make_decorator(func)(wrapper) - return wrapper - return decorate - diff --git a/lib/spack/external/nose/usage.txt b/lib/spack/external/nose/usage.txt deleted file mode 100644 index bc96894ab7..0000000000 --- a/lib/spack/external/nose/usage.txt +++ /dev/null @@ -1,115 +0,0 @@ -nose collects tests automatically from python source files, -directories and packages found in its working directory (which -defaults to the current working directory). Any python source file, -directory or package that matches the testMatch regular expression -(by default: `(?:^|[\b_\.-])[Tt]est)` will be collected as a test (or -source for collection of tests). In addition, all other packages -found in the working directory will be examined for python source files -or directories that match testMatch. Package discovery descends all -the way down the tree, so package.tests and package.sub.tests and -package.sub.sub2.tests will all be collected. - -Within a test directory or package, any python source file matching -testMatch will be examined for test cases. Within a test module, -functions and classes whose names match testMatch and TestCase -subclasses with any name will be loaded and executed as tests. Tests -may use the assert keyword or raise AssertionErrors to indicate test -failure. TestCase subclasses may do the same or use the various -TestCase methods available. - -**It is important to note that the default behavior of nose is to -not include tests from files which are executable.** To include -tests from such files, remove their executable bit or use -the --exe flag (see 'Options' section below). - -Selecting Tests ---------------- - -To specify which tests to run, pass test names on the command line: - - %prog only_test_this.py - -Test names specified may be file or module names, and may optionally -indicate the test case to run by separating the module or file name -from the test case name with a colon. Filenames may be relative or -absolute. 
Examples: - - %prog test.module - %prog another.test:TestCase.test_method - %prog a.test:TestCase - %prog /path/to/test/file.py:test_function - -You may also change the working directory where nose looks for tests -by using the -w switch: - - %prog -w /path/to/tests - -Note, however, that support for multiple -w arguments is now deprecated -and will be removed in a future release. As of nose 0.10, you can get -the same behavior by specifying the target directories *without* -the -w switch: - - %prog /path/to/tests /another/path/to/tests - -Further customization of test selection and loading is possible -through the use of plugins. - -Test result output is identical to that of unittest, except for -the additional features (error classes, and plugin-supplied -features such as output capture and assert introspection) detailed -in the options below. - -Configuration -------------- - -In addition to passing command-line options, you may also put -configuration options in your project's *setup.cfg* file, or a .noserc -or nose.cfg file in your home directory. In any of these standard -ini-style config files, you put your nosetests configuration in a -``[nosetests]`` section. Options are the same as on the command line, -with the -- prefix removed. For options that are simple switches, you -must supply a value: - - [nosetests] - verbosity=3 - with-doctest=1 - -All configuration files that are found will be loaded and their -options combined. You can override the standard config file loading -with the ``-c`` option. - -Using Plugins -------------- - -There are numerous nose plugins available via easy_install and -elsewhere. To use a plugin, just install it. The plugin will add -command line options to nosetests. To verify that the plugin is installed, -run: - - nosetests --plugins - -You can add -v or -vv to that command to show more information -about each plugin. - -If you are running nose.main() or nose.run() from a script, you -can specify a list of plugins to use by passing a list of plugins -with the plugins keyword argument. - -0.9 plugins ------------ - -nose 1.0 can use SOME plugins that were written for nose 0.9. The -default plugin manager inserts a compatibility wrapper around 0.9 -plugins that adapts the changed plugin api calls. However, plugins -that access nose internals are likely to fail, especially if they -attempt to access test case or test suite classes. For example, -plugins that try to determine if a test passed to startTest is an -individual test or a suite will fail, partly because suites are no -longer passed to startTest and partly because it's likely that the -plugin is trying to find out if the test is an instance of a class -that no longer exists. - -0.10 and 0.11 plugins ---------------------- - -All plugins written for nose 0.10 and 0.11 should work with nose 1.0. diff --git a/lib/spack/external/nose/util.py b/lib/spack/external/nose/util.py deleted file mode 100644 index bfe16589ea..0000000000 --- a/lib/spack/external/nose/util.py +++ /dev/null @@ -1,668 +0,0 @@ -"""Utility functions and classes used by nose internally. 
-""" -import inspect -import itertools -import logging -import stat -import os -import re -import sys -import types -import unittest -from nose.pyversion import ClassType, TypeType, isgenerator, ismethod - - -log = logging.getLogger('nose') - -ident_re = re.compile(r'^[A-Za-z_][A-Za-z0-9_.]*$') -class_types = (ClassType, TypeType) -skip_pattern = r"(?:\.svn)|(?:[^.]+\.py[co])|(?:.*~)|(?:.*\$py\.class)|(?:__pycache__)" - -try: - set() - set = set # make from nose.util import set happy -except NameError: - try: - from sets import Set as set - except ImportError: - pass - - -def ls_tree(dir_path="", - skip_pattern=skip_pattern, - indent="|-- ", branch_indent="| ", - last_indent="`-- ", last_branch_indent=" "): - # TODO: empty directories look like non-directory files - return "\n".join(_ls_tree_lines(dir_path, skip_pattern, - indent, branch_indent, - last_indent, last_branch_indent)) - - -def _ls_tree_lines(dir_path, skip_pattern, - indent, branch_indent, last_indent, last_branch_indent): - if dir_path == "": - dir_path = os.getcwd() - - lines = [] - - names = os.listdir(dir_path) - names.sort() - dirs, nondirs = [], [] - for name in names: - if re.match(skip_pattern, name): - continue - if os.path.isdir(os.path.join(dir_path, name)): - dirs.append(name) - else: - nondirs.append(name) - - # list non-directories first - entries = list(itertools.chain([(name, False) for name in nondirs], - [(name, True) for name in dirs])) - def ls_entry(name, is_dir, ind, branch_ind): - if not is_dir: - yield ind + name - else: - path = os.path.join(dir_path, name) - if not os.path.islink(path): - yield ind + name - subtree = _ls_tree_lines(path, skip_pattern, - indent, branch_indent, - last_indent, last_branch_indent) - for x in subtree: - yield branch_ind + x - for name, is_dir in entries[:-1]: - for line in ls_entry(name, is_dir, indent, branch_indent): - yield line - if entries: - name, is_dir = entries[-1] - for line in ls_entry(name, is_dir, last_indent, last_branch_indent): - yield line - - -def absdir(path): - """Return absolute, normalized path to directory, if it exists; None - otherwise. - """ - if not os.path.isabs(path): - path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(), - path))) - if path is None or not os.path.isdir(path): - return None - return path - - -def absfile(path, where=None): - """Return absolute, normalized path to file (optionally in directory - where), or None if the file can't be found either in where or the current - working directory. 
- """ - orig = path - if where is None: - where = os.getcwd() - if isinstance(where, list) or isinstance(where, tuple): - for maybe_path in where: - maybe_abs = absfile(path, maybe_path) - if maybe_abs is not None: - return maybe_abs - return None - if not os.path.isabs(path): - path = os.path.normpath(os.path.abspath(os.path.join(where, path))) - if path is None or not os.path.exists(path): - if where != os.getcwd(): - # try the cwd instead - path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(), - orig))) - if path is None or not os.path.exists(path): - return None - if os.path.isdir(path): - # might want an __init__.py from pacakge - init = os.path.join(path,'__init__.py') - if os.path.isfile(init): - return init - elif os.path.isfile(path): - return path - return None - - -def anyp(predicate, iterable): - for item in iterable: - if predicate(item): - return True - return False - - -def file_like(name): - """A name is file-like if it is a path that exists, or it has a - directory part, or it ends in .py, or it isn't a legal python - identifier. - """ - return (os.path.exists(name) - or os.path.dirname(name) - or name.endswith('.py') - or not ident_re.match(os.path.splitext(name)[0])) - - -def func_lineno(func): - """Get the line number of a function. First looks for - compat_co_firstlineno, then func_code.co_first_lineno. - """ - try: - return func.compat_co_firstlineno - except AttributeError: - try: - return func.func_code.co_firstlineno - except AttributeError: - return -1 - - -def isclass(obj): - """Is obj a class? Inspect's isclass is too liberal and returns True - for objects that can't be subclasses of anything. - """ - obj_type = type(obj) - return obj_type in class_types or issubclass(obj_type, type) - - -# backwards compat (issue #64) -is_generator = isgenerator - - -def ispackage(path): - """ - Is this path a package directory? - - >>> ispackage('nose') - True - >>> ispackage('unit_tests') - False - >>> ispackage('nose/plugins') - True - >>> ispackage('nose/loader.py') - False - """ - if os.path.isdir(path): - # at least the end of the path must be a legal python identifier - # and __init__.py[co] must exist - end = os.path.basename(path) - if ident_re.match(end): - for init in ('__init__.py', '__init__.pyc', '__init__.pyo'): - if os.path.isfile(os.path.join(path, init)): - return True - if sys.platform.startswith('java') and \ - os.path.isfile(os.path.join(path, '__init__$py.class')): - return True - return False - - -def isproperty(obj): - """ - Is this a property? - - >>> class Foo: - ... def got(self): - ... return 2 - ... def get(self): - ... return 1 - ... get = property(get) - - >>> isproperty(Foo.got) - False - >>> isproperty(Foo.get) - True - """ - return type(obj) == property - - -def getfilename(package, relativeTo=None): - """Find the python source file for a package, relative to a - particular directory (defaults to current working directory if not - given). - """ - if relativeTo is None: - relativeTo = os.getcwd() - path = os.path.join(relativeTo, os.sep.join(package.split('.'))) - if os.path.exists(path + '/__init__.py'): - return path - filename = path + '.py' - if os.path.exists(filename): - return filename - return None - - -def getpackage(filename): - """ - Find the full dotted package name for a given python source file - name. Returns None if the file is not a python source file. - - >>> getpackage('foo.py') - 'foo' - >>> getpackage('biff/baf.py') - 'baf' - >>> getpackage('nose/util.py') - 'nose.util' - - Works for directories too. 
- - >>> getpackage('nose') - 'nose' - >>> getpackage('nose/plugins') - 'nose.plugins' - - And __init__ files stuck onto directories - - >>> getpackage('nose/plugins/__init__.py') - 'nose.plugins' - - Absolute paths also work. - - >>> path = os.path.abspath(os.path.join('nose', 'plugins')) - >>> getpackage(path) - 'nose.plugins' - """ - src_file = src(filename) - if (os.path.isdir(src_file) or not src_file.endswith('.py')) and not ispackage(src_file): - return None - base, ext = os.path.splitext(os.path.basename(src_file)) - if base == '__init__': - mod_parts = [] - else: - mod_parts = [base] - path, part = os.path.split(os.path.split(src_file)[0]) - while part: - if ispackage(os.path.join(path, part)): - mod_parts.append(part) - else: - break - path, part = os.path.split(path) - mod_parts.reverse() - return '.'.join(mod_parts) - - -def ln(label): - """Draw a 70-char-wide divider, with label in the middle. - - >>> ln('hello there') - '---------------------------- hello there -----------------------------' - """ - label_len = len(label) + 2 - chunk = (70 - label_len) // 2 - out = '%s %s %s' % ('-' * chunk, label, '-' * chunk) - pad = 70 - len(out) - if pad > 0: - out = out + ('-' * pad) - return out - - -def resolve_name(name, module=None): - """Resolve a dotted name to a module and its parts. This is stolen - wholesale from unittest.TestLoader.loadTestByName. - - >>> resolve_name('nose.util') #doctest: +ELLIPSIS - - >>> resolve_name('nose.util.resolve_name') #doctest: +ELLIPSIS - - """ - parts = name.split('.') - parts_copy = parts[:] - if module is None: - while parts_copy: - try: - log.debug("__import__ %s", name) - module = __import__('.'.join(parts_copy)) - break - except ImportError: - del parts_copy[-1] - if not parts_copy: - raise - parts = parts[1:] - obj = module - log.debug("resolve: %s, %s, %s, %s", parts, name, obj, module) - for part in parts: - obj = getattr(obj, part) - return obj - - -def split_test_name(test): - """Split a test name into a 3-tuple containing file, module, and callable - names, any of which (but not all) may be blank. - - Test names are in the form: - - file_or_module:callable - - Either side of the : may be dotted. To change the splitting behavior, you - can alter nose.util.split_test_re. - """ - norm = os.path.normpath - file_or_mod = test - fn = None - if not ':' in test: - # only a file or mod part - if file_like(test): - return (norm(test), None, None) - else: - return (None, test, None) - - # could be path|mod:callable, or a : in the file path someplace - head, tail = os.path.split(test) - if not head: - # this is a case like 'foo:bar' -- generally a module - # name followed by a callable, but also may be a windows - # drive letter followed by a path - try: - file_or_mod, fn = test.split(':') - if file_like(fn): - # must be a funny path - file_or_mod, fn = test, None - except ValueError: - # more than one : in the test - # this is a case like c:\some\path.py:a_test - parts = test.split(':') - if len(parts[0]) == 1: - file_or_mod, fn = ':'.join(parts[:-1]), parts[-1] - else: - # nonsense like foo:bar:baz - raise ValueError("Test name '%s' could not be parsed. Please " - "format test names as path:callable or " - "module:callable." 
% (test,)) - elif not tail: - # this is a case like 'foo:bar/' - # : must be part of the file path, so ignore it - file_or_mod = test - else: - if ':' in tail: - file_part, fn = tail.split(':') - else: - file_part = tail - file_or_mod = os.sep.join([head, file_part]) - if file_or_mod: - if file_like(file_or_mod): - return (norm(file_or_mod), None, fn) - else: - return (None, file_or_mod, fn) - else: - return (None, None, fn) -split_test_name.__test__ = False # do not collect - - -def test_address(test): - """Find the test address for a test, which may be a module, filename, - class, method or function. - """ - if hasattr(test, "address"): - return test.address() - # type-based polymorphism sucks in general, but I believe is - # appropriate here - t = type(test) - file = module = call = None - if t == types.ModuleType: - file = getattr(test, '__file__', None) - module = getattr(test, '__name__', None) - return (src(file), module, call) - if t == types.FunctionType or issubclass(t, type) or t == types.ClassType: - module = getattr(test, '__module__', None) - if module is not None: - m = sys.modules[module] - file = getattr(m, '__file__', None) - if file is not None: - file = os.path.abspath(file) - call = getattr(test, '__name__', None) - return (src(file), module, call) - if t == types.MethodType: - cls_adr = test_address(test.im_class) - return (src(cls_adr[0]), cls_adr[1], - "%s.%s" % (cls_adr[2], test.__name__)) - # handle unittest.TestCase instances - if isinstance(test, unittest.TestCase): - if (hasattr(test, '_FunctionTestCase__testFunc') # pre 2.7 - or hasattr(test, '_testFunc')): # 2.7 - # unittest FunctionTestCase - try: - return test_address(test._FunctionTestCase__testFunc) - except AttributeError: - return test_address(test._testFunc) - # regular unittest.TestCase - cls_adr = test_address(test.__class__) - # 2.5 compat: __testMethodName changed to _testMethodName - try: - method_name = test._TestCase__testMethodName - except AttributeError: - method_name = test._testMethodName - return (src(cls_adr[0]), cls_adr[1], - "%s.%s" % (cls_adr[2], method_name)) - if (hasattr(test, '__class__') and - test.__class__.__module__ not in ('__builtin__', 'builtins')): - return test_address(test.__class__) - raise TypeError("I don't know what %s is (%s)" % (test, t)) -test_address.__test__ = False # do not collect - - -def try_run(obj, names): - """Given a list of possible method names, try to run them with the - provided object. Keep going until something works. Used to run - setup/teardown methods for module, package, and function tests. - """ - for name in names: - func = getattr(obj, name, None) - if func is not None: - if type(obj) == types.ModuleType: - # py.test compatibility - if isinstance(func, types.FunctionType): - args, varargs, varkw, defaults = \ - inspect.getargspec(func) - else: - # Not a function. If it's callable, call it anyway - if hasattr(func, '__call__') and not inspect.ismethod(func): - func = func.__call__ - try: - args, varargs, varkw, defaults = \ - inspect.getargspec(func) - args.pop(0) # pop the self off - except TypeError: - raise TypeError("Attribute %s of %r is not a python " - "function. Only functions or callables" - " may be used as fixtures." % - (name, obj)) - if len(args): - log.debug("call fixture %s.%s(%s)", obj, name, obj) - return func(obj) - log.debug("call fixture %s.%s", obj, name) - return func() - - -def src(filename): - """Find the python source file for a .pyc, .pyo or $py.class file on - jython. 
Returns the filename provided if it is not a python source - file. - """ - if filename is None: - return filename - if sys.platform.startswith('java') and filename.endswith('$py.class'): - return '.'.join((filename[:-9], 'py')) - base, ext = os.path.splitext(filename) - if ext in ('.pyc', '.pyo', '.py'): - return '.'.join((base, 'py')) - return filename - - -def regex_last_key(regex): - """Sort key function factory that puts items that match a - regular expression last. - - >>> from nose.config import Config - >>> from nose.pyversion import sort_list - >>> c = Config() - >>> regex = c.testMatch - >>> entries = ['.', '..', 'a_test', 'src', 'lib', 'test', 'foo.py'] - >>> sort_list(entries, regex_last_key(regex)) - >>> entries - ['.', '..', 'foo.py', 'lib', 'src', 'a_test', 'test'] - """ - def k(obj): - if regex.search(obj): - return (1, obj) - return (0, obj) - return k - - -def tolist(val): - """Convert a value that may be a list or a (possibly comma-separated) - string into a list. The exception: None is returned as None, not [None]. - - >>> tolist(["one", "two"]) - ['one', 'two'] - >>> tolist("hello") - ['hello'] - >>> tolist("separate,values, with, commas, spaces , are ,ok") - ['separate', 'values', 'with', 'commas', 'spaces', 'are', 'ok'] - """ - if val is None: - return None - try: - # might already be a list - val.extend([]) - return val - except AttributeError: - pass - # might be a string - try: - return re.split(r'\s*,\s*', val) - except TypeError: - # who knows... - return list(val) - - -class odict(dict): - """Simple ordered dict implementation, based on: - - http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747 - """ - def __init__(self, *arg, **kw): - self._keys = [] - super(odict, self).__init__(*arg, **kw) - - def __delitem__(self, key): - super(odict, self).__delitem__(key) - self._keys.remove(key) - - def __setitem__(self, key, item): - super(odict, self).__setitem__(key, item) - if key not in self._keys: - self._keys.append(key) - - def __str__(self): - return "{%s}" % ', '.join(["%r: %r" % (k, v) for k, v in self.items()]) - - def clear(self): - super(odict, self).clear() - self._keys = [] - - def copy(self): - d = super(odict, self).copy() - d._keys = self._keys[:] - return d - - def items(self): - return zip(self._keys, self.values()) - - def keys(self): - return self._keys[:] - - def setdefault(self, key, failobj=None): - item = super(odict, self).setdefault(key, failobj) - if key not in self._keys: - self._keys.append(key) - return item - - def update(self, dict): - super(odict, self).update(dict) - for key in dict.keys(): - if key not in self._keys: - self._keys.append(key) - - def values(self): - return map(self.get, self._keys) - - -def transplant_func(func, module): - """ - Make a function imported from module A appear as if it is located - in module B. - - >>> from pprint import pprint - >>> pprint.__module__ - 'pprint' - >>> pp = transplant_func(pprint, __name__) - >>> pp.__module__ - 'nose.util' - - The original function is not modified. - - >>> pprint.__module__ - 'pprint' - - Calling the transplanted function calls the original. 
- - >>> pp([1, 2]) - [1, 2] - >>> pprint([1,2]) - [1, 2] - - """ - from nose.tools import make_decorator - if isgenerator(func): - def newfunc(*arg, **kw): - for v in func(*arg, **kw): - yield v - else: - def newfunc(*arg, **kw): - return func(*arg, **kw) - - newfunc = make_decorator(func)(newfunc) - newfunc.__module__ = module - return newfunc - - -def transplant_class(cls, module): - """ - Make a class appear to reside in `module`, rather than the module in which - it is actually defined. - - >>> from nose.failure import Failure - >>> Failure.__module__ - 'nose.failure' - >>> Nf = transplant_class(Failure, __name__) - >>> Nf.__module__ - 'nose.util' - >>> Nf.__name__ - 'Failure' - - """ - class C(cls): - pass - C.__module__ = module - C.__name__ = cls.__name__ - return C - - -def safe_str(val, encoding='utf-8'): - try: - return str(val) - except UnicodeEncodeError: - if isinstance(val, Exception): - return ' '.join([safe_str(arg, encoding) - for arg in val]) - return unicode(val).encode(encoding) - - -def is_executable(file): - if not os.path.exists(file): - return False - st = os.stat(file) - return bool(st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)) - - -if __name__ == '__main__': - import doctest - doctest.testmod() diff --git a/lib/spack/external/pyqver2.py b/lib/spack/external/pyqver2.py index 4690239748..571e005524 100755 --- a/lib/spack/external/pyqver2.py +++ b/lib/spack/external/pyqver2.py @@ -57,7 +57,11 @@ StandardModules = { "hmac": (2, 2), "hotshot": (2, 2), "HTMLParser": (2, 2), - "importlib": (2, 7), +# skip importlib until we can conditionally skip for pytest. +# pytest tries to import this and catches the exception, but +# the test will still fail. +# TODO: can we excelude with a comment like '# flake: noqa?' +# "importlib": (2, 7), "inspect": (2, 1), "io": (2, 6), "itertools": (2, 3), diff --git a/lib/spack/external/pytest.py b/lib/spack/external/pytest.py new file mode 100644 index 0000000000..e376e417e8 --- /dev/null +++ b/lib/spack/external/pytest.py @@ -0,0 +1,28 @@ +# PYTHON_ARGCOMPLETE_OK +""" +pytest: unit and functional testing with Python. +""" +__all__ = [ + 'main', + 'UsageError', + 'cmdline', + 'hookspec', + 'hookimpl', + '__version__', +] + +if __name__ == '__main__': # if run as a script or by 'python -m pytest' + # we trigger the below "else" condition by the following import + import pytest + raise SystemExit(pytest.main()) + +# else we are imported + +from _pytest.config import ( + main, UsageError, _preloadplugins, cmdline, + hookspec, hookimpl +) +from _pytest import __version__ + +_preloadplugins() # to populate pytest.* namespace so help(pytest) works + diff --git a/lib/spack/llnl/util/lang.py b/lib/spack/llnl/util/lang.py index 637d18cd63..331cf2b3c5 100644 --- a/lib/spack/llnl/util/lang.py +++ b/lib/spack/llnl/util/lang.py @@ -362,3 +362,15 @@ class RequiredAttributeError(ValueError): def __init__(self, message): super(RequiredAttributeError, self).__init__(message) + + +def duplicate_stream(original): + """Duplicates a stream at the os level. + + :param stream original: original stream to be duplicated. Must have a + `fileno` callable attribute. 
+ + :return: duplicate of the original stream + :rtype: file like object + """ + return os.fdopen(os.dup(original.fileno())) diff --git a/lib/spack/llnl/util/tty/log.py b/lib/spack/llnl/util/tty/log.py index 3d4972b3ae..b1d45214ab 100644 --- a/lib/spack/llnl/util/tty/log.py +++ b/lib/spack/llnl/util/tty/log.py @@ -30,6 +30,7 @@ import re import select import sys +import llnl.util.lang as lang import llnl.util.tty as tty import llnl.util.tty.color as color @@ -147,9 +148,7 @@ class log_output(object): def __enter__(self): # Sets a daemon that writes to file what it reads from a pipe try: - fwd_input_stream = os.fdopen( - os.dup(self.input_stream.fileno()) - ) + fwd_input_stream = lang.duplicate_stream(self.input_stream) self.p = multiprocessing.Process( target=self._spawn_writing_daemon, args=(self.read, fwd_input_stream), diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py index 0646f5cb32..fcf140617e 100644 --- a/lib/spack/spack/__init__.py +++ b/lib/spack/spack/__init__.py @@ -41,6 +41,7 @@ spack_file = join_path(spack_root, "bin", "spack") # spack directory hierarchy lib_path = join_path(spack_root, "lib", "spack") +external_path = join_path(lib_path, "external") build_env_path = join_path(lib_path, "env") module_path = join_path(lib_path, "spack") platform_path = join_path(module_path, 'platforms') @@ -196,3 +197,8 @@ from spack.package import \ __all__ += [ 'install_dependency_symlinks', 'flatten_dependencies', 'DependencyConflictError', 'InstallError', 'ExternalPackageError'] + +# Add default values for attributes that would otherwise be modified from +# Spack main script +debug = True +spack_working_dir = None diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py index 501ace29b1..d83288860b 100644 --- a/lib/spack/spack/build_environment.py +++ b/lib/spack/spack/build_environment.py @@ -51,13 +51,14 @@ There are two parts to the build environment: Skimming this module is a nice way to get acquainted with the types of calls you can make from within the install() function. 
""" +import inspect +import multiprocessing import os +import shutil import sys -import multiprocessing import traceback -import inspect -import shutil +import llnl.util.lang as lang import llnl.util.tty as tty import spack import spack.store @@ -579,7 +580,7 @@ def fork(pkg, function, dirty=False): try: # Forward sys.stdin to be able to activate / deactivate # verbosity pressing a key at run-time - input_stream = os.fdopen(os.dup(sys.stdin.fileno())) + input_stream = lang.duplicate_stream(sys.stdin) p = multiprocessing.Process( target=child_execution, args=(child_connection, input_stream) diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py index e712ba8e1d..bcc4524b4f 100644 --- a/lib/spack/spack/cmd/__init__.py +++ b/lib/spack/spack/cmd/__init__.py @@ -61,9 +61,19 @@ for file in os.listdir(command_path): if file.endswith(".py") and not re.search(ignore_files, file): cmd = re.sub(r'.py$', '', file) commands.append(cmd) +commands.append('test') commands.sort() +def remove_options(parser, *options): + """Remove some options from a parser.""" + for option in options: + for action in parser._actions: + if vars(action)['option_strings'][0] == option: + parser._handle_conflict_resolve(None, [(option, action)]) + break + + def get_cmd_function_name(name): return name.replace("-", "_") diff --git a/lib/spack/spack/cmd/flake8.py b/lib/spack/spack/cmd/flake8.py index a4c607a640..b8e28b0860 100644 --- a/lib/spack/spack/cmd/flake8.py +++ b/lib/spack/spack/cmd/flake8.py @@ -35,17 +35,18 @@ import spack from spack.util.executable import * description = "Runs source code style checks on Spack. Requires flake8." - -changed_files_path = os.path.join(spack.share_path, 'qa', 'changed_files') -changed_files = Executable(changed_files_path) flake8 = None +include_untracked = True -# -# This is a dict that maps: -# filename pattern -> -# a flake8 exemption code -> -# list of patterns, for which matching lines should have codes applied. -# +"""List of directories to exclude from checks.""" +exclude_directories = [spack.external_path] + +""" +This is a dict that maps: + filename pattern -> + a flake8 exemption code -> + list of patterns, for which matching lines should have codes applied. +""" exemptions = { # exemptions applied only to package.py files. 
r'package.py$': { @@ -77,6 +78,37 @@ exemptions = dict((re.compile(file_pattern), for file_pattern, error_dict in exemptions.items()) +def changed_files(): + """Get list of changed files in the Spack repository.""" + + git = which('git', required=True) + + git_args = [ + # Add changed files committed since branching off of develop + ['diff', '--name-only', '--diff-filter=ACMR', 'develop'], + # Add changed files that have been staged but not yet committed + ['diff', '--name-only', '--diff-filter=ACMR', '--cached'], + # Add changed files that are unstaged + ['diff', '--name-only', '--diff-filter=ACMR']] + + # Add new files that are untracked + if include_untracked: + git_args.append(['ls-files', '--exclude-standard', '--other']) + + excludes = [os.path.realpath(f) for f in exclude_directories] + changed = set() + for git_arg_list in git_args: + arg_list = git_arg_list + ['--', '*.py'] + + files = [f for f in git(*arg_list, output=str).split('\n') if f] + for f in files: + # don't look at files that are in the exclude locations + if any(os.path.realpath(f).startswith(e) for e in excludes): + continue + changed.add(f) + return sorted(changed) + + def filter_file(source, dest, output=False): """Filter a single file through all the patterns in exemptions.""" with open(source) as infile: @@ -114,14 +146,18 @@ def setup_parser(subparser): subparser.add_argument( '-r', '--root-relative', action='store_true', default=False, help="print root-relative paths (default is cwd-relative)") + subparser.add_argument( + '-U', '--no-untracked', dest='untracked', action='store_false', + default=True, help="Exclude untracked files from checks.") subparser.add_argument( 'files', nargs=argparse.REMAINDER, help="specific files to check") def flake8(parser, args): # Just use this to check for flake8 -- we actually execute it with Popen. - global flake8 + global flake8, include_untracked flake8 = which('flake8', required=True) + include_untracked = args.untracked temp = tempfile.mkdtemp() try: @@ -135,9 +171,7 @@ def flake8(parser, args): with working_dir(spack.prefix): if not file_list: - file_list = changed_files('*.py', output=str) - file_list = [x for x in file_list.split('\n') if x] - + file_list = changed_files() shutil.copy('.flake8', os.path.join(temp, '.flake8')) print '=======================================================' diff --git a/lib/spack/spack/cmd/test.py b/lib/spack/spack/cmd/test.py index 52c2a06778..2e0ab8b49e 100644 --- a/lib/spack/spack/cmd/test.py +++ b/lib/spack/spack/cmd/test.py @@ -22,71 +22,86 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## +import sys import os +import re +import argparse +import pytest +from StringIO import StringIO -from llnl.util.filesystem import join_path, mkdirp +from llnl.util.filesystem import * from llnl.util.tty.colify import colify import spack -import spack.test -from spack.fetch_strategy import FetchError -description = "Run unit tests" +description = "A thin wrapper around the pytest command." 
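The ``changed_files()`` helper added to the flake8 command above boils down to four git queries restricted to Python files. A rough, illustrative equivalent using plain ``subprocess`` (it assumes a git checkout with a ``develop`` branch; Spack itself goes through its ``Executable`` wrapper and additionally drops files under the excluded directories)::

    import subprocess

    def git_lines(*args):
        # run one git query limited to Python files and split it into lines
        out = subprocess.check_output(('git',) + args + ('--', '*.py'))
        return [line for line in out.decode().splitlines() if line]

    changed = set()
    # files committed since branching off develop, staged, and unstaged changes
    changed.update(git_lines('diff', '--name-only', '--diff-filter=ACMR', 'develop'))
    changed.update(git_lines('diff', '--name-only', '--diff-filter=ACMR', '--cached'))
    changed.update(git_lines('diff', '--name-only', '--diff-filter=ACMR'))
    # plus untracked files, unless -U/--no-untracked is given
    changed.update(git_lines('ls-files', '--exclude-standard', '--other'))
    print(sorted(changed))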
 def setup_parser(subparser):
     subparser.add_argument(
-        'names', nargs='*', help="Names of tests to run.")
+        '-H', '--pytest-help', action='store_true', default=False,
+        help="print full pytest help message, showing advanced options.")
+
+    list_group = subparser.add_mutually_exclusive_group()
+    list_group.add_argument(
+        '-l', '--list', action='store_true', default=False,
+        help="list basic test names.")
+    list_group.add_argument(
+        '-L', '--long-list', action='store_true', default=False,
+        help="list the entire hierarchy of tests.")
     subparser.add_argument(
-        '-l', '--list', action='store_true', dest='list',
-        help="Show available tests")
-    subparser.add_argument(
-        '--createXmlOutput', action='store_true', dest='createXmlOutput',
-        help="Create JUnit XML from test results")
-    subparser.add_argument(
-        '--xmlOutputDir', dest='xmlOutputDir',
-        help="Nose creates XML files in this directory")
-    subparser.add_argument(
-        '-v', '--verbose', action='store_true', dest='verbose',
-        help="verbose output")
-
-
-class MockCache(object):
-
-    def store(self, copyCmd, relativeDst):
-        pass
-
-    def fetcher(self, targetPath, digest, **kwargs):
-        return MockCacheFetcher()
-
-
-class MockCacheFetcher(object):
-
-    def set_stage(self, stage):
-        pass
-
-    def fetch(self):
-        raise FetchError("Mock cache always fails for tests")
-
-    def __str__(self):
-        return "[mock fetcher]"
-
+        'tests', nargs=argparse.REMAINDER,
+        help="list of tests to run (will be passed to pytest -k).")
+
+
+def do_list(args, unknown_args):
+    """Print a more readable list of tests than what pytest offers."""
+    # Run test collection and get the tree out.
+    old_output = sys.stdout
+    try:
+        sys.stdout = output = StringIO()
+        pytest.main(['--collect-only'])
+    finally:
+        sys.stdout = old_output
+
+    # put the output in a more readable tree format.
+    lines = output.getvalue().split('\n')
+    output_lines = []
+    for line in lines:
+        match = re.match(r"(\s*)<([^ ]*) '([^']*)'", line)
+        if not match:
+            continue
+        indent, nodetype, name = match.groups()
+
+        # only print top-level for short list
+        if args.list:
+            if not indent:
+                output_lines.append(
+                    os.path.basename(name).replace('.py', ''))
+        else:
+            print indent + name
 
-def test(parser, args):
     if args.list:
-        print "Available tests:"
-        colify(spack.test.list_tests(), indent=2)
-
-    else:
-        if not args.createXmlOutput:
-            outputDir = None
+        colify(output_lines)
+
+
+def test(parser, args, unknown_args):
+    if args.pytest_help:
+        # make the pytest.main help output more accurate
+        sys.argv[0] = 'spack test'
+        pytest.main(['-h'])
+        return
+
+    # pytest.ini lives in the root of the spack repository.
+    with working_dir(spack.prefix):
+        # --list and --long-list print the test output better.
+        if args.list or args.long_list:
+            do_list(args, unknown_args)
+            return
+
+        if args.tests and not any(arg.startswith('-') for arg in args.tests):
+            # Allow keyword search without -k if no options are specified
+            return pytest.main(['-k'] + args.tests)
         else:
-            if not args.xmlOutputDir:
-                outputDir = join_path(os.getcwd(), "test-output")
-            else:
-                outputDir = os.path.abspath(args.xmlOutputDir)
-
-            if not os.path.exists(outputDir):
-                mkdirp(outputDir)
-        spack.fetch_cache = MockCache()
-        spack.test.run(args.names, outputDir, args.verbose)
+            # Just run the pytest command.
+ return pytest.main(unknown_args + args.tests) diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py index 94b79accdb..d77700c01f 100644 --- a/lib/spack/spack/repository.py +++ b/lib/spack/spack/repository.py @@ -133,7 +133,7 @@ class RepoPath(object): " spack repo rm %s" % root) def swap(self, other): - """Convenience function to make swapping repostiories easier. + """Convenience function to make swapping repositories easier. This is currently used by mock tests. TODO: Maybe there is a cleaner way. diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py index 79122cc1de..ed1ec23bca 100644 --- a/lib/spack/spack/test/__init__.py +++ b/lib/spack/spack/test/__init__.py @@ -22,132 +22,3 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import sys -import os - -import llnl.util.tty as tty -import nose -import spack -import spack.architecture -from llnl.util.filesystem import join_path -from llnl.util.tty.colify import colify -from spack.test.tally_plugin import Tally -from spack.platforms.test import Test as TestPlatform -"""Names of tests to be included in Spack's test suite""" - -# All the tests Spack knows about. -# Keep these one per line so that it's easy to see changes in diffs. -test_names = [ - 'architecture', - 'build_system_guess', - 'cc', - 'cmd.find', - 'cmd.module', - 'cmd.install', - 'cmd.uninstall', - 'concretize', - 'concretize_preferences', - 'config', - 'database', - 'directory_layout', - 'environment', - 'file_cache', - 'git_fetch', - 'hg_fetch', - 'install', - 'library_list', - 'link_tree', - 'lock', - 'make_executable', - 'mirror', - 'modules', - 'multimethod', - 'namespace_trie', - 'optional_deps', - 'package_sanity', - 'packages', - 'pattern', - 'python_version', - 'sbang', - 'spec_dag', - 'spec_semantics', - 'spec_syntax', - 'spec_yaml', - 'stage', - 'svn_fetch', - 'url_extrapolate', - 'url_parse', - 'url_substitution', - 'versions', - 'provider_index', - 'spack_yaml', - # This test needs to be last until global compiler cache is fixed. - 'cmd.test_compiler_cmd', -] - - -def setup_tests(): - """Prepare the environment for the Spack tests to be run.""" - test_platform = TestPlatform() - spack.architecture.real_platform = spack.architecture.platform - spack.architecture.platform = lambda: test_platform - - -def list_tests(): - """Return names of all tests that can be run for Spack.""" - return test_names - - -def run(names, outputDir, verbose=False): - """Run tests with the supplied names. Names should be a list. If - it's empty, run ALL of Spack's tests.""" - # Print output to stdout if verbose is 1. - if verbose: - os.environ['NOSE_NOCAPTURE'] = '1' - - if not names: - names = test_names - else: - for test in names: - if test not in test_names: - tty.error("%s is not a valid spack test name." % test, - "Valid names are:") - colify(sorted(test_names), indent=4) - sys.exit(1) - - tally = Tally() - - modules = ['spack.test.' 
+ test for test in names] - runOpts = ["--with-%s" % spack.test.tally_plugin.Tally.name] - - if outputDir: - xmlOutputFname = "unittests-{0}.xml".format(test) - xmlOutputPath = join_path(outputDir, xmlOutputFname) - runOpts += ["--with-xunit", - "--xunit-file={0}".format(xmlOutputPath)] - argv = [""] + runOpts + modules - - setup_tests() - nose.run(argv=argv, addplugins=[tally]) - - succeeded = not tally.failCount and not tally.errorCount - tty.msg( - "Tests Complete.", - "%5d tests run" % tally.numberOfTestsRun, - "%5d failures" % tally.failCount, - "%5d errors" % tally.errorCount - ) - - if tally.fail_list: - items = [x for x in tally.fail_list] - tty.msg('List of failing tests:', *items) - - if tally.error_list: - items = [x for x in tally.error_list] - tty.msg('List of tests with errors:', *items) - - if succeeded: - tty.info("OK", format='g') - else: - tty.info("FAIL", format='r') - sys.exit(1) diff --git a/lib/spack/spack/test/architecture.py b/lib/spack/spack/test/architecture.py index 0ce583c6ea..fb4113361c 100644 --- a/lib/spack/spack/test/architecture.py +++ b/lib/spack/spack/test/architecture.py @@ -30,48 +30,33 @@ import os import platform as py_platform import spack import spack.architecture -from spack.spec import * +from spack.spec import Spec from spack.platforms.cray import Cray from spack.platforms.linux import Linux from spack.platforms.bgq import Bgq from spack.platforms.darwin import Darwin -from spack.test.mock_packages_test import * +def test_dict_functions_for_architecture(): + arch = spack.architecture.Arch() + arch.platform = spack.architecture.platform() + arch.platform_os = arch.platform.operating_system('default_os') + arch.target = arch.platform.target('default_target') -class ArchitectureTest(MockPackagesTest): + new_arch = spack.architecture.Arch.from_dict(arch.to_dict()) - def setUp(self): - super(ArchitectureTest, self).setUp() - self.platform = spack.architecture.platform() + assert arch == new_arch + assert isinstance(arch, spack.architecture.Arch) + assert isinstance(arch.platform, spack.architecture.Platform) + assert isinstance(arch.platform_os, spack.architecture.OperatingSystem) + assert isinstance(arch.target, spack.architecture.Target) + assert isinstance(new_arch, spack.architecture.Arch) + assert isinstance(new_arch.platform, spack.architecture.Platform) + assert isinstance(new_arch.platform_os, spack.architecture.OperatingSystem) + assert isinstance(new_arch.target, spack.architecture.Target) - def tearDown(self): - super(ArchitectureTest, self).tearDown() - def test_dict_functions_for_architecture(self): - arch = spack.architecture.Arch() - arch.platform = spack.architecture.platform() - arch.platform_os = arch.platform.operating_system('default_os') - arch.target = arch.platform.target('default_target') - - new_arch = spack.architecture.Arch.from_dict(arch.to_dict()) - self.assertEqual(arch, new_arch) - - self.assertTrue(isinstance(arch, spack.architecture.Arch)) - self.assertTrue(isinstance(arch.platform, spack.architecture.Platform)) - self.assertTrue(isinstance(arch.platform_os, - spack.architecture.OperatingSystem)) - self.assertTrue(isinstance(arch.target, - spack.architecture.Target)) - self.assertTrue(isinstance(new_arch, spack.architecture.Arch)) - self.assertTrue(isinstance(new_arch.platform, - spack.architecture.Platform)) - self.assertTrue(isinstance(new_arch.platform_os, - spack.architecture.OperatingSystem)) - self.assertTrue(isinstance(new_arch.target, - spack.architecture.Target)) - - def test_platform(self): +def 
test_platform(): output_platform_class = spack.architecture.real_platform() if os.path.exists('/opt/cray/craype'): my_platform_class = Cray() @@ -82,85 +67,95 @@ class ArchitectureTest(MockPackagesTest): elif 'Darwin' in py_platform.system(): my_platform_class = Darwin() - self.assertEqual(str(output_platform_class), str(my_platform_class)) - - def test_boolness(self): - # Make sure architecture reports that it's False when nothing's set. - arch = spack.architecture.Arch() - self.assertFalse(arch) - - # Dummy architecture parts - plat = spack.architecture.platform() - plat_os = plat.operating_system('default_os') - plat_target = plat.target('default_target') - - # Make sure architecture reports that it's True when anything is set. - arch = spack.architecture.Arch() - arch.platform = plat - self.assertTrue(arch) - - arch = spack.architecture.Arch() - arch.platform_os = plat_os - self.assertTrue(arch) - - arch = spack.architecture.Arch() - arch.target = plat_target - self.assertTrue(arch) - - def test_user_front_end_input(self): - """Test when user inputs just frontend that both the frontend target - and frontend operating system match - """ - frontend_os = str(self.platform.operating_system("frontend")) - frontend_target = str(self.platform.target("frontend")) - - frontend_spec = Spec("libelf os=frontend target=frontend") - frontend_spec.concretize() - - self.assertEqual(frontend_os, frontend_spec.architecture.platform_os) - self.assertEqual(frontend_target, frontend_spec.architecture.target) - - def test_user_back_end_input(self): - """Test when user inputs backend that both the backend target and - backend operating system match - """ - backend_os = str(self.platform.operating_system("backend")) - backend_target = str(self.platform.target("backend")) - - backend_spec = Spec("libelf os=backend target=backend") - backend_spec.concretize() - - self.assertEqual(backend_os, backend_spec.architecture.platform_os) - self.assertEqual(backend_target, backend_spec.architecture.target) - - def test_user_defaults(self): - default_os = str(self.platform.operating_system("default_os")) - default_target = str(self.platform.target("default_target")) - - default_spec = Spec("libelf") # default is no args - default_spec.concretize() - - self.assertEqual(default_os, default_spec.architecture.platform_os) - self.assertEqual(default_target, default_spec.architecture.target) - - def test_user_input_combination(self): - os_list = self.platform.operating_sys.keys() - target_list = self.platform.targets.keys() - additional = ["fe", "be", "frontend", "backend"] - - os_list.extend(additional) - target_list.extend(additional) - - combinations = itertools.product(os_list, target_list) - results = [] - for arch in combinations: - o, t = arch - spec = Spec("libelf os=%s target=%s" % (o, t)) - spec.concretize() - results.append(spec.architecture.platform_os == - str(self.platform.operating_system(o))) - results.append(spec.architecture.target == - str(self.platform.target(t))) - res = all(results) - - self.assertTrue(res) + assert str(output_platform_class) == str(my_platform_class) + + +def test_boolness(): + # Make sure architecture reports that it's False when nothing's set. + arch = spack.architecture.Arch() + assert not arch + + # Dummy architecture parts + plat = spack.architecture.platform() + plat_os = plat.operating_system('default_os') + plat_target = plat.target('default_target') + + # Make sure architecture reports that it's True when anything is set. 
+ arch = spack.architecture.Arch() + arch.platform = plat + assert arch + + arch = spack.architecture.Arch() + arch.platform_os = plat_os + assert arch + + arch = spack.architecture.Arch() + arch.target = plat_target + assert arch + + +def test_user_front_end_input(config): + """Test when user inputs just frontend that both the frontend target + and frontend operating system match + """ + platform = spack.architecture.platform() + frontend_os = str(platform.operating_system('frontend')) + frontend_target = str(platform.target('frontend')) + + frontend_spec = Spec('libelf os=frontend target=frontend') + frontend_spec.concretize() + + assert frontend_os == frontend_spec.architecture.platform_os + assert frontend_target == frontend_spec.architecture.target + + +def test_user_back_end_input(config): + """Test when user inputs backend that both the backend target and + backend operating system match + """ + platform = spack.architecture.platform() + backend_os = str(platform.operating_system("backend")) + backend_target = str(platform.target("backend")) + + backend_spec = Spec("libelf os=backend target=backend") + backend_spec.concretize() + + assert backend_os == backend_spec.architecture.platform_os + assert backend_target == backend_spec.architecture.target + + +def test_user_defaults(config): + platform = spack.architecture.platform() + default_os = str(platform.operating_system("default_os")) + default_target = str(platform.target("default_target")) + + default_spec = Spec("libelf") # default is no args + default_spec.concretize() + + assert default_os == default_spec.architecture.platform_os + assert default_target == default_spec.architecture.target + + +def test_user_input_combination(config): + platform = spack.architecture.platform() + os_list = platform.operating_sys.keys() + target_list = platform.targets.keys() + additional = ["fe", "be", "frontend", "backend"] + + os_list.extend(additional) + target_list.extend(additional) + + combinations = itertools.product(os_list, target_list) + results = [] + for arch in combinations: + o, t = arch + spec = Spec("libelf os=%s target=%s" % (o, t)) + spec.concretize() + results.append( + spec.architecture.platform_os == str(platform.operating_system(o)) + ) + results.append( + spec.architecture.target == str(platform.target(t)) + ) + res = all(results) + assert res diff --git a/lib/spack/spack/test/build_system_guess.py b/lib/spack/spack/test/build_system_guess.py index e728a47cf4..97a9d67b47 100644 --- a/lib/spack/spack/test/build_system_guess.py +++ b/lib/spack/spack/test/build_system_guess.py @@ -22,60 +22,43 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import os -import shutil -import tempfile -import unittest -from llnl.util.filesystem import * -from spack.cmd.create import BuildSystemGuesser -from spack.stage import Stage -from spack.test.mock_packages_test import * -from spack.util.executable import which +import pytest +import spack.cmd.create +import spack.util.executable +import spack.stage -class InstallTest(unittest.TestCase): - """Tests the build system guesser in spack create""" +@pytest.fixture( + scope='function', + params=[ + ('configure', 'autotools'), + ('CMakeLists.txt', 'cmake'), + ('SConstruct', 'scons'), + ('setup.py', 'python'), + ('NAMESPACE', 'R'), + ('foobar', 'unknown') + ] +) +def url_and_build_system(request, tmpdir): + """Sets up the 
resources to be pulled by the stage with + the appropriate file name and returns their url along with + the correct build-system guess + """ + tar = spack.util.executable.which('tar') + orig_dir = tmpdir.chdir() + filename, system = request.param + tmpdir.ensure('archive', filename) + tar('czf', 'archive.tar.gz', 'archive') + url = 'file://' + str(tmpdir.join('archive.tar.gz')) + yield url, system + orig_dir.chdir() - def setUp(self): - self.tar = which('tar') - self.tmpdir = tempfile.mkdtemp() - self.orig_dir = os.getcwd() - os.chdir(self.tmpdir) - self.stage = None - def tearDown(self): - shutil.rmtree(self.tmpdir, ignore_errors=True) - os.chdir(self.orig_dir) - - def check_archive(self, filename, system): - mkdirp('archive') - touch(join_path('archive', filename)) - self.tar('czf', 'archive.tar.gz', 'archive') - - url = 'file://' + join_path(os.getcwd(), 'archive.tar.gz') - print url - with Stage(url) as stage: - stage.fetch() - - guesser = BuildSystemGuesser() - guesser(stage, url) - self.assertEqual(system, guesser.build_system) - - def test_autotools(self): - self.check_archive('configure', 'autotools') - - def test_cmake(self): - self.check_archive('CMakeLists.txt', 'cmake') - - def test_scons(self): - self.check_archive('SConstruct', 'scons') - - def test_python(self): - self.check_archive('setup.py', 'python') - - def test_R(self): - self.check_archive('NAMESPACE', 'R') - - def test_unknown(self): - self.check_archive('foobar', 'unknown') +def test_build_systems(url_and_build_system): + url, build_system = url_and_build_system + with spack.stage.Stage(url) as stage: + stage.fetch() + guesser = spack.cmd.create.BuildSystemGuesser() + guesser(stage, url) + assert build_system == guesser.build_system diff --git a/lib/spack/spack/test/cmd/find.py b/lib/spack/spack/test/cmd/find.py index 4788da8ec6..dcd123d46e 100644 --- a/lib/spack/spack/test/cmd/find.py +++ b/lib/spack/spack/test/cmd/find.py @@ -22,33 +22,32 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## - - import spack.cmd.find -import unittest - from spack.util.pattern import Bunch -class FindTest(unittest.TestCase): - - def test_query_arguments(self): - query_arguments = spack.cmd.find.query_arguments - # Default arguments - args = Bunch(only_missing=False, missing=False, - unknown=False, explicit=False, implicit=False) - q_args = query_arguments(args) - self.assertTrue('installed' in q_args) - self.assertTrue('known' in q_args) - self.assertTrue('explicit' in q_args) - self.assertEqual(q_args['installed'], True) - self.assertEqual(q_args['known'], any) - self.assertEqual(q_args['explicit'], any) - # Check that explicit works correctly - args.explicit = True - q_args = query_arguments(args) - self.assertEqual(q_args['explicit'], True) - args.explicit = False - args.implicit = True - q_args = query_arguments(args) - self.assertEqual(q_args['explicit'], False) +def test_query_arguments(): + query_arguments = spack.cmd.find.query_arguments + # Default arguments + args = Bunch( + only_missing=False, + missing=False, + unknown=False, + explicit=False, + implicit=False + ) + q_args = query_arguments(args) + assert 'installed' in q_args + assert 'known' in q_args + assert 'explicit' in q_args + assert q_args['installed'] is True + assert q_args['known'] is any + assert q_args['explicit'] is any + # Check that explicit works correctly + args.explicit = True + 
q_args = query_arguments(args) + assert q_args['explicit'] is True + args.explicit = False + args.implicit = True + q_args = query_arguments(args) + assert q_args['explicit'] is False diff --git a/lib/spack/spack/test/cmd/module.py b/lib/spack/spack/test/cmd/module.py index 39f9c5649f..03ce1ef206 100644 --- a/lib/spack/spack/test/cmd/module.py +++ b/lib/spack/spack/test/cmd/module.py @@ -25,67 +25,82 @@ import argparse import os.path +import pytest import spack.cmd.module as module import spack.modules as modules -import spack.test.mock_database -class TestModule(spack.test.mock_database.MockDatabase): +def _get_module_files(args): + return [modules.module_types[args.module_type](spec).file_name + for spec in args.specs()] - def _get_module_files(self, args): - return [modules.module_types[args.module_type](spec).file_name - for spec in args.specs()] - def test_module_common_operations(self): - parser = argparse.ArgumentParser() - module.setup_parser(parser) +@pytest.fixture(scope='module') +def parser(): + """Returns the parser for the module command""" + parser = argparse.ArgumentParser() + module.setup_parser(parser) + return parser - # Try to remove a non existing module [tcl] - args = parser.parse_args(['rm', 'doesnotexist']) - self.assertRaises(SystemExit, module.module, parser, args) - # Remove existing modules [tcl] - args = parser.parse_args(['rm', '-y', 'mpileaks']) - module_files = self._get_module_files(args) - for item in module_files: - self.assertTrue(os.path.exists(item)) - module.module(parser, args) - for item in module_files: - self.assertFalse(os.path.exists(item)) +@pytest.fixture( + params=[ + ['rm', 'doesnotexist'], # Try to remove a non existing module [tcl] + ['find', 'mpileaks'], # Try to find a module with multiple matches + ['find', 'doesnotexist'], # Try to find a module with no matches + ] +) +def failure_args(request): + """A list of arguments that will cause a failure""" + return request.param + + +# TODO : test the --delete-tree option +# TODO : this requires having a separate directory for test modules +# TODO : add tests for loads and find to check the prompt format - # Add them back [tcl] - args = parser.parse_args(['refresh', '-y', 'mpileaks']) + +def test_exit_with_failure(database, parser, failure_args): + args = parser.parse_args(failure_args) + with pytest.raises(SystemExit): module.module(parser, args) - for item in module_files: - self.assertTrue(os.path.exists(item)) - # TODO : test the --delete-tree option - # TODO : this requires having a separate directory for test modules - # Try to find a module with multiple matches - args = parser.parse_args(['find', 'mpileaks']) - self.assertRaises(SystemExit, module.module, parser, args) +def test_remove_and_add_tcl(database, parser): + # Remove existing modules [tcl] + args = parser.parse_args(['rm', '-y', 'mpileaks']) + module_files = _get_module_files(args) + for item in module_files: + assert os.path.exists(item) + module.module(parser, args) + for item in module_files: + assert not os.path.exists(item) - # Try to find a module with no matches - args = parser.parse_args(['find', 'doesnotexist']) - self.assertRaises(SystemExit, module.module, parser, args) + # Add them back [tcl] + args = parser.parse_args(['refresh', '-y', 'mpileaks']) + module.module(parser, args) + for item in module_files: + assert os.path.exists(item) - # Try to find a module - args = parser.parse_args(['find', 'libelf']) - module.module(parser, args) - # Remove existing modules [dotkit] - args = parser.parse_args(['rm', '-y', 
'-m', 'dotkit', 'mpileaks']) - module_files = self._get_module_files(args) - for item in module_files: - self.assertTrue(os.path.exists(item)) - module.module(parser, args) - for item in module_files: - self.assertFalse(os.path.exists(item)) +def test_find(database, parser): + # Try to find a module + args = parser.parse_args(['find', 'libelf']) + module.module(parser, args) - # Add them back [dotkit] - args = parser.parse_args(['refresh', '-y', '-m', 'dotkit', 'mpileaks']) - module.module(parser, args) - for item in module_files: - self.assertTrue(os.path.exists(item)) - # TODO : add tests for loads and find to check the prompt format + +def test_remove_and_add_dotkit(database, parser): + # Remove existing modules [dotkit] + args = parser.parse_args(['rm', '-y', '-m', 'dotkit', 'mpileaks']) + module_files = _get_module_files(args) + for item in module_files: + assert os.path.exists(item) + module.module(parser, args) + for item in module_files: + assert not os.path.exists(item) + + # Add them back [dotkit] + args = parser.parse_args(['refresh', '-y', '-m', 'dotkit', 'mpileaks']) + module.module(parser, args) + for item in module_files: + assert os.path.exists(item) diff --git a/lib/spack/spack/test/cmd/test_compiler_cmd.py b/lib/spack/spack/test/cmd/test_compiler_cmd.py index f6e7cdeb64..647404e6da 100644 --- a/lib/spack/spack/test/cmd/test_compiler_cmd.py +++ b/lib/spack/spack/test/cmd/test_compiler_cmd.py @@ -22,42 +22,30 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import os -import shutil -from tempfile import mkdtemp +import pytest +import llnl.util.filesystem -from llnl.util.filesystem import set_executable, mkdirp - -import spack.spec import spack.cmd.compiler import spack.compilers +import spack.spec +import spack.util.pattern from spack.version import Version -from spack.test.mock_packages_test import * test_version = '4.5-spacktest' -class MockArgs(object): - - def __init__(self, add_paths=[], scope=None, compiler_spec=None, all=None): - self.add_paths = add_paths - self.scope = scope - self.compiler_spec = compiler_spec - self.all = all +@pytest.fixture() +def mock_compiler_dir(tmpdir): + """Return a directory containing a fake, but detectable compiler.""" + tmpdir.ensure('bin', dir=True) + bin_dir = tmpdir.join('bin') -def make_mock_compiler(): - """Make a directory containing a fake, but detectable compiler.""" - mock_compiler_dir = mkdtemp() - bin_dir = os.path.join(mock_compiler_dir, 'bin') - mkdirp(bin_dir) + gcc_path = bin_dir.join('gcc') + gxx_path = bin_dir.join('g++') + gfortran_path = bin_dir.join('gfortran') - gcc_path = os.path.join(bin_dir, 'gcc') - gxx_path = os.path.join(bin_dir, 'g++') - gfortran_path = os.path.join(bin_dir, 'gfortran') - - with open(gcc_path, 'w') as f: - f.write("""\ + gcc_path.write("""\ #!/bin/sh for arg in "$@"; do @@ -68,39 +56,39 @@ done """ % test_version) # Create some mock compilers in the temporary directory - set_executable(gcc_path) - shutil.copy(gcc_path, gxx_path) - shutil.copy(gcc_path, gfortran_path) + llnl.util.filesystem.set_executable(str(gcc_path)) + gcc_path.copy(gxx_path, mode=True) + gcc_path.copy(gfortran_path, mode=True) - return mock_compiler_dir + return str(tmpdir) -class CompilerCmdTest(MockPackagesTest): - """ Test compiler commands for add and remove """ +@pytest.mark.usefixtures('config', 'builtin_mock') +class 
TestCompilerCommand(object): def test_compiler_remove(self): - args = MockArgs(all=True, compiler_spec='gcc@4.5.0') + args = spack.util.pattern.Bunch( + all=True, compiler_spec='gcc@4.5.0', add_paths=[], scope=None + ) spack.cmd.compiler.compiler_remove(args) compilers = spack.compilers.all_compilers() - self.assertTrue(spack.spec.CompilerSpec("gcc@4.5.0") not in compilers) + assert spack.spec.CompilerSpec("gcc@4.5.0") not in compilers - def test_compiler_add(self): - # compilers available by default. + def test_compiler_add(self, mock_compiler_dir): + # Compilers available by default. old_compilers = set(spack.compilers.all_compilers()) - # add our new compiler and find again. - compiler_dir = make_mock_compiler() - - try: - args = MockArgs(add_paths=[compiler_dir]) - spack.cmd.compiler.compiler_find(args) - - # ensure new compiler is in there - new_compilers = set(spack.compilers.all_compilers()) - new_compiler = new_compilers - old_compilers - self.assertTrue(new_compiler) - self.assertTrue(new_compiler.pop().version == - Version(test_version)) - - finally: - shutil.rmtree(compiler_dir, ignore_errors=True) + args = spack.util.pattern.Bunch( + all=None, + compiler_spec=None, + add_paths=[mock_compiler_dir], + scope=None + ) + spack.cmd.compiler.compiler_find(args) + + # Ensure new compiler is in there + new_compilers = set(spack.compilers.all_compilers()) + new_compiler = new_compilers - old_compilers + assert new_compiler + c = new_compiler.pop() + assert c.version == Version(test_version) diff --git a/lib/spack/spack/test/cmd/uninstall.py b/lib/spack/spack/test/cmd/uninstall.py index 6a86a1543f..bfbb9b8148 100644 --- a/lib/spack/spack/test/cmd/uninstall.py +++ b/lib/spack/spack/test/cmd/uninstall.py @@ -22,9 +22,9 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import spack.test.mock_database +import pytest import spack.store -from spack.cmd.uninstall import uninstall +import spack.cmd.uninstall class MockArgs(object): @@ -37,27 +37,28 @@ class MockArgs(object): self.yes_to_all = True -class TestUninstall(spack.test.mock_database.MockDatabase): - - def test_uninstall(self): - parser = None - # Multiple matches - args = MockArgs(['mpileaks']) - self.assertRaises(SystemExit, uninstall, parser, args) - # Installed dependents - args = MockArgs(['libelf']) - self.assertRaises(SystemExit, uninstall, parser, args) - # Recursive uninstall - args = MockArgs(['callpath'], all=True, dependents=True) +def test_uninstall(database): + parser = None + uninstall = spack.cmd.uninstall.uninstall + # Multiple matches + args = MockArgs(['mpileaks']) + with pytest.raises(SystemExit): + uninstall(parser, args) + # Installed dependents + args = MockArgs(['libelf']) + with pytest.raises(SystemExit): uninstall(parser, args) + # Recursive uninstall + args = MockArgs(['callpath'], all=True, dependents=True) + uninstall(parser, args) - all_specs = spack.store.layout.all_specs() - self.assertEqual(len(all_specs), 7) - # query specs with multiple configurations - mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')] - callpath_specs = [s for s in all_specs if s.satisfies('callpath')] - mpi_specs = [s for s in all_specs if s.satisfies('mpi')] + all_specs = spack.store.layout.all_specs() + assert len(all_specs) == 7 + # query specs with multiple configurations + mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')] 
+ callpath_specs = [s for s in all_specs if s.satisfies('callpath')] + mpi_specs = [s for s in all_specs if s.satisfies('mpi')] - self.assertEqual(len(mpileaks_specs), 0) - self.assertEqual(len(callpath_specs), 0) - self.assertEqual(len(mpi_specs), 3) + assert len(mpileaks_specs) == 0 + assert len(callpath_specs) == 0 + assert len(mpi_specs) == 3 diff --git a/lib/spack/spack/test/concretize.py b/lib/spack/spack/test/concretize.py index 42ae9aa18e..1f8eeaa29e 100644 --- a/lib/spack/spack/test/concretize.py +++ b/lib/spack/spack/test/concretize.py @@ -22,160 +22,152 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## +import pytest import spack import spack.architecture +from spack.concretize import find_spec from spack.spec import Spec, CompilerSpec from spack.version import ver -from spack.concretize import find_spec -from spack.test.mock_packages_test import * - - -class ConcretizeTest(MockPackagesTest): - - def check_spec(self, abstract, concrete): - if abstract.versions.concrete: - self.assertEqual(abstract.versions, concrete.versions) - - if abstract.variants: - for name in abstract.variants: - avariant = abstract.variants[name] - cvariant = concrete.variants[name] - self.assertEqual(avariant.value, cvariant.value) - - if abstract.compiler_flags: - for flag in abstract.compiler_flags: - aflag = abstract.compiler_flags[flag] - cflag = concrete.compiler_flags[flag] - self.assertTrue(set(aflag) <= set(cflag)) - - for name in abstract.package.variants: - self.assertTrue(name in concrete.variants) - for flag in concrete.compiler_flags.valid_compiler_flags(): - self.assertTrue(flag in concrete.compiler_flags) - if abstract.compiler and abstract.compiler.concrete: - self.assertEqual(abstract.compiler, concrete.compiler) - - if abstract.architecture and abstract.architecture.concrete: - self.assertEqual(abstract.architecture, concrete.architecture) - - def check_concretize(self, abstract_spec): - abstract = Spec(abstract_spec) - concrete = abstract.concretized() - - self.assertFalse(abstract.concrete) - self.assertTrue(concrete.concrete) - self.check_spec(abstract, concrete) - - return concrete - - def test_concretize_no_deps(self): - self.check_concretize('libelf') - self.check_concretize('libelf@0.8.13') - - def test_concretize_dag(self): - self.check_concretize('callpath') - self.check_concretize('mpileaks') - self.check_concretize('libelf') +def check_spec(abstract, concrete): + if abstract.versions.concrete: + assert abstract.versions == concrete.versions + + if abstract.variants: + for name in abstract.variants: + avariant = abstract.variants[name] + cvariant = concrete.variants[name] + assert avariant.value == cvariant.value + + if abstract.compiler_flags: + for flag in abstract.compiler_flags: + aflag = abstract.compiler_flags[flag] + cflag = concrete.compiler_flags[flag] + assert set(aflag) <= set(cflag) + + for name in abstract.package.variants: + assert name in concrete.variants + + for flag in concrete.compiler_flags.valid_compiler_flags(): + assert flag in concrete.compiler_flags + + if abstract.compiler and abstract.compiler.concrete: + assert abstract.compiler == concrete.compiler + + if abstract.architecture and abstract.architecture.concrete: + assert abstract.architecture == concrete.architecture + + +def check_concretize(abstract_spec): + abstract = Spec(abstract_spec) + concrete = abstract.concretized() 
+ assert not abstract.concrete + assert concrete.concrete + check_spec(abstract, concrete) + return concrete + + +@pytest.fixture( + params=[ + # no_deps + 'libelf', 'libelf@0.8.13', + # dag + 'callpath', 'mpileaks', 'libelf', + # variant + 'mpich+debug', 'mpich~debug', 'mpich debug=2', 'mpich', + # compiler flags + 'mpich cppflags="-O3"', + # with virtual + 'mpileaks ^mpi', 'mpileaks ^mpi@:1.1', 'mpileaks ^mpi@2:', + 'mpileaks ^mpi@2.1', 'mpileaks ^mpi@2.2', 'mpileaks ^mpi@2.2', + 'mpileaks ^mpi@:1', 'mpileaks ^mpi@1.2:2' + ] +) +def spec(request): + """Spec to be concretized""" + return request.param + + +@pytest.mark.usefixtures('config', 'builtin_mock') +class TestConcretize(object): + def test_concretize(self, spec): + check_concretize(spec) def test_concretize_mention_build_dep(self): - spec = self.check_concretize('cmake-client ^cmake@3.4.3') - + spec = check_concretize('cmake-client ^cmake@3.4.3') # Check parent's perspective of child dependency = spec.dependencies_dict()['cmake'] - self.assertEqual(set(dependency.deptypes), set(['build'])) - + assert set(dependency.deptypes) == set(['build']) # Check child's perspective of parent cmake = spec['cmake'] dependent = cmake.dependents_dict()['cmake-client'] - self.assertEqual(set(dependent.deptypes), set(['build'])) - - def test_concretize_variant(self): - self.check_concretize('mpich+debug') - self.check_concretize('mpich~debug') - self.check_concretize('mpich debug=2') - self.check_concretize('mpich') - - def test_conretize_compiler_flags(self): - self.check_concretize('mpich cppflags="-O3"') + assert set(dependent.deptypes) == set(['build']) def test_concretize_preferred_version(self): - spec = self.check_concretize('python') - self.assertEqual(spec.versions, ver('2.7.11')) - - spec = self.check_concretize('python@3.5.1') - self.assertEqual(spec.versions, ver('3.5.1')) - - def test_concretize_with_virtual(self): - self.check_concretize('mpileaks ^mpi') - self.check_concretize('mpileaks ^mpi@:1.1') - self.check_concretize('mpileaks ^mpi@2:') - self.check_concretize('mpileaks ^mpi@2.1') - self.check_concretize('mpileaks ^mpi@2.2') - self.check_concretize('mpileaks ^mpi@2.2') - self.check_concretize('mpileaks ^mpi@:1') - self.check_concretize('mpileaks ^mpi@1.2:2') + spec = check_concretize('python') + assert spec.versions == ver('2.7.11') + spec = check_concretize('python@3.5.1') + assert spec.versions == ver('3.5.1') def test_concretize_with_restricted_virtual(self): - self.check_concretize('mpileaks ^mpich2') + check_concretize('mpileaks ^mpich2') - concrete = self.check_concretize('mpileaks ^mpich2@1.1') - self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.1')) + concrete = check_concretize('mpileaks ^mpich2@1.1') + assert concrete['mpich2'].satisfies('mpich2@1.1') - concrete = self.check_concretize('mpileaks ^mpich2@1.2') - self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.2')) + concrete = check_concretize('mpileaks ^mpich2@1.2') + assert concrete['mpich2'].satisfies('mpich2@1.2') - concrete = self.check_concretize('mpileaks ^mpich2@:1.5') - self.assertTrue(concrete['mpich2'].satisfies('mpich2@:1.5')) + concrete = check_concretize('mpileaks ^mpich2@:1.5') + assert concrete['mpich2'].satisfies('mpich2@:1.5') - concrete = self.check_concretize('mpileaks ^mpich2@:1.3') - self.assertTrue(concrete['mpich2'].satisfies('mpich2@:1.3')) + concrete = check_concretize('mpileaks ^mpich2@:1.3') + assert concrete['mpich2'].satisfies('mpich2@:1.3') - concrete = self.check_concretize('mpileaks ^mpich2@:1.2') - 
self.assertTrue(concrete['mpich2'].satisfies('mpich2@:1.2')) + concrete = check_concretize('mpileaks ^mpich2@:1.2') + assert concrete['mpich2'].satisfies('mpich2@:1.2') - concrete = self.check_concretize('mpileaks ^mpich2@:1.1') - self.assertTrue(concrete['mpich2'].satisfies('mpich2@:1.1')) + concrete = check_concretize('mpileaks ^mpich2@:1.1') + assert concrete['mpich2'].satisfies('mpich2@:1.1') - concrete = self.check_concretize('mpileaks ^mpich2@1.1:') - self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.1:')) + concrete = check_concretize('mpileaks ^mpich2@1.1:') + assert concrete['mpich2'].satisfies('mpich2@1.1:') - concrete = self.check_concretize('mpileaks ^mpich2@1.5:') - self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.5:')) + concrete = check_concretize('mpileaks ^mpich2@1.5:') + assert concrete['mpich2'].satisfies('mpich2@1.5:') - concrete = self.check_concretize('mpileaks ^mpich2@1.3.1:1.4') - self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.3.1:1.4')) + concrete = check_concretize('mpileaks ^mpich2@1.3.1:1.4') + assert concrete['mpich2'].satisfies('mpich2@1.3.1:1.4') def test_concretize_with_provides_when(self): """Make sure insufficient versions of MPI are not in providers list when - we ask for some advanced version. + we ask for some advanced version. """ - self.assertTrue( - not any(spec.satisfies('mpich2@:1.0') - for spec in spack.repo.providers_for('mpi@2.1'))) - - self.assertTrue( - not any(spec.satisfies('mpich2@:1.1') - for spec in spack.repo.providers_for('mpi@2.2'))) - - self.assertTrue( - not any(spec.satisfies('mpich@:1') - for spec in spack.repo.providers_for('mpi@2'))) - - self.assertTrue( - not any(spec.satisfies('mpich@:1') - for spec in spack.repo.providers_for('mpi@3'))) - - self.assertTrue( - not any(spec.satisfies('mpich2') - for spec in spack.repo.providers_for('mpi@3'))) + repo = spack.repo + assert not any( + s.satisfies('mpich2@:1.0') for s in repo.providers_for('mpi@2.1') + ) + assert not any( + s.satisfies('mpich2@:1.1') for s in repo.providers_for('mpi@2.2') + ) + assert not any( + s.satisfies('mpich@:1') for s in repo.providers_for('mpi@2') + ) + assert not any( + s.satisfies('mpich@:1') for s in repo.providers_for('mpi@3') + ) + assert not any( + s.satisfies('mpich2') for s in repo.providers_for('mpi@3') + ) def test_concretize_two_virtuals(self): """Test a package with multiple virtual dependencies.""" Spec('hypre').concretize() - def test_concretize_two_virtuals_with_one_bound(self): + def test_concretize_two_virtuals_with_one_bound( + self, refresh_builtin_mock + ): """Test a package with multiple virtual dependencies and one preset.""" Spec('hypre ^openblas').concretize() @@ -185,54 +177,48 @@ class ConcretizeTest(MockPackagesTest): def test_concretize_two_virtuals_with_dual_provider(self): """Test a package with multiple virtual dependencies and force a provider - that provides both.""" + that provides both. + """ Spec('hypre ^openblas-with-lapack').concretize() - def test_concretize_two_virtuals_with_dual_provider_and_a_conflict(self): + def test_concretize_two_virtuals_with_dual_provider_and_a_conflict( + self + ): """Test a package with multiple virtual dependencies and force a - provider that provides both, and another conflicting package that - provides one. + provider that provides both, and another conflicting package that + provides one. 
""" s = Spec('hypre ^openblas-with-lapack ^netlib-lapack') - self.assertRaises(spack.spec.MultipleProviderError, s.concretize) + with pytest.raises(spack.spec.MultipleProviderError): + s.concretize() def test_virtual_is_fully_expanded_for_callpath(self): # force dependence on fake "zmpi" by asking for MPI 10.0 spec = Spec('callpath ^mpi@10.0') - self.assertTrue('mpi' in spec._dependencies) - self.assertFalse('fake' in spec) - + assert 'mpi' in spec._dependencies + assert 'fake' not in spec spec.concretize() - - self.assertTrue('zmpi' in spec._dependencies) - self.assertTrue(all('mpi' not in d._dependencies - for d in spec.traverse())) - self.assertTrue('zmpi' in spec) - self.assertTrue('mpi' in spec) - - self.assertTrue('fake' in spec._dependencies['zmpi'].spec) - - def test_virtual_is_fully_expanded_for_mpileaks(self): + assert 'zmpi' in spec._dependencies + assert all('mpi' not in d._dependencies for d in spec.traverse()) + assert 'zmpi' in spec + assert 'mpi' in spec + assert 'fake' in spec._dependencies['zmpi'].spec + + def test_virtual_is_fully_expanded_for_mpileaks( + self + ): spec = Spec('mpileaks ^mpi@10.0') - self.assertTrue('mpi' in spec._dependencies) - self.assertFalse('fake' in spec) - + assert 'mpi' in spec._dependencies + assert 'fake' not in spec spec.concretize() - - self.assertTrue('zmpi' in spec._dependencies) - self.assertTrue('callpath' in spec._dependencies) - self.assertTrue( - 'zmpi' in spec._dependencies['callpath'] - .spec._dependencies) - self.assertTrue( - 'fake' in spec._dependencies['callpath'] - .spec._dependencies['zmpi'] - .spec._dependencies) - - self.assertTrue( - all('mpi' not in d._dependencies for d in spec.traverse())) - self.assertTrue('zmpi' in spec) - self.assertTrue('mpi' in spec) + assert 'zmpi' in spec._dependencies + assert 'callpath' in spec._dependencies + assert 'zmpi' in spec._dependencies['callpath'].spec._dependencies + assert 'fake' in spec._dependencies['callpath'].spec._dependencies[ + 'zmpi'].spec._dependencies # NOQA: ignore=E501 + assert all('mpi' not in d._dependencies for d in spec.traverse()) + assert 'zmpi' in spec + assert 'mpi' in spec def test_my_dep_depends_on_provider_of_my_virtual_dep(self): spec = Spec('indirect_mpich') @@ -242,36 +228,31 @@ class ConcretizeTest(MockPackagesTest): def test_compiler_inheritance(self): spec = Spec('mpileaks') spec.normalize() - spec['dyninst'].compiler = CompilerSpec('clang') spec.concretize() - # TODO: not exactly the syntax I would like. - self.assertTrue(spec['libdwarf'].compiler.satisfies('clang')) - self.assertTrue(spec['libelf'].compiler.satisfies('clang')) + assert spec['libdwarf'].compiler.satisfies('clang') + assert spec['libelf'].compiler.satisfies('clang') def test_external_package(self): spec = Spec('externaltool%gcc') spec.concretize() - - self.assertEqual( - spec['externaltool'].external, '/path/to/external_tool') - self.assertFalse('externalprereq' in spec) - self.assertTrue(spec['externaltool'].compiler.satisfies('gcc')) + assert spec['externaltool'].external == '/path/to/external_tool' + assert 'externalprereq' not in spec + assert spec['externaltool'].compiler.satisfies('gcc') def test_external_package_module(self): # No tcl modules on darwin/linux machines # TODO: improved way to check for this. 
platform = spack.architecture.real_platform().name - if (platform == 'darwin' or platform == 'linux'): + if platform == 'darwin' or platform == 'linux': return spec = Spec('externalmodule') spec.concretize() - self.assertEqual( - spec['externalmodule'].external_module, 'external-module') - self.assertFalse('externalprereq' in spec) - self.assertTrue(spec['externalmodule'].compiler.satisfies('gcc')) + assert spec['externalmodule'].external_module == 'external-module' + assert 'externalprereq' not in spec + assert spec['externalmodule'].compiler.satisfies('gcc') def test_nobuild_package(self): got_error = False @@ -280,17 +261,15 @@ class ConcretizeTest(MockPackagesTest): spec.concretize() except spack.concretize.NoBuildError: got_error = True - self.assertTrue(got_error) + assert got_error def test_external_and_virtual(self): spec = Spec('externaltest') spec.concretize() - self.assertEqual( - spec['externaltool'].external, '/path/to/external_tool') - self.assertEqual( - spec['stuff'].external, '/path/to/external_virtual_gcc') - self.assertTrue(spec['externaltool'].compiler.satisfies('gcc')) - self.assertTrue(spec['stuff'].compiler.satisfies('gcc')) + assert spec['externaltool'].external == '/path/to/external_tool' + assert spec['stuff'].external == '/path/to/external_virtual_gcc' + assert spec['externaltool'].compiler.satisfies('gcc') + assert spec['stuff'].compiler.satisfies('gcc') def test_find_spec_parents(self): """Tests the spec finding logic used by concretization. """ @@ -300,7 +279,7 @@ class ConcretizeTest(MockPackagesTest): Spec('d +foo')), Spec('e +foo')) - self.assertEqual('a', find_spec(s['b'], lambda s: '+foo' in s).name) + assert 'a' == find_spec(s['b'], lambda s: '+foo' in s).name def test_find_spec_children(self): s = Spec('a', @@ -308,13 +287,13 @@ class ConcretizeTest(MockPackagesTest): Spec('c'), Spec('d +foo')), Spec('e +foo')) - self.assertEqual('d', find_spec(s['b'], lambda s: '+foo' in s).name) + assert 'd' == find_spec(s['b'], lambda s: '+foo' in s).name s = Spec('a', Spec('b +foo', Spec('c +foo'), Spec('d')), Spec('e +foo')) - self.assertEqual('c', find_spec(s['b'], lambda s: '+foo' in s).name) + assert 'c' == find_spec(s['b'], lambda s: '+foo' in s).name def test_find_spec_sibling(self): s = Spec('a', @@ -322,8 +301,8 @@ class ConcretizeTest(MockPackagesTest): Spec('c'), Spec('d')), Spec('e +foo')) - self.assertEqual('e', find_spec(s['b'], lambda s: '+foo' in s).name) - self.assertEqual('b', find_spec(s['e'], lambda s: '+foo' in s).name) + assert 'e' == find_spec(s['b'], lambda s: '+foo' in s).name + assert 'b' == find_spec(s['e'], lambda s: '+foo' in s).name s = Spec('a', Spec('b +foo', @@ -331,7 +310,7 @@ class ConcretizeTest(MockPackagesTest): Spec('d')), Spec('e', Spec('f +foo'))) - self.assertEqual('f', find_spec(s['b'], lambda s: '+foo' in s).name) + assert 'f' == find_spec(s['b'], lambda s: '+foo' in s).name def test_find_spec_self(self): s = Spec('a', @@ -339,7 +318,7 @@ class ConcretizeTest(MockPackagesTest): Spec('c'), Spec('d')), Spec('e')) - self.assertEqual('b', find_spec(s['b'], lambda s: '+foo' in s).name) + assert 'b' == find_spec(s['b'], lambda s: '+foo' in s).name def test_find_spec_none(self): s = Spec('a', @@ -347,10 +326,10 @@ class ConcretizeTest(MockPackagesTest): Spec('c'), Spec('d')), Spec('e')) - self.assertEqual(None, find_spec(s['b'], lambda s: '+foo' in s)) + assert find_spec(s['b'], lambda s: '+foo' in s) is None def test_compiler_child(self): s = Spec('mpileaks%clang ^dyninst%gcc') s.concretize() - 
self.assertTrue(s['mpileaks'].satisfies('%clang')) - self.assertTrue(s['dyninst'].satisfies('%gcc')) + assert s['mpileaks'].satisfies('%clang') + assert s['dyninst'].satisfies('%gcc') diff --git a/lib/spack/spack/test/concretize_preferences.py b/lib/spack/spack/test/concretize_preferences.py index 575e912609..21d457d2e0 100644 --- a/lib/spack/spack/test/concretize_preferences.py +++ b/lib/spack/spack/test/concretize_preferences.py @@ -22,92 +22,95 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## +import pytest + import spack -import spack.architecture -from spack.test.mock_packages_test import * -from tempfile import mkdtemp +from spack.spec import Spec -class ConcretizePreferencesTest(MockPackagesTest): - """Test concretization preferences are being applied correctly. - """ +@pytest.fixture() +def concretize_scope(config, tmpdir): + """Adds a scope for concretization preferences""" + tmpdir.ensure_dir('concretize') + spack.config.ConfigScope( + 'concretize', str(tmpdir.join('concretize')) + ) + yield + # This is kind of weird, but that's how config scopes are + # set in ConfigScope.__init__ + spack.config.config_scopes.pop('concretize') + spack.pkgsort = spack.PreferredPackages() + + +def concretize(abstract_spec): + return Spec(abstract_spec).concretized() + + +def update_packages(pkgname, section, value): + """Update config and reread package list""" + conf = {pkgname: {section: value}} + spack.config.update_config('packages', conf, 'concretize') + spack.pkgsort = spack.PreferredPackages() - def setUp(self): - """Create config section to store concretization preferences - """ - super(ConcretizePreferencesTest, self).setUp() - self.tmp_dir = mkdtemp('.tmp', 'spack-config-test-') - spack.config.ConfigScope('concretize', - os.path.join(self.tmp_dir, 'concretize')) - - def tearDown(self): - super(ConcretizePreferencesTest, self).tearDown() - shutil.rmtree(self.tmp_dir, True) - spack.pkgsort = spack.PreferredPackages() - - def concretize(self, abstract_spec): - return Spec(abstract_spec).concretized() - - def update_packages(self, pkgname, section, value): - """Update config and reread package list""" - conf = {pkgname: {section: value}} - spack.config.update_config('packages', conf, 'concretize') - spack.pkgsort = spack.PreferredPackages() - - def assert_variant_values(self, spec, **variants): - concrete = self.concretize(spec) - for variant, value in variants.items(): - self.assertEqual(concrete.variants[variant].value, value) +def assert_variant_values(spec, **variants): + concrete = concretize(spec) + for variant, value in variants.items(): + assert concrete.variants[variant].value == value + + +@pytest.mark.usefixtures('concretize_scope', 'builtin_mock') +class TestConcretizePreferences(object): def test_preferred_variants(self): """Test preferred variants are applied correctly """ - self.update_packages('mpileaks', 'variants', - '~debug~opt+shared+static') - self.assert_variant_values('mpileaks', debug=False, opt=False, - shared=True, static=True) - - self.update_packages('mpileaks', 'variants', - ['+debug', '+opt', '~shared', '-static']) - self.assert_variant_values('mpileaks', debug=True, opt=True, - shared=False, static=False) - - def test_preferred_compilers(self): + update_packages('mpileaks', 'variants', '~debug~opt+shared+static') + assert_variant_values( + 'mpileaks', debug=False, opt=False, shared=True, 
static=True + ) + update_packages( + 'mpileaks', 'variants', ['+debug', '+opt', '~shared', '-static'] + ) + assert_variant_values( + 'mpileaks', debug=True, opt=True, shared=False, static=False + ) + + def test_preferred_compilers(self, refresh_builtin_mock): """Test preferred compilers are applied correctly """ - self.update_packages('mpileaks', 'compiler', ['clang@3.3']) - spec = self.concretize('mpileaks') - self.assertEqual(spec.compiler, spack.spec.CompilerSpec('clang@3.3')) + update_packages('mpileaks', 'compiler', ['clang@3.3']) + spec = concretize('mpileaks') + assert spec.compiler == spack.spec.CompilerSpec('clang@3.3') - self.update_packages('mpileaks', 'compiler', ['gcc@4.5.0']) - spec = self.concretize('mpileaks') - self.assertEqual(spec.compiler, spack.spec.CompilerSpec('gcc@4.5.0')) + update_packages('mpileaks', 'compiler', ['gcc@4.5.0']) + spec = concretize('mpileaks') + assert spec.compiler == spack.spec.CompilerSpec('gcc@4.5.0') def test_preferred_versions(self): """Test preferred package versions are applied correctly """ - self.update_packages('mpileaks', 'version', ['2.3']) - spec = self.concretize('mpileaks') - self.assertEqual(spec.version, spack.spec.Version('2.3')) + update_packages('mpileaks', 'version', ['2.3']) + spec = concretize('mpileaks') + assert spec.version == spack.spec.Version('2.3') - self.update_packages('mpileaks', 'version', ['2.2']) - spec = self.concretize('mpileaks') - self.assertEqual(spec.version, spack.spec.Version('2.2')) + update_packages('mpileaks', 'version', ['2.2']) + spec = concretize('mpileaks') + assert spec.version == spack.spec.Version('2.2') def test_preferred_providers(self): - """Test preferred providers of virtual packages are applied correctly + """Test preferred providers of virtual packages are + applied correctly """ - self.update_packages('all', 'providers', {'mpi': ['mpich']}) - spec = self.concretize('mpileaks') - self.assertTrue('mpich' in spec) + update_packages('all', 'providers', {'mpi': ['mpich']}) + spec = concretize('mpileaks') + assert 'mpich' in spec - self.update_packages('all', 'providers', {'mpi': ['zmpi']}) - spec = self.concretize('mpileaks') - self.assertTrue('zmpi', spec) + update_packages('all', 'providers', {'mpi': ['zmpi']}) + spec = concretize('mpileaks') + assert 'zmpi' in spec def test_develop(self): - """Test conretization with develop version - """ + """Test concretization with develop version""" spec = Spec('builtin.mock.develop-test') spec.concretize() - self.assertEqual(spec.version, spack.spec.Version('0.2.15')) + assert spec.version == spack.spec.Version('0.2.15') diff --git a/lib/spack/spack/test/config.py b/lib/spack/spack/test/config.py index adc0795916..ed8f78ceb4 100644 --- a/lib/spack/spack/test/config.py +++ b/lib/spack/spack/test/config.py @@ -22,17 +22,17 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import os -import shutil +import collections import getpass -import yaml -from tempfile import mkdtemp +import os +import tempfile +import ordereddict_backport +import pytest import spack import spack.config +import yaml from spack.util.path import canonicalize_path -from ordereddict_backport import OrderedDict -from spack.test.mock_packages_test import * # Some sample compiler config data a_comps = { @@ -167,104 +167,115 @@ config_override_list = { 'build_stage:': ['patha', 'pathb']}} -class 
ConfigTest(MockPackagesTest): - - def setUp(self): - super(ConfigTest, self).setUp() - self.tmp_dir = mkdtemp('.tmp', 'spack-config-test-') - self.a_comp_specs = [ - ac['compiler']['spec'] for ac in a_comps['compilers']] - self.b_comp_specs = [ - bc['compiler']['spec'] for bc in b_comps['compilers']] +def check_compiler_config(comps, *compiler_names): + """Check that named compilers in comps match Spack's config.""" + config = spack.config.get_config('compilers') + compiler_list = ['cc', 'cxx', 'f77', 'fc'] + flag_list = ['cflags', 'cxxflags', 'fflags', 'cppflags', + 'ldflags', 'ldlibs'] + param_list = ['modules', 'paths', 'spec', 'operating_system'] + for compiler in config: + conf = compiler['compiler'] + if conf['spec'] in compiler_names: + comp = next((c['compiler'] for c in comps if + c['compiler']['spec'] == conf['spec']), None) + if not comp: + raise ValueError('Bad config spec') + for p in param_list: + assert conf[p] == comp[p] + for f in flag_list: + expected = comp.get('flags', {}).get(f, None) + actual = conf.get('flags', {}).get(f, None) + assert expected == actual + for c in compiler_list: + expected = comp['paths'][c] + actual = conf['paths'][c] + assert expected == actual + + +@pytest.fixture() +def config(tmpdir): + """Mocks the configuration scope.""" + spack.config.clear_config_caches() + real_scope = spack.config.config_scopes + spack.config.config_scopes = ordereddict_backport.OrderedDict() + for priority in ['low', 'high']: + spack.config.ConfigScope(priority, str(tmpdir.join(priority))) + Config = collections.namedtuple('Config', ['real', 'mock']) + yield Config(real=real_scope, mock=spack.config.config_scopes) + spack.config.config_scopes = real_scope + spack.config.clear_config_caches() + + +@pytest.fixture() +def write_config_file(tmpdir): + """Returns a function that writes a config file.""" + def _write(config, data, scope): + config_yaml = tmpdir.join(scope, config + '.yaml') + config_yaml.ensure() + with config_yaml.open('w') as f: + yaml.dump(data, f) + return _write - spack.config.config_scopes = OrderedDict() - for priority in ['low', 'high']: - scope_dir = os.path.join(self.tmp_dir, priority) - spack.config.ConfigScope(priority, scope_dir) - def tearDown(self): - super(ConfigTest, self).tearDown() - shutil.rmtree(self.tmp_dir, True) +@pytest.fixture() +def compiler_specs(): + """Returns a couple of compiler specs needed for the tests""" + a = [ac['compiler']['spec'] for ac in a_comps['compilers']] + b = [bc['compiler']['spec'] for bc in b_comps['compilers']] + CompilerSpecs = collections.namedtuple('CompilerSpecs', ['a', 'b']) + return CompilerSpecs(a=a, b=b) - def write_config_file(self, config, data, scope): - scope_dir = os.path.join(self.tmp_dir, scope) - mkdirp(scope_dir) - path = os.path.join(scope_dir, config + '.yaml') - with open(path, 'w') as f: - print yaml - yaml.dump(data, f) - - def check_compiler_config(self, comps, *compiler_names): - """Check that named compilers in comps match Spack's config.""" - config = spack.config.get_config('compilers') - compiler_list = ['cc', 'cxx', 'f77', 'fc'] - flag_list = ['cflags', 'cxxflags', 'fflags', 'cppflags', - 'ldflags', 'ldlibs'] - param_list = ['modules', 'paths', 'spec', 'operating_system'] - for compiler in config: - conf = compiler['compiler'] - if conf['spec'] in compiler_names: - comp = next((c['compiler'] for c in comps if - c['compiler']['spec'] == conf['spec']), None) - if not comp: - self.fail('Bad config spec') - for p in param_list: - self.assertEqual(conf[p], comp[p]) - for f in 
flag_list: - expected = comp.get('flags', {}).get(f, None) - actual = conf.get('flags', {}).get(f, None) - self.assertEqual(expected, actual) - for c in compiler_list: - expected = comp['paths'][c] - actual = conf['paths'][c] - self.assertEqual(expected, actual) +@pytest.mark.usefixtures('config') +class TestConfig(object): def test_write_list_in_memory(self): spack.config.update_config('repos', repos_low['repos'], scope='low') spack.config.update_config('repos', repos_high['repos'], scope='high') config = spack.config.get_config('repos') - self.assertEqual(config, repos_high['repos'] + repos_low['repos']) + assert config == repos_high['repos'] + repos_low['repos'] - def test_write_key_in_memory(self): + def test_write_key_in_memory(self, compiler_specs): # Write b_comps "on top of" a_comps. spack.config.update_config( - 'compilers', a_comps['compilers'], scope='low') + 'compilers', a_comps['compilers'], scope='low' + ) spack.config.update_config( - 'compilers', b_comps['compilers'], scope='high') - + 'compilers', b_comps['compilers'], scope='high' + ) # Make sure the config looks how we expect. - self.check_compiler_config(a_comps['compilers'], *self.a_comp_specs) - self.check_compiler_config(b_comps['compilers'], *self.b_comp_specs) + check_compiler_config(a_comps['compilers'], *compiler_specs.a) + check_compiler_config(b_comps['compilers'], *compiler_specs.b) - def test_write_key_to_disk(self): + def test_write_key_to_disk(self, compiler_specs): # Write b_comps "on top of" a_comps. spack.config.update_config( - 'compilers', a_comps['compilers'], scope='low') + 'compilers', a_comps['compilers'], scope='low' + ) spack.config.update_config( - 'compilers', b_comps['compilers'], scope='high') - + 'compilers', b_comps['compilers'], scope='high' + ) # Clear caches so we're forced to read from disk. spack.config.clear_config_caches() - # Same check again, to ensure consistency. - self.check_compiler_config(a_comps['compilers'], *self.a_comp_specs) - self.check_compiler_config(b_comps['compilers'], *self.b_comp_specs) + check_compiler_config(a_comps['compilers'], *compiler_specs.a) + check_compiler_config(b_comps['compilers'], *compiler_specs.b) - def test_write_to_same_priority_file(self): + def test_write_to_same_priority_file(self, compiler_specs): # Write b_comps in the same file as a_comps. spack.config.update_config( - 'compilers', a_comps['compilers'], scope='low') + 'compilers', a_comps['compilers'], scope='low' + ) spack.config.update_config( - 'compilers', b_comps['compilers'], scope='low') - + 'compilers', b_comps['compilers'], scope='low' + ) # Clear caches so we're forced to read from disk. spack.config.clear_config_caches() - # Same check again, to ensure consistency. 
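# A minimal, self-contained sketch of the save / patch / yield / restore
# pattern that the new 'config' and 'write_config_file' fixtures above rely
# on: state is swapped before the test, the test runs at the 'yield', and the
# code after the 'yield' restores the original value. The names 'settings',
# 'mock_scopes' and 'test_uses_mock_scopes' are illustrative only, not Spack
# APIs.
import collections

import pytest

settings = {'scopes': ['site', 'user']}   # stands in for module-level state


@pytest.fixture()
def mock_scopes(tmpdir):
    """Swap global state for a per-test value and restore it afterwards."""
    real = settings['scopes']                              # save the real value
    settings['scopes'] = [str(tmpdir.join(p)) for p in ('low', 'high')]
    Scopes = collections.namedtuple('Scopes', ['real', 'mock'])
    yield Scopes(real=real, mock=settings['scopes'])       # test body runs here
    settings['scopes'] = real                              # teardown: restore


def test_uses_mock_scopes(mock_scopes):
    assert len(mock_scopes.mock) == 2
    assert mock_scopes.mock != mock_scopes.real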
- self.check_compiler_config(a_comps['compilers'], *self.a_comp_specs) - self.check_compiler_config(b_comps['compilers'], *self.b_comp_specs) + check_compiler_config(a_comps['compilers'], *compiler_specs.a) + check_compiler_config(b_comps['compilers'], *compiler_specs.b) def check_canonical(self, var, expected): """Ensure that is substituted properly for in strings @@ -283,72 +294,78 @@ class ConfigTest(MockPackagesTest): def test_substitute_config_variables(self): prefix = spack.prefix.lstrip('/') - self.assertEqual(os.path.join('/foo/bar/baz', prefix), - canonicalize_path('/foo/bar/baz/$spack')) + assert os.path.join( + '/foo/bar/baz', prefix + ) == canonicalize_path('/foo/bar/baz/$spack') - self.assertEqual(os.path.join(spack.prefix, 'foo/bar/baz'), - canonicalize_path('$spack/foo/bar/baz/')) + assert os.path.join( + spack.prefix, 'foo/bar/baz' + ) == canonicalize_path('$spack/foo/bar/baz/') - self.assertEqual(os.path.join('/foo/bar/baz', prefix, 'foo/bar/baz'), - canonicalize_path('/foo/bar/baz/$spack/foo/bar/baz/')) + assert os.path.join( + '/foo/bar/baz', prefix, 'foo/bar/baz' + ) == canonicalize_path('/foo/bar/baz/$spack/foo/bar/baz/') - self.assertEqual(os.path.join('/foo/bar/baz', prefix), - canonicalize_path('/foo/bar/baz/${spack}')) + assert os.path.join( + '/foo/bar/baz', prefix + ) == canonicalize_path('/foo/bar/baz/${spack}') - self.assertEqual(os.path.join(spack.prefix, 'foo/bar/baz'), - canonicalize_path('${spack}/foo/bar/baz/')) + assert os.path.join( + spack.prefix, 'foo/bar/baz' + ) == canonicalize_path('${spack}/foo/bar/baz/') - self.assertEqual( - os.path.join('/foo/bar/baz', prefix, 'foo/bar/baz'), - canonicalize_path('/foo/bar/baz/${spack}/foo/bar/baz/')) + assert os.path.join( + '/foo/bar/baz', prefix, 'foo/bar/baz' + ) == canonicalize_path('/foo/bar/baz/${spack}/foo/bar/baz/') - self.assertNotEqual( - os.path.join('/foo/bar/baz', prefix, 'foo/bar/baz'), - canonicalize_path('/foo/bar/baz/${spack/foo/bar/baz/')) + assert os.path.join( + '/foo/bar/baz', prefix, 'foo/bar/baz' + ) != canonicalize_path('/foo/bar/baz/${spack/foo/bar/baz/') def test_substitute_user(self): user = getpass.getuser() - self.assertEqual('/foo/bar/' + user + '/baz', - canonicalize_path('/foo/bar/$user/baz')) + assert '/foo/bar/' + user + '/baz' == canonicalize_path( + '/foo/bar/$user/baz' + ) def test_substitute_tempdir(self): tempdir = tempfile.gettempdir() - self.assertEqual(tempdir, canonicalize_path('$tempdir')) - self.assertEqual(tempdir + '/foo/bar/baz', - canonicalize_path('$tempdir/foo/bar/baz')) - - def test_read_config(self): - self.write_config_file('config', config_low, 'low') - self.assertEqual(spack.config.get_config('config'), - config_low['config']) - - def test_read_config_override_all(self): - self.write_config_file('config', config_low, 'low') - self.write_config_file('config', config_override_all, 'high') - self.assertEqual(spack.config.get_config('config'), { + assert tempdir == canonicalize_path('$tempdir') + assert tempdir + '/foo/bar/baz' == canonicalize_path( + '$tempdir/foo/bar/baz' + ) + + def test_read_config(self, write_config_file): + write_config_file('config', config_low, 'low') + assert spack.config.get_config('config') == config_low['config'] + + def test_read_config_override_all(self, write_config_file): + write_config_file('config', config_low, 'low') + write_config_file('config', config_override_all, 'high') + assert spack.config.get_config('config') == { 'install_tree': 'override_all' - }) + } - def test_read_config_override_key(self): - 
self.write_config_file('config', config_low, 'low') - self.write_config_file('config', config_override_key, 'high') - self.assertEqual(spack.config.get_config('config'), { + def test_read_config_override_key(self, write_config_file): + write_config_file('config', config_low, 'low') + write_config_file('config', config_override_key, 'high') + assert spack.config.get_config('config') == { 'install_tree': 'override_key', 'build_stage': ['path1', 'path2', 'path3'] - }) + } - def test_read_config_merge_list(self): - self.write_config_file('config', config_low, 'low') - self.write_config_file('config', config_merge_list, 'high') - self.assertEqual(spack.config.get_config('config'), { + def test_read_config_merge_list(self, write_config_file): + write_config_file('config', config_low, 'low') + write_config_file('config', config_merge_list, 'high') + assert spack.config.get_config('config') == { 'install_tree': 'install_tree_path', 'build_stage': ['patha', 'pathb', 'path1', 'path2', 'path3'] - }) + } - def test_read_config_override_list(self): - self.write_config_file('config', config_low, 'low') - self.write_config_file('config', config_override_list, 'high') - self.assertEqual(spack.config.get_config('config'), { + def test_read_config_override_list(self, write_config_file): + write_config_file('config', config_low, 'low') + write_config_file('config', config_override_list, 'high') + assert spack.config.get_config('config') == { 'install_tree': 'install_tree_path', 'build_stage': ['patha', 'pathb'] - }) + } diff --git a/lib/spack/spack/test/conftest.py b/lib/spack/spack/test/conftest.py new file mode 100644 index 0000000000..11127d8735 --- /dev/null +++ b/lib/spack/spack/test/conftest.py @@ -0,0 +1,515 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +import collections +import copy +import os +import re +import shutil + +import cStringIO +import llnl.util.filesystem +import llnl.util.lang +import ordereddict_backport +import py +import pytest +import spack +import spack.architecture +import spack.database +import spack.directory_layout +import spack.fetch_strategy +import spack.platforms.test +import spack.repository +import spack.stage +import spack.util.executable +import spack.util.pattern + + +########## +# Monkey-patching that is applied to all tests +########## + + +@pytest.fixture(autouse=True) +def no_stdin_duplication(monkeypatch): + """Duplicating stdin (or any other stream) returns an empty + cStringIO object. + """ + monkeypatch.setattr( + llnl.util.lang, + 'duplicate_stream', + lambda x: cStringIO.StringIO() + ) + + +@pytest.fixture(autouse=True) +def mock_fetch_cache(monkeypatch): + """Substitutes spack.fetch_cache with a mock object that does nothing + and raises on fetch. + """ + class MockCache(object): + def store(self, copyCmd, relativeDst): + pass + + def fetcher(self, targetPath, digest, **kwargs): + return MockCacheFetcher() + + class MockCacheFetcher(object): + def set_stage(self, stage): + pass + + def fetch(self): + raise spack.fetch_strategy.FetchError( + 'Mock cache always fails for tests' + ) + + def __str__(self): + return "[mock fetcher]" + + monkeypatch.setattr(spack, 'fetch_cache', MockCache()) + + +# FIXME: The lines below should better be added to a fixture with +# FIXME: session-scope. Anyhow doing it is not easy, as it seems +# FIXME: there's some weird interaction with compilers during concretization. +spack.architecture.real_platform = spack.architecture.platform +spack.architecture.platform = lambda: spack.platforms.test.Test() + +########## +# Test-specific fixtures +########## + + +@pytest.fixture(scope='session') +def repo_path(): + """Session scoped RepoPath object pointing to the mock repository""" + return spack.repository.RepoPath(spack.mock_packages_path) + + +@pytest.fixture(scope='module') +def builtin_mock(repo_path): + """Uses the 'builtin.mock' repository instead of 'builtin'""" + mock_repo = copy.deepcopy(repo_path) + spack.repo.swap(mock_repo) + BuiltinMock = collections.namedtuple('BuiltinMock', ['real', 'mock']) + # Confusing, but we swapped above + yield BuiltinMock(real=mock_repo, mock=spack.repo) + spack.repo.swap(mock_repo) + + +@pytest.fixture() +def refresh_builtin_mock(builtin_mock, repo_path): + """Refreshes the state of spack.repo""" + # Get back the real repository + spack.repo.swap(builtin_mock.real) + mock_repo = copy.deepcopy(repo_path) + spack.repo.swap(mock_repo) + return builtin_mock + + +@pytest.fixture(scope='session') +def linux_os(): + """Returns a named tuple with attributes 'name' and 'version' + representing the OS. 
+ """ + platform = spack.architecture.platform() + name, version = 'debian', '6' + if platform.name == 'linux': + platform = spack.architecture.platform() + current_os = platform.operating_system('default_os') + name, version = current_os.name, current_os.version + LinuxOS = collections.namedtuple('LinuxOS', ['name', 'version']) + return LinuxOS(name=name, version=version) + + +@pytest.fixture(scope='session') +def configuration_dir(tmpdir_factory, linux_os): + """Copies mock configuration files in a temporary directory. Returns the + directory path. + """ + tmpdir = tmpdir_factory.mktemp('configurations') + # Name of the yaml files in the test/data folder + test_path = py.path.local(spack.test_path) + compilers_yaml = test_path.join('data', 'compilers.yaml') + packages_yaml = test_path.join('data', 'packages.yaml') + config_yaml = test_path.join('data', 'config.yaml') + # Create temporary 'site' and 'user' folders + tmpdir.ensure('site', dir=True) + tmpdir.ensure('user', dir=True) + # Copy the configurations that don't need further work + packages_yaml.copy(tmpdir.join('site', 'packages.yaml')) + config_yaml.copy(tmpdir.join('site', 'config.yaml')) + # Write the one that needs modifications + content = ''.join(compilers_yaml.read()).format(linux_os) + t = tmpdir.join('site', 'compilers.yaml') + t.write(content) + return tmpdir + + +@pytest.fixture(scope='module') +def config(configuration_dir): + """Hooks the mock configuration files into spack.config""" + # Set up a mock config scope + spack.config.clear_config_caches() + real_scope = spack.config.config_scopes + spack.config.config_scopes = ordereddict_backport.OrderedDict() + spack.config.ConfigScope('site', str(configuration_dir.join('site'))) + spack.config.ConfigScope('user', str(configuration_dir.join('user'))) + Config = collections.namedtuple('Config', ['real', 'mock']) + yield Config(real=real_scope, mock=spack.config.config_scopes) + spack.config.config_scopes = real_scope + spack.config.clear_config_caches() + + +@pytest.fixture(scope='module') +def database(tmpdir_factory, builtin_mock, config): + """Creates a mock database with some packages installed note that + the ref count for dyninst here will be 3, as it's recycled + across each install. + """ + + # Here is what the mock DB looks like: + # + # o mpileaks o mpileaks' o mpileaks'' + # |\ |\ |\ + # | o callpath | o callpath' | o callpath'' + # |/| |/| |/| + # o | mpich o | mpich2 o | zmpi + # | | o | fake + # | | | + # | |______________/ + # | .____________/ + # |/ + # o dyninst + # |\ + # | o libdwarf + # |/ + # o libelf + + # Make a fake install directory + install_path = tmpdir_factory.mktemp('install_for_database') + spack_install_path = py.path.local(spack.store.root) + spack.store.root = str(install_path) + + install_layout = spack.directory_layout.YamlDirectoryLayout( + str(install_path) + ) + spack_install_layout = spack.store.layout + spack.store.layout = install_layout + + # Make fake database and fake install directory. 
+ install_db = spack.database.Database(str(install_path)) + spack_install_db = spack.store.db + spack.store.db = install_db + + Entry = collections.namedtuple('Entry', ['path', 'layout', 'db']) + Database = collections.namedtuple( + 'Database', ['real', 'mock', 'install', 'uninstall', 'refresh'] + ) + + real = Entry( + path=spack_install_path, + layout=spack_install_layout, + db=spack_install_db + ) + mock = Entry(path=install_path, layout=install_layout, db=install_db) + + def _install(spec): + s = spack.spec.Spec(spec) + s.concretize() + pkg = spack.repo.get(s) + pkg.do_install(fake=True) + + def _uninstall(spec): + spec.package.do_uninstall(spec) + + def _refresh(): + with spack.store.db.write_transaction(): + for spec in spack.store.db.query(): + _uninstall(spec) + _install('mpileaks ^mpich') + _install('mpileaks ^mpich2') + _install('mpileaks ^zmpi') + + t = Database( + real=real, + mock=mock, + install=_install, + uninstall=_uninstall, + refresh=_refresh + ) + # Transaction used to avoid repeated writes. + with spack.store.db.write_transaction(): + t.install('mpileaks ^mpich') + t.install('mpileaks ^mpich2') + t.install('mpileaks ^zmpi') + + yield t + + with spack.store.db.write_transaction(): + for spec in spack.store.db.query(): + t.uninstall(spec) + + install_path.remove(rec=1) + spack.store.root = str(spack_install_path) + spack.store.layout = spack_install_layout + spack.store.db = spack_install_db + + +@pytest.fixture() +def refresh_db_on_exit(database): + """"Restores the state of the database after a test.""" + yield + database.refresh() + +########## +# Fake archives and repositories +########## + + +@pytest.fixture(scope='session') +def mock_archive(): + """Creates a very simple archive directory with a configure script and a + makefile that installs to a prefix. Tars it up into an archive. + """ + tar = spack.util.executable.which('tar', required=True) + stage = spack.stage.Stage('mock-archive-stage') + tmpdir = py.path.local(stage.path) + repo_name = 'mock-archive-repo' + tmpdir.ensure(repo_name, dir=True) + repodir = tmpdir.join(repo_name) + # Create the configure script + configure_path = str(tmpdir.join(repo_name, 'configure')) + with open(configure_path, 'w') as f: + f.write( + "#!/bin/sh\n" + "prefix=$(echo $1 | sed 's/--prefix=//')\n" + "cat > Makefile < to pkg. - Use this to mock up constraints. - """ - spec = Spec(spec) - - # Save original dependencies before making any changes. - pkg = spack.repo.get(pkg_name) - if pkg_name not in self.saved_deps: - self.saved_deps[pkg_name] = (pkg, pkg.dependencies.copy()) - - # Change dep spec - # XXX(deptype): handle deptypes. - pkg.dependencies[spec.name] = {Spec(pkg_name): spec} - pkg.dependency_types[spec.name] = set(deptypes) - - def cleanmock(self): - """Restore the real packages path after any test.""" - spack.repo.swap(self.db) - spack.config.config_scopes = self.real_scopes - - shutil.rmtree(self.temp_config, ignore_errors=True) - spack.config.clear_config_caches() - - # XXX(deptype): handle deptypes. 
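# A minimal sketch of the autouse + monkeypatch pattern used by the
# 'no_stdin_duplication' and 'mock_fetch_cache' fixtures in the new
# conftest.py above: an autouse fixture is applied to every test without
# being requested by name, and monkeypatch undoes the patch automatically at
# teardown. The 'greeting' module and its 'fetch' attribute are illustrative
# stand-ins, not Spack code.
import types

import pytest

greeting = types.ModuleType('greeting')        # stand-in for a real module
greeting.fetch = lambda url: 'network data'    # behavior to disable in tests


@pytest.fixture(autouse=True)
def no_network(monkeypatch):
    """Runs for every test in the file; no explicit request needed."""
    def _fail(url):
        raise RuntimeError('network access is disabled in tests')
    # monkeypatch restores the original attribute when the test finishes
    monkeypatch.setattr(greeting, 'fetch', _fail)


def test_fetch_is_blocked():
    with pytest.raises(RuntimeError):
        greeting.fetch('http://example.com')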
- # Restore dependency changes that happened during the test - for pkg_name, (pkg, deps) in self.saved_deps.items(): - pkg.dependencies.clear() - pkg.dependencies.update(deps) - - shutil.rmtree(spack.share_path, ignore_errors=True) - spack.share_path = self.real_share_path - - def setUp(self): - self.initmock() - - def tearDown(self): - self.cleanmock() diff --git a/lib/spack/spack/test/mock_repo.py b/lib/spack/spack/test/mock_repo.py deleted file mode 100644 index 0ae7dbd516..0000000000 --- a/lib/spack/spack/test/mock_repo.py +++ /dev/null @@ -1,202 +0,0 @@ -############################################################################## -# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# -# This file is part of Spack. -# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. -# LLNL-CODE-647188 -# -# For details, see https://github.com/llnl/spack -# Please also see the LICENSE file for our notice and the LGPL. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License (as -# published by the Free Software Foundation) version 2.1, February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and -# conditions of the GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -############################################################################## -import os -import shutil - -from llnl.util.filesystem import * -from spack.stage import Stage -from spack.util.executable import which - -# -# VCS Systems used by mock repo code. -# -git = which('git', required=True) -svn = which('svn', required=True) -svnadmin = which('svnadmin', required=True) -hg = which('hg', required=True) -tar = which('tar', required=True) - - -class MockRepo(object): - - def __init__(self, stage_name, repo_name): - """This creates a stage where some archive/repo files can be staged - for testing spack's fetch strategies.""" - # Stage where this repo has been created - self.stage = Stage(stage_name) - - # Full path to the repo within the stage. - self.path = join_path(self.stage.path, repo_name) - mkdirp(self.path) - - def destroy(self): - """Destroy resources associated with this mock repo.""" - if self.stage: - self.stage.destroy() - - -class MockArchive(MockRepo): - """Creates a very simple archive directory with a configure script and a - makefile that installs to a prefix. Tars it up into an archive.""" - - def __init__(self): - repo_name = 'mock-archive-repo' - super(MockArchive, self).__init__('mock-archive-stage', repo_name) - - with working_dir(self.path): - configure = join_path(self.path, 'configure') - - with open(configure, 'w') as cfg_file: - cfg_file.write( - "#!/bin/sh\n" - "prefix=$(echo $1 | sed 's/--prefix=//')\n" - "cat > Makefile < to pkg. Use this to mock up constraints. + """ + spec = Spec(spec) + # Save original dependencies before making any changes. + pkg = spack.repo.get(pkg_name) + if pkg_name not in saved_deps: + saved_deps[pkg_name] = (pkg, pkg.dependencies.copy()) + # Change dep spec + # XXX(deptype): handle deptypes. 
+ pkg.dependencies[spec.name] = {Spec(pkg_name): spec} + pkg.dependency_types[spec.name] = set(deptypes) + return _mock + + +@pytest.mark.usefixtures('refresh_builtin_mock') +class TestSpecDag(object): + + def test_conflicting_package_constraints(self, set_dependency): + set_dependency('mpileaks', 'mpich@1.0') + set_dependency('callpath', 'mpich@2.0') spec = Spec('mpileaks ^mpich ^callpath ^dyninst ^libelf ^libdwarf') - # TODO: try to do something to showt that the issue was with + # TODO: try to do something to show that the issue was with # TODO: the user's input or with package inconsistencies. - self.assertRaises(spack.spec.UnsatisfiableVersionSpecError, - spec.normalize) + with pytest.raises(spack.spec.UnsatisfiableVersionSpecError): + spec.normalize() def test_preorder_node_traversal(self): dag = Spec('mpileaks ^zmpi') @@ -58,10 +96,10 @@ class SpecDagTest(MockPackagesTest): pairs = zip([0, 1, 2, 3, 4, 2, 3], names) traversal = dag.traverse() - self.assertEqual([x.name for x in traversal], names) + assert [x.name for x in traversal] == names traversal = dag.traverse(depth=True) - self.assertEqual([(x, y.name) for x, y in traversal], pairs) + assert [(x, y.name) for x, y in traversal] == pairs def test_preorder_edge_traversal(self): dag = Spec('mpileaks ^zmpi') @@ -72,10 +110,10 @@ class SpecDagTest(MockPackagesTest): pairs = zip([0, 1, 2, 3, 4, 3, 2, 3, 1], names) traversal = dag.traverse(cover='edges') - self.assertEqual([x.name for x in traversal], names) + assert [x.name for x in traversal] == names traversal = dag.traverse(cover='edges', depth=True) - self.assertEqual([(x, y.name) for x, y in traversal], pairs) + assert [(x, y.name) for x, y in traversal] == pairs def test_preorder_path_traversal(self): dag = Spec('mpileaks ^zmpi') @@ -86,10 +124,10 @@ class SpecDagTest(MockPackagesTest): pairs = zip([0, 1, 2, 3, 4, 3, 2, 3, 1, 2], names) traversal = dag.traverse(cover='paths') - self.assertEqual([x.name for x in traversal], names) + assert [x.name for x in traversal] == names traversal = dag.traverse(cover='paths', depth=True) - self.assertEqual([(x, y.name) for x, y in traversal], pairs) + assert [(x, y.name) for x, y in traversal] == pairs def test_postorder_node_traversal(self): dag = Spec('mpileaks ^zmpi') @@ -100,10 +138,10 @@ class SpecDagTest(MockPackagesTest): pairs = zip([4, 3, 2, 3, 2, 1, 0], names) traversal = dag.traverse(order='post') - self.assertEqual([x.name for x in traversal], names) + assert [x.name for x in traversal] == names traversal = dag.traverse(depth=True, order='post') - self.assertEqual([(x, y.name) for x, y in traversal], pairs) + assert [(x, y.name) for x, y in traversal] == pairs def test_postorder_edge_traversal(self): dag = Spec('mpileaks ^zmpi') @@ -114,10 +152,10 @@ class SpecDagTest(MockPackagesTest): pairs = zip([4, 3, 3, 2, 3, 2, 1, 1, 0], names) traversal = dag.traverse(cover='edges', order='post') - self.assertEqual([x.name for x in traversal], names) + assert [x.name for x in traversal] == names traversal = dag.traverse(cover='edges', depth=True, order='post') - self.assertEqual([(x, y.name) for x, y in traversal], pairs) + assert [(x, y.name) for x, y in traversal] == pairs def test_postorder_path_traversal(self): dag = Spec('mpileaks ^zmpi') @@ -128,10 +166,10 @@ class SpecDagTest(MockPackagesTest): pairs = zip([4, 3, 3, 2, 3, 2, 1, 2, 1, 0], names) traversal = dag.traverse(cover='paths', order='post') - self.assertEqual([x.name for x in traversal], names) + assert [x.name for x in traversal] == names traversal = 
dag.traverse(cover='paths', depth=True, order='post') - self.assertEqual([(x, y.name) for x, y in traversal], pairs) + assert [(x, y.name) for x, y in traversal] == pairs def test_conflicting_spec_constraints(self): mpileaks = Spec('mpileaks ^mpich ^callpath ^dyninst ^libelf ^libdwarf') @@ -143,8 +181,8 @@ class SpecDagTest(MockPackagesTest): mpileaks._dependencies['callpath']. \ spec._dependencies['mpich'].spec = Spec('mpich@2.0') - self.assertRaises(spack.spec.InconsistentSpecError, - lambda: mpileaks.flat_dependencies(copy=False)) + with pytest.raises(spack.spec.InconsistentSpecError): + mpileaks.flat_dependencies(copy=False) def test_normalize_twice(self): """Make sure normalize can be run twice on the same spec, @@ -154,7 +192,7 @@ class SpecDagTest(MockPackagesTest): n1 = spec.copy() spec.normalize() - self.assertEqual(n1, spec) + assert n1 == spec def test_normalize_a_lot(self): spec = Spec('mpileaks') @@ -182,21 +220,7 @@ class SpecDagTest(MockPackagesTest): counts[spec.name] += 1 for name in counts: - self.assertEqual(counts[name], 1, "Count for %s was not 1!" % name) - - def check_links(self, spec_to_check): - for spec in spec_to_check.traverse(): - for dependent in spec.dependents(): - self.assertTrue( - spec.name in dependent.dependencies_dict(), - "%s not in dependencies of %s" % - (spec.name, dependent.name)) - - for dependency in spec.dependencies(): - self.assertTrue( - spec.name in dependency.dependents_dict(), - "%s not in dependents of %s" % - (spec.name, dependency.name)) + assert counts[name] == 1 def test_dependents_and_dependencies_are_correct(self): spec = Spec('mpileaks', @@ -208,49 +232,49 @@ class SpecDagTest(MockPackagesTest): Spec('mpi')), Spec('mpi')) - self.check_links(spec) + check_links(spec) spec.normalize() - self.check_links(spec) + check_links(spec) - def test_unsatisfiable_version(self): - self.set_pkg_dep('mpileaks', 'mpich@1.0') + def test_unsatisfiable_version(self, set_dependency): + set_dependency('mpileaks', 'mpich@1.0') spec = Spec('mpileaks ^mpich@2.0 ^callpath ^dyninst ^libelf ^libdwarf') - self.assertRaises(spack.spec.UnsatisfiableVersionSpecError, - spec.normalize) + with pytest.raises(spack.spec.UnsatisfiableVersionSpecError): + spec.normalize() - def test_unsatisfiable_compiler(self): - self.set_pkg_dep('mpileaks', 'mpich%gcc') + def test_unsatisfiable_compiler(self, set_dependency): + set_dependency('mpileaks', 'mpich%gcc') spec = Spec('mpileaks ^mpich%intel ^callpath ^dyninst ^libelf' ' ^libdwarf') - self.assertRaises(spack.spec.UnsatisfiableCompilerSpecError, - spec.normalize) + with pytest.raises(spack.spec.UnsatisfiableCompilerSpecError): + spec.normalize() - def test_unsatisfiable_compiler_version(self): - self.set_pkg_dep('mpileaks', 'mpich%gcc@4.6') + def test_unsatisfiable_compiler_version(self, set_dependency): + set_dependency('mpileaks', 'mpich%gcc@4.6') spec = Spec('mpileaks ^mpich%gcc@4.5 ^callpath ^dyninst ^libelf' ' ^libdwarf') - self.assertRaises(spack.spec.UnsatisfiableCompilerSpecError, - spec.normalize) + with pytest.raises(spack.spec.UnsatisfiableCompilerSpecError): + spec.normalize() - def test_unsatisfiable_architecture(self): - self.set_pkg_dep('mpileaks', 'mpich platform=test target=be') + def test_unsatisfiable_architecture(self, set_dependency): + set_dependency('mpileaks', 'mpich platform=test target=be') spec = Spec('mpileaks ^mpich platform=test target=fe ^callpath' ' ^dyninst ^libelf ^libdwarf') - self.assertRaises(spack.spec.UnsatisfiableArchitectureSpecError, - spec.normalize) + with 
pytest.raises(spack.spec.UnsatisfiableArchitectureSpecError): + spec.normalize() def test_invalid_dep(self): spec = Spec('libelf ^mpich') - self.assertRaises(spack.spec.InvalidDependencyError, - spec.normalize) + with pytest.raises(spack.spec.InvalidDependencyError): + spec.normalize() spec = Spec('libelf ^libdwarf') - self.assertRaises(spack.spec.InvalidDependencyError, - spec.normalize) + with pytest.raises(spack.spec.InvalidDependencyError): + spec.normalize() spec = Spec('mpich ^dyninst ^libelf') - self.assertRaises(spack.spec.InvalidDependencyError, - spec.normalize) + with pytest.raises(spack.spec.InvalidDependencyError): + spec.normalize() def test_equal(self): # Different spec structures to test for equality @@ -273,21 +297,21 @@ class SpecDagTest(MockPackagesTest): # All these are equal to each other with regular == specs = (flat, flat_init, flip_flat, dag, flip_dag) for lhs, rhs in zip(specs, specs): - self.assertEqual(lhs, rhs) - self.assertEqual(str(lhs), str(rhs)) + assert lhs == rhs + assert str(lhs) == str(rhs) # Same DAGs constructed different ways are equal - self.assertTrue(flat.eq_dag(flat_init)) + assert flat.eq_dag(flat_init) # order at same level does not matter -- (dep on same parent) - self.assertTrue(flat.eq_dag(flip_flat)) + assert flat.eq_dag(flip_flat) # DAGs should be unequal if nesting is different - self.assertFalse(flat.eq_dag(dag)) - self.assertFalse(flat.eq_dag(flip_dag)) - self.assertFalse(flip_flat.eq_dag(dag)) - self.assertFalse(flip_flat.eq_dag(flip_dag)) - self.assertFalse(dag.eq_dag(flip_dag)) + assert not flat.eq_dag(dag) + assert not flat.eq_dag(flip_dag) + assert not flip_flat.eq_dag(dag) + assert not flip_flat.eq_dag(flip_dag) + assert not dag.eq_dag(flip_dag) def test_normalize_mpileaks(self): # Spec parsed in from a string @@ -328,32 +352,32 @@ class SpecDagTest(MockPackagesTest): # All specs here should be equal under regular equality specs = (spec, expected_flat, expected_normalized, non_unique_nodes) for lhs, rhs in zip(specs, specs): - self.assertEqual(lhs, rhs) - self.assertEqual(str(lhs), str(rhs)) + assert lhs == rhs + assert str(lhs) == str(rhs) # Test that equal and equal_dag are doing the right thing - self.assertEqual(spec, expected_flat) - self.assertTrue(spec.eq_dag(expected_flat)) + assert spec == expected_flat + assert spec.eq_dag(expected_flat) # Normalized has different DAG structure, so NOT equal. - self.assertNotEqual(spec, expected_normalized) - self.assertFalse(spec.eq_dag(expected_normalized)) + assert spec != expected_normalized + assert not spec.eq_dag(expected_normalized) # Again, different DAG structure so not equal. - self.assertNotEqual(spec, non_unique_nodes) - self.assertFalse(spec.eq_dag(non_unique_nodes)) + assert spec != non_unique_nodes + assert not spec.eq_dag(non_unique_nodes) spec.normalize() # After normalizing, spec_dag_equal should match the normalized spec. 
- self.assertNotEqual(spec, expected_flat) - self.assertFalse(spec.eq_dag(expected_flat)) + assert spec != expected_flat + assert not spec.eq_dag(expected_flat) - self.assertEqual(spec, expected_normalized) - self.assertTrue(spec.eq_dag(expected_normalized)) + assert spec == expected_normalized + assert spec.eq_dag(expected_normalized) - self.assertEqual(spec, non_unique_nodes) - self.assertFalse(spec.eq_dag(non_unique_nodes)) + assert spec == non_unique_nodes + assert not spec.eq_dag(non_unique_nodes) def test_normalize_with_virtual_package(self): spec = Spec('mpileaks ^mpi ^libelf@1.8.11 ^libdwarf') @@ -368,67 +392,66 @@ class SpecDagTest(MockPackagesTest): Spec('libelf@1.8.11')), Spec('mpi')), Spec('mpi')) - self.assertEqual(str(spec), str(expected_normalized)) + assert str(spec) == str(expected_normalized) def test_contains(self): spec = Spec('mpileaks ^mpi ^libelf@1.8.11 ^libdwarf') - self.assertTrue(Spec('mpi') in spec) - self.assertTrue(Spec('libelf') in spec) - self.assertTrue(Spec('libelf@1.8.11') in spec) - self.assertFalse(Spec('libelf@1.8.12') in spec) - self.assertTrue(Spec('libdwarf') in spec) - self.assertFalse(Spec('libgoblin') in spec) - self.assertTrue(Spec('mpileaks') in spec) + assert Spec('mpi') in spec + assert Spec('libelf') in spec + assert Spec('libelf@1.8.11') in spec + assert Spec('libelf@1.8.12') not in spec + assert Spec('libdwarf') in spec + assert Spec('libgoblin') not in spec + assert Spec('mpileaks') in spec def test_copy_simple(self): orig = Spec('mpileaks') copy = orig.copy() + check_links(copy) - self.check_links(copy) - - self.assertEqual(orig, copy) - self.assertTrue(orig.eq_dag(copy)) - self.assertEqual(orig._normal, copy._normal) - self.assertEqual(orig._concrete, copy._concrete) + assert orig == copy + assert orig.eq_dag(copy) + assert orig._normal == copy._normal + assert orig._concrete == copy._concrete # ensure no shared nodes bt/w orig and copy. orig_ids = set(id(s) for s in orig.traverse()) copy_ids = set(id(s) for s in copy.traverse()) - self.assertFalse(orig_ids.intersection(copy_ids)) + assert not orig_ids.intersection(copy_ids) def test_copy_normalized(self): orig = Spec('mpileaks') orig.normalize() copy = orig.copy() + check_links(copy) - self.check_links(copy) - - self.assertEqual(orig, copy) - self.assertTrue(orig.eq_dag(copy)) - self.assertEqual(orig._normal, copy._normal) - self.assertEqual(orig._concrete, copy._concrete) + assert orig == copy + assert orig.eq_dag(copy) + assert orig._normal == copy._normal + assert orig._concrete == copy._concrete # ensure no shared nodes bt/w orig and copy. orig_ids = set(id(s) for s in orig.traverse()) copy_ids = set(id(s) for s in copy.traverse()) - self.assertFalse(orig_ids.intersection(copy_ids)) + assert not orig_ids.intersection(copy_ids) + @pytest.mark.usefixtures('config') def test_copy_concretized(self): orig = Spec('mpileaks') orig.concretize() copy = orig.copy() - self.check_links(copy) + check_links(copy) - self.assertEqual(orig, copy) - self.assertTrue(orig.eq_dag(copy)) - self.assertEqual(orig._normal, copy._normal) - self.assertEqual(orig._concrete, copy._concrete) + assert orig == copy + assert orig.eq_dag(copy) + assert orig._normal == copy._normal + assert orig._concrete == copy._concrete # ensure no shared nodes bt/w orig and copy. 
orig_ids = set(id(s) for s in orig.traverse()) copy_ids = set(id(s) for s in copy.traverse()) - self.assertFalse(orig_ids.intersection(copy_ids)) + assert not orig_ids.intersection(copy_ids) """ Here is the graph with deptypes labeled (assume all packages have a 'dt' @@ -464,7 +487,7 @@ class SpecDagTest(MockPackagesTest): 'dtlink1', 'dtlink3', 'dtlink4'] traversal = dag.traverse(deptype=('build', 'link')) - self.assertEqual([x.name for x in traversal], names) + assert [x.name for x in traversal] == names def test_deptype_traversal_with_builddeps(self): dag = Spec('dttop') @@ -474,7 +497,7 @@ class SpecDagTest(MockPackagesTest): 'dtlink1', 'dtlink3', 'dtlink4'] traversal = dag.traverse(deptype=('build', 'link')) - self.assertEqual([x.name for x in traversal], names) + assert [x.name for x in traversal] == names def test_deptype_traversal_full(self): dag = Spec('dttop') @@ -485,7 +508,7 @@ class SpecDagTest(MockPackagesTest): 'dtrun3', 'dtbuild3'] traversal = dag.traverse(deptype=spack.alldeps) - self.assertEqual([x.name for x in traversal], names) + assert [x.name for x in traversal] == names def test_deptype_traversal_run(self): dag = Spec('dttop') @@ -494,7 +517,7 @@ class SpecDagTest(MockPackagesTest): names = ['dttop', 'dtrun1', 'dtrun3'] traversal = dag.traverse(deptype='run') - self.assertEqual([x.name for x in traversal], names) + assert [x.name for x in traversal] == names def test_hash_bits(self): """Ensure getting first n bits of a base32-encoded DAG hash works.""" @@ -522,10 +545,10 @@ class SpecDagTest(MockPackagesTest): fmt = "#0%sb" % (bits + 2) actual = format(actual_int, fmt).replace('0b', '') - self.assertEqual(expected[:bits], actual) + assert expected[:bits] == actual - self.assertRaises( - ValueError, spack.spec.base32_prefix_bits, test_hash, 161) + with pytest.raises(ValueError): + spack.spec.base32_prefix_bits(test_hash, 161) - self.assertRaises( - ValueError, spack.spec.base32_prefix_bits, test_hash, 256) + with pytest.raises(ValueError): + spack.spec.base32_prefix_bits(test_hash, 256) diff --git a/lib/spack/spack/test/spec_semantics.py b/lib/spack/spack/test/spec_semantics.py index 16d6121dea..84c8650f15 100644 --- a/lib/spack/spack/test/spec_semantics.py +++ b/lib/spack/spack/test/spec_semantics.py @@ -23,340 +23,344 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import spack.architecture +import pytest from spack.spec import * -from spack.test.mock_packages_test import * -class SpecSematicsTest(MockPackagesTest): - """This tests satisfies(), constrain() and other semantic operations - on specs.""" +def check_satisfies(spec, anon_spec, concrete=False): + left = Spec(spec, concrete=concrete) + try: + right = Spec(anon_spec) # if it's not anonymous, allow it. + except Exception: + right = parse_anonymous_spec(anon_spec, left.name) - # ======================================================================== - # Utility functions to set everything up. - # ======================================================================== - def check_satisfies(self, spec, anon_spec, concrete=False): - left = Spec(spec, concrete=concrete) - try: - right = Spec(anon_spec) # if it's not anonymous, allow it. - except: - right = parse_anonymous_spec(anon_spec, left.name) - - # Satisfies is one-directional. - self.assertTrue(left.satisfies(right)) - self.assertTrue(left.satisfies(anon_spec)) - - # if left satisfies right, then we should be able to consrain - # right by left. 
Reverse is not always true. + # Satisfies is one-directional. + assert left.satisfies(right) + assert left.satisfies(anon_spec) + + # if left satisfies right, then we should be able to consrain + # right by left. Reverse is not always true. + right.copy().constrain(left) + + +def check_unsatisfiable(spec, anon_spec, concrete=False): + left = Spec(spec, concrete=concrete) + try: + right = Spec(anon_spec) # if it's not anonymous, allow it. + except Exception: + right = parse_anonymous_spec(anon_spec, left.name) + + assert not left.satisfies(right) + assert not left.satisfies(anon_spec) + + with pytest.raises(UnsatisfiableSpecError): right.copy().constrain(left) - def check_unsatisfiable(self, spec, anon_spec, concrete=False): - left = Spec(spec, concrete=concrete) - try: - right = Spec(anon_spec) # if it's not anonymous, allow it. - except: - right = parse_anonymous_spec(anon_spec, left.name) - self.assertFalse(left.satisfies(right)) - self.assertFalse(left.satisfies(anon_spec)) +def check_constrain(expected, spec, constraint): + exp = Spec(expected) + spec = Spec(spec) + constraint = Spec(constraint) + spec.constrain(constraint) + assert exp == spec - self.assertRaises(UnsatisfiableSpecError, right.copy().constrain, left) - def check_constrain(self, expected, spec, constraint): - exp = Spec(expected) - spec = Spec(spec) - constraint = Spec(constraint) - spec.constrain(constraint) - self.assertEqual(exp, spec) +def check_constrain_changed(spec, constraint): + spec = Spec(spec) + assert spec.constrain(constraint) - def check_constrain_changed(self, spec, constraint): - spec = Spec(spec) - self.assertTrue(spec.constrain(constraint)) - def check_constrain_not_changed(self, spec, constraint): - spec = Spec(spec) - self.assertFalse(spec.constrain(constraint)) +def check_constrain_not_changed(spec, constraint): + spec = Spec(spec) + assert not spec.constrain(constraint) - def check_invalid_constraint(self, spec, constraint): - spec = Spec(spec) - constraint = Spec(constraint) - self.assertRaises(UnsatisfiableSpecError, spec.constrain, constraint) - # ======================================================================== - # Satisfiability - # ======================================================================== +def check_invalid_constraint(spec, constraint): + spec = Spec(spec) + constraint = Spec(constraint) + with pytest.raises(UnsatisfiableSpecError): + spec.constrain(constraint) + + +@pytest.mark.usefixtures('config', 'builtin_mock') +class TestSpecSematics(object): + """This tests satisfies(), constrain() and other semantic operations + on specs. + """ def test_satisfies(self): - self.check_satisfies('libelf@0.8.13', '@0:1') - self.check_satisfies('libdwarf^libelf@0.8.13', '^libelf@0:1') + check_satisfies('libelf@0.8.13', '@0:1') + check_satisfies('libdwarf^libelf@0.8.13', '^libelf@0:1') def test_satisfies_namespace(self): - self.check_satisfies('builtin.mpich', 'mpich') - self.check_satisfies('builtin.mock.mpich', 'mpich') + check_satisfies('builtin.mpich', 'mpich') + check_satisfies('builtin.mock.mpich', 'mpich') # TODO: only works for deps now, but shouldn't we allow for root spec? 
- # self.check_satisfies('builtin.mock.mpich', 'mpi') + # check_satisfies('builtin.mock.mpich', 'mpi') - self.check_satisfies('builtin.mock.mpich', 'builtin.mock.mpich') + check_satisfies('builtin.mock.mpich', 'builtin.mock.mpich') - self.check_unsatisfiable('builtin.mock.mpich', 'builtin.mpich') + check_unsatisfiable('builtin.mock.mpich', 'builtin.mpich') def test_satisfies_namespaced_dep(self): """Ensure spec from same or unspecified namespace satisfies namespace constraint.""" - self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpich') + check_satisfies('mpileaks ^builtin.mock.mpich', '^mpich') - self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpi') - self.check_satisfies( + check_satisfies('mpileaks ^builtin.mock.mpich', '^mpi') + check_satisfies( 'mpileaks ^builtin.mock.mpich', '^builtin.mock.mpich') - self.check_unsatisfiable( + check_unsatisfiable( 'mpileaks ^builtin.mock.mpich', '^builtin.mpich') def test_satisfies_compiler(self): - self.check_satisfies('foo%gcc', '%gcc') - self.check_satisfies('foo%intel', '%intel') - self.check_unsatisfiable('foo%intel', '%gcc') - self.check_unsatisfiable('foo%intel', '%pgi') + check_satisfies('foo%gcc', '%gcc') + check_satisfies('foo%intel', '%intel') + check_unsatisfiable('foo%intel', '%gcc') + check_unsatisfiable('foo%intel', '%pgi') def test_satisfies_compiler_version(self): - self.check_satisfies('foo%gcc', '%gcc@4.7.2') - self.check_satisfies('foo%intel', '%intel@4.7.2') + check_satisfies('foo%gcc', '%gcc@4.7.2') + check_satisfies('foo%intel', '%intel@4.7.2') - self.check_satisfies('foo%pgi@4.5', '%pgi@4.4:4.6') - self.check_satisfies('foo@2.0%pgi@4.5', '@1:3%pgi@4.4:4.6') + check_satisfies('foo%pgi@4.5', '%pgi@4.4:4.6') + check_satisfies('foo@2.0%pgi@4.5', '@1:3%pgi@4.4:4.6') - self.check_unsatisfiable('foo%pgi@4.3', '%pgi@4.4:4.6') - self.check_unsatisfiable('foo@4.0%pgi', '@1:3%pgi') - self.check_unsatisfiable('foo@4.0%pgi@4.5', '@1:3%pgi@4.4:4.6') + check_unsatisfiable('foo%pgi@4.3', '%pgi@4.4:4.6') + check_unsatisfiable('foo@4.0%pgi', '@1:3%pgi') + check_unsatisfiable('foo@4.0%pgi@4.5', '@1:3%pgi@4.4:4.6') - self.check_satisfies('foo %gcc@4.7.3', '%gcc@4.7') - self.check_unsatisfiable('foo %gcc@4.7', '%gcc@4.7.3') + check_satisfies('foo %gcc@4.7.3', '%gcc@4.7') + check_unsatisfiable('foo %gcc@4.7', '%gcc@4.7.3') def test_satisfies_architecture(self): - self.check_satisfies( + check_satisfies( 'foo platform=test', 'platform=test') - self.check_satisfies( + check_satisfies( 'foo platform=linux', 'platform=linux') - self.check_satisfies( + check_satisfies( 'foo platform=test', 'platform=test target=frontend') - self.check_satisfies( + check_satisfies( 'foo platform=test', 'platform=test os=frontend target=frontend') - self.check_satisfies( + check_satisfies( 'foo platform=test os=frontend target=frontend', 'platform=test') - self.check_unsatisfiable( + check_unsatisfiable( 'foo platform=linux', 'platform=test os=redhat6 target=x86_32') - self.check_unsatisfiable( + check_unsatisfiable( 'foo os=redhat6', 'platform=test os=debian6 target=x86_64') - self.check_unsatisfiable( + check_unsatisfiable( 'foo target=x86_64', 'platform=test os=redhat6 target=x86_32') - self.check_satisfies( + check_satisfies( 'foo arch=test-None-None', 'platform=test') - self.check_satisfies( + check_satisfies( 'foo arch=test-None-frontend', 'platform=test target=frontend') - self.check_satisfies( + check_satisfies( 'foo arch=test-frontend-frontend', 'platform=test os=frontend target=frontend') - self.check_satisfies( + check_satisfies( 'foo 
arch=test-frontend-frontend', 'platform=test') - self.check_unsatisfiable( + check_unsatisfiable( 'foo arch=test-frontend-frontend', 'platform=test os=frontend target=backend') - self.check_satisfies( + check_satisfies( 'foo platform=test target=frontend os=frontend', 'platform=test target=frontend os=frontend') - self.check_satisfies( + check_satisfies( 'foo platform=test target=backend os=backend', 'platform=test target=backend os=backend') - self.check_satisfies( + check_satisfies( 'foo platform=test target=default_target os=default_os', 'platform=test os=default_os') - self.check_unsatisfiable( + check_unsatisfiable( 'foo platform=test target=x86_32 os=redhat6', 'platform=linux target=x86_32 os=redhat6') def test_satisfies_dependencies(self): - self.check_satisfies('mpileaks^mpich', '^mpich') - self.check_satisfies('mpileaks^zmpi', '^zmpi') + check_satisfies('mpileaks^mpich', '^mpich') + check_satisfies('mpileaks^zmpi', '^zmpi') - self.check_unsatisfiable('mpileaks^mpich', '^zmpi') - self.check_unsatisfiable('mpileaks^zmpi', '^mpich') + check_unsatisfiable('mpileaks^mpich', '^zmpi') + check_unsatisfiable('mpileaks^zmpi', '^mpich') def test_satisfies_dependency_versions(self): - self.check_satisfies('mpileaks^mpich@2.0', '^mpich@1:3') - self.check_unsatisfiable('mpileaks^mpich@1.2', '^mpich@2.0') + check_satisfies('mpileaks^mpich@2.0', '^mpich@1:3') + check_unsatisfiable('mpileaks^mpich@1.2', '^mpich@2.0') - self.check_satisfies( + check_satisfies( 'mpileaks^mpich@2.0^callpath@1.5', '^mpich@1:3^callpath@1.4:1.6') - self.check_unsatisfiable( + check_unsatisfiable( 'mpileaks^mpich@4.0^callpath@1.5', '^mpich@1:3^callpath@1.4:1.6') - self.check_unsatisfiable( + check_unsatisfiable( 'mpileaks^mpich@2.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6') - self.check_unsatisfiable( + check_unsatisfiable( 'mpileaks^mpich@4.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6') def test_satisfies_virtual_dependencies(self): - self.check_satisfies('mpileaks^mpi', '^mpi') - self.check_satisfies('mpileaks^mpi', '^mpich') + check_satisfies('mpileaks^mpi', '^mpi') + check_satisfies('mpileaks^mpi', '^mpich') - self.check_satisfies('mpileaks^mpi', '^zmpi') - self.check_unsatisfiable('mpileaks^mpich', '^zmpi') + check_satisfies('mpileaks^mpi', '^zmpi') + check_unsatisfiable('mpileaks^mpich', '^zmpi') def test_satisfies_virtual_dependency_versions(self): - self.check_satisfies('mpileaks^mpi@1.5', '^mpi@1.2:1.6') - self.check_unsatisfiable('mpileaks^mpi@3', '^mpi@1.2:1.6') + check_satisfies('mpileaks^mpi@1.5', '^mpi@1.2:1.6') + check_unsatisfiable('mpileaks^mpi@3', '^mpi@1.2:1.6') - self.check_satisfies('mpileaks^mpi@2:', '^mpich') - self.check_satisfies('mpileaks^mpi@2:', '^mpich@3.0.4') - self.check_satisfies('mpileaks^mpi@2:', '^mpich2@1.4') + check_satisfies('mpileaks^mpi@2:', '^mpich') + check_satisfies('mpileaks^mpi@2:', '^mpich@3.0.4') + check_satisfies('mpileaks^mpi@2:', '^mpich2@1.4') - self.check_satisfies('mpileaks^mpi@1:', '^mpich2') - self.check_satisfies('mpileaks^mpi@2:', '^mpich2') + check_satisfies('mpileaks^mpi@1:', '^mpich2') + check_satisfies('mpileaks^mpi@2:', '^mpich2') - self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich2@1.4') - self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich2') - self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich@1.0') + check_unsatisfiable('mpileaks^mpi@3:', '^mpich2@1.4') + check_unsatisfiable('mpileaks^mpi@3:', '^mpich2') + check_unsatisfiable('mpileaks^mpi@3:', '^mpich@1.0') def test_satisfies_matching_variant(self): - self.check_satisfies('mpich+foo', 'mpich+foo') 
- self.check_satisfies('mpich~foo', 'mpich~foo') - self.check_satisfies('mpich foo=1', 'mpich foo=1') + check_satisfies('mpich+foo', 'mpich+foo') + check_satisfies('mpich~foo', 'mpich~foo') + check_satisfies('mpich foo=1', 'mpich foo=1') # confirm that synonymous syntax works correctly - self.check_satisfies('mpich+foo', 'mpich foo=True') - self.check_satisfies('mpich foo=true', 'mpich+foo') - self.check_satisfies('mpich~foo', 'mpich foo=FALSE') - self.check_satisfies('mpich foo=False', 'mpich~foo') + check_satisfies('mpich+foo', 'mpich foo=True') + check_satisfies('mpich foo=true', 'mpich+foo') + check_satisfies('mpich~foo', 'mpich foo=FALSE') + check_satisfies('mpich foo=False', 'mpich~foo') def test_satisfies_unconstrained_variant(self): # only asked for mpich, no constraints. Either will do. - self.check_satisfies('mpich+foo', 'mpich') - self.check_satisfies('mpich~foo', 'mpich') - self.check_satisfies('mpich foo=1', 'mpich') + check_satisfies('mpich+foo', 'mpich') + check_satisfies('mpich~foo', 'mpich') + check_satisfies('mpich foo=1', 'mpich') def test_unsatisfiable_variants(self): # This case is different depending on whether the specs are concrete. # 'mpich' is not concrete: - self.check_satisfies('mpich', 'mpich+foo', False) - self.check_satisfies('mpich', 'mpich~foo', False) - self.check_satisfies('mpich', 'mpich foo=1', False) + check_satisfies('mpich', 'mpich+foo', False) + check_satisfies('mpich', 'mpich~foo', False) + check_satisfies('mpich', 'mpich foo=1', False) # 'mpich' is concrete: - self.check_unsatisfiable('mpich', 'mpich+foo', True) - self.check_unsatisfiable('mpich', 'mpich~foo', True) - self.check_unsatisfiable('mpich', 'mpich foo=1', True) + check_unsatisfiable('mpich', 'mpich+foo', True) + check_unsatisfiable('mpich', 'mpich~foo', True) + check_unsatisfiable('mpich', 'mpich foo=1', True) def test_unsatisfiable_variant_mismatch(self): # No matchi in specs - self.check_unsatisfiable('mpich~foo', 'mpich+foo') - self.check_unsatisfiable('mpich+foo', 'mpich~foo') - self.check_unsatisfiable('mpich foo=1', 'mpich foo=2') + check_unsatisfiable('mpich~foo', 'mpich+foo') + check_unsatisfiable('mpich+foo', 'mpich~foo') + check_unsatisfiable('mpich foo=1', 'mpich foo=2') def test_satisfies_matching_compiler_flag(self): - self.check_satisfies('mpich cppflags="-O3"', 'mpich cppflags="-O3"') - self.check_satisfies('mpich cppflags="-O3 -Wall"', - 'mpich cppflags="-O3 -Wall"') + check_satisfies('mpich cppflags="-O3"', 'mpich cppflags="-O3"') + check_satisfies( + 'mpich cppflags="-O3 -Wall"', 'mpich cppflags="-O3 -Wall"' + ) def test_satisfies_unconstrained_compiler_flag(self): # only asked for mpich, no constraints. Any will do. - self.check_satisfies('mpich cppflags="-O3"', 'mpich') + check_satisfies('mpich cppflags="-O3"', 'mpich') def test_unsatisfiable_compiler_flag(self): # This case is different depending on whether the specs are concrete. # 'mpich' is not concrete: - self.check_satisfies('mpich', 'mpich cppflags="-O3"', False) + check_satisfies('mpich', 'mpich cppflags="-O3"', False) # 'mpich' is concrete: - self.check_unsatisfiable('mpich', 'mpich cppflags="-O3"', True) + check_unsatisfiable('mpich', 'mpich cppflags="-O3"', True) def test_unsatisfiable_compiler_flag_mismatch(self): # No matchi in specs - self.check_unsatisfiable( + check_unsatisfiable( 'mpich cppflags="-O3"', 'mpich cppflags="-O2"') def test_satisfies_virtual(self): # Don't use check_satisfies: it checks constrain() too, and # you can't constrain a non-virtual by a virtual. 
- self.assertTrue(Spec('mpich').satisfies(Spec('mpi'))) - self.assertTrue(Spec('mpich2').satisfies(Spec('mpi'))) - self.assertTrue(Spec('zmpi').satisfies(Spec('mpi'))) + assert Spec('mpich').satisfies(Spec('mpi')) + assert Spec('mpich2').satisfies(Spec('mpi')) + assert Spec('zmpi').satisfies(Spec('mpi')) def test_satisfies_virtual_dep_with_virtual_constraint(self): """Ensure we can satisfy virtual constraints when there are multiple vdep providers in the specs.""" - self.assertTrue( - Spec('netlib-lapack ^openblas').satisfies( - 'netlib-lapack ^openblas')) - self.assertFalse( - Spec('netlib-lapack ^netlib-blas').satisfies( - 'netlib-lapack ^openblas')) - - self.assertFalse( - Spec('netlib-lapack ^openblas').satisfies( - 'netlib-lapack ^netlib-blas')) - self.assertTrue( - Spec('netlib-lapack ^netlib-blas').satisfies( - 'netlib-lapack ^netlib-blas')) + assert Spec('netlib-lapack ^openblas').satisfies( + 'netlib-lapack ^openblas' + ) + assert not Spec('netlib-lapack ^netlib-blas').satisfies( + 'netlib-lapack ^openblas' + ) + assert not Spec('netlib-lapack ^openblas').satisfies( + 'netlib-lapack ^netlib-blas' + ) + assert Spec('netlib-lapack ^netlib-blas').satisfies( + 'netlib-lapack ^netlib-blas' + ) def test_satisfies_same_spec_with_different_hash(self): """Ensure that concrete specs are matched *exactly* by hash.""" s1 = Spec('mpileaks').concretized() s2 = s1.copy() - self.assertTrue(s1.satisfies(s2)) - self.assertTrue(s2.satisfies(s1)) + assert s1.satisfies(s2) + assert s2.satisfies(s1) # Simulate specs that were installed before and after a change to # Spack's hashing algorithm. This just reverses s2's hash. s2._hash = s1.dag_hash()[-1::-1] - self.assertFalse(s1.satisfies(s2)) - self.assertFalse(s2.satisfies(s1)) + assert not s1.satisfies(s2) + assert not s2.satisfies(s1) # ======================================================================== # Indexing specs # ======================================================================== def test_self_index(self): s = Spec('callpath') - self.assertTrue(s['callpath'] == s) + assert s['callpath'] == s def test_dep_index(self): s = Spec('callpath') s.normalize() - self.assertTrue(s['callpath'] == s) - self.assertTrue(type(s['dyninst']) == Spec) - self.assertTrue(type(s['libdwarf']) == Spec) - self.assertTrue(type(s['libelf']) == Spec) - self.assertTrue(type(s['mpi']) == Spec) + assert s['callpath'] == s + assert type(s['dyninst']) == Spec + assert type(s['libdwarf']) == Spec + assert type(s['libelf']) == Spec + assert type(s['mpi']) == Spec - self.assertTrue(s['dyninst'].name == 'dyninst') - self.assertTrue(s['libdwarf'].name == 'libdwarf') - self.assertTrue(s['libelf'].name == 'libelf') - self.assertTrue(s['mpi'].name == 'mpi') + assert s['dyninst'].name == 'dyninst' + assert s['libdwarf'].name == 'libdwarf' + assert s['libelf'].name == 'libelf' + assert s['mpi'].name == 'mpi' def test_spec_contains_deps(self): s = Spec('callpath') s.normalize() - self.assertTrue('dyninst' in s) - self.assertTrue('libdwarf' in s) - self.assertTrue('libelf' in s) - self.assertTrue('mpi' in s) + assert 'dyninst' in s + assert 'libdwarf' in s + assert 'libelf' in s + assert 'mpi' in s + @pytest.mark.usefixtures('config') def test_virtual_index(self): s = Spec('callpath') s.concretize() @@ -370,133 +374,149 @@ class SpecSematicsTest(MockPackagesTest): s_zmpi = Spec('callpath ^zmpi') s_zmpi.concretize() - self.assertTrue(s['mpi'].name != 'mpi') - self.assertTrue(s_mpich['mpi'].name == 'mpich') - self.assertTrue(s_mpich2['mpi'].name == 'mpich2') - 
self.assertTrue(s_zmpi['zmpi'].name == 'zmpi') + assert s['mpi'].name != 'mpi' + assert s_mpich['mpi'].name == 'mpich' + assert s_mpich2['mpi'].name == 'mpich2' + assert s_zmpi['zmpi'].name == 'zmpi' for spec in [s, s_mpich, s_mpich2, s_zmpi]: - self.assertTrue('mpi' in spec) + assert 'mpi' in spec # ======================================================================== # Constraints # ======================================================================== def test_constrain_variants(self): - self.check_constrain('libelf@2.1:2.5', 'libelf@0:2.5', 'libelf@2.1:3') - self.check_constrain('libelf@2.1:2.5%gcc@4.5:4.6', - 'libelf@0:2.5%gcc@2:4.6', - 'libelf@2.1:3%gcc@4.5:4.7') - - self.check_constrain('libelf+debug+foo', 'libelf+debug', 'libelf+foo') - self.check_constrain('libelf+debug+foo', - 'libelf+debug', 'libelf+debug+foo') - - self.check_constrain('libelf debug=2 foo=1', - 'libelf debug=2', 'libelf foo=1') - self.check_constrain('libelf debug=2 foo=1', - 'libelf debug=2', 'libelf debug=2 foo=1') - - self.check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf~foo') - self.check_constrain('libelf+debug~foo', - 'libelf+debug', 'libelf+debug~foo') + check_constrain('libelf@2.1:2.5', 'libelf@0:2.5', 'libelf@2.1:3') + check_constrain( + 'libelf@2.1:2.5%gcc@4.5:4.6', + 'libelf@0:2.5%gcc@2:4.6', + 'libelf@2.1:3%gcc@4.5:4.7' + ) + check_constrain('libelf+debug+foo', 'libelf+debug', 'libelf+foo') + check_constrain( + 'libelf+debug+foo', 'libelf+debug', 'libelf+debug+foo' + ) + check_constrain( + 'libelf debug=2 foo=1', 'libelf debug=2', 'libelf foo=1' + ) + check_constrain( + 'libelf debug=2 foo=1', 'libelf debug=2', 'libelf debug=2 foo=1' + ) + + check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf~foo') + check_constrain( + 'libelf+debug~foo', 'libelf+debug', 'libelf+debug~foo' + ) def test_constrain_compiler_flags(self): - self.check_constrain('libelf cflags="-O3" cppflags="-Wall"', - 'libelf cflags="-O3"', 'libelf cppflags="-Wall"') - self.check_constrain('libelf cflags="-O3" cppflags="-Wall"', - 'libelf cflags="-O3"', - 'libelf cflags="-O3" cppflags="-Wall"') + check_constrain( + 'libelf cflags="-O3" cppflags="-Wall"', + 'libelf cflags="-O3"', + 'libelf cppflags="-Wall"' + ) + check_constrain( + 'libelf cflags="-O3" cppflags="-Wall"', + 'libelf cflags="-O3"', + 'libelf cflags="-O3" cppflags="-Wall"' + ) def test_constrain_architecture(self): - self.check_constrain('libelf target=default_target os=default_os', - 'libelf target=default_target os=default_os', - 'libelf target=default_target os=default_os') - self.check_constrain('libelf target=default_target os=default_os', - 'libelf', - 'libelf target=default_target os=default_os') + check_constrain( + 'libelf target=default_target os=default_os', + 'libelf target=default_target os=default_os', + 'libelf target=default_target os=default_os' + ) + check_constrain( + 'libelf target=default_target os=default_os', + 'libelf', + 'libelf target=default_target os=default_os' + ) def test_constrain_compiler(self): - self.check_constrain('libelf %gcc@4.4.7', - 'libelf %gcc@4.4.7', 'libelf %gcc@4.4.7') - self.check_constrain('libelf %gcc@4.4.7', - 'libelf', 'libelf %gcc@4.4.7') + check_constrain( + 'libelf %gcc@4.4.7', 'libelf %gcc@4.4.7', 'libelf %gcc@4.4.7' + ) + check_constrain( + 'libelf %gcc@4.4.7', 'libelf', 'libelf %gcc@4.4.7' + ) def test_invalid_constraint(self): - self.check_invalid_constraint('libelf@0:2.0', 'libelf@2.1:3') - self.check_invalid_constraint( + check_invalid_constraint('libelf@0:2.0', 'libelf@2.1:3') + 
check_invalid_constraint( 'libelf@0:2.5%gcc@4.8:4.9', 'libelf@2.1:3%gcc@4.5:4.7') - self.check_invalid_constraint('libelf+debug', 'libelf~debug') - self.check_invalid_constraint('libelf+debug~foo', 'libelf+debug+foo') - self.check_invalid_constraint('libelf debug=2', 'libelf debug=1') + check_invalid_constraint('libelf+debug', 'libelf~debug') + check_invalid_constraint('libelf+debug~foo', 'libelf+debug+foo') + check_invalid_constraint('libelf debug=2', 'libelf debug=1') - self.check_invalid_constraint( + check_invalid_constraint( 'libelf cppflags="-O3"', 'libelf cppflags="-O2"') - self.check_invalid_constraint('libelf platform=test target=be os=be', - 'libelf target=fe os=fe') + check_invalid_constraint( + 'libelf platform=test target=be os=be', 'libelf target=fe os=fe' + ) def test_constrain_changed(self): - self.check_constrain_changed('libelf', '@1.0') - self.check_constrain_changed('libelf', '@1.0:5.0') - self.check_constrain_changed('libelf', '%gcc') - self.check_constrain_changed('libelf%gcc', '%gcc@4.5') - self.check_constrain_changed('libelf', '+debug') - self.check_constrain_changed('libelf', '~debug') - self.check_constrain_changed('libelf', 'debug=2') - self.check_constrain_changed('libelf', 'cppflags="-O3"') + check_constrain_changed('libelf', '@1.0') + check_constrain_changed('libelf', '@1.0:5.0') + check_constrain_changed('libelf', '%gcc') + check_constrain_changed('libelf%gcc', '%gcc@4.5') + check_constrain_changed('libelf', '+debug') + check_constrain_changed('libelf', '~debug') + check_constrain_changed('libelf', 'debug=2') + check_constrain_changed('libelf', 'cppflags="-O3"') platform = spack.architecture.platform() - self.check_constrain_changed( + check_constrain_changed( 'libelf', 'target=' + platform.target('default_target').name) - self.check_constrain_changed( + check_constrain_changed( 'libelf', 'os=' + platform.operating_system('default_os').name) def test_constrain_not_changed(self): - self.check_constrain_not_changed('libelf', 'libelf') - self.check_constrain_not_changed('libelf@1.0', '@1.0') - self.check_constrain_not_changed('libelf@1.0:5.0', '@1.0:5.0') - self.check_constrain_not_changed('libelf%gcc', '%gcc') - self.check_constrain_not_changed('libelf%gcc@4.5', '%gcc@4.5') - self.check_constrain_not_changed('libelf+debug', '+debug') - self.check_constrain_not_changed('libelf~debug', '~debug') - self.check_constrain_not_changed('libelf debug=2', 'debug=2') - self.check_constrain_not_changed( + check_constrain_not_changed('libelf', 'libelf') + check_constrain_not_changed('libelf@1.0', '@1.0') + check_constrain_not_changed('libelf@1.0:5.0', '@1.0:5.0') + check_constrain_not_changed('libelf%gcc', '%gcc') + check_constrain_not_changed('libelf%gcc@4.5', '%gcc@4.5') + check_constrain_not_changed('libelf+debug', '+debug') + check_constrain_not_changed('libelf~debug', '~debug') + check_constrain_not_changed('libelf debug=2', 'debug=2') + check_constrain_not_changed( 'libelf cppflags="-O3"', 'cppflags="-O3"') platform = spack.architecture.platform() default_target = platform.target('default_target').name - self.check_constrain_not_changed( + check_constrain_not_changed( 'libelf target=' + default_target, 'target=' + default_target) def test_constrain_dependency_changed(self): - self.check_constrain_changed('libelf^foo', 'libelf^foo@1.0') - self.check_constrain_changed('libelf^foo', 'libelf^foo@1.0:5.0') - self.check_constrain_changed('libelf^foo', 'libelf^foo%gcc') - self.check_constrain_changed('libelf^foo%gcc', 'libelf^foo%gcc@4.5') - 
self.check_constrain_changed('libelf^foo', 'libelf^foo+debug') - self.check_constrain_changed('libelf^foo', 'libelf^foo~debug') + check_constrain_changed('libelf^foo', 'libelf^foo@1.0') + check_constrain_changed('libelf^foo', 'libelf^foo@1.0:5.0') + check_constrain_changed('libelf^foo', 'libelf^foo%gcc') + check_constrain_changed('libelf^foo%gcc', 'libelf^foo%gcc@4.5') + check_constrain_changed('libelf^foo', 'libelf^foo+debug') + check_constrain_changed('libelf^foo', 'libelf^foo~debug') platform = spack.architecture.platform() default_target = platform.target('default_target').name - self.check_constrain_changed( + check_constrain_changed( 'libelf^foo', 'libelf^foo target=' + default_target) def test_constrain_dependency_not_changed(self): - self.check_constrain_not_changed('libelf^foo@1.0', 'libelf^foo@1.0') - self.check_constrain_not_changed( + check_constrain_not_changed('libelf^foo@1.0', 'libelf^foo@1.0') + check_constrain_not_changed( 'libelf^foo@1.0:5.0', 'libelf^foo@1.0:5.0') - self.check_constrain_not_changed('libelf^foo%gcc', 'libelf^foo%gcc') - self.check_constrain_not_changed( + check_constrain_not_changed('libelf^foo%gcc', 'libelf^foo%gcc') + check_constrain_not_changed( 'libelf^foo%gcc@4.5', 'libelf^foo%gcc@4.5') - self.check_constrain_not_changed( + check_constrain_not_changed( 'libelf^foo+debug', 'libelf^foo+debug') - self.check_constrain_not_changed( + check_constrain_not_changed( 'libelf^foo~debug', 'libelf^foo~debug') - self.check_constrain_not_changed( + check_constrain_not_changed( 'libelf^foo cppflags="-O3"', 'libelf^foo cppflags="-O3"') platform = spack.architecture.platform() default_target = platform.target('default_target').name - self.check_constrain_not_changed( + check_constrain_not_changed( 'libelf^foo target=' + default_target, 'libelf^foo target=' + default_target) diff --git a/lib/spack/spack/test/spec_syntax.py b/lib/spack/spack/test/spec_syntax.py index 1e072fe970..3cf094f25a 100644 --- a/lib/spack/spack/test/spec_syntax.py +++ b/lib/spack/spack/test/spec_syntax.py @@ -22,7 +22,7 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -import unittest +import pytest import spack.spec as sp from spack.parse import Token @@ -54,7 +54,7 @@ complex_lex = [Token(sp.ID, 'mvapich_foo'), Token(sp.ID, '8.1_1e')] -class SpecSyntaxTest(unittest.TestCase): +class TestSpecSyntax(object): # ======================================================================== # Parse checks # ======================================================================== @@ -77,17 +77,22 @@ class SpecSyntaxTest(unittest.TestCase): output = sp.parse(spec) parsed = (" ".join(str(spec) for spec in output)) - self.assertEqual(expected, parsed) + assert expected == parsed def check_lex(self, tokens, spec): """Check that the provided spec parses to the provided token list.""" lex_output = sp.SpecLexer().lex(spec) for tok, spec_tok in zip(tokens, lex_output): if tok.type == sp.ID: - self.assertEqual(tok, spec_tok) + assert tok == spec_tok else: # Only check the type for non-identifiers. 
- self.assertEqual(tok.type, spec_tok.type) + assert tok.type == spec_tok.type + + def _check_raises(self, exc_type, items): + for item in items: + with pytest.raises(exc_type): + self.check_parse(item) # ======================================================================== # Parse checks @@ -107,6 +112,10 @@ class SpecSyntaxTest(unittest.TestCase): self.check_parse("openmpi^hwloc@:1.4b7-rc3") self.check_parse("openmpi^hwloc@1.2e6:1.4b7-rc3") + @pytest.mark.xfail + def test_multiple_specs(self): + self.check_parse("mvapich emacs") + def test_full_specs(self): self.check_parse( "mvapich_foo" @@ -167,88 +176,53 @@ class SpecSyntaxTest(unittest.TestCase): self.check_parse("x^y", "x@: ^y@:") def test_parse_errors(self): - self.assertRaises(SpecParseError, self.check_parse, "x@@1.2") - self.assertRaises(SpecParseError, self.check_parse, "x ^y@@1.2") - self.assertRaises(SpecParseError, self.check_parse, "x@1.2::") - self.assertRaises(SpecParseError, self.check_parse, "x::") + errors = ['x@@1.2', 'x ^y@@1.2', 'x@1.2::', 'x::'] + self._check_raises(SpecParseError, errors) def test_duplicate_variant(self): - self.assertRaises(DuplicateVariantError, - self.check_parse, "x@1.2+debug+debug") - self.assertRaises(DuplicateVariantError, - self.check_parse, "x ^y@1.2+debug debug=true") - self.assertRaises(DuplicateVariantError, self.check_parse, - "x ^y@1.2 debug=false debug=true") - self.assertRaises(DuplicateVariantError, - self.check_parse, "x ^y@1.2 debug=false~debug") - - def test_duplicate_depdendence(self): - self.assertRaises(DuplicateDependencyError, - self.check_parse, "x ^y ^y") - - def test_duplicate_compiler(self): - self.assertRaises(DuplicateCompilerSpecError, - self.check_parse, "x%intel%intel") + duplicates = [ + 'x@1.2+debug+debug', + 'x ^y@1.2+debug debug=true', + 'x ^y@1.2 debug=false debug=true', + 'x ^y@1.2 debug=false~debug' + ] + self._check_raises(DuplicateVariantError, duplicates) - self.assertRaises(DuplicateCompilerSpecError, - self.check_parse, "x%intel%gcc") - self.assertRaises(DuplicateCompilerSpecError, - self.check_parse, "x%gcc%intel") + def test_duplicate_dependency(self): + self._check_raises(DuplicateDependencyError, ["x ^y ^y"]) - self.assertRaises(DuplicateCompilerSpecError, - self.check_parse, "x ^y%intel%intel") - self.assertRaises(DuplicateCompilerSpecError, - self.check_parse, "x ^y%intel%gcc") - self.assertRaises(DuplicateCompilerSpecError, - self.check_parse, "x ^y%gcc%intel") + def test_duplicate_compiler(self): + duplicates = [ + "x%intel%intel", + "x%intel%gcc", + "x%gcc%intel", + "x ^y%intel%intel", + "x ^y%intel%gcc", + "x ^y%gcc%intel" + ] + self._check_raises(DuplicateCompilerSpecError, duplicates) def test_duplicate_architecture(self): - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "x arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64") - - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le") - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "x arch=linux-rhel7-ppc64le arch=linux-rhel7-x86_64") - - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64") - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le") + duplicates = [ + "x arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64", + "x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le", + "x arch=linux-rhel7-ppc64le arch=linux-rhel7-x86_64", + "y ^x 
arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64", + "y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le" + ] + self._check_raises(DuplicateArchitectureError, duplicates) def test_duplicate_architecture_component(self): - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "x os=fe os=fe") - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "x os=fe os=be") - - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "x target=fe target=fe") - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "x target=fe target=be") - - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "x platform=test platform=test") - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "x platform=test platform=test") - - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "x os=fe platform=test target=fe os=fe") - self.assertRaises( - DuplicateArchitectureError, self.check_parse, - "x target=be platform=test os=be os=fe") + duplicates = [ + "x os=fe os=fe", + "x os=fe os=be", + "x target=fe target=fe", + "x target=fe target=be", + "x platform=test platform=test", + "x os=fe platform=test target=fe os=fe", + "x target=be platform=test os=be os=fe" + ] + self._check_raises(DuplicateArchitectureError, duplicates) # ======================================================================== # Lex checks @@ -256,11 +230,13 @@ class SpecSyntaxTest(unittest.TestCase): def test_ambiguous(self): # This first one is ambiguous because - can be in an identifier AND # indicate disabling an option. - self.assertRaises( - AssertionError, self.check_lex, complex_lex, - "mvapich_foo" - "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug-qt_4" - "^stackwalker@8.1_1e") + with pytest.raises(AssertionError): + self.check_lex( + complex_lex, + "mvapich_foo" + "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug-qt_4" + "^stackwalker@8.1_1e" + ) # The following lexes are non-ambiguous (add a space before -qt_4) # and should all result in the tokens in complex_lex diff --git a/lib/spack/spack/test/spec_yaml.py b/lib/spack/spack/test/spec_yaml.py index 442c6e6e81..e913dc8412 100644 --- a/lib/spack/spack/test/spec_yaml.py +++ b/lib/spack/spack/test/spec_yaml.py @@ -27,155 +27,153 @@ YAML format preserves DAG informatoin in the spec. 
""" -import spack.util.spack_yaml as syaml import spack.util.spack_json as sjson +import spack.util.spack_yaml as syaml +from spack.spec import Spec from spack.util.spack_yaml import syaml_dict -from spack.spec import Spec -from spack.test.mock_packages_test import * +def check_yaml_round_trip(spec): + yaml_text = spec.to_yaml() + spec_from_yaml = Spec.from_yaml(yaml_text) + assert spec.eq_dag(spec_from_yaml) -class SpecYamlTest(MockPackagesTest): - def check_yaml_round_trip(self, spec): - yaml_text = spec.to_yaml() - spec_from_yaml = Spec.from_yaml(yaml_text) - self.assertTrue(spec.eq_dag(spec_from_yaml)) +def test_simple_spec(): + spec = Spec('mpileaks') + check_yaml_round_trip(spec) - def test_simple_spec(self): - spec = Spec('mpileaks') - self.check_yaml_round_trip(spec) - def test_normal_spec(self): - spec = Spec('mpileaks+debug~opt') - spec.normalize() - self.check_yaml_round_trip(spec) +def test_normal_spec(builtin_mock): + spec = Spec('mpileaks+debug~opt') + spec.normalize() + check_yaml_round_trip(spec) - def test_ambiguous_version_spec(self): - spec = Spec('mpileaks@1.0:5.0,6.1,7.3+debug~opt') - spec.normalize() - self.check_yaml_round_trip(spec) - def test_concrete_spec(self): - spec = Spec('mpileaks+debug~opt') - spec.concretize() - self.check_yaml_round_trip(spec) +def test_ambiguous_version_spec(builtin_mock): + spec = Spec('mpileaks@1.0:5.0,6.1,7.3+debug~opt') + spec.normalize() + check_yaml_round_trip(spec) + + +def test_concrete_spec(config, builtin_mock): + spec = Spec('mpileaks+debug~opt') + spec.concretize() + check_yaml_round_trip(spec) + + +def test_yaml_subdag(config, builtin_mock): + spec = Spec('mpileaks^mpich+debug') + spec.concretize() + yaml_spec = Spec.from_yaml(spec.to_yaml()) - def test_yaml_subdag(self): - spec = Spec('mpileaks^mpich+debug') + for dep in ('callpath', 'mpich', 'dyninst', 'libdwarf', 'libelf'): + assert spec[dep].eq_dag(yaml_spec[dep]) + + +def test_using_ordered_dict(builtin_mock): + """ Checks that dicts are ordered + + Necessary to make sure that dag_hash is stable across python + versions and processes. + """ + def descend_and_check(iterable, level=0): + from spack.util.spack_yaml import syaml_dict + from collections import Iterable, Mapping + if isinstance(iterable, Mapping): + assert isinstance(iterable, syaml_dict) + return descend_and_check(iterable.values(), level=level + 1) + max_level = level + for value in iterable: + if isinstance(value, Iterable) and not isinstance(value, str): + nlevel = descend_and_check(value, level=level + 1) + if nlevel > max_level: + max_level = nlevel + return max_level + + specs = ['mpileaks ^zmpi', 'dttop', 'dtuse'] + for spec in specs: + dag = Spec(spec) + dag.normalize() + level = descend_and_check(dag.to_node_dict()) + # level just makes sure we are doing something here + assert level >= 5 + + +def test_ordered_read_not_required_for_consistent_dag_hash( + config, builtin_mock +): + """Make sure ordered serialization isn't required to preserve hashes. + + For consistent hashes, we require that YAML and json documents + have their keys serialized in a deterministic order. However, we + don't want to require them to be serialized in order. This + ensures that is not required. 
+ """ + specs = ['mpileaks ^zmpi', 'dttop', 'dtuse'] + for spec in specs: + spec = Spec(spec) spec.concretize() - yaml_spec = Spec.from_yaml(spec.to_yaml()) - - for dep in ('callpath', 'mpich', 'dyninst', 'libdwarf', 'libelf'): - self.assertTrue(spec[dep].eq_dag(yaml_spec[dep])) - - def test_using_ordered_dict(self): - """ Checks that dicts are ordered - - Necessary to make sure that dag_hash is stable across python - versions and processes. - """ - def descend_and_check(iterable, level=0): - from spack.util.spack_yaml import syaml_dict - from collections import Iterable, Mapping - if isinstance(iterable, Mapping): - self.assertTrue(isinstance(iterable, syaml_dict)) - return descend_and_check(iterable.values(), level=level + 1) - max_level = level - for value in iterable: - if isinstance(value, Iterable) and not isinstance(value, str): - nlevel = descend_and_check(value, level=level + 1) - if nlevel > max_level: - max_level = nlevel - return max_level - - specs = ['mpileaks ^zmpi', 'dttop', 'dtuse'] - for spec in specs: - dag = Spec(spec) - dag.normalize() - level = descend_and_check(dag.to_node_dict()) - # level just makes sure we are doing something here - self.assertTrue(level >= 5) - - def test_ordered_read_not_required_for_consistent_dag_hash(self): - """Make sure ordered serialization isn't required to preserve hashes. - - For consistent hashes, we require that YAML and json documents - have their keys serialized in a deterministic order. However, we - don't want to require them to be serialized in order. This - ensures that is not reauired. - - """ - specs = ['mpileaks ^zmpi', 'dttop', 'dtuse'] - for spec in specs: - spec = Spec(spec) - spec.concretize() - - # - # Dict & corresponding YAML & JSON from the original spec. - # - spec_dict = spec.to_dict() - spec_yaml = spec.to_yaml() - spec_json = spec.to_json() - - # - # Make a spec with reversed OrderedDicts for every - # OrderedDict in the original. - # - reversed_spec_dict = reverse_all_dicts(spec.to_dict()) - - # - # Dump to YAML and JSON - # - yaml_string = syaml.dump(spec_dict, default_flow_style=False) - reversed_yaml_string = syaml.dump(reversed_spec_dict, - default_flow_style=False) - json_string = sjson.dump(spec_dict) - reversed_json_string = sjson.dump(reversed_spec_dict) - - # - # Do many consistency checks - # - - # spec yaml is ordered like the spec dict - self.assertEqual(yaml_string, spec_yaml) - self.assertEqual(json_string, spec_json) - - # reversed string is different from the original, so it - # *would* generate a different hash - self.assertNotEqual(yaml_string, reversed_yaml_string) - self.assertNotEqual(json_string, reversed_json_string) - - # build specs from the "wrongly" ordered data - round_trip_yaml_spec = Spec.from_yaml(yaml_string) - round_trip_json_spec = Spec.from_json(json_string) - round_trip_reversed_yaml_spec = Spec.from_yaml( - reversed_yaml_string) - round_trip_reversed_json_spec = Spec.from_yaml( - reversed_json_string) - - # TODO: remove this when build deps are in provenance. 
- spec = spec.copy(deps=('link', 'run')) - - # specs are equal to the original - self.assertEqual(spec, round_trip_yaml_spec) - self.assertEqual(spec, round_trip_json_spec) - self.assertEqual(spec, round_trip_reversed_yaml_spec) - self.assertEqual(spec, round_trip_reversed_json_spec) - self.assertEqual(round_trip_yaml_spec, - round_trip_reversed_yaml_spec) - self.assertEqual(round_trip_json_spec, - round_trip_reversed_json_spec) - - # dag_hashes are equal - self.assertEqual( - spec.dag_hash(), round_trip_yaml_spec.dag_hash()) - self.assertEqual( - spec.dag_hash(), round_trip_json_spec.dag_hash()) - self.assertEqual( - spec.dag_hash(), round_trip_reversed_yaml_spec.dag_hash()) - self.assertEqual( - spec.dag_hash(), round_trip_reversed_json_spec.dag_hash()) + + # + # Dict & corresponding YAML & JSON from the original spec. + # + spec_dict = spec.to_dict() + spec_yaml = spec.to_yaml() + spec_json = spec.to_json() + + # + # Make a spec with reversed OrderedDicts for every + # OrderedDict in the original. + # + reversed_spec_dict = reverse_all_dicts(spec.to_dict()) + + # + # Dump to YAML and JSON + # + yaml_string = syaml.dump(spec_dict, default_flow_style=False) + reversed_yaml_string = syaml.dump(reversed_spec_dict, + default_flow_style=False) + json_string = sjson.dump(spec_dict) + reversed_json_string = sjson.dump(reversed_spec_dict) + + # + # Do many consistency checks + # + + # spec yaml is ordered like the spec dict + assert yaml_string == spec_yaml + assert json_string == spec_json + + # reversed string is different from the original, so it + # *would* generate a different hash + assert yaml_string != reversed_yaml_string + assert json_string != reversed_json_string + + # build specs from the "wrongly" ordered data + round_trip_yaml_spec = Spec.from_yaml(yaml_string) + round_trip_json_spec = Spec.from_json(json_string) + round_trip_reversed_yaml_spec = Spec.from_yaml( + reversed_yaml_string + ) + round_trip_reversed_json_spec = Spec.from_yaml( + reversed_json_string + ) + + # TODO: remove this when build deps are in provenance. + spec = spec.copy(deps=('link', 'run')) + # specs are equal to the original + assert spec == round_trip_yaml_spec + assert spec == round_trip_json_spec + assert spec == round_trip_reversed_yaml_spec + assert spec == round_trip_reversed_json_spec + assert round_trip_yaml_spec == round_trip_reversed_yaml_spec + assert round_trip_json_spec == round_trip_reversed_json_spec + # dag_hashes are equal + assert spec.dag_hash() == round_trip_yaml_spec.dag_hash() + assert spec.dag_hash() == round_trip_json_spec.dag_hash() + assert spec.dag_hash() == round_trip_reversed_yaml_spec.dag_hash() + assert spec.dag_hash() == round_trip_reversed_json_spec.dag_hash() def reverse_all_dicts(data): diff --git a/lib/spack/spack/test/stage.py b/lib/spack/spack/test/stage.py index cfeb80dd35..5b4c46e0bf 100644 --- a/lib/spack/spack/test/stage.py +++ b/lib/spack/spack/test/stage.py @@ -22,355 +22,360 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -"""\ -Test that the Stage class works correctly. 
-""" +"""Test that the Stage class works correctly.""" +import collections import os -import shutil -import tempfile -from contextlib import * +import pytest import spack import spack.stage -from llnl.util.filesystem import * +import spack.util.executable +from llnl.util.filesystem import join_path from spack.stage import Stage -from spack.util.executable import which -from spack.test.mock_packages_test import * -_test_tmp_path = None +def check_chdir_to_source(stage, stage_name): + stage_path = get_stage_path(stage, stage_name) + archive_dir = 'test-files' + assert join_path( + os.path.realpath(stage_path), archive_dir + ) == os.getcwd() -@contextmanager -def use_tmp(use_tmp): - """Allow some test code to be executed such that spack will either use or - not use temporary space for stages. - """ - # mock up config - assert(_test_tmp_path is not None) - if use_tmp: - path = _test_tmp_path # use temporary stage +def check_expand_archive(stage, stage_name, mock_archive): + stage_path = get_stage_path(stage, stage_name) + archive_name = 'test-files.tar.gz' + archive_dir = 'test-files' + assert archive_name in os.listdir(stage_path) + assert archive_dir in os.listdir(stage_path) + + assert join_path(stage_path, archive_dir) == stage.source_path + + readme = join_path(stage_path, archive_dir, 'README.txt') + assert os.path.isfile(readme) + with open(readme) as file: + 'hello world!\n' == file.read() + + +def check_fetch(stage, stage_name): + archive_name = 'test-files.tar.gz' + stage_path = get_stage_path(stage, stage_name) + assert archive_name in os.listdir(stage_path) + assert join_path(stage_path, archive_name) == stage.fetcher.archive_file + + +def check_chdir(stage, stage_name): + stage_path = get_stage_path(stage, stage_name) + assert os.path.realpath(stage_path) == os.getcwd() + + +def check_destroy(stage, stage_name): + """Figure out whether a stage was destroyed correctly.""" + stage_path = get_stage_path(stage, stage_name) + + # check that the stage dir/link was removed. + assert not os.path.exists(stage_path) + + # tmp stage needs to remove tmp dir too. + if spack.stage._use_tmp_stage: + target = os.path.realpath(stage_path) + assert not os.path.exists(target) + + +def check_setup(stage, stage_name, archive): + """Figure out whether a stage was set up correctly.""" + stage_path = get_stage_path(stage, stage_name) + + # Ensure stage was created in the spack stage directory + assert os.path.isdir(stage_path) + + if spack.stage.get_tmp_root(): + # Check that the stage dir is really a symlink. + assert os.path.islink(stage_path) + + # Make sure it points to a valid directory + target = os.path.realpath(stage_path) + assert os.path.isdir(target) + assert not os.path.islink(target) + + # Make sure the directory is in the place we asked it to + # be (see setUp, tearDown, and use_tmp) + assert target.startswith(str(archive.test_tmp_dir)) + else: - path = spack.stage_path # Use Spack's stage dir (no links) + # Make sure the stage path is NOT a link for a non-tmp stage + assert os.path.islink(stage_path) - spack.config.update_config( - 'config', {'build_stage': [path]}, scope='user') - yield +def get_stage_path(stage, stage_name): + """Figure out where a stage should be living. This depends on + whether it's named. + """ + if stage_name is not None: + # If it is a named stage, we know where the stage should be + return join_path(spack.stage_path, stage_name) + else: + # If it's unnamed, ensure that we ran mkdtemp in the right spot. 
+ assert stage.path is not None + assert stage.path.startswith(spack.stage_path) + return stage.path -def fail_search_fn(): - raise Exception("This should not have been called") - - -class FailingFetchStrategy(spack.fetch_strategy.FetchStrategy): - def fetch(self): - raise spack.fetch_strategy.FailedDownloadError( - "", - "This implementation of FetchStrategy always fails") - - -class MockSearchFunction(object): - def __init__(self): - self.performed_search = False - - def __call__(self): - self.performed_search = True - return [] - - -class StageTest(MockPackagesTest): - - def setUp(self): - """This sets up a mock archive to fetch, and a mock temp space for use - by the Stage class. It doesn't actually create the Stage -- that - is done by individual tests. - """ - super(StageTest, self).setUp() - - global _test_tmp_path - - # - # Mock up a stage area that looks like this: - # - # TMPDIR/ test_files_dir - # tmp/ test_tmp_path (where stage should be) - # test-files/ archive_dir_path - # README.txt test_readme (contains "hello world!\n") - # test-files.tar.gz archive_url = file:///path/to/this - # - self.test_files_dir = tempfile.mkdtemp() - self.test_tmp_path = os.path.realpath( - os.path.join(self.test_files_dir, 'tmp')) - _test_tmp_path = self.test_tmp_path - - # set _test_tmp_path as the default test directory to use for stages. - spack.config.update_config( - 'config', {'build_stage': [_test_tmp_path]}, scope='user') - - self.archive_dir = 'test-files' - self.archive_name = self.archive_dir + '.tar.gz' - archive_dir_path = os.path.join(self.test_files_dir, - self.archive_dir) - self.archive_url = 'file://' + os.path.join(self.test_files_dir, - self.archive_name) - test_readme = join_path(archive_dir_path, 'README.txt') - self.readme_text = "hello world!\n" - - self.stage_name = 'spack-test-stage' - - mkdirp(archive_dir_path) - mkdirp(self.test_tmp_path) - - with open(test_readme, 'w') as readme: - readme.write(self.readme_text) - - with working_dir(self.test_files_dir): - tar = which('tar', required=True) - tar('czf', self.archive_name, self.archive_dir) - - # Make spack use the test environment for tmp stuff. - self._old_tmp_root = spack.stage._tmp_root - self._old_use_tmp_stage = spack.stage._use_tmp_stage - spack.stage._tmp_root = None - spack.stage._use_tmp_stage = True - - # record this since this test changes to directories that will - # be removed. - self.working_dir = os.getcwd() - - def tearDown(self): - """Blows away the test environment directory.""" - super(StageTest, self).tearDown() - - shutil.rmtree(self.test_files_dir, ignore_errors=True) - - # chdir back to original working dir - os.chdir(self.working_dir) - - # restore spack's original tmp environment - spack.stage._tmp_root = self._old_tmp_root - spack.stage._use_tmp_stage = self._old_use_tmp_stage - - def get_stage_path(self, stage, stage_name): - """Figure out where a stage should be living. This depends on - whether it's named. - """ - if stage_name is not None: - # If it is a named stage, we know where the stage should be - return join_path(spack.stage_path, stage_name) - else: - # If it's unnamed, ensure that we ran mkdtemp in the right spot. 
- self.assertTrue(stage.path is not None) - self.assertTrue(stage.path.startswith(spack.stage_path)) - return stage.path - - def check_setup(self, stage, stage_name): - """Figure out whether a stage was set up correctly.""" - stage_path = self.get_stage_path(stage, stage_name) - - # Ensure stage was created in the spack stage directory - self.assertTrue(os.path.isdir(stage_path)) - - if spack.stage.get_tmp_root(): - # Check that the stage dir is really a symlink. - self.assertTrue(os.path.islink(stage_path)) - - # Make sure it points to a valid directory - target = os.path.realpath(stage_path) - self.assertTrue(os.path.isdir(target)) - self.assertFalse(os.path.islink(target)) - - # Make sure the directory is in the place we asked it to - # be (see setUp, tearDown, and use_tmp) - self.assertTrue(target.startswith(self.test_tmp_path)) - - else: - # Make sure the stage path is NOT a link for a non-tmp stage - self.assertFalse(os.path.islink(stage_path)) - - def check_fetch(self, stage, stage_name): - stage_path = self.get_stage_path(stage, stage_name) - self.assertTrue(self.archive_name in os.listdir(stage_path)) - self.assertEqual(join_path(stage_path, self.archive_name), - stage.fetcher.archive_file) - - def check_expand_archive(self, stage, stage_name): - stage_path = self.get_stage_path(stage, stage_name) - self.assertTrue(self.archive_name in os.listdir(stage_path)) - self.assertTrue(self.archive_dir in os.listdir(stage_path)) - - self.assertEqual( - join_path(stage_path, self.archive_dir), - stage.source_path) - - readme = join_path(stage_path, self.archive_dir, 'README.txt') - self.assertTrue(os.path.isfile(readme)) - - with open(readme) as file: - self.assertEqual(self.readme_text, file.read()) - - def check_chdir(self, stage, stage_name): - stage_path = self.get_stage_path(stage, stage_name) - self.assertEqual(os.path.realpath(stage_path), os.getcwd()) - - def check_chdir_to_source(self, stage, stage_name): - stage_path = self.get_stage_path(stage, stage_name) - self.assertEqual( - join_path(os.path.realpath(stage_path), self.archive_dir), - os.getcwd()) - - def check_destroy(self, stage, stage_name): - """Figure out whether a stage was destroyed correctly.""" - stage_path = self.get_stage_path(stage, stage_name) - - # check that the stage dir/link was removed. - self.assertFalse(os.path.exists(stage_path)) - - # tmp stage needs to remove tmp dir too. 
- if spack.stage._use_tmp_stage: - target = os.path.realpath(stage_path) - self.assertFalse(os.path.exists(target)) - - def test_setup_and_destroy_name_with_tmp(self): - with use_tmp(True): - with Stage(self.archive_url, name=self.stage_name) as stage: - self.check_setup(stage, self.stage_name) - self.check_destroy(stage, self.stage_name) - - def test_setup_and_destroy_name_without_tmp(self): - with use_tmp(False): - with Stage(self.archive_url, name=self.stage_name) as stage: - self.check_setup(stage, self.stage_name) - self.check_destroy(stage, self.stage_name) - - def test_setup_and_destroy_no_name_with_tmp(self): - with use_tmp(True): - with Stage(self.archive_url) as stage: - self.check_setup(stage, None) - self.check_destroy(stage, None) - - def test_setup_and_destroy_no_name_without_tmp(self): - with use_tmp(False): - with Stage(self.archive_url) as stage: - self.check_setup(stage, None) - self.check_destroy(stage, None) - - def test_chdir(self): - with Stage(self.archive_url, name=self.stage_name) as stage: +@pytest.fixture() +def tmpdir_for_stage(mock_archive): + """Uses a temporary directory for staging""" + current = spack.stage_path + spack.config.update_config( + 'config', + {'build_stage': [str(mock_archive.test_tmp_dir)]}, + scope='user' + ) + yield + spack.config.update_config( + 'config', {'build_stage': [current]}, scope='user' + ) + + +@pytest.fixture() +def mock_archive(tmpdir, monkeypatch): + """Creates a mock archive with the structure expected by the tests""" + # Mock up a stage area that looks like this: + # + # TMPDIR/ test_files_dir + # tmp/ test_tmp_path (where stage should be) + # test-files/ archive_dir_path + # README.txt test_readme (contains "hello world!\n") + # test-files.tar.gz archive_url = file:///path/to/this + # + test_tmp_path = tmpdir.join('tmp') + # set _test_tmp_path as the default test directory to use for stages. + spack.config.update_config( + 'config', {'build_stage': [str(test_tmp_path)]}, scope='user' + ) + + archive_dir = tmpdir.join('test-files') + archive_name = 'test-files.tar.gz' + archive = tmpdir.join(archive_name) + archive_url = 'file://' + str(archive) + test_readme = archive_dir.join('README.txt') + archive_dir.ensure(dir=True) + test_tmp_path.ensure(dir=True) + test_readme.write('hello world!\n') + + current = tmpdir.chdir() + tar = spack.util.executable.which('tar', required=True) + tar('czf', str(archive_name), 'test-files') + current.chdir() + + # Make spack use the test environment for tmp stuff. + monkeypatch.setattr(spack.stage, '_tmp_root', None) + monkeypatch.setattr(spack.stage, '_use_tmp_stage', True) + + Archive = collections.namedtuple( + 'Archive', ['url', 'tmpdir', 'test_tmp_dir', 'archive_dir'] + ) + yield Archive( + url=archive_url, + tmpdir=tmpdir, + test_tmp_dir=test_tmp_path, + archive_dir=archive_dir + ) + # record this since this test changes to directories that will + # be removed. + current.chdir() + + +@pytest.fixture() +def failing_search_fn(): + """Returns a search function that fails! 
Always!""" + def _mock(): + raise Exception("This should not have been called") + return _mock + + +@pytest.fixture() +def failing_fetch_strategy(): + """Returns a fetch strategy that fails.""" + class FailingFetchStrategy(spack.fetch_strategy.FetchStrategy): + def fetch(self): + raise spack.fetch_strategy.FailedDownloadError( + "", + "This implementation of FetchStrategy always fails" + ) + return FailingFetchStrategy() + + +@pytest.fixture() +def search_fn(): + """Returns a search function that always succeeds.""" + class _Mock(object): + performed_search = False + + def __call__(self): + self.performed_search = True + return [] + + return _Mock() + + +@pytest.mark.usefixtures('builtin_mock') +class TestStage(object): + + stage_name = 'spack-test-stage' + + @pytest.mark.usefixtures('tmpdir_for_stage') + def test_setup_and_destroy_name_with_tmp(self, mock_archive): + with Stage(mock_archive.url, name=self.stage_name) as stage: + check_setup(stage, self.stage_name, mock_archive) + check_destroy(stage, self.stage_name) + + def test_setup_and_destroy_name_without_tmp(self, mock_archive): + with Stage(mock_archive.url, name=self.stage_name) as stage: + check_setup(stage, self.stage_name, mock_archive) + check_destroy(stage, self.stage_name) + + @pytest.mark.usefixtures('tmpdir_for_stage') + def test_setup_and_destroy_no_name_with_tmp(self, mock_archive): + with Stage(mock_archive.url) as stage: + check_setup(stage, None, mock_archive) + check_destroy(stage, None) + + def test_setup_and_destroy_no_name_without_tmp(self, mock_archive): + with Stage(mock_archive.url) as stage: + check_setup(stage, None, mock_archive) + check_destroy(stage, None) + + def test_chdir(self, mock_archive): + with Stage(mock_archive.url, name=self.stage_name) as stage: stage.chdir() - self.check_setup(stage, self.stage_name) - self.check_chdir(stage, self.stage_name) - self.check_destroy(stage, self.stage_name) + check_setup(stage, self.stage_name, mock_archive) + check_chdir(stage, self.stage_name) + check_destroy(stage, self.stage_name) - def test_fetch(self): - with Stage(self.archive_url, name=self.stage_name) as stage: + def test_fetch(self, mock_archive): + with Stage(mock_archive.url, name=self.stage_name) as stage: stage.fetch() - self.check_setup(stage, self.stage_name) - self.check_chdir(stage, self.stage_name) - self.check_fetch(stage, self.stage_name) - self.check_destroy(stage, self.stage_name) - - def test_no_search_if_default_succeeds(self): - with Stage(self.archive_url, name=self.stage_name, - search_fn=fail_search_fn) as stage: + check_setup(stage, self.stage_name, mock_archive) + check_chdir(stage, self.stage_name) + check_fetch(stage, self.stage_name) + check_destroy(stage, self.stage_name) + + def test_no_search_if_default_succeeds( + self, mock_archive, failing_search_fn + ): + with Stage( + mock_archive.url, + name=self.stage_name, + search_fn=failing_search_fn + ) as stage: stage.fetch() - self.check_destroy(stage, self.stage_name) - - def test_no_search_mirror_only(self): - with Stage(FailingFetchStrategy(), name=self.stage_name, - search_fn=fail_search_fn) as stage: + check_destroy(stage, self.stage_name) + + def test_no_search_mirror_only( + self, failing_fetch_strategy, failing_search_fn + ): + with Stage( + failing_fetch_strategy, + name=self.stage_name, + search_fn=failing_search_fn + ) as stage: try: stage.fetch(mirror_only=True) except spack.fetch_strategy.FetchError: pass - self.check_destroy(stage, self.stage_name) - - def test_search_if_default_fails(self): - test_search = 
MockSearchFunction() - with Stage(FailingFetchStrategy(), name=self.stage_name, - search_fn=test_search) as stage: + check_destroy(stage, self.stage_name) + + def test_search_if_default_fails(self, failing_fetch_strategy, search_fn): + with Stage( + failing_fetch_strategy, + name=self.stage_name, + search_fn=search_fn + ) as stage: try: stage.fetch(mirror_only=False) except spack.fetch_strategy.FetchError: pass - self.check_destroy(stage, self.stage_name) - self.assertTrue(test_search.performed_search) + check_destroy(stage, self.stage_name) + assert search_fn.performed_search - def test_expand_archive(self): - with Stage(self.archive_url, name=self.stage_name) as stage: + def test_expand_archive(self, mock_archive): + with Stage(mock_archive.url, name=self.stage_name) as stage: stage.fetch() - self.check_setup(stage, self.stage_name) - self.check_fetch(stage, self.stage_name) + check_setup(stage, self.stage_name, mock_archive) + check_fetch(stage, self.stage_name) stage.expand_archive() - self.check_expand_archive(stage, self.stage_name) - self.check_destroy(stage, self.stage_name) + check_expand_archive(stage, self.stage_name, mock_archive) + check_destroy(stage, self.stage_name) - def test_expand_archive_with_chdir(self): - with Stage(self.archive_url, name=self.stage_name) as stage: + def test_expand_archive_with_chdir(self, mock_archive): + with Stage(mock_archive.url, name=self.stage_name) as stage: stage.fetch() - self.check_setup(stage, self.stage_name) - self.check_fetch(stage, self.stage_name) + check_setup(stage, self.stage_name, mock_archive) + check_fetch(stage, self.stage_name) stage.expand_archive() stage.chdir_to_source() - self.check_expand_archive(stage, self.stage_name) - self.check_chdir_to_source(stage, self.stage_name) - self.check_destroy(stage, self.stage_name) + check_expand_archive(stage, self.stage_name, mock_archive) + check_chdir_to_source(stage, self.stage_name) + check_destroy(stage, self.stage_name) - def test_restage(self): - with Stage(self.archive_url, name=self.stage_name) as stage: + def test_restage(self, mock_archive): + with Stage(mock_archive.url, name=self.stage_name) as stage: stage.fetch() stage.expand_archive() stage.chdir_to_source() - self.check_expand_archive(stage, self.stage_name) - self.check_chdir_to_source(stage, self.stage_name) + check_expand_archive(stage, self.stage_name, mock_archive) + check_chdir_to_source(stage, self.stage_name) # Try to make a file in the old archive dir with open('foobar', 'w') as file: file.write("this file is to be destroyed.") - self.assertTrue('foobar' in os.listdir(stage.source_path)) + assert 'foobar' in os.listdir(stage.source_path) # Make sure the file is not there after restage. 
stage.restage() - self.check_chdir(stage, self.stage_name) - self.check_fetch(stage, self.stage_name) + check_chdir(stage, self.stage_name) + check_fetch(stage, self.stage_name) stage.chdir_to_source() - self.check_chdir_to_source(stage, self.stage_name) - self.assertFalse('foobar' in os.listdir(stage.source_path)) - self.check_destroy(stage, self.stage_name) - - def test_no_keep_without_exceptions(self): - with Stage(self.archive_url, - name=self.stage_name, keep=False) as stage: + check_chdir_to_source(stage, self.stage_name) + assert 'foobar' not in os.listdir(stage.source_path) + check_destroy(stage, self.stage_name) + + def test_no_keep_without_exceptions(self, mock_archive): + with Stage( + mock_archive.url, name=self.stage_name, keep=False + ) as stage: pass - self.check_destroy(stage, self.stage_name) + check_destroy(stage, self.stage_name) - def test_keep_without_exceptions(self): - with Stage(self.archive_url, - name=self.stage_name, keep=True) as stage: + def test_keep_without_exceptions(self, mock_archive): + with Stage( + mock_archive.url, name=self.stage_name, keep=True + ) as stage: pass - path = self.get_stage_path(stage, self.stage_name) - self.assertTrue(os.path.isdir(path)) + path = get_stage_path(stage, self.stage_name) + assert os.path.isdir(path) - def test_no_keep_with_exceptions(self): + def test_no_keep_with_exceptions(self, mock_archive): + class ThisMustFailHere(Exception): + pass try: - with Stage(self.archive_url, - name=self.stage_name, keep=False) as stage: - raise Exception() - - path = self.get_stage_path(stage, self.stage_name) - self.assertTrue(os.path.isdir(path)) - except: - pass # ignore here. - - def test_keep_exceptions(self): + with Stage( + mock_archive.url, name=self.stage_name, keep=False + ) as stage: + raise ThisMustFailHere() + except ThisMustFailHere: + path = get_stage_path(stage, self.stage_name) + assert os.path.isdir(path) + + def test_keep_exceptions(self, mock_archive): + class ThisMustFailHere(Exception): + pass try: - with Stage(self.archive_url, - name=self.stage_name, keep=True) as stage: - raise Exception() - - path = self.get_stage_path(stage, self.stage_name) - self.assertTrue(os.path.isdir(path)) - except: - pass # ignore here. 
+ with Stage( + mock_archive.url, name=self.stage_name, keep=True + ) as stage: + raise ThisMustFailHere() + except ThisMustFailHere: + path = get_stage_path(stage, self.stage_name) + assert os.path.isdir(path) diff --git a/lib/spack/spack/test/svn_fetch.py b/lib/spack/spack/test/svn_fetch.py index 01ffc488a7..962a150909 100644 --- a/lib/spack/spack/test/svn_fetch.py +++ b/lib/spack/spack/test/svn_fetch.py @@ -23,87 +23,62 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import os -import re -import spack -from spack.test.mock_repo import svn, MockSvnRepo -from spack.version import ver -from spack.test.mock_packages_test import * +import pytest +import spack from llnl.util.filesystem import * +from spack.spec import Spec +from spack.version import ver -class SvnFetchTest(MockPackagesTest): - """Tests fetching from a dummy git repository.""" - - def setUp(self): - """Create an svn repository with two revisions.""" - super(SvnFetchTest, self).setUp() - - self.repo = MockSvnRepo() - - spec = Spec('svn-test') - spec.concretize() - self.pkg = spack.repo.get(spec, new=True) - - def tearDown(self): - """Destroy the stage space used by this test.""" - super(SvnFetchTest, self).tearDown() - self.repo.destroy() - - def assert_rev(self, rev): - """Check that the current revision is equal to the supplied rev.""" - def get_rev(): - output = svn('info', output=str) - self.assertTrue("Revision" in output) - for line in output.split('\n'): - match = re.match(r'Revision: (\d+)', line) - if match: - return match.group(1) - self.assertEqual(get_rev(), rev) - - def try_fetch(self, rev, test_file, args): - """Tries to: - - 1. Fetch the repo using a fetch strategy constructed with - supplied args. - 2. Check if the test_file is in the checked out repository. - 3. Assert that the repository is at the revision supplied. - 4. Add and remove some files, then reset the repo, and - ensure it's all there again. - """ - self.pkg.versions[ver('svn')] = args - - with self.pkg.stage: - self.pkg.do_stage() - self.assert_rev(rev) - - file_path = join_path(self.pkg.stage.source_path, test_file) - self.assertTrue(os.path.isdir(self.pkg.stage.source_path)) - self.assertTrue(os.path.isfile(file_path)) - - os.unlink(file_path) - self.assertFalse(os.path.isfile(file_path)) - - untracked = 'foobarbaz' - touch(untracked) - self.assertTrue(os.path.isfile(untracked)) - self.pkg.do_restage() - self.assertFalse(os.path.isfile(untracked)) - - self.assertTrue(os.path.isdir(self.pkg.stage.source_path)) - self.assertTrue(os.path.isfile(file_path)) - - self.assert_rev(rev) - - def test_fetch_default(self): - """Test a default checkout and make sure it's on rev 1""" - self.try_fetch(self.repo.r1, self.repo.r1_file, { - 'svn': self.repo.url - }) - - def test_fetch_r1(self): - """Test fetching an older revision (0).""" - self.try_fetch(self.repo.r0, self.repo.r0_file, { - 'svn': self.repo.url, - 'revision': self.repo.r0 - }) +@pytest.fixture(params=['default', 'rev0']) +def type_of_test(request): + """Returns one of the test type available for the mock_hg_repository""" + return request.param + + +def test_fetch( + type_of_test, + mock_svn_repository, + config, + refresh_builtin_mock +): + """Tries to: + + 1. Fetch the repo using a fetch strategy constructed with + supplied args (they depend on type_of_test). + 2. Check if the test_file is in the checked out repository. + 3. Assert that the repository is at the revision supplied. + 4. 
Add and remove some files, then reset the repo, and + ensure it's all there again. + """ + # Retrieve the right test parameters + t = mock_svn_repository.checks[type_of_test] + h = mock_svn_repository.hash + # Construct the package under test + spec = Spec('hg-test') + spec.concretize() + pkg = spack.repo.get(spec, new=True) + pkg.versions[ver('hg')] = t.args + # Enter the stage directory and check some properties + with pkg.stage: + pkg.do_stage() + assert h() == t.revision + + file_path = join_path(pkg.stage.source_path, t.file) + assert os.path.isdir(pkg.stage.source_path) + assert os.path.isfile(file_path) + + os.unlink(file_path) + assert not os.path.isfile(file_path) + + untracked_file = 'foobarbaz' + touch(untracked_file) + assert os.path.isfile(untracked_file) + pkg.do_restage() + assert not os.path.isfile(untracked_file) + + assert os.path.isdir(pkg.stage.source_path) + assert os.path.isfile(file_path) + + assert h() == t.revision diff --git a/lib/spack/spack/test/tally_plugin.py b/lib/spack/spack/test/tally_plugin.py deleted file mode 100644 index d848f2cb9f..0000000000 --- a/lib/spack/spack/test/tally_plugin.py +++ /dev/null @@ -1,64 +0,0 @@ -############################################################################## -# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# -# This file is part of Spack. -# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. -# LLNL-CODE-647188 -# -# For details, see https://github.com/llnl/spack -# Please also see the LICENSE file for our notice and the LGPL. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License (as -# published by the Free Software Foundation) version 2.1, February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and -# conditions of the GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -############################################################################## -import os - -from nose.plugins import Plugin - - -class Tally(Plugin): - name = 'tally' - - def __init__(self): - super(Tally, self).__init__() - self.successCount = 0 - self.failCount = 0 - self.errorCount = 0 - self.error_list = [] - self.fail_list = [] - - @property - def numberOfTestsRun(self): - """Excludes skipped tests""" - return self.errorCount + self.failCount + self.successCount - - def options(self, parser, env=os.environ): - super(Tally, self).options(parser, env=env) - - def configure(self, options, conf): - super(Tally, self).configure(options, conf) - - def addSuccess(self, test): - self.successCount += 1 - - def addError(self, test, err): - self.errorCount += 1 - self.error_list.append(test) - - def addFailure(self, test, err): - self.failCount += 1 - self.fail_list.append(test) - - def finalize(self, result): - pass diff --git a/lib/spack/spack/test/url_extrapolate.py b/lib/spack/spack/test/url_extrapolate.py index ca14dab958..5f5cf555ae 100644 --- a/lib/spack/spack/test/url_extrapolate.py +++ b/lib/spack/spack/test/url_extrapolate.py @@ -22,11 +22,12 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -"""\ -Tests ability of spack to extrapolate URL versions from existing versions. +"""Tests ability of spack to extrapolate URL versions from +existing versions. """ +import unittest + import spack.url as url -from spack.test.mock_packages_test import * class UrlExtrapolateTest(unittest.TestCase): diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000000..0d8d2b271f --- /dev/null +++ b/pytest.ini @@ -0,0 +1,5 @@ +# content of pytest.ini +[pytest] +addopts = --durations=20 -ra +testpaths = lib/spack/spack/test +python_files = *.py \ No newline at end of file diff --git a/share/spack/qa/changed_files b/share/spack/qa/changed_files deleted file mode 100755 index c1fa55c053..0000000000 --- a/share/spack/qa/changed_files +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -# -# Description: -# Returns a list of changed files. -# -# Usage: -# changed_files [ ...] -# changed_files [ ...] -# changed_files ["*." ...] -# -# Options: -# Directories, files, or globs to search for changed files. -# - -# Move to root directory of Spack -# Allows script to be run from anywhere -SPACK_ROOT="$(dirname "$0")/../../.." -cd "$SPACK_ROOT" - -# Add changed files that have been committed since branching off of develop -changed=($(git diff --name-only --diff-filter=ACMR develop... 
-- "$@")) -# Add changed files that have been staged but not yet committed -changed+=($(git diff --name-only --diff-filter=ACMR --cached -- "$@")) -# Add changed files that are unstaged -changed+=($(git diff --name-only --diff-filter=ACMR -- "$@")) -# Add new files that are untracked -changed+=($(git ls-files --exclude-standard --other -- "$@")) - -# Return array -# Ensure that each file in the array is unique -printf '%s\n' "${changed[@]}" | sort -u diff --git a/share/spack/qa/run-unit-tests b/share/spack/qa/run-unit-tests index 6da919e18d..0728614bc8 100755 --- a/share/spack/qa/run-unit-tests +++ b/share/spack/qa/run-unit-tests @@ -43,4 +43,9 @@ spack config get compilers spack install -v libdwarf # Run unit tests with code coverage -coverage run bin/spack test "$@" +if [[ "$TRAVIS_PYTHON_VERSION" == 2.7 ]]; then + coverage run bin/spack test "$@" + coverage combine +else + spack test "$@" +fi -- cgit v1.2.3-60-g2f50 From d6390c159f979179be9e0231b3ba1916e579ae55 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Thu, 29 Dec 2016 11:59:45 -0800 Subject: Fix issues related to the switchover to pytest. (#2685) - Remove stale reference to `import nose` from `bin/spack` script. - Add `py` to externals (missing dependency for `pytest`) --- bin/spack | 8 - lib/spack/external/__init__.py | 7 +- lib/spack/external/py/AUTHORS | 24 + lib/spack/external/py/LICENSE | 19 + lib/spack/external/py/README.rst | 21 + lib/spack/external/py/__init__.py | 150 ++++ lib/spack/external/py/__metainfo.py | 2 + lib/spack/external/py/_apipkg.py | 181 ++++ lib/spack/external/py/_builtin.py | 248 ++++++ lib/spack/external/py/_code/__init__.py | 1 + lib/spack/external/py/_code/_assertionnew.py | 339 +++++++ lib/spack/external/py/_code/_assertionold.py | 555 ++++++++++++ lib/spack/external/py/_code/_py2traceback.py | 79 ++ lib/spack/external/py/_code/assertion.py | 94 ++ lib/spack/external/py/_code/code.py | 787 ++++++++++++++++ lib/spack/external/py/_code/source.py | 411 +++++++++ lib/spack/external/py/_error.py | 89 ++ lib/spack/external/py/_iniconfig.py | 162 ++++ lib/spack/external/py/_io/__init__.py | 1 + lib/spack/external/py/_io/capture.py | 371 ++++++++ lib/spack/external/py/_io/saferepr.py | 71 ++ lib/spack/external/py/_io/terminalwriter.py | 357 ++++++++ lib/spack/external/py/_log/__init__.py | 2 + lib/spack/external/py/_log/log.py | 186 ++++ lib/spack/external/py/_log/warning.py | 76 ++ lib/spack/external/py/_path/__init__.py | 1 + lib/spack/external/py/_path/cacheutil.py | 114 +++ lib/spack/external/py/_path/common.py | 439 +++++++++ lib/spack/external/py/_path/local.py | 928 +++++++++++++++++++ lib/spack/external/py/_path/svnurl.py | 380 ++++++++ lib/spack/external/py/_path/svnwc.py | 1240 ++++++++++++++++++++++++++ lib/spack/external/py/_process/__init__.py | 1 + lib/spack/external/py/_process/cmdexec.py | 49 + lib/spack/external/py/_process/forkedfunc.py | 120 +++ lib/spack/external/py/_process/killproc.py | 23 + lib/spack/external/py/_std.py | 18 + lib/spack/external/py/_xmlgen.py | 255 ++++++ lib/spack/external/py/test.py | 10 + 38 files changed, 7809 insertions(+), 10 deletions(-) create mode 100644 lib/spack/external/py/AUTHORS create mode 100644 lib/spack/external/py/LICENSE create mode 100644 lib/spack/external/py/README.rst create mode 100644 lib/spack/external/py/__init__.py create mode 100644 lib/spack/external/py/__metainfo.py create mode 100644 lib/spack/external/py/_apipkg.py create mode 100644 lib/spack/external/py/_builtin.py create mode 100644 lib/spack/external/py/_code/__init__.py create 
mode 100644 lib/spack/external/py/_code/_assertionnew.py create mode 100644 lib/spack/external/py/_code/_assertionold.py create mode 100644 lib/spack/external/py/_code/_py2traceback.py create mode 100644 lib/spack/external/py/_code/assertion.py create mode 100644 lib/spack/external/py/_code/code.py create mode 100644 lib/spack/external/py/_code/source.py create mode 100644 lib/spack/external/py/_error.py create mode 100644 lib/spack/external/py/_iniconfig.py create mode 100644 lib/spack/external/py/_io/__init__.py create mode 100644 lib/spack/external/py/_io/capture.py create mode 100644 lib/spack/external/py/_io/saferepr.py create mode 100644 lib/spack/external/py/_io/terminalwriter.py create mode 100644 lib/spack/external/py/_log/__init__.py create mode 100644 lib/spack/external/py/_log/log.py create mode 100644 lib/spack/external/py/_log/warning.py create mode 100644 lib/spack/external/py/_path/__init__.py create mode 100644 lib/spack/external/py/_path/cacheutil.py create mode 100644 lib/spack/external/py/_path/common.py create mode 100644 lib/spack/external/py/_path/local.py create mode 100644 lib/spack/external/py/_path/svnurl.py create mode 100644 lib/spack/external/py/_path/svnwc.py create mode 100644 lib/spack/external/py/_process/__init__.py create mode 100644 lib/spack/external/py/_process/cmdexec.py create mode 100644 lib/spack/external/py/_process/forkedfunc.py create mode 100644 lib/spack/external/py/_process/killproc.py create mode 100644 lib/spack/external/py/_std.py create mode 100644 lib/spack/external/py/_xmlgen.py create mode 100644 lib/spack/external/py/test.py (limited to 'bin') diff --git a/bin/spack b/bin/spack index 2ff55a486b..66bebe57e7 100755 --- a/bin/spack +++ b/bin/spack @@ -46,14 +46,6 @@ sys.path.insert(0, SPACK_LIB_PATH) SPACK_EXTERNAL_LIBS = os.path.join(SPACK_LIB_PATH, "external") sys.path.insert(0, SPACK_EXTERNAL_LIBS) -import warnings -# Avoid warnings when nose is installed with the python exe being used to run -# spack. Note this must be done after Spack's external libs directory is added -# to sys.path. -with warnings.catch_warnings(): - warnings.filterwarnings("ignore", ".*nose was already imported") - import nose - # Quick and dirty check to clean orphaned .pyc files left over from # previous revisions. These files were present in earlier versions of # Spack, were removed, but shadow system modules that Spack still diff --git a/lib/spack/external/__init__.py b/lib/spack/external/__init__.py index 49886ae595..48fe4ec5ac 100644 --- a/lib/spack/external/__init__.py +++ b/lib/spack/external/__init__.py @@ -35,12 +35,15 @@ So far: jsonschema: An implementation of JSON Schema for Python. - pytest: Testing framework used by Spack. - ordereddict: We include our own version to be Python 2.6 compatible. + py: Needed by pytest. Library with cross-python path, + ini-parsing, io, code, and log facilities. + pyqver2: External script to query required python version of python source code. Used for ensuring 2.6 compatibility. + pytest: Testing framework used by Spack. + yaml: Used for config files. 
""" diff --git a/lib/spack/external/py/AUTHORS b/lib/spack/external/py/AUTHORS new file mode 100644 index 0000000000..8c0cf9b71b --- /dev/null +++ b/lib/spack/external/py/AUTHORS @@ -0,0 +1,24 @@ +Holger Krekel, holger at merlinux eu +Benjamin Peterson, benjamin at python org +Ronny Pfannschmidt, Ronny.Pfannschmidt at gmx de +Guido Wesdorp, johnny at johnnydebris net +Samuele Pedroni, pedronis at openend se +Carl Friedrich Bolz, cfbolz at gmx de +Armin Rigo, arigo at tunes org +Maciek Fijalkowski, fijal at genesilico pl +Brian Dorsey, briandorsey at gmail com +Floris Bruynooghe, flub at devork be +merlinux GmbH, Germany, office at merlinux eu + +Contributors include:: + +Ross Lawley +Ralf Schmitt +Chris Lamb +Harald Armin Massa +Martijn Faassen +Ian Bicking +Jan Balster +Grig Gheorghiu +Bob Ippolito +Christian Tismer diff --git a/lib/spack/external/py/LICENSE b/lib/spack/external/py/LICENSE new file mode 100644 index 0000000000..31ecdfb1db --- /dev/null +++ b/lib/spack/external/py/LICENSE @@ -0,0 +1,19 @@ + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + diff --git a/lib/spack/external/py/README.rst b/lib/spack/external/py/README.rst new file mode 100644 index 0000000000..e836b7b50a --- /dev/null +++ b/lib/spack/external/py/README.rst @@ -0,0 +1,21 @@ +.. image:: https://img.shields.io/pypi/pyversions/pytest.svg + :target: https://pypi.org/project/py +.. image:: https://img.shields.io/travis/pytest-dev/py.svg + :target: https://travis-ci.org/pytest-dev/py + +The py lib is a Python development support library featuring +the following tools and modules: + +* ``py.path``: uniform local and svn path objects +* ``py.apipkg``: explicit API control and lazy-importing +* ``py.iniconfig``: easy parsing of .ini files +* ``py.code``: dynamic code generation and introspection + +NOTE: prior to the 1.4 release this distribution used to +contain py.test which is now its own package, see http://pytest.org + +For questions and more information please visit http://pylib.readthedocs.org + +Bugs and issues: https://github.com/pytest-dev/py + +Authors: Holger Krekel and others, 2004-2016 diff --git a/lib/spack/external/py/__init__.py b/lib/spack/external/py/__init__.py new file mode 100644 index 0000000000..c2273a2e64 --- /dev/null +++ b/lib/spack/external/py/__init__.py @@ -0,0 +1,150 @@ +""" +py.test and pylib: rapid testing and development utils + +this module uses apipkg.py for lazy-loading sub modules +and classes. 
The initpkg-dictionary below specifies +name->value mappings where value can be another namespace +dictionary or an import path. + +(c) Holger Krekel and others, 2004-2014 +""" +__version__ = '1.4.32' + +from py import _apipkg + +# so that py.error.* instances are picklable +import sys +sys.modules['py.error'] = _apipkg.AliasModule("py.error", "py._error", 'error') + +_apipkg.initpkg(__name__, attr={'_apipkg': _apipkg}, exportdefs={ + # access to all standard lib modules + 'std': '._std:std', + # access to all posix errno's as classes + 'error': '._error:error', + + '_pydir' : '.__metainfo:pydir', + 'version': 'py:__version__', # backward compatibility + + # pytest-2.0 has a flat namespace, we use alias modules + # to keep old references compatible + 'test' : 'pytest', + 'test.collect' : 'pytest', + 'test.cmdline' : 'pytest', + + # hook into the top-level standard library + 'process' : { + '__doc__' : '._process:__doc__', + 'cmdexec' : '._process.cmdexec:cmdexec', + 'kill' : '._process.killproc:kill', + 'ForkedFunc' : '._process.forkedfunc:ForkedFunc', + }, + + 'apipkg' : { + 'initpkg' : '._apipkg:initpkg', + 'ApiModule' : '._apipkg:ApiModule', + }, + + 'iniconfig' : { + 'IniConfig' : '._iniconfig:IniConfig', + 'ParseError' : '._iniconfig:ParseError', + }, + + 'path' : { + '__doc__' : '._path:__doc__', + 'svnwc' : '._path.svnwc:SvnWCCommandPath', + 'svnurl' : '._path.svnurl:SvnCommandPath', + 'local' : '._path.local:LocalPath', + 'SvnAuth' : '._path.svnwc:SvnAuth', + }, + + # python inspection/code-generation API + 'code' : { + '__doc__' : '._code:__doc__', + 'compile' : '._code.source:compile_', + 'Source' : '._code.source:Source', + 'Code' : '._code.code:Code', + 'Frame' : '._code.code:Frame', + 'ExceptionInfo' : '._code.code:ExceptionInfo', + 'Traceback' : '._code.code:Traceback', + 'getfslineno' : '._code.source:getfslineno', + 'getrawcode' : '._code.code:getrawcode', + 'patch_builtins' : '._code.code:patch_builtins', + 'unpatch_builtins' : '._code.code:unpatch_builtins', + '_AssertionError' : '._code.assertion:AssertionError', + '_reinterpret_old' : '._code.assertion:reinterpret_old', + '_reinterpret' : '._code.assertion:reinterpret', + '_reprcompare' : '._code.assertion:_reprcompare', + '_format_explanation' : '._code.assertion:_format_explanation', + }, + + # backports and additions of builtins + 'builtin' : { + '__doc__' : '._builtin:__doc__', + 'enumerate' : '._builtin:enumerate', + 'reversed' : '._builtin:reversed', + 'sorted' : '._builtin:sorted', + 'any' : '._builtin:any', + 'all' : '._builtin:all', + 'set' : '._builtin:set', + 'frozenset' : '._builtin:frozenset', + 'BaseException' : '._builtin:BaseException', + 'GeneratorExit' : '._builtin:GeneratorExit', + '_sysex' : '._builtin:_sysex', + 'print_' : '._builtin:print_', + '_reraise' : '._builtin:_reraise', + '_tryimport' : '._builtin:_tryimport', + 'exec_' : '._builtin:exec_', + '_basestring' : '._builtin:_basestring', + '_totext' : '._builtin:_totext', + '_isbytes' : '._builtin:_isbytes', + '_istext' : '._builtin:_istext', + '_getimself' : '._builtin:_getimself', + '_getfuncdict' : '._builtin:_getfuncdict', + '_getcode' : '._builtin:_getcode', + 'builtins' : '._builtin:builtins', + 'execfile' : '._builtin:execfile', + 'callable' : '._builtin:callable', + 'bytes' : '._builtin:bytes', + 'text' : '._builtin:text', + }, + + # input-output helping + 'io' : { + '__doc__' : '._io:__doc__', + 'dupfile' : '._io.capture:dupfile', + 'TextIO' : '._io.capture:TextIO', + 'BytesIO' : '._io.capture:BytesIO', + 'FDCapture' : 
'._io.capture:FDCapture', + 'StdCapture' : '._io.capture:StdCapture', + 'StdCaptureFD' : '._io.capture:StdCaptureFD', + 'TerminalWriter' : '._io.terminalwriter:TerminalWriter', + 'ansi_print' : '._io.terminalwriter:ansi_print', + 'get_terminal_width' : '._io.terminalwriter:get_terminal_width', + 'saferepr' : '._io.saferepr:saferepr', + }, + + # small and mean xml/html generation + 'xml' : { + '__doc__' : '._xmlgen:__doc__', + 'html' : '._xmlgen:html', + 'Tag' : '._xmlgen:Tag', + 'raw' : '._xmlgen:raw', + 'Namespace' : '._xmlgen:Namespace', + 'escape' : '._xmlgen:escape', + }, + + 'log' : { + # logging API ('producers' and 'consumers' connected via keywords) + '__doc__' : '._log:__doc__', + '_apiwarn' : '._log.warning:_apiwarn', + 'Producer' : '._log.log:Producer', + 'setconsumer' : '._log.log:setconsumer', + '_setstate' : '._log.log:setstate', + '_getstate' : '._log.log:getstate', + 'Path' : '._log.log:Path', + 'STDOUT' : '._log.log:STDOUT', + 'STDERR' : '._log.log:STDERR', + 'Syslog' : '._log.log:Syslog', + }, + +}) diff --git a/lib/spack/external/py/__metainfo.py b/lib/spack/external/py/__metainfo.py new file mode 100644 index 0000000000..12581eb7af --- /dev/null +++ b/lib/spack/external/py/__metainfo.py @@ -0,0 +1,2 @@ +import py +pydir = py.path.local(py.__file__).dirpath() diff --git a/lib/spack/external/py/_apipkg.py b/lib/spack/external/py/_apipkg.py new file mode 100644 index 0000000000..a73b8f6d0b --- /dev/null +++ b/lib/spack/external/py/_apipkg.py @@ -0,0 +1,181 @@ +""" +apipkg: control the exported namespace of a python package. + +see http://pypi.python.org/pypi/apipkg + +(c) holger krekel, 2009 - MIT license +""" +import os +import sys +from types import ModuleType + +__version__ = '1.3.dev' + +def _py_abspath(path): + """ + special version of abspath + that will leave paths from jython jars alone + """ + if path.startswith('__pyclasspath__'): + + return path + else: + return os.path.abspath(path) + +def initpkg(pkgname, exportdefs, attr=dict()): + """ initialize given package from the export definitions. 
""" + oldmod = sys.modules.get(pkgname) + d = {} + f = getattr(oldmod, '__file__', None) + if f: + f = _py_abspath(f) + d['__file__'] = f + if hasattr(oldmod, '__version__'): + d['__version__'] = oldmod.__version__ + if hasattr(oldmod, '__loader__'): + d['__loader__'] = oldmod.__loader__ + if hasattr(oldmod, '__path__'): + d['__path__'] = [_py_abspath(p) for p in oldmod.__path__] + if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None): + d['__doc__'] = oldmod.__doc__ + d.update(attr) + if hasattr(oldmod, "__dict__"): + oldmod.__dict__.update(d) + mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d) + sys.modules[pkgname] = mod + +def importobj(modpath, attrname): + module = __import__(modpath, None, None, ['__doc__']) + if not attrname: + return module + + retval = module + names = attrname.split(".") + for x in names: + retval = getattr(retval, x) + return retval + +class ApiModule(ModuleType): + def __docget(self): + try: + return self.__doc + except AttributeError: + if '__doc__' in self.__map__: + return self.__makeattr('__doc__') + def __docset(self, value): + self.__doc = value + __doc__ = property(__docget, __docset) + + def __init__(self, name, importspec, implprefix=None, attr=None): + self.__name__ = name + self.__all__ = [x for x in importspec if x != '__onfirstaccess__'] + self.__map__ = {} + self.__implprefix__ = implprefix or name + if attr: + for name, val in attr.items(): + # print "setting", self.__name__, name, val + setattr(self, name, val) + for name, importspec in importspec.items(): + if isinstance(importspec, dict): + subname = '%s.%s' % (self.__name__, name) + apimod = ApiModule(subname, importspec, implprefix) + sys.modules[subname] = apimod + setattr(self, name, apimod) + else: + parts = importspec.split(':') + modpath = parts.pop(0) + attrname = parts and parts[0] or "" + if modpath[0] == '.': + modpath = implprefix + modpath + + if not attrname: + subname = '%s.%s' % (self.__name__, name) + apimod = AliasModule(subname, modpath) + sys.modules[subname] = apimod + if '.' 
not in name: + setattr(self, name, apimod) + else: + self.__map__[name] = (modpath, attrname) + + def __repr__(self): + l = [] + if hasattr(self, '__version__'): + l.append("version=" + repr(self.__version__)) + if hasattr(self, '__file__'): + l.append('from ' + repr(self.__file__)) + if l: + return '<ApiModule %r %s>' % (self.__name__, " ".join(l)) + return '<ApiModule %r>' % (self.__name__,) + + def __makeattr(self, name): + """lazily compute value for name or raise AttributeError if unknown.""" + # print "makeattr", self.__name__, name + target = None + if '__onfirstaccess__' in self.__map__: + target = self.__map__.pop('__onfirstaccess__') + importobj(*target)() + try: + modpath, attrname = self.__map__[name] + except KeyError: + if target is not None and name != '__onfirstaccess__': + # retry, onfirstaccess might have set attrs + return getattr(self, name) + raise AttributeError(name) + else: + result = importobj(modpath, attrname) + setattr(self, name, result) + try: + del self.__map__[name] + except KeyError: + pass # in a recursive-import situation a double-del can happen + return result + + __getattr__ = __makeattr + + def __dict__(self): + # force all the content of the module to be loaded when __dict__ is read + dictdescr = ModuleType.__dict__['__dict__'] + dict = dictdescr.__get__(self) + if dict is not None: + hasattr(self, 'some') + for name in self.__all__: + try: + self.__makeattr(name) + except AttributeError: + pass + return dict + __dict__ = property(__dict__) + + +def AliasModule(modname, modpath, attrname=None): + mod = [] + + def getmod(): + if not mod: + x = importobj(modpath, None) + if attrname is not None: + x = getattr(x, attrname) + mod.append(x) + return mod[0] + + class AliasModule(ModuleType): + + def __repr__(self): + x = modpath + if attrname: + x += "."
+ attrname + return '' % (modname, x) + + def __getattribute__(self, name): + try: + return getattr(getmod(), name) + except ImportError: + return None + + def __setattr__(self, name, value): + setattr(getmod(), name, value) + + def __delattr__(self, name): + delattr(getmod(), name) + + return AliasModule(str(modname)) diff --git a/lib/spack/external/py/_builtin.py b/lib/spack/external/py/_builtin.py new file mode 100644 index 0000000000..52ee9d79ca --- /dev/null +++ b/lib/spack/external/py/_builtin.py @@ -0,0 +1,248 @@ +import sys + +try: + reversed = reversed +except NameError: + def reversed(sequence): + """reversed(sequence) -> reverse iterator over values of the sequence + + Return a reverse iterator + """ + if hasattr(sequence, '__reversed__'): + return sequence.__reversed__() + if not hasattr(sequence, '__getitem__'): + raise TypeError("argument to reversed() must be a sequence") + return reversed_iterator(sequence) + + class reversed_iterator(object): + + def __init__(self, seq): + self.seq = seq + self.remaining = len(seq) + + def __iter__(self): + return self + + def next(self): + i = self.remaining + if i > 0: + i -= 1 + item = self.seq[i] + self.remaining = i + return item + raise StopIteration + + def __length_hint__(self): + return self.remaining + +try: + any = any +except NameError: + def any(iterable): + for x in iterable: + if x: + return True + return False + +try: + all = all +except NameError: + def all(iterable): + for x in iterable: + if not x: + return False + return True + +try: + sorted = sorted +except NameError: + builtin_cmp = cmp # need to use cmp as keyword arg + + def sorted(iterable, cmp=None, key=None, reverse=0): + use_cmp = None + if key is not None: + if cmp is None: + def use_cmp(x, y): + return builtin_cmp(x[0], y[0]) + else: + def use_cmp(x, y): + return cmp(x[0], y[0]) + l = [(key(element), element) for element in iterable] + else: + if cmp is not None: + use_cmp = cmp + l = list(iterable) + if use_cmp is not None: + l.sort(use_cmp) + else: + l.sort() + if reverse: + l.reverse() + if key is not None: + return [element for (_, element) in l] + return l + +try: + set, frozenset = set, frozenset +except NameError: + from sets import set, frozenset + +# pass through +enumerate = enumerate + +try: + BaseException = BaseException +except NameError: + BaseException = Exception + +try: + GeneratorExit = GeneratorExit +except NameError: + class GeneratorExit(Exception): + """ This exception is never raised, it is there to make it possible to + write code compatible with CPython 2.5 even in lower CPython + versions.""" + pass + GeneratorExit.__module__ = 'exceptions' + +_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit) + +try: + callable = callable +except NameError: + def callable(obj): + return hasattr(obj, "__call__") + +if sys.version_info >= (3, 0): + exec ("print_ = print ; exec_=exec") + import builtins + + # some backward compatibility helpers + _basestring = str + def _totext(obj, encoding=None, errors=None): + if isinstance(obj, bytes): + if errors is None: + obj = obj.decode(encoding) + else: + obj = obj.decode(encoding, errors) + elif not isinstance(obj, str): + obj = str(obj) + return obj + + def _isbytes(x): + return isinstance(x, bytes) + def _istext(x): + return isinstance(x, str) + + text = str + bytes = bytes + + + def _getimself(function): + return getattr(function, '__self__', None) + + def _getfuncdict(function): + return getattr(function, "__dict__", None) + + def _getcode(function): + return getattr(function, 
"__code__", None) + + def execfile(fn, globs=None, locs=None): + if globs is None: + back = sys._getframe(1) + globs = back.f_globals + locs = back.f_locals + del back + elif locs is None: + locs = globs + fp = open(fn, "r") + try: + source = fp.read() + finally: + fp.close() + co = compile(source, fn, "exec", dont_inherit=True) + exec_(co, globs, locs) + +else: + import __builtin__ as builtins + _totext = unicode + _basestring = basestring + text = unicode + bytes = str + execfile = execfile + callable = callable + def _isbytes(x): + return isinstance(x, str) + def _istext(x): + return isinstance(x, unicode) + + def _getimself(function): + return getattr(function, 'im_self', None) + + def _getfuncdict(function): + return getattr(function, "__dict__", None) + + def _getcode(function): + try: + return getattr(function, "__code__") + except AttributeError: + return getattr(function, "func_code", None) + + def print_(*args, **kwargs): + """ minimal backport of py3k print statement. """ + sep = ' ' + if 'sep' in kwargs: + sep = kwargs.pop('sep') + end = '\n' + if 'end' in kwargs: + end = kwargs.pop('end') + file = 'file' in kwargs and kwargs.pop('file') or sys.stdout + if kwargs: + args = ", ".join([str(x) for x in kwargs]) + raise TypeError("invalid keyword arguments: %s" % args) + at_start = True + for x in args: + if not at_start: + file.write(sep) + file.write(str(x)) + at_start = False + file.write(end) + + def exec_(obj, globals=None, locals=None): + """ minimal backport of py3k exec statement. """ + __tracebackhide__ = True + if globals is None: + frame = sys._getframe(1) + globals = frame.f_globals + if locals is None: + locals = frame.f_locals + elif locals is None: + locals = globals + exec2(obj, globals, locals) + +if sys.version_info >= (3, 0): + def _reraise(cls, val, tb): + __tracebackhide__ = True + assert hasattr(val, '__traceback__') + raise cls.with_traceback(val, tb) +else: + exec (""" +def _reraise(cls, val, tb): + __tracebackhide__ = True + raise cls, val, tb +def exec2(obj, globals, locals): + __tracebackhide__ = True + exec obj in globals, locals +""") + +def _tryimport(*names): + """ return the first successfully imported module. """ + assert names + for name in names: + try: + __import__(name) + except ImportError: + excinfo = sys.exc_info() + else: + return sys.modules[name] + _reraise(*excinfo) diff --git a/lib/spack/external/py/_code/__init__.py b/lib/spack/external/py/_code/__init__.py new file mode 100644 index 0000000000..f15acf8513 --- /dev/null +++ b/lib/spack/external/py/_code/__init__.py @@ -0,0 +1 @@ +""" python inspection/code generation API """ diff --git a/lib/spack/external/py/_code/_assertionnew.py b/lib/spack/external/py/_code/_assertionnew.py new file mode 100644 index 0000000000..afb1b31ff0 --- /dev/null +++ b/lib/spack/external/py/_code/_assertionnew.py @@ -0,0 +1,339 @@ +""" +Find intermediate evalutation results in assert statements through builtin AST. +This should replace _assertionold.py eventually. 
+""" + +import sys +import ast + +import py +from py._code.assertion import _format_explanation, BuiltinAssertionError + + +if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): + # See http://bugs.jython.org/issue1497 + _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", + "ListComp", "GeneratorExp", "Yield", "Compare", "Call", + "Repr", "Num", "Str", "Attribute", "Subscript", "Name", + "List", "Tuple") + _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign", + "AugAssign", "Print", "For", "While", "If", "With", "Raise", + "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom", + "Exec", "Global", "Expr", "Pass", "Break", "Continue") + _expr_nodes = set(getattr(ast, name) for name in _exprs) + _stmt_nodes = set(getattr(ast, name) for name in _stmts) + def _is_ast_expr(node): + return node.__class__ in _expr_nodes + def _is_ast_stmt(node): + return node.__class__ in _stmt_nodes +else: + def _is_ast_expr(node): + return isinstance(node, ast.expr) + def _is_ast_stmt(node): + return isinstance(node, ast.stmt) + + +class Failure(Exception): + """Error found while interpreting AST.""" + + def __init__(self, explanation=""): + self.cause = sys.exc_info() + self.explanation = explanation + + +def interpret(source, frame, should_fail=False): + mod = ast.parse(source) + visitor = DebugInterpreter(frame) + try: + visitor.visit(mod) + except Failure: + failure = sys.exc_info()[1] + return getfailure(failure) + if should_fail: + return ("(assertion failed, but when it was re-run for " + "printing intermediate values, it did not fail. Suggestions: " + "compute assert expression before the assert or use --no-assert)") + +def run(offending_line, frame=None): + if frame is None: + frame = py.code.Frame(sys._getframe(1)) + return interpret(offending_line, frame) + +def getfailure(failure): + explanation = _format_explanation(failure.explanation) + value = failure.cause[1] + if str(value): + lines = explanation.splitlines() + if not lines: + lines.append("") + lines[0] += " << %s" % (value,) + explanation = "\n".join(lines) + text = "%s: %s" % (failure.cause[0].__name__, explanation) + if text.startswith("AssertionError: assert "): + text = text[16:] + return text + + +operator_map = { + ast.BitOr : "|", + ast.BitXor : "^", + ast.BitAnd : "&", + ast.LShift : "<<", + ast.RShift : ">>", + ast.Add : "+", + ast.Sub : "-", + ast.Mult : "*", + ast.Div : "/", + ast.FloorDiv : "//", + ast.Mod : "%", + ast.Eq : "==", + ast.NotEq : "!=", + ast.Lt : "<", + ast.LtE : "<=", + ast.Gt : ">", + ast.GtE : ">=", + ast.Pow : "**", + ast.Is : "is", + ast.IsNot : "is not", + ast.In : "in", + ast.NotIn : "not in" +} + +unary_map = { + ast.Not : "not %s", + ast.Invert : "~%s", + ast.USub : "-%s", + ast.UAdd : "+%s" +} + + +class DebugInterpreter(ast.NodeVisitor): + """Interpret AST nodes to gleam useful debugging information. """ + + def __init__(self, frame): + self.frame = frame + + def generic_visit(self, node): + # Fallback when we don't have a special implementation. 
+ if _is_ast_expr(node): + mod = ast.Expression(node) + co = self._compile(mod) + try: + result = self.frame.eval(co) + except Exception: + raise Failure() + explanation = self.frame.repr(result) + return explanation, result + elif _is_ast_stmt(node): + mod = ast.Module([node]) + co = self._compile(mod, "exec") + try: + self.frame.exec_(co) + except Exception: + raise Failure() + return None, None + else: + raise AssertionError("can't handle %s" %(node,)) + + def _compile(self, source, mode="eval"): + return compile(source, "", mode) + + def visit_Expr(self, expr): + return self.visit(expr.value) + + def visit_Module(self, mod): + for stmt in mod.body: + self.visit(stmt) + + def visit_Name(self, name): + explanation, result = self.generic_visit(name) + # See if the name is local. + source = "%r in locals() is not globals()" % (name.id,) + co = self._compile(source) + try: + local = self.frame.eval(co) + except Exception: + # have to assume it isn't + local = False + if not local: + return name.id, result + return explanation, result + + def visit_Compare(self, comp): + left = comp.left + left_explanation, left_result = self.visit(left) + for op, next_op in zip(comp.ops, comp.comparators): + next_explanation, next_result = self.visit(next_op) + op_symbol = operator_map[op.__class__] + explanation = "%s %s %s" % (left_explanation, op_symbol, + next_explanation) + source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_left=left_result, + __exprinfo_right=next_result) + except Exception: + raise Failure(explanation) + try: + if not result: + break + except KeyboardInterrupt: + raise + except: + break + left_explanation, left_result = next_explanation, next_result + + rcomp = py.code._reprcompare + if rcomp: + res = rcomp(op_symbol, left_result, next_result) + if res: + explanation = res + return explanation, result + + def visit_BoolOp(self, boolop): + is_or = isinstance(boolop.op, ast.Or) + explanations = [] + for operand in boolop.values: + explanation, result = self.visit(operand) + explanations.append(explanation) + if result == is_or: + break + name = is_or and " or " or " and " + explanation = "(" + name.join(explanations) + ")" + return explanation, result + + def visit_UnaryOp(self, unary): + pattern = unary_map[unary.op.__class__] + operand_explanation, operand_result = self.visit(unary.operand) + explanation = pattern % (operand_explanation,) + co = self._compile(pattern % ("__exprinfo_expr",)) + try: + result = self.frame.eval(co, __exprinfo_expr=operand_result) + except Exception: + raise Failure(explanation) + return explanation, result + + def visit_BinOp(self, binop): + left_explanation, left_result = self.visit(binop.left) + right_explanation, right_result = self.visit(binop.right) + symbol = operator_map[binop.op.__class__] + explanation = "(%s %s %s)" % (left_explanation, symbol, + right_explanation) + source = "__exprinfo_left %s __exprinfo_right" % (symbol,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_left=left_result, + __exprinfo_right=right_result) + except Exception: + raise Failure(explanation) + return explanation, result + + def visit_Call(self, call): + func_explanation, func = self.visit(call.func) + arg_explanations = [] + ns = {"__exprinfo_func" : func} + arguments = [] + for arg in call.args: + arg_explanation, arg_result = self.visit(arg) + arg_name = "__exprinfo_%s" % (len(ns),) + ns[arg_name] = arg_result + arguments.append(arg_name) + 
arg_explanations.append(arg_explanation) + for keyword in call.keywords: + arg_explanation, arg_result = self.visit(keyword.value) + arg_name = "__exprinfo_%s" % (len(ns),) + ns[arg_name] = arg_result + keyword_source = "%s=%%s" % (keyword.arg) + arguments.append(keyword_source % (arg_name,)) + arg_explanations.append(keyword_source % (arg_explanation,)) + if call.starargs: + arg_explanation, arg_result = self.visit(call.starargs) + arg_name = "__exprinfo_star" + ns[arg_name] = arg_result + arguments.append("*%s" % (arg_name,)) + arg_explanations.append("*%s" % (arg_explanation,)) + if call.kwargs: + arg_explanation, arg_result = self.visit(call.kwargs) + arg_name = "__exprinfo_kwds" + ns[arg_name] = arg_result + arguments.append("**%s" % (arg_name,)) + arg_explanations.append("**%s" % (arg_explanation,)) + args_explained = ", ".join(arg_explanations) + explanation = "%s(%s)" % (func_explanation, args_explained) + args = ", ".join(arguments) + source = "__exprinfo_func(%s)" % (args,) + co = self._compile(source) + try: + result = self.frame.eval(co, **ns) + except Exception: + raise Failure(explanation) + pattern = "%s\n{%s = %s\n}" + rep = self.frame.repr(result) + explanation = pattern % (rep, rep, explanation) + return explanation, result + + def _is_builtin_name(self, name): + pattern = "%r not in globals() and %r not in locals()" + source = pattern % (name.id, name.id) + co = self._compile(source) + try: + return self.frame.eval(co) + except Exception: + return False + + def visit_Attribute(self, attr): + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + source_explanation, source_result = self.visit(attr.value) + explanation = "%s.%s" % (source_explanation, attr.attr) + source = "__exprinfo_expr.%s" % (attr.attr,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_expr=source_result) + except Exception: + raise Failure(explanation) + explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result), + self.frame.repr(result), + source_explanation, attr.attr) + # Check if the attr is from an instance. + source = "%r in getattr(__exprinfo_expr, '__dict__', {})" + source = source % (attr.attr,) + co = self._compile(source) + try: + from_instance = self.frame.eval(co, __exprinfo_expr=source_result) + except Exception: + from_instance = True + if from_instance: + rep = self.frame.repr(result) + pattern = "%s\n{%s = %s\n}" + explanation = pattern % (rep, rep, explanation) + return explanation, result + + def visit_Assert(self, assrt): + test_explanation, test_result = self.visit(assrt.test) + if test_explanation.startswith("False\n{False =") and \ + test_explanation.endswith("\n"): + test_explanation = test_explanation[15:-2] + explanation = "assert %s" % (test_explanation,) + if not test_result: + try: + raise BuiltinAssertionError + except Exception: + raise Failure(explanation) + return explanation, test_result + + def visit_Assign(self, assign): + value_explanation, value_result = self.visit(assign.value) + explanation = "... 
= %s" % (value_explanation,) + name = ast.Name("__exprinfo_expr", ast.Load(), + lineno=assign.value.lineno, + col_offset=assign.value.col_offset) + new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno, + col_offset=assign.col_offset) + mod = ast.Module([new_assign]) + co = self._compile(mod, "exec") + try: + self.frame.exec_(co, __exprinfo_expr=value_result) + except Exception: + raise Failure(explanation) + return explanation, value_result diff --git a/lib/spack/external/py/_code/_assertionold.py b/lib/spack/external/py/_code/_assertionold.py new file mode 100644 index 0000000000..4e81fb3ef6 --- /dev/null +++ b/lib/spack/external/py/_code/_assertionold.py @@ -0,0 +1,555 @@ +import py +import sys, inspect +from compiler import parse, ast, pycodegen +from py._code.assertion import BuiltinAssertionError, _format_explanation + +passthroughex = py.builtin._sysex + +class Failure: + def __init__(self, node): + self.exc, self.value, self.tb = sys.exc_info() + self.node = node + +class View(object): + """View base class. + + If C is a subclass of View, then C(x) creates a proxy object around + the object x. The actual class of the proxy is not C in general, + but a *subclass* of C determined by the rules below. To avoid confusion + we call view class the class of the proxy (a subclass of C, so of View) + and object class the class of x. + + Attributes and methods not found in the proxy are automatically read on x. + Other operations like setting attributes are performed on the proxy, as + determined by its view class. The object x is available from the proxy + as its __obj__ attribute. + + The view class selection is determined by the __view__ tuples and the + optional __viewkey__ method. By default, the selected view class is the + most specific subclass of C whose __view__ mentions the class of x. + If no such subclass is found, the search proceeds with the parent + object classes. For example, C(True) will first look for a subclass + of C with __view__ = (..., bool, ...) and only if it doesn't find any + look for one with __view__ = (..., int, ...), and then ..., object,... + If everything fails the class C itself is considered to be the default. + + Alternatively, the view class selection can be driven by another aspect + of the object x, instead of the class of x, by overriding __viewkey__. + See last example at the end of this module. 
+ """ + + _viewcache = {} + __view__ = () + + def __new__(rootclass, obj, *args, **kwds): + self = object.__new__(rootclass) + self.__obj__ = obj + self.__rootclass__ = rootclass + key = self.__viewkey__() + try: + self.__class__ = self._viewcache[key] + except KeyError: + self.__class__ = self._selectsubclass(key) + return self + + def __getattr__(self, attr): + # attributes not found in the normal hierarchy rooted on View + # are looked up in the object's real class + return getattr(self.__obj__, attr) + + def __viewkey__(self): + return self.__obj__.__class__ + + def __matchkey__(self, key, subclasses): + if inspect.isclass(key): + keys = inspect.getmro(key) + else: + keys = [key] + for key in keys: + result = [C for C in subclasses if key in C.__view__] + if result: + return result + return [] + + def _selectsubclass(self, key): + subclasses = list(enumsubclasses(self.__rootclass__)) + for C in subclasses: + if not isinstance(C.__view__, tuple): + C.__view__ = (C.__view__,) + choices = self.__matchkey__(key, subclasses) + if not choices: + return self.__rootclass__ + elif len(choices) == 1: + return choices[0] + else: + # combine the multiple choices + return type('?', tuple(choices), {}) + + def __repr__(self): + return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__) + + +def enumsubclasses(cls): + for subcls in cls.__subclasses__(): + for subsubclass in enumsubclasses(subcls): + yield subsubclass + yield cls + + +class Interpretable(View): + """A parse tree node with a few extra methods.""" + explanation = None + + def is_builtin(self, frame): + return False + + def eval(self, frame): + # fall-back for unknown expression nodes + try: + expr = ast.Expression(self.__obj__) + expr.filename = '' + self.__obj__.filename = '' + co = pycodegen.ExpressionCodeGenerator(expr).getCode() + result = frame.eval(co) + except passthroughex: + raise + except: + raise Failure(self) + self.result = result + self.explanation = self.explanation or frame.repr(self.result) + + def run(self, frame): + # fall-back for unknown statement nodes + try: + expr = ast.Module(None, ast.Stmt([self.__obj__])) + expr.filename = '' + co = pycodegen.ModuleCodeGenerator(expr).getCode() + frame.exec_(co) + except passthroughex: + raise + except: + raise Failure(self) + + def nice_explanation(self): + return _format_explanation(self.explanation) + + +class Name(Interpretable): + __view__ = ast.Name + + def is_local(self, frame): + source = '%r in locals() is not globals()' % self.name + try: + return frame.is_true(frame.eval(source)) + except passthroughex: + raise + except: + return False + + def is_global(self, frame): + source = '%r in globals()' % self.name + try: + return frame.is_true(frame.eval(source)) + except passthroughex: + raise + except: + return False + + def is_builtin(self, frame): + source = '%r not in locals() and %r not in globals()' % ( + self.name, self.name) + try: + return frame.is_true(frame.eval(source)) + except passthroughex: + raise + except: + return False + + def eval(self, frame): + super(Name, self).eval(frame) + if not self.is_local(frame): + self.explanation = self.name + +class Compare(Interpretable): + __view__ = ast.Compare + + def eval(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + for operation, expr2 in self.ops: + if hasattr(self, 'result'): + # shortcutting in chained expressions + if not frame.is_true(self.result): + break + expr2 = Interpretable(expr2) + expr2.eval(frame) + self.explanation = "%s %s %s" % ( + expr.explanation, operation, 
expr2.explanation) + source = "__exprinfo_left %s __exprinfo_right" % operation + try: + self.result = frame.eval(source, + __exprinfo_left=expr.result, + __exprinfo_right=expr2.result) + except passthroughex: + raise + except: + raise Failure(self) + expr = expr2 + +class And(Interpretable): + __view__ = ast.And + + def eval(self, frame): + explanations = [] + for expr in self.nodes: + expr = Interpretable(expr) + expr.eval(frame) + explanations.append(expr.explanation) + self.result = expr.result + if not frame.is_true(expr.result): + break + self.explanation = '(' + ' and '.join(explanations) + ')' + +class Or(Interpretable): + __view__ = ast.Or + + def eval(self, frame): + explanations = [] + for expr in self.nodes: + expr = Interpretable(expr) + expr.eval(frame) + explanations.append(expr.explanation) + self.result = expr.result + if frame.is_true(expr.result): + break + self.explanation = '(' + ' or '.join(explanations) + ')' + + +# == Unary operations == +keepalive = [] +for astclass, astpattern in { + ast.Not : 'not __exprinfo_expr', + ast.Invert : '(~__exprinfo_expr)', + }.items(): + + class UnaryArith(Interpretable): + __view__ = astclass + + def eval(self, frame, astpattern=astpattern): + expr = Interpretable(self.expr) + expr.eval(frame) + self.explanation = astpattern.replace('__exprinfo_expr', + expr.explanation) + try: + self.result = frame.eval(astpattern, + __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + + keepalive.append(UnaryArith) + +# == Binary operations == +for astclass, astpattern in { + ast.Add : '(__exprinfo_left + __exprinfo_right)', + ast.Sub : '(__exprinfo_left - __exprinfo_right)', + ast.Mul : '(__exprinfo_left * __exprinfo_right)', + ast.Div : '(__exprinfo_left / __exprinfo_right)', + ast.Mod : '(__exprinfo_left % __exprinfo_right)', + ast.Power : '(__exprinfo_left ** __exprinfo_right)', + }.items(): + + class BinaryArith(Interpretable): + __view__ = astclass + + def eval(self, frame, astpattern=astpattern): + left = Interpretable(self.left) + left.eval(frame) + right = Interpretable(self.right) + right.eval(frame) + self.explanation = (astpattern + .replace('__exprinfo_left', left .explanation) + .replace('__exprinfo_right', right.explanation)) + try: + self.result = frame.eval(astpattern, + __exprinfo_left=left.result, + __exprinfo_right=right.result) + except passthroughex: + raise + except: + raise Failure(self) + + keepalive.append(BinaryArith) + + +class CallFunc(Interpretable): + __view__ = ast.CallFunc + + def is_bool(self, frame): + source = 'isinstance(__exprinfo_value, bool)' + try: + return frame.is_true(frame.eval(source, + __exprinfo_value=self.result)) + except passthroughex: + raise + except: + return False + + def eval(self, frame): + node = Interpretable(self.node) + node.eval(frame) + explanations = [] + vars = {'__exprinfo_fn': node.result} + source = '__exprinfo_fn(' + for a in self.args: + if isinstance(a, ast.Keyword): + keyword = a.name + a = a.expr + else: + keyword = None + a = Interpretable(a) + a.eval(frame) + argname = '__exprinfo_%d' % len(vars) + vars[argname] = a.result + if keyword is None: + source += argname + ',' + explanations.append(a.explanation) + else: + source += '%s=%s,' % (keyword, argname) + explanations.append('%s=%s' % (keyword, a.explanation)) + if self.star_args: + star_args = Interpretable(self.star_args) + star_args.eval(frame) + argname = '__exprinfo_star' + vars[argname] = star_args.result + source += '*' + argname + ',' + explanations.append('*' + 
star_args.explanation) + if self.dstar_args: + dstar_args = Interpretable(self.dstar_args) + dstar_args.eval(frame) + argname = '__exprinfo_kwds' + vars[argname] = dstar_args.result + source += '**' + argname + ',' + explanations.append('**' + dstar_args.explanation) + self.explanation = "%s(%s)" % ( + node.explanation, ', '.join(explanations)) + if source.endswith(','): + source = source[:-1] + source += ')' + try: + self.result = frame.eval(source, **vars) + except passthroughex: + raise + except: + raise Failure(self) + if not node.is_builtin(frame) or not self.is_bool(frame): + r = frame.repr(self.result) + self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) + +class Getattr(Interpretable): + __view__ = ast.Getattr + + def eval(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + source = '__exprinfo_expr.%s' % self.attrname + try: + self.result = frame.eval(source, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + self.explanation = '%s.%s' % (expr.explanation, self.attrname) + # if the attribute comes from the instance, its value is interesting + source = ('hasattr(__exprinfo_expr, "__dict__") and ' + '%r in __exprinfo_expr.__dict__' % self.attrname) + try: + from_instance = frame.is_true( + frame.eval(source, __exprinfo_expr=expr.result)) + except passthroughex: + raise + except: + from_instance = True + if from_instance: + r = frame.repr(self.result) + self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) + +# == Re-interpretation of full statements == + +class Assert(Interpretable): + __view__ = ast.Assert + + def run(self, frame): + test = Interpretable(self.test) + test.eval(frame) + # simplify 'assert False where False = ...' + if (test.explanation.startswith('False\n{False = ') and + test.explanation.endswith('\n}')): + test.explanation = test.explanation[15:-2] + # print the result as 'assert ' + self.result = test.result + self.explanation = 'assert ' + test.explanation + if not frame.is_true(test.result): + try: + raise BuiltinAssertionError + except passthroughex: + raise + except: + raise Failure(self) + +class Assign(Interpretable): + __view__ = ast.Assign + + def run(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + self.result = expr.result + self.explanation = '... 
= ' + expr.explanation + # fall-back-run the rest of the assignment + ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr')) + mod = ast.Module(None, ast.Stmt([ass])) + mod.filename = '' + co = pycodegen.ModuleCodeGenerator(mod).getCode() + try: + frame.exec_(co, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + +class Discard(Interpretable): + __view__ = ast.Discard + + def run(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + self.result = expr.result + self.explanation = expr.explanation + +class Stmt(Interpretable): + __view__ = ast.Stmt + + def run(self, frame): + for stmt in self.nodes: + stmt = Interpretable(stmt) + stmt.run(frame) + + +def report_failure(e): + explanation = e.node.nice_explanation() + if explanation: + explanation = ", in: " + explanation + else: + explanation = "" + sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation)) + +def check(s, frame=None): + if frame is None: + frame = sys._getframe(1) + frame = py.code.Frame(frame) + expr = parse(s, 'eval') + assert isinstance(expr, ast.Expression) + node = Interpretable(expr.node) + try: + node.eval(frame) + except passthroughex: + raise + except Failure: + e = sys.exc_info()[1] + report_failure(e) + else: + if not frame.is_true(node.result): + sys.stderr.write("assertion failed: %s\n" % node.nice_explanation()) + + +########################################################### +# API / Entry points +# ######################################################### + +def interpret(source, frame, should_fail=False): + module = Interpretable(parse(source, 'exec').node) + #print "got module", module + if isinstance(frame, py.std.types.FrameType): + frame = py.code.Frame(frame) + try: + module.run(frame) + except Failure: + e = sys.exc_info()[1] + return getfailure(e) + except passthroughex: + raise + except: + import traceback + traceback.print_exc() + if should_fail: + return ("(assertion failed, but when it was re-run for " + "printing intermediate values, it did not fail. 
Suggestions: " + "compute assert expression before the assert or use --nomagic)") + else: + return None + +def getmsg(excinfo): + if isinstance(excinfo, tuple): + excinfo = py.code.ExceptionInfo(excinfo) + #frame, line = gettbline(tb) + #frame = py.code.Frame(frame) + #return interpret(line, frame) + + tb = excinfo.traceback[-1] + source = str(tb.statement).strip() + x = interpret(source, tb.frame, should_fail=True) + if not isinstance(x, str): + raise TypeError("interpret returned non-string %r" % (x,)) + return x + +def getfailure(e): + explanation = e.node.nice_explanation() + if str(e.value): + lines = explanation.split('\n') + lines[0] += " << %s" % (e.value,) + explanation = '\n'.join(lines) + text = "%s: %s" % (e.exc.__name__, explanation) + if text.startswith('AssertionError: assert '): + text = text[16:] + return text + +def run(s, frame=None): + if frame is None: + frame = sys._getframe(1) + frame = py.code.Frame(frame) + module = Interpretable(parse(s, 'exec').node) + try: + module.run(frame) + except Failure: + e = sys.exc_info()[1] + report_failure(e) + + +if __name__ == '__main__': + # example: + def f(): + return 5 + def g(): + return 3 + def h(x): + return 'never' + check("f() * g() == 5") + check("not f()") + check("not (f() and g() or 0)") + check("f() == g()") + i = 4 + check("i == f()") + check("len(f()) == 0") + check("isinstance(2+3+4, float)") + + run("x = i") + check("x == 5") + + run("assert not f(), 'oops'") + run("a, b, c = 1, 2") + run("a, b, c = f()") + + check("max([f(),g()]) == 4") + check("'hello'[g()] == 'h'") + run("'guk%d' % h(f())") diff --git a/lib/spack/external/py/_code/_py2traceback.py b/lib/spack/external/py/_code/_py2traceback.py new file mode 100644 index 0000000000..d65e27cb73 --- /dev/null +++ b/lib/spack/external/py/_code/_py2traceback.py @@ -0,0 +1,79 @@ +# copied from python-2.7.3's traceback.py +# CHANGES: +# - some_str is replaced, trying to create unicode strings +# +import types + +def format_exception_only(etype, value): + """Format the exception part of a traceback. + + The arguments are the exception type and value such as given by + sys.last_type and sys.last_value. The return value is a list of + strings, each ending in a newline. + + Normally, the list contains a single string; however, for + SyntaxError exceptions, it contains several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the list. + + """ + + # An instance should not have a meaningful value parameter, but + # sometimes does, particularly for string exceptions, such as + # >>> raise string1, string2 # deprecated + # + # Clear these out first because issubtype(string1, SyntaxError) + # would throw another exception and mask the original problem. + if (isinstance(etype, BaseException) or + isinstance(etype, types.InstanceType) or + etype is None or type(etype) is str): + return [_format_final_exc_line(etype, value)] + + stype = etype.__name__ + + if not issubclass(etype, SyntaxError): + return [_format_final_exc_line(stype, value)] + + # It was a syntax error; show exactly where the problem was found. 
+ lines = [] + try: + msg, (filename, lineno, offset, badline) = value.args + except Exception: + pass + else: + filename = filename or "" + lines.append(' File "%s", line %d\n' % (filename, lineno)) + if badline is not None: + lines.append(' %s\n' % badline.strip()) + if offset is not None: + caretspace = badline.rstrip('\n')[:offset].lstrip() + # non-space whitespace (likes tabs) must be kept for alignment + caretspace = ((c.isspace() and c or ' ') for c in caretspace) + # only three spaces to account for offset1 == pos 0 + lines.append(' %s^\n' % ''.join(caretspace)) + value = msg + + lines.append(_format_final_exc_line(stype, value)) + return lines + +def _format_final_exc_line(etype, value): + """Return a list of a single line -- normal case for format_exception_only""" + valuestr = _some_str(value) + if value is None or not valuestr: + line = "%s\n" % etype + else: + line = "%s: %s\n" % (etype, valuestr) + return line + +def _some_str(value): + try: + return unicode(value) + except Exception: + try: + return str(value) + except Exception: + pass + return '' % type(value).__name__ diff --git a/lib/spack/external/py/_code/assertion.py b/lib/spack/external/py/_code/assertion.py new file mode 100644 index 0000000000..4ce80c75b1 --- /dev/null +++ b/lib/spack/external/py/_code/assertion.py @@ -0,0 +1,94 @@ +import sys +import py + +BuiltinAssertionError = py.builtin.builtins.AssertionError + +_reprcompare = None # if set, will be called by assert reinterp for comparison ops + +def _format_explanation(explanation): + """This formats an explanation + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. + """ + raw_lines = (explanation or '').split('\n') + # escape newlines not followed by {, } and ~ + lines = [raw_lines[0]] + for l in raw_lines[1:]: + if l.startswith('{') or l.startswith('}') or l.startswith('~'): + lines.append(l) + else: + lines[-1] += '\\n' + l + + result = lines[:1] + stack = [0] + stackcnt = [0] + for line in lines[1:]: + if line.startswith('{'): + if stackcnt[-1]: + s = 'and ' + else: + s = 'where ' + stack.append(len(result)) + stackcnt[-1] += 1 + stackcnt.append(0) + result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + elif line.startswith('}'): + assert line.startswith('}') + stack.pop() + stackcnt.pop() + result[stack[-1]] += line[1:] + else: + assert line.startswith('~') + result.append(' '*len(stack) + line[1:]) + assert len(stack) == 1 + return '\n'.join(result) + + +class AssertionError(BuiltinAssertionError): + def __init__(self, *args): + BuiltinAssertionError.__init__(self, *args) + if args: + try: + self.msg = str(args[0]) + except py.builtin._sysex: + raise + except: + self.msg = "<[broken __repr__] %s at %0xd>" %( + args[0].__class__, id(args[0])) + else: + f = py.code.Frame(sys._getframe(1)) + try: + source = f.code.fullsource + if source is not None: + try: + source = source.getstatement(f.lineno, assertion=True) + except IndexError: + source = None + else: + source = str(source.deindent()).strip() + except py.error.ENOENT: + source = None + # this can also occur during reinterpretation, when the + # co_filename is set to "". 
+ if source: + self.msg = reinterpret(source, f, should_fail=True) + else: + self.msg = "" + if not self.args: + self.args = (self.msg,) + +if sys.version_info > (3, 0): + AssertionError.__module__ = "builtins" + reinterpret_old = "old reinterpretation not available for py3" +else: + from py._code._assertionold import interpret as reinterpret_old +if sys.version_info >= (2, 6) or (sys.platform.startswith("java")): + from py._code._assertionnew import interpret as reinterpret +else: + reinterpret = reinterpret_old + diff --git a/lib/spack/external/py/_code/code.py b/lib/spack/external/py/_code/code.py new file mode 100644 index 0000000000..f14c562a29 --- /dev/null +++ b/lib/spack/external/py/_code/code.py @@ -0,0 +1,787 @@ +import py +import sys +from inspect import CO_VARARGS, CO_VARKEYWORDS + +builtin_repr = repr + +reprlib = py.builtin._tryimport('repr', 'reprlib') + +if sys.version_info[0] >= 3: + from traceback import format_exception_only +else: + from py._code._py2traceback import format_exception_only + +class Code(object): + """ wrapper around Python code objects """ + def __init__(self, rawcode): + if not hasattr(rawcode, "co_filename"): + rawcode = py.code.getrawcode(rawcode) + try: + self.filename = rawcode.co_filename + self.firstlineno = rawcode.co_firstlineno - 1 + self.name = rawcode.co_name + except AttributeError: + raise TypeError("not a code object: %r" %(rawcode,)) + self.raw = rawcode + + def __eq__(self, other): + return self.raw == other.raw + + def __ne__(self, other): + return not self == other + + @property + def path(self): + """ return a path object pointing to source code (note that it + might not point to an actually existing file). """ + p = py.path.local(self.raw.co_filename) + # maybe don't try this checking + if not p.check(): + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? 
+ p = self.raw.co_filename + return p + + @property + def fullsource(self): + """ return a py.code.Source object for the full source file of the code + """ + from py._code import source + full, _ = source.findsource(self.raw) + return full + + def source(self): + """ return a py.code.Source object for the code object's source only + """ + # return source only for that part of code + return py.code.Source(self.raw) + + def getargs(self, var=False): + """ return a tuple with the argument names for the code object + + if 'var' is set True also return the names of the variable and + keyword arguments when present + """ + # handfull shortcut for getting args + raw = self.raw + argcount = raw.co_argcount + if var: + argcount += raw.co_flags & CO_VARARGS + argcount += raw.co_flags & CO_VARKEYWORDS + return raw.co_varnames[:argcount] + +class Frame(object): + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + def __init__(self, frame): + self.lineno = frame.f_lineno - 1 + self.f_globals = frame.f_globals + self.f_locals = frame.f_locals + self.raw = frame + self.code = py.code.Code(frame.f_code) + + @property + def statement(self): + """ statement this frame is at """ + if self.code.fullsource is None: + return py.code.Source("") + return self.code.fullsource.getstatement(self.lineno) + + def eval(self, code, **vars): + """ evaluate 'code' in the frame + + 'vars' are optional additional local variables + + returns the result of the evaluation + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def exec_(self, code, **vars): + """ exec 'code' in the frame + + 'vars' are optiona; additional local variables + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + py.builtin.exec_(code, self.f_globals, f_locals ) + + def repr(self, object): + """ return a 'safe' (non-recursive, one-line) string repr for 'object' + """ + return py.io.saferepr(object) + + def is_true(self, object): + return object + + def getargs(self, var=False): + """ return a list of tuples (name, value) for all arguments + + if 'var' is set True also include the variable and keyword + arguments when present + """ + retval = [] + for arg in self.code.getargs(var): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + +class TracebackEntry(object): + """ a single entry in a traceback """ + + _repr_style = None + exprinfo = None + + def __init__(self, rawentry): + self._rawentry = rawentry + self.lineno = rawentry.tb_lineno - 1 + + def set_repr_style(self, mode): + assert mode in ("short", "long") + self._repr_style = mode + + @property + def frame(self): + return py.code.Frame(self._rawentry.tb_frame) + + @property + def relline(self): + return self.lineno - self.frame.code.firstlineno + + def __repr__(self): + return "" %(self.frame.code.path, self.lineno+1) + + @property + def statement(self): + """ py.code.Source object for the current statement """ + source = self.frame.code.fullsource + return source.getstatement(self.lineno) + + @property + def path(self): + """ path to the source code """ + return self.frame.code.path + + def getlocals(self): + return self.frame.f_locals + locals = property(getlocals, None, None, "locals of underlaying frame") + + def reinterpret(self): + """Reinterpret the failing statement and returns a detailed information + about what operations are performed.""" + if self.exprinfo is None: + source = 
str(self.statement).strip() + x = py.code._reinterpret(source, self.frame, should_fail=True) + if not isinstance(x, str): + raise TypeError("interpret returned non-string %r" % (x,)) + self.exprinfo = x + return self.exprinfo + + def getfirstlinesource(self): + # on Jython this firstlineno can be -1 apparently + return max(self.frame.code.firstlineno, 0) + + def getsource(self, astcache=None): + """ return failing source code. """ + # we use the passed in astcache to not reparse asttrees + # within exception info printing + from py._code.source import getstatementrange_ast + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast(self.lineno, source, + astnode=astnode) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self): + """ return True if the current frame has a var __tracebackhide__ + resolving to True + + mostly for internal use + """ + try: + return self.frame.f_locals['__tracebackhide__'] + except KeyError: + try: + return self.frame.f_globals['__tracebackhide__'] + except KeyError: + return False + + def __str__(self): + try: + fn = str(self.path) + except py.error.Error: + fn = '???' + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except: + line = "???" + return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) + + def name(self): + return self.frame.code.raw.co_name + name = property(name, None, None, "co_name of underlaying code") + +class Traceback(list): + """ Traceback objects encapsulate and offer higher level + access to Traceback entries. + """ + Entry = TracebackEntry + def __init__(self, tb): + """ initialize from given python traceback object. """ + if hasattr(tb, 'tb_next'): + def f(cur): + while cur is not None: + yield self.Entry(cur) + cur = cur.tb_next + list.__init__(self, f(tb)) + else: + list.__init__(self, tb) + + def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): + """ return a Traceback instance wrapping part of this Traceback + + by provding any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined + + this allows cutting the first part of a Traceback instance e.g. 
+ for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback) + """ + for x in self: + code = x.frame.code + codepath = code.path + if ((path is None or codepath == path) and + (excludepath is None or not hasattr(codepath, 'relto') or + not codepath.relto(excludepath)) and + (lineno is None or x.lineno == lineno) and + (firstlineno is None or x.frame.code.firstlineno == firstlineno)): + return Traceback(x._rawentry) + return self + + def __getitem__(self, key): + val = super(Traceback, self).__getitem__(key) + if isinstance(key, type(slice(0))): + val = self.__class__(val) + return val + + def filter(self, fn=lambda x: not x.ishidden()): + """ return a Traceback instance with certain items removed + + fn is a function that gets a single argument, a TracebackItem + instance, and should return True when the item should be added + to the Traceback, False when not + + by default this removes all the TracebackItems which are hidden + (see ishidden() above) + """ + return Traceback(filter(fn, self)) + + def getcrashentry(self): + """ return last non-hidden traceback entry that lead + to the exception of a traceback. + """ + for i in range(-1, -len(self)-1, -1): + entry = self[i] + if not entry.ishidden(): + return entry + return self[-1] + + def recursionindex(self): + """ return the index of the frame/TracebackItem where recursion + originates if appropriate, None if no recursion occurred + """ + cache = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + #XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + #print "checking for recursion at", key + l = cache.setdefault(key, []) + if l: + f = entry.frame + loc = f.f_locals + for otherloc in l: + if f.is_true(f.eval(co_equal, + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc)): + return i + l.append(entry.frame.f_locals) + return None + +co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2', + '?', 'eval') + +class ExceptionInfo(object): + """ wraps sys.exc_info() objects and offers + help for navigating the traceback. 
+    """
+    _striptext = ''
+    def __init__(self, tup=None, exprinfo=None):
+        if tup is None:
+            tup = sys.exc_info()
+            if exprinfo is None and isinstance(tup[1], AssertionError):
+                exprinfo = getattr(tup[1], 'msg', None)
+                if exprinfo is None:
+                    exprinfo = str(tup[1])
+                if exprinfo and exprinfo.startswith('assert '):
+                    self._striptext = 'AssertionError: '
+        self._excinfo = tup
+        #: the exception class
+        self.type = tup[0]
+        #: the exception instance
+        self.value = tup[1]
+        #: the exception raw traceback
+        self.tb = tup[2]
+        #: the exception type name
+        self.typename = self.type.__name__
+        #: the exception traceback (py.code.Traceback instance)
+        self.traceback = py.code.Traceback(self.tb)
+
+    def __repr__(self):
+        return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
+
+    def exconly(self, tryshort=False):
+        """ return the exception as a string
+
+            when 'tryshort' resolves to True, and the exception is a
+            py.code._AssertionError, only the actual exception part of
+            the exception representation is returned (so 'AssertionError: ' is
+            removed from the beginning)
+        """
+        lines = format_exception_only(self.type, self.value)
+        text = ''.join(lines)
+        text = text.rstrip()
+        if tryshort:
+            if text.startswith(self._striptext):
+                text = text[len(self._striptext):]
+        return text
+
+    def errisinstance(self, exc):
+        """ return True if the exception is an instance of exc """
+        return isinstance(self.value, exc)
+
+    def _getreprcrash(self):
+        exconly = self.exconly(tryshort=True)
+        entry = self.traceback.getcrashentry()
+        path, lineno = entry.frame.code.raw.co_filename, entry.lineno
+        return ReprFileLocation(path, lineno+1, exconly)
+
+    def getrepr(self, showlocals=False, style="long",
+            abspath=False, tbfilter=True, funcargs=False):
+        """ return str()able representation of this exception info.
+            showlocals: show locals per traceback entry
+            style: long|short|no|native traceback style
+            tbfilter: hide entries (where __tracebackhide__ is true)
+
+            in case of style==native, tbfilter and showlocals are ignored.
+        """
+        if style == 'native':
+            return ReprExceptionInfo(ReprTracebackNative(
+                py.std.traceback.format_exception(
+                    self.type,
+                    self.value,
+                    self.traceback[0]._rawentry,
+                )), self._getreprcrash())
+
+        fmt = FormattedExcinfo(showlocals=showlocals, style=style,
+            abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
+        return fmt.repr_excinfo(self)
+
+    def __str__(self):
+        entry = self.traceback[-1]
+        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+        return str(loc)
+
+    def __unicode__(self):
+        entry = self.traceback[-1]
+        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+        return unicode(loc)
+
+
+class FormattedExcinfo(object):
+    """ presenting information about failing Functions and Generators.
""" + # for traceback entries + flow_marker = ">" + fail_marker = "E" + + def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False): + self.showlocals = showlocals + self.style = style + self.tbfilter = tbfilter + self.funcargs = funcargs + self.abspath = abspath + self.astcache = {} + + def _getindent(self, source): + # figure out indent for given source + try: + s = str(source.getstatement(len(source)-1)) + except KeyboardInterrupt: + raise + except: + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except: + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry): + source = entry.getsource(self.astcache) + if source is not None: + source = source.deindent() + return source + + def _saferepr(self, obj): + return py.io.saferepr(obj) + + def repr_args(self, entry): + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(var=True): + args.append((argname, self._saferepr(argvalue))) + return ReprFuncArgs(args) + + def get_source(self, source, line_index=-1, excinfo=None, short=False): + """ return formatted and marked up source lines. """ + lines = [] + if source is None or line_index >= len(source.lines): + source = py.code.Source("???") + line_index = 0 + if line_index < 0: + line_index += len(source) + space_prefix = " " + if short: + lines.append(space_prefix + source.lines[line_index].strip()) + else: + for line in source.lines[:line_index]: + lines.append(space_prefix + line) + lines.append(self.flow_marker + " " + source.lines[line_index]) + for line in source.lines[line_index+1:]: + lines.append(space_prefix + line) + if excinfo is not None: + indent = 4 if short else self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_exconly(self, excinfo, indent=4, markall=False): + lines = [] + indent = " " * indent + # get the real exception information out + exlines = excinfo.exconly(tryshort=True).split('\n') + failindent = self.fail_marker + indent[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indent + return lines + + def repr_locals(self, locals): + if self.showlocals: + lines = [] + keys = [loc for loc in locals if loc[0] != "@"] + keys.sort() + for name in keys: + value = locals[name] + if name == '__builtins__': + lines.append("__builtins__ = ") + else: + # This formatting could all be handled by the + # _repr() function, which is only reprlib.Repr in + # disguise, so is very configurable. 
+ str_repr = self._saferepr(value) + #if len(str_repr) < 70 or not isinstance(value, + # (list, tuple, dict)): + lines.append("%-10s = %s" %(name, str_repr)) + #else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # py.std.pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + + def repr_traceback_entry(self, entry, excinfo=None): + source = self._getentrysource(entry) + if source is None: + source = py.code.Source("???") + line_index = 0 + else: + # entry.getfirstlinesource() can be -1, should be 0 on jython + line_index = entry.lineno - max(entry.getfirstlinesource(), 0) + + lines = [] + style = entry._repr_style + if style is None: + style = self.style + if style in ("short", "long"): + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source(source, line_index, excinfo, short=short) + lines.extend(s) + if short: + message = "in %s" %(entry.name) + else: + message = excinfo and excinfo.typename or "" + path = self._makepath(entry.path) + filelocrepr = ReprFileLocation(path, entry.lineno+1, message) + localsrepr = None + if not short: + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style) + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path): + if not self.abspath: + try: + np = py.path.local().bestrelpath(path) + except OSError: + return path + if len(np) < len(str(path)): + path = np + return path + + def repr_traceback(self, excinfo): + traceback = excinfo.traceback + if self.tbfilter: + traceback = traceback.filter() + recursionindex = None + if excinfo.errisinstance(RuntimeError): + if "maximum recursion depth exceeded" in str(excinfo.value): + recursionindex = traceback.recursionindex() + last = traceback[-1] + entries = [] + extraline = None + for index, entry in enumerate(traceback): + einfo = (last == entry) and excinfo or None + reprentry = self.repr_traceback_entry(entry, einfo) + entries.append(reprentry) + if index == recursionindex: + extraline = "!!! Recursion detected (same locals & position)" + break + return ReprTraceback(entries, extraline, style=self.style) + + def repr_excinfo(self, excinfo): + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + return ReprExceptionInfo(reprtraceback, reprcrash) + +class TerminalRepr: + def __str__(self): + s = self.__unicode__() + if sys.version_info[0] < 3: + s = s.encode('utf-8') + return s + + def __unicode__(self): + # FYI this is called from pytest-xdist's serialization of exception + # information. 
+ io = py.io.TextIO() + tw = py.io.TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self): + return "<%s instance at %0x>" %(self.__class__, id(self)) + + +class ReprExceptionInfo(TerminalRepr): + def __init__(self, reprtraceback, reprcrash): + self.reprtraceback = reprtraceback + self.reprcrash = reprcrash + self.sections = [] + + def addsection(self, name, content, sep="-"): + self.sections.append((name, content, sep)) + + def toterminal(self, tw): + self.reprtraceback.toterminal(tw) + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + +class ReprTraceback(TerminalRepr): + entrysep = "_ " + + def __init__(self, reprentries, extraline, style): + self.reprentries = reprentries + self.extraline = extraline + self.style = style + + def toterminal(self, tw): + # the entries might have different styles + last_style = None + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i+1] + if entry.style == "long" or \ + entry.style == "short" and next_entry.style == "long": + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + +class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines): + self.style = "native" + self.reprentries = [ReprEntryNative(tblines)] + self.extraline = None + +class ReprEntryNative(TerminalRepr): + style = "native" + + def __init__(self, tblines): + self.lines = tblines + + def toterminal(self, tw): + tw.write("".join(self.lines)) + +class ReprEntry(TerminalRepr): + localssep = "_ " + + def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style): + self.lines = lines + self.reprfuncargs = reprfuncargs + self.reprlocals = reprlocals + self.reprfileloc = filelocrepr + self.style = style + + def toterminal(self, tw): + if self.style == "short": + self.reprfileloc.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + #tw.line("") + return + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + if self.reprlocals: + #tw.sep(self.localssep, "Locals") + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self): + return "%s\n%s\n%s" % ("\n".join(self.lines), + self.reprlocals, + self.reprfileloc) + +class ReprFileLocation(TerminalRepr): + def __init__(self, path, lineno, message): + self.path = str(path) + self.lineno = lineno + self.message = message + + def toterminal(self, tw): + # filename and lineno output for each entry, + # using an output format that most editors unterstand + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.line("%s:%s: %s" %(self.path, self.lineno, msg)) + +class ReprLocals(TerminalRepr): + def __init__(self, lines): + self.lines = lines + + def toterminal(self, tw): + for line in self.lines: + tw.line(line) + +class ReprFuncArgs(TerminalRepr): + def __init__(self, args): + self.args = args + + def toterminal(self, tw): + if self.args: + linesofar = "" + for name, value in self.args: + ns = "%s = %s" %(name, value) + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + + 
+oldbuiltins = {} + +def patch_builtins(assertion=True, compile=True): + """ put compile and AssertionError builtins to Python's builtins. """ + if assertion: + from py._code import assertion + l = oldbuiltins.setdefault('AssertionError', []) + l.append(py.builtin.builtins.AssertionError) + py.builtin.builtins.AssertionError = assertion.AssertionError + if compile: + l = oldbuiltins.setdefault('compile', []) + l.append(py.builtin.builtins.compile) + py.builtin.builtins.compile = py.code.compile + +def unpatch_builtins(assertion=True, compile=True): + """ remove compile and AssertionError builtins from Python builtins. """ + if assertion: + py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop() + if compile: + py.builtin.builtins.compile = oldbuiltins['compile'].pop() + +def getrawcode(obj, trycall=True): + """ return code object for given function. """ + try: + return obj.__code__ + except AttributeError: + obj = getattr(obj, 'im_func', obj) + obj = getattr(obj, 'func_code', obj) + obj = getattr(obj, 'f_code', obj) + obj = getattr(obj, '__code__', obj) + if trycall and not hasattr(obj, 'co_firstlineno'): + if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj): + x = getrawcode(obj.__call__, trycall=False) + if hasattr(x, 'co_firstlineno'): + return x + return obj + diff --git a/lib/spack/external/py/_code/source.py b/lib/spack/external/py/_code/source.py new file mode 100644 index 0000000000..c8b668b2fb --- /dev/null +++ b/lib/spack/external/py/_code/source.py @@ -0,0 +1,411 @@ +from __future__ import generators + +from bisect import bisect_right +import sys +import inspect, tokenize +import py +from types import ModuleType +cpy_compile = compile + +try: + import _ast + from _ast import PyCF_ONLY_AST as _AST_FLAG +except ImportError: + _AST_FLAG = 0 + _ast = None + + +class Source(object): + """ a immutable object holding a source code fragment, + possibly deindenting it. + """ + _compilecounter = 0 + def __init__(self, *parts, **kwargs): + self.lines = lines = [] + de = kwargs.get('deindent', True) + rstrip = kwargs.get('rstrip', True) + for part in parts: + if not part: + partlines = [] + if isinstance(part, Source): + partlines = part.lines + elif isinstance(part, (tuple, list)): + partlines = [x.rstrip("\n") for x in part] + elif isinstance(part, py.builtin._basestring): + partlines = part.split('\n') + if rstrip: + while partlines: + if partlines[-1].strip(): + break + partlines.pop() + else: + partlines = getsource(part, deindent=de).lines + if de: + partlines = deindent(partlines) + lines.extend(partlines) + + def __eq__(self, other): + try: + return self.lines == other.lines + except AttributeError: + if isinstance(other, str): + return str(self) == other + return False + + def __getitem__(self, key): + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + return self.__getslice__(key.start, key.stop) + + def __len__(self): + return len(self.lines) + + def __getslice__(self, start, end): + newsource = Source() + newsource.lines = self.lines[start:end] + return newsource + + def strip(self): + """ return new source object with trailing + and leading blank lines removed. 
+ """ + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end-1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def putaround(self, before='', after='', indent=' ' * 4): + """ return a copy of the source object with + 'before' and 'after' wrapped around it. + """ + before = Source(before) + after = Source(after) + newsource = Source() + lines = [ (indent + line) for line in self.lines] + newsource.lines = before.lines + lines + after.lines + return newsource + + def indent(self, indent=' ' * 4): + """ return a copy of the source object with + all lines indented by the given indent-string. + """ + newsource = Source() + newsource.lines = [(indent+line) for line in self.lines] + return newsource + + def getstatement(self, lineno, assertion=False): + """ return Source statement which contains the + given linenumber (counted from 0). + """ + start, end = self.getstatementrange(lineno, assertion) + return self[start:end] + + def getstatementrange(self, lineno, assertion=False): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + """ + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self, offset=None): + """ return a new source object deindented by offset. + If offset is None then guess an indentation offset from + the first non-blank line. Subsequent lines which have a + lower indentation offset will be copied verbatim as + they are assumed to be part of multilines. + """ + # XXX maybe use the tokenizer to properly handle multiline + # strings etc.pp? + newsource = Source() + newsource.lines[:] = deindent(self.lines, offset) + return newsource + + def isparseable(self, deindent=True): + """ return True if source is parseable, heuristically + deindenting it by default. + """ + try: + import parser + except ImportError: + syntax_checker = lambda x: compile(x, 'asd', 'exec') + else: + syntax_checker = parser.suite + + if deindent: + source = str(self.deindent()) + else: + source = str(self) + try: + #compile(source+'\n', "x", "exec") + syntax_checker(source+'\n') + except KeyboardInterrupt: + raise + except Exception: + return False + else: + return True + + def __str__(self): + return "\n".join(self.lines) + + def compile(self, filename=None, mode='exec', + flag=generators.compiler_flag, + dont_inherit=0, _genframe=None): + """ return compiled code object. if filename is None + invent an artificial filename which displays + the source/line position of the caller frame. 
+ """ + if not filename or py.path.local(filename).check(file=0): + if _genframe is None: + _genframe = sys._getframe(1) # the caller + fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno + base = "<%d-codegen " % self._compilecounter + self.__class__._compilecounter += 1 + if not filename: + filename = base + '%s:%d>' % (fn, lineno) + else: + filename = base + '%r %s:%d>' % (filename, fn, lineno) + source = "\n".join(self.lines) + '\n' + try: + co = cpy_compile(source, filename, mode, flag) + except SyntaxError: + ex = sys.exc_info()[1] + # re-represent syntax errors from parsing python strings + msglines = self.lines[:ex.lineno] + if ex.offset: + msglines.append(" "*ex.offset + '^') + msglines.append("(code was compiled probably from here: %s)" % filename) + newex = SyntaxError('\n'.join(msglines)) + newex.offset = ex.offset + newex.lineno = ex.lineno + newex.text = ex.text + raise newex + else: + if flag & _AST_FLAG: + return co + lines = [(x + "\n") for x in self.lines] + py.std.linecache.cache[filename] = (1, None, lines, filename) + return co + +# +# public API shortcut functions +# + +def compile_(source, filename=None, mode='exec', flags= + generators.compiler_flag, dont_inherit=0): + """ compile the given source to a raw code object, + and maintain an internal cache which allows later + retrieval of the source code for the code object + and any recursively created code objects. + """ + if _ast is not None and isinstance(source, _ast.AST): + # XXX should Source support having AST? + return cpy_compile(source, filename, mode, flags, dont_inherit) + _genframe = sys._getframe(1) # the caller + s = Source(source) + co = s.compile(filename, mode, flags, _genframe=_genframe) + return co + + +def getfslineno(obj): + """ Return source location (path, lineno) for the given object. 
+ If the source cannot be determined return ("", -1) + """ + try: + code = py.code.Code(obj) + except TypeError: + try: + fn = (py.std.inspect.getsourcefile(obj) or + py.std.inspect.getfile(obj)) + except TypeError: + return "", -1 + + fspath = fn and py.path.local(fn) or None + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except IOError: + pass + else: + fspath = code.path + lineno = code.firstlineno + assert isinstance(lineno, int) + return fspath, lineno + +# +# helper functions +# + +def findsource(obj): + try: + sourcelines, lineno = py.std.inspect.findsource(obj) + except py.builtin._sysex: + raise + except: + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + +def getsource(obj, **kwargs): + obj = py.code.getrawcode(obj) + try: + strsrc = inspect.getsource(obj) + except IndentationError: + strsrc = "\"Buggy python version consider upgrading, cannot get source\"" + assert isinstance(strsrc, str) + return Source(strsrc, **kwargs) + +def deindent(lines, offset=None): + if offset is None: + for line in lines: + line = line.expandtabs() + s = line.lstrip() + if s: + offset = len(line)-len(s) + break + else: + offset = 0 + if offset == 0: + return list(lines) + newlines = [] + def readline_generator(lines): + for line in lines: + yield line + '\n' + while True: + yield '' + + it = readline_generator(lines) + + try: + for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)): + if sline > len(lines): + break # End of input reached + if sline > len(newlines): + line = lines[sline - 1].expandtabs() + if line.lstrip() and line[:offset].isspace(): + line = line[offset:] # Deindent + newlines.append(line) + + for i in range(sline, eline): + # Don't deindent continuing lines of + # multiline tokens (i.e. multiline strings) + newlines.append(lines[i]) + except (IndentationError, tokenize.TokenError): + pass + # Add any lines we didn't see. E.g. if an exception was raised. 
+ newlines.extend(lines[len(newlines):]) + return newlines + + +def get_statement_startend2(lineno, node): + import ast + # flatten all statements and except handlers into one lineno-list + # AST's line numbers start indexing at 1 + l = [] + for x in ast.walk(node): + if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler): + l.append(x.lineno - 1) + for name in "finalbody", "orelse": + val = getattr(x, name, None) + if val: + # treat the finally/orelse part as its own statement + l.append(val[0].lineno - 1 - 1) + l.sort() + insert_index = bisect_right(l, lineno) + start = l[insert_index - 1] + if insert_index >= len(l): + end = None + else: + end = l[insert_index] + return start, end + + +def getstatementrange_ast(lineno, source, assertion=False, astnode=None): + if astnode is None: + content = str(source) + if sys.version_info < (2,7): + content += "\n" + try: + astnode = compile(content, "source", "exec", 1024) # 1024 for AST + except ValueError: + start, end = getstatementrange_old(lineno, source, assertion) + return None, start, end + start, end = get_statement_startend2(lineno, astnode) + # we need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself + block_finder = inspect.BlockFinder() + # if we start with an indented line, put blockfinder to "started" mode + block_finder.started = source.lines[start][0].isspace() + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # the end might still point to a comment or empty line, correct it + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end + + +def getstatementrange_old(lineno, source, assertion=False): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + raise an IndexError if no such statementrange can be found. + """ + # XXX this logic is only used on python2.4 and below + # 1. find the start of the statement + from codeop import compile_command + for start in range(lineno, -1, -1): + if assertion: + line = source.lines[start] + # the following lines are not fully tested, change with care + if 'super' in line and 'self' in line and '__init__' in line: + raise IndexError("likely a subclass") + if "assert" not in line and "raise" not in line: + continue + trylines = source.lines[start:lineno+1] + # quick hack to prepare parsing an indented line with + # compile_command() (which errors on "return" outside defs) + trylines.insert(0, 'def xxx():') + trysource = '\n '.join(trylines) + # ^ space here + try: + compile_command(trysource) + except (SyntaxError, OverflowError, ValueError): + continue + + # 2. 
find the end of the statement + for end in range(lineno+1, len(source)+1): + trysource = source[start:end] + if trysource.isparseable(): + return start, end + raise SyntaxError("no valid source range around line %d " % (lineno,)) + + diff --git a/lib/spack/external/py/_error.py b/lib/spack/external/py/_error.py new file mode 100644 index 0000000000..8ca339beba --- /dev/null +++ b/lib/spack/external/py/_error.py @@ -0,0 +1,89 @@ +""" +create errno-specific classes for IO or os calls. + +""" +import sys, os, errno + +class Error(EnvironmentError): + def __repr__(self): + return "%s.%s %r: %s " %(self.__class__.__module__, + self.__class__.__name__, + self.__class__.__doc__, + " ".join(map(str, self.args)), + #repr(self.args) + ) + + def __str__(self): + s = "[%s]: %s" %(self.__class__.__doc__, + " ".join(map(str, self.args)), + ) + return s + +_winerrnomap = { + 2: errno.ENOENT, + 3: errno.ENOENT, + 17: errno.EEXIST, + 18: errno.EXDEV, + 13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailiable + 22: errno.ENOTDIR, + 20: errno.ENOTDIR, + 267: errno.ENOTDIR, + 5: errno.EACCES, # anything better? +} + +class ErrorMaker(object): + """ lazily provides Exception classes for each possible POSIX errno + (as defined per the 'errno' module). All such instances + subclass EnvironmentError. + """ + Error = Error + _errno2class = {} + + def __getattr__(self, name): + if name[0] == "_": + raise AttributeError(name) + eno = getattr(errno, name) + cls = self._geterrnoclass(eno) + setattr(self, name, cls) + return cls + + def _geterrnoclass(self, eno): + try: + return self._errno2class[eno] + except KeyError: + clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,)) + errorcls = type(Error)(clsname, (Error,), + {'__module__':'py.error', + '__doc__': os.strerror(eno)}) + self._errno2class[eno] = errorcls + return errorcls + + def checked_call(self, func, *args, **kwargs): + """ call a function and raise an errno-exception if applicable. """ + __tracebackhide__ = True + try: + return func(*args, **kwargs) + except self.Error: + raise + except (OSError, EnvironmentError): + cls, value, tb = sys.exc_info() + if not hasattr(value, 'errno'): + raise + __tracebackhide__ = False + errno = value.errno + try: + if not isinstance(value, WindowsError): + raise NameError + except NameError: + # we are not on Windows, or we got a proper OSError + cls = self._geterrnoclass(errno) + else: + try: + cls = self._geterrnoclass(_winerrnomap[errno]) + except KeyError: + raise value + raise cls("%s%r" % (func.__name__, args)) + __tracebackhide__ = True + + +error = ErrorMaker() diff --git a/lib/spack/external/py/_iniconfig.py b/lib/spack/external/py/_iniconfig.py new file mode 100644 index 0000000000..92b50bd853 --- /dev/null +++ b/lib/spack/external/py/_iniconfig.py @@ -0,0 +1,162 @@ +""" brain-dead simple parser for ini-style files. 
+(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed +""" +__version__ = "0.2.dev2" + +__all__ = ['IniConfig', 'ParseError'] + +COMMENTCHARS = "#;" + +class ParseError(Exception): + def __init__(self, path, lineno, msg): + Exception.__init__(self, path, lineno, msg) + self.path = path + self.lineno = lineno + self.msg = msg + + def __str__(self): + return "%s:%s: %s" %(self.path, self.lineno+1, self.msg) + +class SectionWrapper(object): + def __init__(self, config, name): + self.config = config + self.name = name + + def lineof(self, name): + return self.config.lineof(self.name, name) + + def get(self, key, default=None, convert=str): + return self.config.get(self.name, key, convert=convert, default=default) + + def __getitem__(self, key): + return self.config.sections[self.name][key] + + def __iter__(self): + section = self.config.sections.get(self.name, []) + def lineof(key): + return self.config.lineof(self.name, key) + for name in sorted(section, key=lineof): + yield name + + def items(self): + for name in self: + yield name, self[name] + + +class IniConfig(object): + def __init__(self, path, data=None): + self.path = str(path) # convenience + if data is None: + f = open(self.path) + try: + tokens = self._parse(iter(f)) + finally: + f.close() + else: + tokens = self._parse(data.splitlines(True)) + + self._sources = {} + self.sections = {} + + for lineno, section, name, value in tokens: + if section is None: + self._raise(lineno, 'no section header defined') + self._sources[section, name] = lineno + if name is None: + if section in self.sections: + self._raise(lineno, 'duplicate section %r'%(section, )) + self.sections[section] = {} + else: + if name in self.sections[section]: + self._raise(lineno, 'duplicate name %r'%(name, )) + self.sections[section][name] = value + + def _raise(self, lineno, msg): + raise ParseError(self.path, lineno, msg) + + def _parse(self, line_iter): + result = [] + section = None + for lineno, line in enumerate(line_iter): + name, data = self._parseline(line, lineno) + # new value + if name is not None and data is not None: + result.append((lineno, section, name, data)) + # new section + elif name is not None and data is None: + if not name: + self._raise(lineno, 'empty section name') + section = name + result.append((lineno, section, None, None)) + # continuation + elif name is None and data is not None: + if not result: + self._raise(lineno, 'unexpected value continuation') + last = result.pop() + last_name, last_data = last[-2:] + if last_name is None: + self._raise(lineno, 'unexpected value continuation') + + if last_data: + data = '%s\n%s' % (last_data, data) + result.append(last[:-1] + (data,)) + return result + + def _parseline(self, line, lineno): + # blank lines + if iscommentline(line): + line = "" + else: + line = line.rstrip() + if not line: + return None, None + # section + if line[0] == '[': + realline = line + for c in COMMENTCHARS: + line = line.split(c)[0].rstrip() + if line[-1] == "]": + return line[1:-1], None + return None, realline.strip() + # value + elif not line[0].isspace(): + try: + name, value = line.split('=', 1) + if ":" in name: + raise ValueError() + except ValueError: + try: + name, value = line.split(":", 1) + except ValueError: + self._raise(lineno, 'unexpected line: %r' % line) + return name.strip(), value.strip() + # continuation + else: + return None, line.strip() + + def lineof(self, section, name=None): + lineno = self._sources.get((section, name)) + if lineno is not None: + return lineno + 1 + + def get(self, section, 
name, default=None, convert=str): + try: + return convert(self.sections[section][name]) + except KeyError: + return default + + def __getitem__(self, name): + if name not in self.sections: + raise KeyError(name) + return SectionWrapper(self, name) + + def __iter__(self): + for name in sorted(self.sections, key=self.lineof): + yield SectionWrapper(self, name) + + def __contains__(self, arg): + return arg in self.sections + +def iscommentline(line): + c = line.lstrip()[:1] + return c in COMMENTCHARS diff --git a/lib/spack/external/py/_io/__init__.py b/lib/spack/external/py/_io/__init__.py new file mode 100644 index 0000000000..835f01f3ab --- /dev/null +++ b/lib/spack/external/py/_io/__init__.py @@ -0,0 +1 @@ +""" input/output helping """ diff --git a/lib/spack/external/py/_io/capture.py b/lib/spack/external/py/_io/capture.py new file mode 100644 index 0000000000..bc157ed978 --- /dev/null +++ b/lib/spack/external/py/_io/capture.py @@ -0,0 +1,371 @@ +import os +import sys +import py +import tempfile + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +if sys.version_info < (3,0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace') + StringIO.write(self, data) +else: + TextIO = StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" %(data,)) + StringIO.write(self, data) + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, now=True, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + if now: + self.start() + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError("saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(devnullpath, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3,0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + elif isinstance(obj, str): + pass + else: + obj = str(obj) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + +class Capture(object): + def call(cls, func, *args, **kwargs): + """ return a (res, out, err) tuple where + out and err represent the output/error output + during function execution. + call the given function with args/kwargs + and capture output/error during its execution. + """ + so = cls() + try: + res = func(*args, **kwargs) + finally: + out, err = so.reset() + return res, out, err + call = classmethod(call) + + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. 
""" + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. + """ + def __init__(self, out=True, err=True, mixed=False, + in_=True, patchsys=True, now=True): + self._options = { + "out": out, + "err": err, + "mixed": mixed, + "in_": in_, + "patchsys": patchsys, + "now": now, + } + self._save() + if now: + self.startall() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + mixed = self._options['mixed'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture(0, tmpfile=None, now=False, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture(1, tmpfile=tmpfile, + now=False, patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if out and mixed: + tmpfile = self.out.tmpfile + elif hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture(2, tmpfile=tmpfile, + now=False, patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + tmpfile = self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + if hasattr(self, "out"): + out = self._readsnapshot(self.out.tmpfile) + else: + out = "" + if hasattr(self, "err"): + err = self._readsnapshot(self.err.tmpfile) + else: + err = "" + return [out, err] + + def _readsnapshot(self, f): + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). 
+ """ + def __init__(self, out=True, err=True, in_=True, mixed=False, now=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if mixed: + err = out + elif not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + if now: + self.startall() + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + +class DontReadFromInput: + """Temporary stub class. Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + def isatty(self): + return False + def close(self): + pass + +try: + devnullpath = os.devnull +except AttributeError: + if os.name == 'nt': + devnullpath = 'NUL' + else: + devnullpath = '/dev/null' diff --git a/lib/spack/external/py/_io/saferepr.py b/lib/spack/external/py/_io/saferepr.py new file mode 100644 index 0000000000..8518290efd --- /dev/null +++ b/lib/spack/external/py/_io/saferepr.py @@ -0,0 +1,71 @@ +import py +import sys + +builtin_repr = repr + +reprlib = py.builtin._tryimport('repr', 'reprlib') + +class SafeRepr(reprlib.Repr): + """ subclass of repr.Repr that limits the resulting size of repr() + and includes information on exceptions raised during the call. + """ + def repr(self, x): + return self._callhelper(reprlib.Repr.repr, self, x) + + def repr_unicode(self, x, level): + # Strictly speaking wrong on narrow builds + def repr(u): + if "'" not in u: + return py.builtin._totext("'%s'") % u + elif '"' not in u: + return py.builtin._totext('"%s"') % u + else: + return py.builtin._totext("'%s'") % u.replace("'", r"\'") + s = repr(x[:self.maxstring]) + if len(s) > self.maxstring: + i = max(0, (self.maxstring-3)//2) + j = max(0, self.maxstring-3-i) + s = repr(x[:i] + x[len(x)-j:]) + s = s[:i] + '...' 
+ s[len(s)-j:] + return s + + def repr_instance(self, x, level): + return self._callhelper(builtin_repr, x) + + def _callhelper(self, call, x, *args): + try: + # Try the vanilla repr and make sure that the result is a string + s = call(x, *args) + except py.builtin._sysex: + raise + except: + cls, e, tb = sys.exc_info() + exc_name = getattr(cls, '__name__', 'unknown') + try: + exc_info = str(e) + except py.builtin._sysex: + raise + except: + exc_info = 'unknown' + return '<[%s("%s") raised in repr()] %s object at 0x%x>' % ( + exc_name, exc_info, x.__class__.__name__, id(x)) + else: + if len(s) > self.maxsize: + i = max(0, (self.maxsize-3)//2) + j = max(0, self.maxsize-3-i) + s = s[:i] + '...' + s[len(s)-j:] + return s + +def saferepr(obj, maxsize=240): + """ return a size-limited safe repr-string for the given object. + Failing __repr__ functions of user instances will be represented + with a short exception info and 'saferepr' generally takes + care to never raise exceptions itself. This function is a wrapper + around the Repr/reprlib functionality of the standard 2.6 lib. + """ + # review exception handling + srepr = SafeRepr() + srepr.maxstring = maxsize + srepr.maxsize = maxsize + srepr.maxother = 160 + return srepr.repr(obj) diff --git a/lib/spack/external/py/_io/terminalwriter.py b/lib/spack/external/py/_io/terminalwriter.py new file mode 100644 index 0000000000..390e8ca7b9 --- /dev/null +++ b/lib/spack/external/py/_io/terminalwriter.py @@ -0,0 +1,357 @@ +""" + +Helper functions for writing to terminals and files. + +""" + + +import sys, os +import py +py3k = sys.version_info[0] >= 3 +from py.builtin import text, bytes + +win32_and_ctypes = False +colorama = None +if sys.platform == "win32": + try: + import colorama + except ImportError: + try: + import ctypes + win32_and_ctypes = True + except ImportError: + pass + + +def _getdimensions(): + import termios,fcntl,struct + call = fcntl.ioctl(1,termios.TIOCGWINSZ,"\000"*8) + height,width = struct.unpack( "hhhh", call ) [:2] + return height, width + + +def get_terminal_width(): + height = width = 0 + try: + height, width = _getdimensions() + except py.builtin._sysex: + raise + except: + # pass to fallback below + pass + + if width == 0: + # FALLBACK: + # * some exception happened + # * or this is emacs terminal which reports (0,0) + width = int(os.environ.get('COLUMNS', 80)) + + # XXX the windows getdimensions may be bogus, let's sanify a bit + if width < 40: + width = 80 + return width + +terminal_width = get_terminal_width() + +# XXX unify with _escaped func below +def ansi_print(text, esc, file=None, newline=True, flush=False): + if file is None: + file = sys.stderr + text = text.rstrip() + if esc and not isinstance(esc, tuple): + esc = (esc,) + if esc and sys.platform != "win32" and file.isatty(): + text = (''.join(['\x1b[%sm' % cod for cod in esc]) + + text + + '\x1b[0m') # ANSI color code "reset" + if newline: + text += '\n' + + if esc and win32_and_ctypes and file.isatty(): + if 1 in esc: + bold = True + esc = tuple([x for x in esc if x != 1]) + else: + bold = False + esctable = {() : FOREGROUND_WHITE, # normal + (31,): FOREGROUND_RED, # red + (32,): FOREGROUND_GREEN, # green + (33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow + (34,): FOREGROUND_BLUE, # blue + (35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple + (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan + (37,): FOREGROUND_WHITE, # white + (39,): FOREGROUND_WHITE, # reset + } + attr = esctable.get(esc, FOREGROUND_WHITE) + if bold: + attr |= FOREGROUND_INTENSITY + 
STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + if file is sys.stderr: + handle = GetStdHandle(STD_ERROR_HANDLE) + else: + handle = GetStdHandle(STD_OUTPUT_HANDLE) + oldcolors = GetConsoleInfo(handle).wAttributes + attr |= (oldcolors & 0x0f0) + SetConsoleTextAttribute(handle, attr) + while len(text) > 32768: + file.write(text[:32768]) + text = text[32768:] + if text: + file.write(text) + SetConsoleTextAttribute(handle, oldcolors) + else: + file.write(text) + + if flush: + file.flush() + +def should_do_markup(file): + if os.environ.get('PY_COLORS') == '1': + return True + if os.environ.get('PY_COLORS') == '0': + return False + return hasattr(file, 'isatty') and file.isatty() \ + and os.environ.get('TERM') != 'dumb' \ + and not (sys.platform.startswith('java') and os._name == 'nt') + +class TerminalWriter(object): + _esctable = dict(black=30, red=31, green=32, yellow=33, + blue=34, purple=35, cyan=36, white=37, + Black=40, Red=41, Green=42, Yellow=43, + Blue=44, Purple=45, Cyan=46, White=47, + bold=1, light=2, blink=5, invert=7) + + # XXX deprecate stringio argument + def __init__(self, file=None, stringio=False, encoding=None): + if file is None: + if stringio: + self.stringio = file = py.io.TextIO() + else: + file = py.std.sys.stdout + elif py.builtin.callable(file) and not ( + hasattr(file, "write") and hasattr(file, "flush")): + file = WriteFile(file, encoding=encoding) + if hasattr(file, "isatty") and file.isatty() and colorama: + file = colorama.AnsiToWin32(file).stream + self.encoding = encoding or getattr(file, 'encoding', "utf-8") + self._file = file + self.hasmarkup = should_do_markup(file) + self._lastlen = 0 + + @property + def fullwidth(self): + if hasattr(self, '_terminal_width'): + return self._terminal_width + return get_terminal_width() + + @fullwidth.setter + def fullwidth(self, value): + self._terminal_width = value + + def _escaped(self, text, esc): + if esc and self.hasmarkup: + text = (''.join(['\x1b[%sm' % cod for cod in esc]) + + text +'\x1b[0m') + return text + + def markup(self, text, **kw): + esc = [] + for name in kw: + if name not in self._esctable: + raise ValueError("unknown markup: %r" %(name,)) + if kw[name]: + esc.append(self._esctable[name]) + return self._escaped(text, tuple(esc)) + + def sep(self, sepchar, title=None, fullwidth=None, **kw): + if fullwidth is None: + fullwidth = self.fullwidth + # the goal is to have the line be as long as possible + # under the condition that len(line) <= fullwidth + if sys.platform == "win32": + # if we print in the last column on windows we are on a + # new line but there is no way to verify/neutralize this + # (we may not know the exact line width) + # so let's be defensive to avoid empty lines in the output + fullwidth -= 1 + if title is not None: + # we want 2 + 2*len(fill) + len(title) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(title) - 2 + # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) + N = (fullwidth - len(title) - 2) // (2*len(sepchar)) + fill = sepchar * N + line = "%s %s %s" % (fill, title, fill) + else: + # we want len(sepchar)*N <= fullwidth + # i.e. 
N <= fullwidth // len(sepchar) + line = sepchar * (fullwidth // len(sepchar)) + # in some situations there is room for an extra sepchar at the right, + # in particular if we consider that with a sepchar like "_ " the + # trailing space is not important at the end of the line + if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + self.line(line, **kw) + + def write(self, msg, **kw): + if msg: + if not isinstance(msg, (bytes, text)): + msg = text(msg) + if self.hasmarkup and kw: + markupmsg = self.markup(msg, **kw) + else: + markupmsg = msg + write_out(self._file, markupmsg) + + def line(self, s='', **kw): + self.write(s, **kw) + self._checkfill(s) + self.write('\n') + + def reline(self, line, **kw): + if not self.hasmarkup: + raise ValueError("cannot use rewrite-line without terminal") + self.write(line, **kw) + self._checkfill(line) + self.write('\r') + self._lastlen = len(line) + + def _checkfill(self, line): + diff2last = self._lastlen - len(line) + if diff2last > 0: + self.write(" " * diff2last) + +class Win32ConsoleWriter(TerminalWriter): + def write(self, msg, **kw): + if msg: + if not isinstance(msg, (bytes, text)): + msg = text(msg) + oldcolors = None + if self.hasmarkup and kw: + handle = GetStdHandle(STD_OUTPUT_HANDLE) + oldcolors = GetConsoleInfo(handle).wAttributes + default_bg = oldcolors & 0x00F0 + attr = default_bg + if kw.pop('bold', False): + attr |= FOREGROUND_INTENSITY + + if kw.pop('red', False): + attr |= FOREGROUND_RED + elif kw.pop('blue', False): + attr |= FOREGROUND_BLUE + elif kw.pop('green', False): + attr |= FOREGROUND_GREEN + elif kw.pop('yellow', False): + attr |= FOREGROUND_GREEN|FOREGROUND_RED + else: + attr |= oldcolors & 0x0007 + + SetConsoleTextAttribute(handle, attr) + write_out(self._file, msg) + if oldcolors: + SetConsoleTextAttribute(handle, oldcolors) + +class WriteFile(object): + def __init__(self, writemethod, encoding=None): + self.encoding = encoding + self._writemethod = writemethod + + def write(self, data): + if self.encoding: + data = data.encode(self.encoding, "replace") + self._writemethod(data) + + def flush(self): + return + + +if win32_and_ctypes: + TerminalWriter = Win32ConsoleWriter + import ctypes + from ctypes import wintypes + + # ctypes access to the Windows console + STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + FOREGROUND_BLACK = 0x0000 # black text + FOREGROUND_BLUE = 0x0001 # text color contains blue. + FOREGROUND_GREEN = 0x0002 # text color contains green. + FOREGROUND_RED = 0x0004 # text color contains red. + FOREGROUND_WHITE = 0x0007 + FOREGROUND_INTENSITY = 0x0008 # text color is intensified. + BACKGROUND_BLACK = 0x0000 # background color black + BACKGROUND_BLUE = 0x0010 # background color contains blue. + BACKGROUND_GREEN = 0x0020 # background color contains green. + BACKGROUND_RED = 0x0040 # background color contains red. + BACKGROUND_WHITE = 0x0070 + BACKGROUND_INTENSITY = 0x0080 # background color is intensified. 
+ + SHORT = ctypes.c_short + class COORD(ctypes.Structure): + _fields_ = [('X', SHORT), + ('Y', SHORT)] + class SMALL_RECT(ctypes.Structure): + _fields_ = [('Left', SHORT), + ('Top', SHORT), + ('Right', SHORT), + ('Bottom', SHORT)] + class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): + _fields_ = [('dwSize', COORD), + ('dwCursorPosition', COORD), + ('wAttributes', wintypes.WORD), + ('srWindow', SMALL_RECT), + ('dwMaximumWindowSize', COORD)] + + _GetStdHandle = ctypes.windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [wintypes.DWORD] + _GetStdHandle.restype = wintypes.HANDLE + def GetStdHandle(kind): + return _GetStdHandle(kind) + + SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute + SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD] + SetConsoleTextAttribute.restype = wintypes.BOOL + + _GetConsoleScreenBufferInfo = \ + ctypes.windll.kernel32.GetConsoleScreenBufferInfo + _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE, + ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)] + _GetConsoleScreenBufferInfo.restype = wintypes.BOOL + def GetConsoleInfo(handle): + info = CONSOLE_SCREEN_BUFFER_INFO() + _GetConsoleScreenBufferInfo(handle, ctypes.byref(info)) + return info + + def _getdimensions(): + handle = GetStdHandle(STD_OUTPUT_HANDLE) + info = GetConsoleInfo(handle) + # Substract one from the width, otherwise the cursor wraps + # and the ending \n causes an empty line to display. + return info.dwSize.Y, info.dwSize.X - 1 + +def write_out(fil, msg): + # XXX sometimes "msg" is of type bytes, sometimes text which + # complicates the situation. Should we try to enforce unicode? + try: + # on py27 and above writing out to sys.stdout with an encoding + # should usually work for unicode messages (if the encoding is + # capable of it) + fil.write(msg) + except UnicodeEncodeError: + # on py26 it might not work because stdout expects bytes + if fil.encoding: + try: + fil.write(msg.encode(fil.encoding)) + except UnicodeEncodeError: + # it might still fail if the encoding is not capable + pass + else: + fil.flush() + return + # fallback: escape all unicode characters + msg = msg.encode("unicode-escape").decode("ascii") + fil.write(msg) + fil.flush() diff --git a/lib/spack/external/py/_log/__init__.py b/lib/spack/external/py/_log/__init__.py new file mode 100644 index 0000000000..fad62e960d --- /dev/null +++ b/lib/spack/external/py/_log/__init__.py @@ -0,0 +1,2 @@ +""" logging API ('producers' and 'consumers' connected via keywords) """ + diff --git a/lib/spack/external/py/_log/log.py b/lib/spack/external/py/_log/log.py new file mode 100644 index 0000000000..ce47e8c754 --- /dev/null +++ b/lib/spack/external/py/_log/log.py @@ -0,0 +1,186 @@ +""" +basic logging functionality based on a producer/consumer scheme. + +XXX implement this API: (maybe put it into slogger.py?) 
+ + log = Logger( + info=py.log.STDOUT, + debug=py.log.STDOUT, + command=None) + log.info("hello", "world") + log.command("hello", "world") + + log = Logger(info=Logger(something=...), + debug=py.log.STDOUT, + command=None) +""" +import py, sys + +class Message(object): + def __init__(self, keywords, args): + self.keywords = keywords + self.args = args + + def content(self): + return " ".join(map(str, self.args)) + + def prefix(self): + return "[%s] " % (":".join(self.keywords)) + + def __str__(self): + return self.prefix() + self.content() + + +class Producer(object): + """ (deprecated) Log producer API which sends messages to be logged + to a 'consumer' object, which then prints them to stdout, + stderr, files, etc. Used extensively by PyPy-1.1. + """ + + Message = Message # to allow later customization + keywords2consumer = {} + + def __init__(self, keywords, keywordmapper=None, **kw): + if hasattr(keywords, 'split'): + keywords = tuple(keywords.split()) + self._keywords = keywords + if keywordmapper is None: + keywordmapper = default_keywordmapper + self._keywordmapper = keywordmapper + + def __repr__(self): + return "" % ":".join(self._keywords) + + def __getattr__(self, name): + if '_' in name: + raise AttributeError(name) + producer = self.__class__(self._keywords + (name,)) + setattr(self, name, producer) + return producer + + def __call__(self, *args): + """ write a message to the appropriate consumer(s) """ + func = self._keywordmapper.getconsumer(self._keywords) + if func is not None: + func(self.Message(self._keywords, args)) + +class KeywordMapper: + def __init__(self): + self.keywords2consumer = {} + + def getstate(self): + return self.keywords2consumer.copy() + def setstate(self, state): + self.keywords2consumer.clear() + self.keywords2consumer.update(state) + + def getconsumer(self, keywords): + """ return a consumer matching the given keywords. + + tries to find the most suitable consumer by walking, starting from + the back, the list of keywords, the first consumer matching a + keyword is returned (falling back to py.log.default) + """ + for i in range(len(keywords), 0, -1): + try: + return self.keywords2consumer[keywords[:i]] + except KeyError: + continue + return self.keywords2consumer.get('default', default_consumer) + + def setconsumer(self, keywords, consumer): + """ set a consumer for a set of keywords. 
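KeywordMapper.getconsumer() above resolves a producer's keyword chain by trying progressively shorter prefixes and falling back to a default consumer. A self-contained sketch of that lookup (the names here are illustrative, not the vendored API). Note also that Producer.__repr__ above appears to have lost its string literal to angle-bracket stripping; upstream it returns "<py.log.Producer %s>" % ":".join(self._keywords).

    # Longest registered keyword prefix wins, as in KeywordMapper.getconsumer().
    def getconsumer(registry, keywords, default):
        for i in range(len(keywords), 0, -1):
            if keywords[:i] in registry:
                return registry[keywords[:i]]
        return registry.get('default', default)

    registry = {('app',): 'app-consumer', ('app', 'db'): 'db-consumer'}
    assert getconsumer(registry, ('app', 'db', 'query'), None) == 'db-consumer'
    assert getconsumer(registry, ('app', 'web'), None) == 'app-consumer'
    assert getconsumer(registry, ('other',), 'fallback') == 'fallback'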
""" + # normalize to tuples + if isinstance(keywords, str): + keywords = tuple(filter(None, keywords.split())) + elif hasattr(keywords, '_keywords'): + keywords = keywords._keywords + elif not isinstance(keywords, tuple): + raise TypeError("key %r is not a string or tuple" % (keywords,)) + if consumer is not None and not py.builtin.callable(consumer): + if not hasattr(consumer, 'write'): + raise TypeError( + "%r should be None, callable or file-like" % (consumer,)) + consumer = File(consumer) + self.keywords2consumer[keywords] = consumer + +def default_consumer(msg): + """ the default consumer, prints the message to stdout (using 'print') """ + sys.stderr.write(str(msg)+"\n") + +default_keywordmapper = KeywordMapper() + +def setconsumer(keywords, consumer): + default_keywordmapper.setconsumer(keywords, consumer) + +def setstate(state): + default_keywordmapper.setstate(state) +def getstate(): + return default_keywordmapper.getstate() + +# +# Consumers +# + +class File(object): + """ log consumer wrapping a file(-like) object """ + def __init__(self, f): + assert hasattr(f, 'write') + #assert isinstance(f, file) or not hasattr(f, 'open') + self._file = f + + def __call__(self, msg): + """ write a message to the log """ + self._file.write(str(msg) + "\n") + if hasattr(self._file, 'flush'): + self._file.flush() + +class Path(object): + """ log consumer that opens and writes to a Path """ + def __init__(self, filename, append=False, + delayed_create=False, buffering=False): + self._append = append + self._filename = str(filename) + self._buffering = buffering + if not delayed_create: + self._openfile() + + def _openfile(self): + mode = self._append and 'a' or 'w' + f = open(self._filename, mode) + self._file = f + + def __call__(self, msg): + """ write a message to the log """ + if not hasattr(self, "_file"): + self._openfile() + self._file.write(str(msg) + "\n") + if not self._buffering: + self._file.flush() + +def STDOUT(msg): + """ consumer that writes to sys.stdout """ + sys.stdout.write(str(msg)+"\n") + +def STDERR(msg): + """ consumer that writes to sys.stderr """ + sys.stderr.write(str(msg)+"\n") + +class Syslog: + """ consumer that writes to the syslog daemon """ + + def __init__(self, priority = None): + if priority is None: + priority = self.LOG_INFO + self.priority = priority + + def __call__(self, msg): + """ write a message to the log """ + py.std.syslog.syslog(self.priority, str(msg)) + +for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split(): + _prio = "LOG_" + _prio + try: + setattr(Syslog, _prio, getattr(py.std.syslog, _prio)) + except AttributeError: + pass diff --git a/lib/spack/external/py/_log/warning.py b/lib/spack/external/py/_log/warning.py new file mode 100644 index 0000000000..722e31e910 --- /dev/null +++ b/lib/spack/external/py/_log/warning.py @@ -0,0 +1,76 @@ +import py, sys + +class DeprecationWarning(DeprecationWarning): + def __init__(self, msg, path, lineno): + self.msg = msg + self.path = path + self.lineno = lineno + def __repr__(self): + return "%s:%d: %s" %(self.path, self.lineno+1, self.msg) + def __str__(self): + return self.msg + +def _apiwarn(startversion, msg, stacklevel=2, function=None): + # below is mostly COPIED from python2.4/warnings.py's def warn() + # Get context information + if isinstance(stacklevel, str): + frame = sys._getframe(1) + level = 1 + found = frame.f_code.co_filename.find(stacklevel) != -1 + while frame: + co = frame.f_code + if co.co_filename.find(stacklevel) == -1: + if found: + stacklevel = level + break + else: + 
found = True + level += 1 + frame = frame.f_back + else: + stacklevel = 1 + msg = "%s (since version %s)" %(msg, startversion) + warn(msg, stacklevel=stacklevel+1, function=function) + +def warn(msg, stacklevel=1, function=None): + if function is not None: + filename = py.std.inspect.getfile(function) + lineno = py.code.getrawcode(function).co_firstlineno + else: + try: + caller = sys._getframe(stacklevel) + except ValueError: + globals = sys.__dict__ + lineno = 1 + else: + globals = caller.f_globals + lineno = caller.f_lineno + if '__name__' in globals: + module = globals['__name__'] + else: + module = "" + filename = globals.get('__file__') + if filename: + fnl = filename.lower() + if fnl.endswith(".pyc") or fnl.endswith(".pyo"): + filename = filename[:-1] + elif fnl.endswith("$py.class"): + filename = filename.replace('$py.class', '.py') + else: + if module == "__main__": + try: + filename = sys.argv[0] + except AttributeError: + # embedded interpreters don't have sys.argv, see bug #839151 + filename = '__main__' + if not filename: + filename = module + path = py.path.local(filename) + warning = DeprecationWarning(msg, path, lineno) + py.std.warnings.warn_explicit(warning, category=Warning, + filename=str(warning.path), + lineno=warning.lineno, + registry=py.std.warnings.__dict__.setdefault( + "__warningsregistry__", {}) + ) + diff --git a/lib/spack/external/py/_path/__init__.py b/lib/spack/external/py/_path/__init__.py new file mode 100644 index 0000000000..51f3246f80 --- /dev/null +++ b/lib/spack/external/py/_path/__init__.py @@ -0,0 +1 @@ +""" unified file system api """ diff --git a/lib/spack/external/py/_path/cacheutil.py b/lib/spack/external/py/_path/cacheutil.py new file mode 100644 index 0000000000..9922504750 --- /dev/null +++ b/lib/spack/external/py/_path/cacheutil.py @@ -0,0 +1,114 @@ +""" +This module contains multithread-safe cache implementations. + +All Caches have + + getorbuild(key, builder) + delentry(key) + +methods and allow configuration when instantiating the cache class. +""" +from time import time as gettime + +class BasicCache(object): + def __init__(self, maxentries=128): + self.maxentries = maxentries + self.prunenum = int(maxentries - maxentries/8) + self._dict = {} + + def clear(self): + self._dict.clear() + + def _getentry(self, key): + return self._dict[key] + + def _putentry(self, key, entry): + self._prunelowestweight() + self._dict[key] = entry + + def delentry(self, key, raising=False): + try: + del self._dict[key] + except KeyError: + if raising: + raise + + def getorbuild(self, key, builder): + try: + entry = self._getentry(key) + except KeyError: + entry = self._build(key, builder) + self._putentry(key, entry) + return entry.value + + def _prunelowestweight(self): + """ prune out entries with lowest weight. """ + numentries = len(self._dict) + if numentries >= self.maxentries: + # evict according to entry's weight + items = [(entry.weight, key) + for key, entry in self._dict.items()] + items.sort() + index = numentries - self.prunenum + if index > 0: + for weight, key in items[:index]: + # in MT situations the element might be gone + self.delentry(key, raising=False) + +class BuildcostAccessCache(BasicCache): + """ A BuildTime/Access-counting cache implementation. + the weight of a value is computed as the product of + + num-accesses-of-a-value * time-to-build-the-value + + The values with the least such weights are evicted + if the cache maxentries threshold is superceded. 
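The cache classes in this module expose a single getorbuild(key, builder) entry point; a value is built once and then served from the cache until it is pruned by weight or expires by age. A small usage sketch, assuming the vendored module is importable as py._path.cacheutil:

    from py._path.cacheutil import BuildcostAccessCache, AgingCache

    cache = BuildcostAccessCache(maxentries=128)

    def expensive():
        return sum(range(10 ** 6))

    v1 = cache.getorbuild("total", expensive)   # built on first use...
    v2 = cache.getorbuild("total", expensive)   # ...then returned from the cache
    assert v1 == v2

    aging = AgingCache(maxentries=16, maxseconds=0.5)
    aging.getorbuild("rev", lambda: 42)         # entry expires ~0.5s after being built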
+ For implementation flexibility more than one object + might be evicted at a time. + """ + # time function to use for measuring build-times + + def _build(self, key, builder): + start = gettime() + val = builder() + end = gettime() + return WeightedCountingEntry(val, end-start) + + +class WeightedCountingEntry(object): + def __init__(self, value, oneweight): + self._value = value + self.weight = self._oneweight = oneweight + + def value(self): + self.weight += self._oneweight + return self._value + value = property(value) + +class AgingCache(BasicCache): + """ This cache prunes out cache entries that are too old. + """ + def __init__(self, maxentries=128, maxseconds=10.0): + super(AgingCache, self).__init__(maxentries) + self.maxseconds = maxseconds + + def _getentry(self, key): + entry = self._dict[key] + if entry.isexpired(): + self.delentry(key) + raise KeyError(key) + return entry + + def _build(self, key, builder): + val = builder() + entry = AgingEntry(val, gettime() + self.maxseconds) + return entry + +class AgingEntry(object): + def __init__(self, value, expirationtime): + self.value = value + self.weight = expirationtime + + def isexpired(self): + t = gettime() + return t >= self.weight diff --git a/lib/spack/external/py/_path/common.py b/lib/spack/external/py/_path/common.py new file mode 100644 index 0000000000..bf42ed5092 --- /dev/null +++ b/lib/spack/external/py/_path/common.py @@ -0,0 +1,439 @@ +""" +""" +import os, sys, posixpath +import py + +# Moved from local.py. +iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt') + +try: + from os import fspath +except ImportError: + def fspath(path): + """ + Return the string representation of the path. + If str or bytes is passed in, it is returned unchanged. + This code comes from PEP 519, modified to support earlier versions of + python. + + This is required for python < 3.6. + """ + if isinstance(path, (py.builtin.text, py.builtin.bytes)): + return path + + # Work from the object's type to match method resolution of other magic + # methods. + path_type = type(path) + try: + return path_type.__fspath__(path) + except AttributeError: + if hasattr(path_type, '__fspath__'): + raise + try: + import pathlib + except ImportError: + pass + else: + if isinstance(path, pathlib.PurePath): + return py.builtin.text(path) + + raise TypeError("expected str, bytes or os.PathLike object, not " + + path_type.__name__) + +class Checkers: + _depend_on_existence = 'exists', 'link', 'dir', 'file' + + def __init__(self, path): + self.path = path + + def dir(self): + raise NotImplementedError + + def file(self): + raise NotImplementedError + + def dotfile(self): + return self.path.basename.startswith('.') + + def ext(self, arg): + if not arg.startswith('.'): + arg = '.' 
+ arg + return self.path.ext == arg + + def exists(self): + raise NotImplementedError + + def basename(self, arg): + return self.path.basename == arg + + def basestarts(self, arg): + return self.path.basename.startswith(arg) + + def relto(self, arg): + return self.path.relto(arg) + + def fnmatch(self, arg): + return self.path.fnmatch(arg) + + def endswith(self, arg): + return str(self.path).endswith(arg) + + def _evaluate(self, kw): + for name, value in kw.items(): + invert = False + meth = None + try: + meth = getattr(self, name) + except AttributeError: + if name[:3] == 'not': + invert = True + try: + meth = getattr(self, name[3:]) + except AttributeError: + pass + if meth is None: + raise TypeError( + "no %r checker available for %r" % (name, self.path)) + try: + if py.code.getrawcode(meth).co_argcount > 1: + if (not meth(value)) ^ invert: + return False + else: + if bool(value) ^ bool(meth()) ^ invert: + return False + except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY): + # EBUSY feels not entirely correct, + # but its kind of necessary since ENOMEDIUM + # is not accessible in python + for name in self._depend_on_existence: + if name in kw: + if kw.get(name): + return False + name = 'not' + name + if name in kw: + if not kw.get(name): + return False + return True + +class NeverRaised(Exception): + pass + +class PathBase(object): + """ shared implementation for filesystem path objects.""" + Checkers = Checkers + + def __div__(self, other): + return self.join(fspath(other)) + __truediv__ = __div__ # py3k + + def basename(self): + """ basename part of path. """ + return self._getbyspec('basename')[0] + basename = property(basename, None, None, basename.__doc__) + + def dirname(self): + """ dirname part of path. """ + return self._getbyspec('dirname')[0] + dirname = property(dirname, None, None, dirname.__doc__) + + def purebasename(self): + """ pure base name of the path.""" + return self._getbyspec('purebasename')[0] + purebasename = property(purebasename, None, None, purebasename.__doc__) + + def ext(self): + """ extension of the path (including the '.').""" + return self._getbyspec('ext')[0] + ext = property(ext, None, None, ext.__doc__) + + def dirpath(self, *args, **kwargs): + """ return the directory path joined with any given path arguments. """ + return self.new(basename='').join(*args, **kwargs) + + def read_binary(self): + """ read and return a bytestring from reading the path. """ + with self.open('rb') as f: + return f.read() + + def read_text(self, encoding): + """ read and return a Unicode string from reading the path. """ + with self.open("r", encoding=encoding) as f: + return f.read() + + + def read(self, mode='r'): + """ read and return a bytestring from reading the path. """ + with self.open(mode) as f: + return f.read() + + def readlines(self, cr=1): + """ read and return a list of lines from the path. if cr is False, the +newline will be removed from the end of each line. """ + if not cr: + content = self.read('rU') + return content.split('\n') + else: + f = self.open('rU') + try: + return f.readlines() + finally: + f.close() + + def load(self): + """ (deprecated) return object unpickled from self.read() """ + f = self.open('rb') + try: + return py.error.checked_call(py.std.pickle.load, f) + finally: + f.close() + + def move(self, target): + """ move this path to target. 
""" + if target.relto(self): + raise py.error.EINVAL(target, + "cannot move path into a subdirectory of itself") + try: + self.rename(target) + except py.error.EXDEV: # invalid cross-device link + self.copy(target) + self.remove() + + def __repr__(self): + """ return a string representation of this path. """ + return repr(str(self)) + + def check(self, **kw): + """ check a path for existence and properties. + + Without arguments, return True if the path exists, otherwise False. + + valid checkers:: + + file=1 # is a file + file=0 # is not a file (may not even exist) + dir=1 # is a dir + link=1 # is a link + exists=1 # exists + + You can specify multiple checker definitions, for example:: + + path.check(file=1, link=1) # a link pointing to a file + """ + if not kw: + kw = {'exists' : 1} + return self.Checkers(self)._evaluate(kw) + + def fnmatch(self, pattern): + """return true if the basename/fullname matches the glob-'pattern'. + + valid pattern characters:: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + If the pattern contains a path-separator then the full path + is used for pattern matching and a '*' is prepended to the + pattern. + + if the pattern doesn't contain a path-separator the pattern + is only matched against the basename. + """ + return FNMatcher(pattern)(self) + + def relto(self, relpath): + """ return a string which is the relative part of the path + to the given 'relpath'. + """ + if not isinstance(relpath, (str, PathBase)): + raise TypeError("%r: not a string or path object" %(relpath,)) + strrelpath = str(relpath) + if strrelpath and strrelpath[-1] != self.sep: + strrelpath += self.sep + #assert strrelpath[-1] == self.sep + #assert strrelpath[-2] != self.sep + strself = self.strpath + if sys.platform == "win32" or getattr(os, '_name', None) == 'nt': + if os.path.normcase(strself).startswith( + os.path.normcase(strrelpath)): + return strself[len(strrelpath):] + elif strself.startswith(strrelpath): + return strself[len(strrelpath):] + return "" + + def ensure_dir(self, *args): + """ ensure the path joined with args is a directory. """ + return self.ensure(*args, **{"dir": True}) + + def bestrelpath(self, dest): + """ return a string which is a relative path from self + (assumed to be a directory) to dest such that + self.join(bestrelpath) == dest and if not such + path can be determined return dest. + """ + try: + if self == dest: + return os.curdir + base = self.common(dest) + if not base: # can be the case on windows + return str(dest) + self2base = self.relto(base) + reldest = dest.relto(base) + if self2base: + n = self2base.count(self.sep) + 1 + else: + n = 0 + l = [os.pardir] * n + if reldest: + l.append(reldest) + target = dest.sep.join(l) + return target + except AttributeError: + return str(dest) + + def exists(self): + return self.check() + + def isdir(self): + return self.check(dir=1) + + def isfile(self): + return self.check(file=1) + + def parts(self, reverse=False): + """ return a root-first list of all ancestor directories + plus the path itself. + """ + current = self + l = [self] + while 1: + last = current + current = current.dirpath() + if last == current: + break + l.append(current) + if not reverse: + l.reverse() + return l + + def common(self, other): + """ return the common part shared with the other path + or None if there is no common part. 
+ """ + last = None + for x, y in zip(self.parts(), other.parts()): + if x != y: + return last + last = x + return last + + def __add__(self, other): + """ return new path object with 'other' added to the basename""" + return self.new(basename=self.basename+str(other)) + + def __cmp__(self, other): + """ return sort value (-1, 0, +1). """ + try: + return cmp(self.strpath, other.strpath) + except AttributeError: + return cmp(str(self), str(other)) # self.path, other.path) + + def __lt__(self, other): + try: + return self.strpath < other.strpath + except AttributeError: + return str(self) < str(other) + + def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False): + """ yields all paths below the current one + + fil is a filter (glob pattern or callable), if not matching the + path will not be yielded, defaulting to None (everything is + returned) + + rec is a filter (glob pattern or callable) that controls whether + a node is descended, defaulting to None + + ignore is an Exception class that is ignoredwhen calling dirlist() + on any of the paths (by default, all exceptions are reported) + + bf if True will cause a breadthfirst search instead of the + default depthfirst. Default: False + + sort if True will sort entries within each directory level. + """ + for x in Visitor(fil, rec, ignore, bf, sort).gen(self): + yield x + + def _sortlist(self, res, sort): + if sort: + if hasattr(sort, '__call__'): + res.sort(sort) + else: + res.sort() + + def samefile(self, other): + """ return True if other refers to the same stat object as self. """ + return self.strpath == str(other) + + def __fspath__(self): + return str(self) + +class Visitor: + def __init__(self, fil, rec, ignore, bf, sort): + if isinstance(fil, py.builtin._basestring): + fil = FNMatcher(fil) + if isinstance(rec, py.builtin._basestring): + self.rec = FNMatcher(rec) + elif not hasattr(rec, '__call__') and rec: + self.rec = lambda path: True + else: + self.rec = rec + self.fil = fil + self.ignore = ignore + self.breadthfirst = bf + self.optsort = sort and sorted or (lambda x: x) + + def gen(self, path): + try: + entries = path.listdir() + except self.ignore: + return + rec = self.rec + dirs = self.optsort([p for p in entries + if p.check(dir=1) and (rec is None or rec(p))]) + if not self.breadthfirst: + for subdir in dirs: + for p in self.gen(subdir): + yield p + for p in self.optsort(entries): + if self.fil is None or self.fil(p): + yield p + if self.breadthfirst: + for subdir in dirs: + for p in self.gen(subdir): + yield p + +class FNMatcher: + def __init__(self, pattern): + self.pattern = pattern + + def __call__(self, path): + pattern = self.pattern + + if (pattern.find(path.sep) == -1 and + iswin32 and + pattern.find(posixpath.sep) != -1): + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posixpath.sep, path.sep) + + if pattern.find(path.sep) == -1: + name = path.basename + else: + name = str(path) # path.strpath # XXX svn? + if not os.path.isabs(pattern): + pattern = '*' + path.sep + pattern + return py.std.fnmatch.fnmatch(name, pattern) diff --git a/lib/spack/external/py/_path/local.py b/lib/spack/external/py/_path/local.py new file mode 100644 index 0000000000..0d4e4c93d1 --- /dev/null +++ b/lib/spack/external/py/_path/local.py @@ -0,0 +1,928 @@ +""" +local path implementation. 
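visit() above drives the Visitor/FNMatcher pair: fil filters what gets yielded, rec decides which directories are descended. A short sketch, again assuming the py.path.local class from later in this diff:

    import py

    tmp = py.path.local.mkdtemp()
    try:
        tmp.ensure("a", "x.py")
        tmp.ensure("a", "b", "y.txt")
        tmp.ensure(".hidden", "z.py")

        found = [p.basename for p in tmp.visit(fil="*.py",
                                               rec=lambda d: d.basename != ".hidden")]
        assert found == ["x.py"]   # y.txt filtered out, .hidden never descended
    finally:
        tmp.remove()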
+""" +from __future__ import with_statement + +from contextlib import contextmanager +import sys, os, re, atexit, io +import py +from py._path import common +from py._path.common import iswin32, fspath +from stat import S_ISLNK, S_ISDIR, S_ISREG + +from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname + +if sys.version_info > (3,0): + def map_as_list(func, iter): + return list(map(func, iter)) +else: + map_as_list = map + +class Stat(object): + def __getattr__(self, name): + return getattr(self._osstatresult, "st_" + name) + + def __init__(self, path, osstatresult): + self.path = path + self._osstatresult = osstatresult + + @property + def owner(self): + if iswin32: + raise NotImplementedError("XXX win32") + import pwd + entry = py.error.checked_call(pwd.getpwuid, self.uid) + return entry[0] + + @property + def group(self): + """ return group name of file. """ + if iswin32: + raise NotImplementedError("XXX win32") + import grp + entry = py.error.checked_call(grp.getgrgid, self.gid) + return entry[0] + + def isdir(self): + return S_ISDIR(self._osstatresult.st_mode) + + def isfile(self): + return S_ISREG(self._osstatresult.st_mode) + + def islink(self): + st = self.path.lstat() + return S_ISLNK(self._osstatresult.st_mode) + +class PosixPath(common.PathBase): + def chown(self, user, group, rec=0): + """ change ownership to the given user and group. + user and group may be specified by a number or + by a name. if rec is True change ownership + recursively. + """ + uid = getuserid(user) + gid = getgroupid(group) + if rec: + for x in self.visit(rec=lambda x: x.check(link=0)): + if x.check(link=0): + py.error.checked_call(os.chown, str(x), uid, gid) + py.error.checked_call(os.chown, str(self), uid, gid) + + def readlink(self): + """ return value of a symbolic link. """ + return py.error.checked_call(os.readlink, self.strpath) + + def mklinkto(self, oldname): + """ posix style hard link to another name. """ + py.error.checked_call(os.link, str(oldname), str(self)) + + def mksymlinkto(self, value, absolute=1): + """ create a symbolic link with the given value (pointing to another name). """ + if absolute: + py.error.checked_call(os.symlink, str(value), self.strpath) + else: + base = self.common(value) + # with posix local paths '/' is always a common base + relsource = self.__class__(value).relto(base) + reldest = self.relto(base) + n = reldest.count(self.sep) + target = self.sep.join(('..', )*n + (relsource, )) + py.error.checked_call(os.symlink, target, self.strpath) + +def getuserid(user): + import pwd + if not isinstance(user, int): + user = pwd.getpwnam(user)[2] + return user + +def getgroupid(group): + import grp + if not isinstance(group, int): + group = grp.getgrnam(group)[2] + return group + +FSBase = not iswin32 and PosixPath or common.PathBase + +class LocalPath(FSBase): + """ object oriented interface to os.path and other local filesystem + related information. 
+ """ + class ImportMismatchError(ImportError): + """ raised on pyimport() if there is a mismatch of __file__'s""" + + sep = os.sep + class Checkers(common.Checkers): + def _stat(self): + try: + return self._statcache + except AttributeError: + try: + self._statcache = self.path.stat() + except py.error.ELOOP: + self._statcache = self.path.lstat() + return self._statcache + + def dir(self): + return S_ISDIR(self._stat().mode) + + def file(self): + return S_ISREG(self._stat().mode) + + def exists(self): + return self._stat() + + def link(self): + st = self.path.lstat() + return S_ISLNK(st.mode) + + def __init__(self, path=None, expanduser=False): + """ Initialize and return a local Path instance. + + Path can be relative to the current directory. + If path is None it defaults to the current working directory. + If expanduser is True, tilde-expansion is performed. + Note that Path instances always carry an absolute path. + Note also that passing in a local path object will simply return + the exact same path object. Use new() to get a new copy. + """ + if path is None: + self.strpath = py.error.checked_call(os.getcwd) + else: + try: + path = fspath(path) + except TypeError: + raise ValueError("can only pass None, Path instances " + "or non-empty strings to LocalPath") + if expanduser: + path = os.path.expanduser(path) + self.strpath = abspath(path) + + def __hash__(self): + return hash(self.strpath) + + def __eq__(self, other): + s1 = fspath(self) + try: + s2 = fspath(other) + except TypeError: + return False + if iswin32: + s1 = s1.lower() + try: + s2 = s2.lower() + except AttributeError: + return False + return s1 == s2 + + def __ne__(self, other): + return not (self == other) + + def __lt__(self, other): + return fspath(self) < fspath(other) + + def __gt__(self, other): + return fspath(self) > fspath(other) + + def samefile(self, other): + """ return True if 'other' references the same file as 'self'. + """ + other = fspath(other) + if not isabs(other): + other = abspath(other) + if self == other: + return True + if iswin32: + return False # there is no samefile + return py.error.checked_call( + os.path.samefile, self.strpath, other) + + def remove(self, rec=1, ignore_errors=False): + """ remove a file or directory (or a directory tree if rec=1). + if ignore_errors is True, errors while removing directories will + be ignored. + """ + if self.check(dir=1, link=0): + if rec: + # force remove of readonly files on windows + if iswin32: + self.chmod(448, rec=1) # octcal 0700 + py.error.checked_call(py.std.shutil.rmtree, self.strpath, + ignore_errors=ignore_errors) + else: + py.error.checked_call(os.rmdir, self.strpath) + else: + if iswin32: + self.chmod(448) # octcal 0700 + py.error.checked_call(os.remove, self.strpath) + + def computehash(self, hashtype="md5", chunksize=524288): + """ return hexdigest of hashvalue for this file. """ + try: + try: + import hashlib as mod + except ImportError: + if hashtype == "sha1": + hashtype = "sha" + mod = __import__(hashtype) + hash = getattr(mod, hashtype)() + except (AttributeError, ImportError): + raise ValueError("Don't know how to compute %r hash" %(hashtype,)) + f = self.open('rb') + try: + while 1: + buf = f.read(chunksize) + if not buf: + return hash.hexdigest() + hash.update(buf) + finally: + f.close() + + def new(self, **kw): + """ create a modified version of this path. 
+ the following keyword arguments modify various path parts:: + + a:/some/path/to/a/file.ext + xx drive + xxxxxxxxxxxxxxxxx dirname + xxxxxxxx basename + xxxx purebasename + xxx ext + """ + obj = object.__new__(self.__class__) + if not kw: + obj.strpath = self.strpath + return obj + drive, dirname, basename, purebasename,ext = self._getbyspec( + "drive,dirname,basename,purebasename,ext") + if 'basename' in kw: + if 'purebasename' in kw or 'ext' in kw: + raise ValueError("invalid specification %r" % kw) + else: + pb = kw.setdefault('purebasename', purebasename) + try: + ext = kw['ext'] + except KeyError: + pass + else: + if ext and not ext.startswith('.'): + ext = '.' + ext + kw['basename'] = pb + ext + + if ('dirname' in kw and not kw['dirname']): + kw['dirname'] = drive + else: + kw.setdefault('dirname', dirname) + kw.setdefault('sep', self.sep) + obj.strpath = normpath( + "%(dirname)s%(sep)s%(basename)s" % kw) + return obj + + def _getbyspec(self, spec): + """ see new for what 'spec' can be. """ + res = [] + parts = self.strpath.split(self.sep) + + args = filter(None, spec.split(',') ) + append = res.append + for name in args: + if name == 'drive': + append(parts[0]) + elif name == 'dirname': + append(self.sep.join(parts[:-1])) + else: + basename = parts[-1] + if name == 'basename': + append(basename) + else: + i = basename.rfind('.') + if i == -1: + purebasename, ext = basename, '' + else: + purebasename, ext = basename[:i], basename[i:] + if name == 'purebasename': + append(purebasename) + elif name == 'ext': + append(ext) + else: + raise ValueError("invalid part specification %r" % name) + return res + + def dirpath(self, *args, **kwargs): + """ return the directory path joined with any given path arguments. """ + if not kwargs: + path = object.__new__(self.__class__) + path.strpath = dirname(self.strpath) + if args: + path = path.join(*args) + return path + return super(LocalPath, self).dirpath(*args, **kwargs) + + def join(self, *args, **kwargs): + """ return a new path by appending all 'args' as path + components. if abs=1 is used restart from root if any + of the args is an absolute path. + """ + sep = self.sep + strargs = [fspath(arg) for arg in args] + strpath = self.strpath + if kwargs.get('abs'): + newargs = [] + for arg in reversed(strargs): + if isabs(arg): + strpath = arg + strargs = newargs + break + newargs.insert(0, arg) + for arg in strargs: + arg = arg.strip(sep) + if iswin32: + # allow unix style paths even on windows. + arg = arg.strip('/') + arg = arg.replace('/', sep) + strpath = strpath + sep + arg + obj = object.__new__(self.__class__) + obj.strpath = normpath(strpath) + return obj + + def open(self, mode='r', ensure=False, encoding=None): + """ return an opened file with the given mode. + + If ensure is True, create parent directories if needed. 
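The drive/dirname/basename/purebasename/ext split documented in new() above behaves as in this illustrative sketch (the example path is arbitrary):

    import py

    p = py.path.local("/some/path/to/a/file.ext")
    assert p.purebasename == "file"
    assert p.ext == ".ext"
    assert p.dirpath().basename == "a"
    assert p.new(ext=".txt").basename == "file.txt"            # swap only the extension
    assert p.new(purebasename="other").basename == "other.ext"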
+ """ + if ensure: + self.dirpath().ensure(dir=1) + if encoding: + return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding) + return py.error.checked_call(open, self.strpath, mode) + + def _fastjoin(self, name): + child = object.__new__(self.__class__) + child.strpath = self.strpath + self.sep + name + return child + + def islink(self): + return islink(self.strpath) + + def check(self, **kw): + if not kw: + return exists(self.strpath) + if len(kw) == 1: + if "dir" in kw: + return not kw["dir"] ^ isdir(self.strpath) + if "file" in kw: + return not kw["file"] ^ isfile(self.strpath) + return super(LocalPath, self).check(**kw) + + _patternchars = set("*?[" + os.path.sep) + def listdir(self, fil=None, sort=None): + """ list directory contents, possibly filter by the given fil func + and possibly sorted. + """ + if fil is None and sort is None: + names = py.error.checked_call(os.listdir, self.strpath) + return map_as_list(self._fastjoin, names) + if isinstance(fil, py.builtin._basestring): + if not self._patternchars.intersection(fil): + child = self._fastjoin(fil) + if exists(child.strpath): + return [child] + return [] + fil = common.FNMatcher(fil) + names = py.error.checked_call(os.listdir, self.strpath) + res = [] + for name in names: + child = self._fastjoin(name) + if fil is None or fil(child): + res.append(child) + self._sortlist(res, sort) + return res + + def size(self): + """ return size of the underlying file object """ + return self.stat().size + + def mtime(self): + """ return last modification time of the path. """ + return self.stat().mtime + + def copy(self, target, mode=False, stat=False): + """ copy path to target. + + If mode is True, will copy copy permission from path to target. + If stat is True, copy permission, last modification + time, last access time, and flags from path to target. + """ + if self.check(file=1): + if target.check(dir=1): + target = target.join(self.basename) + assert self!=target + copychunked(self, target) + if mode: + copymode(self.strpath, target.strpath) + if stat: + copystat(self, target) + else: + def rec(p): + return p.check(link=0) + for x in self.visit(rec=rec): + relpath = x.relto(self) + newx = target.join(relpath) + newx.dirpath().ensure(dir=1) + if x.check(link=1): + newx.mksymlinkto(x.readlink()) + continue + elif x.check(file=1): + copychunked(x, newx) + elif x.check(dir=1): + newx.ensure(dir=1) + if mode: + copymode(x.strpath, newx.strpath) + if stat: + copystat(x, newx) + + def rename(self, target): + """ rename this path to target. """ + target = fspath(target) + return py.error.checked_call(os.rename, self.strpath, target) + + def dump(self, obj, bin=1): + """ pickle object into path location""" + f = self.open('wb') + try: + py.error.checked_call(py.std.pickle.dump, obj, f, bin) + finally: + f.close() + + def mkdir(self, *args): + """ create & return the directory joined with args. """ + p = self.join(*args) + py.error.checked_call(os.mkdir, fspath(p)) + return p + + def write_binary(self, data, ensure=False): + """ write binary data into path. If ensure is True create + missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open('wb') as f: + f.write(data) + + def write_text(self, data, encoding, ensure=False): + """ write text data into path using the specified encoding. + If ensure is True create missing parent directories. 
+ """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open('w', encoding=encoding) as f: + f.write(data) + + def write(self, data, mode='w', ensure=False): + """ write data into path. If ensure is True create + missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + if 'b' in mode: + if not py.builtin._isbytes(data): + raise ValueError("can only process bytes") + else: + if not py.builtin._istext(data): + if not py.builtin._isbytes(data): + data = str(data) + else: + data = py.builtin._totext(data, sys.getdefaultencoding()) + f = self.open(mode) + try: + f.write(data) + finally: + f.close() + + def _ensuredirs(self): + parent = self.dirpath() + if parent == self: + return self + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + try: + self.mkdir() + except py.error.EEXIST: + # race condition: file/dir created by another thread/process. + # complain if it is not a dir + if self.check(dir=0): + raise + return self + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if kwargs.get('dir', 0): + return p._ensuredirs() + else: + p.dirpath()._ensuredirs() + if not p.check(file=1): + p.open('w').close() + return p + + def stat(self, raising=True): + """ Return an os.stat() tuple. """ + if raising == True: + return Stat(self, py.error.checked_call(os.stat, self.strpath)) + try: + return Stat(self, os.stat(self.strpath)) + except KeyboardInterrupt: + raise + except Exception: + return None + + def lstat(self): + """ Return an os.lstat() tuple. """ + return Stat(self, py.error.checked_call(os.lstat, self.strpath)) + + def setmtime(self, mtime=None): + """ set modification time for the given path. if 'mtime' is None + (the default) then the file's mtime is set to current time. + + Note that the resolution for 'mtime' is platform dependent. + """ + if mtime is None: + return py.error.checked_call(os.utime, self.strpath, mtime) + try: + return py.error.checked_call(os.utime, self.strpath, (-1, mtime)) + except py.error.EINVAL: + return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime)) + + def chdir(self): + """ change directory to self and return old current directory """ + try: + old = self.__class__() + except py.error.ENOENT: + old = None + py.error.checked_call(os.chdir, self.strpath) + return old + + + @contextmanager + def as_cwd(self): + """ return context manager which changes to current dir during the + managed "with" context. On __enter__ it returns the old dir. + """ + old = self.chdir() + try: + yield old + finally: + old.chdir() + + def realpath(self): + """ return a new path which contains no symbolic links.""" + return self.__class__(os.path.realpath(self.strpath)) + + def atime(self): + """ return last access time of the path. """ + return self.stat().atime + + def __repr__(self): + return 'local(%r)' % self.strpath + + def __str__(self): + """ return string representation of the Path. """ + return self.strpath + + def chmod(self, mode, rec=0): + """ change permissions to the given mode. If mode is an + integer it directly encodes the os-specific modes. + if rec is True perform recursively. 
+ """ + if not isinstance(mode, int): + raise TypeError("mode %r must be an integer" % (mode,)) + if rec: + for x in self.visit(rec=rec): + py.error.checked_call(os.chmod, str(x), mode) + py.error.checked_call(os.chmod, self.strpath, mode) + + def pypkgpath(self): + """ return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + Return None if a pkgpath can not be determined. + """ + pkgpath = None + for parent in self.parts(reverse=True): + if parent.isdir(): + if not parent.join('__init__.py').exists(): + break + if not isimportable(parent.basename): + break + pkgpath = parent + return pkgpath + + def _ensuresyspath(self, ensuremode, path): + if ensuremode: + s = str(path) + if ensuremode == "append": + if s not in sys.path: + sys.path.append(s) + else: + if s != sys.path[0]: + sys.path.insert(0, s) + + def pyimport(self, modname=None, ensuresyspath=True): + """ return path as an imported python module. + + If modname is None, look for the containing package + and construct an according module name. + The module will be put/looked up in sys.modules. + if ensuresyspath is True then the root dir for importing + the file (taking __init__.py files into account) will + be prepended to sys.path if it isn't there already. + If ensuresyspath=="append" the root dir will be appended + if it isn't already contained in sys.path. + if ensuresyspath is False no modification of syspath happens. + """ + if not self.check(): + raise py.error.ENOENT(self) + + pkgpath = None + if modname is None: + pkgpath = self.pypkgpath() + if pkgpath is not None: + pkgroot = pkgpath.dirpath() + names = self.new(ext="").relto(pkgroot).split(self.sep) + if names[-1] == "__init__": + names.pop() + modname = ".".join(names) + else: + pkgroot = self.dirpath() + modname = self.purebasename + + self._ensuresyspath(ensuresyspath, pkgroot) + __import__(modname) + mod = sys.modules[modname] + if self.basename == "__init__.py": + return mod # we don't check anything as we might + # we in a namespace package ... too icky to check + modfile = mod.__file__ + if modfile[-4:] in ('.pyc', '.pyo'): + modfile = modfile[:-1] + elif modfile.endswith('$py.class'): + modfile = modfile[:-9] + '.py' + if modfile.endswith(os.path.sep + "__init__.py"): + if self.basename != "__init__.py": + modfile = modfile[:-12] + try: + issame = self.samefile(modfile) + except py.error.ENOENT: + issame = False + if not issame: + raise self.ImportMismatchError(modname, modfile, self) + return mod + else: + try: + return sys.modules[modname] + except KeyError: + # we have a custom modname, do a pseudo-import + mod = py.std.types.ModuleType(modname) + mod.__file__ = str(self) + sys.modules[modname] = mod + try: + py.builtin.execfile(str(self), mod.__dict__) + except: + del sys.modules[modname] + raise + return mod + + def sysexec(self, *argv, **popen_opts): + """ return stdout text from executing a system child process, + where the 'self' path points to executable. + The process is directly invoked and not through a system shell. 
+ """ + from subprocess import Popen, PIPE + argv = map_as_list(str, argv) + popen_opts['stdout'] = popen_opts['stderr'] = PIPE + proc = Popen([str(self)] + argv, **popen_opts) + stdout, stderr = proc.communicate() + ret = proc.wait() + if py.builtin._isbytes(stdout): + stdout = py.builtin._totext(stdout, sys.getdefaultencoding()) + if ret != 0: + if py.builtin._isbytes(stderr): + stderr = py.builtin._totext(stderr, sys.getdefaultencoding()) + raise py.process.cmdexec.Error(ret, ret, str(self), + stdout, stderr,) + return stdout + + def sysfind(cls, name, checker=None, paths=None): + """ return a path object found by looking at the systems + underlying PATH specification. If the checker is not None + it will be invoked to filter matching paths. If a binary + cannot be found, None is returned + Note: This is probably not working on plain win32 systems + but may work on cygwin. + """ + if isabs(name): + p = py.path.local(name) + if p.check(file=1): + return p + else: + if paths is None: + if iswin32: + paths = py.std.os.environ['Path'].split(';') + if '' not in paths and '.' not in paths: + paths.append('.') + try: + systemroot = os.environ['SYSTEMROOT'] + except KeyError: + pass + else: + paths = [re.sub('%SystemRoot%', systemroot, path) + for path in paths] + else: + paths = py.std.os.environ['PATH'].split(':') + tryadd = [] + if iswin32: + tryadd += os.environ['PATHEXT'].split(os.pathsep) + tryadd.append("") + + for x in paths: + for addext in tryadd: + p = py.path.local(x).join(name, abs=True) + addext + try: + if p.check(file=1): + if checker: + if not checker(p): + continue + return p + except py.error.EACCES: + pass + return None + sysfind = classmethod(sysfind) + + def _gethomedir(cls): + try: + x = os.environ['HOME'] + except KeyError: + try: + x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH'] + except KeyError: + return None + return cls(x) + _gethomedir = classmethod(_gethomedir) + + #""" + #special class constructors for local filesystem paths + #""" + def get_temproot(cls): + """ return the system's temporary directory + (where tempfiles are usually created in) + """ + return py.path.local(py.std.tempfile.gettempdir()) + get_temproot = classmethod(get_temproot) + + def mkdtemp(cls, rootdir=None): + """ return a Path object pointing to a fresh new temporary directory + (which we created ourself). + """ + import tempfile + if rootdir is None: + rootdir = cls.get_temproot() + return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir))) + mkdtemp = classmethod(mkdtemp) + + def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3, + lock_timeout = 172800): # two days + """ return unique directory with a number greater than the current + maximum one. The number is assumed to start directly after prefix. + if keep is true directories with a number less than (maxnum-keep) + will be removed. + """ + if rootdir is None: + rootdir = cls.get_temproot() + + def parse_num(path): + """ parse the number out of a path (if it matches the prefix) """ + bn = path.basename + if bn.startswith(prefix): + try: + return int(bn[len(prefix):]) + except ValueError: + pass + + # compute the maximum number currently in use with the + # prefix + lastmax = None + while True: + maxnum = -1 + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None: + maxnum = max(maxnum, num) + + # make the new directory + try: + udir = rootdir.mkdir(prefix + str(maxnum+1)) + except py.error.EEXIST: + # race condition: another thread/process created the dir + # in the meantime. 
Try counting again + if lastmax == maxnum: + raise + lastmax = maxnum + continue + break + + # put a .lock file in the new directory that will be removed at + # process exit + if lock_timeout: + lockfile = udir.join('.lock') + mypid = os.getpid() + if hasattr(lockfile, 'mksymlinkto'): + lockfile.mksymlinkto(str(mypid)) + else: + lockfile.write(str(mypid)) + def try_remove_lockfile(): + # in a fork() situation, only the last process should + # remove the .lock, otherwise the other processes run the + # risk of seeing their temporary dir disappear. For now + # we remove the .lock in the parent only (i.e. we assume + # that the children finish before the parent). + if os.getpid() != mypid: + return + try: + lockfile.remove() + except py.error.Error: + pass + atexit.register(try_remove_lockfile) + + # prune old directories + if keep: + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None and num <= (maxnum - keep): + lf = path.join('.lock') + try: + t1 = lf.lstat().mtime + t2 = lockfile.lstat().mtime + if not lock_timeout or abs(t2-t1) < lock_timeout: + continue # skip directories still locked + except py.error.Error: + pass # assume that it means that there is no 'lf' + try: + path.remove(rec=1) + except KeyboardInterrupt: + raise + except: # this might be py.error.Error, WindowsError ... + pass + + # make link... + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = 'current' + + src = str(udir) + dest = src[:src.rfind('-')] + '-' + username + try: + os.unlink(dest) + except OSError: + pass + try: + os.symlink(src, dest) + except (OSError, AttributeError, NotImplementedError): + pass + + return udir + make_numbered_dir = classmethod(make_numbered_dir) + +def copymode(src, dest): + """ copy permission from src to dst. """ + py.std.shutil.copymode(src, dest) + +def copystat(src, dest): + """ copy permission, last modification time, last access time, and flags from src to dst.""" + py.std.shutil.copystat(str(src), str(dest)) + +def copychunked(src, dest): + chunksize = 524288 # half a meg of bytes + fsrc = src.open('rb') + try: + fdest = dest.open('wb') + try: + while 1: + buf = fsrc.read(chunksize) + if not buf: + break + fdest.write(buf) + finally: + fdest.close() + finally: + fsrc.close() + +def isimportable(name): + if name and (name[0].isalpha() or name[0] == '_'): + name = name.replace("_", '') + return not name or name.isalnum() diff --git a/lib/spack/external/py/_path/svnurl.py b/lib/spack/external/py/_path/svnurl.py new file mode 100644 index 0000000000..78d71317ac --- /dev/null +++ b/lib/spack/external/py/_path/svnurl.py @@ -0,0 +1,380 @@ +""" +module defining a subversion path object based on the external +command 'svn'. This modules aims to work with svn 1.3 and higher +but might also interact well with earlier versions. +""" + +import os, sys, time, re +import py +from py import path, process +from py._path import common +from py._path import svnwc as svncommon +from py._path.cacheutil import BuildcostAccessCache, AgingCache + +DEBUG=False + +class SvnCommandPath(svncommon.SvnPathBase): + """ path implementation that offers access to (possibly remote) subversion + repositories. 
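make_numbered_dir() above hands out numbered scratch directories, lock-files them, and prunes old ones; an illustrative sketch (the "run-" prefix is arbitrary):

    import py

    root = py.path.local.mkdtemp()
    try:
        d1 = py.path.local.make_numbered_dir(prefix="run-", rootdir=root, keep=2)
        d2 = py.path.local.make_numbered_dir(prefix="run-", rootdir=root, keep=2)
        assert d1.basename.startswith("run-") and d2.basename.startswith("run-")
        assert d1 != d2                  # the trailing number increments per call
    finally:
        root.remove()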
""" + + _lsrevcache = BuildcostAccessCache(maxentries=128) + _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0) + + def __new__(cls, path, rev=None, auth=None): + self = object.__new__(cls) + if isinstance(path, cls): + rev = path.rev + auth = path.auth + path = path.strpath + svncommon.checkbadchars(path) + path = path.rstrip('/') + self.strpath = path + self.rev = rev + self.auth = auth + return self + + def __repr__(self): + if self.rev == -1: + return 'svnurl(%r)' % self.strpath + else: + return 'svnurl(%r, %r)' % (self.strpath, self.rev) + + def _svnwithrev(self, cmd, *args): + """ execute an svn command, append our own url and revision """ + if self.rev is None: + return self._svnwrite(cmd, *args) + else: + args = ['-r', self.rev] + list(args) + return self._svnwrite(cmd, *args) + + def _svnwrite(self, cmd, *args): + """ execute an svn command, append our own url """ + l = ['svn %s' % cmd] + args = ['"%s"' % self._escape(item) for item in args] + l.extend(args) + l.append('"%s"' % self._encodedurl()) + # fixing the locale because we can't otherwise parse + string = " ".join(l) + if DEBUG: + print("execing %s" % string) + out = self._svncmdexecauth(string) + return out + + def _svncmdexecauth(self, cmd): + """ execute an svn command 'as is' """ + cmd = svncommon.fixlocale() + cmd + if self.auth is not None: + cmd += ' ' + self.auth.makecmdoptions() + return self._cmdexec(cmd) + + def _cmdexec(self, cmd): + try: + out = process.cmdexec(cmd) + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if (e.err.find('File Exists') != -1 or + e.err.find('File already exists') != -1): + raise py.error.EEXIST(self) + raise + return out + + def _svnpopenauth(self, cmd): + """ execute an svn command, return a pipe for reading stdin """ + cmd = svncommon.fixlocale() + cmd + if self.auth is not None: + cmd += ' ' + self.auth.makecmdoptions() + return self._popen(cmd) + + def _popen(self, cmd): + return os.popen(cmd) + + def _encodedurl(self): + return self._escape(self.strpath) + + def _norev_delentry(self, path): + auth = self.auth and self.auth.makecmdoptions() or None + self._lsnorevcache.delentry((str(path), auth)) + + def open(self, mode='r'): + """ return an opened file with the given mode. """ + if mode not in ("r", "rU",): + raise ValueError("mode %r not supported" % (mode,)) + assert self.check(file=1) # svn cat returns an empty file otherwise + if self.rev is None: + return self._svnpopenauth('svn cat "%s"' % ( + self._escape(self.strpath), )) + else: + return self._svnpopenauth('svn cat -r %s "%s"' % ( + self.rev, self._escape(self.strpath))) + + def dirpath(self, *args, **kwargs): + """ return the directory path of the current path joined + with any given path arguments. + """ + l = self.strpath.split(self.sep) + if len(l) < 4: + raise py.error.EINVAL(self, "base is not valid") + elif len(l) == 4: + return self.join(*args, **kwargs) + else: + return self.new(basename='').join(*args, **kwargs) + + # modifying methods (cache must be invalidated) + def mkdir(self, *args, **kwargs): + """ create & return the directory joined with args. + pass a 'msg' keyword argument to set the commit message. 
+ """ + commit_msg = kwargs.get('msg', "mkdir by py lib invocation") + createpath = self.join(*args) + createpath._svnwrite('mkdir', '-m', commit_msg) + self._norev_delentry(createpath.dirpath()) + return createpath + + def copy(self, target, msg='copied by py lib invocation'): + """ copy path to target with checkin message msg.""" + if getattr(target, 'rev', None) is not None: + raise py.error.EINVAL(target, "revisions are immutable") + self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg, + self._escape(self), self._escape(target))) + self._norev_delentry(target.dirpath()) + + def rename(self, target, msg="renamed by py lib invocation"): + """ rename this path to target with checkin message msg. """ + if getattr(self, 'rev', None) is not None: + raise py.error.EINVAL(self, "revisions are immutable") + self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %( + msg, self._escape(self), self._escape(target))) + self._norev_delentry(self.dirpath()) + self._norev_delentry(self) + + def remove(self, rec=1, msg='removed by py lib invocation'): + """ remove a file or directory (or a directory tree if rec=1) with +checkin message msg.""" + if self.rev is not None: + raise py.error.EINVAL(self, "revisions are immutable") + self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self))) + self._norev_delentry(self.dirpath()) + + def export(self, topath): + """ export to a local path + + topath should not exist prior to calling this, returns a + py.path.local instance + """ + topath = py.path.local(topath) + args = ['"%s"' % (self._escape(self),), + '"%s"' % (self._escape(topath),)] + if self.rev is not None: + args = ['-r', str(self.rev)] + args + self._svncmdexecauth('svn export %s' % (' '.join(args),)) + return topath + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). If you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. + """ + if getattr(self, 'rev', None) is not None: + raise py.error.EINVAL(self, "revisions are immutable") + target = self.join(*args) + dir = kwargs.get('dir', 0) + for x in target.parts(reverse=True): + if x.check(): + break + else: + raise py.error.ENOENT(target, "has not any valid base!") + if x == target: + if not x.check(dir=dir): + raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x) + return x + tocreate = target.relto(x) + basename = tocreate.split(self.sep, 1)[0] + tempdir = py.path.local.mkdtemp() + try: + tempdir.ensure(tocreate, dir=dir) + cmd = 'svn import -m "%s" "%s" "%s"' % ( + "ensure %s" % self._escape(tocreate), + self._escape(tempdir.join(basename)), + x.join(basename)._encodedurl()) + self._svncmdexecauth(cmd) + self._norev_delentry(x) + finally: + tempdir.remove() + return target + + # end of modifying methods + def _propget(self, name): + res = self._svnwithrev('propget', name) + return res[:-1] # strip trailing newline + + def _proplist(self): + res = self._svnwithrev('proplist') + lines = res.split('\n') + lines = [x.strip() for x in lines[1:]] + return svncommon.PropListDict(self, lines) + + def info(self): + """ return an Info structure with svn-provided information. 
""" + parent = self.dirpath() + nameinfo_seq = parent._listdir_nameinfo() + bn = self.basename + for name, info in nameinfo_seq: + if name == bn: + return info + raise py.error.ENOENT(self) + + + def _listdir_nameinfo(self): + """ return sequence of name-info directory entries of self """ + def builder(): + try: + res = self._svnwithrev('ls', '-v') + except process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('non-existent in that revision') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find("E200009:") != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('File not found') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('not part of a repository')!=-1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('Unable to open')!=-1: + raise py.error.ENOENT(self, e.err) + elif e.err.lower().find('method not allowed')!=-1: + raise py.error.EACCES(self, e.err) + raise py.error.Error(e.err) + lines = res.split('\n') + nameinfo_seq = [] + for lsline in lines: + if lsline: + info = InfoSvnCommand(lsline) + if info._name != '.': # svn 1.5 produces '.' dirs, + nameinfo_seq.append((info._name, info)) + nameinfo_seq.sort() + return nameinfo_seq + auth = self.auth and self.auth.makecmdoptions() or None + if self.rev is not None: + return self._lsrevcache.getorbuild((self.strpath, self.rev, auth), + builder) + else: + return self._lsnorevcache.getorbuild((self.strpath, auth), + builder) + + def listdir(self, fil=None, sort=None): + """ list directory contents, possibly filter by the given fil func + and possibly sorted. + """ + if isinstance(fil, str): + fil = common.FNMatcher(fil) + nameinfo_seq = self._listdir_nameinfo() + if len(nameinfo_seq) == 1: + name, info = nameinfo_seq[0] + if name == self.basename and info.kind == 'file': + #if not self.check(dir=1): + raise py.error.ENOTDIR(self) + paths = [self.join(name) for (name, info) in nameinfo_seq] + if fil: + paths = [x for x in paths if fil(x)] + self._sortlist(paths, sort) + return paths + + + def log(self, rev_start=None, rev_end=1, verbose=False): + """ return a list of LogEntry instances for this path. +rev_start is the starting revision (defaulting to the first one). +rev_end is the last revision (defaulting to HEAD). +if verbose is True, then the LogEntry instances also know which files changed. +""" + assert self.check() #make it simpler for the pipe + rev_start = rev_start is None and "HEAD" or rev_start + rev_end = rev_end is None and "HEAD" or rev_end + + if rev_start == "HEAD" and rev_end == 1: + rev_opt = "" + else: + rev_opt = "-r %s:%s" % (rev_start, rev_end) + verbose_opt = verbose and "-v" or "" + xmlpipe = self._svnpopenauth('svn log --xml %s %s "%s"' % + (rev_opt, verbose_opt, self.strpath)) + from xml.dom import minidom + tree = minidom.parse(xmlpipe) + result = [] + for logentry in filter(None, tree.firstChild.childNodes): + if logentry.nodeType == logentry.ELEMENT_NODE: + result.append(svncommon.LogEntry(logentry)) + return result + +#01234567890123456789012345678901234567890123467 +# 2256 hpk 165 Nov 24 17:55 __init__.py +# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!! +# 1312 johnny 1627 May 05 14:32 test_decorators.py +# +class InfoSvnCommand: + # the '0?' part in the middle is an indication of whether the resource is + # locked, see 'svn help ls' + lspattern = re.compile( + r'^ *(?P\d+) +(?P.+?) +(0? *(?P\d+))? ' + '*(?P\w+ +\d{2} +[\d:]+) +(?P.*)$') + def __init__(self, line): + # this is a typical line from 'svn ls http://...' 
+ #_ 1127 jum 0 Jul 13 15:28 branch/ + match = self.lspattern.match(line) + data = match.groupdict() + self._name = data['file'] + if self._name[-1] == '/': + self._name = self._name[:-1] + self.kind = 'dir' + else: + self.kind = 'file' + #self.has_props = l.pop(0) == 'P' + self.created_rev = int(data['rev']) + self.last_author = data['author'] + self.size = data['size'] and int(data['size']) or 0 + self.mtime = parse_time_with_missing_year(data['date']) + self.time = self.mtime * 1000000 + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + +#____________________________________________________ +# +# helper functions +#____________________________________________________ +def parse_time_with_missing_year(timestr): + """ analyze the time part from a single line of "svn ls -v" + the svn output doesn't show the year makes the 'timestr' + ambigous. + """ + import calendar + t_now = time.gmtime() + + tparts = timestr.split() + month = time.strptime(tparts.pop(0), '%b')[1] + day = time.strptime(tparts.pop(0), '%d')[2] + last = tparts.pop(0) # year or hour:minute + try: + if ":" in last: + raise ValueError() + year = time.strptime(last, '%Y')[0] + hour = minute = 0 + except ValueError: + hour, minute = time.strptime(last, '%H:%M')[3:5] + year = t_now[0] + + t_result = (year, month, day, hour, minute, 0,0,0,0) + if t_result > t_now: + year -= 1 + t_result = (year, month, day, hour, minute, 0,0,0,0) + return calendar.timegm(t_result) + +class PathEntry: + def __init__(self, ppart): + self.strpath = ppart.firstChild.nodeValue.encode('UTF-8') + self.action = ppart.getAttribute('action').encode('UTF-8') + if self.action == 'A': + self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8') + if self.copyfrom_path: + self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev')) + diff --git a/lib/spack/external/py/_path/svnwc.py b/lib/spack/external/py/_path/svnwc.py new file mode 100644 index 0000000000..00d3b4bbaf --- /dev/null +++ b/lib/spack/external/py/_path/svnwc.py @@ -0,0 +1,1240 @@ +""" +svn-Command based Implementation of a Subversion WorkingCopy Path. + + SvnWCCommandPath is the main class. + +""" + +import os, sys, time, re, calendar +import py +import subprocess +from py._path import common + +#----------------------------------------------------------- +# Caching latest repository revision and repo-paths +# (getting them is slow with the current implementations) +# +# XXX make mt-safe +#----------------------------------------------------------- + +class cache: + proplist = {} + info = {} + entries = {} + prop = {} + +class RepoEntry: + def __init__(self, url, rev, timestamp): + self.url = url + self.rev = rev + self.timestamp = timestamp + + def __str__(self): + return "repo: %s;%s %s" %(self.url, self.rev, self.timestamp) + +class RepoCache: + """ The Repocache manages discovered repository paths + and their revisions. If inside a timeout the cache + will even return the revision of the root. 
+ """ + timeout = 20 # seconds after which we forget that we know the last revision + + def __init__(self): + self.repos = [] + + def clear(self): + self.repos = [] + + def put(self, url, rev, timestamp=None): + if rev is None: + return + if timestamp is None: + timestamp = time.time() + + for entry in self.repos: + if url == entry.url: + entry.timestamp = timestamp + entry.rev = rev + #print "set repo", entry + break + else: + entry = RepoEntry(url, rev, timestamp) + self.repos.append(entry) + #print "appended repo", entry + + def get(self, url): + now = time.time() + for entry in self.repos: + if url.startswith(entry.url): + if now < entry.timestamp + self.timeout: + #print "returning immediate Etrny", entry + return entry.url, entry.rev + return entry.url, -1 + return url, -1 + +repositories = RepoCache() + + +# svn support code + +ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested +if sys.platform == "win32": + ALLOWED_CHARS += ":" +ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:' + +def _getsvnversion(ver=[]): + try: + return ver[0] + except IndexError: + v = py.process.cmdexec("svn -q --version") + v.strip() + v = '.'.join(v.split('.')[:2]) + ver.append(v) + return v + +def _escape_helper(text): + text = str(text) + if py.std.sys.platform != 'win32': + text = str(text).replace('$', '\\$') + return text + +def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS): + for c in str(text): + if c.isalnum(): + continue + if c in allowed_chars: + continue + return True + return False + +def checkbadchars(url): + # (hpk) not quite sure about the exact purpose, guido w.? + proto, uri = url.split("://", 1) + if proto != "file": + host, uripath = uri.split('/', 1) + # only check for bad chars in the non-protocol parts + if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \ + or _check_for_bad_chars(uripath, ALLOWED_CHARS)): + raise ValueError("bad char in %r" % (url, )) + + +#_______________________________________________________________ + +class SvnPathBase(common.PathBase): + """ Base implementation for SvnPath implementations. """ + sep = '/' + + def _geturl(self): + return self.strpath + url = property(_geturl, None, None, "url of this svn-path.") + + def __str__(self): + """ return a string representation (including rev-number) """ + return self.strpath + + def __hash__(self): + return hash(self.strpath) + + def new(self, **kw): + """ create a modified version of this path. A 'rev' argument + indicates a new revision. + the following keyword arguments modify various path parts:: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + obj = object.__new__(self.__class__) + obj.rev = kw.get('rev', self.rev) + obj.auth = kw.get('auth', self.auth) + dirname, basename, purebasename, ext = self._getbyspec( + "dirname,basename,purebasename,ext") + if 'basename' in kw: + if 'purebasename' in kw or 'ext' in kw: + raise ValueError("invalid specification %r" % kw) + else: + pb = kw.setdefault('purebasename', purebasename) + ext = kw.setdefault('ext', ext) + if ext and not ext.startswith('.'): + ext = '.' + ext + kw['basename'] = pb + ext + + kw.setdefault('dirname', dirname) + kw.setdefault('sep', self.sep) + if kw['basename']: + obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw + else: + obj.strpath = "%(dirname)s" % kw + return obj + + def _getbyspec(self, spec): + """ get specified parts of the path. 'arg' is a string + with comma separated path parts. 
The parts are returned + in exactly the order of the specification. + + you may specify the following parts: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + res = [] + parts = self.strpath.split(self.sep) + for name in spec.split(','): + name = name.strip() + if name == 'dirname': + res.append(self.sep.join(parts[:-1])) + elif name == 'basename': + res.append(parts[-1]) + else: + basename = parts[-1] + i = basename.rfind('.') + if i == -1: + purebasename, ext = basename, '' + else: + purebasename, ext = basename[:i], basename[i:] + if name == 'purebasename': + res.append(purebasename) + elif name == 'ext': + res.append(ext) + else: + raise NameError("Don't know part %r" % name) + return res + + def __eq__(self, other): + """ return true if path and rev attributes each match """ + return (str(self) == str(other) and + (self.rev == other.rev or self.rev == other.rev)) + + def __ne__(self, other): + return not self == other + + def join(self, *args): + """ return a new Path (with the same revision) which is composed + of the self Path followed by 'args' path components. + """ + if not args: + return self + + args = tuple([arg.strip(self.sep) for arg in args]) + parts = (self.strpath, ) + args + newpath = self.__class__(self.sep.join(parts), self.rev, self.auth) + return newpath + + def propget(self, name): + """ return the content of the given property. """ + value = self._propget(name) + return value + + def proplist(self): + """ list all property names. """ + content = self._proplist() + return content + + def size(self): + """ Return the size of the file content of the Path. """ + return self.info().size + + def mtime(self): + """ Return the last modification time of the file. """ + return self.info().mtime + + # shared help methods + + def _escape(self, cmd): + return _escape_helper(cmd) + + + #def _childmaxrev(self): + # """ return maximum revision number of childs (or self.rev if no childs) """ + # rev = self.rev + # for name, info in self._listdir_nameinfo(): + # rev = max(rev, info.created_rev) + # return rev + + #def _getlatestrevision(self): + # """ return latest repo-revision for this path. 
""" + # url = self.strpath + # path = self.__class__(url, None) + # + # # we need a long walk to find the root-repo and revision + # while 1: + # try: + # rev = max(rev, path._childmaxrev()) + # previous = path + # path = path.dirpath() + # except (IOError, process.cmdexec.Error): + # break + # if rev is None: + # raise IOError, "could not determine newest repo revision for %s" % self + # return rev + + class Checkers(common.Checkers): + def dir(self): + try: + return self.path.info().kind == 'dir' + except py.error.Error: + return self._listdirworks() + + def _listdirworks(self): + try: + self.path.listdir() + except py.error.ENOENT: + return False + else: + return True + + def file(self): + try: + return self.path.info().kind == 'file' + except py.error.ENOENT: + return False + + def exists(self): + try: + return self.path.info() + except py.error.ENOENT: + return self._listdirworks() + +def parse_apr_time(timestr): + i = timestr.rfind('.') + if i == -1: + raise ValueError("could not parse %s" % timestr) + timestr = timestr[:i] + parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S") + return time.mktime(parsedtime) + +class PropListDict(dict): + """ a Dictionary which fetches values (InfoSvnCommand instances) lazily""" + def __init__(self, path, keynames): + dict.__init__(self, [(x, None) for x in keynames]) + self.path = path + + def __getitem__(self, key): + value = dict.__getitem__(self, key) + if value is None: + value = self.path.propget(key) + dict.__setitem__(self, key, value) + return value + +def fixlocale(): + if sys.platform != 'win32': + return 'LC_ALL=C ' + return '' + +# some nasty chunk of code to solve path and url conversion and quoting issues +ILLEGAL_CHARS = '* | \ / : < > ? \t \n \x0b \x0c \r'.split(' ') +if os.sep in ILLEGAL_CHARS: + ILLEGAL_CHARS.remove(os.sep) +ISWINDOWS = sys.platform == 'win32' +_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I) +def _check_path(path): + illegal = ILLEGAL_CHARS[:] + sp = path.strpath + if ISWINDOWS: + illegal.remove(':') + if not _reg_allow_disk.match(sp): + raise ValueError('path may not contain a colon (:)') + for char in sp: + if char not in string.printable or char in illegal: + raise ValueError('illegal character %r in path' % (char,)) + +def path_to_fspath(path, addat=True): + _check_path(path) + sp = path.strpath + if addat and path.rev != -1: + sp = '%s@%s' % (sp, path.rev) + elif addat: + sp = '%s@HEAD' % (sp,) + return sp + +def url_from_path(path): + fspath = path_to_fspath(path, False) + quote = py.std.urllib.quote + if ISWINDOWS: + match = _reg_allow_disk.match(fspath) + fspath = fspath.replace('\\', '/') + if match.group(1): + fspath = '/%s%s' % (match.group(1).replace('\\', '/'), + quote(fspath[len(match.group(1)):])) + else: + fspath = quote(fspath) + else: + fspath = quote(fspath) + if path.rev != -1: + fspath = '%s@%s' % (fspath, path.rev) + else: + fspath = '%s@HEAD' % (fspath,) + return 'file://%s' % (fspath,) + +class SvnAuth(object): + """ container for auth information for Subversion """ + def __init__(self, username, password, cache_auth=True, interactive=True): + self.username = username + self.password = password + self.cache_auth = cache_auth + self.interactive = interactive + + def makecmdoptions(self): + uname = self.username.replace('"', '\\"') + passwd = self.password.replace('"', '\\"') + ret = [] + if uname: + ret.append('--username="%s"' % (uname,)) + if passwd: + ret.append('--password="%s"' % (passwd,)) + if not self.cache_auth: + ret.append('--no-auth-cache') + if not 
self.interactive: + ret.append('--non-interactive') + return ' '.join(ret) + + def __str__(self): + return "" %(self.username,) + +rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)') + +class SvnWCCommandPath(common.PathBase): + """ path implementation offering access/modification to svn working copies. + It has methods similar to the functions in os.path and similar to the + commands of the svn client. + """ + sep = os.sep + + def __new__(cls, wcpath=None, auth=None): + self = object.__new__(cls) + if isinstance(wcpath, cls): + if wcpath.__class__ == cls: + return wcpath + wcpath = wcpath.localpath + if _check_for_bad_chars(str(wcpath), + ALLOWED_CHARS): + raise ValueError("bad char in wcpath %s" % (wcpath, )) + self.localpath = py.path.local(wcpath) + self.auth = auth + return self + + strpath = property(lambda x: str(x.localpath), None, None, "string path") + rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision") + + def __eq__(self, other): + return self.localpath == getattr(other, 'localpath', None) + + def _geturl(self): + if getattr(self, '_url', None) is None: + info = self.info() + self._url = info.url #SvnPath(info.url, info.rev) + assert isinstance(self._url, py.builtin._basestring) + return self._url + + url = property(_geturl, None, None, "url of this WC item") + + def _escape(self, cmd): + return _escape_helper(cmd) + + def dump(self, obj): + """ pickle object into path location""" + return self.localpath.dump(obj) + + def svnurl(self): + """ return current SvnPath for this WC-item. """ + info = self.info() + return py.path.svnurl(info.url) + + def __repr__(self): + return "svnwc(%r)" % (self.strpath) # , self._url) + + def __str__(self): + return str(self.localpath) + + def _makeauthoptions(self): + if self.auth is None: + return '' + return self.auth.makecmdoptions() + + def _authsvn(self, cmd, args=None): + args = args and list(args) or [] + args.append(self._makeauthoptions()) + return self._svn(cmd, *args) + + def _svn(self, cmd, *args): + l = ['svn %s' % cmd] + args = [self._escape(item) for item in args] + l.extend(args) + l.append('"%s"' % self._escape(self.strpath)) + # try fixing the locale because we can't otherwise parse + string = fixlocale() + " ".join(l) + try: + try: + key = 'LC_MESSAGES' + hold = os.environ.get(key) + os.environ[key] = 'C' + out = py.process.cmdexec(string) + finally: + if hold: + os.environ[key] = hold + else: + del os.environ[key] + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + strerr = e.err.lower() + if strerr.find('not found') != -1: + raise py.error.ENOENT(self) + elif strerr.find("E200009:") != -1: + raise py.error.ENOENT(self) + if (strerr.find('file exists') != -1 or + strerr.find('file already exists') != -1 or + strerr.find('w150002:') != -1 or + strerr.find("can't create directory") != -1): + raise py.error.EEXIST(strerr) #self) + raise + return out + + def switch(self, url): + """ switch to given URL. """ + self._authsvn('switch', [url]) + + def checkout(self, url=None, rev=None): + """ checkout from url to local wcpath. """ + args = [] + if url is None: + url = self.url + if rev is None or rev == -1: + if (py.std.sys.platform != 'win32' and + _getsvnversion() == '1.3'): + url += "@HEAD" + else: + if _getsvnversion() == '1.3': + url += "@%d" % rev + else: + args.append('-r' + str(rev)) + args.append(url) + self._authsvn('co', args) + + def update(self, rev='HEAD', interactive=True): + """ update working copy item to given revision. (None -> HEAD). 
""" + opts = ['-r', rev] + if not interactive: + opts.append("--non-interactive") + self._authsvn('up', opts) + + def write(self, content, mode='w'): + """ write content into local filesystem wc. """ + self.localpath.write(content, mode) + + def dirpath(self, *args): + """ return the directory Path of the current Path. """ + return self.__class__(self.localpath.dirpath(*args), auth=self.auth) + + def _ensuredirs(self): + parent = self.dirpath() + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + self.mkdir() + return self + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'directory=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if p.check(): + if p.check(versioned=False): + p.add() + return p + if kwargs.get('dir', 0): + return p._ensuredirs() + parent = p.dirpath() + parent._ensuredirs() + p.write("") + p.add() + return p + + def mkdir(self, *args): + """ create & return the directory joined with args. """ + if args: + return self.join(*args).mkdir() + else: + self._svn('mkdir') + return self + + def add(self): + """ add ourself to svn """ + self._svn('add') + + def remove(self, rec=1, force=1): + """ remove a file or a directory tree. 'rec'ursive is + ignored and considered always true (because of + underlying svn semantics. + """ + assert rec, "svn cannot remove non-recursively" + if not self.check(versioned=True): + # not added to svn (anymore?), just remove + py.path.local(self).remove() + return + flags = [] + if force: + flags.append('--force') + self._svn('remove', *flags) + + def copy(self, target): + """ copy path to target.""" + py.process.cmdexec("svn copy %s %s" %(str(self), str(target))) + + def rename(self, target): + """ rename this path to target. """ + py.process.cmdexec("svn move --force %s %s" %(str(self), str(target))) + + def lock(self): + """ set a lock (exclusive) on the resource """ + out = self._authsvn('lock').strip() + if not out: + # warning or error, raise exception + raise ValueError("unknown error in svn lock command") + + def unlock(self): + """ unset a previously set lock """ + out = self._authsvn('unlock').strip() + if out.startswith('svn:'): + # warning or error, raise exception + raise Exception(out[4:]) + + def cleanup(self): + """ remove any locks from the resource """ + # XXX should be fixed properly!!! + try: + self.unlock() + except: + pass + + def status(self, updates=0, rec=0, externals=0): + """ return (collective) Status object for this file. 
""" + # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1 + # 2201 2192 jum test + # XXX + if externals: + raise ValueError("XXX cannot perform status() " + "on external items yet") + else: + #1.2 supports: externals = '--ignore-externals' + externals = '' + if rec: + rec= '' + else: + rec = '--non-recursive' + + # XXX does not work on all subversion versions + #if not externals: + # externals = '--ignore-externals' + + if updates: + updates = '-u' + else: + updates = '' + + try: + cmd = 'status -v --xml --no-ignore %s %s %s' % ( + updates, rec, externals) + out = self._authsvn(cmd) + except py.process.cmdexec.Error: + cmd = 'status -v --no-ignore %s %s %s' % ( + updates, rec, externals) + out = self._authsvn(cmd) + rootstatus = WCStatus(self).fromstring(out, self) + else: + rootstatus = XMLWCStatus(self).fromstring(out, self) + return rootstatus + + def diff(self, rev=None): + """ return a diff of the current path against revision rev (defaulting + to the last one). + """ + args = [] + if rev is not None: + args.append("-r %d" % rev) + out = self._authsvn('diff', args) + return out + + def blame(self): + """ return a list of tuples of three elements: + (revision, commiter, line) + """ + out = self._svn('blame') + result = [] + blamelines = out.splitlines() + reallines = py.path.svnurl(self.url).readlines() + for i, (blameline, line) in enumerate( + zip(blamelines, reallines)): + m = rex_blame.match(blameline) + if not m: + raise ValueError("output line %r of svn blame does not match " + "expected format" % (line, )) + rev, name, _ = m.groups() + result.append((int(rev), name, line)) + return result + + _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL) + def commit(self, msg='', rec=1): + """ commit with support for non-recursive commits """ + # XXX i guess escaping should be done better here?!? + cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),) + if not rec: + cmd += ' -N' + out = self._authsvn(cmd) + try: + del cache.info[self] + except KeyError: + pass + if out: + m = self._rex_commit.match(out) + return int(m.group(1)) + + def propset(self, name, value, *args): + """ set property name to value on this path. """ + d = py.path.local.mkdtemp() + try: + p = d.join('value') + p.write(value) + self._svn('propset', name, '--file', str(p), *args) + finally: + d.remove() + + def propget(self, name): + """ get property name on this path. """ + res = self._svn('propget', name) + return res[:-1] # strip trailing newline + + def propdel(self, name): + """ delete property name on this path. """ + res = self._svn('propdel', name) + return res[:-1] # strip trailing newline + + def proplist(self, rec=0): + """ return a mapping of property names to property values. +If rec is True, then return a dictionary mapping sub-paths to such mappings. +""" + if rec: + res = self._svn('proplist -R') + return make_recursive_propdict(self, res) + else: + res = self._svn('proplist') + lines = res.split('\n') + lines = [x.strip() for x in lines[1:]] + return PropListDict(self, lines) + + def revert(self, rec=0): + """ revert the local changes of this path. if rec is True, do so +recursively. """ + if rec: + result = self._svn('revert -R') + else: + result = self._svn('revert') + return result + + def new(self, **kw): + """ create a modified version of this path. A 'rev' argument + indicates a new revision. 
+ the following keyword arguments modify various path parts: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + if kw: + localpath = self.localpath.new(**kw) + else: + localpath = self.localpath + return self.__class__(localpath, auth=self.auth) + + def join(self, *args, **kwargs): + """ return a new Path (with the same revision) which is composed + of the self Path followed by 'args' path components. + """ + if not args: + return self + localpath = self.localpath.join(*args, **kwargs) + return self.__class__(localpath, auth=self.auth) + + def info(self, usecache=1): + """ return an Info structure with svn-provided information. """ + info = usecache and cache.info.get(self) + if not info: + try: + output = self._svn('info') + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('Path is not a working copy directory') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find("is not under version control") != -1: + raise py.error.ENOENT(self, e.err) + raise + # XXX SVN 1.3 has output on stderr instead of stdout (while it does + # return 0!), so a bit nasty, but we assume no output is output + # to stderr... + if (output.strip() == '' or + output.lower().find('not a versioned resource') != -1): + raise py.error.ENOENT(self, output) + info = InfoSvnWCCommand(output) + + # Can't reliably compare on Windows without access to win32api + if py.std.sys.platform != 'win32': + if info.path != self.localpath: + raise py.error.ENOENT(self, "not a versioned resource:" + + " %s != %s" % (info.path, self.localpath)) + cache.info[self] = info + return info + + def listdir(self, fil=None, sort=None): + """ return a sequence of Paths. + + listdir will return either a tuple or a list of paths + depending on implementation choices. + """ + if isinstance(fil, str): + fil = common.FNMatcher(fil) + # XXX unify argument naming with LocalPath.listdir + def notsvn(path): + return path.basename != '.svn' + + paths = [] + for localpath in self.localpath.listdir(notsvn): + p = self.__class__(localpath, auth=self.auth) + if notsvn(p) and (not fil or fil(p)): + paths.append(p) + self._sortlist(paths, sort) + return paths + + def open(self, mode='r'): + """ return an opened file with the given mode. """ + return open(self.strpath, mode) + + def _getbyspec(self, spec): + return self.localpath._getbyspec(spec) + + class Checkers(py.path.local.Checkers): + def __init__(self, path): + self.svnwcpath = path + self.path = path.localpath + def versioned(self): + try: + s = self.svnwcpath.info() + except (py.error.ENOENT, py.error.EEXIST): + return False + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('is not a working copy')!=-1: + return False + if e.err.lower().find('not a versioned resource') != -1: + return False + raise + else: + return True + + def log(self, rev_start=None, rev_end=1, verbose=False): + """ return a list of LogEntry instances for this path. +rev_start is the starting revision (defaulting to the first one). +rev_end is the last revision (defaulting to HEAD). +if verbose is True, then the LogEntry instances also know which files changed. 
+""" + assert self.check() # make it simpler for the pipe + rev_start = rev_start is None and "HEAD" or rev_start + rev_end = rev_end is None and "HEAD" or rev_end + if rev_start == "HEAD" and rev_end == 1: + rev_opt = "" + else: + rev_opt = "-r %s:%s" % (rev_start, rev_end) + verbose_opt = verbose and "-v" or "" + locale_env = fixlocale() + # some blather on stderr + auth_opt = self._makeauthoptions() + #stdin, stdout, stderr = os.popen3(locale_env + + # 'svn log --xml %s %s %s "%s"' % ( + # rev_opt, verbose_opt, auth_opt, + # self.strpath)) + cmd = locale_env + 'svn log --xml %s %s %s "%s"' % ( + rev_opt, verbose_opt, auth_opt, self.strpath) + + popen = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + ) + stdout, stderr = popen.communicate() + stdout = py.builtin._totext(stdout, sys.getdefaultencoding()) + minidom,ExpatError = importxml() + try: + tree = minidom.parseString(stdout) + except ExpatError: + raise ValueError('no such revision') + result = [] + for logentry in filter(None, tree.firstChild.childNodes): + if logentry.nodeType == logentry.ELEMENT_NODE: + result.append(LogEntry(logentry)) + return result + + def size(self): + """ Return the size of the file content of the Path. """ + return self.info().size + + def mtime(self): + """ Return the last modification time of the file. """ + return self.info().mtime + + def __hash__(self): + return hash((self.strpath, self.__class__, self.auth)) + + +class WCStatus: + attrnames = ('modified','added', 'conflict', 'unchanged', 'external', + 'deleted', 'prop_modified', 'unknown', 'update_available', + 'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced' + ) + + def __init__(self, wcpath, rev=None, modrev=None, author=None): + self.wcpath = wcpath + self.rev = rev + self.modrev = modrev + self.author = author + + for name in self.attrnames: + setattr(self, name, []) + + def allpath(self, sort=True, **kw): + d = {} + for name in self.attrnames: + if name not in kw or kw[name]: + for path in getattr(self, name): + d[path] = 1 + l = d.keys() + if sort: + l.sort() + return l + + # XXX a bit scary to assume there's always 2 spaces between username and + # path, however with win32 allowing spaces in user names there doesn't + # seem to be a more solid approach :( + _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)') + + def fromstring(data, rootwcpath, rev=None, modrev=None, author=None): + """ return a new WCStatus object from data 's' + """ + rootstatus = WCStatus(rootwcpath, rev, modrev, author) + update_rev = None + for line in data.split('\n'): + if not line.strip(): + continue + #print "processing %r" % line + flags, rest = line[:8], line[8:] + # first column + c0,c1,c2,c3,c4,c5,x6,c7 = flags + #if '*' in line: + # print "flags", repr(flags), "rest", repr(rest) + + if c0 in '?XI': + fn = line.split(None, 1)[1] + if c0 == '?': + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.unknown.append(wcpath) + elif c0 == 'X': + wcpath = rootwcpath.__class__( + rootwcpath.localpath.join(fn, abs=1), + auth=rootwcpath.auth) + rootstatus.external.append(wcpath) + elif c0 == 'I': + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.ignored.append(wcpath) + + continue + + #elif c0 in '~!' 
or c4 == 'S': + # raise NotImplementedError("received flag %r" % c0) + + m = WCStatus._rex_status.match(rest) + if not m: + if c7 == '*': + fn = rest.strip() + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.update_available.append(wcpath) + continue + if line.lower().find('against revision:')!=-1: + update_rev = int(rest.split(':')[1].strip()) + continue + if line.lower().find('status on external') > -1: + # XXX not sure what to do here... perhaps we want to + # store some state instead of just continuing, as right + # now it makes the top-level external get added twice + # (once as external, once as 'normal' unchanged item) + # because of the way SVN presents external items + continue + # keep trying + raise ValueError("could not parse line %r" % line) + else: + rev, modrev, author, fn = m.groups() + wcpath = rootwcpath.join(fn, abs=1) + #assert wcpath.check() + if c0 == 'M': + assert wcpath.check(file=1), "didn't expect a directory with changed content here" + rootstatus.modified.append(wcpath) + elif c0 == 'A' or c3 == '+' : + rootstatus.added.append(wcpath) + elif c0 == 'D': + rootstatus.deleted.append(wcpath) + elif c0 == 'C': + rootstatus.conflict.append(wcpath) + elif c0 == '~': + rootstatus.kindmismatch.append(wcpath) + elif c0 == '!': + rootstatus.incomplete.append(wcpath) + elif c0 == 'R': + rootstatus.replaced.append(wcpath) + elif not c0.strip(): + rootstatus.unchanged.append(wcpath) + else: + raise NotImplementedError("received flag %r" % c0) + + if c1 == 'M': + rootstatus.prop_modified.append(wcpath) + # XXX do we cover all client versions here? + if c2 == 'L' or c5 == 'K': + rootstatus.locked.append(wcpath) + if c7 == '*': + rootstatus.update_available.append(wcpath) + + if wcpath == rootwcpath: + rootstatus.rev = rev + rootstatus.modrev = modrev + rootstatus.author = author + if update_rev: + rootstatus.update_rev = update_rev + continue + return rootstatus + fromstring = staticmethod(fromstring) + +class XMLWCStatus(WCStatus): + def fromstring(data, rootwcpath, rev=None, modrev=None, author=None): + """ parse 'data' (XML string as outputted by svn st) into a status obj + """ + # XXX for externals, the path is shown twice: once + # with external information, and once with full info as if + # the item was a normal non-external... 
the current way of + # dealing with this issue is by ignoring it - this does make + # externals appear as external items as well as 'normal', + # unchanged ones in the status object so this is far from ideal + rootstatus = WCStatus(rootwcpath, rev, modrev, author) + update_rev = None + minidom, ExpatError = importxml() + try: + doc = minidom.parseString(data) + except ExpatError: + e = sys.exc_info()[1] + raise ValueError(str(e)) + urevels = doc.getElementsByTagName('against') + if urevels: + rootstatus.update_rev = urevels[-1].getAttribute('revision') + for entryel in doc.getElementsByTagName('entry'): + path = entryel.getAttribute('path') + statusel = entryel.getElementsByTagName('wc-status')[0] + itemstatus = statusel.getAttribute('item') + + if itemstatus == 'unversioned': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.unknown.append(wcpath) + continue + elif itemstatus == 'external': + wcpath = rootwcpath.__class__( + rootwcpath.localpath.join(path, abs=1), + auth=rootwcpath.auth) + rootstatus.external.append(wcpath) + continue + elif itemstatus == 'ignored': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.ignored.append(wcpath) + continue + elif itemstatus == 'incomplete': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.incomplete.append(wcpath) + continue + + rev = statusel.getAttribute('revision') + if itemstatus == 'added' or itemstatus == 'none': + rev = '0' + modrev = '?' + author = '?' + date = '' + elif itemstatus == "replaced": + pass + else: + #print entryel.toxml() + commitel = entryel.getElementsByTagName('commit')[0] + if commitel: + modrev = commitel.getAttribute('revision') + author = '' + author_els = commitel.getElementsByTagName('author') + if author_els: + for c in author_els[0].childNodes: + author += c.nodeValue + date = '' + for c in commitel.getElementsByTagName('date')[0]\ + .childNodes: + date += c.nodeValue + + wcpath = rootwcpath.join(path, abs=1) + + assert itemstatus != 'modified' or wcpath.check(file=1), ( + 'did\'t expect a directory with changed content here') + + itemattrname = { + 'normal': 'unchanged', + 'unversioned': 'unknown', + 'conflicted': 'conflict', + 'none': 'added', + }.get(itemstatus, itemstatus) + + attr = getattr(rootstatus, itemattrname) + attr.append(wcpath) + + propsstatus = statusel.getAttribute('props') + if propsstatus not in ('none', 'normal'): + rootstatus.prop_modified.append(wcpath) + + if wcpath == rootwcpath: + rootstatus.rev = rev + rootstatus.modrev = modrev + rootstatus.author = author + rootstatus.date = date + + # handle repos-status element (remote info) + rstatusels = entryel.getElementsByTagName('repos-status') + if rstatusels: + rstatusel = rstatusels[0] + ritemstatus = rstatusel.getAttribute('item') + if ritemstatus in ('added', 'modified'): + rootstatus.update_available.append(wcpath) + + lockels = entryel.getElementsByTagName('lock') + if len(lockels): + rootstatus.locked.append(wcpath) + + return rootstatus + fromstring = staticmethod(fromstring) + +class InfoSvnWCCommand: + def __init__(self, output): + # Path: test + # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test + # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada + # Revision: 2151 + # Node Kind: directory + # Schedule: normal + # Last Changed Author: hpk + # Last Changed Rev: 2100 + # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) + # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003) + + d = {} + for line in output.split('\n'): + if not line.strip(): + continue + key, value = 
line.split(':', 1) + key = key.lower().replace(' ', '') + value = value.strip() + d[key] = value + try: + self.url = d['url'] + except KeyError: + raise ValueError("Not a versioned resource") + #raise ValueError, "Not a versioned resource %r" % path + self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind'] + try: + self.rev = int(d['revision']) + except KeyError: + self.rev = None + + self.path = py.path.local(d['path']) + self.size = self.path.size() + if 'lastchangedrev' in d: + self.created_rev = int(d['lastchangedrev']) + if 'lastchangedauthor' in d: + self.last_author = d['lastchangedauthor'] + if 'lastchangeddate' in d: + self.mtime = parse_wcinfotime(d['lastchangeddate']) + self.time = self.mtime * 1000000 + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + +def parse_wcinfotime(timestr): + """ Returns seconds since epoch, UTC. """ + # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) + m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr) + if not m: + raise ValueError("timestring %r does not match" % timestr) + timestr, timezone = m.groups() + # do not handle timezone specially, return value should be UTC + parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S") + return calendar.timegm(parsedtime) + +def make_recursive_propdict(wcroot, + output, + rex = re.compile("Properties on '(.*)':")): + """ Return a dictionary of path->PropListDict mappings. """ + lines = [x for x in output.split('\n') if x] + pdict = {} + while lines: + line = lines.pop(0) + m = rex.match(line) + if not m: + raise ValueError("could not parse propget-line: %r" % line) + path = m.groups()[0] + wcpath = wcroot.join(path, abs=1) + propnames = [] + while lines and lines[0].startswith(' '): + propname = lines.pop(0).strip() + propnames.append(propname) + assert propnames, "must have found properties!" 
+ pdict[wcpath] = PropListDict(wcpath, propnames) + return pdict + + +def importxml(cache=[]): + if cache: + return cache + from xml.dom import minidom + from xml.parsers.expat import ExpatError + cache.extend([minidom, ExpatError]) + return cache + +class LogEntry: + def __init__(self, logentry): + self.rev = int(logentry.getAttribute('revision')) + for lpart in filter(None, logentry.childNodes): + if lpart.nodeType == lpart.ELEMENT_NODE: + if lpart.nodeName == 'author': + self.author = lpart.firstChild.nodeValue + elif lpart.nodeName == 'msg': + if lpart.firstChild: + self.msg = lpart.firstChild.nodeValue + else: + self.msg = '' + elif lpart.nodeName == 'date': + #2003-07-29T20:05:11.598637Z + timestr = lpart.firstChild.nodeValue + self.date = parse_apr_time(timestr) + elif lpart.nodeName == 'paths': + self.strpaths = [] + for ppart in filter(None, lpart.childNodes): + if ppart.nodeType == ppart.ELEMENT_NODE: + self.strpaths.append(PathEntry(ppart)) + def __repr__(self): + return '' % ( + self.rev, self.author, self.date) + + diff --git a/lib/spack/external/py/_process/__init__.py b/lib/spack/external/py/_process/__init__.py new file mode 100644 index 0000000000..86c714ad1a --- /dev/null +++ b/lib/spack/external/py/_process/__init__.py @@ -0,0 +1 @@ +""" high-level sub-process handling """ diff --git a/lib/spack/external/py/_process/cmdexec.py b/lib/spack/external/py/_process/cmdexec.py new file mode 100644 index 0000000000..f83a249402 --- /dev/null +++ b/lib/spack/external/py/_process/cmdexec.py @@ -0,0 +1,49 @@ +import sys +import subprocess +import py +from subprocess import Popen, PIPE + +def cmdexec(cmd): + """ return unicode output of executing 'cmd' in a separate process. + + raise cmdexec.Error exeception if the command failed. + the exception will provide an 'err' attribute containing + the error-output from the command. + if the subprocess module does not provide a proper encoding/unicode strings + sys.getdefaultencoding() will be used, if that does not exist, 'UTF-8'. 
+ """ + process = subprocess.Popen(cmd, shell=True, + universal_newlines=True, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = process.communicate() + if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not + try: + default_encoding = sys.getdefaultencoding() # jython may not have it + except AttributeError: + default_encoding = sys.stdout.encoding or 'UTF-8' + out = unicode(out, process.stdout.encoding or default_encoding) + err = unicode(err, process.stderr.encoding or default_encoding) + status = process.poll() + if status: + raise ExecutionFailed(status, status, cmd, out, err) + return out + +class ExecutionFailed(py.error.Error): + def __init__(self, status, systemstatus, cmd, out, err): + Exception.__init__(self) + self.status = status + self.systemstatus = systemstatus + self.cmd = cmd + self.err = err + self.out = out + + def __str__(self): + return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err) + +# export the exception under the name 'py.process.cmdexec.Error' +cmdexec.Error = ExecutionFailed +try: + ExecutionFailed.__module__ = 'py.process.cmdexec' + ExecutionFailed.__name__ = 'Error' +except (AttributeError, TypeError): + pass diff --git a/lib/spack/external/py/_process/forkedfunc.py b/lib/spack/external/py/_process/forkedfunc.py new file mode 100644 index 0000000000..1c28530688 --- /dev/null +++ b/lib/spack/external/py/_process/forkedfunc.py @@ -0,0 +1,120 @@ + +""" + ForkedFunc provides a way to run a function in a forked process + and get at its return value, stdout and stderr output as well + as signals and exitstatusus. +""" + +import py +import os +import sys +import marshal + + +def get_unbuffered_io(fd, filename): + f = open(str(filename), "w") + if fd != f.fileno(): + os.dup2(f.fileno(), fd) + class AutoFlush: + def write(self, data): + f.write(data) + f.flush() + def __getattr__(self, name): + return getattr(f, name) + return AutoFlush() + + +class ForkedFunc: + EXITSTATUS_EXCEPTION = 3 + + + def __init__(self, fun, args=None, kwargs=None, nice_level=0, + child_on_start=None, child_on_exit=None): + if args is None: + args = [] + if kwargs is None: + kwargs = {} + self.fun = fun + self.args = args + self.kwargs = kwargs + self.tempdir = tempdir = py.path.local.mkdtemp() + self.RETVAL = tempdir.ensure('retval') + self.STDOUT = tempdir.ensure('stdout') + self.STDERR = tempdir.ensure('stderr') + + pid = os.fork() + if pid: # in parent process + self.pid = pid + else: # in child process + self.pid = None + self._child(nice_level, child_on_start, child_on_exit) + + def _child(self, nice_level, child_on_start, child_on_exit): + # right now we need to call a function, but first we need to + # map all IO that might happen + sys.stdout = stdout = get_unbuffered_io(1, self.STDOUT) + sys.stderr = stderr = get_unbuffered_io(2, self.STDERR) + retvalf = self.RETVAL.open("wb") + EXITSTATUS = 0 + try: + if nice_level: + os.nice(nice_level) + try: + if child_on_start is not None: + child_on_start() + retval = self.fun(*self.args, **self.kwargs) + retvalf.write(marshal.dumps(retval)) + if child_on_exit is not None: + child_on_exit() + except: + excinfo = py.code.ExceptionInfo() + stderr.write(str(excinfo._getreprcrash())) + EXITSTATUS = self.EXITSTATUS_EXCEPTION + finally: + stdout.close() + stderr.close() + retvalf.close() + os.close(1) + os.close(2) + os._exit(EXITSTATUS) + + def waitfinish(self, waiter=os.waitpid): + pid, systemstatus = waiter(self.pid, 0) + if systemstatus: + if os.WIFSIGNALED(systemstatus): + exitstatus = 
os.WTERMSIG(systemstatus) + 128 + else: + exitstatus = os.WEXITSTATUS(systemstatus) + else: + exitstatus = 0 + signal = systemstatus & 0x7f + if not exitstatus and not signal: + retval = self.RETVAL.open('rb') + try: + retval_data = retval.read() + finally: + retval.close() + retval = marshal.loads(retval_data) + else: + retval = None + stdout = self.STDOUT.read() + stderr = self.STDERR.read() + self._removetemp() + return Result(exitstatus, signal, retval, stdout, stderr) + + def _removetemp(self): + if self.tempdir.check(): + self.tempdir.remove() + + def __del__(self): + if self.pid is not None: # only clean up in main process + self._removetemp() + + +class Result(object): + def __init__(self, exitstatus, signal, retval, stdout, stderr): + self.exitstatus = exitstatus + self.signal = signal + self.retval = retval + self.out = stdout + self.err = stderr diff --git a/lib/spack/external/py/_process/killproc.py b/lib/spack/external/py/_process/killproc.py new file mode 100644 index 0000000000..18e8310b5f --- /dev/null +++ b/lib/spack/external/py/_process/killproc.py @@ -0,0 +1,23 @@ +import py +import os, sys + +if sys.platform == "win32" or getattr(os, '_name', '') == 'nt': + try: + import ctypes + except ImportError: + def dokill(pid): + py.process.cmdexec("taskkill /F /PID %d" %(pid,)) + else: + def dokill(pid): + PROCESS_TERMINATE = 1 + handle = ctypes.windll.kernel32.OpenProcess( + PROCESS_TERMINATE, False, pid) + ctypes.windll.kernel32.TerminateProcess(handle, -1) + ctypes.windll.kernel32.CloseHandle(handle) +else: + def dokill(pid): + os.kill(pid, 15) + +def kill(pid): + """ kill process by id. """ + dokill(pid) diff --git a/lib/spack/external/py/_std.py b/lib/spack/external/py/_std.py new file mode 100644 index 0000000000..97a9853323 --- /dev/null +++ b/lib/spack/external/py/_std.py @@ -0,0 +1,18 @@ +import sys + +class Std(object): + """ makes top-level python modules available as an attribute, + importing them on first access. + """ + + def __init__(self): + self.__dict__ = sys.modules + + def __getattr__(self, name): + try: + m = __import__(name) + except ImportError: + raise AttributeError("py.std: could not import %s" % name) + return m + +std = Std() diff --git a/lib/spack/external/py/_xmlgen.py b/lib/spack/external/py/_xmlgen.py new file mode 100644 index 0000000000..1c83545884 --- /dev/null +++ b/lib/spack/external/py/_xmlgen.py @@ -0,0 +1,255 @@ +""" +module for generating and serializing xml and html structures +by using simple python objects. + +(c) holger krekel, holger at merlinux eu. 
2009 +""" +import sys, re + +if sys.version_info >= (3,0): + def u(s): + return s + def unicode(x, errors=None): + if hasattr(x, '__unicode__'): + return x.__unicode__() + return str(x) +else: + def u(s): + return unicode(s) + unicode = unicode + + +class NamespaceMetaclass(type): + def __getattr__(self, name): + if name[:1] == '_': + raise AttributeError(name) + if self == Namespace: + raise ValueError("Namespace class is abstract") + tagspec = self.__tagspec__ + if tagspec is not None and name not in tagspec: + raise AttributeError(name) + classattr = {} + if self.__stickyname__: + classattr['xmlname'] = name + cls = type(name, (self.__tagclass__,), classattr) + setattr(self, name, cls) + return cls + +class Tag(list): + class Attr(object): + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + def __init__(self, *args, **kwargs): + super(Tag, self).__init__(args) + self.attr = self.Attr(**kwargs) + + def __unicode__(self): + return self.unicode(indent=0) + __str__ = __unicode__ + + def unicode(self, indent=2): + l = [] + SimpleUnicodeVisitor(l.append, indent).visit(self) + return u("").join(l) + + def __repr__(self): + name = self.__class__.__name__ + return "<%r tag object %d>" % (name, id(self)) + +Namespace = NamespaceMetaclass('Namespace', (object, ), { + '__tagspec__': None, + '__tagclass__': Tag, + '__stickyname__': False, +}) + +class HtmlTag(Tag): + def unicode(self, indent=2): + l = [] + HtmlVisitor(l.append, indent, shortempty=False).visit(self) + return u("").join(l) + +# exported plain html namespace +class html(Namespace): + __tagclass__ = HtmlTag + __stickyname__ = True + __tagspec__ = dict([(x,1) for x in ( + 'a,abbr,acronym,address,applet,area,article,aside,audio,b,' + 'base,basefont,bdi,bdo,big,blink,blockquote,body,br,button,' + 'canvas,caption,center,cite,code,col,colgroup,command,comment,' + 'datalist,dd,del,details,dfn,dir,div,dl,dt,em,embed,' + 'fieldset,figcaption,figure,footer,font,form,frame,frameset,h1,' + 'h2,h3,h4,h5,h6,head,header,hgroup,hr,html,i,iframe,img,input,' + 'ins,isindex,kbd,keygen,label,legend,li,link,listing,map,mark,' + 'marquee,menu,meta,meter,multicol,nav,nobr,noembed,noframes,' + 'noscript,object,ol,optgroup,option,output,p,param,pre,progress,' + 'q,rp,rt,ruby,s,samp,script,section,select,small,source,span,' + 'strike,strong,style,sub,summary,sup,table,tbody,td,textarea,' + 'tfoot,th,thead,time,title,tr,track,tt,u,ul,xmp,var,video,wbr' + ).split(',') if x]) + + class Style(object): + def __init__(self, **kw): + for x, y in kw.items(): + x = x.replace('_', '-') + setattr(self, x, y) + + +class raw(object): + """just a box that can contain a unicode string that will be + included directly in the output""" + def __init__(self, uniobj): + self.uniobj = uniobj + +class SimpleUnicodeVisitor(object): + """ recursive visitor to write unicode. """ + def __init__(self, write, indent=0, curindent=0, shortempty=True): + self.write = write + self.cache = {} + self.visited = {} # for detection of recursion + self.indent = indent + self.curindent = curindent + self.parents = [] + self.shortempty = shortempty # short empty tags or not + + def visit(self, node): + """ dispatcher on node's class/bases name. 
""" + cls = node.__class__ + try: + visitmethod = self.cache[cls] + except KeyError: + for subclass in cls.__mro__: + visitmethod = getattr(self, subclass.__name__, None) + if visitmethod is not None: + break + else: + visitmethod = self.__object + self.cache[cls] = visitmethod + visitmethod(node) + + # the default fallback handler is marked private + # to avoid clashes with the tag name object + def __object(self, obj): + #self.write(obj) + self.write(escape(unicode(obj))) + + def raw(self, obj): + self.write(obj.uniobj) + + def list(self, obj): + assert id(obj) not in self.visited + self.visited[id(obj)] = 1 + for elem in obj: + self.visit(elem) + + def Tag(self, tag): + assert id(tag) not in self.visited + try: + tag.parent = self.parents[-1] + except IndexError: + tag.parent = None + self.visited[id(tag)] = 1 + tagname = getattr(tag, 'xmlname', tag.__class__.__name__) + if self.curindent and not self._isinline(tagname): + self.write("\n" + u(' ') * self.curindent) + if tag: + self.curindent += self.indent + self.write(u('<%s%s>') % (tagname, self.attributes(tag))) + self.parents.append(tag) + for x in tag: + self.visit(x) + self.parents.pop() + self.write(u('') % tagname) + self.curindent -= self.indent + else: + nameattr = tagname+self.attributes(tag) + if self._issingleton(tagname): + self.write(u('<%s/>') % (nameattr,)) + else: + self.write(u('<%s>') % (nameattr, tagname)) + + def attributes(self, tag): + # serialize attributes + attrlist = dir(tag.attr) + attrlist.sort() + l = [] + for name in attrlist: + res = self.repr_attribute(tag.attr, name) + if res is not None: + l.append(res) + l.extend(self.getstyle(tag)) + return u("").join(l) + + def repr_attribute(self, attrs, name): + if name[:2] != '__': + value = getattr(attrs, name) + if name.endswith('_'): + name = name[:-1] + if isinstance(value, raw): + insert = value.uniobj + else: + insert = escape(unicode(value)) + return ' %s="%s"' % (name, insert) + + def getstyle(self, tag): + """ return attribute list suitable for styling. """ + try: + styledict = tag.style.__dict__ + except AttributeError: + return [] + else: + stylelist = [x+': ' + y for x,y in styledict.items()] + return [u(' style="%s"') % u('; ').join(stylelist)] + + def _issingleton(self, tagname): + """can (and will) be overridden in subclasses""" + return self.shortempty + + def _isinline(self, tagname): + """can (and will) be overridden in subclasses""" + return False + +class HtmlVisitor(SimpleUnicodeVisitor): + + single = dict([(x, 1) for x in + ('br,img,area,param,col,hr,meta,link,base,' + 'input,frame').split(',')]) + inline = dict([(x, 1) for x in + ('a abbr acronym b basefont bdo big br cite code dfn em font ' + 'i img input kbd label q s samp select small span strike ' + 'strong sub sup textarea tt u var'.split(' '))]) + + def repr_attribute(self, attrs, name): + if name == 'class_': + value = getattr(attrs, name) + if value is None: + return + return super(HtmlVisitor, self).repr_attribute(attrs, name) + + def _issingleton(self, tagname): + return tagname in self.single + + def _isinline(self, tagname): + return tagname in self.inline + + +class _escape: + def __init__(self): + self.escape = { + u('"') : u('"'), u('<') : u('<'), u('>') : u('>'), + u('&') : u('&'), u("'") : u('''), + } + self.charef_rex = re.compile(u("|").join(self.escape.keys())) + + def _replacer(self, match): + return self.escape[match.group(0)] + + def __call__(self, ustring): + """ xml-escape the given unicode string. 
""" + try: + ustring = unicode(ustring) + except UnicodeDecodeError: + ustring = unicode(ustring, 'utf-8', errors='replace') + return self.charef_rex.sub(self._replacer, ustring) + +escape = _escape() diff --git a/lib/spack/external/py/test.py b/lib/spack/external/py/test.py new file mode 100644 index 0000000000..aa5beb1789 --- /dev/null +++ b/lib/spack/external/py/test.py @@ -0,0 +1,10 @@ +import sys +if __name__ == '__main__': + import pytest + sys.exit(pytest.main()) +else: + import sys, pytest + sys.modules['py.test'] = pytest + +# for more API entry points see the 'tests' definition +# in __init__.py -- cgit v1.2.3-60-g2f50
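The svnurl.py and svnwc.py files added above vendor the py library's command-line-based Subversion path classes. For reference, a minimal usage sketch follows; it is illustrative only and not part of the patch. It assumes the package's apipkg wiring (not shown in these hunks) exposes SvnWCCommandPath as py.path.svnwc, that an `svn` client is on PATH, and that the checkout path below is hypothetical.

    import py

    # Hypothetical working-copy path; replace with a real svn checkout.
    wc = py.path.svnwc('/path/to/working/copy')

    info = wc.info()                   # parsed from `svn info`
    print("%s @ rev %s" % (info.url, info.rev))

    st = wc.status(rec=1)              # parsed from `svn status --xml`
    for p in st.modified:
        print("M %s" % p)

    # Non-recursive commit of the working-copy root; returns the new revision.
    newrev = wc.commit("example commit message", rec=0)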
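The _process modules above (cmdexec.py, forkedfunc.py, killproc.py) are small subprocess helpers used by the py/pytest tooling. A short sketch under the assumption that they are exported as py.process.cmdexec, py.process.ForkedFunc and py.process.kill (the usual py API, wired up elsewhere); the ForkedFunc part is POSIX-only because it relies on os.fork().

    import py

    # cmdexec: run a shell command and get its unicode output, or catch a
    # py.process.cmdexec.Error carrying the command's stderr on failure.
    try:
        out = py.process.cmdexec('echo hello')
        print(out.strip())                                  # hello
    except py.process.cmdexec.Error as err:
        print("command failed (%s): %s" % (err.status, err.err))

    # ForkedFunc: run a Python function in a forked child and collect its
    # return value plus captured stdout/stderr (POSIX only).
    def compute():
        print("working in child")
        return 6 * 7

    result = py.process.ForkedFunc(compute).waitfinish()
    assert result.exitstatus == 0 and result.signal == 0
    print("%s / %s" % (result.retval, result.out.strip()))  # 42 / working in child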
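_xmlgen.py is the tag-object XML/HTML builder behind pytest's XML and HTML report output. A rough sketch, assuming the conventional py.xml.html namespace is wired up (this patch only adds the module itself); the attribute name class_ is mapped to the HTML class attribute by the serializer.

    import py

    html = py.xml.html
    # Hypothetical report fragment, for illustration only.
    page = html.html(
        html.head(html.title("example report")),
        html.body(
            html.h1("Results", class_="header"),
            html.ul(html.li("passed"), html.li("failed")),
        ),
    )
    print(page.unicode(indent=2))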