Diffstat (limited to 'lib/spack/external/pytest-fallback/_pytest')
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/LICENSE  21
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/__init__.py  8
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/_argcomplete.py  106
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/_code/__init__.py  10
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/_code/_py2traceback.py  85
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/_code/code.py  908
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/_code/source.py  416
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/_pluggy.py  11
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/_version.py  4
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/assertion/__init__.py  148
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/assertion/rewrite.py  952
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/assertion/truncate.py  102
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/assertion/util.py  310
-rwxr-xr-x  lib/spack/external/pytest-fallback/_pytest/cacheprovider.py  260
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/capture.py  577
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/compat.py  326
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/config.py  1398
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/debugging.py  123
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/deprecated.py  42
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/doctest.py  362
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/fixtures.py  1135
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/freeze_support.py  43
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/helpconfig.py  184
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/hookspec.py  423
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/junitxml.py  453
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/main.py  838
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/mark.py  465
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/monkeypatch.py  259
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/nodes.py  37
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/nose.py  73
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/outcomes.py  140
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/pastebin.py  100
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/pytester.py  1167
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/python.py  1173
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/python_api.py  629
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/recwarn.py  205
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/resultlog.py  113
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/runner.py  508
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/setuponly.py  74
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/setupplan.py  25
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/skipping.py  372
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/terminal.py  650
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/tmpdir.py  126
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/unittest.py  239
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/vendored_packages/README.md  13
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/vendored_packages/__init__.py  0
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst  11
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER  1
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt  22
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA  40
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD  9
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL  6
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json  1
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt  1
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy.py  782
-rw-r--r--  lib/spack/external/pytest-fallback/_pytest/warnings.py  94
56 files changed, 0 insertions, 16580 deletions
diff --git a/lib/spack/external/pytest-fallback/_pytest/LICENSE b/lib/spack/external/pytest-fallback/_pytest/LICENSE
deleted file mode 100644
index 629df45ac4..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2004-2017 Holger Krekel and others
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/lib/spack/external/pytest-fallback/_pytest/__init__.py b/lib/spack/external/pytest-fallback/_pytest/__init__.py
deleted file mode 100644
index 6e41f0504e..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-__all__ = ['__version__']
-
-try:
- from ._version import version as __version__
-except ImportError:
- # broken installation, we don't even try
- # unknown only works because we do poor mans version compare
- __version__ = 'unknown'
diff --git a/lib/spack/external/pytest-fallback/_pytest/_argcomplete.py b/lib/spack/external/pytest-fallback/_pytest/_argcomplete.py
deleted file mode 100644
index 965ec79513..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/_argcomplete.py
+++ /dev/null
@@ -1,106 +0,0 @@
-
-"""allow bash-completion for argparse with argcomplete if installed
-needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
-to find the magic string, so _ARGCOMPLETE env. var is never set, and
-this does not need special code).
-
-argcomplete does not support python 2.5 (although the changes for that
-are minor).
-
-Function try_argcomplete(parser) should be called directly before
-the call to ArgumentParser.parse_args().
-
-The filescompleter is what you normally would use on the positional
-arguments specification, in order to get "dirname/" after "dirn<TAB>"
-instead of the default "dirname ":
-
- optparser.add_argument(Config._file_or_dir, nargs='*'
- ).completer=filescompleter
-
-Other, application specific, completers should go in the file
-doing the add_argument calls as they need to be specified as .completer
-attributes as well. (If argcomplete is not installed, the function the
-attribute points to will not be used).
-
-SPEEDUP
-=======
-The generic argcomplete script for bash-completion
-(/etc/bash_completion.d/python-argcomplete.sh )
-uses a python program to determine startup script generated by pip.
-You can speed up completion somewhat by changing this script to include
- # PYTHON_ARGCOMPLETE_OK
-so that the python-argcomplete-check-easy-install-script does not
-need to be called to find the entry point of the code and see if that is
-marked with PYTHON_ARGCOMPLETE_OK
-
-INSTALL/DEBUGGING
-=================
-To include this support in another application that has setup.py generated
-scripts:
-- add the line:
- # PYTHON_ARGCOMPLETE_OK
- near the top of the main python entry point
-- include in the file calling parse_args():
- from _argcomplete import try_argcomplete, filescompleter
- , call try_argcomplete just before parse_args(), and optionally add
- filescompleter to the positional arguments' add_argument()
-If things do not work right away:
-- switch on argcomplete debugging with (also helpful when doing custom
- completers):
- export _ARC_DEBUG=1
-- run:
- python-argcomplete-check-easy-install-script $(which appname)
- echo $?
- will echo 0 if the magic line has been found, 1 if not
-- sometimes it helps to find early on errors using:
- _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
- which should throw a KeyError: 'COMPLINE' (which is properly set by the
- global argcomplete script).
-"""
-from __future__ import absolute_import, division, print_function
-import sys
-import os
-from glob import glob
-
-
-class FastFilesCompleter:
- 'Fast file completer class'
-
- def __init__(self, directories=True):
- self.directories = directories
-
- def __call__(self, prefix, **kwargs):
- """only called on non option completions"""
- if os.path.sep in prefix[1:]:
- prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
- else:
- prefix_dir = 0
- completion = []
- globbed = []
- if '*' not in prefix and '?' not in prefix:
- # we are on unix, otherwise no bash
- if not prefix or prefix[-1] == os.path.sep:
- globbed.extend(glob(prefix + '.*'))
- prefix += '*'
- globbed.extend(glob(prefix))
- for x in sorted(globbed):
- if os.path.isdir(x):
- x += '/'
- # append stripping the prefix (like bash, not like compgen)
- completion.append(x[prefix_dir:])
- return completion
-
-
-if os.environ.get('_ARGCOMPLETE'):
- try:
- import argcomplete.completers
- except ImportError:
- sys.exit(-1)
- filescompleter = FastFilesCompleter()
-
- def try_argcomplete(parser):
- argcomplete.autocomplete(parser, always_complete_options=False)
-else:
- def try_argcomplete(parser):
- pass
- filescompleter = None
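
The module docstring above doubles as a how-to for wiring this completer into an argparse entry point. Below is a minimal sketch of that wiring, assuming the vendored import path `_pytest._argcomplete`; the program name `appname` and the `file_or_dir` argument are placeholders, not part of the deleted module:

    #!/usr/bin/env python
    # PYTHON_ARGCOMPLETE_OK   <- marker the global bash completer looks for
    import argparse

    from _pytest._argcomplete import try_argcomplete, filescompleter


    def main(argv=None):
        parser = argparse.ArgumentParser(prog="appname")  # placeholder name
        # attach the fast file completer so "dirn<TAB>" completes to "dirname/"
        parser.add_argument("file_or_dir", nargs="*").completer = filescompleter
        # must run right before parse_args(); it is a no-op unless argcomplete
        # has set the _ARGCOMPLETE environment variable
        try_argcomplete(parser)
        args = parser.parse_args(argv)
        print(args.file_or_dir)


    if __name__ == "__main__":
        main()

The debugging steps listed in the docstring (exporting _ARC_DEBUG=1, running python-argcomplete-check-easy-install-script) apply unchanged to a script built this way.
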
diff --git a/lib/spack/external/pytest-fallback/_pytest/_code/__init__.py b/lib/spack/external/pytest-fallback/_pytest/_code/__init__.py
deleted file mode 100644
index 815c13b42c..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/_code/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-""" python inspection/code generation API """
-from __future__ import absolute_import, division, print_function
-from .code import Code # noqa
-from .code import ExceptionInfo # noqa
-from .code import Frame # noqa
-from .code import Traceback # noqa
-from .code import getrawcode # noqa
-from .source import Source # noqa
-from .source import compile_ as compile # noqa
-from .source import getfslineno # noqa
diff --git a/lib/spack/external/pytest-fallback/_pytest/_code/_py2traceback.py b/lib/spack/external/pytest-fallback/_pytest/_code/_py2traceback.py
deleted file mode 100644
index 5aacf0a428..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/_code/_py2traceback.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# copied from python-2.7.3's traceback.py
-# CHANGES:
-# - some_str is replaced, trying to create unicode strings
-#
-from __future__ import absolute_import, division, print_function
-import types
-
-
-def format_exception_only(etype, value):
- """Format the exception part of a traceback.
-
- The arguments are the exception type and value such as given by
- sys.last_type and sys.last_value. The return value is a list of
- strings, each ending in a newline.
-
- Normally, the list contains a single string; however, for
- SyntaxError exceptions, it contains several lines that (when
- printed) display detailed information about where the syntax
- error occurred.
-
- The message indicating which exception occurred is always the last
- string in the list.
-
- """
-
- # An instance should not have a meaningful value parameter, but
- # sometimes does, particularly for string exceptions, such as
- # >>> raise string1, string2 # deprecated
- #
- # Clear these out first because issubtype(string1, SyntaxError)
- # would throw another exception and mask the original problem.
- if (isinstance(etype, BaseException) or
- isinstance(etype, types.InstanceType) or
- etype is None or type(etype) is str):
- return [_format_final_exc_line(etype, value)]
-
- stype = etype.__name__
-
- if not issubclass(etype, SyntaxError):
- return [_format_final_exc_line(stype, value)]
-
- # It was a syntax error; show exactly where the problem was found.
- lines = []
- try:
- msg, (filename, lineno, offset, badline) = value.args
- except Exception:
- pass
- else:
- filename = filename or "<string>"
- lines.append(' File "%s", line %d\n' % (filename, lineno))
- if badline is not None:
- if isinstance(badline, bytes): # python 2 only
- badline = badline.decode('utf-8', 'replace')
- lines.append(u' %s\n' % badline.strip())
- if offset is not None:
- caretspace = badline.rstrip('\n')[:offset].lstrip()
- # non-space whitespace (likes tabs) must be kept for alignment
- caretspace = ((c.isspace() and c or ' ') for c in caretspace)
- # only three spaces to account for offset1 == pos 0
- lines.append(' %s^\n' % ''.join(caretspace))
- value = msg
-
- lines.append(_format_final_exc_line(stype, value))
- return lines
-
-
-def _format_final_exc_line(etype, value):
- """Return a list of a single line -- normal case for format_exception_only"""
- valuestr = _some_str(value)
- if value is None or not valuestr:
- line = "%s\n" % etype
- else:
- line = "%s: %s\n" % (etype, valuestr)
- return line
-
-
-def _some_str(value):
- try:
- return unicode(value)
- except Exception:
- try:
- return str(value)
- except Exception:
- pass
- return '<unprintable %s object>' % type(value).__name__
diff --git a/lib/spack/external/pytest-fallback/_pytest/_code/code.py b/lib/spack/external/pytest-fallback/_pytest/_code/code.py
deleted file mode 100644
index f3b7eedfce..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/_code/code.py
+++ /dev/null
@@ -1,908 +0,0 @@
-from __future__ import absolute_import, division, print_function
-import sys
-from inspect import CO_VARARGS, CO_VARKEYWORDS
-import re
-from weakref import ref
-from _pytest.compat import _PY2, _PY3, PY35, safe_str
-
-import py
-builtin_repr = repr
-
-reprlib = py.builtin._tryimport('repr', 'reprlib')
-
-if _PY3:
- from traceback import format_exception_only
-else:
- from ._py2traceback import format_exception_only
-
-
-class Code(object):
- """ wrapper around Python code objects """
-
- def __init__(self, rawcode):
- if not hasattr(rawcode, "co_filename"):
- rawcode = getrawcode(rawcode)
- try:
- self.filename = rawcode.co_filename
- self.firstlineno = rawcode.co_firstlineno - 1
- self.name = rawcode.co_name
- except AttributeError:
- raise TypeError("not a code object: %r" % (rawcode,))
- self.raw = rawcode
-
- def __eq__(self, other):
- return self.raw == other.raw
-
- __hash__ = None
-
- def __ne__(self, other):
- return not self == other
-
- @property
- def path(self):
- """ return a path object pointing to source code (note that it
- might not point to an actually existing file). """
- try:
- p = py.path.local(self.raw.co_filename)
- # maybe don't try this checking
- if not p.check():
- raise OSError("py.path check failed.")
- except OSError:
- # XXX maybe try harder like the weird logic
- # in the standard lib [linecache.updatecache] does?
- p = self.raw.co_filename
-
- return p
-
- @property
- def fullsource(self):
- """ return a _pytest._code.Source object for the full source file of the code
- """
- from _pytest._code import source
- full, _ = source.findsource(self.raw)
- return full
-
- def source(self):
- """ return a _pytest._code.Source object for the code object's source only
- """
- # return source only for that part of code
- import _pytest._code
- return _pytest._code.Source(self.raw)
-
- def getargs(self, var=False):
- """ return a tuple with the argument names for the code object
-
- if 'var' is set True also return the names of the variable and
- keyword arguments when present
- """
- # handfull shortcut for getting args
- raw = self.raw
- argcount = raw.co_argcount
- if var:
- argcount += raw.co_flags & CO_VARARGS
- argcount += raw.co_flags & CO_VARKEYWORDS
- return raw.co_varnames[:argcount]
-
-
-class Frame(object):
- """Wrapper around a Python frame holding f_locals and f_globals
- in which expressions can be evaluated."""
-
- def __init__(self, frame):
- self.lineno = frame.f_lineno - 1
- self.f_globals = frame.f_globals
- self.f_locals = frame.f_locals
- self.raw = frame
- self.code = Code(frame.f_code)
-
- @property
- def statement(self):
- """ statement this frame is at """
- import _pytest._code
- if self.code.fullsource is None:
- return _pytest._code.Source("")
- return self.code.fullsource.getstatement(self.lineno)
-
- def eval(self, code, **vars):
- """ evaluate 'code' in the frame
-
- 'vars' are optional additional local variables
-
- returns the result of the evaluation
- """
- f_locals = self.f_locals.copy()
- f_locals.update(vars)
- return eval(code, self.f_globals, f_locals)
-
- def exec_(self, code, **vars):
- """ exec 'code' in the frame
-
- 'vars' are optiona; additional local variables
- """
- f_locals = self.f_locals.copy()
- f_locals.update(vars)
- py.builtin.exec_(code, self.f_globals, f_locals)
-
- def repr(self, object):
- """ return a 'safe' (non-recursive, one-line) string repr for 'object'
- """
- return py.io.saferepr(object)
-
- def is_true(self, object):
- return object
-
- def getargs(self, var=False):
- """ return a list of tuples (name, value) for all arguments
-
- if 'var' is set True also include the variable and keyword
- arguments when present
- """
- retval = []
- for arg in self.code.getargs(var):
- try:
- retval.append((arg, self.f_locals[arg]))
- except KeyError:
- pass # this can occur when using Psyco
- return retval
-
-
-class TracebackEntry(object):
- """ a single entry in a traceback """
-
- _repr_style = None
- exprinfo = None
-
- def __init__(self, rawentry, excinfo=None):
- self._excinfo = excinfo
- self._rawentry = rawentry
- self.lineno = rawentry.tb_lineno - 1
-
- def set_repr_style(self, mode):
- assert mode in ("short", "long")
- self._repr_style = mode
-
- @property
- def frame(self):
- import _pytest._code
- return _pytest._code.Frame(self._rawentry.tb_frame)
-
- @property
- def relline(self):
- return self.lineno - self.frame.code.firstlineno
-
- def __repr__(self):
- return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno + 1)
-
- @property
- def statement(self):
- """ _pytest._code.Source object for the current statement """
- source = self.frame.code.fullsource
- return source.getstatement(self.lineno)
-
- @property
- def path(self):
- """ path to the source code """
- return self.frame.code.path
-
- def getlocals(self):
- return self.frame.f_locals
- locals = property(getlocals, None, None, "locals of underlaying frame")
-
- def getfirstlinesource(self):
- # on Jython this firstlineno can be -1 apparently
- return max(self.frame.code.firstlineno, 0)
-
- def getsource(self, astcache=None):
- """ return failing source code. """
- # we use the passed in astcache to not reparse asttrees
- # within exception info printing
- from _pytest._code.source import getstatementrange_ast
- source = self.frame.code.fullsource
- if source is None:
- return None
- key = astnode = None
- if astcache is not None:
- key = self.frame.code.path
- if key is not None:
- astnode = astcache.get(key, None)
- start = self.getfirstlinesource()
- try:
- astnode, _, end = getstatementrange_ast(self.lineno, source,
- astnode=astnode)
- except SyntaxError:
- end = self.lineno + 1
- else:
- if key is not None:
- astcache[key] = astnode
- return source[start:end]
-
- source = property(getsource)
-
- def ishidden(self):
- """ return True if the current frame has a var __tracebackhide__
- resolving to True
-
- If __tracebackhide__ is a callable, it gets called with the
- ExceptionInfo instance and can decide whether to hide the traceback.
-
- mostly for internal use
- """
- try:
- tbh = self.frame.f_locals['__tracebackhide__']
- except KeyError:
- try:
- tbh = self.frame.f_globals['__tracebackhide__']
- except KeyError:
- return False
-
- if py.builtin.callable(tbh):
- return tbh(None if self._excinfo is None else self._excinfo())
- else:
- return tbh
-
- def __str__(self):
- try:
- fn = str(self.path)
- except py.error.Error:
- fn = '???'
- name = self.frame.code.name
- try:
- line = str(self.statement).lstrip()
- except KeyboardInterrupt:
- raise
- except: # noqa
- line = "???"
- return " File %r:%d in %s\n %s\n" % (fn, self.lineno + 1, name, line)
-
- def name(self):
- return self.frame.code.raw.co_name
- name = property(name, None, None, "co_name of underlaying code")
-
-
-class Traceback(list):
- """ Traceback objects encapsulate and offer higher level
- access to Traceback entries.
- """
- Entry = TracebackEntry
-
- def __init__(self, tb, excinfo=None):
- """ initialize from given python traceback object and ExceptionInfo """
- self._excinfo = excinfo
- if hasattr(tb, 'tb_next'):
- def f(cur):
- while cur is not None:
- yield self.Entry(cur, excinfo=excinfo)
- cur = cur.tb_next
- list.__init__(self, f(tb))
- else:
- list.__init__(self, tb)
-
- def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
- """ return a Traceback instance wrapping part of this Traceback
-
- by provding any combination of path, lineno and firstlineno, the
- first frame to start the to-be-returned traceback is determined
-
- this allows cutting the first part of a Traceback instance e.g.
- for formatting reasons (removing some uninteresting bits that deal
- with handling of the exception/traceback)
- """
- for x in self:
- code = x.frame.code
- codepath = code.path
- if ((path is None or codepath == path) and
- (excludepath is None or not hasattr(codepath, 'relto') or
- not codepath.relto(excludepath)) and
- (lineno is None or x.lineno == lineno) and
- (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
- return Traceback(x._rawentry, self._excinfo)
- return self
-
- def __getitem__(self, key):
- val = super(Traceback, self).__getitem__(key)
- if isinstance(key, type(slice(0))):
- val = self.__class__(val)
- return val
-
- def filter(self, fn=lambda x: not x.ishidden()):
- """ return a Traceback instance with certain items removed
-
- fn is a function that gets a single argument, a TracebackEntry
- instance, and should return True when the item should be added
- to the Traceback, False when not
-
- by default this removes all the TracebackEntries which are hidden
- (see ishidden() above)
- """
- return Traceback(filter(fn, self), self._excinfo)
-
- def getcrashentry(self):
- """ return last non-hidden traceback entry that lead
- to the exception of a traceback.
- """
- for i in range(-1, -len(self) - 1, -1):
- entry = self[i]
- if not entry.ishidden():
- return entry
- return self[-1]
-
- def recursionindex(self):
- """ return the index of the frame/TracebackEntry where recursion
- originates if appropriate, None if no recursion occurred
- """
- cache = {}
- for i, entry in enumerate(self):
- # id for the code.raw is needed to work around
- # the strange metaprogramming in the decorator lib from pypi
- # which generates code objects that have hash/value equality
- # XXX needs a test
- key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
- # print "checking for recursion at", key
- values = cache.setdefault(key, [])
- if values:
- f = entry.frame
- loc = f.f_locals
- for otherloc in values:
- if f.is_true(f.eval(co_equal,
- __recursioncache_locals_1=loc,
- __recursioncache_locals_2=otherloc)):
- return i
- values.append(entry.frame.f_locals)
- return None
-
-
-co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
- '?', 'eval')
-
-
-class ExceptionInfo(object):
- """ wraps sys.exc_info() objects and offers
- help for navigating the traceback.
- """
- _striptext = ''
- _assert_start_repr = "AssertionError(u\'assert " if _PY2 else "AssertionError(\'assert "
-
- def __init__(self, tup=None, exprinfo=None):
- import _pytest._code
- if tup is None:
- tup = sys.exc_info()
- if exprinfo is None and isinstance(tup[1], AssertionError):
- exprinfo = getattr(tup[1], 'msg', None)
- if exprinfo is None:
- exprinfo = py.io.saferepr(tup[1])
- if exprinfo and exprinfo.startswith(self._assert_start_repr):
- self._striptext = 'AssertionError: '
- self._excinfo = tup
- #: the exception class
- self.type = tup[0]
- #: the exception instance
- self.value = tup[1]
- #: the exception raw traceback
- self.tb = tup[2]
- #: the exception type name
- self.typename = self.type.__name__
- #: the exception traceback (_pytest._code.Traceback instance)
- self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self))
-
- def __repr__(self):
- return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
-
- def exconly(self, tryshort=False):
- """ return the exception as a string
-
- when 'tryshort' resolves to True, and the exception is a
- _pytest._code._AssertionError, only the actual exception part of
- the exception representation is returned (so 'AssertionError: ' is
- removed from the beginning)
- """
- lines = format_exception_only(self.type, self.value)
- text = ''.join(lines)
- text = text.rstrip()
- if tryshort:
- if text.startswith(self._striptext):
- text = text[len(self._striptext):]
- return text
-
- def errisinstance(self, exc):
- """ return True if the exception is an instance of exc """
- return isinstance(self.value, exc)
-
- def _getreprcrash(self):
- exconly = self.exconly(tryshort=True)
- entry = self.traceback.getcrashentry()
- path, lineno = entry.frame.code.raw.co_filename, entry.lineno
- return ReprFileLocation(path, lineno + 1, exconly)
-
- def getrepr(self, showlocals=False, style="long",
- abspath=False, tbfilter=True, funcargs=False):
- """ return str()able representation of this exception info.
- showlocals: show locals per traceback entry
- style: long|short|no|native traceback style
- tbfilter: hide entries (where __tracebackhide__ is true)
-
- in case of style==native, tbfilter and showlocals is ignored.
- """
- if style == 'native':
- return ReprExceptionInfo(ReprTracebackNative(
- py.std.traceback.format_exception(
- self.type,
- self.value,
- self.traceback[0]._rawentry,
- )), self._getreprcrash())
-
- fmt = FormattedExcinfo(showlocals=showlocals, style=style,
- abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
- return fmt.repr_excinfo(self)
-
- def __str__(self):
- entry = self.traceback[-1]
- loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
- return str(loc)
-
- def __unicode__(self):
- entry = self.traceback[-1]
- loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
- return unicode(loc)
-
- def match(self, regexp):
- """
- Match the regular expression 'regexp' on the string representation of
- the exception. If it matches then True is returned (so that it is
- possible to write 'assert excinfo.match()'). If it doesn't match an
- AssertionError is raised.
- """
- __tracebackhide__ = True
- if not re.search(regexp, str(self.value)):
- assert 0, "Pattern '{0!s}' not found in '{1!s}'".format(
- regexp, self.value)
- return True
-
-
-class FormattedExcinfo(object):
- """ presenting information about failing Functions and Generators. """
- # for traceback entries
- flow_marker = ">"
- fail_marker = "E"
-
- def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
- self.showlocals = showlocals
- self.style = style
- self.tbfilter = tbfilter
- self.funcargs = funcargs
- self.abspath = abspath
- self.astcache = {}
-
- def _getindent(self, source):
- # figure out indent for given source
- try:
- s = str(source.getstatement(len(source) - 1))
- except KeyboardInterrupt:
- raise
- except: # noqa
- try:
- s = str(source[-1])
- except KeyboardInterrupt:
- raise
- except: # noqa
- return 0
- return 4 + (len(s) - len(s.lstrip()))
-
- def _getentrysource(self, entry):
- source = entry.getsource(self.astcache)
- if source is not None:
- source = source.deindent()
- return source
-
- def _saferepr(self, obj):
- return py.io.saferepr(obj)
-
- def repr_args(self, entry):
- if self.funcargs:
- args = []
- for argname, argvalue in entry.frame.getargs(var=True):
- args.append((argname, self._saferepr(argvalue)))
- return ReprFuncArgs(args)
-
- def get_source(self, source, line_index=-1, excinfo=None, short=False):
- """ return formatted and marked up source lines. """
- import _pytest._code
- lines = []
- if source is None or line_index >= len(source.lines):
- source = _pytest._code.Source("???")
- line_index = 0
- if line_index < 0:
- line_index += len(source)
- space_prefix = " "
- if short:
- lines.append(space_prefix + source.lines[line_index].strip())
- else:
- for line in source.lines[:line_index]:
- lines.append(space_prefix + line)
- lines.append(self.flow_marker + " " + source.lines[line_index])
- for line in source.lines[line_index + 1:]:
- lines.append(space_prefix + line)
- if excinfo is not None:
- indent = 4 if short else self._getindent(source)
- lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
- return lines
-
- def get_exconly(self, excinfo, indent=4, markall=False):
- lines = []
- indent = " " * indent
- # get the real exception information out
- exlines = excinfo.exconly(tryshort=True).split('\n')
- failindent = self.fail_marker + indent[1:]
- for line in exlines:
- lines.append(failindent + line)
- if not markall:
- failindent = indent
- return lines
-
- def repr_locals(self, locals):
- if self.showlocals:
- lines = []
- keys = [loc for loc in locals if loc[0] != "@"]
- keys.sort()
- for name in keys:
- value = locals[name]
- if name == '__builtins__':
- lines.append("__builtins__ = <builtins>")
- else:
- # This formatting could all be handled by the
- # _repr() function, which is only reprlib.Repr in
- # disguise, so is very configurable.
- str_repr = self._saferepr(value)
- # if len(str_repr) < 70 or not isinstance(value,
- # (list, tuple, dict)):
- lines.append("%-10s = %s" % (name, str_repr))
- # else:
- # self._line("%-10s =\\" % (name,))
- # # XXX
- # py.std.pprint.pprint(value, stream=self.excinfowriter)
- return ReprLocals(lines)
-
- def repr_traceback_entry(self, entry, excinfo=None):
- import _pytest._code
- source = self._getentrysource(entry)
- if source is None:
- source = _pytest._code.Source("???")
- line_index = 0
- else:
- # entry.getfirstlinesource() can be -1, should be 0 on jython
- line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
-
- lines = []
- style = entry._repr_style
- if style is None:
- style = self.style
- if style in ("short", "long"):
- short = style == "short"
- reprargs = self.repr_args(entry) if not short else None
- s = self.get_source(source, line_index, excinfo, short=short)
- lines.extend(s)
- if short:
- message = "in %s" % (entry.name)
- else:
- message = excinfo and excinfo.typename or ""
- path = self._makepath(entry.path)
- filelocrepr = ReprFileLocation(path, entry.lineno + 1, message)
- localsrepr = None
- if not short:
- localsrepr = self.repr_locals(entry.locals)
- return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
- if excinfo:
- lines.extend(self.get_exconly(excinfo, indent=4))
- return ReprEntry(lines, None, None, None, style)
-
- def _makepath(self, path):
- if not self.abspath:
- try:
- np = py.path.local().bestrelpath(path)
- except OSError:
- return path
- if len(np) < len(str(path)):
- path = np
- return path
-
- def repr_traceback(self, excinfo):
- traceback = excinfo.traceback
- if self.tbfilter:
- traceback = traceback.filter()
-
- if is_recursion_error(excinfo):
- traceback, extraline = self._truncate_recursive_traceback(traceback)
- else:
- extraline = None
-
- last = traceback[-1]
- entries = []
- for index, entry in enumerate(traceback):
- einfo = (last == entry) and excinfo or None
- reprentry = self.repr_traceback_entry(entry, einfo)
- entries.append(reprentry)
- return ReprTraceback(entries, extraline, style=self.style)
-
- def _truncate_recursive_traceback(self, traceback):
- """
- Truncate the given recursive traceback trying to find the starting point
- of the recursion.
-
- The detection is done by going through each traceback entry and finding the
- point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``.
-
- Handle the situation where the recursion process might raise an exception (for example
- comparing numpy arrays using equality raises a TypeError), in which case we do our best to
- warn the user of the error and show a limited traceback.
- """
- try:
- recursionindex = traceback.recursionindex()
- except Exception as e:
- max_frames = 10
- extraline = (
- '!!! Recursion error detected, but an error occurred locating the origin of recursion.\n'
- ' The following exception happened when comparing locals in the stack frame:\n'
- ' {exc_type}: {exc_msg}\n'
- ' Displaying first and last {max_frames} stack frames out of {total}.'
- ).format(exc_type=type(e).__name__, exc_msg=safe_str(e), max_frames=max_frames, total=len(traceback))
- traceback = traceback[:max_frames] + traceback[-max_frames:]
- else:
- if recursionindex is not None:
- extraline = "!!! Recursion detected (same locals & position)"
- traceback = traceback[:recursionindex + 1]
- else:
- extraline = None
-
- return traceback, extraline
-
- def repr_excinfo(self, excinfo):
- if _PY2:
- reprtraceback = self.repr_traceback(excinfo)
- reprcrash = excinfo._getreprcrash()
-
- return ReprExceptionInfo(reprtraceback, reprcrash)
- else:
- repr_chain = []
- e = excinfo.value
- descr = None
- while e is not None:
- if excinfo:
- reprtraceback = self.repr_traceback(excinfo)
- reprcrash = excinfo._getreprcrash()
- else:
- # fallback to native repr if the exception doesn't have a traceback:
- # ExceptionInfo objects require a full traceback to work
- reprtraceback = ReprTracebackNative(py.std.traceback.format_exception(type(e), e, None))
- reprcrash = None
-
- repr_chain += [(reprtraceback, reprcrash, descr)]
- if e.__cause__ is not None:
- e = e.__cause__
- excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
- descr = 'The above exception was the direct cause of the following exception:'
- elif (e.__context__ is not None and not e.__suppress_context__):
- e = e.__context__
- excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
- descr = 'During handling of the above exception, another exception occurred:'
- else:
- e = None
- repr_chain.reverse()
- return ExceptionChainRepr(repr_chain)
-
-
-class TerminalRepr(object):
- def __str__(self):
- s = self.__unicode__()
- if _PY2:
- s = s.encode('utf-8')
- return s
-
- def __unicode__(self):
- # FYI this is called from pytest-xdist's serialization of exception
- # information.
- io = py.io.TextIO()
- tw = py.io.TerminalWriter(file=io)
- self.toterminal(tw)
- return io.getvalue().strip()
-
- def __repr__(self):
- return "<%s instance at %0x>" % (self.__class__, id(self))
-
-
-class ExceptionRepr(TerminalRepr):
- def __init__(self):
- self.sections = []
-
- def addsection(self, name, content, sep="-"):
- self.sections.append((name, content, sep))
-
- def toterminal(self, tw):
- for name, content, sep in self.sections:
- tw.sep(sep, name)
- tw.line(content)
-
-
-class ExceptionChainRepr(ExceptionRepr):
- def __init__(self, chain):
- super(ExceptionChainRepr, self).__init__()
- self.chain = chain
- # reprcrash and reprtraceback of the outermost (the newest) exception
- # in the chain
- self.reprtraceback = chain[-1][0]
- self.reprcrash = chain[-1][1]
-
- def toterminal(self, tw):
- for element in self.chain:
- element[0].toterminal(tw)
- if element[2] is not None:
- tw.line("")
- tw.line(element[2], yellow=True)
- super(ExceptionChainRepr, self).toterminal(tw)
-
-
-class ReprExceptionInfo(ExceptionRepr):
- def __init__(self, reprtraceback, reprcrash):
- super(ReprExceptionInfo, self).__init__()
- self.reprtraceback = reprtraceback
- self.reprcrash = reprcrash
-
- def toterminal(self, tw):
- self.reprtraceback.toterminal(tw)
- super(ReprExceptionInfo, self).toterminal(tw)
-
-
-class ReprTraceback(TerminalRepr):
- entrysep = "_ "
-
- def __init__(self, reprentries, extraline, style):
- self.reprentries = reprentries
- self.extraline = extraline
- self.style = style
-
- def toterminal(self, tw):
- # the entries might have different styles
- for i, entry in enumerate(self.reprentries):
- if entry.style == "long":
- tw.line("")
- entry.toterminal(tw)
- if i < len(self.reprentries) - 1:
- next_entry = self.reprentries[i + 1]
- if entry.style == "long" or \
- entry.style == "short" and next_entry.style == "long":
- tw.sep(self.entrysep)
-
- if self.extraline:
- tw.line(self.extraline)
-
-
-class ReprTracebackNative(ReprTraceback):
- def __init__(self, tblines):
- self.style = "native"
- self.reprentries = [ReprEntryNative(tblines)]
- self.extraline = None
-
-
-class ReprEntryNative(TerminalRepr):
- style = "native"
-
- def __init__(self, tblines):
- self.lines = tblines
-
- def toterminal(self, tw):
- tw.write("".join(self.lines))
-
-
-class ReprEntry(TerminalRepr):
- localssep = "_ "
-
- def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
- self.lines = lines
- self.reprfuncargs = reprfuncargs
- self.reprlocals = reprlocals
- self.reprfileloc = filelocrepr
- self.style = style
-
- def toterminal(self, tw):
- if self.style == "short":
- self.reprfileloc.toterminal(tw)
- for line in self.lines:
- red = line.startswith("E ")
- tw.line(line, bold=True, red=red)
- # tw.line("")
- return
- if self.reprfuncargs:
- self.reprfuncargs.toterminal(tw)
- for line in self.lines:
- red = line.startswith("E ")
- tw.line(line, bold=True, red=red)
- if self.reprlocals:
- # tw.sep(self.localssep, "Locals")
- tw.line("")
- self.reprlocals.toterminal(tw)
- if self.reprfileloc:
- if self.lines:
- tw.line("")
- self.reprfileloc.toterminal(tw)
-
- def __str__(self):
- return "%s\n%s\n%s" % ("\n".join(self.lines),
- self.reprlocals,
- self.reprfileloc)
-
-
-class ReprFileLocation(TerminalRepr):
- def __init__(self, path, lineno, message):
- self.path = str(path)
- self.lineno = lineno
- self.message = message
-
- def toterminal(self, tw):
- # filename and lineno output for each entry,
- # using an output format that most editors unterstand
- msg = self.message
- i = msg.find("\n")
- if i != -1:
- msg = msg[:i]
- tw.write(self.path, bold=True, red=True)
- tw.line(":%s: %s" % (self.lineno, msg))
-
-
-class ReprLocals(TerminalRepr):
- def __init__(self, lines):
- self.lines = lines
-
- def toterminal(self, tw):
- for line in self.lines:
- tw.line(line)
-
-
-class ReprFuncArgs(TerminalRepr):
- def __init__(self, args):
- self.args = args
-
- def toterminal(self, tw):
- if self.args:
- linesofar = ""
- for name, value in self.args:
- ns = "%s = %s" % (safe_str(name), safe_str(value))
- if len(ns) + len(linesofar) + 2 > tw.fullwidth:
- if linesofar:
- tw.line(linesofar)
- linesofar = ns
- else:
- if linesofar:
- linesofar += ", " + ns
- else:
- linesofar = ns
- if linesofar:
- tw.line(linesofar)
- tw.line("")
-
-
-def getrawcode(obj, trycall=True):
- """ return code object for given function. """
- try:
- return obj.__code__
- except AttributeError:
- obj = getattr(obj, 'im_func', obj)
- obj = getattr(obj, 'func_code', obj)
- obj = getattr(obj, 'f_code', obj)
- obj = getattr(obj, '__code__', obj)
- if trycall and not hasattr(obj, 'co_firstlineno'):
- if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
- x = getrawcode(obj.__call__, trycall=False)
- if hasattr(x, 'co_firstlineno'):
- return x
- return obj
-
-
-if PY35: # RecursionError introduced in 3.5
- def is_recursion_error(excinfo):
- return excinfo.errisinstance(RecursionError) # noqa
-else:
- def is_recursion_error(excinfo):
- if not excinfo.errisinstance(RuntimeError):
- return False
- try:
- return "maximum recursion depth exceeded" in str(excinfo.value)
- except UnicodeError:
- return False
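
The ExceptionInfo and Traceback classes above are what pytest.raises() hands back to test code. A small usage sketch, assuming this vendored _pytest._code package is importable; the printed values are illustrative:

    from _pytest._code import ExceptionInfo

    try:
        1 / 0
    except ZeroDivisionError:
        excinfo = ExceptionInfo()                       # wraps sys.exc_info()
        print(excinfo.typename)                         # ZeroDivisionError
        print(excinfo.exconly(tryshort=True))           # e.g. "ZeroDivisionError: division by zero"
        excinfo.match(r"division")                      # regex against str(excinfo.value); AssertionError if absent
        excinfo.traceback = excinfo.traceback.filter()  # drop __tracebackhide__ entries
        print(excinfo.getrepr(style="short"))           # str()-able terminal representation
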
diff --git a/lib/spack/external/pytest-fallback/_pytest/_code/source.py b/lib/spack/external/pytest-fallback/_pytest/_code/source.py
deleted file mode 100644
index fc41712649..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/_code/source.py
+++ /dev/null
@@ -1,416 +0,0 @@
-from __future__ import absolute_import, division, generators, print_function
-
-from bisect import bisect_right
-import sys
-import inspect
-import tokenize
-import py
-cpy_compile = compile
-
-try:
- import _ast
- from _ast import PyCF_ONLY_AST as _AST_FLAG
-except ImportError:
- _AST_FLAG = 0
- _ast = None
-
-
-class Source(object):
- """ a immutable object holding a source code fragment,
- possibly deindenting it.
- """
- _compilecounter = 0
-
- def __init__(self, *parts, **kwargs):
- self.lines = lines = []
- de = kwargs.get('deindent', True)
- rstrip = kwargs.get('rstrip', True)
- for part in parts:
- if not part:
- partlines = []
- if isinstance(part, Source):
- partlines = part.lines
- elif isinstance(part, (tuple, list)):
- partlines = [x.rstrip("\n") for x in part]
- elif isinstance(part, py.builtin._basestring):
- partlines = part.split('\n')
- if rstrip:
- while partlines:
- if partlines[-1].strip():
- break
- partlines.pop()
- else:
- partlines = getsource(part, deindent=de).lines
- if de:
- partlines = deindent(partlines)
- lines.extend(partlines)
-
- def __eq__(self, other):
- try:
- return self.lines == other.lines
- except AttributeError:
- if isinstance(other, str):
- return str(self) == other
- return False
-
- __hash__ = None
-
- def __getitem__(self, key):
- if isinstance(key, int):
- return self.lines[key]
- else:
- if key.step not in (None, 1):
- raise IndexError("cannot slice a Source with a step")
- newsource = Source()
- newsource.lines = self.lines[key.start:key.stop]
- return newsource
-
- def __len__(self):
- return len(self.lines)
-
- def strip(self):
- """ return new source object with trailing
- and leading blank lines removed.
- """
- start, end = 0, len(self)
- while start < end and not self.lines[start].strip():
- start += 1
- while end > start and not self.lines[end - 1].strip():
- end -= 1
- source = Source()
- source.lines[:] = self.lines[start:end]
- return source
-
- def putaround(self, before='', after='', indent=' ' * 4):
- """ return a copy of the source object with
- 'before' and 'after' wrapped around it.
- """
- before = Source(before)
- after = Source(after)
- newsource = Source()
- lines = [(indent + line) for line in self.lines]
- newsource.lines = before.lines + lines + after.lines
- return newsource
-
- def indent(self, indent=' ' * 4):
- """ return a copy of the source object with
- all lines indented by the given indent-string.
- """
- newsource = Source()
- newsource.lines = [(indent + line) for line in self.lines]
- return newsource
-
- def getstatement(self, lineno, assertion=False):
- """ return Source statement which contains the
- given linenumber (counted from 0).
- """
- start, end = self.getstatementrange(lineno, assertion)
- return self[start:end]
-
- def getstatementrange(self, lineno, assertion=False):
- """ return (start, end) tuple which spans the minimal
- statement region which containing the given lineno.
- """
- if not (0 <= lineno < len(self)):
- raise IndexError("lineno out of range")
- ast, start, end = getstatementrange_ast(lineno, self)
- return start, end
-
- def deindent(self, offset=None):
- """ return a new source object deindented by offset.
- If offset is None then guess an indentation offset from
- the first non-blank line. Subsequent lines which have a
- lower indentation offset will be copied verbatim as
- they are assumed to be part of multilines.
- """
- # XXX maybe use the tokenizer to properly handle multiline
- # strings etc.pp?
- newsource = Source()
- newsource.lines[:] = deindent(self.lines, offset)
- return newsource
-
- def isparseable(self, deindent=True):
- """ return True if source is parseable, heuristically
- deindenting it by default.
- """
- try:
- import parser
- except ImportError:
- def syntax_checker(x):
- return compile(x, 'asd', 'exec')
- else:
- syntax_checker = parser.suite
-
- if deindent:
- source = str(self.deindent())
- else:
- source = str(self)
- try:
- # compile(source+'\n', "x", "exec")
- syntax_checker(source + '\n')
- except KeyboardInterrupt:
- raise
- except Exception:
- return False
- else:
- return True
-
- def __str__(self):
- return "\n".join(self.lines)
-
- def compile(self, filename=None, mode='exec',
- flag=generators.compiler_flag,
- dont_inherit=0, _genframe=None):
- """ return compiled code object. if filename is None
- invent an artificial filename which displays
- the source/line position of the caller frame.
- """
- if not filename or py.path.local(filename).check(file=0):
- if _genframe is None:
- _genframe = sys._getframe(1) # the caller
- fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno
- base = "<%d-codegen " % self._compilecounter
- self.__class__._compilecounter += 1
- if not filename:
- filename = base + '%s:%d>' % (fn, lineno)
- else:
- filename = base + '%r %s:%d>' % (filename, fn, lineno)
- source = "\n".join(self.lines) + '\n'
- try:
- co = cpy_compile(source, filename, mode, flag)
- except SyntaxError:
- ex = sys.exc_info()[1]
- # re-represent syntax errors from parsing python strings
- msglines = self.lines[:ex.lineno]
- if ex.offset:
- msglines.append(" " * ex.offset + '^')
- msglines.append("(code was compiled probably from here: %s)" % filename)
- newex = SyntaxError('\n'.join(msglines))
- newex.offset = ex.offset
- newex.lineno = ex.lineno
- newex.text = ex.text
- raise newex
- else:
- if flag & _AST_FLAG:
- return co
- lines = [(x + "\n") for x in self.lines]
- py.std.linecache.cache[filename] = (1, None, lines, filename)
- return co
-
-#
-# public API shortcut functions
-#
-
-
-def compile_(source, filename=None, mode='exec', flags=generators.compiler_flag, dont_inherit=0):
- """ compile the given source to a raw code object,
- and maintain an internal cache which allows later
- retrieval of the source code for the code object
- and any recursively created code objects.
- """
- if _ast is not None and isinstance(source, _ast.AST):
- # XXX should Source support having AST?
- return cpy_compile(source, filename, mode, flags, dont_inherit)
- _genframe = sys._getframe(1) # the caller
- s = Source(source)
- co = s.compile(filename, mode, flags, _genframe=_genframe)
- return co
-
-
-def getfslineno(obj):
- """ Return source location (path, lineno) for the given object.
- If the source cannot be determined return ("", -1)
- """
- import _pytest._code
- try:
- code = _pytest._code.Code(obj)
- except TypeError:
- try:
- fn = (py.std.inspect.getsourcefile(obj) or
- py.std.inspect.getfile(obj))
- except TypeError:
- return "", -1
-
- fspath = fn and py.path.local(fn) or None
- lineno = -1
- if fspath:
- try:
- _, lineno = findsource(obj)
- except IOError:
- pass
- else:
- fspath = code.path
- lineno = code.firstlineno
- assert isinstance(lineno, int)
- return fspath, lineno
-
-#
-# helper functions
-#
-
-
-def findsource(obj):
- try:
- sourcelines, lineno = py.std.inspect.findsource(obj)
- except py.builtin._sysex:
- raise
- except: # noqa
- return None, -1
- source = Source()
- source.lines = [line.rstrip() for line in sourcelines]
- return source, lineno
-
-
-def getsource(obj, **kwargs):
- import _pytest._code
- obj = _pytest._code.getrawcode(obj)
- try:
- strsrc = inspect.getsource(obj)
- except IndentationError:
- strsrc = "\"Buggy python version consider upgrading, cannot get source\""
- assert isinstance(strsrc, str)
- return Source(strsrc, **kwargs)
-
-
-def deindent(lines, offset=None):
- if offset is None:
- for line in lines:
- line = line.expandtabs()
- s = line.lstrip()
- if s:
- offset = len(line) - len(s)
- break
- else:
- offset = 0
- if offset == 0:
- return list(lines)
- newlines = []
-
- def readline_generator(lines):
- for line in lines:
- yield line + '\n'
- while True:
- yield ''
-
- it = readline_generator(lines)
-
- try:
- for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
- if sline > len(lines):
- break # End of input reached
- if sline > len(newlines):
- line = lines[sline - 1].expandtabs()
- if line.lstrip() and line[:offset].isspace():
- line = line[offset:] # Deindent
- newlines.append(line)
-
- for i in range(sline, eline):
- # Don't deindent continuing lines of
- # multiline tokens (i.e. multiline strings)
- newlines.append(lines[i])
- except (IndentationError, tokenize.TokenError):
- pass
- # Add any lines we didn't see. E.g. if an exception was raised.
- newlines.extend(lines[len(newlines):])
- return newlines
-
-
-def get_statement_startend2(lineno, node):
- import ast
- # flatten all statements and except handlers into one lineno-list
- # AST's line numbers start indexing at 1
- values = []
- for x in ast.walk(node):
- if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler):
- values.append(x.lineno - 1)
- for name in "finalbody", "orelse":
- val = getattr(x, name, None)
- if val:
- # treat the finally/orelse part as its own statement
- values.append(val[0].lineno - 1 - 1)
- values.sort()
- insert_index = bisect_right(values, lineno)
- start = values[insert_index - 1]
- if insert_index >= len(values):
- end = None
- else:
- end = values[insert_index]
- return start, end
-
-
-def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
- if astnode is None:
- content = str(source)
- if sys.version_info < (2, 7):
- content += "\n"
- try:
- astnode = compile(content, "source", "exec", 1024) # 1024 for AST
- except ValueError:
- start, end = getstatementrange_old(lineno, source, assertion)
- return None, start, end
- start, end = get_statement_startend2(lineno, astnode)
- # we need to correct the end:
- # - ast-parsing strips comments
- # - there might be empty lines
- # - we might have lesser indented code blocks at the end
- if end is None:
- end = len(source.lines)
-
- if end > start + 1:
- # make sure we don't span differently indented code blocks
- # by using the BlockFinder helper used which inspect.getsource() uses itself
- block_finder = inspect.BlockFinder()
- # if we start with an indented line, put blockfinder to "started" mode
- block_finder.started = source.lines[start][0].isspace()
- it = ((x + "\n") for x in source.lines[start:end])
- try:
- for tok in tokenize.generate_tokens(lambda: next(it)):
- block_finder.tokeneater(*tok)
- except (inspect.EndOfBlock, IndentationError):
- end = block_finder.last + start
- except Exception:
- pass
-
- # the end might still point to a comment or empty line, correct it
- while end:
- line = source.lines[end - 1].lstrip()
- if line.startswith("#") or not line:
- end -= 1
- else:
- break
- return astnode, start, end
-
-
-def getstatementrange_old(lineno, source, assertion=False):
- """ return (start, end) tuple which spans the minimal
- statement region which containing the given lineno.
- raise an IndexError if no such statementrange can be found.
- """
- # XXX this logic is only used on python2.4 and below
- # 1. find the start of the statement
- from codeop import compile_command
- for start in range(lineno, -1, -1):
- if assertion:
- line = source.lines[start]
- # the following lines are not fully tested, change with care
- if 'super' in line and 'self' in line and '__init__' in line:
- raise IndexError("likely a subclass")
- if "assert" not in line and "raise" not in line:
- continue
- trylines = source.lines[start:lineno + 1]
- # quick hack to prepare parsing an indented line with
- # compile_command() (which errors on "return" outside defs)
- trylines.insert(0, 'def xxx():')
- trysource = '\n '.join(trylines)
- # ^ space here
- try:
- compile_command(trysource)
- except (SyntaxError, OverflowError, ValueError):
- continue
-
- # 2. find the end of the statement
- for end in range(lineno + 1, len(source) + 1):
- trysource = source[start:end]
- if trysource.isparseable():
- return start, end
- raise SyntaxError("no valid source range around line %d " % (lineno,))
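
For orientation, a short sketch of the Source helper defined above (again assuming the vendored package is importable): it deindents a fragment and resolves the multi-line statement containing a given 0-based line index.

    from _pytest._code import Source

    src = Source("""
        def f(x):
            return (x +
                    1)
    """).strip()                  # the constructor deindents; strip() drops blank edge lines
    print(str(src))               # the three deindented source lines
    stmt = src.getstatement(1)    # statement containing line index 1
    print(str(stmt))              # both physical lines of the return statement
    print(str(src.indent()))      # a copy re-indented by four spaces
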
diff --git a/lib/spack/external/pytest-fallback/_pytest/_pluggy.py b/lib/spack/external/pytest-fallback/_pytest/_pluggy.py
deleted file mode 100644
index 6cc1d3d54a..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/_pluggy.py
+++ /dev/null
@@ -1,11 +0,0 @@
-"""
-imports symbols from vendored "pluggy" if available, otherwise
-falls back to importing "pluggy" from the default namespace.
-"""
-from __future__ import absolute_import, division, print_function
-try:
- from _pytest.vendored_packages.pluggy import * # noqa
- from _pytest.vendored_packages.pluggy import __version__ # noqa
-except ImportError:
- from pluggy import * # noqa
- from pluggy import __version__ # noqa
diff --git a/lib/spack/external/pytest-fallback/_pytest/_version.py b/lib/spack/external/pytest-fallback/_pytest/_version.py
deleted file mode 100644
index 3edb7da9ad..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/_version.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# coding: utf-8
-# file generated by setuptools_scm
-# don't change, don't track in version control
-version = '3.2.5'
diff --git a/lib/spack/external/pytest-fallback/_pytest/assertion/__init__.py b/lib/spack/external/pytest-fallback/_pytest/assertion/__init__.py
deleted file mode 100644
index b0ef667d56..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/assertion/__init__.py
+++ /dev/null
@@ -1,148 +0,0 @@
-"""
-support for presenting detailed information in failing assertions.
-"""
-from __future__ import absolute_import, division, print_function
-import py
-import sys
-
-from _pytest.assertion import util
-from _pytest.assertion import rewrite
-from _pytest.assertion import truncate
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("debugconfig")
- group.addoption('--assert',
- action="store",
- dest="assertmode",
- choices=("rewrite", "plain",),
- default="rewrite",
- metavar="MODE",
- help="""Control assertion debugging tools. 'plain'
- performs no assertion debugging. 'rewrite'
- (the default) rewrites assert statements in
- test modules on import to provide assert
- expression information.""")
-
-
-def register_assert_rewrite(*names):
- """Register one or more module names to be rewritten on import.
-
- This function will make sure that this module or all modules inside
- the package will get their assert statements rewritten.
- Thus you should make sure to call this before the module is
- actually imported, usually in your __init__.py if you are a plugin
- using a package.
-
- :raise TypeError: if the given module names are not strings.
- """
- for name in names:
- if not isinstance(name, str):
- msg = 'expected module names as *args, got {0} instead'
- raise TypeError(msg.format(repr(names)))
- for hook in sys.meta_path:
- if isinstance(hook, rewrite.AssertionRewritingHook):
- importhook = hook
- break
- else:
- importhook = DummyRewriteHook()
- importhook.mark_rewrite(*names)
-
-
-class DummyRewriteHook(object):
- """A no-op import hook for when rewriting is disabled."""
-
- def mark_rewrite(self, *names):
- pass
-
-
-class AssertionState:
- """State for the assertion plugin."""
-
- def __init__(self, config, mode):
- self.mode = mode
- self.trace = config.trace.root.get("assertion")
- self.hook = None
-
-
-def install_importhook(config):
- """Try to install the rewrite hook, raise SystemError if it fails."""
- # Both Jython and CPython 2.6.0 have AST bugs that make the
- # assertion rewriting hook malfunction.
- if (sys.platform.startswith('java') or
- sys.version_info[:3] == (2, 6, 0)):
- raise SystemError('rewrite not supported')
-
- config._assertstate = AssertionState(config, 'rewrite')
- config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config)
- sys.meta_path.insert(0, hook)
- config._assertstate.trace('installed rewrite import hook')
-
- def undo():
- hook = config._assertstate.hook
- if hook is not None and hook in sys.meta_path:
- sys.meta_path.remove(hook)
-
- config.add_cleanup(undo)
- return hook
-
-
-def pytest_collection(session):
- # this hook is only called when test modules are collected
- # so for example not in the master process of pytest-xdist
- # (which does not collect test modules)
- assertstate = getattr(session.config, '_assertstate', None)
- if assertstate:
- if assertstate.hook is not None:
- assertstate.hook.set_session(session)
-
-
-def pytest_runtest_setup(item):
- """Setup the pytest_assertrepr_compare hook
-
- The newinterpret and rewrite modules will use util._reprcompare if
- it exists to use custom reporting via the
- pytest_assertrepr_compare hook. This sets up this custom
- comparison for the test.
- """
- def callbinrepr(op, left, right):
- """Call the pytest_assertrepr_compare hook and prepare the result
-
- This uses the first result from the hook and then ensures the
- following:
- * Overly verbose explanations are truncated unless configured otherwise
- (e.g. if running in verbose mode).
- * Embedded newlines are escaped to help util.format_explanation()
- later.
- * If the rewrite mode is used, embedded %-characters are replaced
- to protect later % formatting.
-
- The result can be formatted by util.format_explanation() for
- pretty printing.
- """
- hook_result = item.ihook.pytest_assertrepr_compare(
- config=item.config, op=op, left=left, right=right)
- for new_expl in hook_result:
- if new_expl:
- new_expl = truncate.truncate_if_required(new_expl, item)
- new_expl = [line.replace("\n", "\\n") for line in new_expl]
- res = py.builtin._totext("\n~").join(new_expl)
- if item.config.getvalue("assertmode") == "rewrite":
- res = res.replace("%", "%%")
- return res
- util._reprcompare = callbinrepr
-
-
-def pytest_runtest_teardown(item):
- util._reprcompare = None
-
-
-def pytest_sessionfinish(session):
- assertstate = getattr(session.config, '_assertstate', None)
- if assertstate:
- if assertstate.hook is not None:
- assertstate.hook.set_session(None)
-
-
-# Expose this plugin's implementation for the pytest_assertrepr_compare hook
-pytest_assertrepr_compare = util.assertrepr_compare
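
For context, the assertion plugin removed above wires custom comparison reporting through a module-level callback: pytest_runtest_setup installs callbinrepr as util._reprcompare, rewritten asserts consult it via the pytest_assertrepr_compare hook, and teardown clears it again. Below is a minimal standalone sketch of that callback pattern; the names call_reprcompare and my_assertrepr_compare are hypothetical stand-ins for illustration, not the plugin's actual API.

# Hypothetical sketch of the hook-slot pattern used above: a module-level
# callback is installed before a test runs, consulted when a comparison
# fails, and cleared afterwards.
_reprcompare = None  # plays the role of util._reprcompare

def call_reprcompare(op, left, right):
    # A rewritten assert would call something like this on failure; if a
    # custom hook is installed and returns an explanation, use it.
    if _reprcompare is not None:
        custom = _reprcompare(op, left, right)
        if custom is not None:
            return custom
    return "%r %s %r" % (left, op, right)

def my_assertrepr_compare(op, left, right):
    # Stand-in for a pytest_assertrepr_compare hook implementation.
    if op == "==" and isinstance(left, list) and isinstance(right, list):
        for i, (a, b) in enumerate(zip(left, right)):
            if a != b:
                return "lists first differ at index %d: %r != %r" % (i, a, b)
    return None

_reprcompare = my_assertrepr_compare                 # pytest_runtest_setup
print(call_reprcompare("==", [1, 2, 3], [1, 9, 3]))  # richer explanation
_reprcompare = None                                  # pytest_runtest_teardown
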
diff --git a/lib/spack/external/pytest-fallback/_pytest/assertion/rewrite.py b/lib/spack/external/pytest-fallback/_pytest/assertion/rewrite.py
deleted file mode 100644
index d48b6648fb..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/assertion/rewrite.py
+++ /dev/null
@@ -1,952 +0,0 @@
-"""Rewrite assertion AST to produce nice error messages"""
-from __future__ import absolute_import, division, print_function
-import ast
-import _ast
-import errno
-import itertools
-import imp
-import marshal
-import os
-import re
-import struct
-import sys
-import types
-
-import py
-from _pytest.assertion import util
-
-
-# pytest caches rewritten pycs in __pycache__.
-if hasattr(imp, "get_tag"):
- PYTEST_TAG = imp.get_tag() + "-PYTEST"
-else:
- if hasattr(sys, "pypy_version_info"):
- impl = "pypy"
- elif sys.platform == "java":
- impl = "jython"
- else:
- impl = "cpython"
- ver = sys.version_info
- PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
- del ver, impl
-
-PYC_EXT = ".py" + (__debug__ and "c" or "o")
-PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
-
-REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
-ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
-
-if sys.version_info >= (3, 5):
- ast_Call = ast.Call
-else:
- def ast_Call(a, b, c):
- return ast.Call(a, b, c, None, None)
-
-
-class AssertionRewritingHook(object):
- """PEP302 Import hook which rewrites asserts."""
-
- def __init__(self, config):
- self.config = config
- self.fnpats = config.getini("python_files")
- self.session = None
- self.modules = {}
- self._rewritten_names = set()
- self._register_with_pkg_resources()
- self._must_rewrite = set()
-
- def set_session(self, session):
- self.session = session
-
- def find_module(self, name, path=None):
- state = self.config._assertstate
- state.trace("find_module called for: %s" % name)
- names = name.rsplit(".", 1)
- lastname = names[-1]
- pth = None
- if path is not None:
- # Starting with Python 3.3, path is a _NamespacePath(), which
- # causes problems if not converted to list.
- path = list(path)
- if len(path) == 1:
- pth = path[0]
- if pth is None:
- try:
- fd, fn, desc = imp.find_module(lastname, path)
- except ImportError:
- return None
- if fd is not None:
- fd.close()
- tp = desc[2]
- if tp == imp.PY_COMPILED:
- if hasattr(imp, "source_from_cache"):
- try:
- fn = imp.source_from_cache(fn)
- except ValueError:
- # Python 3 doesn't like orphaned but still-importable
- # .pyc files.
- fn = fn[:-1]
- else:
- fn = fn[:-1]
- elif tp != imp.PY_SOURCE:
- # Don't know what this is.
- return None
- else:
- fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
-
- fn_pypath = py.path.local(fn)
- if not self._should_rewrite(name, fn_pypath, state):
- return None
-
- self._rewritten_names.add(name)
-
- # The requested module looks like a test file, so rewrite it. This is
- # the most magical part of the process: load the source, rewrite the
- # asserts, and load the rewritten source. We also cache the rewritten
- # module code in a special pyc. We must be aware of the possibility of
- # concurrent pytest processes rewriting and loading pycs. To avoid
- # tricky race conditions, we maintain the following invariant: The
- # cached pyc is always a complete, valid pyc. Operations on it must be
- # atomic. POSIX's atomic rename comes in handy.
- write = not sys.dont_write_bytecode
- cache_dir = os.path.join(fn_pypath.dirname, "__pycache__")
- if write:
- try:
- os.mkdir(cache_dir)
- except OSError:
- e = sys.exc_info()[1].errno
- if e == errno.EEXIST:
- # Either the __pycache__ directory already exists (the
- # common case) or it's blocked by a non-dir node. In the
- # latter case, we'll ignore it in _write_pyc.
- pass
- elif e in [errno.ENOENT, errno.ENOTDIR]:
- # One of the path components was not a directory, likely
- # because we're in a zip file.
- write = False
- elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
- state.trace("read only directory: %r" % fn_pypath.dirname)
- write = False
- else:
- raise
- cache_name = fn_pypath.basename[:-3] + PYC_TAIL
- pyc = os.path.join(cache_dir, cache_name)
- # Notice that even if we're in a read-only directory, I'm going
- # to check for a cached pyc. This may not be optimal...
- co = _read_pyc(fn_pypath, pyc, state.trace)
- if co is None:
- state.trace("rewriting %r" % (fn,))
- source_stat, co = _rewrite_test(self.config, fn_pypath)
- if co is None:
- # Probably a SyntaxError in the test.
- return None
- if write:
- _make_rewritten_pyc(state, source_stat, pyc, co)
- else:
- state.trace("found cached rewritten pyc for %r" % (fn,))
- self.modules[name] = co, pyc
- return self
-
- def _should_rewrite(self, name, fn_pypath, state):
- # always rewrite conftest files
- fn = str(fn_pypath)
- if fn_pypath.basename == 'conftest.py':
- state.trace("rewriting conftest file: %r" % (fn,))
- return True
-
- if self.session is not None:
- if self.session.isinitpath(fn):
- state.trace("matched test file (was specified on cmdline): %r" %
- (fn,))
- return True
-
- # modules not passed explicitly on the command line are only
- # rewritten if they match the naming convention for test files
- for pat in self.fnpats:
- if fn_pypath.fnmatch(pat):
- state.trace("matched test file %r" % (fn,))
- return True
-
- for marked in self._must_rewrite:
- if name.startswith(marked):
- state.trace("matched marked file %r (from %r)" % (name, marked))
- return True
-
- return False
-
- def mark_rewrite(self, *names):
- """Mark import names as needing to be re-written.
-
- The named module or package as well as any nested modules will
- be re-written on import.
- """
- already_imported = set(names).intersection(set(sys.modules))
- if already_imported:
- for name in already_imported:
- if name not in self._rewritten_names:
- self._warn_already_imported(name)
- self._must_rewrite.update(names)
-
- def _warn_already_imported(self, name):
- self.config.warn(
- 'P1',
- 'Module already imported so can not be re-written: %s' % name)
-
- def load_module(self, name):
- # If there is an existing module object named 'fullname' in
- # sys.modules, the loader must use that existing module. (Otherwise,
- # the reload() builtin will not work correctly.)
- if name in sys.modules:
- return sys.modules[name]
-
- co, pyc = self.modules.pop(name)
- # I wish I could just call imp.load_compiled here, but __file__ has to
- # be set properly. In Python 3.2+, this all would be handled correctly
- # by load_compiled.
- mod = sys.modules[name] = imp.new_module(name)
- try:
- mod.__file__ = co.co_filename
- # Normally, this attribute is 3.2+.
- mod.__cached__ = pyc
- mod.__loader__ = self
- py.builtin.exec_(co, mod.__dict__)
- except: # noqa
- if name in sys.modules:
- del sys.modules[name]
- raise
- return sys.modules[name]
-
- def is_package(self, name):
- try:
- fd, fn, desc = imp.find_module(name)
- except ImportError:
- return False
- if fd is not None:
- fd.close()
- tp = desc[2]
- return tp == imp.PKG_DIRECTORY
-
- @classmethod
- def _register_with_pkg_resources(cls):
- """
- Ensure package resources can be loaded from this loader. May be called
- multiple times, as the operation is idempotent.
- """
- try:
- import pkg_resources
- # access an attribute in case a deferred importer is present
- pkg_resources.__name__
- except ImportError:
- return
-
- # Since pytest tests are always located in the file system, the
- # DefaultProvider is appropriate.
- pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
-
- def get_data(self, pathname):
- """Optional PEP302 get_data API.
- """
- with open(pathname, 'rb') as f:
- return f.read()
-
-
-def _write_pyc(state, co, source_stat, pyc):
- # Technically, we don't have to have the same pyc format as
- # (C)Python, since these "pycs" should never be seen by builtin
- # import. However, there's little reason to deviate, and I hope
- # sometime to be able to use imp.load_compiled to load them. (See
- # the comment in load_module above.)
- try:
- fp = open(pyc, "wb")
- except IOError:
- err = sys.exc_info()[1].errno
- state.trace("error writing pyc file at %s: errno=%s" % (pyc, err))
- # we ignore any failure to write the cache file
- # there are many reasons, permission-denied, __pycache__ being a
- # file etc.
- return False
- try:
- fp.write(imp.get_magic())
- mtime = int(source_stat.mtime)
- size = source_stat.size & 0xFFFFFFFF
- fp.write(struct.pack("<ll", mtime, size))
- marshal.dump(co, fp)
- finally:
- fp.close()
- return True
-
-
-RN = "\r\n".encode("utf-8")
-N = "\n".encode("utf-8")
-
-cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
-BOM_UTF8 = '\xef\xbb\xbf'
-
-
-def _rewrite_test(config, fn):
- """Try to read and rewrite *fn* and return the code object."""
- state = config._assertstate
- try:
- stat = fn.stat()
- source = fn.read("rb")
- except EnvironmentError:
- return None, None
- if ASCII_IS_DEFAULT_ENCODING:
- # ASCII is the default encoding in Python 2. Without a coding
- # declaration, Python 2 will complain about any bytes in the file
- # outside the ASCII range. Sadly, this behavior does not extend to
- # compile() or ast.parse(), which prefer to interpret the bytes as
- # latin-1. (At least they properly handle explicit coding cookies.) To
- # preserve this error behavior, we could force ast.parse() to use ASCII
- # as the encoding by inserting a coding cookie. Unfortunately, that
- # messes up line numbers. Thus, we have to check ourselves if anything
- # is outside the ASCII range in the case no encoding is explicitly
- # declared. For more context, see issue #269. Yay for Python 3 which
- # gets this right.
- end1 = source.find("\n")
- end2 = source.find("\n", end1 + 1)
- if (not source.startswith(BOM_UTF8) and
- cookie_re.match(source[0:end1]) is None and
- cookie_re.match(source[end1 + 1:end2]) is None):
- if hasattr(state, "_indecode"):
- # encodings imported us again, so don't rewrite.
- return None, None
- state._indecode = True
- try:
- try:
- source.decode("ascii")
- except UnicodeDecodeError:
- # Let it fail in real import.
- return None, None
- finally:
- del state._indecode
- # On Python versions which are not 2.7 and less than or equal to 3.1, the
- # parser expects *nix newlines.
- if REWRITE_NEWLINES:
- source = source.replace(RN, N) + N
- try:
- tree = ast.parse(source)
- except SyntaxError:
- # Let this pop up again in the real import.
- state.trace("failed to parse: %r" % (fn,))
- return None, None
- rewrite_asserts(tree, fn, config)
- try:
- co = compile(tree, fn.strpath, "exec", dont_inherit=True)
- except SyntaxError:
- # It's possible that this error is from some bug in the
- # assertion rewriting, but I don't know of a fast way to tell.
- state.trace("failed to compile: %r" % (fn,))
- return None, None
- return stat, co
-
-
-def _make_rewritten_pyc(state, source_stat, pyc, co):
- """Try to dump rewritten code to *pyc*."""
- if sys.platform.startswith("win"):
- # Windows grants exclusive access to open files and doesn't have atomic
- # rename, so just write into the final file.
- _write_pyc(state, co, source_stat, pyc)
- else:
- # When not on windows, assume rename is atomic. Dump the code object
- # into a file specific to this process and atomically replace it.
- proc_pyc = pyc + "." + str(os.getpid())
- if _write_pyc(state, co, source_stat, proc_pyc):
- os.rename(proc_pyc, pyc)
-
-
-def _read_pyc(source, pyc, trace=lambda x: None):
- """Possibly read a pytest pyc containing rewritten code.
-
- Return rewritten code if successful or None if not.
- """
- try:
- fp = open(pyc, "rb")
- except IOError:
- return None
- with fp:
- try:
- mtime = int(source.mtime())
- size = source.size()
- data = fp.read(12)
- except EnvironmentError as e:
- trace('_read_pyc(%s): EnvironmentError %s' % (source, e))
- return None
- # Check for invalid or out of date pyc file.
- if (len(data) != 12 or data[:4] != imp.get_magic() or
- struct.unpack("<ll", data[4:]) != (mtime, size)):
- trace('_read_pyc(%s): invalid or out of date pyc' % source)
- return None
- try:
- co = marshal.load(fp)
- except Exception as e:
- trace('_read_pyc(%s): marshal.load error %s' % (source, e))
- return None
- if not isinstance(co, types.CodeType):
- trace('_read_pyc(%s): not a code object' % source)
- return None
- return co
-
-
-def rewrite_asserts(mod, module_path=None, config=None):
- """Rewrite the assert statements in mod."""
- AssertionRewriter(module_path, config).run(mod)
-
-
-def _saferepr(obj):
- """Get a safe repr of an object for assertion error messages.
-
- The assertion formatting (util.format_explanation()) requires
- newlines to be escaped since they are a special character for it.
- Normally assertion.util.format_explanation() does this, but a
- custom repr may itself contain one of the special escape
- sequences; in particular '\n{' and '\n}' are likely to be present in
- JSON reprs.
-
- """
- repr = py.io.saferepr(obj)
- if py.builtin._istext(repr):
- t = py.builtin.text
- else:
- t = py.builtin.bytes
- return repr.replace(t("\n"), t("\\n"))
-
-
-from _pytest.assertion.util import format_explanation as _format_explanation # noqa
-
-
-def _format_assertmsg(obj):
- """Format the custom assertion message given.
-
- For strings this simply replaces newlines with '\n~' so that
- util.format_explanation() will preserve them instead of escaping
- newlines. For other objects py.io.saferepr() is used first.
-
- """
- # reprlib appears to have a bug which means that if a string
- # contains a newline it gets escaped; however, if an object has a
- # .__repr__() which contains newlines it does not get escaped.
- # In either case we want to preserve the newline.
- if py.builtin._istext(obj) or py.builtin._isbytes(obj):
- s = obj
- is_repr = False
- else:
- s = py.io.saferepr(obj)
- is_repr = True
- if py.builtin._istext(s):
- t = py.builtin.text
- else:
- t = py.builtin.bytes
- s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
- if is_repr:
- s = s.replace(t("\\n"), t("\n~"))
- return s
-
-
-def _should_repr_global_name(obj):
- return not hasattr(obj, "__name__") and not py.builtin.callable(obj)
-
-
-def _format_boolop(explanations, is_or):
- explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
- if py.builtin._istext(explanation):
- t = py.builtin.text
- else:
- t = py.builtin.bytes
- return explanation.replace(t('%'), t('%%'))
-
-
-def _call_reprcompare(ops, results, expls, each_obj):
- for i, res, expl in zip(range(len(ops)), results, expls):
- try:
- done = not res
- except Exception:
- done = True
- if done:
- break
- if util._reprcompare is not None:
- custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
- if custom is not None:
- return custom
- return expl
-
-
-unary_map = {
- ast.Not: "not %s",
- ast.Invert: "~%s",
- ast.USub: "-%s",
- ast.UAdd: "+%s"
-}
-
-binop_map = {
- ast.BitOr: "|",
- ast.BitXor: "^",
- ast.BitAnd: "&",
- ast.LShift: "<<",
- ast.RShift: ">>",
- ast.Add: "+",
- ast.Sub: "-",
- ast.Mult: "*",
- ast.Div: "/",
- ast.FloorDiv: "//",
- ast.Mod: "%%", # escaped for string formatting
- ast.Eq: "==",
- ast.NotEq: "!=",
- ast.Lt: "<",
- ast.LtE: "<=",
- ast.Gt: ">",
- ast.GtE: ">=",
- ast.Pow: "**",
- ast.Is: "is",
- ast.IsNot: "is not",
- ast.In: "in",
- ast.NotIn: "not in"
-}
-# Python 3.5+ compatibility
-try:
- binop_map[ast.MatMult] = "@"
-except AttributeError:
- pass
-
-# Python 3.4+ compatibility
-if hasattr(ast, "NameConstant"):
- _NameConstant = ast.NameConstant
-else:
- def _NameConstant(c):
- return ast.Name(str(c), ast.Load())
-
-
-def set_location(node, lineno, col_offset):
- """Set node location information recursively."""
- def _fix(node, lineno, col_offset):
- if "lineno" in node._attributes:
- node.lineno = lineno
- if "col_offset" in node._attributes:
- node.col_offset = col_offset
- for child in ast.iter_child_nodes(node):
- _fix(child, lineno, col_offset)
- _fix(node, lineno, col_offset)
- return node
-
-
-class AssertionRewriter(ast.NodeVisitor):
- """Assertion rewriting implementation.
-
- The main entrypoint is to call .run() with an ast.Module instance;
- this will then find all the assert statements and re-write them to
- provide intermediate values and a detailed assertion error. See
- http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
- for an overview of how this works.
-
- The entry point here is .run() which will iterate over all the
- statements in an ast.Module and for each ast.Assert statement it
- finds call .visit() with it. Then .visit_Assert() takes over and
- is responsible for creating new ast statements to replace the
- original assert statement: it re-writes the test of an assertion
- to provide intermediate values and replace it with an if statement
- which raises an assertion error with a detailed explanation in
- case the expression is false.
-
- For this .visit_Assert() uses the visitor pattern to visit all the
- AST nodes of the ast.Assert.test field, each visit call returning
- an AST node and the corresponding explanation string. During this,
- state is kept in several instance attributes:
-
- :statements: All the AST statements which will replace the assert
- statement.
-
- :variables: This is populated by .variable() with each variable
- used by the statements so that they can all be set to None at
- the end of the statements.
-
- :variable_counter: Counter to create new unique variables needed
- by statements. Variables are created using .variable() and
- have the form of "@py_assert0".
-
- :on_failure: The AST statements which will be executed if the
- assertion test fails. This is the code which will construct
- the failure message and raises the AssertionError.
-
- :explanation_specifiers: A dict filled by .explanation_param()
- with %-formatting placeholders and their corresponding
- expressions to use in the building of an assertion message.
- This is used by .pop_format_context() to build a message.
-
- :stack: A stack of the explanation_specifiers dicts maintained by
- .push_format_context() and .pop_format_context() which allows
- to build another %-formatted string while already building one.
-
- This state is reset on every new assert statement visited and used
- by the other visitors.
-
- """
-
- def __init__(self, module_path, config):
- super(AssertionRewriter, self).__init__()
- self.module_path = module_path
- self.config = config
-
- def run(self, mod):
- """Find all assert statements in *mod* and rewrite them."""
- if not mod.body:
- # Nothing to do.
- return
- # Insert some special imports at the top of the module but after any
- # docstrings and __future__ imports.
- aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
- ast.alias("_pytest.assertion.rewrite", "@pytest_ar")]
- doc = getattr(mod, "docstring", None)
- expect_docstring = doc is None
- if doc is not None and self.is_rewrite_disabled(doc):
- return
- pos = 0
- lineno = 1
- for item in mod.body:
- if (expect_docstring and isinstance(item, ast.Expr) and
- isinstance(item.value, ast.Str)):
- doc = item.value.s
- if self.is_rewrite_disabled(doc):
- return
- expect_docstring = False
- elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
- item.module != "__future__"):
- lineno = item.lineno
- break
- pos += 1
- else:
- lineno = item.lineno
- imports = [ast.Import([alias], lineno=lineno, col_offset=0)
- for alias in aliases]
- mod.body[pos:pos] = imports
- # Collect asserts.
- nodes = [mod]
- while nodes:
- node = nodes.pop()
- for name, field in ast.iter_fields(node):
- if isinstance(field, list):
- new = []
- for i, child in enumerate(field):
- if isinstance(child, ast.Assert):
- # Transform assert.
- new.extend(self.visit(child))
- else:
- new.append(child)
- if isinstance(child, ast.AST):
- nodes.append(child)
- setattr(node, name, new)
- elif (isinstance(field, ast.AST) and
- # Don't recurse into expressions as they can't contain
- # asserts.
- not isinstance(field, ast.expr)):
- nodes.append(field)
-
- def is_rewrite_disabled(self, docstring):
- return "PYTEST_DONT_REWRITE" in docstring
-
- def variable(self):
- """Get a new variable."""
- # Use a character invalid in python identifiers to avoid clashing.
- name = "@py_assert" + str(next(self.variable_counter))
- self.variables.append(name)
- return name
-
- def assign(self, expr):
- """Give *expr* a name."""
- name = self.variable()
- self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
- return ast.Name(name, ast.Load())
-
- def display(self, expr):
- """Call py.io.saferepr on the expression."""
- return self.helper("saferepr", expr)
-
- def helper(self, name, *args):
- """Call a helper in this module."""
- py_name = ast.Name("@pytest_ar", ast.Load())
- attr = ast.Attribute(py_name, "_" + name, ast.Load())
- return ast_Call(attr, list(args), [])
-
- def builtin(self, name):
- """Return the builtin called *name*."""
- builtin_name = ast.Name("@py_builtins", ast.Load())
- return ast.Attribute(builtin_name, name, ast.Load())
-
- def explanation_param(self, expr):
- """Return a new named %-formatting placeholder for expr.
-
- This creates a %-formatting placeholder for expr in the
- current formatting context, e.g. ``%(py0)s``. The placeholder
- and expr are placed in the current format context so that it
- can be used on the next call to .pop_format_context().
-
- """
- specifier = "py" + str(next(self.variable_counter))
- self.explanation_specifiers[specifier] = expr
- return "%(" + specifier + ")s"
-
- def push_format_context(self):
- """Create a new formatting context.
-
- The format context is used for when an explanation wants to
- have a variable value formatted in the assertion message. In
- this case the value required can be added using
- .explanation_param(). Finally .pop_format_context() is used
- to format a string of %-formatted values as added by
- .explanation_param().
-
- """
- self.explanation_specifiers = {}
- self.stack.append(self.explanation_specifiers)
-
- def pop_format_context(self, expl_expr):
- """Format the %-formatted string with current format context.
-
- The expl_expr should be an ast.Str instance constructed from
- the %-placeholders created by .explanation_param(). This will
- add the required code to format said string to .on_failure and
- return the ast.Name instance of the formatted string.
-
- """
- current = self.stack.pop()
- if self.stack:
- self.explanation_specifiers = self.stack[-1]
- keys = [ast.Str(key) for key in current.keys()]
- format_dict = ast.Dict(keys, list(current.values()))
- form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
- name = "@py_format" + str(next(self.variable_counter))
- self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
- return ast.Name(name, ast.Load())
-
- def generic_visit(self, node):
- """Handle expressions we don't have custom code for."""
- assert isinstance(node, ast.expr)
- res = self.assign(node)
- return res, self.explanation_param(self.display(res))
-
- def visit_Assert(self, assert_):
- """Return the AST statements to replace the ast.Assert instance.
-
- This re-writes the test of an assertion to provide
- intermediate values and replace it with an if statement which
- raises an assertion error with a detailed explanation in case
- the expression is false.
-
- """
- if isinstance(assert_.test, ast.Tuple) and self.config is not None:
- fslocation = (self.module_path, assert_.lineno)
- self.config.warn('R1', 'assertion is always true, perhaps '
- 'remove parentheses?', fslocation=fslocation)
- self.statements = []
- self.variables = []
- self.variable_counter = itertools.count()
- self.stack = []
- self.on_failure = []
- self.push_format_context()
- # Rewrite assert into a bunch of statements.
- top_condition, explanation = self.visit(assert_.test)
- # Create failure message.
- body = self.on_failure
- negation = ast.UnaryOp(ast.Not(), top_condition)
- self.statements.append(ast.If(negation, body, []))
- if assert_.msg:
- assertmsg = self.helper('format_assertmsg', assert_.msg)
- explanation = "\n>assert " + explanation
- else:
- assertmsg = ast.Str("")
- explanation = "assert " + explanation
- template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
- msg = self.pop_format_context(template)
- fmt = self.helper("format_explanation", msg)
- err_name = ast.Name("AssertionError", ast.Load())
- exc = ast_Call(err_name, [fmt], [])
- if sys.version_info[0] >= 3:
- raise_ = ast.Raise(exc, None)
- else:
- raise_ = ast.Raise(exc, None, None)
- body.append(raise_)
- # Clear temporary variables by setting them to None.
- if self.variables:
- variables = [ast.Name(name, ast.Store())
- for name in self.variables]
- clear = ast.Assign(variables, _NameConstant(None))
- self.statements.append(clear)
- # Fix line numbers.
- for stmt in self.statements:
- set_location(stmt, assert_.lineno, assert_.col_offset)
- return self.statements
-
- def visit_Name(self, name):
- # Display the repr of the name if it's a local variable or
- # _should_repr_global_name() thinks it's acceptable.
- locs = ast_Call(self.builtin("locals"), [], [])
- inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
- dorepr = self.helper("should_repr_global_name", name)
- test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
- expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
- return name, self.explanation_param(expr)
-
- def visit_BoolOp(self, boolop):
- res_var = self.variable()
- expl_list = self.assign(ast.List([], ast.Load()))
- app = ast.Attribute(expl_list, "append", ast.Load())
- is_or = int(isinstance(boolop.op, ast.Or))
- body = save = self.statements
- fail_save = self.on_failure
- levels = len(boolop.values) - 1
- self.push_format_context()
- # Process each operand, short-circuiting if needed.
- for i, v in enumerate(boolop.values):
- if i:
- fail_inner = []
- # cond is set in a prior loop iteration below
- self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
- self.on_failure = fail_inner
- self.push_format_context()
- res, expl = self.visit(v)
- body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
- expl_format = self.pop_format_context(ast.Str(expl))
- call = ast_Call(app, [expl_format], [])
- self.on_failure.append(ast.Expr(call))
- if i < levels:
- cond = res
- if is_or:
- cond = ast.UnaryOp(ast.Not(), cond)
- inner = []
- self.statements.append(ast.If(cond, inner, []))
- self.statements = body = inner
- self.statements = save
- self.on_failure = fail_save
- expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
- expl = self.pop_format_context(expl_template)
- return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
-
- def visit_UnaryOp(self, unary):
- pattern = unary_map[unary.op.__class__]
- operand_res, operand_expl = self.visit(unary.operand)
- res = self.assign(ast.UnaryOp(unary.op, operand_res))
- return res, pattern % (operand_expl,)
-
- def visit_BinOp(self, binop):
- symbol = binop_map[binop.op.__class__]
- left_expr, left_expl = self.visit(binop.left)
- right_expr, right_expl = self.visit(binop.right)
- explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
- res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
- return res, explanation
-
- def visit_Call_35(self, call):
- """
- visit `ast.Call` nodes on Python 3.5 and after
- """
- new_func, func_expl = self.visit(call.func)
- arg_expls = []
- new_args = []
- new_kwargs = []
- for arg in call.args:
- res, expl = self.visit(arg)
- arg_expls.append(expl)
- new_args.append(res)
- for keyword in call.keywords:
- res, expl = self.visit(keyword.value)
- new_kwargs.append(ast.keyword(keyword.arg, res))
- if keyword.arg:
- arg_expls.append(keyword.arg + "=" + expl)
- else: # **args have `arg` keywords with an .arg of None
- arg_expls.append("**" + expl)
-
- expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
- new_call = ast.Call(new_func, new_args, new_kwargs)
- res = self.assign(new_call)
- res_expl = self.explanation_param(self.display(res))
- outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
- return res, outer_expl
-
- def visit_Starred(self, starred):
- # From Python 3.5, a Starred node can appear in a function call
- res, expl = self.visit(starred.value)
- return starred, '*' + expl
-
- def visit_Call_legacy(self, call):
- """
- visit `ast.Call` nodes on Python 3.4 and below
- """
- new_func, func_expl = self.visit(call.func)
- arg_expls = []
- new_args = []
- new_kwargs = []
- new_star = new_kwarg = None
- for arg in call.args:
- res, expl = self.visit(arg)
- new_args.append(res)
- arg_expls.append(expl)
- for keyword in call.keywords:
- res, expl = self.visit(keyword.value)
- new_kwargs.append(ast.keyword(keyword.arg, res))
- arg_expls.append(keyword.arg + "=" + expl)
- if call.starargs:
- new_star, expl = self.visit(call.starargs)
- arg_expls.append("*" + expl)
- if call.kwargs:
- new_kwarg, expl = self.visit(call.kwargs)
- arg_expls.append("**" + expl)
- expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
- new_call = ast.Call(new_func, new_args, new_kwargs,
- new_star, new_kwarg)
- res = self.assign(new_call)
- res_expl = self.explanation_param(self.display(res))
- outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
- return res, outer_expl
-
- # ast.Call signature changed on 3.5,
- # conditionally change which method is named
- # visit_Call depending on Python version
- if sys.version_info >= (3, 5):
- visit_Call = visit_Call_35
- else:
- visit_Call = visit_Call_legacy
-
- def visit_Attribute(self, attr):
- if not isinstance(attr.ctx, ast.Load):
- return self.generic_visit(attr)
- value, value_expl = self.visit(attr.value)
- res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
- res_expl = self.explanation_param(self.display(res))
- pat = "%s\n{%s = %s.%s\n}"
- expl = pat % (res_expl, res_expl, value_expl, attr.attr)
- return res, expl
-
- def visit_Compare(self, comp):
- self.push_format_context()
- left_res, left_expl = self.visit(comp.left)
- if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
- left_expl = "({0})".format(left_expl)
- res_variables = [self.variable() for i in range(len(comp.ops))]
- load_names = [ast.Name(v, ast.Load()) for v in res_variables]
- store_names = [ast.Name(v, ast.Store()) for v in res_variables]
- it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
- expls = []
- syms = []
- results = [left_res]
- for i, op, next_operand in it:
- next_res, next_expl = self.visit(next_operand)
- if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
- next_expl = "({0})".format(next_expl)
- results.append(next_res)
- sym = binop_map[op.__class__]
- syms.append(ast.Str(sym))
- expl = "%s %s %s" % (left_expl, sym, next_expl)
- expls.append(ast.Str(expl))
- res_expr = ast.Compare(left_res, [op], [next_res])
- self.statements.append(ast.Assign([store_names[i]], res_expr))
- left_res, left_expl = next_res, next_expl
- # Use _pytest.assertion.util._reprcompare if that's available.
- expl_call = self.helper("call_reprcompare",
- ast.Tuple(syms, ast.Load()),
- ast.Tuple(load_names, ast.Load()),
- ast.Tuple(expls, ast.Load()),
- ast.Tuple(results, ast.Load()))
- if len(comp.ops) > 1:
- res = ast.BoolOp(ast.And(), load_names)
- else:
- res = load_names[0]
- return res, self.explanation_param(self.pop_format_context(expl_call))
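
rewrite.py, removed above, is the heart of the mechanism: it parses each test module, replaces every assert statement with code that records intermediate values, and caches the result as a special pyc. The snippet below is a much-simplified, standalone sketch of only the statement-level transformation (it assumes Python 3.9+ for ast.unparse, and TinyRewriter is an invented name); it omits the intermediate-value capture, %-formatting contexts, pyc caching, and import-hook machinery of the real AssertionRewriter.

import ast

class TinyRewriter(ast.NodeTransformer):
    # Replace `assert <test>[, <msg>]` with an explicit if/raise carrying
    # either the user message or the source text of the failing test.
    def visit_Assert(self, node):
        msg = node.msg if node.msg is not None else ast.Constant(
            "assert " + ast.unparse(node.test))
        raise_ = ast.Raise(
            exc=ast.Call(func=ast.Name("AssertionError", ast.Load()),
                         args=[msg], keywords=[]),
            cause=None)
        new = ast.If(test=ast.UnaryOp(ast.Not(), node.test),
                     body=[raise_], orelse=[])
        return ast.copy_location(new, node)

source = "x = 1\nassert x == 2\n"
tree = ast.fix_missing_locations(TinyRewriter().visit(ast.parse(source)))
try:
    exec(compile(tree, "<rewritten>", "exec"), {})
except AssertionError as exc:
    print(exc)  # -> assert x == 2
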
diff --git a/lib/spack/external/pytest-fallback/_pytest/assertion/truncate.py b/lib/spack/external/pytest-fallback/_pytest/assertion/truncate.py
deleted file mode 100644
index 1e13063569..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/assertion/truncate.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""
-Utilities for truncating assertion output.
-
-Current default behaviour is to truncate assertion explanations at
-~8 terminal lines, unless running in "-vv" mode or running on CI.
-"""
-from __future__ import absolute_import, division, print_function
-import os
-
-import py
-
-
-DEFAULT_MAX_LINES = 8
-DEFAULT_MAX_CHARS = 8 * 80
-USAGE_MSG = "use '-vv' to show"
-
-
-def truncate_if_required(explanation, item, max_length=None):
- """
- Truncate this assertion explanation if the given test item is eligible.
- """
- if _should_truncate_item(item):
- return _truncate_explanation(explanation)
- return explanation
-
-
-def _should_truncate_item(item):
- """
- Whether or not this test item is eligible for truncation.
- """
- verbose = item.config.option.verbose
- return verbose < 2 and not _running_on_ci()
-
-
-def _running_on_ci():
- """Check if we're currently running on a CI system."""
- env_vars = ['CI', 'BUILD_NUMBER']
- return any(var in os.environ for var in env_vars)
-
-
-def _truncate_explanation(input_lines, max_lines=None, max_chars=None):
- """
- Truncate given list of strings that makes up the assertion explanation.
-
- Truncates to either 8 lines, or 640 characters - whichever the input reaches
- first. The remaining lines will be replaced by a usage message.
- """
-
- if max_lines is None:
- max_lines = DEFAULT_MAX_LINES
- if max_chars is None:
- max_chars = DEFAULT_MAX_CHARS
-
- # Check if truncation required
- input_char_count = len("".join(input_lines))
- if len(input_lines) <= max_lines and input_char_count <= max_chars:
- return input_lines
-
- # Truncate first to max_lines, and then truncate to max_chars if max_chars
- # is exceeded.
- truncated_explanation = input_lines[:max_lines]
- truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars)
-
- # Add ellipsis to final line
- truncated_explanation[-1] = truncated_explanation[-1] + "..."
-
- # Append useful message to explanation
- truncated_line_count = len(input_lines) - len(truncated_explanation)
- truncated_line_count += 1 # Account for the part-truncated final line
- msg = '...Full output truncated'
- if truncated_line_count == 1:
- msg += ' ({0} line hidden)'.format(truncated_line_count)
- else:
- msg += ' ({0} lines hidden)'.format(truncated_line_count)
- msg += ", {0}" .format(USAGE_MSG)
- truncated_explanation.extend([
- py.builtin._totext(""),
- py.builtin._totext(msg),
- ])
- return truncated_explanation
-
-
-def _truncate_by_char_count(input_lines, max_chars):
- # Check if truncation required
- if len("".join(input_lines)) <= max_chars:
- return input_lines
-
- # Find point at which input length exceeds total allowed length
- iterated_char_count = 0
- for iterated_index, input_line in enumerate(input_lines):
- if iterated_char_count + len(input_line) > max_chars:
- break
- iterated_char_count += len(input_line)
-
- # Create truncated explanation with modified final line
- truncated_result = input_lines[:iterated_index]
- final_line = input_lines[iterated_index]
- if final_line:
- final_line_truncate_point = max_chars - iterated_char_count
- final_line = final_line[:final_line_truncate_point]
- truncated_result.append(final_line)
- return truncated_result
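
truncate.py above caps long assertion explanations at roughly 8 lines / 640 characters unless -vv is given or a CI environment is detected. A rough standalone sketch of that policy follows (not the module's code, and without the CI check).

def truncate_explanation(lines, max_lines=8, max_chars=8 * 80):
    # Return the explanation unchanged if it fits both budgets.
    if len(lines) <= max_lines and len("".join(lines)) <= max_chars:
        return lines
    kept = lines[:max_lines]
    # Trim further if the kept lines still exceed the character budget.
    total = 0
    for i, line in enumerate(kept):
        if total + len(line) > max_chars:
            kept = kept[:i] + [line[:max_chars - total]]
            break
        total += len(line)
    kept[-1] += "..."
    hidden = len(lines) - len(kept) + 1  # counts the part-truncated line too
    kept += ["", "...Full output truncated (%d lines hidden), use '-vv' to show" % hidden]
    return kept

print("\n".join(truncate_explanation(["line %d" % i for i in range(20)])))
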
diff --git a/lib/spack/external/pytest-fallback/_pytest/assertion/util.py b/lib/spack/external/pytest-fallback/_pytest/assertion/util.py
deleted file mode 100644
index c09eff06b0..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/assertion/util.py
+++ /dev/null
@@ -1,310 +0,0 @@
-"""Utilities for assertion debugging"""
-from __future__ import absolute_import, division, print_function
-import pprint
-
-import _pytest._code
-import py
-try:
- from collections.abc import Sequence
-except ImportError:
- try:
- from collections import Sequence
- except ImportError:
- Sequence = list
-
-
-u = py.builtin._totext
-
-# The _reprcompare attribute on the util module is used by the new assertion
-# interpretation code and assertion rewriter to detect this plugin was
-# loaded and in turn call the hooks defined here as part of the
-# DebugInterpreter.
-_reprcompare = None
-
-
-# the re-encoding is needed for python2 repr
-# with non-ascii characters (see issue 877 and 1379)
-def ecu(s):
- try:
- return u(s, 'utf-8', 'replace')
- except TypeError:
- return s
-
-
-def format_explanation(explanation):
- """This formats an explanation
-
- Normally all embedded newlines are escaped, however there are
- three exceptions: \n{, \n} and \n~. The first two are intended
- cover nested explanations, see function and attribute explanations
- for examples (.visit_Call(), visit_Attribute()). The last one is
- for when one explanation needs to span multiple lines, e.g. when
- displaying diffs.
- """
- explanation = ecu(explanation)
- lines = _split_explanation(explanation)
- result = _format_lines(lines)
- return u('\n').join(result)
-
-
-def _split_explanation(explanation):
- """Return a list of individual lines in the explanation
-
- This will return a list of lines split on '\n{', '\n}' and '\n~'.
- Any other newlines will be escaped and appear in the line as the
- literal '\n' characters.
- """
- raw_lines = (explanation or u('')).split('\n')
- lines = [raw_lines[0]]
- for values in raw_lines[1:]:
- if values and values[0] in ['{', '}', '~', '>']:
- lines.append(values)
- else:
- lines[-1] += '\\n' + values
- return lines
-
-
-def _format_lines(lines):
- """Format the individual lines
-
- This will replace the '{', '}' and '~' characters of our mini
- formatting language with the proper 'where ...', 'and ...' and ' +
- ...' text, taking care of indentation along the way.
-
- Return a list of formatted lines.
- """
- result = lines[:1]
- stack = [0]
- stackcnt = [0]
- for line in lines[1:]:
- if line.startswith('{'):
- if stackcnt[-1]:
- s = u('and ')
- else:
- s = u('where ')
- stack.append(len(result))
- stackcnt[-1] += 1
- stackcnt.append(0)
- result.append(u(' +') + u(' ') * (len(stack) - 1) + s + line[1:])
- elif line.startswith('}'):
- stack.pop()
- stackcnt.pop()
- result[stack[-1]] += line[1:]
- else:
- assert line[0] in ['~', '>']
- stack[-1] += 1
- indent = len(stack) if line.startswith('~') else len(stack) - 1
- result.append(u(' ') * indent + line[1:])
- assert len(stack) == 1
- return result
-
-
-# Provide basestring in python3
-try:
- basestring = basestring
-except NameError:
- basestring = str
-
-
-def assertrepr_compare(config, op, left, right):
- """Return specialised explanations for some operators/operands"""
- width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
- left_repr = py.io.saferepr(left, maxsize=int(width // 2))
- right_repr = py.io.saferepr(right, maxsize=width - len(left_repr))
-
- summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
-
- def issequence(x):
- return (isinstance(x, (list, tuple, Sequence)) and not isinstance(x, basestring))
-
- def istext(x):
- return isinstance(x, basestring)
-
- def isdict(x):
- return isinstance(x, dict)
-
- def isset(x):
- return isinstance(x, (set, frozenset))
-
- def isiterable(obj):
- try:
- iter(obj)
- return not istext(obj)
- except TypeError:
- return False
-
- verbose = config.getoption('verbose')
- explanation = None
- try:
- if op == '==':
- if istext(left) and istext(right):
- explanation = _diff_text(left, right, verbose)
- else:
- if issequence(left) and issequence(right):
- explanation = _compare_eq_sequence(left, right, verbose)
- elif isset(left) and isset(right):
- explanation = _compare_eq_set(left, right, verbose)
- elif isdict(left) and isdict(right):
- explanation = _compare_eq_dict(left, right, verbose)
- if isiterable(left) and isiterable(right):
- expl = _compare_eq_iterable(left, right, verbose)
- if explanation is not None:
- explanation.extend(expl)
- else:
- explanation = expl
- elif op == 'not in':
- if istext(left) and istext(right):
- explanation = _notin_text(left, right, verbose)
- except Exception:
- explanation = [
- u('(pytest_assertion plugin: representation of details failed. '
- 'Probably an object has a faulty __repr__.)'),
- u(_pytest._code.ExceptionInfo())]
-
- if not explanation:
- return None
-
- return [summary] + explanation
-
-
-def _diff_text(left, right, verbose=False):
- """Return the explanation for the diff between text or bytes
-
- Unless --verbose is used this will skip leading and trailing
- characters which are identical to keep the diff minimal.
-
- If the inputs are bytes they will be safely converted to text.
- """
- from difflib import ndiff
- explanation = []
- if isinstance(left, py.builtin.bytes):
- left = u(repr(left)[1:-1]).replace(r'\n', '\n')
- if isinstance(right, py.builtin.bytes):
- right = u(repr(right)[1:-1]).replace(r'\n', '\n')
- if not verbose:
- i = 0 # just in case left or right has zero length
- for i in range(min(len(left), len(right))):
- if left[i] != right[i]:
- break
- if i > 42:
- i -= 10 # Provide some context
- explanation = [u('Skipping %s identical leading '
- 'characters in diff, use -v to show') % i]
- left = left[i:]
- right = right[i:]
- if len(left) == len(right):
- for i in range(len(left)):
- if left[-i] != right[-i]:
- break
- if i > 42:
- i -= 10 # Provide some context
- explanation += [u('Skipping %s identical trailing '
- 'characters in diff, use -v to show') % i]
- left = left[:-i]
- right = right[:-i]
- keepends = True
- explanation += [line.strip('\n')
- for line in ndiff(left.splitlines(keepends),
- right.splitlines(keepends))]
- return explanation
-
-
-def _compare_eq_iterable(left, right, verbose=False):
- if not verbose:
- return [u('Use -v to get the full diff')]
- # dynamic import to speedup pytest
- import difflib
-
- try:
- left_formatting = pprint.pformat(left).splitlines()
- right_formatting = pprint.pformat(right).splitlines()
- explanation = [u('Full diff:')]
- except Exception:
- # hack: PrettyPrinter.pformat() in Python 2 fails when formatting items that can't be sorted(), i.e., calling
- # sorted() on a list would raise. See issue #718.
- # As a workaround, the full diff is generated by using the repr() string of each item of each container.
- left_formatting = sorted(repr(x) for x in left)
- right_formatting = sorted(repr(x) for x in right)
- explanation = [u('Full diff (fallback to calling repr on each item):')]
- explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting))
- return explanation
-
-
-def _compare_eq_sequence(left, right, verbose=False):
- explanation = []
- for i in range(min(len(left), len(right))):
- if left[i] != right[i]:
- explanation += [u('At index %s diff: %r != %r')
- % (i, left[i], right[i])]
- break
- if len(left) > len(right):
- explanation += [u('Left contains more items, first extra item: %s')
- % py.io.saferepr(left[len(right)],)]
- elif len(left) < len(right):
- explanation += [
- u('Right contains more items, first extra item: %s') %
- py.io.saferepr(right[len(left)],)]
- return explanation
-
-
-def _compare_eq_set(left, right, verbose=False):
- explanation = []
- diff_left = left - right
- diff_right = right - left
- if diff_left:
- explanation.append(u('Extra items in the left set:'))
- for item in diff_left:
- explanation.append(py.io.saferepr(item))
- if diff_right:
- explanation.append(u('Extra items in the right set:'))
- for item in diff_right:
- explanation.append(py.io.saferepr(item))
- return explanation
-
-
-def _compare_eq_dict(left, right, verbose=False):
- explanation = []
- common = set(left).intersection(set(right))
- same = dict((k, left[k]) for k in common if left[k] == right[k])
- if same and verbose < 2:
- explanation += [u('Omitting %s identical items, use -vv to show') %
- len(same)]
- elif same:
- explanation += [u('Common items:')]
- explanation += pprint.pformat(same).splitlines()
- diff = set(k for k in common if left[k] != right[k])
- if diff:
- explanation += [u('Differing items:')]
- for k in diff:
- explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
- py.io.saferepr({k: right[k]})]
- extra_left = set(left) - set(right)
- if extra_left:
- explanation.append(u('Left contains more items:'))
- explanation.extend(pprint.pformat(
- dict((k, left[k]) for k in extra_left)).splitlines())
- extra_right = set(right) - set(left)
- if extra_right:
- explanation.append(u('Right contains more items:'))
- explanation.extend(pprint.pformat(
- dict((k, right[k]) for k in extra_right)).splitlines())
- return explanation
-
-
-def _notin_text(term, text, verbose=False):
- index = text.find(term)
- head = text[:index]
- tail = text[index + len(term):]
- correct_text = head + tail
- diff = _diff_text(correct_text, text, verbose)
- newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
- for line in diff:
- if line.startswith(u('Skipping')):
- continue
- if line.startswith(u('- ')):
- continue
- if line.startswith(u('+ ')):
- newdiff.append(u(' ') + line[2:])
- else:
- newdiff.append(line)
- return newdiff
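
util.py above builds the per-operator explanations (text diffs, sequence/set/dict comparisons) that the rewritten asserts display. As a small standalone illustration of the shape of that output, the sketch below mimics the set-comparison case (extra items listed per side under the summary line); it is an approximation for illustration, not the plugin code.

def explain_set_compare(left, right):
    # Summary line followed by the items unique to each side, similar in
    # spirit to _compare_eq_set above.
    lines = ["%r == %r" % (left, right)]
    if left - right:
        lines.append("Extra items in the left set:")
        lines.extend(repr(item) for item in sorted(left - right))
    if right - left:
        lines.append("Extra items in the right set:")
        lines.extend(repr(item) for item in sorted(right - left))
    return lines

print("\n".join(explain_set_compare({1, 2, 3}, {2, 3, 4})))
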
diff --git a/lib/spack/external/pytest-fallback/_pytest/cacheprovider.py b/lib/spack/external/pytest-fallback/_pytest/cacheprovider.py
deleted file mode 100755
index c537c14472..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/cacheprovider.py
+++ /dev/null
@@ -1,260 +0,0 @@
-"""
-merged implementation of the cache provider
-
-the name "cache" was deliberately not chosen, to ensure pluggy
-automatically ignores the external pytest-cache
-"""
-from __future__ import absolute_import, division, print_function
-import py
-import pytest
-import json
-import os
-from os.path import sep as _sep, altsep as _altsep
-
-
-class Cache(object):
- def __init__(self, config):
- self.config = config
- self._cachedir = Cache.cache_dir_from_config(config)
- self.trace = config.trace.root.get("cache")
- if config.getvalue("cacheclear"):
- self.trace("clearing cachedir")
- if self._cachedir.check():
- self._cachedir.remove()
- self._cachedir.mkdir()
-
- @staticmethod
- def cache_dir_from_config(config):
- cache_dir = config.getini("cache_dir")
- cache_dir = os.path.expanduser(cache_dir)
- cache_dir = os.path.expandvars(cache_dir)
- if os.path.isabs(cache_dir):
- return py.path.local(cache_dir)
- else:
- return config.rootdir.join(cache_dir)
-
- def makedir(self, name):
- """ return a directory path object with the given name. If the
- directory does not yet exist, it will be created. You can use it
- to manage files, e.g. to store/retrieve database
- dumps across test sessions.
-
- :param name: must be a string not containing a ``/`` separator.
- Make sure the name contains your plugin or application
- identifiers to prevent clashes with other cache users.
- """
- if _sep in name or _altsep is not None and _altsep in name:
- raise ValueError("name is not allowed to contain path separators")
- return self._cachedir.ensure_dir("d", name)
-
- def _getvaluepath(self, key):
- return self._cachedir.join('v', *key.split('/'))
-
- def get(self, key, default):
- """ return cached value for the given key. If no value
- was yet cached or the value cannot be read, the specified
- default is returned.
-
- :param key: must be a ``/`` separated value. Usually the first
- name is the name of your plugin or your application.
- :param default: must be provided in case of a cache-miss or
- invalid cache values.
-
- """
- path = self._getvaluepath(key)
- if path.check():
- try:
- with path.open("r") as f:
- return json.load(f)
- except ValueError:
- self.trace("cache-invalid at %s" % (path,))
- return default
-
- def set(self, key, value):
- """ save value for the given key.
-
- :param key: must be a ``/`` separated value. Usually the first
- name is the name of your plugin or your application.
- :param value: must be of any combination of basic
- python types, including nested types
- like, e.g., lists of dictionaries.
- """
- path = self._getvaluepath(key)
- try:
- path.dirpath().ensure_dir()
- except (py.error.EEXIST, py.error.EACCES):
- self.config.warn(
- code='I9', message='could not create cache path %s' % (path,)
- )
- return
- try:
- f = path.open('w')
- except py.error.ENOTDIR:
- self.config.warn(
- code='I9', message='cache could not write path %s' % (path,))
- else:
- with f:
- self.trace("cache-write %s: %r" % (key, value,))
- json.dump(value, f, indent=2, sort_keys=True)
-
-
-class LFPlugin:
- """ Plugin which implements the --lf (run last-failing) option """
-
- def __init__(self, config):
- self.config = config
- active_keys = 'lf', 'failedfirst'
- self.active = any(config.getvalue(key) for key in active_keys)
- self.lastfailed = config.cache.get("cache/lastfailed", {})
- self._previously_failed_count = None
-
- def pytest_report_collectionfinish(self):
- if self.active:
- if not self._previously_failed_count:
- mode = "run all (no recorded failures)"
- else:
- noun = 'failure' if self._previously_failed_count == 1 else 'failures'
- suffix = " first" if self.config.getvalue("failedfirst") else ""
- mode = "rerun previous {count} {noun}{suffix}".format(
- count=self._previously_failed_count, suffix=suffix, noun=noun
- )
- return "run-last-failure: %s" % mode
-
- def pytest_runtest_logreport(self, report):
- if (report.when == 'call' and report.passed) or report.skipped:
- self.lastfailed.pop(report.nodeid, None)
- elif report.failed:
- self.lastfailed[report.nodeid] = True
-
- def pytest_collectreport(self, report):
- passed = report.outcome in ('passed', 'skipped')
- if passed:
- if report.nodeid in self.lastfailed:
- self.lastfailed.pop(report.nodeid)
- self.lastfailed.update(
- (item.nodeid, True)
- for item in report.result)
- else:
- self.lastfailed[report.nodeid] = True
-
- def pytest_collection_modifyitems(self, session, config, items):
- if self.active and self.lastfailed:
- previously_failed = []
- previously_passed = []
- for item in items:
- if item.nodeid in self.lastfailed:
- previously_failed.append(item)
- else:
- previously_passed.append(item)
- self._previously_failed_count = len(previously_failed)
- if not previously_failed:
- # running a subset of all tests with recorded failures outside
- # of the set of tests currently executing
- return
- if self.config.getvalue("lf"):
- items[:] = previously_failed
- config.hook.pytest_deselected(items=previously_passed)
- else:
- items[:] = previously_failed + previously_passed
-
- def pytest_sessionfinish(self, session):
- config = self.config
- if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
- return
-
- saved_lastfailed = config.cache.get("cache/lastfailed", {})
- if saved_lastfailed != self.lastfailed:
- config.cache.set("cache/lastfailed", self.lastfailed)
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("general")
- group.addoption(
- '--lf', '--last-failed', action='store_true', dest="lf",
- help="rerun only the tests that failed "
- "at the last run (or all if none failed)")
- group.addoption(
- '--ff', '--failed-first', action='store_true', dest="failedfirst",
- help="run all tests but run the last failures first. "
- "This may re-order tests and thus lead to "
- "repeated fixture setup/teardown")
- group.addoption(
- '--cache-show', action='store_true', dest="cacheshow",
- help="show cache contents, don't perform collection or tests")
- group.addoption(
- '--cache-clear', action='store_true', dest="cacheclear",
- help="remove all cache contents at start of test run.")
- parser.addini(
- "cache_dir", default='.cache',
- help="cache directory path.")
-
-
-def pytest_cmdline_main(config):
- if config.option.cacheshow:
- from _pytest.main import wrap_session
- return wrap_session(config, cacheshow)
-
-
-@pytest.hookimpl(tryfirst=True)
-def pytest_configure(config):
- config.cache = Cache(config)
- config.pluginmanager.register(LFPlugin(config), "lfplugin")
-
-
-@pytest.fixture
-def cache(request):
- """
- Return a cache object that can persist state between testing sessions.
-
- cache.get(key, default)
- cache.set(key, value)
-
- Keys must be a ``/`` separated value, where the first part is usually the
- name of your plugin or application to avoid clashes with other cache users.
-
- Values can be any object handled by the json stdlib module.
- """
- return request.config.cache
-
-
-def pytest_report_header(config):
- if config.option.verbose:
- relpath = py.path.local().bestrelpath(config.cache._cachedir)
- return "cachedir: %s" % relpath
-
-
-def cacheshow(config, session):
- from pprint import pprint
- tw = py.io.TerminalWriter()
- tw.line("cachedir: " + str(config.cache._cachedir))
- if not config.cache._cachedir.check():
- tw.line("cache is empty")
- return 0
- dummy = object()
- basedir = config.cache._cachedir
- vdir = basedir.join("v")
- tw.sep("-", "cache values")
- for valpath in sorted(vdir.visit(lambda x: x.isfile())):
- key = valpath.relto(vdir).replace(valpath.sep, "/")
- val = config.cache.get(key, dummy)
- if val is dummy:
- tw.line("%s contains unreadable content, "
- "will be ignored" % key)
- else:
- tw.line("%s contains:" % key)
- stream = py.io.TextIO()
- pprint(val, stream=stream)
- for line in stream.getvalue().splitlines():
- tw.line(" " + line)
-
- ddir = basedir.join("d")
- if ddir.isdir() and ddir.listdir():
- tw.sep("-", "cache directories")
- for p in sorted(basedir.join("d").visit()):
- # if p.check(dir=1):
- # print("%s/" % p.relto(basedir))
- if p.isfile():
- key = p.relto(basedir)
- tw.line("%s is a file of length %d" % (
- key, p.size()))
- return 0
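
cacheprovider.py above persists state between runs (for --lf/--ff) by mapping each '/'-separated key to a JSON file under the cache directory's 'v' subdirectory, returning the caller's default on a miss or an unreadable value. A minimal sketch of that storage scheme follows; TinyCache and the /tmp path are hypothetical, and it drops the warning/trace handling of the real Cache class.

import json
import os

class TinyCache:
    # Hypothetical, simplified version of the key -> JSON-file mapping.
    def __init__(self, cachedir):
        self.cachedir = cachedir

    def _valuepath(self, key):
        return os.path.join(self.cachedir, "v", *key.split("/"))

    def get(self, key, default):
        try:
            with open(self._valuepath(key)) as f:
                return json.load(f)
        except (OSError, ValueError):
            return default

    def set(self, key, value):
        path = self._valuepath(key)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, "w") as f:
            json.dump(value, f, indent=2, sort_keys=True)

cache = TinyCache("/tmp/tinycache")  # hypothetical location
cache.set("cache/lastfailed", {"test_mod.py::test_x": True})
print(cache.get("cache/lastfailed", {}))
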
diff --git a/lib/spack/external/pytest-fallback/_pytest/capture.py b/lib/spack/external/pytest-fallback/_pytest/capture.py
deleted file mode 100644
index cb5af6fcb3..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/capture.py
+++ /dev/null
@@ -1,577 +0,0 @@
-"""
-per-test stdout/stderr capturing mechanism.
-
-"""
-from __future__ import absolute_import, division, print_function
-
-import contextlib
-import sys
-import os
-import io
-from io import UnsupportedOperation
-from tempfile import TemporaryFile
-
-import py
-import pytest
-from _pytest.compat import CaptureIO
-
-unicode = py.builtin.text
-
-patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("general")
- group._addoption(
- '--capture', action="store",
- default="fd" if hasattr(os, "dup") else "sys",
- metavar="method", choices=['fd', 'sys', 'no'],
- help="per-test capturing method: one of fd|sys|no.")
- group._addoption(
- '-s', action="store_const", const="no", dest="capture",
- help="shortcut for --capture=no.")
-
-
-@pytest.hookimpl(hookwrapper=True)
-def pytest_load_initial_conftests(early_config, parser, args):
- ns = early_config.known_args_namespace
- if ns.capture == "fd":
- _py36_windowsconsoleio_workaround(sys.stdout)
- _colorama_workaround()
- _readline_workaround()
- pluginmanager = early_config.pluginmanager
- capman = CaptureManager(ns.capture)
- pluginmanager.register(capman, "capturemanager")
-
- # make sure that capturemanager is properly reset at final shutdown
- early_config.add_cleanup(capman.reset_capturings)
-
- # make sure logging does not raise exceptions at the end
- def silence_logging_at_shutdown():
- if "logging" in sys.modules:
- sys.modules["logging"].raiseExceptions = False
- early_config.add_cleanup(silence_logging_at_shutdown)
-
- # finally trigger conftest loading but while capturing (issue93)
- capman.init_capturings()
- outcome = yield
- out, err = capman.suspendcapture()
- if outcome.excinfo is not None:
- sys.stdout.write(out)
- sys.stderr.write(err)
-
-
-class CaptureManager:
- def __init__(self, method):
- self._method = method
-
- def _getcapture(self, method):
- if method == "fd":
- return MultiCapture(out=True, err=True, Capture=FDCapture)
- elif method == "sys":
- return MultiCapture(out=True, err=True, Capture=SysCapture)
- elif method == "no":
- return MultiCapture(out=False, err=False, in_=False)
- else:
- raise ValueError("unknown capturing method: %r" % method)
-
- def init_capturings(self):
- assert not hasattr(self, "_capturing")
- self._capturing = self._getcapture(self._method)
- self._capturing.start_capturing()
-
- def reset_capturings(self):
- cap = self.__dict__.pop("_capturing", None)
- if cap is not None:
- cap.pop_outerr_to_orig()
- cap.stop_capturing()
-
- def resumecapture(self):
- self._capturing.resume_capturing()
-
- def suspendcapture(self, in_=False):
- self.deactivate_funcargs()
- cap = getattr(self, "_capturing", None)
- if cap is not None:
- try:
- outerr = cap.readouterr()
- finally:
- cap.suspend_capturing(in_=in_)
- return outerr
-
- def activate_funcargs(self, pyfuncitem):
- capfuncarg = pyfuncitem.__dict__.pop("_capfuncarg", None)
- if capfuncarg is not None:
- capfuncarg._start()
- self._capfuncarg = capfuncarg
-
- def deactivate_funcargs(self):
- capfuncarg = self.__dict__.pop("_capfuncarg", None)
- if capfuncarg is not None:
- capfuncarg.close()
-
- @pytest.hookimpl(hookwrapper=True)
- def pytest_make_collect_report(self, collector):
- if isinstance(collector, pytest.File):
- self.resumecapture()
- outcome = yield
- out, err = self.suspendcapture()
- rep = outcome.get_result()
- if out:
- rep.sections.append(("Captured stdout", out))
- if err:
- rep.sections.append(("Captured stderr", err))
- else:
- yield
-
- @pytest.hookimpl(hookwrapper=True)
- def pytest_runtest_setup(self, item):
- self.resumecapture()
- yield
- self.suspendcapture_item(item, "setup")
-
- @pytest.hookimpl(hookwrapper=True)
- def pytest_runtest_call(self, item):
- self.resumecapture()
- self.activate_funcargs(item)
- yield
- # self.deactivate_funcargs() called from suspendcapture()
- self.suspendcapture_item(item, "call")
-
- @pytest.hookimpl(hookwrapper=True)
- def pytest_runtest_teardown(self, item):
- self.resumecapture()
- yield
- self.suspendcapture_item(item, "teardown")
-
- @pytest.hookimpl(tryfirst=True)
- def pytest_keyboard_interrupt(self, excinfo):
- self.reset_capturings()
-
- @pytest.hookimpl(tryfirst=True)
- def pytest_internalerror(self, excinfo):
- self.reset_capturings()
-
- def suspendcapture_item(self, item, when, in_=False):
- out, err = self.suspendcapture(in_=in_)
- item.add_report_section(when, "stdout", out)
- item.add_report_section(when, "stderr", err)
-
-
-error_capsysfderror = "cannot use capsys and capfd at the same time"
-
-
-@pytest.fixture
-def capsys(request):
- """Enable capturing of writes to sys.stdout/sys.stderr and make
- captured output available via ``capsys.readouterr()`` method calls
- which return a ``(out, err)`` tuple.
- """
- if "capfd" in request.fixturenames:
- raise request.raiseerror(error_capsysfderror)
- request.node._capfuncarg = c = CaptureFixture(SysCapture, request)
- return c
-
-
-@pytest.fixture
-def capfd(request):
- """Enable capturing of writes to file descriptors 1 and 2 and make
- captured output available via ``capfd.readouterr()`` method calls
- which return a ``(out, err)`` tuple.
- """
- if "capsys" in request.fixturenames:
- request.raiseerror(error_capsysfderror)
- if not hasattr(os, 'dup'):
- pytest.skip("capfd funcarg needs os.dup")
- request.node._capfuncarg = c = CaptureFixture(FDCapture, request)
- return c
-
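For illustration only (not part of the deleted file): a typical test using the capsys/capfd fixtures defined above; readouterr() snapshots and clears the captured streams and returns an (out, err) tuple.

def test_print_is_captured(capsys):
    print("hello")
    out, err = capsys.readouterr()
    assert out == "hello\n" and err == ""

def test_fd_level_capture(capfd):
    import os
    os.write(1, b"raw fd write\n")   # bypasses sys.stdout, still captured at the fd level
    out, err = capfd.readouterr()
    assert "raw fd write" in out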
-
-class CaptureFixture:
- def __init__(self, captureclass, request):
- self.captureclass = captureclass
- self.request = request
-
- def _start(self):
- self._capture = MultiCapture(out=True, err=True, in_=False,
- Capture=self.captureclass)
- self._capture.start_capturing()
-
- def close(self):
- cap = self.__dict__.pop("_capture", None)
- if cap is not None:
- self._outerr = cap.pop_outerr_to_orig()
- cap.stop_capturing()
-
- def readouterr(self):
- try:
- return self._capture.readouterr()
- except AttributeError:
- return self._outerr
-
- @contextlib.contextmanager
- def disabled(self):
- capmanager = self.request.config.pluginmanager.getplugin('capturemanager')
- capmanager.suspendcapture_item(self.request.node, "call", in_=True)
- try:
- yield
- finally:
- capmanager.resumecapture()
-
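For illustration (not part of the deleted file): how the disabled() context manager above is typically used to emit output that should bypass capturing.

def test_progress_note(capsys):
    with capsys.disabled():
        print("shown live on the terminal, not captured")
    print("captured as usual")
    out, _ = capsys.readouterr()
    assert out == "captured as usual\n"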
-
-def safe_text_dupfile(f, mode, default_encoding="UTF8"):
- """ return a open text file object that's a duplicate of f on the
- FD-level if possible.
- """
- encoding = getattr(f, "encoding", None)
- try:
- fd = f.fileno()
- except Exception:
- if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"):
- # we seem to have a text stream, let's just use it
- return f
- else:
- newfd = os.dup(fd)
- if "b" not in mode:
- mode += "b"
- f = os.fdopen(newfd, mode, 0) # no buffering
- return EncodedFile(f, encoding or default_encoding)
-
-
-class EncodedFile(object):
- errors = "strict" # possibly needed by py3 code (issue555)
-
- def __init__(self, buffer, encoding):
- self.buffer = buffer
- self.encoding = encoding
-
- def write(self, obj):
- if isinstance(obj, unicode):
- obj = obj.encode(self.encoding, "replace")
- self.buffer.write(obj)
-
- def writelines(self, linelist):
- data = ''.join(linelist)
- self.write(data)
-
- @property
- def name(self):
- """Ensure that file.name is a string."""
- return repr(self.buffer)
-
- def __getattr__(self, name):
- return getattr(object.__getattribute__(self, "buffer"), name)
-
-
-class MultiCapture(object):
- out = err = in_ = None
-
- def __init__(self, out=True, err=True, in_=True, Capture=None):
- if in_:
- self.in_ = Capture(0)
- if out:
- self.out = Capture(1)
- if err:
- self.err = Capture(2)
-
- def start_capturing(self):
- if self.in_:
- self.in_.start()
- if self.out:
- self.out.start()
- if self.err:
- self.err.start()
-
- def pop_outerr_to_orig(self):
- """ pop current snapshot out/err capture and flush to orig streams. """
- out, err = self.readouterr()
- if out:
- self.out.writeorg(out)
- if err:
- self.err.writeorg(err)
- return out, err
-
- def suspend_capturing(self, in_=False):
- if self.out:
- self.out.suspend()
- if self.err:
- self.err.suspend()
- if in_ and self.in_:
- self.in_.suspend()
- self._in_suspended = True
-
- def resume_capturing(self):
- if self.out:
- self.out.resume()
- if self.err:
- self.err.resume()
- if hasattr(self, "_in_suspended"):
- self.in_.resume()
- del self._in_suspended
-
- def stop_capturing(self):
- """ stop capturing and reset capturing streams """
- if hasattr(self, '_reset'):
- raise ValueError("was already stopped")
- self._reset = True
- if self.out:
- self.out.done()
- if self.err:
- self.err.done()
- if self.in_:
- self.in_.done()
-
- def readouterr(self):
- """ return snapshot unicode value of stdout/stderr capturings. """
- return (self.out.snap() if self.out is not None else "",
- self.err.snap() if self.err is not None else "")
-
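A minimal, self-contained sketch (an illustration, not the deleted implementation) of the sys-level pattern that SysCapture and MultiCapture implement: swap a sys stream for an in-memory buffer, then snapshot and restore it.

import sys
from io import StringIO

class TinySysCapture:
    def __init__(self, name):
        self.name = name                 # "stdout" or "stderr"
        self._old = getattr(sys, name)   # remember the original stream
        self.buf = StringIO()

    def start(self):
        setattr(sys, self.name, self.buf)

    def snap(self):
        value = self.buf.getvalue()
        self.buf.truncate(0)             # clear, like SysCapture.snap() above
        self.buf.seek(0)
        return value

    def done(self):
        setattr(sys, self.name, self._old)

cap = TinySysCapture("stdout")
cap.start()
print("captured")
assert cap.snap() == "captured\n"
cap.done()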
-
-class NoCapture:
- __init__ = start = done = suspend = resume = lambda *args: None
-
-
-class FDCapture:
- """ Capture IO to/from a given os-level filedescriptor. """
-
- def __init__(self, targetfd, tmpfile=None):
- self.targetfd = targetfd
- try:
- self.targetfd_save = os.dup(self.targetfd)
- except OSError:
- self.start = lambda: None
- self.done = lambda: None
- else:
- if targetfd == 0:
- assert not tmpfile, "cannot set tmpfile with stdin"
- tmpfile = open(os.devnull, "r")
- self.syscapture = SysCapture(targetfd)
- else:
- if tmpfile is None:
- f = TemporaryFile()
- with f:
- tmpfile = safe_text_dupfile(f, mode="wb+")
- if targetfd in patchsysdict:
- self.syscapture = SysCapture(targetfd, tmpfile)
- else:
- self.syscapture = NoCapture()
- self.tmpfile = tmpfile
- self.tmpfile_fd = tmpfile.fileno()
-
- def __repr__(self):
- return "<FDCapture %s oldfd=%s>" % (self.targetfd, self.targetfd_save)
-
- def start(self):
- """ Start capturing on targetfd using memorized tmpfile. """
- try:
- os.fstat(self.targetfd_save)
- except (AttributeError, OSError):
- raise ValueError("saved filedescriptor not valid anymore")
- os.dup2(self.tmpfile_fd, self.targetfd)
- self.syscapture.start()
-
- def snap(self):
- f = self.tmpfile
- f.seek(0)
- res = f.read()
- if res:
- enc = getattr(f, "encoding", None)
- if enc and isinstance(res, bytes):
- res = py.builtin._totext(res, enc, "replace")
- f.truncate(0)
- f.seek(0)
- return res
- return ''
-
- def done(self):
- """ stop capturing, restore streams, return original capture file,
- seeked to position zero. """
- targetfd_save = self.__dict__.pop("targetfd_save")
- os.dup2(targetfd_save, self.targetfd)
- os.close(targetfd_save)
- self.syscapture.done()
- self.tmpfile.close()
-
- def suspend(self):
- self.syscapture.suspend()
- os.dup2(self.targetfd_save, self.targetfd)
-
- def resume(self):
- self.syscapture.resume()
- os.dup2(self.tmpfile_fd, self.targetfd)
-
- def writeorg(self, data):
- """ write to original file descriptor. """
- if py.builtin._istext(data):
- data = data.encode("utf8") # XXX use encoding of original stream
- os.write(self.targetfd_save, data)
-
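A self-contained sketch (illustration only, not the deleted code) of the os.dup/os.dup2 mechanism FDCapture relies on: save the real fd, point it at a temporary file while capturing, then restore it.

import os
import sys
import tempfile

sys.stdout.flush()                    # avoid mixing buffered output with fd-level writes
saved_fd = os.dup(1)                  # keep a copy of the real stdout fd
tmp = tempfile.TemporaryFile()        # binary temp file backing the capture
os.dup2(tmp.fileno(), 1)              # fd 1 now points at the temp file
os.write(1, b"goes to the tmpfile\n")
os.dup2(saved_fd, 1)                  # restore the original stdout fd
os.close(saved_fd)
tmp.seek(0)
print("captured:", tmp.read().decode())
tmp.close()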
-
-class SysCapture:
- def __init__(self, fd, tmpfile=None):
- name = patchsysdict[fd]
- self._old = getattr(sys, name)
- self.name = name
- if tmpfile is None:
- if name == "stdin":
- tmpfile = DontReadFromInput()
- else:
- tmpfile = CaptureIO()
- self.tmpfile = tmpfile
-
- def start(self):
- setattr(sys, self.name, self.tmpfile)
-
- def snap(self):
- f = self.tmpfile
- res = f.getvalue()
- f.truncate(0)
- f.seek(0)
- return res
-
- def done(self):
- setattr(sys, self.name, self._old)
- del self._old
- self.tmpfile.close()
-
- def suspend(self):
- setattr(sys, self.name, self._old)
-
- def resume(self):
- setattr(sys, self.name, self.tmpfile)
-
- def writeorg(self, data):
- self._old.write(data)
- self._old.flush()
-
-
-class DontReadFromInput:
- """Temporary stub class. Ideally when stdin is accessed, the
- capturing should be turned off, with possibly all data captured
- so far sent to the screen. This should be configurable, though,
- because in automated test runs it is better to crash than
- hang indefinitely.
- """
-
- encoding = None
-
- def read(self, *args):
- raise IOError("reading from stdin while output is captured")
- readline = read
- readlines = read
- __iter__ = read
-
- def fileno(self):
- raise UnsupportedOperation("redirected stdin is pseudofile, "
- "has no fileno()")
-
- def isatty(self):
- return False
-
- def close(self):
- pass
-
- @property
- def buffer(self):
- if sys.version_info >= (3, 0):
- return self
- else:
- raise AttributeError('redirected stdin has no attribute buffer')
-
-
-def _colorama_workaround():
- """
- Ensure colorama is imported so that it attaches to the correct stdio
- handles on Windows.
-
- colorama uses the terminal on import time. So if something does the
- first import of colorama while I/O capture is active, colorama will
- fail in various ways.
- """
-
- if not sys.platform.startswith('win32'):
- return
- try:
- import colorama # noqa
- except ImportError:
- pass
-
-
-def _readline_workaround():
- """
- Ensure readline is imported so that it attaches to the correct stdio
- handles on Windows.
-
- Pdb uses readline support where available--when not running from the Python
- prompt, the readline module is not imported until running the pdb REPL. If
- running pytest with the --pdb option this means the readline module is not
- imported until after I/O capture has been started.
-
- This is a problem for pyreadline, which is often used to implement readline
- support on Windows, as it does not attach to the correct handles for stdout
- and/or stdin if they have been redirected by the FDCapture mechanism. This
- workaround ensures that readline is imported before I/O capture is setup so
- that it can attach to the actual stdin/out for the console.
-
- See https://github.com/pytest-dev/pytest/pull/1281
- """
-
- if not sys.platform.startswith('win32'):
- return
- try:
- import readline # noqa
- except ImportError:
- pass
-
-
-def _py36_windowsconsoleio_workaround(stream):
- """
- Python 3.6 implemented unicode console handling for Windows. This works
- by reading/writing to the raw console handle using
- ``{Read,Write}ConsoleW``.
-
- The problem is that we are going to ``dup2`` over the stdio file
- descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the
- handles used by Python to write to the console. Though there is still some
- weirdness and the console handle seems to only be closed randomly and not
- on the first call to ``CloseHandle``, or maybe it gets reopened with the
- same handle value when we suspend capturing.
-
- The workaround in this case will reopen stdio with a different fd which
- also means a different handle by replicating the logic in
- "Py_lifecycle.c:initstdio/create_stdio".
-
- :param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given
- here as parameter for unittesting purposes.
-
- See https://github.com/pytest-dev/py/issues/103
- """
- if not sys.platform.startswith('win32') or sys.version_info[:2] < (3, 6):
- return
-
- # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666)
- if not hasattr(stream, 'buffer'):
- return
-
- buffered = hasattr(stream.buffer, 'raw')
- raw_stdout = stream.buffer.raw if buffered else stream.buffer
-
- if not isinstance(raw_stdout, io._WindowsConsoleIO):
- return
-
- def _reopen_stdio(f, mode):
- if not buffered and mode[0] == 'w':
- buffering = 0
- else:
- buffering = -1
-
- return io.TextIOWrapper(
- open(os.dup(f.fileno()), mode, buffering),
- f.encoding,
- f.errors,
- f.newlines,
- f.line_buffering)
-
- sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, 'rb')
- sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, 'wb')
- sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, 'wb')
diff --git a/lib/spack/external/pytest-fallback/_pytest/compat.py b/lib/spack/external/pytest-fallback/_pytest/compat.py
deleted file mode 100644
index 255f69ce0d..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/compat.py
+++ /dev/null
@@ -1,326 +0,0 @@
-"""
-python version compatibility code
-"""
-from __future__ import absolute_import, division, print_function
-import sys
-import inspect
-import types
-import re
-import functools
-
-import py
-
-import _pytest
-from _pytest.outcomes import TEST_OUTCOME
-
-
-try:
- import enum
-except ImportError: # pragma: no cover
- # Only available in Python 3.4+ or as a backport
- enum = None
-
-
-_PY3 = sys.version_info > (3, 0)
-_PY2 = not _PY3
-
-
-NoneType = type(None)
-NOTSET = object()
-
-PY35 = sys.version_info[:2] >= (3, 5)
-PY36 = sys.version_info[:2] >= (3, 6)
-MODULE_NOT_FOUND_ERROR = 'ModuleNotFoundError' if PY36 else 'ImportError'
-
-if hasattr(inspect, 'signature'):
- def _format_args(func):
- return str(inspect.signature(func))
-else:
- def _format_args(func):
- return inspect.formatargspec(*inspect.getargspec(func))
-
-isfunction = inspect.isfunction
-isclass = inspect.isclass
-# used to work around a python2 exception info leak
-exc_clear = getattr(sys, 'exc_clear', lambda: None)
-# The type of re.compile objects is not exposed in Python.
-REGEX_TYPE = type(re.compile(''))
-
-
-def is_generator(func):
- genfunc = inspect.isgeneratorfunction(func)
- return genfunc and not iscoroutinefunction(func)
-
-
-def iscoroutinefunction(func):
- """Return True if func is a decorated coroutine function.
-
-    Note: copied and modified from Python 3.5's builtin coroutines.py to avoid importing asyncio directly,
-    which in turn also initializes the "logging" module as a side effect (see issue #8).
- """
- return (getattr(func, '_is_coroutine', False) or
- (hasattr(inspect, 'iscoroutinefunction') and inspect.iscoroutinefunction(func)))
-
-
-def getlocation(function, curdir):
- import inspect
- fn = py.path.local(inspect.getfile(function))
- lineno = py.builtin._getcode(function).co_firstlineno
- if fn.relto(curdir):
- fn = fn.relto(curdir)
- return "%s:%d" % (fn, lineno + 1)
-
-
-def num_mock_patch_args(function):
- """ return number of arguments used up by mock arguments (if any) """
- patchings = getattr(function, "patchings", None)
- if not patchings:
- return 0
- mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
- if mock is not None:
- return len([p for p in patchings
- if not p.attribute_name and p.new is mock.DEFAULT])
- return len(patchings)
-
-
-def getfuncargnames(function, startindex=None, cls=None):
- """
- @RonnyPfannschmidt: This function should be refactored when we revisit fixtures. The
-    fixture mechanism should ask the node for the fixture names, and not try to obtain
-    them directly from the function object well after collection has occurred.
- """
- if startindex is None and cls is not None:
- is_staticmethod = isinstance(cls.__dict__.get(function.__name__, None), staticmethod)
- startindex = 0 if is_staticmethod else 1
- # XXX merge with main.py's varnames
- # assert not isclass(function)
- realfunction = function
- while hasattr(realfunction, "__wrapped__"):
- realfunction = realfunction.__wrapped__
- if startindex is None:
- startindex = inspect.ismethod(function) and 1 or 0
- if realfunction != function:
- startindex += num_mock_patch_args(function)
- function = realfunction
- if isinstance(function, functools.partial):
- argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
- partial = function
- argnames = argnames[len(partial.args):]
- if partial.keywords:
- for kw in partial.keywords:
- argnames.remove(kw)
- else:
- argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
- defaults = getattr(function, 'func_defaults',
- getattr(function, '__defaults__', None)) or ()
- numdefaults = len(defaults)
- if numdefaults:
- return tuple(argnames[startindex:-numdefaults])
- return tuple(argnames[startindex:])
-
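A small self-contained illustration (not the deleted code) of the rule getfuncargnames applies: positional parameter names are treated as fixture names, and trailing parameters with defaults are dropped.

import inspect

def demo(session, tmpdir, flag=True):
    pass

spec = inspect.getfullargspec(demo)
names = spec.args
defaults = spec.defaults or ()
fixture_names = tuple(names[:-len(defaults)]) if defaults else tuple(names)
assert fixture_names == ("session", "tmpdir")   # "flag" is dropped: it has a default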
-
-if sys.version_info[:2] == (2, 6):
- def isclass(object):
- """ Return true if the object is a class. Overrides inspect.isclass for
- python 2.6 because it will return True for objects which always return
- something on __getattr__ calls (see #1035).
- Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
- """
- return isinstance(object, (type, types.ClassType))
-
-
-if _PY3:
- import codecs
- imap = map
- izip = zip
- STRING_TYPES = bytes, str
- UNICODE_TYPES = str,
-
- def _ascii_escaped(val):
- """If val is pure ascii, returns it as a str(). Otherwise, escapes
- bytes objects into a sequence of escaped bytes:
-
- b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
-
- and escapes unicode objects into a sequence of escaped unicode
- ids, e.g.:
-
- '4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944'
-
- note:
- the obvious "v.decode('unicode-escape')" will return
- valid utf-8 unicode if it finds them in bytes, but we
- want to return escaped bytes for any byte, even if they match
- a utf-8 string.
-
- """
- if isinstance(val, bytes):
- if val:
- # source: http://goo.gl/bGsnwC
- encoded_bytes, _ = codecs.escape_encode(val)
- return encoded_bytes.decode('ascii')
- else:
- # empty bytes crashes codecs.escape_encode (#1087)
- return ''
- else:
- return val.encode('unicode_escape').decode('ascii')
-else:
- STRING_TYPES = bytes, str, unicode
- UNICODE_TYPES = unicode,
-
- from itertools import imap, izip # NOQA
-
- def _ascii_escaped(val):
- """In py2 bytes and str are the same type, so return if it's a bytes
- object, return it unchanged if it is a full ascii string,
- otherwise escape it into its binary form.
-
- If it's a unicode string, change the unicode characters into
- unicode escapes.
-
- """
- if isinstance(val, bytes):
- try:
- return val.encode('ascii')
- except UnicodeDecodeError:
- return val.encode('string-escape')
- else:
- return val.encode('unicode-escape')
-
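For illustration (not part of the deleted file): what the escaping above produces on Python 3 for non-ascii text and for raw bytes.

import codecs

text = "4\nV\u3028"
assert text.encode("unicode_escape").decode("ascii") == "4\\nV\\u3028"

raw = b"\xc3\xb4"
encoded_bytes, _ = codecs.escape_encode(raw)     # same call used in the py3 branch above
assert encoded_bytes.decode("ascii") == "\\xc3\\xb4"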
-
-def get_real_func(obj):
- """ gets the real function object of the (possibly) wrapped object by
- functools.wraps or functools.partial.
- """
- start_obj = obj
- for i in range(100):
- new_obj = getattr(obj, '__wrapped__', None)
- if new_obj is None:
- break
- obj = new_obj
- else:
- raise ValueError(
- ("could not find real function of {start}"
- "\nstopped at {current}").format(
- start=py.io.saferepr(start_obj),
- current=py.io.saferepr(obj)))
- if isinstance(obj, functools.partial):
- obj = obj.func
- return obj
-
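A minimal illustration (not the deleted code) of the __wrapped__ chain that get_real_func follows, as set up by functools.wraps.

import functools

def decorator(func):
    @functools.wraps(func)               # sets wrapper.__wrapped__ = func
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

@decorator
def answer():
    return 42

real = answer
while hasattr(real, "__wrapped__"):      # same loop idea as get_real_func above
    real = real.__wrapped__
assert real is not answer and real() == 42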
-
-def getfslineno(obj):
- # xxx let decorators etc specify a sane ordering
- obj = get_real_func(obj)
- if hasattr(obj, 'place_as'):
- obj = obj.place_as
- fslineno = _pytest._code.getfslineno(obj)
- assert isinstance(fslineno[1], int), obj
- return fslineno
-
-
-def getimfunc(func):
- try:
- return func.__func__
- except AttributeError:
- try:
- return func.im_func
- except AttributeError:
- return func
-
-
-def safe_getattr(object, name, default):
- """ Like getattr but return default upon any Exception or any OutcomeException.
-
- Attribute access can potentially fail for 'evil' Python objects.
- See issue #214.
- It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException
- instead of Exception (for more details check #2707)
- """
- try:
- return getattr(object, name, default)
- except TEST_OUTCOME:
- return default
-
-
-def _is_unittest_unexpected_success_a_failure():
- """Return if the test suite should fail if a @expectedFailure unittest test PASSES.
-
- From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
- Changed in version 3.4: Returns False if there were any
- unexpectedSuccesses from tests marked with the expectedFailure() decorator.
- """
- return sys.version_info >= (3, 4)
-
-
-if _PY3:
- def safe_str(v):
- """returns v as string"""
- return str(v)
-else:
- def safe_str(v):
- """returns v as string, converting to ascii if necessary"""
- try:
- return str(v)
- except UnicodeError:
- if not isinstance(v, unicode):
- v = unicode(v)
- errors = 'replace'
- return v.encode('utf-8', errors)
-
-
-COLLECT_FAKEMODULE_ATTRIBUTES = (
- 'Collector',
- 'Module',
- 'Generator',
- 'Function',
- 'Instance',
- 'Session',
- 'Item',
- 'Class',
- 'File',
- '_fillfuncargs',
-)
-
-
-def _setup_collect_fakemodule():
- from types import ModuleType
- import pytest
- pytest.collect = ModuleType('pytest.collect')
- pytest.collect.__all__ = [] # used for setns
- for attr in COLLECT_FAKEMODULE_ATTRIBUTES:
- setattr(pytest.collect, attr, getattr(pytest, attr))
-
-
-if _PY2:
- # Without this the test_dupfile_on_textio will fail, otherwise CaptureIO could directly inherit from StringIO.
- from py.io import TextIO
-
- class CaptureIO(TextIO):
-
- @property
- def encoding(self):
- return getattr(self, '_encoding', 'UTF-8')
-
-else:
- import io
-
- class CaptureIO(io.TextIOWrapper):
- def __init__(self):
- super(CaptureIO, self).__init__(
- io.BytesIO(),
- encoding='UTF-8', newline='', write_through=True,
- )
-
- def getvalue(self):
- return self.buffer.getvalue().decode('UTF-8')
-
-
-class FuncargnamesCompatAttr(object):
- """ helper class so that Metafunc, Function and FixtureRequest
- don't need to each define the "funcargnames" compatibility attribute.
- """
- @property
- def funcargnames(self):
- """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
- return self.fixturenames
diff --git a/lib/spack/external/pytest-fallback/_pytest/config.py b/lib/spack/external/pytest-fallback/_pytest/config.py
deleted file mode 100644
index 513478a972..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/config.py
+++ /dev/null
@@ -1,1398 +0,0 @@
-""" command line options, ini-file and conftest.py processing. """
-from __future__ import absolute_import, division, print_function
-import argparse
-import shlex
-import traceback
-import types
-import warnings
-
-import py
-# DON't import pytest here because it causes import cycle troubles
-import sys
-import os
-import _pytest._code
-import _pytest.hookspec # the extension point definitions
-import _pytest.assertion
-from _pytest._pluggy import PluginManager, HookimplMarker, HookspecMarker
-from _pytest.compat import safe_str
-
-hookimpl = HookimplMarker("pytest")
-hookspec = HookspecMarker("pytest")
-
-# pytest startup
-#
-
-
-class ConftestImportFailure(Exception):
- def __init__(self, path, excinfo):
- Exception.__init__(self, path, excinfo)
- self.path = path
- self.excinfo = excinfo
-
- def __str__(self):
- etype, evalue, etb = self.excinfo
- formatted = traceback.format_tb(etb)
- # The level of the tracebacks we want to print is hand crafted :(
- return repr(evalue) + '\n' + ''.join(formatted[2:])
-
-
-def main(args=None, plugins=None):
- """ return exit code, after performing an in-process test run.
-
- :arg args: list of command line arguments.
-
- :arg plugins: list of plugin objects to be auto-registered during
- initialization.
- """
- try:
- try:
- config = _prepareconfig(args, plugins)
- except ConftestImportFailure as e:
- tw = py.io.TerminalWriter(sys.stderr)
- for line in traceback.format_exception(*e.excinfo):
- tw.line(line.rstrip(), red=True)
- tw.line("ERROR: could not load %s\n" % (e.path), red=True)
- return 4
- else:
- try:
- return config.hook.pytest_cmdline_main(config=config)
- finally:
- config._ensure_unconfigure()
- except UsageError as e:
- for msg in e.args:
- sys.stderr.write("ERROR: %s\n" % (msg,))
- return 4
-
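For illustration (not part of the deleted file): main() backs the public pytest.main() entry point, e.g. for running a test session in-process and inspecting the exit code.

import pytest

# "tests/unit" is a hypothetical path; main() returns the session's exit code
exit_code = pytest.main(["-q", "tests/unit"])
print("pytest finished with exit code", exit_code)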
-
-class cmdline: # compatibility namespace
- main = staticmethod(main)
-
-
-class UsageError(Exception):
- """ error in pytest usage or invocation"""
-
-
-class PrintHelp(Exception):
- """Raised when pytest should print it's help to skip the rest of the
- argument parsing and validation."""
- pass
-
-
-def filename_arg(path, optname):
- """ Argparse type validator for filename arguments.
-
- :path: path of filename
- :optname: name of the option
- """
- if os.path.isdir(path):
- raise UsageError("{0} must be a filename, given: {1}".format(optname, path))
- return path
-
-
-def directory_arg(path, optname):
- """Argparse type validator for directory arguments.
-
- :path: path of directory
- :optname: name of the option
- """
- if not os.path.isdir(path):
- raise UsageError("{0} must be a directory, given: {1}".format(optname, path))
- return path
-
-
-_preinit = []
-
-default_plugins = (
- "mark main terminal runner python fixtures debugging unittest capture skipping "
- "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion "
- "junitxml resultlog doctest cacheprovider freeze_support "
- "setuponly setupplan warnings").split()
-
-
-builtin_plugins = set(default_plugins)
-builtin_plugins.add("pytester")
-
-
-def _preloadplugins():
- assert not _preinit
- _preinit.append(get_config())
-
-
-def get_config():
- if _preinit:
- return _preinit.pop(0)
- # subsequent calls to main will create a fresh instance
- pluginmanager = PytestPluginManager()
- config = Config(pluginmanager)
- for spec in default_plugins:
- pluginmanager.import_plugin(spec)
- return config
-
-
-def get_plugin_manager():
- """
- Obtain a new instance of the
- :py:class:`_pytest.config.PytestPluginManager`, with default plugins
- already loaded.
-
-    This function can be used by integrations with other tools, such as hooking
-    pytest into an IDE to run tests.
- """
- return get_config().pluginmanager
-
-
-def _prepareconfig(args=None, plugins=None):
- warning = None
- if args is None:
- args = sys.argv[1:]
- elif isinstance(args, py.path.local):
- args = [str(args)]
- elif not isinstance(args, (tuple, list)):
- if not isinstance(args, str):
- raise ValueError("not a string or argument list: %r" % (args,))
- args = shlex.split(args, posix=sys.platform != "win32")
- from _pytest import deprecated
- warning = deprecated.MAIN_STR_ARGS
- config = get_config()
- pluginmanager = config.pluginmanager
- try:
- if plugins:
- for plugin in plugins:
- if isinstance(plugin, py.builtin._basestring):
- pluginmanager.consider_pluginarg(plugin)
- else:
- pluginmanager.register(plugin)
- if warning:
- config.warn('C1', warning)
- return pluginmanager.hook.pytest_cmdline_parse(
- pluginmanager=pluginmanager, args=args)
- except BaseException:
- config._ensure_unconfigure()
- raise
-
-
-class PytestPluginManager(PluginManager):
- """
- Overwrites :py:class:`pluggy.PluginManager <_pytest.vendored_packages.pluggy.PluginManager>` to add pytest-specific
- functionality:
-
-    * loading plugins from the command line, the ``PYTEST_PLUGINS`` env variable and
- ``pytest_plugins`` global variables found in plugins being loaded;
- * ``conftest.py`` loading during start-up;
- """
-
- def __init__(self):
- super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_")
- self._conftest_plugins = set()
-
- # state related to local conftest plugins
- self._path2confmods = {}
- self._conftestpath2mod = {}
- self._confcutdir = None
- self._noconftest = False
- self._duplicatepaths = set()
-
- self.add_hookspecs(_pytest.hookspec)
- self.register(self)
- if os.environ.get('PYTEST_DEBUG'):
- err = sys.stderr
- encoding = getattr(err, 'encoding', 'utf8')
- try:
- err = py.io.dupfile(err, encoding=encoding)
- except Exception:
- pass
- self.trace.root.setwriter(err.write)
- self.enable_tracing()
-
- # Config._consider_importhook will set a real object if required.
- self.rewrite_hook = _pytest.assertion.DummyRewriteHook()
-
- def addhooks(self, module_or_class):
- """
- .. deprecated:: 2.8
-
- Use :py:meth:`pluggy.PluginManager.add_hookspecs <_pytest.vendored_packages.pluggy.PluginManager.add_hookspecs>`
- instead.
- """
- warning = dict(code="I2",
- fslocation=_pytest._code.getfslineno(sys._getframe(1)),
- nodeid=None,
- message="use pluginmanager.add_hookspecs instead of "
- "deprecated addhooks() method.")
- self._warn(warning)
- return self.add_hookspecs(module_or_class)
-
- def parse_hookimpl_opts(self, plugin, name):
- # pytest hooks are always prefixed with pytest_
- # so we avoid accessing possibly non-readable attributes
- # (see issue #1073)
- if not name.startswith("pytest_"):
- return
- # ignore some historic special names which can not be hooks anyway
- if name == "pytest_plugins" or name.startswith("pytest_funcarg__"):
- return
-
- method = getattr(plugin, name)
- opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name)
- if opts is not None:
- for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
- opts.setdefault(name, hasattr(method, name))
- return opts
-
- def parse_hookspec_opts(self, module_or_class, name):
- opts = super(PytestPluginManager, self).parse_hookspec_opts(
- module_or_class, name)
- if opts is None:
- method = getattr(module_or_class, name)
- if name.startswith("pytest_"):
- opts = {"firstresult": hasattr(method, "firstresult"),
- "historic": hasattr(method, "historic")}
- return opts
-
- def _verify_hook(self, hook, hookmethod):
- super(PytestPluginManager, self)._verify_hook(hook, hookmethod)
- if "__multicall__" in hookmethod.argnames:
- fslineno = _pytest._code.getfslineno(hookmethod.function)
- warning = dict(code="I1",
- fslocation=fslineno,
- nodeid=None,
- message="%r hook uses deprecated __multicall__ "
- "argument" % (hook.name))
- self._warn(warning)
-
- def register(self, plugin, name=None):
- ret = super(PytestPluginManager, self).register(plugin, name)
- if ret:
- self.hook.pytest_plugin_registered.call_historic(
- kwargs=dict(plugin=plugin, manager=self))
-
- if isinstance(plugin, types.ModuleType):
- self.consider_module(plugin)
- return ret
-
- def getplugin(self, name):
- # support deprecated naming because plugins (xdist e.g.) use it
- return self.get_plugin(name)
-
- def hasplugin(self, name):
- """Return True if the plugin with the given name is registered."""
- return bool(self.get_plugin(name))
-
- def pytest_configure(self, config):
- # XXX now that the pluginmanager exposes hookimpl(tryfirst...)
- # we should remove tryfirst/trylast as markers
- config.addinivalue_line("markers",
- "tryfirst: mark a hook implementation function such that the "
- "plugin machinery will try to call it first/as early as possible.")
- config.addinivalue_line("markers",
- "trylast: mark a hook implementation function such that the "
- "plugin machinery will try to call it last/as late as possible.")
-
- def _warn(self, message):
- kwargs = message if isinstance(message, dict) else {
- 'code': 'I1',
- 'message': message,
- 'fslocation': None,
- 'nodeid': None,
- }
- self.hook.pytest_logwarning.call_historic(kwargs=kwargs)
-
- #
- # internal API for local conftest plugin handling
- #
- def _set_initial_conftests(self, namespace):
- """ load initial conftest files given a preparsed "namespace".
- As conftest files may add their own command line options
- which have arguments ('--my-opt somepath') we might get some
- false positives. All builtin and 3rd party plugins will have
- been loaded, however, so common options will not confuse our logic
- here.
- """
- current = py.path.local()
- self._confcutdir = current.join(namespace.confcutdir, abs=True) \
- if namespace.confcutdir else None
- self._noconftest = namespace.noconftest
- testpaths = namespace.file_or_dir
- foundanchor = False
- for path in testpaths:
- path = str(path)
- # remove node-id syntax
- i = path.find("::")
- if i != -1:
- path = path[:i]
- anchor = current.join(path, abs=1)
- if exists(anchor): # we found some file object
- self._try_load_conftest(anchor)
- foundanchor = True
- if not foundanchor:
- self._try_load_conftest(current)
-
- def _try_load_conftest(self, anchor):
- self._getconftestmodules(anchor)
- # let's also consider test* subdirs
- if anchor.check(dir=1):
- for x in anchor.listdir("test*"):
- if x.check(dir=1):
- self._getconftestmodules(x)
-
- def _getconftestmodules(self, path):
- if self._noconftest:
- return []
- try:
- return self._path2confmods[path]
- except KeyError:
- if path.isfile():
- clist = self._getconftestmodules(path.dirpath())
- else:
- # XXX these days we may rather want to use config.rootdir
- # and allow users to opt into looking into the rootdir parent
- # directories instead of requiring to specify confcutdir
- clist = []
- for parent in path.parts():
- if self._confcutdir and self._confcutdir.relto(parent):
- continue
- conftestpath = parent.join("conftest.py")
- if conftestpath.isfile():
- mod = self._importconftest(conftestpath)
- clist.append(mod)
-
- self._path2confmods[path] = clist
- return clist
-
- def _rget_with_confmod(self, name, path):
- modules = self._getconftestmodules(path)
- for mod in reversed(modules):
- try:
- return mod, getattr(mod, name)
- except AttributeError:
- continue
- raise KeyError(name)
-
- def _importconftest(self, conftestpath):
- try:
- return self._conftestpath2mod[conftestpath]
- except KeyError:
- pkgpath = conftestpath.pypkgpath()
- if pkgpath is None:
- _ensure_removed_sysmodule(conftestpath.purebasename)
- try:
- mod = conftestpath.pyimport()
- except Exception:
- raise ConftestImportFailure(conftestpath, sys.exc_info())
-
- self._conftest_plugins.add(mod)
- self._conftestpath2mod[conftestpath] = mod
- dirpath = conftestpath.dirpath()
- if dirpath in self._path2confmods:
- for path, mods in self._path2confmods.items():
- if path and path.relto(dirpath) or path == dirpath:
- assert mod not in mods
- mods.append(mod)
- self.trace("loaded conftestmodule %r" % (mod))
- self.consider_conftest(mod)
- return mod
-
- #
- # API for bootstrapping plugin loading
- #
- #
-
- def consider_preparse(self, args):
- for opt1, opt2 in zip(args, args[1:]):
- if opt1 == "-p":
- self.consider_pluginarg(opt2)
-
- def consider_pluginarg(self, arg):
- if arg.startswith("no:"):
- name = arg[3:]
- self.set_blocked(name)
- if not name.startswith("pytest_"):
- self.set_blocked("pytest_" + name)
- else:
- self.import_plugin(arg)
-
- def consider_conftest(self, conftestmodule):
- self.register(conftestmodule, name=conftestmodule.__file__)
-
- def consider_env(self):
- self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
-
- def consider_module(self, mod):
- self._import_plugin_specs(getattr(mod, 'pytest_plugins', []))
-
- def _import_plugin_specs(self, spec):
- plugins = _get_plugin_specs_as_list(spec)
- for import_spec in plugins:
- self.import_plugin(import_spec)
-
- def import_plugin(self, modname):
- # most often modname refers to builtin modules, e.g. "pytester",
- # "terminal" or "capture". Those plugins are registered under their
- # basename for historic purposes but must be imported with the
- # _pytest prefix.
- assert isinstance(modname, (py.builtin.text, str)), "module name as text required, got %r" % modname
- modname = str(modname)
- if self.get_plugin(modname) is not None:
- return
- if modname in builtin_plugins:
- importspec = "_pytest." + modname
- else:
- importspec = modname
- self.rewrite_hook.mark_rewrite(importspec)
- try:
- __import__(importspec)
- except ImportError as e:
- new_exc = ImportError('Error importing plugin "%s": %s' % (modname, safe_str(e.args[0])))
- # copy over name and path attributes
- for attr in ('name', 'path'):
- if hasattr(e, attr):
- setattr(new_exc, attr, getattr(e, attr))
- raise new_exc
- except Exception as e:
- import pytest
- if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception):
- raise
- self._warn("skipped plugin %r: %s" % ((modname, e.msg)))
- else:
- mod = sys.modules[importspec]
- self.register(mod, modname)
-
-
-def _get_plugin_specs_as_list(specs):
- """
- Parses a list of "plugin specs" and returns a list of plugin names.
-
-    Plugin specs can be given as a comma-separated string of plugin names or already as a list/tuple, in
-    which case it is returned as a list. Specs can also be `None`, in which case an
- empty list is returned.
- """
- if specs is not None:
- if isinstance(specs, str):
- specs = specs.split(',') if specs else []
- if not isinstance(specs, (list, tuple)):
- raise UsageError("Plugin specs must be a ','-separated string or a "
- "list/tuple of strings for plugin names. Given: %r" % specs)
- return list(specs)
- return []
-
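For illustration (not the deleted code): the plugin-spec shapes accepted above, mirrored with plain Python.

def specs_as_list(specs):
    # simplified mirror of _get_plugin_specs_as_list above
    if specs is None:
        return []
    if isinstance(specs, str):
        return specs.split(",") if specs else []
    return list(specs)

assert specs_as_list(None) == []
assert specs_as_list("xdist,cov") == ["xdist", "cov"]
assert specs_as_list(("xdist",)) == ["xdist"]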
-
-class Parser:
- """ Parser for command line arguments and ini-file values.
-
- :ivar extra_info: dict of generic param -> value to display in case
- there's an error processing the command line arguments.
- """
-
- def __init__(self, usage=None, processopt=None):
- self._anonymous = OptionGroup("custom options", parser=self)
- self._groups = []
- self._processopt = processopt
- self._usage = usage
- self._inidict = {}
- self._ininames = []
- self.extra_info = {}
-
- def processoption(self, option):
- if self._processopt:
- if option.dest:
- self._processopt(option)
-
- def getgroup(self, name, description="", after=None):
- """ get (or create) a named option Group.
-
- :name: name of the option group.
- :description: long description for --help output.
- :after: name of other group, used for ordering --help output.
-
- The returned group object has an ``addoption`` method with the same
- signature as :py:func:`parser.addoption
- <_pytest.config.Parser.addoption>` but will be shown in the
-        respective group in the output of ``pytest --help``.
- """
- for group in self._groups:
- if group.name == name:
- return group
- group = OptionGroup(name, description, parser=self)
- i = 0
- for i, grp in enumerate(self._groups):
- if grp.name == after:
- break
- self._groups.insert(i + 1, group)
- return group
-
- def addoption(self, *opts, **attrs):
- """ register a command line option.
-
- :opts: option names, can be short or long options.
- :attrs: same attributes which the ``add_option()`` function of the
- `argparse library
- <http://docs.python.org/2/library/argparse.html>`_
- accepts.
-
- After command line parsing options are available on the pytest config
- object via ``config.option.NAME`` where ``NAME`` is usually set
- by passing a ``dest`` attribute, for example
- ``addoption("--long", dest="NAME", ...)``.
- """
- self._anonymous.addoption(*opts, **attrs)
-
- def parse(self, args, namespace=None):
- from _pytest._argcomplete import try_argcomplete
- self.optparser = self._getparser()
- try_argcomplete(self.optparser)
- return self.optparser.parse_args([str(x) for x in args], namespace=namespace)
-
- def _getparser(self):
- from _pytest._argcomplete import filescompleter
- optparser = MyOptionParser(self, self.extra_info)
- groups = self._groups + [self._anonymous]
- for group in groups:
- if group.options:
- desc = group.description or group.name
- arggroup = optparser.add_argument_group(desc)
- for option in group.options:
- n = option.names()
- a = option.attrs()
- arggroup.add_argument(*n, **a)
- # bash like autocompletion for dirs (appending '/')
- optparser.add_argument(FILE_OR_DIR, nargs='*').completer = filescompleter
- return optparser
-
- def parse_setoption(self, args, option, namespace=None):
- parsedoption = self.parse(args, namespace=namespace)
- for name, value in parsedoption.__dict__.items():
- setattr(option, name, value)
- return getattr(parsedoption, FILE_OR_DIR)
-
- def parse_known_args(self, args, namespace=None):
- """parses and returns a namespace object with known arguments at this
- point.
- """
- return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
-
- def parse_known_and_unknown_args(self, args, namespace=None):
- """parses and returns a namespace object with known arguments, and
- the remaining arguments unknown at this point.
- """
- optparser = self._getparser()
- args = [str(x) for x in args]
- return optparser.parse_known_args(args, namespace=namespace)
-
- def addini(self, name, help, type=None, default=None):
- """ register an ini-file option.
-
- :name: name of the ini-variable
- :type: type of the variable, can be ``pathlist``, ``args``, ``linelist``
- or ``bool``.
- :default: default value if no ini-file option exists but is queried.
-
- The value of ini-variables can be retrieved via a call to
- :py:func:`config.getini(name) <_pytest.config.Config.getini>`.
- """
- assert type in (None, "pathlist", "args", "linelist", "bool")
- self._inidict[name] = (help, type, default)
- self._ininames.append(name)
-
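For illustration (not part of the deleted file): how a plugin's conftest.py typically registers a command line option and an ini value through the Parser above and later reads them back.

# conftest.py (hypothetical plugin code)
def pytest_addoption(parser):
    parser.addoption("--runslow", action="store_true", default=False,
                     help="run slow tests")
    parser.addini("timeout_secs", "per-test timeout in seconds", default="30")

def pytest_configure(config):
    run_slow = config.getoption("runslow")
    timeout = int(config.getini("timeout_secs"))
    print("runslow=%s timeout=%s" % (run_slow, timeout))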
-
-class ArgumentError(Exception):
- """
- Raised if an Argument instance is created with invalid or
- inconsistent arguments.
- """
-
- def __init__(self, msg, option):
- self.msg = msg
- self.option_id = str(option)
-
- def __str__(self):
- if self.option_id:
- return "option %s: %s" % (self.option_id, self.msg)
- else:
- return self.msg
-
-
-class Argument:
- """class that mimics the necessary behaviour of optparse.Option
-
-    it's currently a least-effort implementation
-    that ignores choices and integer prefixes
- https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
- """
- _typ_map = {
- 'int': int,
- 'string': str,
- 'float': float,
- 'complex': complex,
- }
-
- def __init__(self, *names, **attrs):
- """store parms in private vars for use in add_argument"""
- self._attrs = attrs
- self._short_opts = []
- self._long_opts = []
- self.dest = attrs.get('dest')
- if '%default' in (attrs.get('help') or ''):
- warnings.warn(
- 'pytest now uses argparse. "%default" should be'
- ' changed to "%(default)s" ',
- DeprecationWarning,
- stacklevel=3)
- try:
- typ = attrs['type']
- except KeyError:
- pass
- else:
- # this might raise a keyerror as well, don't want to catch that
- if isinstance(typ, py.builtin._basestring):
- if typ == 'choice':
- warnings.warn(
- 'type argument to addoption() is a string %r.'
- ' For parsearg this is optional and when supplied'
- ' should be a type.'
- ' (options: %s)' % (typ, names),
- DeprecationWarning,
- stacklevel=3)
- # argparse expects a type here take it from
- # the type of the first element
- attrs['type'] = type(attrs['choices'][0])
- else:
- warnings.warn(
- 'type argument to addoption() is a string %r.'
- ' For parsearg this should be a type.'
- ' (options: %s)' % (typ, names),
- DeprecationWarning,
- stacklevel=3)
- attrs['type'] = Argument._typ_map[typ]
- # used in test_parseopt -> test_parse_defaultgetter
- self.type = attrs['type']
- else:
- self.type = typ
- try:
- # attribute existence is tested in Config._processopt
- self.default = attrs['default']
- except KeyError:
- pass
- self._set_opt_strings(names)
- if not self.dest:
- if self._long_opts:
- self.dest = self._long_opts[0][2:].replace('-', '_')
- else:
- try:
- self.dest = self._short_opts[0][1:]
- except IndexError:
- raise ArgumentError(
- 'need a long or short option', self)
-
- def names(self):
- return self._short_opts + self._long_opts
-
- def attrs(self):
- # update any attributes set by processopt
- attrs = 'default dest help'.split()
- if self.dest:
- attrs.append(self.dest)
- for attr in attrs:
- try:
- self._attrs[attr] = getattr(self, attr)
- except AttributeError:
- pass
- if self._attrs.get('help'):
- a = self._attrs['help']
- a = a.replace('%default', '%(default)s')
- # a = a.replace('%prog', '%(prog)s')
- self._attrs['help'] = a
- return self._attrs
-
- def _set_opt_strings(self, opts):
- """directly from optparse
-
- might not be necessary as this is passed to argparse later on"""
- for opt in opts:
- if len(opt) < 2:
- raise ArgumentError(
- "invalid option string %r: "
- "must be at least two characters long" % opt, self)
- elif len(opt) == 2:
- if not (opt[0] == "-" and opt[1] != "-"):
- raise ArgumentError(
- "invalid short option string %r: "
- "must be of the form -x, (x any non-dash char)" % opt,
- self)
- self._short_opts.append(opt)
- else:
- if not (opt[0:2] == "--" and opt[2] != "-"):
- raise ArgumentError(
- "invalid long option string %r: "
- "must start with --, followed by non-dash" % opt,
- self)
- self._long_opts.append(opt)
-
- def __repr__(self):
- args = []
- if self._short_opts:
- args += ['_short_opts: ' + repr(self._short_opts)]
- if self._long_opts:
- args += ['_long_opts: ' + repr(self._long_opts)]
- args += ['dest: ' + repr(self.dest)]
- if hasattr(self, 'type'):
- args += ['type: ' + repr(self.type)]
- if hasattr(self, 'default'):
- args += ['default: ' + repr(self.default)]
- return 'Argument({0})'.format(', '.join(args))
-
-
-class OptionGroup:
- def __init__(self, name, description="", parser=None):
- self.name = name
- self.description = description
- self.options = []
- self.parser = parser
-
- def addoption(self, *optnames, **attrs):
- """ add an option to this group.
-
- if a shortened version of a long option is specified it will
- be suppressed in the help. addoption('--twowords', '--two-words')
- results in help showing '--two-words' only, but --twowords gets
- accepted **and** the automatic destination is in args.twowords
- """
- conflict = set(optnames).intersection(
- name for opt in self.options for name in opt.names())
- if conflict:
- raise ValueError("option names %s already added" % conflict)
- option = Argument(*optnames, **attrs)
- self._addoption_instance(option, shortupper=False)
-
- def _addoption(self, *optnames, **attrs):
- option = Argument(*optnames, **attrs)
- self._addoption_instance(option, shortupper=True)
-
- def _addoption_instance(self, option, shortupper=False):
- if not shortupper:
- for opt in option._short_opts:
- if opt[0] == '-' and opt[1].islower():
- raise ValueError("lowercase shortoptions reserved")
- if self.parser:
- self.parser.processoption(option)
- self.options.append(option)
-
-
-class MyOptionParser(argparse.ArgumentParser):
- def __init__(self, parser, extra_info=None):
- if not extra_info:
- extra_info = {}
- self._parser = parser
- argparse.ArgumentParser.__init__(self, usage=parser._usage,
- add_help=False, formatter_class=DropShorterLongHelpFormatter)
- # extra_info is a dict of (param -> value) to display if there's
-        # a usage error, to provide more contextual information to the user
- self.extra_info = extra_info
-
- def parse_args(self, args=None, namespace=None):
- """allow splitting of positional arguments"""
- args, argv = self.parse_known_args(args, namespace)
- if argv:
- for arg in argv:
- if arg and arg[0] == '-':
- lines = ['unrecognized arguments: %s' % (' '.join(argv))]
- for k, v in sorted(self.extra_info.items()):
- lines.append(' %s: %s' % (k, v))
- self.error('\n'.join(lines))
- getattr(args, FILE_OR_DIR).extend(argv)
- return args
-
-
-class DropShorterLongHelpFormatter(argparse.HelpFormatter):
- """shorten help for long options that differ only in extra hyphens
-
- - collapse **long** options that are the same except for extra hyphens
-    - special action attribute map_long_option allows suppressing additional
- long options
- - shortcut if there are only two options and one of them is a short one
- - cache result on action object as this is called at least 2 times
- """
-
- def _format_action_invocation(self, action):
- orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
- if orgstr and orgstr[0] != '-': # only optional arguments
- return orgstr
- res = getattr(action, '_formatted_action_invocation', None)
- if res:
- return res
- options = orgstr.split(', ')
- if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
- # a shortcut for '-h, --help' or '--abc', '-a'
- action._formatted_action_invocation = orgstr
- return orgstr
- return_list = []
- option_map = getattr(action, 'map_long_option', {})
- if option_map is None:
- option_map = {}
- short_long = {}
- for option in options:
- if len(option) == 2 or option[2] == ' ':
- continue
- if not option.startswith('--'):
- raise ArgumentError('long optional argument without "--": [%s]'
- % (option), self)
- xxoption = option[2:]
- if xxoption.split()[0] not in option_map:
- shortened = xxoption.replace('-', '')
- if shortened not in short_long or \
- len(short_long[shortened]) < len(xxoption):
- short_long[shortened] = xxoption
- # now short_long has been filled out to the longest with dashes
- # **and** we keep the right option ordering from add_argument
- for option in options:
- if len(option) == 2 or option[2] == ' ':
- return_list.append(option)
- if option[2:] == short_long.get(option.replace('-', '')):
- return_list.append(option.replace(' ', '=', 1))
- action._formatted_action_invocation = ', '.join(return_list)
- return action._formatted_action_invocation
-
-
-def _ensure_removed_sysmodule(modname):
- try:
- del sys.modules[modname]
- except KeyError:
- pass
-
-
-class CmdOptions(object):
- """ holds cmdline options as attributes."""
-
- def __init__(self, values=()):
- self.__dict__.update(values)
-
- def __repr__(self):
- return "<CmdOptions %r>" % (self.__dict__,)
-
- def copy(self):
- return CmdOptions(self.__dict__)
-
-
-class Notset:
- def __repr__(self):
- return "<NOTSET>"
-
-
-notset = Notset()
-FILE_OR_DIR = 'file_or_dir'
-
-
-def _iter_rewritable_modules(package_files):
- for fn in package_files:
- is_simple_module = '/' not in fn and fn.endswith('.py')
- is_package = fn.count('/') == 1 and fn.endswith('__init__.py')
- if is_simple_module:
- module_name, _ = os.path.splitext(fn)
- yield module_name
- elif is_package:
- package_name = os.path.dirname(fn)
- yield package_name
-
-
-class Config(object):
- """ access to configuration values, pluginmanager and plugin hooks. """
-
- def __init__(self, pluginmanager):
- #: access to command line option as attributes.
- #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead
- self.option = CmdOptions()
- _a = FILE_OR_DIR
- self._parser = Parser(
- usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a),
- processopt=self._processopt,
- )
- #: a pluginmanager instance
- self.pluginmanager = pluginmanager
- self.trace = self.pluginmanager.trace.root.get("config")
- self.hook = self.pluginmanager.hook
- self._inicache = {}
- self._override_ini = ()
- self._opt2dest = {}
- self._cleanup = []
- self._warn = self.pluginmanager._warn
- self.pluginmanager.register(self, "pytestconfig")
- self._configured = False
-
- def do_setns(dic):
- import pytest
- setns(pytest, dic)
-
- self.hook.pytest_namespace.call_historic(do_setns, {})
- self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser))
-
- def add_cleanup(self, func):
- """ Add a function to be called when the config object gets out of
-        use (usually coinciding with pytest_unconfigure)."""
- self._cleanup.append(func)
-
- def _do_configure(self):
- assert not self._configured
- self._configured = True
- self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
-
- def _ensure_unconfigure(self):
- if self._configured:
- self._configured = False
- self.hook.pytest_unconfigure(config=self)
- self.hook.pytest_configure._call_history = []
- while self._cleanup:
- fin = self._cleanup.pop()
- fin()
-
- def warn(self, code, message, fslocation=None, nodeid=None):
- """ generate a warning for this test session. """
- self.hook.pytest_logwarning.call_historic(kwargs=dict(
- code=code, message=message,
- fslocation=fslocation, nodeid=nodeid))
-
- def get_terminal_writer(self):
- return self.pluginmanager.get_plugin("terminalreporter")._tw
-
- def pytest_cmdline_parse(self, pluginmanager, args):
- # REF1 assert self == pluginmanager.config, (self, pluginmanager.config)
- self.parse(args)
- return self
-
- def notify_exception(self, excinfo, option=None):
- if option and option.fulltrace:
- style = "long"
- else:
- style = "native"
- excrepr = excinfo.getrepr(funcargs=True,
- showlocals=getattr(option, 'showlocals', False),
- style=style,
- )
- res = self.hook.pytest_internalerror(excrepr=excrepr,
- excinfo=excinfo)
- if not py.builtin.any(res):
- for line in str(excrepr).split("\n"):
- sys.stderr.write("INTERNALERROR> %s\n" % line)
- sys.stderr.flush()
-
- def cwd_relative_nodeid(self, nodeid):
- # nodeid's are relative to the rootpath, compute relative to cwd
- if self.invocation_dir != self.rootdir:
- fullpath = self.rootdir.join(nodeid)
- nodeid = self.invocation_dir.bestrelpath(fullpath)
- return nodeid
-
- @classmethod
- def fromdictargs(cls, option_dict, args):
- """ constructor useable for subprocesses. """
- config = get_config()
- config.option.__dict__.update(option_dict)
- config.parse(args, addopts=False)
- for x in config.option.plugins:
- config.pluginmanager.consider_pluginarg(x)
- return config
-
- def _processopt(self, opt):
- for name in opt._short_opts + opt._long_opts:
- self._opt2dest[name] = opt.dest
-
- if hasattr(opt, 'default') and opt.dest:
- if not hasattr(self.option, opt.dest):
- setattr(self.option, opt.dest, opt.default)
-
- @hookimpl(trylast=True)
- def pytest_load_initial_conftests(self, early_config):
- self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)
-
- def _initini(self, args):
- ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=self.option.copy())
- r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args, warnfunc=self.warn)
- self.rootdir, self.inifile, self.inicfg = r
- self._parser.extra_info['rootdir'] = self.rootdir
- self._parser.extra_info['inifile'] = self.inifile
- self.invocation_dir = py.path.local()
- self._parser.addini('addopts', 'extra command line options', 'args')
- self._parser.addini('minversion', 'minimally required pytest version')
- self._override_ini = ns.override_ini or ()
-
- def _consider_importhook(self, args):
- """Install the PEP 302 import hook if using assertion re-writing.
-
- Needs to parse the --assert=<mode> option from the commandline
- and find all the installed plugins to mark them for re-writing
- by the importhook.
- """
- ns, unknown_args = self._parser.parse_known_and_unknown_args(args)
- mode = ns.assertmode
- if mode == 'rewrite':
- try:
- hook = _pytest.assertion.install_importhook(self)
- except SystemError:
- mode = 'plain'
- else:
- # REMOVED FOR SPACK: This routine imports `pkg_resources` from
- # `setuptools`, but we do not need it for Spack. We have removed
- # it from Spack to avoid a dependency on setuptools.
- # self._mark_plugins_for_rewrite(hook)
- pass
- self._warn_about_missing_assertion(mode)
-
- def _warn_about_missing_assertion(self, mode):
- try:
- assert False
- except AssertionError:
- pass
- else:
- if mode == 'plain':
- sys.stderr.write("WARNING: ASSERTIONS ARE NOT EXECUTED"
- " and FAILING TESTS WILL PASS. Are you"
- " using python -O?")
- else:
- sys.stderr.write("WARNING: assertions not in test modules or"
- " plugins will be ignored"
- " because assert statements are not executed "
- "by the underlying Python interpreter "
- "(are you using python -O?)\n")
-
- def _preparse(self, args, addopts=True):
- self._initini(args)
- if addopts:
- args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args
- args[:] = self.getini("addopts") + args
- self._checkversion()
- self._consider_importhook(args)
- self.pluginmanager.consider_preparse(args)
-
- # REMOVED FOR SPACK: This routine imports `pkg_resources` from
- # `setuptools`, but we do not need it for Spack. We have removed
- # it from Spack to avoid a dependency on setuptools.
- # self.pluginmanager.load_setuptools_entrypoints('pytest11')
-
- self.pluginmanager.consider_env()
- self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy())
- if self.known_args_namespace.confcutdir is None and self.inifile:
- confcutdir = py.path.local(self.inifile).dirname
- self.known_args_namespace.confcutdir = confcutdir
- try:
- self.hook.pytest_load_initial_conftests(early_config=self,
- args=args, parser=self._parser)
- except ConftestImportFailure:
- e = sys.exc_info()[1]
- if ns.help or ns.version:
-                # we don't want to prevent --help/--version from working,
-                # so just let it pass and print a warning at the end
- self._warn("could not load initial conftests (%s)\n" % e.path)
- else:
- raise
-
- def _checkversion(self):
- import pytest
- minver = self.inicfg.get('minversion', None)
- if minver:
- ver = minver.split(".")
- myver = pytest.__version__.split(".")
- if myver < ver:
- raise pytest.UsageError(
-                    "%s:%d: requires pytest-%s, actual pytest-%s" % (
- self.inicfg.config.path, self.inicfg.lineof('minversion'),
- minver, pytest.__version__))
-
- def parse(self, args, addopts=True):
- # parse given cmdline arguments into this config object.
- assert not hasattr(self, 'args'), (
- "can only parse cmdline args at most once per Config object")
- self._origargs = args
- self.hook.pytest_addhooks.call_historic(
- kwargs=dict(pluginmanager=self.pluginmanager))
- self._preparse(args, addopts=addopts)
- # XXX deprecated hook:
- self.hook.pytest_cmdline_preparse(config=self, args=args)
- self._parser.after_preparse = True
- try:
- args = self._parser.parse_setoption(args, self.option, namespace=self.option)
- if not args:
- cwd = os.getcwd()
- if cwd == self.rootdir:
- args = self.getini('testpaths')
- if not args:
- args = [cwd]
- self.args = args
- except PrintHelp:
- pass
-
- def addinivalue_line(self, name, line):
- """ add a line to an ini-file option. The option must have been
-        declared but might not yet be set, in which case the line becomes
-        the first line in its value. """
- x = self.getini(name)
- assert isinstance(x, list)
- x.append(line) # modifies the cached list inline
-
- def getini(self, name):
- """ return configuration value from an :ref:`ini file <inifiles>`. If the
- specified name hasn't been registered through a prior
- :py:func:`parser.addini <_pytest.config.Parser.addini>`
- call (usually from a plugin), a ValueError is raised. """
- try:
- return self._inicache[name]
- except KeyError:
- self._inicache[name] = val = self._getini(name)
- return val
-
- def _getini(self, name):
- try:
- description, type, default = self._parser._inidict[name]
- except KeyError:
- raise ValueError("unknown configuration value: %r" % (name,))
- value = self._get_override_ini_value(name)
- if value is None:
- try:
- value = self.inicfg[name]
- except KeyError:
- if default is not None:
- return default
- if type is None:
- return ''
- return []
- if type == "pathlist":
- dp = py.path.local(self.inicfg.config.path).dirpath()
- values = []
- for relpath in shlex.split(value):
- values.append(dp.join(relpath, abs=True))
- return values
- elif type == "args":
- return shlex.split(value)
- elif type == "linelist":
- return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
- elif type == "bool":
- return bool(_strtobool(value.strip()))
- else:
- assert type is None
- return value
-
- def _getconftest_pathlist(self, name, path):
- try:
- mod, relroots = self.pluginmanager._rget_with_confmod(name, path)
- except KeyError:
- return None
- modpath = py.path.local(mod.__file__).dirpath()
- values = []
- for relroot in relroots:
- if not isinstance(relroot, py.path.local):
- relroot = relroot.replace("/", py.path.local.sep)
- relroot = modpath.join(relroot, abs=True)
- values.append(relroot)
- return values
-
- def _get_override_ini_value(self, name):
- value = None
-        # override_ini is a list of lists, to support both
-        # -o foo1=bar1 foo2=bar2 and -o foo1=bar1 -o foo2=bar2 options
- # always use the last item if multiple value set for same ini-name,
- # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2
- for ini_config_list in self._override_ini:
- for ini_config in ini_config_list:
- try:
- (key, user_ini_value) = ini_config.split("=", 1)
- except ValueError:
- raise UsageError("-o/--override-ini expects option=value style.")
- if key == name:
- value = user_ini_value
- return value
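The lookup above keeps only the last value seen for a given key, so later -o flags win. A minimal standalone sketch of that behaviour (resolve_override is an invented name, not part of this module):

    def resolve_override(override_ini, name):
        value = None
        for group in override_ini:            # one list per -o/--override-ini flag
            for entry in group:
                key, user_value = entry.split("=", 1)
                if key == name:
                    value = user_value        # later occurrences overwrite earlier ones
        return value

    # "-o foo=bar1 -o foo=bar2" arrives as [["foo=bar1"], ["foo=bar2"]]
    assert resolve_override([["foo=bar1"], ["foo=bar2"]], "foo") == "bar2"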
-
- def getoption(self, name, default=notset, skip=False):
- """ return command line option value.
-
- :arg name: name of the option. You may also specify
- the literal ``--OPT`` option instead of the "dest" option name.
- :arg default: default value if no option of that name exists.
-        :arg skip: if True, raise pytest.skip if the option does not exist
- or has a None value.
- """
- name = self._opt2dest.get(name, name)
- try:
- val = getattr(self.option, name)
- if val is None and skip:
- raise AttributeError(name)
- return val
- except AttributeError:
- if default is not notset:
- return default
- if skip:
- import pytest
- pytest.skip("no %r option found" % (name,))
- raise ValueError("no option named %r" % (name,))
-
- def getvalue(self, name, path=None):
- """ (deprecated, use getoption()) """
- return self.getoption(name)
-
- def getvalueorskip(self, name, path=None):
- """ (deprecated, use getoption(skip=True)) """
- return self.getoption(name, skip=True)
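For context, a hedged sketch of how plugin code typically consumes these accessors from a conftest.py hook; the specific option and ini names are only examples:

    def pytest_configure(config):
        # command line option by its "dest" name, with a fallback default
        verbosity = config.getoption("verbose", default=0)
        # ini value registered via parser.addini("addopts", ..., "args")
        extra_opts = config.getini("addopts")
        print(verbosity, extra_opts)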
-
-
-def exists(path, ignore=EnvironmentError):
- try:
- return path.check()
- except ignore:
- return False
-
-
-def getcfg(args, warnfunc=None):
- """
- Search the list of arguments for a valid ini-file for pytest,
- and return a tuple of (rootdir, inifile, cfg-dict).
-
- note: warnfunc is an optional function used to warn
- about ini-files that use deprecated features.
- This parameter should be removed when pytest
- adopts standard deprecation warnings (#1804).
- """
- from _pytest.deprecated import SETUP_CFG_PYTEST
- inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"]
- args = [x for x in args if not str(x).startswith("-")]
- if not args:
- args = [py.path.local()]
- for arg in args:
- arg = py.path.local(arg)
- for base in arg.parts(reverse=True):
- for inibasename in inibasenames:
- p = base.join(inibasename)
- if exists(p):
- iniconfig = py.iniconfig.IniConfig(p)
- if 'pytest' in iniconfig.sections:
- if inibasename == 'setup.cfg' and warnfunc:
- warnfunc('C1', SETUP_CFG_PYTEST)
- return base, p, iniconfig['pytest']
- if inibasename == 'setup.cfg' and 'tool:pytest' in iniconfig.sections:
- return base, p, iniconfig['tool:pytest']
- elif inibasename == "pytest.ini":
- # allowed to be empty
- return base, p, {}
- return None, None, None
-
-
-def get_common_ancestor(paths):
- common_ancestor = None
- for path in paths:
- if not path.exists():
- continue
- if common_ancestor is None:
- common_ancestor = path
- else:
- if path.relto(common_ancestor) or path == common_ancestor:
- continue
- elif common_ancestor.relto(path):
- common_ancestor = path
- else:
- shared = path.common(common_ancestor)
- if shared is not None:
- common_ancestor = shared
- if common_ancestor is None:
- common_ancestor = py.path.local()
- elif common_ancestor.isfile():
- common_ancestor = common_ancestor.dirpath()
- return common_ancestor
-
-
-def get_dirs_from_args(args):
- def is_option(x):
- return str(x).startswith('-')
-
- def get_file_part_from_node_id(x):
- return str(x).split('::')[0]
-
- def get_dir_from_path(path):
- if path.isdir():
- return path
- return py.path.local(path.dirname)
-
- # These look like paths but may not exist
- possible_paths = (
- py.path.local(get_file_part_from_node_id(arg))
- for arg in args
- if not is_option(arg)
- )
-
- return [
- get_dir_from_path(path)
- for path in possible_paths
- if path.exists()
- ]
-
-
-def determine_setup(inifile, args, warnfunc=None):
- dirs = get_dirs_from_args(args)
- if inifile:
- iniconfig = py.iniconfig.IniConfig(inifile)
- try:
- inicfg = iniconfig["pytest"]
- except KeyError:
- inicfg = None
- rootdir = get_common_ancestor(dirs)
- else:
- ancestor = get_common_ancestor(dirs)
- rootdir, inifile, inicfg = getcfg([ancestor], warnfunc=warnfunc)
- if rootdir is None:
- for rootdir in ancestor.parts(reverse=True):
- if rootdir.join("setup.py").exists():
- break
- else:
- rootdir, inifile, inicfg = getcfg(dirs, warnfunc=warnfunc)
- if rootdir is None:
- rootdir = get_common_ancestor([py.path.local(), ancestor])
- is_fs_root = os.path.splitdrive(str(rootdir))[1] == '/'
- if is_fs_root:
- rootdir = ancestor
- return rootdir, inifile, inicfg or {}
-
-
-def setns(obj, dic):
- import pytest
- for name, value in dic.items():
- if isinstance(value, dict):
- mod = getattr(obj, name, None)
- if mod is None:
- modname = "pytest.%s" % name
- mod = types.ModuleType(modname)
- sys.modules[modname] = mod
- mod.__all__ = []
- setattr(obj, name, mod)
- obj.__all__.append(name)
- setns(mod, value)
- else:
- setattr(obj, name, value)
- obj.__all__.append(name)
- # if obj != pytest:
- # pytest.__all__.append(name)
- setattr(pytest, name, value)
-
-
-def create_terminal_writer(config, *args, **kwargs):
- """Create a TerminalWriter instance configured according to the options
-    in the config object. Any code that requires a TerminalWriter object
-    and has access to a config object should use this function.
- """
- tw = py.io.TerminalWriter(*args, **kwargs)
- if config.option.color == 'yes':
- tw.hasmarkup = True
- if config.option.color == 'no':
- tw.hasmarkup = False
- return tw
-
-
-def _strtobool(val):
- """Convert a string representation of truth to true (1) or false (0).
-
- True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
- are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
- 'val' is anything else.
-
- .. note:: copied from distutils.util
- """
- val = val.lower()
- if val in ('y', 'yes', 't', 'true', 'on', '1'):
- return 1
- elif val in ('n', 'no', 'f', 'false', 'off', '0'):
- return 0
- else:
- raise ValueError("invalid truth value %r" % (val,))
diff --git a/lib/spack/external/pytest-fallback/_pytest/debugging.py b/lib/spack/external/pytest-fallback/_pytest/debugging.py
deleted file mode 100644
index aa9c9a3863..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/debugging.py
+++ /dev/null
@@ -1,123 +0,0 @@
-""" interactive debugging with PDB, the Python Debugger. """
-from __future__ import absolute_import, division, print_function
-import pdb
-import sys
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("general")
- group._addoption(
- '--pdb', dest="usepdb", action="store_true",
- help="start the interactive Python debugger on errors.")
- group._addoption(
- '--pdbcls', dest="usepdb_cls", metavar="modulename:classname",
- help="start a custom interactive Python debugger on errors. "
- "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb")
-
-
-def pytest_configure(config):
- if config.getvalue("usepdb_cls"):
- modname, classname = config.getvalue("usepdb_cls").split(":")
- __import__(modname)
- pdb_cls = getattr(sys.modules[modname], classname)
- else:
- pdb_cls = pdb.Pdb
-
- if config.getvalue("usepdb"):
- config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
-
- old = (pdb.set_trace, pytestPDB._pluginmanager)
-
- def fin():
- pdb.set_trace, pytestPDB._pluginmanager = old
- pytestPDB._config = None
- pytestPDB._pdb_cls = pdb.Pdb
-
- pdb.set_trace = pytestPDB.set_trace
- pytestPDB._pluginmanager = config.pluginmanager
- pytestPDB._config = config
- pytestPDB._pdb_cls = pdb_cls
- config._cleanup.append(fin)
-
-
-class pytestPDB:
- """ Pseudo PDB that defers to the real pdb. """
- _pluginmanager = None
- _config = None
- _pdb_cls = pdb.Pdb
-
- @classmethod
- def set_trace(cls):
- """ invoke PDB set_trace debugging, dropping any IO capturing. """
- import _pytest.config
- frame = sys._getframe().f_back
- if cls._pluginmanager is not None:
- capman = cls._pluginmanager.getplugin("capturemanager")
- if capman:
- capman.suspendcapture(in_=True)
- tw = _pytest.config.create_terminal_writer(cls._config)
- tw.line()
- tw.sep(">", "PDB set_trace (IO-capturing turned off)")
- cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config)
- cls._pdb_cls().set_trace(frame)
-
-
-class PdbInvoke:
- def pytest_exception_interact(self, node, call, report):
- capman = node.config.pluginmanager.getplugin("capturemanager")
- if capman:
- out, err = capman.suspendcapture(in_=True)
- sys.stdout.write(out)
- sys.stdout.write(err)
- _enter_pdb(node, call.excinfo, report)
-
- def pytest_internalerror(self, excrepr, excinfo):
- for line in str(excrepr).split("\n"):
- sys.stderr.write("INTERNALERROR> %s\n" % line)
- sys.stderr.flush()
- tb = _postmortem_traceback(excinfo)
- post_mortem(tb)
-
-
-def _enter_pdb(node, excinfo, rep):
- # XXX we re-use the TerminalReporter's terminalwriter
-    # because this seems to avoid some encoding-related troubles
-    # for reasons that are not completely clear.
- tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
- tw.line()
- tw.sep(">", "traceback")
- rep.toterminal(tw)
- tw.sep(">", "entering PDB")
- tb = _postmortem_traceback(excinfo)
- post_mortem(tb)
- rep._pdbshown = True
- return rep
-
-
-def _postmortem_traceback(excinfo):
- # A doctest.UnexpectedException is not useful for post_mortem.
- # Use the underlying exception instead:
- from doctest import UnexpectedException
- if isinstance(excinfo.value, UnexpectedException):
- return excinfo.value.exc_info[2]
- else:
- return excinfo._excinfo[2]
-
-
-def _find_last_non_hidden_frame(stack):
- i = max(0, len(stack) - 1)
- while i and stack[i][0].f_locals.get("__tracebackhide__", False):
- i -= 1
- return i
-
-
-def post_mortem(t):
- class Pdb(pytestPDB._pdb_cls):
- def get_stack(self, f, t):
- stack, i = pdb.Pdb.get_stack(self, f, t)
- if f is None:
- i = _find_last_non_hidden_frame(stack)
- return stack, i
- p = Pdb()
- p.reset()
- p.interaction(None, t)
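The modulename:classname string accepted by --pdbcls is resolved with a plain __import__ plus getattr, as in pytest_configure above. A runnable sketch using the stdlib debugger as the example spec:

    import sys

    spec = "pdb:Pdb"                      # e.g. IPython.terminal.debugger:TerminalPdb
    modname, classname = spec.split(":")
    __import__(modname)
    pdb_cls = getattr(sys.modules[modname], classname)
    print(pdb_cls)                        # <class 'pdb.Pdb'>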
diff --git a/lib/spack/external/pytest-fallback/_pytest/deprecated.py b/lib/spack/external/pytest-fallback/_pytest/deprecated.py
deleted file mode 100644
index 38e9496778..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/deprecated.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-This module contains deprecation messages and bits of code used elsewhere in the codebase
-that are planned to be removed in the next pytest release.
-
-Keeping it in a central location makes it easy to track what is deprecated and should
-be removed when the time comes.
-"""
-from __future__ import absolute_import, division, print_function
-
-
-class RemovedInPytest4Warning(DeprecationWarning):
- """warning class for features removed in pytest 4.0"""
-
-
-MAIN_STR_ARGS = 'passing a string to pytest.main() is deprecated, ' \
- 'pass a list of arguments instead.'
-
-YIELD_TESTS = 'yield tests are deprecated, and scheduled to be removed in pytest 4.0'
-
-FUNCARG_PREFIX = (
- '{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated '
- 'and scheduled to be removed in pytest 4.0. '
- 'Please remove the prefix and use the @pytest.fixture decorator instead.')
-
-SETUP_CFG_PYTEST = '[pytest] section in setup.cfg files is deprecated, use [tool:pytest] instead.'
-
-GETFUNCARGVALUE = "use of getfuncargvalue is deprecated, use getfixturevalue"
-
-RESULT_LOG = (
- '--result-log is deprecated and scheduled for removal in pytest 4.0.\n'
- 'See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information.'
-)
-
-MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning(
- "MarkInfo objects are deprecated as they contain the merged marks"
-)
-
-MARK_PARAMETERSET_UNPACKING = RemovedInPytest4Warning(
- "Applying marks directly to parameters is deprecated,"
- " please use pytest.param(..., marks=...) instead.\n"
- "For more details, see: https://docs.pytest.org/en/latest/parametrize.html"
-)
diff --git a/lib/spack/external/pytest-fallback/_pytest/doctest.py b/lib/spack/external/pytest-fallback/_pytest/doctest.py
deleted file mode 100644
index 4c05acddf7..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/doctest.py
+++ /dev/null
@@ -1,362 +0,0 @@
-""" discover and run doctests in modules and test files."""
-from __future__ import absolute_import, division, print_function
-
-import traceback
-
-import pytest
-from _pytest._code.code import ExceptionInfo, ReprFileLocation, TerminalRepr
-from _pytest.fixtures import FixtureRequest
-
-
-DOCTEST_REPORT_CHOICE_NONE = 'none'
-DOCTEST_REPORT_CHOICE_CDIFF = 'cdiff'
-DOCTEST_REPORT_CHOICE_NDIFF = 'ndiff'
-DOCTEST_REPORT_CHOICE_UDIFF = 'udiff'
-DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = 'only_first_failure'
-
-DOCTEST_REPORT_CHOICES = (
- DOCTEST_REPORT_CHOICE_NONE,
- DOCTEST_REPORT_CHOICE_CDIFF,
- DOCTEST_REPORT_CHOICE_NDIFF,
- DOCTEST_REPORT_CHOICE_UDIFF,
- DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
-)
-
-
-def pytest_addoption(parser):
- parser.addini('doctest_optionflags', 'option flags for doctests',
- type="args", default=["ELLIPSIS"])
- parser.addini("doctest_encoding", 'encoding used for doctest files', default="utf-8")
- group = parser.getgroup("collect")
- group.addoption("--doctest-modules",
- action="store_true", default=False,
- help="run doctests in all .py modules",
- dest="doctestmodules")
- group.addoption("--doctest-report",
- type=str.lower, default="udiff",
- help="choose another output format for diffs on doctest failure",
- choices=DOCTEST_REPORT_CHOICES,
- dest="doctestreport")
- group.addoption("--doctest-glob",
- action="append", default=[], metavar="pat",
- help="doctests file matching pattern, default: test*.txt",
- dest="doctestglob")
- group.addoption("--doctest-ignore-import-errors",
- action="store_true", default=False,
- help="ignore doctest ImportErrors",
- dest="doctest_ignore_import_errors")
-
-
-def pytest_collect_file(path, parent):
- config = parent.config
- if path.ext == ".py":
- if config.option.doctestmodules:
- return DoctestModule(path, parent)
- elif _is_doctest(config, path, parent):
- return DoctestTextfile(path, parent)
-
-
-def _is_doctest(config, path, parent):
- if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path):
- return True
- globs = config.getoption("doctestglob") or ['test*.txt']
- for glob in globs:
- if path.check(fnmatch=glob):
- return True
- return False
-
-
-class ReprFailDoctest(TerminalRepr):
-
- def __init__(self, reprlocation, lines):
- self.reprlocation = reprlocation
- self.lines = lines
-
- def toterminal(self, tw):
- for line in self.lines:
- tw.line(line)
- self.reprlocation.toterminal(tw)
-
-
-class DoctestItem(pytest.Item):
- def __init__(self, name, parent, runner=None, dtest=None):
- super(DoctestItem, self).__init__(name, parent)
- self.runner = runner
- self.dtest = dtest
- self.obj = None
- self.fixture_request = None
-
- def setup(self):
- if self.dtest is not None:
- self.fixture_request = _setup_fixtures(self)
- globs = dict(getfixture=self.fixture_request.getfixturevalue)
- for name, value in self.fixture_request.getfixturevalue('doctest_namespace').items():
- globs[name] = value
- self.dtest.globs.update(globs)
-
- def runtest(self):
- _check_all_skipped(self.dtest)
- self.runner.run(self.dtest)
-
- def repr_failure(self, excinfo):
- import doctest
- if excinfo.errisinstance((doctest.DocTestFailure,
- doctest.UnexpectedException)):
- doctestfailure = excinfo.value
- example = doctestfailure.example
- test = doctestfailure.test
- filename = test.filename
- if test.lineno is None:
- lineno = None
- else:
- lineno = test.lineno + example.lineno + 1
- message = excinfo.type.__name__
- reprlocation = ReprFileLocation(filename, lineno, message)
- checker = _get_checker()
- report_choice = _get_report_choice(self.config.getoption("doctestreport"))
- if lineno is not None:
- lines = doctestfailure.test.docstring.splitlines(False)
- # add line numbers to the left of the error message
- lines = ["%03d %s" % (i + test.lineno + 1, x)
- for (i, x) in enumerate(lines)]
- # trim docstring error lines to 10
- lines = lines[max(example.lineno - 9, 0):example.lineno + 1]
- else:
- lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example']
- indent = '>>>'
- for line in example.source.splitlines():
- lines.append('??? %s %s' % (indent, line))
- indent = '...'
- if excinfo.errisinstance(doctest.DocTestFailure):
- lines += checker.output_difference(example,
- doctestfailure.got, report_choice).split("\n")
- else:
- inner_excinfo = ExceptionInfo(excinfo.value.exc_info)
- lines += ["UNEXPECTED EXCEPTION: %s" %
- repr(inner_excinfo.value)]
- lines += traceback.format_exception(*excinfo.value.exc_info)
- return ReprFailDoctest(reprlocation, lines)
- else:
- return super(DoctestItem, self).repr_failure(excinfo)
-
- def reportinfo(self):
- return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name
-
-
-def _get_flag_lookup():
- import doctest
- return dict(DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
- DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
- NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
- ELLIPSIS=doctest.ELLIPSIS,
- IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
- COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
- ALLOW_UNICODE=_get_allow_unicode_flag(),
- ALLOW_BYTES=_get_allow_bytes_flag(),
- )
-
-
-def get_optionflags(parent):
- optionflags_str = parent.config.getini("doctest_optionflags")
- flag_lookup_table = _get_flag_lookup()
- flag_acc = 0
- for flag in optionflags_str:
- flag_acc |= flag_lookup_table[flag]
- return flag_acc
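get_optionflags simply ORs the named doctest flags together; a standalone sketch of the same lookup with two common flag names:

    import doctest

    flags = 0
    for name in ["ELLIPSIS", "NORMALIZE_WHITESPACE"]:
        flags |= getattr(doctest, name)
    assert flags & doctest.ELLIPSIS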
-
-
-class DoctestTextfile(pytest.Module):
- obj = None
-
- def collect(self):
- import doctest
-
- # inspired by doctest.testfile; ideally we would use it directly,
- # but it doesn't support passing a custom checker
- encoding = self.config.getini("doctest_encoding")
- text = self.fspath.read_text(encoding)
- filename = str(self.fspath)
- name = self.fspath.basename
- globs = {'__name__': '__main__'}
-
- optionflags = get_optionflags(self)
- runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
- checker=_get_checker())
- _fix_spoof_python2(runner, encoding)
-
- parser = doctest.DocTestParser()
- test = parser.get_doctest(text, globs, name, filename, 0)
- if test.examples:
- yield DoctestItem(test.name, self, runner, test)
-
-
-def _check_all_skipped(test):
- """raises pytest.skip() if all examples in the given DocTest have the SKIP
- option set.
- """
- import doctest
- all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
- if all_skipped:
- pytest.skip('all tests skipped by +SKIP option')
-
-
-class DoctestModule(pytest.Module):
- def collect(self):
- import doctest
- if self.fspath.basename == "conftest.py":
- module = self.config.pluginmanager._importconftest(self.fspath)
- else:
- try:
- module = self.fspath.pyimport()
- except ImportError:
- if self.config.getvalue('doctest_ignore_import_errors'):
- pytest.skip('unable to import module %r' % self.fspath)
- else:
- raise
- # uses internal doctest module parsing mechanism
- finder = doctest.DocTestFinder()
- optionflags = get_optionflags(self)
- runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
- checker=_get_checker())
-
- for test in finder.find(module, module.__name__):
- if test.examples: # skip empty doctests
- yield DoctestItem(test.name, self, runner, test)
-
-
-def _setup_fixtures(doctest_item):
- """
-    Used by DoctestTextfile and DoctestItem to set up fixture information.
- """
- def func():
- pass
-
- doctest_item.funcargs = {}
- fm = doctest_item.session._fixturemanager
- doctest_item._fixtureinfo = fm.getfixtureinfo(node=doctest_item, func=func,
- cls=None, funcargs=False)
- fixture_request = FixtureRequest(doctest_item)
- fixture_request._fillfixtures()
- return fixture_request
-
-
-def _get_checker():
- """
-    Returns a doctest.OutputChecker subclass that takes into account the
- ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
- to strip b'' prefixes.
- Useful when the same doctest should run in Python 2 and Python 3.
-
- An inner class is used to avoid importing "doctest" at the module
- level.
- """
- if hasattr(_get_checker, 'LiteralsOutputChecker'):
- return _get_checker.LiteralsOutputChecker()
-
- import doctest
- import re
-
- class LiteralsOutputChecker(doctest.OutputChecker):
- """
- Copied from doctest_nose_plugin.py from the nltk project:
- https://github.com/nltk/nltk
-
- Further extended to also support byte literals.
- """
-
- _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
- _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
-
- def check_output(self, want, got, optionflags):
- res = doctest.OutputChecker.check_output(self, want, got,
- optionflags)
- if res:
- return True
-
- allow_unicode = optionflags & _get_allow_unicode_flag()
- allow_bytes = optionflags & _get_allow_bytes_flag()
- if not allow_unicode and not allow_bytes:
- return False
-
- else: # pragma: no cover
- def remove_prefixes(regex, txt):
- return re.sub(regex, r'\1\2', txt)
-
- if allow_unicode:
- want = remove_prefixes(self._unicode_literal_re, want)
- got = remove_prefixes(self._unicode_literal_re, got)
- if allow_bytes:
- want = remove_prefixes(self._bytes_literal_re, want)
- got = remove_prefixes(self._bytes_literal_re, got)
- res = doctest.OutputChecker.check_output(self, want, got,
- optionflags)
- return res
-
- _get_checker.LiteralsOutputChecker = LiteralsOutputChecker
- return _get_checker.LiteralsOutputChecker()
-
-
-def _get_allow_unicode_flag():
- """
- Registers and returns the ALLOW_UNICODE flag.
- """
- import doctest
- return doctest.register_optionflag('ALLOW_UNICODE')
-
-
-def _get_allow_bytes_flag():
- """
- Registers and returns the ALLOW_BYTES flag.
- """
- import doctest
- return doctest.register_optionflag('ALLOW_BYTES')
-
-
-def _get_report_choice(key):
- """
-    This function returns the actual `doctest` module flag value. We do this as late as possible to avoid
-    importing `doctest` and all its dependencies when parsing options, as that adds overhead and breaks tests.
- """
- import doctest
-
- return {
- DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
- DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
- DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
- DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
- DOCTEST_REPORT_CHOICE_NONE: 0,
- }[key]
-
-
-def _fix_spoof_python2(runner, encoding):
- """
- Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This
- should patch only doctests for text files because they don't have a way to declare their
- encoding. Doctests in docstrings from Python modules don't have the same problem given that
- Python already decoded the strings.
-
-    This fixes the problem reported in issue #2434.
- """
- from _pytest.compat import _PY2
- if not _PY2:
- return
-
- from doctest import _SpoofOut
-
- class UnicodeSpoof(_SpoofOut):
-
- def getvalue(self):
- result = _SpoofOut.getvalue(self)
- if encoding:
- result = result.decode(encoding)
- return result
-
- runner._fakeout = UnicodeSpoof()
-
-
-@pytest.fixture(scope='session')
-def doctest_namespace():
- """
- Inject names into the doctest namespace.
- """
- return dict()
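A hedged example of how the session-scoped doctest_namespace fixture above is typically extended from a project's conftest.py; the injected name is purely illustrative:

    import pytest

    @pytest.fixture(autouse=True)
    def add_answer(doctest_namespace):
        # "answer" becomes available as a global inside collected doctests
        doctest_namespace["answer"] = 42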
diff --git a/lib/spack/external/pytest-fallback/_pytest/fixtures.py b/lib/spack/external/pytest-fallback/_pytest/fixtures.py
deleted file mode 100644
index 7ad495615e..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/fixtures.py
+++ /dev/null
@@ -1,1135 +0,0 @@
-from __future__ import absolute_import, division, print_function
-
-import collections
-import inspect
-import sys
-import warnings
-
-import py
-from py._code.code import FormattedExcinfo
-
-import _pytest
-from _pytest import nodes
-from _pytest._code.code import TerminalRepr
-from _pytest.compat import (
- NOTSET, exc_clear, _format_args,
- getfslineno, get_real_func,
- is_generator, isclass, getimfunc,
- getlocation, getfuncargnames,
- safe_getattr,
- FuncargnamesCompatAttr,
-)
-from _pytest.outcomes import fail, TEST_OUTCOME
-
-
-def pytest_sessionstart(session):
- import _pytest.python
- scopename2class.update({
- 'class': _pytest.python.Class,
- 'module': _pytest.python.Module,
- 'function': _pytest.main.Item,
- })
- session._fixturemanager = FixtureManager(session)
-
-
-scopename2class = {}
-
-
-scope2props = dict(session=())
-scope2props["module"] = ("fspath", "module")
-scope2props["class"] = scope2props["module"] + ("cls",)
-scope2props["instance"] = scope2props["class"] + ("instance", )
-scope2props["function"] = scope2props["instance"] + ("function", "keywords")
-
-
-def scopeproperty(name=None, doc=None):
- def decoratescope(func):
- scopename = name or func.__name__
-
- def provide(self):
- if func.__name__ in scope2props[self.scope]:
- return func(self)
- raise AttributeError("%s not available in %s-scoped context" % (
- scopename, self.scope))
-
- return property(provide, None, None, func.__doc__)
- return decoratescope
-
-
-def get_scope_node(node, scope):
- cls = scopename2class.get(scope)
- if cls is None:
- if scope == "session":
- return node.session
- raise ValueError("unknown scope")
- return node.getparent(cls)
-
-
-def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
-    # this function will transform all collected calls to a function
- # if they use direct funcargs (i.e. direct parametrization)
- # because we want later test execution to be able to rely on
- # an existing FixtureDef structure for all arguments.
- # XXX we can probably avoid this algorithm if we modify CallSpec2
- # to directly care for creating the fixturedefs within its methods.
- if not metafunc._calls[0].funcargs:
- return # this function call does not have direct parametrization
- # collect funcargs of all callspecs into a list of values
- arg2params = {}
- arg2scope = {}
- for callspec in metafunc._calls:
- for argname, argvalue in callspec.funcargs.items():
- assert argname not in callspec.params
- callspec.params[argname] = argvalue
- arg2params_list = arg2params.setdefault(argname, [])
- callspec.indices[argname] = len(arg2params_list)
- arg2params_list.append(argvalue)
- if argname not in arg2scope:
- scopenum = callspec._arg2scopenum.get(argname,
- scopenum_function)
- arg2scope[argname] = scopes[scopenum]
- callspec.funcargs.clear()
-
- # register artificial FixtureDef's so that later at test execution
- # time we can rely on a proper FixtureDef to exist for fixture setup.
- arg2fixturedefs = metafunc._arg2fixturedefs
- for argname, valuelist in arg2params.items():
- # if we have a scope that is higher than function we need
-        # to make sure we only ever create a corresponding fixturedef on
- # a per-scope basis. We thus store and cache the fixturedef on the
- # node related to the scope.
- scope = arg2scope[argname]
- node = None
- if scope != "function":
- node = get_scope_node(collector, scope)
- if node is None:
- assert scope == "class" and isinstance(collector, _pytest.python.Module)
- # use module-level collector for class-scope (for now)
- node = collector
- if node and argname in node._name2pseudofixturedef:
- arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
- else:
- fixturedef = FixtureDef(fixturemanager, '', argname,
- get_direct_param_fixture_func,
- arg2scope[argname],
- valuelist, False, False)
- arg2fixturedefs[argname] = [fixturedef]
- if node is not None:
- node._name2pseudofixturedef[argname] = fixturedef
-
-
-def getfixturemarker(obj):
- """ return fixturemarker or None if it doesn't exist or raised
- exceptions."""
- try:
- return getattr(obj, "_pytestfixturefunction", None)
- except TEST_OUTCOME:
- # some objects raise errors like request (from flask import request)
- # we don't expect them to be fixture functions
- return None
-
-
-def get_parametrized_fixture_keys(item, scopenum):
- """ return list of keys for all parametrized arguments which match
- the specified scope. """
- assert scopenum < scopenum_function # function
- try:
- cs = item.callspec
- except AttributeError:
- pass
- else:
-        # cs.indices.items() is in random order of argnames. Need to
- # sort this so that different calls to
- # get_parametrized_fixture_keys will be deterministic.
- for argname, param_index in sorted(cs.indices.items()):
- if cs._arg2scopenum[argname] != scopenum:
- continue
- if scopenum == 0: # session
- key = (argname, param_index)
- elif scopenum == 1: # module
- key = (argname, param_index, item.fspath)
- elif scopenum == 2: # class
- key = (argname, param_index, item.fspath, item.cls)
- yield key
-
-
-# algorithm for sorting on a per-parametrized resource setup basis
-# it is called for scopenum==0 (session) first and performs sorting
-# down to the lower scopes such as to minimize number of "high scope"
-# setups and teardowns
-
-def reorder_items(items):
- argkeys_cache = {}
- for scopenum in range(0, scopenum_function):
- argkeys_cache[scopenum] = d = {}
- for item in items:
- keys = collections.OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
- if keys:
- d[item] = keys
- return reorder_items_atscope(items, set(), argkeys_cache, 0)
-
-
-def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
- if scopenum >= scopenum_function or len(items) < 3:
- return items
- items_done = []
- while 1:
- items_before, items_same, items_other, newignore = \
- slice_items(items, ignore, argkeys_cache[scopenum])
- items_before = reorder_items_atscope(
- items_before, ignore, argkeys_cache, scopenum + 1)
- if items_same is None:
- # nothing to reorder in this scope
- assert items_other is None
- return items_done + items_before
- items_done.extend(items_before)
- items = items_same + items_other
- ignore = newignore
-
-
-def slice_items(items, ignore, scoped_argkeys_cache):
- # we pick the first item which uses a fixture instance in the
- # requested scope and which we haven't seen yet. We slice the input
-    # items list into a list of items_before, items_same and
- # items_other
- if scoped_argkeys_cache: # do we need to do work at all?
- it = iter(items)
- # first find a slicing key
- for i, item in enumerate(it):
- argkeys = scoped_argkeys_cache.get(item)
- if argkeys is not None:
- newargkeys = collections.OrderedDict.fromkeys(k for k in argkeys if k not in ignore)
- if newargkeys: # found a slicing key
- slicing_argkey, _ = newargkeys.popitem()
- items_before = items[:i]
- items_same = [item]
- items_other = []
- # now slice the remainder of the list
- for item in it:
- argkeys = scoped_argkeys_cache.get(item)
- if argkeys and slicing_argkey in argkeys and \
- slicing_argkey not in ignore:
- items_same.append(item)
- else:
- items_other.append(item)
- newignore = ignore.copy()
- newignore.add(slicing_argkey)
- return (items_before, items_same, items_other, newignore)
- return items, None, None, None
-
-
-def fillfixtures(function):
- """ fill missing funcargs for a test function. """
- try:
- request = function._request
- except AttributeError:
- # XXX this special code path is only expected to execute
- # with the oejskit plugin. It uses classes with funcargs
- # and we thus have to work a bit to allow this.
- fm = function.session._fixturemanager
- fi = fm.getfixtureinfo(function.parent, function.obj, None)
- function._fixtureinfo = fi
- request = function._request = FixtureRequest(function)
- request._fillfixtures()
- # prune out funcargs for jstests
- newfuncargs = {}
- for name in fi.argnames:
- newfuncargs[name] = function.funcargs[name]
- function.funcargs = newfuncargs
- else:
- request._fillfixtures()
-
-
-def get_direct_param_fixture_func(request):
- return request.param
-
-
-class FuncFixtureInfo:
- def __init__(self, argnames, names_closure, name2fixturedefs):
- self.argnames = argnames
- self.names_closure = names_closure
- self.name2fixturedefs = name2fixturedefs
-
-
-class FixtureRequest(FuncargnamesCompatAttr):
- """ A request for a fixture from a test or fixture function.
-
- A request object gives access to the requesting test context
- and has an optional ``param`` attribute in case
- the fixture is parametrized indirectly.
- """
-
- def __init__(self, pyfuncitem):
- self._pyfuncitem = pyfuncitem
- #: fixture for which this request is being performed
- self.fixturename = None
- #: Scope string, one of "function", "class", "module", "session"
- self.scope = "function"
- self._fixture_values = {} # argname -> fixture value
- self._fixture_defs = {} # argname -> FixtureDef
- fixtureinfo = pyfuncitem._fixtureinfo
- self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
- self._arg2index = {}
- self._fixturemanager = pyfuncitem.session._fixturemanager
-
- @property
- def fixturenames(self):
- # backward incompatible note: now a readonly property
- return list(self._pyfuncitem._fixtureinfo.names_closure)
-
- @property
- def node(self):
- """ underlying collection node (depends on current request scope)"""
- return self._getscopeitem(self.scope)
-
- def _getnextfixturedef(self, argname):
- fixturedefs = self._arg2fixturedefs.get(argname, None)
- if fixturedefs is None:
-            # we arrive here because of a dynamic call to
- # getfixturevalue(argname) usage which was naturally
- # not known at parsing/collection time
- parentid = self._pyfuncitem.parent.nodeid
- fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
- self._arg2fixturedefs[argname] = fixturedefs
- # fixturedefs list is immutable so we maintain a decreasing index
- index = self._arg2index.get(argname, 0) - 1
- if fixturedefs is None or (-index > len(fixturedefs)):
- raise FixtureLookupError(argname, self)
- self._arg2index[argname] = index
- return fixturedefs[index]
-
- @property
- def config(self):
- """ the pytest config object associated with this request. """
- return self._pyfuncitem.config
-
- @scopeproperty()
- def function(self):
- """ test function object if the request has a per-function scope. """
- return self._pyfuncitem.obj
-
- @scopeproperty("class")
- def cls(self):
- """ class (can be None) where the test function was collected. """
- clscol = self._pyfuncitem.getparent(_pytest.python.Class)
- if clscol:
- return clscol.obj
-
- @property
- def instance(self):
- """ instance (can be None) on which test function was collected. """
- # unittest support hack, see _pytest.unittest.TestCaseFunction
- try:
- return self._pyfuncitem._testcase
- except AttributeError:
- function = getattr(self, "function", None)
- if function is not None:
- return py.builtin._getimself(function)
-
- @scopeproperty()
- def module(self):
- """ python module object where the test function was collected. """
- return self._pyfuncitem.getparent(_pytest.python.Module).obj
-
- @scopeproperty()
- def fspath(self):
- """ the file system path of the test module which collected this test. """
- return self._pyfuncitem.fspath
-
- @property
- def keywords(self):
- """ keywords/markers dictionary for the underlying node. """
- return self.node.keywords
-
- @property
- def session(self):
- """ pytest session object. """
- return self._pyfuncitem.session
-
- def addfinalizer(self, finalizer):
- """ add finalizer/teardown function to be called after the
- last test within the requesting test context finished
- execution. """
- # XXX usually this method is shadowed by fixturedef specific ones
- self._addfinalizer(finalizer, scope=self.scope)
-
- def _addfinalizer(self, finalizer, scope):
- colitem = self._getscopeitem(scope)
- self._pyfuncitem.session._setupstate.addfinalizer(
- finalizer=finalizer, colitem=colitem)
-
- def applymarker(self, marker):
- """ Apply a marker to a single test function invocation.
- This method is useful if you don't want to have a keyword/marker
- on all function invocations.
-
- :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
- created by a call to ``pytest.mark.NAME(...)``.
- """
- try:
- self.node.keywords[marker.markname] = marker
- except AttributeError:
- raise ValueError(marker)
-
- def raiseerror(self, msg):
- """ raise a FixtureLookupError with the given message. """
- raise self._fixturemanager.FixtureLookupError(None, self, msg)
-
- def _fillfixtures(self):
- item = self._pyfuncitem
- fixturenames = getattr(item, "fixturenames", self.fixturenames)
- for argname in fixturenames:
- if argname not in item.funcargs:
- item.funcargs[argname] = self.getfixturevalue(argname)
-
- def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
- """ (deprecated) Return a testing resource managed by ``setup`` &
- ``teardown`` calls. ``scope`` and ``extrakey`` determine when the
- ``teardown`` function will be called so that subsequent calls to
- ``setup`` would recreate the resource. With pytest-2.3 you often
- do not need ``cached_setup()`` as you can directly declare a scope
- on a fixture function and register a finalizer through
- ``request.addfinalizer()``.
-
- :arg teardown: function receiving a previously setup resource.
- :arg setup: a no-argument function creating a resource.
- :arg scope: a string value out of ``function``, ``class``, ``module``
- or ``session`` indicating the caching lifecycle of the resource.
- :arg extrakey: added to internal caching key of (funcargname, scope).
- """
- if not hasattr(self.config, '_setupcache'):
- self.config._setupcache = {} # XXX weakref?
- cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
- cache = self.config._setupcache
- try:
- val = cache[cachekey]
- except KeyError:
- self._check_scope(self.fixturename, self.scope, scope)
- val = setup()
- cache[cachekey] = val
- if teardown is not None:
- def finalizer():
- del cache[cachekey]
- teardown(val)
- self._addfinalizer(finalizer, scope=scope)
- return val
-
- def getfixturevalue(self, argname):
- """ Dynamically run a named fixture function.
-
- Declaring fixtures via function argument is recommended where possible.
- But if you can only decide whether to use another fixture at test
- setup time, you may use this function to retrieve it inside a fixture
- or test function body.
- """
- return self._get_active_fixturedef(argname).cached_result[0]
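An illustrative use of the dynamic lookup described above, choosing between two hypothetical fixtures at setup time:

    import os
    import pytest

    @pytest.fixture
    def fast_db():
        return {"backend": "sqlite"}

    @pytest.fixture
    def real_db():
        return {"backend": "postgres"}

    @pytest.fixture
    def db(request):
        # decide only at setup time which fixture to pull in
        name = "fast_db" if os.environ.get("QUICK_TESTS") else "real_db"
        return request.getfixturevalue(name)

    def test_backend(db):
        assert "backend" in db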
-
- def getfuncargvalue(self, argname):
- """ Deprecated, use getfixturevalue. """
- from _pytest import deprecated
- warnings.warn(
- deprecated.GETFUNCARGVALUE,
- DeprecationWarning,
- stacklevel=2)
- return self.getfixturevalue(argname)
-
- def _get_active_fixturedef(self, argname):
- try:
- return self._fixture_defs[argname]
- except KeyError:
- try:
- fixturedef = self._getnextfixturedef(argname)
- except FixtureLookupError:
- if argname == "request":
- class PseudoFixtureDef:
- cached_result = (self, [0], None)
- scope = "function"
- return PseudoFixtureDef
- raise
- # remove indent to prevent the python3 exception
- # from leaking into the call
- result = self._getfixturevalue(fixturedef)
- self._fixture_values[argname] = result
- self._fixture_defs[argname] = fixturedef
- return fixturedef
-
- def _get_fixturestack(self):
- current = self
- values = []
- while 1:
- fixturedef = getattr(current, "_fixturedef", None)
- if fixturedef is None:
- values.reverse()
- return values
- values.append(fixturedef)
- current = current._parent_request
-
- def _getfixturevalue(self, fixturedef):
- # prepare a subrequest object before calling fixture function
- # (latter managed by fixturedef)
- argname = fixturedef.argname
- funcitem = self._pyfuncitem
- scope = fixturedef.scope
- try:
- param = funcitem.callspec.getparam(argname)
- except (AttributeError, ValueError):
- param = NOTSET
- param_index = 0
- if fixturedef.params is not None:
- frame = inspect.stack()[3]
- frameinfo = inspect.getframeinfo(frame[0])
- source_path = frameinfo.filename
- source_lineno = frameinfo.lineno
- source_path = py.path.local(source_path)
- if source_path.relto(funcitem.config.rootdir):
- source_path = source_path.relto(funcitem.config.rootdir)
- msg = (
- "The requested fixture has no parameter defined for the "
- "current test.\n\nRequested fixture '{0}' defined in:\n{1}"
- "\n\nRequested here:\n{2}:{3}".format(
- fixturedef.argname,
- getlocation(fixturedef.func, funcitem.config.rootdir),
- source_path,
- source_lineno,
- )
- )
- fail(msg)
- else:
- # indices might not be set if old-style metafunc.addcall() was used
- param_index = funcitem.callspec.indices.get(argname, 0)
- # if a parametrize invocation set a scope it will override
- # the static scope defined with the fixture function
- paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
- if paramscopenum is not None:
- scope = scopes[paramscopenum]
-
- subrequest = SubRequest(self, scope, param, param_index, fixturedef)
-
- # check if a higher-level scoped fixture accesses a lower level one
- subrequest._check_scope(argname, self.scope, scope)
-
- # clear sys.exc_info before invoking the fixture (python bug?)
-        # if it's not explicitly cleared it will leak into the call
- exc_clear()
- try:
- # call the fixture function
- val = fixturedef.execute(request=subrequest)
- finally:
- # if fixture function failed it might have registered finalizers
- self.session._setupstate.addfinalizer(fixturedef.finish,
- subrequest.node)
- return val
-
- def _check_scope(self, argname, invoking_scope, requested_scope):
- if argname == "request":
- return
- if scopemismatch(invoking_scope, requested_scope):
- # try to report something helpful
- lines = self._factorytraceback()
- fail("ScopeMismatch: You tried to access the %r scoped "
- "fixture %r with a %r scoped request object, "
- "involved factories\n%s" % (
- (requested_scope, argname, invoking_scope, "\n".join(lines))),
- pytrace=False)
-
- def _factorytraceback(self):
- lines = []
- for fixturedef in self._get_fixturestack():
- factory = fixturedef.func
- fs, lineno = getfslineno(factory)
- p = self._pyfuncitem.session.fspath.bestrelpath(fs)
- args = _format_args(factory)
- lines.append("%s:%d: def %s%s" % (
- p, lineno, factory.__name__, args))
- return lines
-
- def _getscopeitem(self, scope):
- if scope == "function":
- # this might also be a non-function Item despite its attribute name
- return self._pyfuncitem
- node = get_scope_node(self._pyfuncitem, scope)
- if node is None and scope == "class":
- # fallback to function item itself
- node = self._pyfuncitem
- assert node
- return node
-
- def __repr__(self):
- return "<FixtureRequest for %r>" % (self.node)
-
-
-class SubRequest(FixtureRequest):
- """ a sub request for handling getting a fixture from a
- test function/fixture. """
-
- def __init__(self, request, scope, param, param_index, fixturedef):
- self._parent_request = request
- self.fixturename = fixturedef.argname
- if param is not NOTSET:
- self.param = param
- self.param_index = param_index
- self.scope = scope
- self._fixturedef = fixturedef
- self._pyfuncitem = request._pyfuncitem
- self._fixture_values = request._fixture_values
- self._fixture_defs = request._fixture_defs
- self._arg2fixturedefs = request._arg2fixturedefs
- self._arg2index = request._arg2index
- self._fixturemanager = request._fixturemanager
-
- def __repr__(self):
- return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
-
- def addfinalizer(self, finalizer):
- self._fixturedef.addfinalizer(finalizer)
-
-
-class ScopeMismatchError(Exception):
- """ A fixture function tries to use a different fixture function which
-    has a lower scope (e.g. a Session one calls a function one)
- """
-
-
-scopes = "session module class function".split()
-scopenum_function = scopes.index("function")
-
-
-def scopemismatch(currentscope, newscope):
- return scopes.index(newscope) > scopes.index(currentscope)
-
-
-def scope2index(scope, descr, where=None):
- """Look up the index of ``scope`` and raise a descriptive value error
- if not defined.
- """
- try:
- return scopes.index(scope)
- except ValueError:
- raise ValueError(
- "{0} {1}has an unsupported scope value '{2}'".format(
- descr, 'from {0} '.format(where) if where else '',
- scope)
- )
-
-
-class FixtureLookupError(LookupError):
- """ could not return a requested Fixture (missing or invalid). """
-
- def __init__(self, argname, request, msg=None):
- self.argname = argname
- self.request = request
- self.fixturestack = request._get_fixturestack()
- self.msg = msg
-
- def formatrepr(self):
- tblines = []
- addline = tblines.append
- stack = [self.request._pyfuncitem.obj]
- stack.extend(map(lambda x: x.func, self.fixturestack))
- msg = self.msg
- if msg is not None:
-            # the last fixture raised an error, let's present
- # it at the requesting side
- stack = stack[:-1]
- for function in stack:
- fspath, lineno = getfslineno(function)
- try:
- lines, _ = inspect.getsourcelines(get_real_func(function))
- except (IOError, IndexError, TypeError):
- error_msg = "file %s, line %s: source code not available"
- addline(error_msg % (fspath, lineno + 1))
- else:
- addline("file %s, line %s" % (fspath, lineno + 1))
- for i, line in enumerate(lines):
- line = line.rstrip()
- addline(" " + line)
- if line.lstrip().startswith('def'):
- break
-
- if msg is None:
- fm = self.request._fixturemanager
- available = []
- parentid = self.request._pyfuncitem.parent.nodeid
- for name, fixturedefs in fm._arg2fixturedefs.items():
- faclist = list(fm._matchfactories(fixturedefs, parentid))
- if faclist and name not in available:
- available.append(name)
- msg = "fixture %r not found" % (self.argname,)
- msg += "\n available fixtures: %s" % (", ".join(sorted(available)),)
- msg += "\n use 'pytest --fixtures [testpath]' for help on them."
-
- return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
-
-
-class FixtureLookupErrorRepr(TerminalRepr):
- def __init__(self, filename, firstlineno, tblines, errorstring, argname):
- self.tblines = tblines
- self.errorstring = errorstring
- self.filename = filename
- self.firstlineno = firstlineno
- self.argname = argname
-
- def toterminal(self, tw):
- # tw.line("FixtureLookupError: %s" %(self.argname), red=True)
- for tbline in self.tblines:
- tw.line(tbline.rstrip())
- lines = self.errorstring.split("\n")
- if lines:
- tw.line('{0} {1}'.format(FormattedExcinfo.fail_marker,
- lines[0].strip()), red=True)
- for line in lines[1:]:
- tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker,
- line.strip()), red=True)
- tw.line()
- tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
-
-
-def fail_fixturefunc(fixturefunc, msg):
- fs, lineno = getfslineno(fixturefunc)
- location = "%s:%s" % (fs, lineno + 1)
- source = _pytest._code.Source(fixturefunc)
- fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
- pytrace=False)
-
-
-def call_fixture_func(fixturefunc, request, kwargs):
- yieldctx = is_generator(fixturefunc)
- if yieldctx:
- it = fixturefunc(**kwargs)
- res = next(it)
-
- def teardown():
- try:
- next(it)
- except StopIteration:
- pass
- else:
- fail_fixturefunc(fixturefunc,
- "yield_fixture function has more than one 'yield'")
-
- request.addfinalizer(teardown)
- else:
- res = fixturefunc(**kwargs)
- return res
-
-
-class FixtureDef:
- """ A container for a factory definition. """
-
- def __init__(self, fixturemanager, baseid, argname, func, scope, params,
- unittest=False, ids=None):
- self._fixturemanager = fixturemanager
- self.baseid = baseid or ''
- self.has_location = baseid is not None
- self.func = func
- self.argname = argname
- self.scope = scope
- self.scopenum = scope2index(
- scope or "function",
- descr='fixture {0}'.format(func.__name__),
- where=baseid
- )
- self.params = params
- startindex = unittest and 1 or None
- self.argnames = getfuncargnames(func, startindex=startindex)
- self.unittest = unittest
- self.ids = ids
- self._finalizer = []
-
- def addfinalizer(self, finalizer):
- self._finalizer.append(finalizer)
-
- def finish(self):
- exceptions = []
- try:
- while self._finalizer:
- try:
- func = self._finalizer.pop()
- func()
- except: # noqa
- exceptions.append(sys.exc_info())
- if exceptions:
- e = exceptions[0]
- del exceptions # ensure we don't keep all frames alive because of the traceback
- py.builtin._reraise(*e)
-
- finally:
- ihook = self._fixturemanager.session.ihook
- ihook.pytest_fixture_post_finalizer(fixturedef=self)
- # even if finalization fails, we invalidate
- # the cached fixture value
- if hasattr(self, "cached_result"):
- del self.cached_result
-
- def execute(self, request):
- # get required arguments and register our own finish()
- # with their finalization
- for argname in self.argnames:
- fixturedef = request._get_active_fixturedef(argname)
- if argname != "request":
- fixturedef.addfinalizer(self.finish)
-
- my_cache_key = request.param_index
- cached_result = getattr(self, "cached_result", None)
- if cached_result is not None:
- result, cache_key, err = cached_result
- if my_cache_key == cache_key:
- if err is not None:
- py.builtin._reraise(*err)
- else:
- return result
- # we have a previous but differently parametrized fixture instance
- # so we need to tear it down before creating a new one
- self.finish()
- assert not hasattr(self, "cached_result")
-
- ihook = self._fixturemanager.session.ihook
- return ihook.pytest_fixture_setup(fixturedef=self, request=request)
-
- def __repr__(self):
- return ("<FixtureDef name=%r scope=%r baseid=%r >" %
- (self.argname, self.scope, self.baseid))
-
-
-def pytest_fixture_setup(fixturedef, request):
- """ Execution of fixture setup. """
- kwargs = {}
- for argname in fixturedef.argnames:
- fixdef = request._get_active_fixturedef(argname)
- result, arg_cache_key, exc = fixdef.cached_result
- request._check_scope(argname, request.scope, fixdef.scope)
- kwargs[argname] = result
-
- fixturefunc = fixturedef.func
- if fixturedef.unittest:
- if request.instance is not None:
- # bind the unbound method to the TestCase instance
- fixturefunc = fixturedef.func.__get__(request.instance)
- else:
- # the fixture function needs to be bound to the actual
- # request.instance so that code working with "fixturedef" behaves
- # as expected.
- if request.instance is not None:
- fixturefunc = getimfunc(fixturedef.func)
- if fixturefunc != fixturedef.func:
- fixturefunc = fixturefunc.__get__(request.instance)
- my_cache_key = request.param_index
- try:
- result = call_fixture_func(fixturefunc, request, kwargs)
- except TEST_OUTCOME:
- fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
- raise
- fixturedef.cached_result = (result, my_cache_key, None)
- return result
-
-
-class FixtureFunctionMarker:
- def __init__(self, scope, params, autouse=False, ids=None, name=None):
- self.scope = scope
- self.params = params
- self.autouse = autouse
- self.ids = ids
- self.name = name
-
- def __call__(self, function):
- if isclass(function):
- raise ValueError(
- "class fixtures not supported (may be in the future)")
- function._pytestfixturefunction = self
- return function
-
-
-def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
- """ (return a) decorator to mark a fixture factory function.
-
- This decorator can be used (with or without parameters) to define a
- fixture function. The name of the fixture function can later be
- referenced to cause its invocation ahead of running tests: test
- modules or classes can use the pytest.mark.usefixtures(fixturename)
- marker. Test functions can directly use fixture names as input
- arguments in which case the fixture instance returned from the fixture
- function will be injected.
-
- :arg scope: the scope for which this fixture is shared, one of
- "function" (default), "class", "module" or "session".
-
- :arg params: an optional list of parameters which will cause multiple
- invocations of the fixture function and all of the tests
- using it.
-
- :arg autouse: if True, the fixture func is activated for all tests that
- can see it. If False (the default) then an explicit
- reference is needed to activate the fixture.
-
- :arg ids: list of string ids each corresponding to the params
- so that they are part of the test id. If no ids are provided
- they will be generated automatically from the params.
-
- :arg name: the name of the fixture. This defaults to the name of the
- decorated function. If a fixture is used in the same module in
- which it is defined, the function name of the fixture will be
- shadowed by the function arg that requests the fixture; one way
- to resolve this is to name the decorated function
- ``fixture_<fixturename>`` and then use
- ``@pytest.fixture(name='<fixturename>')``.
-
- Fixtures can optionally provide their values to test functions using a ``yield`` statement,
- instead of ``return``. In this case, the code block after the ``yield`` statement is executed
- as teardown code regardless of the test outcome. A fixture function must yield exactly once.
- """
- if callable(scope) and params is None and autouse is False:
- # direct decoration
- return FixtureFunctionMarker(
- "function", params, autouse, name=name)(scope)
- if params is not None and not isinstance(params, (list, tuple)):
- params = list(params)
- return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
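A short example of the decorator described above, using the yield form so the code after the yield runs as teardown; the fixture name and temporary directory are illustrative:

    import shutil
    import tempfile

    import pytest

    @pytest.fixture(scope="module")
    def workdir():
        path = tempfile.mkdtemp()
        yield path              # injected into tests that request "workdir"
        shutil.rmtree(path)     # teardown after the last test in the module

    def test_uses_workdir(workdir):
        assert isinstance(workdir, str)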
-
-
-def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
- """ (return a) decorator to mark a yield-fixture factory function.
-
- .. deprecated:: 3.0
- Use :py:func:`pytest.fixture` directly instead.
- """
- if callable(scope) and params is None and not autouse:
- # direct decoration
- return FixtureFunctionMarker(
- "function", params, autouse, ids=ids, name=name)(scope)
- else:
- return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
-
-
-defaultfuncargprefixmarker = fixture()
-
-
-@fixture(scope="session")
-def pytestconfig(request):
- """ the pytest config object with access to command line opts."""
- return request.config
-
-
-class FixtureManager:
- """
-    pytest fixture definitions and information are stored and managed
-    by this class.
-
- During collection fm.parsefactories() is called multiple times to parse
- fixture function definitions into FixtureDef objects and internal
- data structures.
-
- During collection of test functions, metafunc-mechanics instantiate
- a FuncFixtureInfo object which is cached per node/func-name.
- This FuncFixtureInfo object is later retrieved by Function nodes
- which themselves offer a fixturenames attribute.
-
- The FuncFixtureInfo object holds information about fixtures and FixtureDefs
- relevant for a particular function. An initial list of fixtures is
- assembled like this:
-
- - ini-defined usefixtures
- - autouse-marked fixtures along the collection chain up from the function
- - usefixtures markers at module/class/function level
- - test function funcargs
-
- Subsequently the funcfixtureinfo.fixturenames attribute is computed
-    as the closure of the fixtures needed to set up the initial fixtures,
-    i.e. fixtures needed by fixture functions themselves are appended
- to the fixturenames list.
-
-    During the test-setup phase all fixturenames are instantiated and
-    retrieved by a lookup of their FuncFixtureInfo.
- """
-
- _argprefix = "pytest_funcarg__"
- FixtureLookupError = FixtureLookupError
- FixtureLookupErrorRepr = FixtureLookupErrorRepr
-
- def __init__(self, session):
- self.session = session
- self.config = session.config
- self._arg2fixturedefs = {}
- self._holderobjseen = set()
- self._arg2finish = {}
- self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
- session.config.pluginmanager.register(self, "funcmanage")
-
- def getfixtureinfo(self, node, func, cls, funcargs=True):
- if funcargs and not hasattr(node, "nofuncargs"):
- argnames = getfuncargnames(func, cls=cls)
- else:
- argnames = ()
- usefixtures = getattr(func, "usefixtures", None)
- initialnames = argnames
- if usefixtures is not None:
- initialnames = usefixtures.args + initialnames
- fm = node.session._fixturemanager
- names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
- node)
- return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
-
- def pytest_plugin_registered(self, plugin):
- nodeid = None
- try:
- p = py.path.local(plugin.__file__)
- except AttributeError:
- pass
- else:
- # construct the base nodeid which is later used to check
- # what fixtures are visible for particular tests (as denoted
- # by their test id)
- if p.basename.startswith("conftest.py"):
- nodeid = p.dirpath().relto(self.config.rootdir)
- if p.sep != nodes.SEP:
- nodeid = nodeid.replace(p.sep, nodes.SEP)
- self.parsefactories(plugin, nodeid)
-
- def _getautousenames(self, nodeid):
- """ return a tuple of fixture names to be used. """
- autousenames = []
- for baseid, basenames in self._nodeid_and_autousenames:
- if nodeid.startswith(baseid):
- if baseid:
- i = len(baseid)
- nextchar = nodeid[i:i + 1]
- if nextchar and nextchar not in ":/":
- continue
- autousenames.extend(basenames)
- # make sure autousenames are sorted by scope, scopenum 0 is session
- autousenames.sort(
- key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
- return autousenames
-
- def getfixtureclosure(self, fixturenames, parentnode):
-        # collect the closure of all fixtures, starting with the given
-        # fixturenames as the initial set. As we have to visit all
-        # factory definitions anyway, we also return an arg2fixturedefs
-        # mapping so that the caller can reuse it and does not have
-        # to re-discover fixturedefs for each fixturename
- # (discovering matching fixtures for a given name/node is expensive)
-
- parentid = parentnode.nodeid
- fixturenames_closure = self._getautousenames(parentid)
-
- def merge(otherlist):
- for arg in otherlist:
- if arg not in fixturenames_closure:
- fixturenames_closure.append(arg)
-
- merge(fixturenames)
- arg2fixturedefs = {}
- lastlen = -1
- while lastlen != len(fixturenames_closure):
- lastlen = len(fixturenames_closure)
- for argname in fixturenames_closure:
- if argname in arg2fixturedefs:
- continue
- fixturedefs = self.getfixturedefs(argname, parentid)
- if fixturedefs:
- arg2fixturedefs[argname] = fixturedefs
- merge(fixturedefs[-1].argnames)
- return fixturenames_closure, arg2fixturedefs
-
- def pytest_generate_tests(self, metafunc):
- for argname in metafunc.fixturenames:
- faclist = metafunc._arg2fixturedefs.get(argname)
- if faclist:
- fixturedef = faclist[-1]
- if fixturedef.params is not None:
- parametrize_func = getattr(metafunc.function, 'parametrize', None)
- func_params = getattr(parametrize_func, 'args', [[None]])
- func_kwargs = getattr(parametrize_func, 'kwargs', {})
- # skip directly parametrized arguments
- if "argnames" in func_kwargs:
- argnames = parametrize_func.kwargs["argnames"]
- else:
- argnames = func_params[0]
- if not isinstance(argnames, (tuple, list)):
- argnames = [x.strip() for x in argnames.split(",") if x.strip()]
- if argname not in func_params and argname not in argnames:
- metafunc.parametrize(argname, fixturedef.params,
- indirect=True, scope=fixturedef.scope,
- ids=fixturedef.ids)
- else:
- continue # will raise FixtureLookupError at setup time
-
- def pytest_collection_modifyitems(self, items):
- # separate parametrized setups
- items[:] = reorder_items(items)
-
- def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
- if nodeid is not NOTSET:
- holderobj = node_or_obj
- else:
- holderobj = node_or_obj.obj
- nodeid = node_or_obj.nodeid
- if holderobj in self._holderobjseen:
- return
- self._holderobjseen.add(holderobj)
- autousenames = []
- for name in dir(holderobj):
- # The attribute can be an arbitrary descriptor, so the attribute
-            # access below can raise. safe_getattr() ignores such exceptions.
- obj = safe_getattr(holderobj, name, None)
- # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
- # or are "@pytest.fixture" marked
- marker = getfixturemarker(obj)
- if marker is None:
- if not name.startswith(self._argprefix):
- continue
- if not callable(obj):
- continue
- marker = defaultfuncargprefixmarker
- from _pytest import deprecated
- self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid)
- name = name[len(self._argprefix):]
- elif not isinstance(marker, FixtureFunctionMarker):
- # magic globals with __getattr__ might have got us a wrong
- # fixture attribute
- continue
- else:
- if marker.name:
- name = marker.name
- msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \
- 'and be decorated with @pytest.fixture:\n%s' % name
- assert not name.startswith(self._argprefix), msg
-
- fixture_def = FixtureDef(self, nodeid, name, obj,
- marker.scope, marker.params,
- unittest=unittest, ids=marker.ids)
-
- faclist = self._arg2fixturedefs.setdefault(name, [])
- if fixture_def.has_location:
- faclist.append(fixture_def)
- else:
- # fixturedefs with no location are at the front
- # so this inserts the current fixturedef after the
- # existing fixturedefs from external plugins but
- # before the fixturedefs provided in conftests.
- i = len([f for f in faclist if not f.has_location])
- faclist.insert(i, fixture_def)
- if marker.autouse:
- autousenames.append(name)
-
- if autousenames:
- self._nodeid_and_autousenames.append((nodeid or '', autousenames))
-
- def getfixturedefs(self, argname, nodeid):
- """
- Gets a list of fixtures which are applicable to the given node id.
-
- :param str argname: name of the fixture to search for
- :param str nodeid: full node id of the requesting test.
- :return: list[FixtureDef]
- """
- try:
- fixturedefs = self._arg2fixturedefs[argname]
- except KeyError:
- return None
- else:
- return tuple(self._matchfactories(fixturedefs, nodeid))
-
- def _matchfactories(self, fixturedefs, nodeid):
- for fixturedef in fixturedefs:
- if nodes.ischildnode(fixturedef.baseid, nodeid):
- yield fixturedef
diff --git a/lib/spack/external/pytest-fallback/_pytest/freeze_support.py b/lib/spack/external/pytest-fallback/_pytest/freeze_support.py
deleted file mode 100644
index 97147a8825..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/freeze_support.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""
-Provides a function to report all internal pytest modules for use
-with freezing tools.
-"""
-from __future__ import absolute_import, division, print_function
-
-
-def freeze_includes():
- """
- Returns a list of module names used by py.test that should be
- included by cx_freeze.
- """
- import py
- import _pytest
- result = list(_iter_all_modules(py))
- result += list(_iter_all_modules(_pytest))
- return result
-
-
-def _iter_all_modules(package, prefix=''):
- """
- Iterates over the names of all modules that can be found in the given
- package, recursively.
- Example:
- _iter_all_modules(_pytest) ->
- ['_pytest.assertion.newinterpret',
- '_pytest.capture',
- '_pytest.core',
- ...
- ]
- """
- import os
- import pkgutil
- if type(package) is not str:
- path, prefix = package.__path__[0], package.__name__ + '.'
- else:
- path = package
- for _, name, is_package in pkgutil.iter_modules([path]):
- if is_package:
- for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
- yield prefix + m
- else:
- yield prefix + name
diff --git a/lib/spack/external/pytest-fallback/_pytest/helpconfig.py b/lib/spack/external/pytest-fallback/_pytest/helpconfig.py
deleted file mode 100644
index e744637f86..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/helpconfig.py
+++ /dev/null
@@ -1,184 +0,0 @@
-""" version info, help messages, tracing configuration. """
-from __future__ import absolute_import, division, print_function
-
-import py
-import pytest
-from _pytest.config import PrintHelp
-import os
-import sys
-from argparse import Action
-
-
-class HelpAction(Action):
- """This is an argparse Action that will raise an exception in
- order to skip the rest of the argument parsing when --help is passed.
- This prevents argparse from quitting due to missing required arguments
- when any are defined, for example by ``pytest_addoption``.
- This is similar to the way that the builtin argparse --help option is
- implemented by raising SystemExit.
- """
-
- def __init__(self,
- option_strings,
- dest=None,
- default=False,
- help=None):
- super(HelpAction, self).__init__(
- option_strings=option_strings,
- dest=dest,
- const=True,
- default=default,
- nargs=0,
- help=help)
-
- def __call__(self, parser, namespace, values, option_string=None):
- setattr(namespace, self.dest, self.const)
-
- # We should only skip the rest of the parsing after preparse is done
- if getattr(parser._parser, 'after_preparse', False):
- raise PrintHelp
-
-
-def pytest_addoption(parser):
- group = parser.getgroup('debugconfig')
- group.addoption('--version', action="store_true",
- help="display pytest lib version and import information.")
- group._addoption("-h", "--help", action=HelpAction, dest="help",
- help="show help message and configuration info")
- group._addoption('-p', action="append", dest="plugins", default=[],
- metavar="name",
- help="early-load given plugin (multi-allowed). "
- "To avoid loading of plugins, use the `no:` prefix, e.g. "
- "`no:doctest`.")
- group.addoption('--traceconfig', '--trace-config',
- action="store_true", default=False,
- help="trace considerations of conftest.py files."),
- group.addoption('--debug',
- action="store_true", dest="debug", default=False,
- help="store internal tracing debug information in 'pytestdebug.log'.")
- group._addoption(
- '-o', '--override-ini', nargs='*', dest="override_ini",
- action="append",
- help="override config option with option=value style, e.g. `-o xfail_strict=True`.")
-
-
-@pytest.hookimpl(hookwrapper=True)
-def pytest_cmdline_parse():
- outcome = yield
- config = outcome.get_result()
- if config.option.debug:
- path = os.path.abspath("pytestdebug.log")
- debugfile = open(path, 'w')
- debugfile.write("versions pytest-%s, py-%s, "
- "python-%s\ncwd=%s\nargs=%s\n\n" % (
- pytest.__version__, py.__version__,
- ".".join(map(str, sys.version_info)),
- os.getcwd(), config._origargs))
- config.trace.root.setwriter(debugfile.write)
- undo_tracing = config.pluginmanager.enable_tracing()
- sys.stderr.write("writing pytestdebug information to %s\n" % path)
-
- def unset_tracing():
- debugfile.close()
- sys.stderr.write("wrote pytestdebug information to %s\n" %
- debugfile.name)
- config.trace.root.setwriter(None)
- undo_tracing()
-
- config.add_cleanup(unset_tracing)
-
-
-def pytest_cmdline_main(config):
- if config.option.version:
- p = py.path.local(pytest.__file__)
- sys.stderr.write("This is pytest version %s, imported from %s\n" %
- (pytest.__version__, p))
- plugininfo = getpluginversioninfo(config)
- if plugininfo:
- for line in plugininfo:
- sys.stderr.write(line + "\n")
- return 0
- elif config.option.help:
- config._do_configure()
- showhelp(config)
- config._ensure_unconfigure()
- return 0
-
-
-def showhelp(config):
- reporter = config.pluginmanager.get_plugin('terminalreporter')
- tw = reporter._tw
- tw.write(config._parser.optparser.format_help())
- tw.line()
- tw.line()
- tw.line("[pytest] ini-options in the first "
- "pytest.ini|tox.ini|setup.cfg file found:")
- tw.line()
-
- for name in config._parser._ininames:
- help, type, default = config._parser._inidict[name]
- if type is None:
- type = "string"
- spec = "%s (%s)" % (name, type)
- line = " %-24s %s" % (spec, help)
- tw.line(line[:tw.fullwidth])
-
- tw.line()
- tw.line("environment variables:")
- vars = [
- ("PYTEST_ADDOPTS", "extra command line options"),
- ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
- ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals")
- ]
- for name, help in vars:
- tw.line(" %-24s %s" % (name, help))
- tw.line()
- tw.line()
-
- tw.line("to see available markers type: pytest --markers")
- tw.line("to see available fixtures type: pytest --fixtures")
- tw.line("(shown according to specified file_or_dir or current dir "
- "if not specified)")
-
- for warningreport in reporter.stats.get('warnings', []):
- tw.line("warning : " + warningreport.message, red=True)
- return
-
-
-conftest_options = [
- ('pytest_plugins', 'list of plugin names to load'),
-]
-
-
-def getpluginversioninfo(config):
- lines = []
- plugininfo = config.pluginmanager.list_plugin_distinfo()
- if plugininfo:
- lines.append("setuptools registered plugins:")
- for plugin, dist in plugininfo:
- loc = getattr(plugin, '__file__', repr(plugin))
- content = "%s-%s at %s" % (dist.project_name, dist.version, loc)
- lines.append(" " + content)
- return lines
-
-
-def pytest_report_header(config):
- lines = []
- if config.option.debug or config.option.traceconfig:
- lines.append("using: pytest-%s pylib-%s" %
- (pytest.__version__, py.__version__))
-
- verinfo = getpluginversioninfo(config)
- if verinfo:
- lines.extend(verinfo)
-
- if config.option.traceconfig:
- lines.append("active plugins:")
- items = config.pluginmanager.list_name_plugin()
- for name, plugin in items:
- if hasattr(plugin, '__file__'):
- r = plugin.__file__
- else:
- r = repr(plugin)
- lines.append(" %-20s: %s" % (name, r))
- return lines
diff --git a/lib/spack/external/pytest-fallback/_pytest/hookspec.py b/lib/spack/external/pytest-fallback/_pytest/hookspec.py
deleted file mode 100644
index e5c966e58b..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/hookspec.py
+++ /dev/null
@@ -1,423 +0,0 @@
-""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
-
-from _pytest._pluggy import HookspecMarker
-
-hookspec = HookspecMarker("pytest")
-
-# -------------------------------------------------------------------------
-# Initialization hooks called for every plugin
-# -------------------------------------------------------------------------
-
-
-@hookspec(historic=True)
-def pytest_addhooks(pluginmanager):
- """called at plugin registration time to allow adding new hooks via a call to
- pluginmanager.add_hookspecs(module_or_class, prefix)."""
-
-
-@hookspec(historic=True)
-def pytest_namespace():
- """
-    DEPRECATED: this hook causes direct monkeypatching on pytest; its use is strongly discouraged.
-    Return a dict of name->object to be made globally available in
- the pytest namespace. This hook is called at plugin registration
- time.
- """
-
-
-@hookspec(historic=True)
-def pytest_plugin_registered(plugin, manager):
- """ a new pytest plugin got registered. """
-
-
-@hookspec(historic=True)
-def pytest_addoption(parser):
- """register argparse-style options and ini-style config values,
- called once at the beginning of a test run.
-
- .. note::
-
- This function should be implemented only in plugins or ``conftest.py``
- files situated at the tests root directory due to how pytest
- :ref:`discovers plugins during startup <pluginorder>`.
-
- :arg parser: To add command line options, call
- :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`.
- To add ini-file values call :py:func:`parser.addini(...)
- <_pytest.config.Parser.addini>`.
-
- Options can later be accessed through the
- :py:class:`config <_pytest.config.Config>` object, respectively:
-
- - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to
- retrieve the value of a command line option.
-
- - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve
- a value read from an ini-style file.
-
- The config object is passed around on many internal objects via the ``.config``
- attribute or can be retrieved as the ``pytestconfig`` fixture or accessed
- via (deprecated) ``pytest.config``.
- """
-
-
-@hookspec(historic=True)
-def pytest_configure(config):
- """
- Allows plugins and conftest files to perform initial configuration.
-
- This hook is called for every plugin and initial conftest file
- after command line options have been parsed.
-
- After that, the hook is called for other conftest files as they are
- imported.
-
- :arg config: pytest config object
- :type config: _pytest.config.Config
- """
-
-# -------------------------------------------------------------------------
-# Bootstrapping hooks called for plugins registered early enough:
-# internal and 3rd party plugins as well as directly
-# discoverable conftest.py local plugins.
-# -------------------------------------------------------------------------
-
-
-@hookspec(firstresult=True)
-def pytest_cmdline_parse(pluginmanager, args):
- """return initialized config object, parsing the specified args.
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-
-def pytest_cmdline_preparse(config, args):
- """(deprecated) modify command line arguments before option parsing. """
-
-
-@hookspec(firstresult=True)
-def pytest_cmdline_main(config):
- """ called for performing the main command line action. The default
- implementation will invoke the configure hooks and runtest_mainloop.
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-
-def pytest_load_initial_conftests(early_config, parser, args):
- """ implements the loading of initial conftest files ahead
- of command line option parsing. """
-
-
-# -------------------------------------------------------------------------
-# collection hooks
-# -------------------------------------------------------------------------
-
-@hookspec(firstresult=True)
-def pytest_collection(session):
- """ perform the collection protocol for the given session.
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-
-def pytest_collection_modifyitems(session, config, items):
- """ called after collection has been performed, may filter or re-order
- the items in-place."""
-
-
-def pytest_collection_finish(session):
- """ called after collection has been performed and modified. """
-
-
-@hookspec(firstresult=True)
-def pytest_ignore_collect(path, config):
- """ return True to prevent considering this path for collection.
- This hook is consulted for all files and directories prior to calling
- more specific hooks.
-
- Stops at first non-None result, see :ref:`firstresult`
- """
-
-
-@hookspec(firstresult=True)
-def pytest_collect_directory(path, parent):
- """ called before traversing a directory for collection files.
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-
-def pytest_collect_file(path, parent):
- """ return collection Node or None for the given path. Any new node
- needs to have the specified ``parent`` as a parent."""
-
-# logging hooks for collection
-
-
-def pytest_collectstart(collector):
- """ collector starts collecting. """
-
-
-def pytest_itemcollected(item):
- """ we just collected a test item. """
-
-
-def pytest_collectreport(report):
- """ collector finished collecting. """
-
-
-def pytest_deselected(items):
- """ called for test items deselected by keyword. """
-
-
-@hookspec(firstresult=True)
-def pytest_make_collect_report(collector):
- """ perform ``collector.collect()`` and return a CollectReport.
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-# -------------------------------------------------------------------------
-# Python test function related hooks
-# -------------------------------------------------------------------------
-
-
-@hookspec(firstresult=True)
-def pytest_pycollect_makemodule(path, parent):
- """ return a Module collector or None for the given path.
- This hook will be called for each matching test module path.
- The pytest_collect_file hook needs to be used if you want to
- create test modules for files that do not match as a test module.
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-
-@hookspec(firstresult=True)
-def pytest_pycollect_makeitem(collector, name, obj):
- """ return custom item/collector for a python object in a module, or None.
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-
-@hookspec(firstresult=True)
-def pytest_pyfunc_call(pyfuncitem):
- """ call underlying test function.
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-
-def pytest_generate_tests(metafunc):
- """ generate (multiple) parametrized calls to a test function."""
-
-
-@hookspec(firstresult=True)
-def pytest_make_parametrize_id(config, val, argname):
- """Return a user-friendly string representation of the given ``val`` that will be used
- by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``.
- The parameter name is available as ``argname``, if required.
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-# -------------------------------------------------------------------------
-# generic runtest related hooks
-# -------------------------------------------------------------------------
-
-
-@hookspec(firstresult=True)
-def pytest_runtestloop(session):
- """ called for performing the main runtest loop
- (after collection finished).
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-
-def pytest_itemstart(item, node):
- """ (deprecated, use pytest_runtest_logstart). """
-
-
-@hookspec(firstresult=True)
-def pytest_runtest_protocol(item, nextitem):
- """ implements the runtest_setup/call/teardown protocol for
- the given test item, including capturing exceptions and calling
- reporting hooks.
-
- :arg item: test item for which the runtest protocol is performed.
-
- :arg nextitem: the scheduled-to-be-next test item (or None if this
- is the end my friend). This argument is passed on to
- :py:func:`pytest_runtest_teardown`.
-
- :return boolean: True if no further hook implementations should be invoked.
-
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-
-def pytest_runtest_logstart(nodeid, location):
- """ signal the start of running a single test item. """
-
-
-def pytest_runtest_setup(item):
- """ called before ``pytest_runtest_call(item)``. """
-
-
-def pytest_runtest_call(item):
- """ called to execute the test ``item``. """
-
-
-def pytest_runtest_teardown(item, nextitem):
- """ called after ``pytest_runtest_call``.
-
- :arg nextitem: the scheduled-to-be-next test item (None if no further
- test item is scheduled). This argument can be used to
- perform exact teardowns, i.e. calling just enough finalizers
- so that nextitem only needs to call setup-functions.
- """
-
-
-@hookspec(firstresult=True)
-def pytest_runtest_makereport(item, call):
- """ return a :py:class:`_pytest.runner.TestReport` object
- for the given :py:class:`pytest.Item <_pytest.main.Item>` and
- :py:class:`_pytest.runner.CallInfo`.
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-
-def pytest_runtest_logreport(report):
- """ process a test setup/call/teardown report relating to
- the respective phase of executing a test. """
-
-# -------------------------------------------------------------------------
-# Fixture related hooks
-# -------------------------------------------------------------------------
-
-
-@hookspec(firstresult=True)
-def pytest_fixture_setup(fixturedef, request):
- """ performs fixture setup execution.
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-
-def pytest_fixture_post_finalizer(fixturedef):
- """ called after fixture teardown, but before the cache is cleared so
- the fixture result cache ``fixturedef.cached_result`` can
- still be accessed."""
-
-# -------------------------------------------------------------------------
-# test session related hooks
-# -------------------------------------------------------------------------
-
-
-def pytest_sessionstart(session):
- """ before session.main() is called. """
-
-
-def pytest_sessionfinish(session, exitstatus):
- """ whole test run finishes. """
-
-
-def pytest_unconfigure(config):
- """ called before test process is exited. """
-
-
-# -------------------------------------------------------------------------
-# hooks for customizing the assert methods
-# -------------------------------------------------------------------------
-
-def pytest_assertrepr_compare(config, op, left, right):
- """return explanation for comparisons in failing assert expressions.
-
- Return None for no custom explanation, otherwise return a list
- of strings. The strings will be joined by newlines but any newlines
- *in* a string will be escaped. Note that all but the first line will
-    be indented slightly; the intention is for the first line to be a summary.
- """
-
-# -------------------------------------------------------------------------
-# hooks for influencing reporting (invoked from _pytest_terminal)
-# -------------------------------------------------------------------------
-
-
-def pytest_report_header(config, startdir):
- """ return a string or list of strings to be displayed as header info for terminal reporting.
-
- :param config: the pytest config object.
- :param startdir: py.path object with the starting dir
-
- .. note::
-
- This function should be implemented only in plugins or ``conftest.py``
- files situated at the tests root directory due to how pytest
- :ref:`discovers plugins during startup <pluginorder>`.
- """
-
-
-def pytest_report_collectionfinish(config, startdir, items):
- """
- .. versionadded:: 3.2
-
- return a string or list of strings to be displayed after collection has finished successfully.
-
-    These strings will be displayed after the standard "collected X items" message.
-
- :param config: the pytest config object.
- :param startdir: py.path object with the starting dir
- :param items: list of pytest items that are going to be executed; this list should not be modified.
- """
-
-
-@hookspec(firstresult=True)
-def pytest_report_teststatus(report):
- """ return result-category, shortletter and verbose word for reporting.
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-
-def pytest_terminal_summary(terminalreporter, exitstatus):
- """ add additional section in terminal summary reporting. """
-
-
-@hookspec(historic=True)
-def pytest_logwarning(message, code, nodeid, fslocation):
- """ process a warning specified by a message, a code string,
- a nodeid and fslocation (both of which may be None
-    if the warning is not tied to a particular node/location)."""
-
-# -------------------------------------------------------------------------
-# doctest hooks
-# -------------------------------------------------------------------------
-
-
-@hookspec(firstresult=True)
-def pytest_doctest_prepare_content(content):
- """ return processed content for a given doctest
-
- Stops at first non-None result, see :ref:`firstresult` """
-
-# -------------------------------------------------------------------------
-# error handling and internal debugging hooks
-# -------------------------------------------------------------------------
-
-
-def pytest_internalerror(excrepr, excinfo):
- """ called for internal errors. """
-
-
-def pytest_keyboard_interrupt(excinfo):
- """ called for keyboard interrupt. """
-
-
-def pytest_exception_interact(node, call, report):
- """called when an exception was raised which can potentially be
- interactively handled.
-
- This hook is only called if an exception was raised
- that is not an internal exception like ``skip.Exception``.
- """
-
-
-def pytest_enter_pdb(config):
- """ called upon pdb.set_trace(), can be used by plugins to take special
-    action just before the python debugger enters interactive mode.
-
- :arg config: pytest config object
- :type config: _pytest.config.Config
- """
diff --git a/lib/spack/external/pytest-fallback/_pytest/junitxml.py b/lib/spack/external/pytest-fallback/_pytest/junitxml.py
deleted file mode 100644
index 7fb40dc354..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/junitxml.py
+++ /dev/null
@@ -1,453 +0,0 @@
-"""
- report test results in JUnit-XML format,
- for use with Jenkins and build integration servers.
-
-
-Based on initial code from Ross Lawley.
-
-Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
-src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
-"""
-from __future__ import absolute_import, division, print_function
-
-import functools
-import py
-import os
-import re
-import sys
-import time
-import pytest
-from _pytest import nodes
-from _pytest.config import filename_arg
-
-# Python 2.X and 3.X compatibility
-if sys.version_info[0] < 3:
- from codecs import open
-else:
- unichr = chr
- unicode = str
- long = int
-
-
-class Junit(py.xml.Namespace):
- pass
-
-
-# We need to get the subset of the invalid unicode ranges according to
-# XML 1.0 which are valid in this python build. Hence we calculate
-# this dynamically instead of hardcoding it. The spec range of valid
-# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
-# | [#x10000-#x10FFFF]
-_legal_chars = (0x09, 0x0A, 0x0d)
-_legal_ranges = (
- (0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF),
-)
-_legal_xml_re = [
- unicode("%s-%s") % (unichr(low), unichr(high))
- for (low, high) in _legal_ranges if low < sys.maxunicode
-]
-_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
-illegal_xml_re = re.compile(unicode('[^%s]') % unicode('').join(_legal_xml_re))
-del _legal_chars
-del _legal_ranges
-del _legal_xml_re
-
-_py_ext_re = re.compile(r"\.py$")
-
-
-def bin_xml_escape(arg):
- def repl(matchobj):
- i = ord(matchobj.group())
- if i <= 0xFF:
- return unicode('#x%02X') % i
- else:
- return unicode('#x%04X') % i
-
- return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
-
-
-class _NodeReporter(object):
- def __init__(self, nodeid, xml):
-
- self.id = nodeid
- self.xml = xml
- self.add_stats = self.xml.add_stats
- self.duration = 0
- self.properties = []
- self.nodes = []
- self.testcase = None
- self.attrs = {}
-
- def append(self, node):
- self.xml.add_stats(type(node).__name__)
- self.nodes.append(node)
-
- def add_property(self, name, value):
- self.properties.append((str(name), bin_xml_escape(value)))
-
- def make_properties_node(self):
- """Return a Junit node containing custom properties, if any.
- """
- if self.properties:
- return Junit.properties([
- Junit.property(name=name, value=value)
- for name, value in self.properties
- ])
- return ''
-
- def record_testreport(self, testreport):
- assert not self.testcase
- names = mangle_test_address(testreport.nodeid)
- classnames = names[:-1]
- if self.xml.prefix:
- classnames.insert(0, self.xml.prefix)
- attrs = {
- "classname": ".".join(classnames),
- "name": bin_xml_escape(names[-1]),
- "file": testreport.location[0],
- }
- if testreport.location[1] is not None:
- attrs["line"] = testreport.location[1]
- if hasattr(testreport, "url"):
- attrs["url"] = testreport.url
- self.attrs = attrs
-
- def to_xml(self):
- testcase = Junit.testcase(time=self.duration, **self.attrs)
- testcase.append(self.make_properties_node())
- for node in self.nodes:
- testcase.append(node)
- return testcase
-
- def _add_simple(self, kind, message, data=None):
- data = bin_xml_escape(data)
- node = kind(data, message=message)
- self.append(node)
-
- def write_captured_output(self, report):
- for capname in ('out', 'err'):
- content = getattr(report, 'capstd' + capname)
- if content:
- tag = getattr(Junit, 'system-' + capname)
- self.append(tag(bin_xml_escape(content)))
-
- def append_pass(self, report):
- self.add_stats('passed')
-
- def append_failure(self, report):
- # msg = str(report.longrepr.reprtraceback.extraline)
- if hasattr(report, "wasxfail"):
- self._add_simple(
- Junit.skipped,
- "xfail-marked test passes unexpectedly")
- else:
- if hasattr(report.longrepr, "reprcrash"):
- message = report.longrepr.reprcrash.message
- elif isinstance(report.longrepr, (unicode, str)):
- message = report.longrepr
- else:
- message = str(report.longrepr)
- message = bin_xml_escape(message)
- fail = Junit.failure(message=message)
- fail.append(bin_xml_escape(report.longrepr))
- self.append(fail)
-
- def append_collect_error(self, report):
- # msg = str(report.longrepr.reprtraceback.extraline)
- self.append(Junit.error(bin_xml_escape(report.longrepr),
- message="collection failure"))
-
- def append_collect_skipped(self, report):
- self._add_simple(
- Junit.skipped, "collection skipped", report.longrepr)
-
- def append_error(self, report):
- if getattr(report, 'when', None) == 'teardown':
- msg = "test teardown failure"
- else:
- msg = "test setup failure"
- self._add_simple(
- Junit.error, msg, report.longrepr)
-
- def append_skipped(self, report):
- if hasattr(report, "wasxfail"):
- self._add_simple(
- Junit.skipped, "expected test failure", report.wasxfail
- )
- else:
- filename, lineno, skipreason = report.longrepr
- if skipreason.startswith("Skipped: "):
- skipreason = bin_xml_escape(skipreason[9:])
- self.append(
- Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
- type="pytest.skip",
- message=skipreason))
- self.write_captured_output(report)
-
- def finalize(self):
- data = self.to_xml().unicode(indent=0)
- self.__dict__.clear()
- self.to_xml = lambda: py.xml.raw(data)
-
-
-@pytest.fixture
-def record_xml_property(request):
- """Add extra xml properties to the tag for the calling test.
- The fixture is callable with ``(name, value)``, with value being automatically
- xml-encoded.
- """
- request.node.warn(
- code='C3',
- message='record_xml_property is an experimental feature',
- )
- xml = getattr(request.config, "_xml", None)
- if xml is not None:
- node_reporter = xml.node_reporter(request.node.nodeid)
- return node_reporter.add_property
- else:
- def add_property_noop(name, value):
- pass
-
- return add_property_noop
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("terminal reporting")
- group.addoption(
- '--junitxml', '--junit-xml',
- action="store",
- dest="xmlpath",
- metavar="path",
- type=functools.partial(filename_arg, optname="--junitxml"),
- default=None,
- help="create junit-xml style report file at given path.")
- group.addoption(
- '--junitprefix', '--junit-prefix',
- action="store",
- metavar="str",
- default=None,
- help="prepend prefix to classnames in junit-xml output")
- parser.addini("junit_suite_name", "Test suite name for JUnit report", default="pytest")
-
-
-def pytest_configure(config):
- xmlpath = config.option.xmlpath
- # prevent opening xmllog on slave nodes (xdist)
- if xmlpath and not hasattr(config, 'slaveinput'):
- config._xml = LogXML(xmlpath, config.option.junitprefix, config.getini("junit_suite_name"))
- config.pluginmanager.register(config._xml)
-
-
-def pytest_unconfigure(config):
- xml = getattr(config, '_xml', None)
- if xml:
- del config._xml
- config.pluginmanager.unregister(xml)
-
-
-def mangle_test_address(address):
- path, possible_open_bracket, params = address.partition('[')
- names = path.split("::")
- try:
- names.remove('()')
- except ValueError:
- pass
- # convert file path to dotted path
- names[0] = names[0].replace(nodes.SEP, '.')
- names[0] = _py_ext_re.sub("", names[0])
- # put any params back
- names[-1] += possible_open_bracket + params
- return names
-
-
-class LogXML(object):
- def __init__(self, logfile, prefix, suite_name="pytest"):
- logfile = os.path.expanduser(os.path.expandvars(logfile))
- self.logfile = os.path.normpath(os.path.abspath(logfile))
- self.prefix = prefix
- self.suite_name = suite_name
- self.stats = dict.fromkeys([
- 'error',
- 'passed',
- 'failure',
- 'skipped',
- ], 0)
- self.node_reporters = {} # nodeid -> _NodeReporter
- self.node_reporters_ordered = []
- self.global_properties = []
- # List of reports that failed on call but teardown is pending.
- self.open_reports = []
- self.cnt_double_fail_tests = 0
-
- def finalize(self, report):
- nodeid = getattr(report, 'nodeid', report)
- # local hack to handle xdist report order
- slavenode = getattr(report, 'node', None)
- reporter = self.node_reporters.pop((nodeid, slavenode))
- if reporter is not None:
- reporter.finalize()
-
- def node_reporter(self, report):
- nodeid = getattr(report, 'nodeid', report)
- # local hack to handle xdist report order
- slavenode = getattr(report, 'node', None)
-
- key = nodeid, slavenode
-
- if key in self.node_reporters:
-            # TODO: breaks for --dist=each
- return self.node_reporters[key]
-
- reporter = _NodeReporter(nodeid, self)
-
- self.node_reporters[key] = reporter
- self.node_reporters_ordered.append(reporter)
-
- return reporter
-
- def add_stats(self, key):
- if key in self.stats:
- self.stats[key] += 1
-
- def _opentestcase(self, report):
- reporter = self.node_reporter(report)
- reporter.record_testreport(report)
- return reporter
-
- def pytest_runtest_logreport(self, report):
- """handle a setup/call/teardown report, generating the appropriate
- xml tags as necessary.
-
- note: due to plugins like xdist, this hook may be called in interlaced
- order with reports from other nodes. for example:
-
- usual call order:
- -> setup node1
- -> call node1
- -> teardown node1
- -> setup node2
- -> call node2
- -> teardown node2
-
- possible call order in xdist:
- -> setup node1
- -> call node1
- -> setup node2
- -> call node2
- -> teardown node2
- -> teardown node1
- """
- close_report = None
- if report.passed:
- if report.when == "call": # ignore setup/teardown
- reporter = self._opentestcase(report)
- reporter.append_pass(report)
- elif report.failed:
- if report.when == "teardown":
- # The following vars are needed when xdist plugin is used
- report_wid = getattr(report, "worker_id", None)
- report_ii = getattr(report, "item_index", None)
- close_report = next(
- (rep for rep in self.open_reports
- if (rep.nodeid == report.nodeid and
- getattr(rep, "item_index", None) == report_ii and
- getattr(rep, "worker_id", None) == report_wid
- )
- ), None)
- if close_report:
- # We need to open new testcase in case we have failure in
- # call and error in teardown in order to follow junit
- # schema
- self.finalize(close_report)
- self.cnt_double_fail_tests += 1
- reporter = self._opentestcase(report)
- if report.when == "call":
- reporter.append_failure(report)
- self.open_reports.append(report)
- else:
- reporter.append_error(report)
- elif report.skipped:
- reporter = self._opentestcase(report)
- reporter.append_skipped(report)
- self.update_testcase_duration(report)
- if report.when == "teardown":
- reporter = self._opentestcase(report)
- reporter.write_captured_output(report)
- self.finalize(report)
- report_wid = getattr(report, "worker_id", None)
- report_ii = getattr(report, "item_index", None)
- close_report = next(
- (rep for rep in self.open_reports
- if (rep.nodeid == report.nodeid and
- getattr(rep, "item_index", None) == report_ii and
- getattr(rep, "worker_id", None) == report_wid
- )
- ), None)
- if close_report:
- self.open_reports.remove(close_report)
-
- def update_testcase_duration(self, report):
- """accumulates total duration for nodeid from given report and updates
- the Junit.testcase with the new total if already created.
- """
- reporter = self.node_reporter(report)
- reporter.duration += getattr(report, 'duration', 0.0)
-
- def pytest_collectreport(self, report):
- if not report.passed:
- reporter = self._opentestcase(report)
- if report.failed:
- reporter.append_collect_error(report)
- else:
- reporter.append_collect_skipped(report)
-
- def pytest_internalerror(self, excrepr):
- reporter = self.node_reporter('internal')
- reporter.attrs.update(classname="pytest", name='internal')
- reporter._add_simple(Junit.error, 'internal error', excrepr)
-
- def pytest_sessionstart(self):
- self.suite_start_time = time.time()
-
- def pytest_sessionfinish(self):
- dirname = os.path.dirname(os.path.abspath(self.logfile))
- if not os.path.isdir(dirname):
- os.makedirs(dirname)
- logfile = open(self.logfile, 'w', encoding='utf-8')
- suite_stop_time = time.time()
- suite_time_delta = suite_stop_time - self.suite_start_time
-
- numtests = (self.stats['passed'] + self.stats['failure'] +
- self.stats['skipped'] + self.stats['error'] -
- self.cnt_double_fail_tests)
- logfile.write('<?xml version="1.0" encoding="utf-8"?>')
-
- logfile.write(Junit.testsuite(
- self._get_global_properties_node(),
- [x.to_xml() for x in self.node_reporters_ordered],
- name=self.suite_name,
- errors=self.stats['error'],
- failures=self.stats['failure'],
- skips=self.stats['skipped'],
- tests=numtests,
- time="%.3f" % suite_time_delta, ).unicode(indent=0))
- logfile.close()
-
- def pytest_terminal_summary(self, terminalreporter):
- terminalreporter.write_sep("-",
- "generated xml file: %s" % (self.logfile))
-
- def add_global_property(self, name, value):
- self.global_properties.append((str(name), bin_xml_escape(value)))
-
- def _get_global_properties_node(self):
- """Return a Junit node containing custom properties, if any.
- """
- if self.global_properties:
- return Junit.properties(
- [
- Junit.property(name=name, value=value)
- for name, value in self.global_properties
- ]
- )
- return ''
diff --git a/lib/spack/external/pytest-fallback/_pytest/main.py b/lib/spack/external/pytest-fallback/_pytest/main.py
deleted file mode 100644
index 98aa28eb34..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/main.py
+++ /dev/null
@@ -1,838 +0,0 @@
-""" core implementation of testing process: init, session, runtest loop. """
-from __future__ import absolute_import, division, print_function
-
-import functools
-import os
-import sys
-
-import _pytest
-from _pytest import nodes
-import _pytest._code
-import py
-try:
- from collections.abc import MutableMapping as MappingMixin
-except ImportError:
- try:
- from collections import MutableMapping as MappingMixin
- except ImportError:
- from UserDict import DictMixin as MappingMixin
-
-from _pytest.config import directory_arg, UsageError, hookimpl
-from _pytest.outcomes import exit
-from _pytest.runner import collect_one_node
-
-tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
-
-# exitcodes for the command line
-EXIT_OK = 0
-EXIT_TESTSFAILED = 1
-EXIT_INTERRUPTED = 2
-EXIT_INTERNALERROR = 3
-EXIT_USAGEERROR = 4
-EXIT_NOTESTSCOLLECTED = 5
-
-
-def pytest_addoption(parser):
- parser.addini("norecursedirs", "directory patterns to avoid for recursion",
- type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'])
- parser.addini("testpaths", "directories to search for tests when no files or directories are given in the "
- "command line.",
- type="args", default=[])
- # parser.addini("dirpatterns",
- # "patterns specifying possible locations of test files",
- # type="linelist", default=["**/test_*.txt",
- # "**/test_*.py", "**/*_test.py"]
- # )
- group = parser.getgroup("general", "running and selection options")
- group._addoption('-x', '--exitfirst', action="store_const",
- dest="maxfail", const=1,
- help="exit instantly on first error or failed test."),
- group._addoption('--maxfail', metavar="num",
- action="store", type=int, dest="maxfail", default=0,
- help="exit after first num failures or errors.")
- group._addoption('--strict', action="store_true",
- help="marks not registered in configuration file raise errors.")
- group._addoption("-c", metavar="file", type=str, dest="inifilename",
- help="load configuration from `file` instead of trying to locate one of the implicit "
- "configuration files.")
- group._addoption("--continue-on-collection-errors", action="store_true",
- default=False, dest="continue_on_collection_errors",
- help="Force test execution even if collection errors occur.")
-
- group = parser.getgroup("collect", "collection")
- group.addoption('--collectonly', '--collect-only', action="store_true",
- help="only collect tests, don't execute them."),
- group.addoption('--pyargs', action="store_true",
- help="try to interpret all arguments as python packages.")
- group.addoption("--ignore", action="append", metavar="path",
- help="ignore path during collection (multi-allowed).")
- # when changing this to --conf-cut-dir, config.py Conftest.setinitial
- # needs upgrading as well
- group.addoption('--confcutdir', dest="confcutdir", default=None,
- metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"),
- help="only load conftest.py's relative to specified dir.")
- group.addoption('--noconftest', action="store_true",
- dest="noconftest", default=False,
- help="Don't load any conftest.py files.")
- group.addoption('--keepduplicates', '--keep-duplicates', action="store_true",
- dest="keepduplicates", default=False,
- help="Keep duplicate tests.")
- group.addoption('--collect-in-virtualenv', action='store_true',
- dest='collect_in_virtualenv', default=False,
- help="Don't ignore tests in a local virtualenv directory")
-
- group = parser.getgroup("debugconfig",
- "test session debugging and configuration")
- group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
- help="base temporary directory for this test run.")
-
-
-def pytest_namespace():
- """keeping this one works around a deeper startup issue in pytest
-
- i tried to find it for a while but the amount of time turned unsustainable,
- so i put a hack in to revisit later
- """
- return {}
-
-
-def pytest_configure(config):
-    __import__('pytest').config = config  # compatibility
-
-
-def wrap_session(config, doit):
- """Skeleton command line program"""
- session = Session(config)
- session.exitstatus = EXIT_OK
- initstate = 0
- try:
- try:
- config._do_configure()
- initstate = 1
- config.hook.pytest_sessionstart(session=session)
- initstate = 2
- session.exitstatus = doit(config, session) or 0
- except UsageError:
- raise
- except KeyboardInterrupt:
- excinfo = _pytest._code.ExceptionInfo()
- if initstate < 2 and isinstance(excinfo.value, exit.Exception):
- sys.stderr.write('{0}: {1}\n'.format(
- excinfo.typename, excinfo.value.msg))
- config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
- session.exitstatus = EXIT_INTERRUPTED
- except: # noqa
- excinfo = _pytest._code.ExceptionInfo()
- config.notify_exception(excinfo, config.option)
- session.exitstatus = EXIT_INTERNALERROR
- if excinfo.errisinstance(SystemExit):
- sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
-
- finally:
- excinfo = None # Explicitly break reference cycle.
- session.startdir.chdir()
- if initstate >= 2:
- config.hook.pytest_sessionfinish(
- session=session,
- exitstatus=session.exitstatus)
- config._ensure_unconfigure()
- return session.exitstatus
-
-
-def pytest_cmdline_main(config):
- return wrap_session(config, _main)
-
-
-def _main(config, session):
- """ default command line protocol for initialization, session,
- running tests and reporting. """
- config.hook.pytest_collection(session=session)
- config.hook.pytest_runtestloop(session=session)
-
- if session.testsfailed:
- return EXIT_TESTSFAILED
- elif session.testscollected == 0:
- return EXIT_NOTESTSCOLLECTED
-
-
-def pytest_collection(session):
- return session.perform_collect()
-
-
-def pytest_runtestloop(session):
- if (session.testsfailed and
- not session.config.option.continue_on_collection_errors):
- raise session.Interrupted(
- "%d errors during collection" % session.testsfailed)
-
- if session.config.option.collectonly:
- return True
-
- for i, item in enumerate(session.items):
- nextitem = session.items[i + 1] if i + 1 < len(session.items) else None
- item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
- if session.shouldstop:
- raise session.Interrupted(session.shouldstop)
- return True
-
-
-def _in_venv(path):
- """Attempts to detect if ``path`` is the root of a Virtual Environment by
- checking for the existence of the appropriate activate script"""
- bindir = path.join('Scripts' if sys.platform.startswith('win') else 'bin')
- if not bindir.exists():
- return False
- activates = ('activate', 'activate.csh', 'activate.fish',
- 'Activate', 'Activate.bat', 'Activate.ps1')
- return any([fname.basename in activates for fname in bindir.listdir()])
-
-
-def pytest_ignore_collect(path, config):
- ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath())
- ignore_paths = ignore_paths or []
- excludeopt = config.getoption("ignore")
- if excludeopt:
- ignore_paths.extend([py.path.local(x) for x in excludeopt])
-
- if py.path.local(path) in ignore_paths:
- return True
-
- allow_in_venv = config.getoption("collect_in_virtualenv")
- if _in_venv(path) and not allow_in_venv:
- return True
-
- # Skip duplicate paths.
- keepduplicates = config.getoption("keepduplicates")
- duplicate_paths = config.pluginmanager._duplicatepaths
- if not keepduplicates:
- if path in duplicate_paths:
- return True
- else:
- duplicate_paths.add(path)
-
- return False
-
-
-class FSHookProxy:
- def __init__(self, fspath, pm, remove_mods):
- self.fspath = fspath
- self.pm = pm
- self.remove_mods = remove_mods
-
- def __getattr__(self, name):
- x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
- self.__dict__[name] = x
- return x
-
-
-class _CompatProperty(object):
- def __init__(self, name):
- self.name = name
-
- def __get__(self, obj, owner):
- if obj is None:
- return self
-
- # TODO: reenable in the features branch
- # warnings.warn(
- # "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format(
- # name=self.name, owner=type(owner).__name__),
- # PendingDeprecationWarning, stacklevel=2)
- return getattr(__import__('pytest'), self.name)
-
-
-class NodeKeywords(MappingMixin):
- def __init__(self, node):
- self.node = node
- self.parent = node.parent
- self._markers = {node.name: True}
-
- def __getitem__(self, key):
- try:
- return self._markers[key]
- except KeyError:
- if self.parent is None:
- raise
- return self.parent.keywords[key]
-
- def __setitem__(self, key, value):
- self._markers[key] = value
-
- def __delitem__(self, key):
- raise ValueError("cannot delete key in keywords dict")
-
- def __iter__(self):
- seen = set(self._markers)
- if self.parent is not None:
- seen.update(self.parent.keywords)
- return iter(seen)
-
- def __len__(self):
- return len(self.__iter__())
-
- def keys(self):
- return list(self)
-
- def __repr__(self):
- return "<NodeKeywords for node %s>" % (self.node, )
-
-
-class Node(object):
- """ base class for Collector and Item the test collection tree.
- Collector subclasses have children, Items are terminal nodes."""
-
- def __init__(self, name, parent=None, config=None, session=None):
- #: a unique name within the scope of the parent node
- self.name = name
-
- #: the parent collector node.
- self.parent = parent
-
- #: the pytest config object
- self.config = config or parent.config
-
- #: the session this node is part of
- self.session = session or parent.session
-
- #: filesystem path where this node was collected from (can be None)
- self.fspath = getattr(parent, 'fspath', None)
-
- #: keywords/markers collected from all scopes
- self.keywords = NodeKeywords(self)
-
- #: allow adding of extra keywords to use for matching
- self.extra_keyword_matches = set()
-
- # used for storing artificial fixturedefs for direct parametrization
- self._name2pseudofixturedef = {}
-
- @property
- def ihook(self):
- """ fspath sensitive hook proxy used to call pytest hooks"""
- return self.session.gethookproxy(self.fspath)
-
- Module = _CompatProperty("Module")
- Class = _CompatProperty("Class")
- Instance = _CompatProperty("Instance")
- Function = _CompatProperty("Function")
- File = _CompatProperty("File")
- Item = _CompatProperty("Item")
-
- def _getcustomclass(self, name):
- maybe_compatprop = getattr(type(self), name)
- if isinstance(maybe_compatprop, _CompatProperty):
- return getattr(__import__('pytest'), name)
- else:
- cls = getattr(self, name)
- # TODO: reenable in the features branch
- # warnings.warn("use of node.%s is deprecated, "
- # "use pytest_pycollect_makeitem(...) to create custom "
- # "collection nodes" % name, category=DeprecationWarning)
- return cls
-
- def __repr__(self):
- return "<%s %r>" % (self.__class__.__name__,
- getattr(self, 'name', None))
-
- def warn(self, code, message):
- """ generate a warning with the given code and message for this
- item. """
- assert isinstance(code, str)
- fslocation = getattr(self, "location", None)
- if fslocation is None:
- fslocation = getattr(self, "fspath", None)
- self.ihook.pytest_logwarning.call_historic(kwargs=dict(
- code=code, message=message,
- nodeid=self.nodeid, fslocation=fslocation))
-
- # methods for ordering nodes
- @property
- def nodeid(self):
- """ a ::-separated string denoting its collection tree address. """
- try:
- return self._nodeid
- except AttributeError:
- self._nodeid = x = self._makeid()
- return x
-
- def _makeid(self):
- return self.parent.nodeid + "::" + self.name
-
- def __hash__(self):
- return hash(self.nodeid)
-
- def setup(self):
- pass
-
- def teardown(self):
- pass
-
- def _memoizedcall(self, attrname, function):
- exattrname = "_ex_" + attrname
- failure = getattr(self, exattrname, None)
- if failure is not None:
- py.builtin._reraise(failure[0], failure[1], failure[2])
- if hasattr(self, attrname):
- return getattr(self, attrname)
- try:
- res = function()
- except py.builtin._sysex:
- raise
- except: # noqa
- failure = sys.exc_info()
- setattr(self, exattrname, failure)
- raise
- setattr(self, attrname, res)
- return res
-
- def listchain(self):
- """ return list of all parent collectors up to self,
- starting from root of collection tree. """
- chain = []
- item = self
- while item is not None:
- chain.append(item)
- item = item.parent
- chain.reverse()
- return chain
-
- def add_marker(self, marker):
- """ dynamically add a marker object to the node.
-
- ``marker`` can be a string or pytest.mark.* instance.
- """
- from _pytest.mark import MarkDecorator, MARK_GEN
- if isinstance(marker, py.builtin._basestring):
- marker = getattr(MARK_GEN, marker)
- elif not isinstance(marker, MarkDecorator):
- raise ValueError("is not a string or pytest.mark.* Marker")
- self.keywords[marker.name] = marker
-
- def get_marker(self, name):
- """ get a marker object from this node or None if
- the node doesn't have a marker with that name. """
- val = self.keywords.get(name, None)
- if val is not None:
- from _pytest.mark import MarkInfo, MarkDecorator
- if isinstance(val, (MarkDecorator, MarkInfo)):
- return val
-
- def listextrakeywords(self):
- """ Return a set of all extra keywords in self and any parents."""
- extra_keywords = set()
- item = self
- for item in self.listchain():
- extra_keywords.update(item.extra_keyword_matches)
- return extra_keywords
-
- def listnames(self):
- return [x.name for x in self.listchain()]
-
- def addfinalizer(self, fin):
- """ register a function to be called when this node is finalized.
-
- This method can only be called when this node is active
- in a setup chain, for example during self.setup().
- """
- self.session._setupstate.addfinalizer(fin, self)
-
- def getparent(self, cls):
- """ get the next parent node (including ourself)
- which is an instance of the given class"""
- current = self
- while current and not isinstance(current, cls):
- current = current.parent
- return current
-
- def _prunetraceback(self, excinfo):
- pass
-
- def _repr_failure_py(self, excinfo, style=None):
- fm = self.session._fixturemanager
- if excinfo.errisinstance(fm.FixtureLookupError):
- return excinfo.value.formatrepr()
- tbfilter = True
- if self.config.option.fulltrace:
- style = "long"
- else:
- tb = _pytest._code.Traceback([excinfo.traceback[-1]])
- self._prunetraceback(excinfo)
- if len(excinfo.traceback) == 0:
- excinfo.traceback = tb
- tbfilter = False # prunetraceback already does it
- if style == "auto":
- style = "long"
- # XXX should excinfo.getrepr record all data and toterminal() process it?
- if style is None:
- if self.config.option.tbstyle == "short":
- style = "short"
- else:
- style = "long"
-
- try:
- os.getcwd()
- abspath = False
- except OSError:
- abspath = True
-
- return excinfo.getrepr(funcargs=True, abspath=abspath,
- showlocals=self.config.option.showlocals,
- style=style, tbfilter=tbfilter)
-
- repr_failure = _repr_failure_py
-
-
-class Collector(Node):
- """ Collector instances create children through collect()
- and thus iteratively build a tree.
- """
-
- class CollectError(Exception):
- """ an error during collection, contains a custom message. """
-
- def collect(self):
- """ returns a list of children (items and collectors)
- for this collection node.
- """
- raise NotImplementedError("abstract")
-
- def repr_failure(self, excinfo):
- """ represent a collection failure. """
- if excinfo.errisinstance(self.CollectError):
- exc = excinfo.value
- return str(exc.args[0])
- return self._repr_failure_py(excinfo, style="short")
-
- def _prunetraceback(self, excinfo):
- if hasattr(self, 'fspath'):
- traceback = excinfo.traceback
- ntraceback = traceback.cut(path=self.fspath)
- if ntraceback == traceback:
- ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
- excinfo.traceback = ntraceback.filter()
-
-
-class FSCollector(Collector):
- def __init__(self, fspath, parent=None, config=None, session=None):
- fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
- name = fspath.basename
- if parent is not None:
- rel = fspath.relto(parent.fspath)
- if rel:
- name = rel
- name = name.replace(os.sep, nodes.SEP)
- super(FSCollector, self).__init__(name, parent, config, session)
- self.fspath = fspath
-
- def _check_initialpaths_for_relpath(self):
- for initialpath in self.session._initialpaths:
- if self.fspath.common(initialpath) == initialpath:
- return self.fspath.relto(initialpath.dirname)
-
- def _makeid(self):
- relpath = self.fspath.relto(self.config.rootdir)
-
- if not relpath:
- relpath = self._check_initialpaths_for_relpath()
- if os.sep != nodes.SEP:
- relpath = relpath.replace(os.sep, nodes.SEP)
- return relpath
-
-
-class File(FSCollector):
- """ base class for collecting tests from a file. """
-
-
-class Item(Node):
- """ a basic test invocation item. Note that for a single function
- there might be multiple test invocation items.
- """
- nextitem = None
-
- def __init__(self, name, parent=None, config=None, session=None):
- super(Item, self).__init__(name, parent, config, session)
- self._report_sections = []
-
- def add_report_section(self, when, key, content):
- """
- Adds a new report section, similar to what's done internally to add stdout and
- stderr captured output::
-
- item.add_report_section("call", "stdout", "report section contents")
-
- :param str when:
- One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``.
- :param str key:
- Name of the section, can be customized at will. Pytest uses ``"stdout"`` and
- ``"stderr"`` internally.
-
- :param str content:
- The full contents as a string.
- """
- if content:
- self._report_sections.append((when, key, content))
-
- def reportinfo(self):
- return self.fspath, None, ""
-
- @property
- def location(self):
- try:
- return self._location
- except AttributeError:
- location = self.reportinfo()
- # bestrelpath is a quite slow function
- cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
- try:
- fspath = cache[location[0]]
- except KeyError:
- fspath = self.session.fspath.bestrelpath(location[0])
- cache[location[0]] = fspath
- location = (fspath, location[1], str(location[2]))
- self._location = location
- return location
-
-
-class NoMatch(Exception):
- """ raised if matching cannot locate a matching names. """
-
-
-class Interrupted(KeyboardInterrupt):
- """ signals an interrupted test run. """
- __module__ = 'builtins' # for py3
-
-
-class Session(FSCollector):
- Interrupted = Interrupted
-
- def __init__(self, config):
- FSCollector.__init__(self, config.rootdir, parent=None,
- config=config, session=self)
- self.testsfailed = 0
- self.testscollected = 0
- self.shouldstop = False
- self.trace = config.trace.root.get("collection")
- self._norecursepatterns = config.getini("norecursedirs")
- self.startdir = py.path.local()
- self.config.pluginmanager.register(self, name="session")
-
- def _makeid(self):
- return ""
-
- @hookimpl(tryfirst=True)
- def pytest_collectstart(self):
- if self.shouldstop:
- raise self.Interrupted(self.shouldstop)
-
- @hookimpl(tryfirst=True)
- def pytest_runtest_logreport(self, report):
- if report.failed and not hasattr(report, 'wasxfail'):
- self.testsfailed += 1
- maxfail = self.config.getvalue("maxfail")
- if maxfail and self.testsfailed >= maxfail:
- self.shouldstop = "stopping after %d failures" % (
- self.testsfailed)
- pytest_collectreport = pytest_runtest_logreport
-
- def isinitpath(self, path):
- return path in self._initialpaths
-
- def gethookproxy(self, fspath):
- # check if we have the common case of running
-        # hooks with all conftest.py files
- pm = self.config.pluginmanager
- my_conftestmodules = pm._getconftestmodules(fspath)
- remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
- if remove_mods:
- # one or more conftests are not in use at this fspath
- proxy = FSHookProxy(fspath, pm, remove_mods)
- else:
-            # all plugins are active for this fspath
- proxy = self.config.hook
- return proxy
-
- def perform_collect(self, args=None, genitems=True):
- hook = self.config.hook
- try:
- items = self._perform_collect(args, genitems)
- self.config.pluginmanager.check_pending()
- hook.pytest_collection_modifyitems(session=self,
- config=self.config, items=items)
- finally:
- hook.pytest_collection_finish(session=self)
- self.testscollected = len(items)
- return items
-
- def _perform_collect(self, args, genitems):
- if args is None:
- args = self.config.args
- self.trace("perform_collect", self, args)
- self.trace.root.indent += 1
- self._notfound = []
- self._initialpaths = set()
- self._initialparts = []
- self.items = items = []
- for arg in args:
- parts = self._parsearg(arg)
- self._initialparts.append(parts)
- self._initialpaths.add(parts[0])
- rep = collect_one_node(self)
- self.ihook.pytest_collectreport(report=rep)
- self.trace.root.indent -= 1
- if self._notfound:
- errors = []
- for arg, exc in self._notfound:
- line = "(no name %r in any of %r)" % (arg, exc.args[0])
- errors.append("not found: %s\n%s" % (arg, line))
- # XXX: test this
- raise UsageError(*errors)
- if not genitems:
- return rep.result
- else:
- if rep.passed:
- for node in rep.result:
- self.items.extend(self.genitems(node))
- return items
-
- def collect(self):
- for parts in self._initialparts:
- arg = "::".join(map(str, parts))
- self.trace("processing argument", arg)
- self.trace.root.indent += 1
- try:
- for x in self._collect(arg):
- yield x
- except NoMatch:
- # we are inside a make_report hook so
- # we cannot directly pass through the exception
- self._notfound.append((arg, sys.exc_info()[1]))
-
- self.trace.root.indent -= 1
-
- def _collect(self, arg):
- names = self._parsearg(arg)
- path = names.pop(0)
- if path.check(dir=1):
- assert not names, "invalid arg %r" % (arg,)
- for path in path.visit(fil=lambda x: x.check(file=1),
- rec=self._recurse, bf=True, sort=True):
- for x in self._collectfile(path):
- yield x
- else:
- assert path.check(file=1)
- for x in self.matchnodes(self._collectfile(path), names):
- yield x
-
- def _collectfile(self, path):
- ihook = self.gethookproxy(path)
- if not self.isinitpath(path):
- if ihook.pytest_ignore_collect(path=path, config=self.config):
- return ()
- return ihook.pytest_collect_file(path=path, parent=self)
-
- def _recurse(self, path):
- ihook = self.gethookproxy(path.dirpath())
- if ihook.pytest_ignore_collect(path=path, config=self.config):
- return
- for pat in self._norecursepatterns:
- if path.check(fnmatch=pat):
- return False
- ihook = self.gethookproxy(path)
- ihook.pytest_collect_directory(path=path, parent=self)
- return True
-
- def _tryconvertpyarg(self, x):
- """Convert a dotted module name to path.
-
- """
- import pkgutil
- try:
- loader = pkgutil.find_loader(x)
- except ImportError:
- return x
- if loader is None:
- return x
- # This method is sometimes invoked when AssertionRewritingHook, which
- # does not define a get_filename method, is already in place:
- try:
- path = loader.get_filename(x)
- except AttributeError:
- # Retrieve path from AssertionRewritingHook:
- path = loader.modules[x][0].co_filename
- if loader.is_package(x):
- path = os.path.dirname(path)
- return path
-
- def _parsearg(self, arg):
- """ return (fspath, names) tuple after checking the file exists. """
- parts = str(arg).split("::")
- if self.config.option.pyargs:
- parts[0] = self._tryconvertpyarg(parts[0])
- relpath = parts[0].replace("/", os.sep)
- path = self.config.invocation_dir.join(relpath, abs=True)
- if not path.check():
- if self.config.option.pyargs:
- raise UsageError(
- "file or package not found: " + arg +
- " (missing __init__.py?)")
- else:
- raise UsageError("file not found: " + arg)
- parts[0] = path
- return parts
-
- def matchnodes(self, matching, names):
- self.trace("matchnodes", matching, names)
- self.trace.root.indent += 1
- nodes = self._matchnodes(matching, names)
- num = len(nodes)
- self.trace("matchnodes finished -> ", num, "nodes")
- self.trace.root.indent -= 1
- if num == 0:
- raise NoMatch(matching, names[:1])
- return nodes
-
- def _matchnodes(self, matching, names):
- if not matching or not names:
- return matching
- name = names[0]
- assert name
- nextnames = names[1:]
- resultnodes = []
- for node in matching:
- if isinstance(node, Item):
- if not names:
- resultnodes.append(node)
- continue
- assert isinstance(node, Collector)
- rep = collect_one_node(node)
- if rep.passed:
- has_matched = False
- for x in rep.result:
- # TODO: remove parametrized workaround once collection structure contains parametrization
- if x.name == name or x.name.split("[")[0] == name:
- resultnodes.extend(self.matchnodes([x], nextnames))
- has_matched = True
- # XXX accept IDs that don't have "()" for class instances
- if not has_matched and len(rep.result) == 1 and x.name == "()":
- nextnames.insert(0, name)
- resultnodes.extend(self.matchnodes([x], nextnames))
- else:
- # report collection failures here to avoid failing to run some test
- # specified in the command line because the module could not be
- # imported (#134)
- node.ihook.pytest_collectreport(report=rep)
- return resultnodes
-
- def genitems(self, node):
- self.trace("genitems", node)
- if isinstance(node, Item):
- node.ihook.pytest_itemcollected(item=node)
- yield node
- else:
- assert isinstance(node, Collector)
- rep = collect_one_node(node)
- if rep.passed:
- for subnode in rep.result:
- for x in self.genitems(subnode):
- yield x
- node.ihook.pytest_collectreport(report=rep)
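
The Node/Session code above addresses every collected test by a ``::``-separated nodeid built from its parents (``_makeid``) and walks parent links with ``listchain()``. A minimal standalone sketch of that addressing scheme (the ``SimpleNode`` class is invented for illustration and is not part of pytest)::

    class SimpleNode:
        """Toy stand-in for _pytest's Node, only for illustrating nodeids."""

        def __init__(self, name, parent=None):
            self.name = name
            self.parent = parent

        @property
        def nodeid(self):
            if self.parent is None:
                return ""            # Session._makeid() returns ""
            if self.parent.nodeid == "":
                return self.name     # FSCollector uses the path relative to rootdir
            return self.parent.nodeid + "::" + self.name

        def listchain(self):
            chain, node = [], self
            while node is not None:
                chain.append(node)
                node = node.parent
            chain.reverse()
            return chain

    session = SimpleNode("")
    module = SimpleNode("test_demo.py", parent=session)
    item = SimpleNode("test_answer", parent=module)
    assert item.nodeid == "test_demo.py::test_answer"
    assert [n.name for n in item.listchain()] == ["", "test_demo.py", "test_answer"]
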
diff --git a/lib/spack/external/pytest-fallback/_pytest/mark.py b/lib/spack/external/pytest-fallback/_pytest/mark.py
deleted file mode 100644
index 454722ca2c..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/mark.py
+++ /dev/null
@@ -1,465 +0,0 @@
-""" generic mechanism for marking and selecting python functions. """
-from __future__ import absolute_import, division, print_function
-
-import inspect
-import warnings
-from collections import namedtuple
-from operator import attrgetter
-from .compat import imap
-from .deprecated import MARK_PARAMETERSET_UNPACKING
-
-
-def alias(name, warning=None):
- getter = attrgetter(name)
-
- def warned(self):
- warnings.warn(warning, stacklevel=2)
- return getter(self)
-
- return property(getter if warning is None else warned, doc='alias for ' + name)
-
-
-class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')):
- @classmethod
- def param(cls, *values, **kw):
- marks = kw.pop('marks', ())
- if isinstance(marks, MarkDecorator):
- marks = marks,
- else:
- assert isinstance(marks, (tuple, list, set))
-
- def param_extract_id(id=None):
- return id
-
- id = param_extract_id(**kw)
- return cls(values, marks, id)
-
- @classmethod
- def extract_from(cls, parameterset, legacy_force_tuple=False):
- """
- :param parameterset:
- a legacy style parameterset that may or may not be a tuple,
- and may or may not be wrapped into a mess of mark objects
-
- :param legacy_force_tuple:
- enforce tuple wrapping so single argument tuple values
- don't get decomposed and break tests
-
- """
-
- if isinstance(parameterset, cls):
- return parameterset
- if not isinstance(parameterset, MarkDecorator) and legacy_force_tuple:
- return cls.param(parameterset)
-
- newmarks = []
- argval = parameterset
- while isinstance(argval, MarkDecorator):
- newmarks.append(MarkDecorator(Mark(
- argval.markname, argval.args[:-1], argval.kwargs)))
- argval = argval.args[-1]
- assert not isinstance(argval, ParameterSet)
- if legacy_force_tuple:
- argval = argval,
-
- if newmarks:
- warnings.warn(MARK_PARAMETERSET_UNPACKING)
-
- return cls(argval, marks=newmarks, id=None)
-
- @property
- def deprecated_arg_dict(self):
- return dict((mark.name, mark) for mark in self.marks)
-
-
-class MarkerError(Exception):
-
- """Error in use of a pytest marker/attribute."""
-
-
-def param(*values, **kw):
- return ParameterSet.param(*values, **kw)
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("general")
- group._addoption(
- '-k',
- action="store", dest="keyword", default='', metavar="EXPRESSION",
- help="only run tests which match the given substring expression. "
- "An expression is a python evaluatable expression "
- "where all names are substring-matched against test names "
- "and their parent classes. Example: -k 'test_method or test_"
- "other' matches all test functions and classes whose name "
- "contains 'test_method' or 'test_other', while -k 'not test_method' "
- "matches those that don't contain 'test_method' in their names. "
- "Additionally keywords are matched to classes and functions "
- "containing extra names in their 'extra_keyword_matches' set, "
- "as well as functions which have names assigned directly to them."
- )
-
- group._addoption(
- "-m",
- action="store", dest="markexpr", default="", metavar="MARKEXPR",
- help="only run tests matching given mark expression. "
- "example: -m 'mark1 and not mark2'."
- )
-
- group.addoption(
- "--markers", action="store_true",
- help="show markers (builtin, plugin and per-project ones)."
- )
-
- parser.addini("markers", "markers for test functions", 'linelist')
-
-
-def pytest_cmdline_main(config):
- import _pytest.config
- if config.option.markers:
- config._do_configure()
- tw = _pytest.config.create_terminal_writer(config)
- for line in config.getini("markers"):
- name, rest = line.split(":", 1)
- tw.write("@pytest.mark.%s:" % name, bold=True)
- tw.line(rest)
- tw.line()
- config._ensure_unconfigure()
- return 0
-
-
-pytest_cmdline_main.tryfirst = True
-
-
-def pytest_collection_modifyitems(items, config):
- keywordexpr = config.option.keyword.lstrip()
- matchexpr = config.option.markexpr
- if not keywordexpr and not matchexpr:
- return
- # pytest used to allow "-" for negating
- # but today we just allow "-" at the beginning, use "not" instead
- # we probably remove "-" altogether soon
- if keywordexpr.startswith("-"):
- keywordexpr = "not " + keywordexpr[1:]
- selectuntil = False
- if keywordexpr[-1:] == ":":
- selectuntil = True
- keywordexpr = keywordexpr[:-1]
-
- remaining = []
- deselected = []
- for colitem in items:
- if keywordexpr and not matchkeyword(colitem, keywordexpr):
- deselected.append(colitem)
- else:
- if selectuntil:
- keywordexpr = None
- if matchexpr:
- if not matchmark(colitem, matchexpr):
- deselected.append(colitem)
- continue
- remaining.append(colitem)
-
- if deselected:
- config.hook.pytest_deselected(items=deselected)
- items[:] = remaining
-
-
-class MarkMapping:
- """Provides a local mapping for markers where item access
- resolves to True if the marker is present. """
-
- def __init__(self, keywords):
- mymarks = set()
- for key, value in keywords.items():
- if isinstance(value, MarkInfo) or isinstance(value, MarkDecorator):
- mymarks.add(key)
- self._mymarks = mymarks
-
- def __getitem__(self, name):
- return name in self._mymarks
-
-
-class KeywordMapping:
- """Provides a local mapping for keywords.
- Given a list of names, map any substring of one of these names to True.
- """
-
- def __init__(self, names):
- self._names = names
-
- def __getitem__(self, subname):
- for name in self._names:
- if subname in name:
- return True
- return False
-
-
-def matchmark(colitem, markexpr):
- """Tries to match on any marker names, attached to the given colitem."""
- return eval(markexpr, {}, MarkMapping(colitem.keywords))
-
-
-def matchkeyword(colitem, keywordexpr):
- """Tries to match given keyword expression to given collector item.
-
- Will match on the name of colitem, including the names of its parents.
- Only matches names of items which are either a :class:`Class` or a
- :class:`Function`.
- Additionally, matches on names in the 'extra_keyword_matches' set of
- any item, as well as names directly assigned to test functions.
- """
- mapped_names = set()
-
- # Add the names of the current item and any parent items
- import pytest
- for item in colitem.listchain():
- if not isinstance(item, pytest.Instance):
- mapped_names.add(item.name)
-
- # Add the names added as extra keywords to current or parent items
- for name in colitem.listextrakeywords():
- mapped_names.add(name)
-
- # Add the names attached to the current function through direct assignment
- if hasattr(colitem, 'function'):
- for name in colitem.function.__dict__:
- mapped_names.add(name)
-
- mapping = KeywordMapping(mapped_names)
- if " " not in keywordexpr:
- # special case to allow for simple "-k pass" and "-k 1.3"
- return mapping[keywordexpr]
- elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]:
- return not mapping[keywordexpr[4:]]
- return eval(keywordexpr, {}, mapping)
-
-
-def pytest_configure(config):
- config._old_mark_config = MARK_GEN._config
- if config.option.strict:
- MARK_GEN._config = config
-
-
-def pytest_unconfigure(config):
- MARK_GEN._config = getattr(config, '_old_mark_config', None)
-
-
-class MarkGenerator:
- """ Factory for :class:`MarkDecorator` objects - exposed as
- a ``pytest.mark`` singleton instance. Example::
-
- import pytest
- @pytest.mark.slowtest
- def test_function():
- pass
-
- will set a 'slowtest' :class:`MarkInfo` object
- on the ``test_function`` object. """
- _config = None
-
- def __getattr__(self, name):
- if name[0] == "_":
- raise AttributeError("Marker name must NOT start with underscore")
- if self._config is not None:
- self._check(name)
- return MarkDecorator(Mark(name, (), {}))
-
- def _check(self, name):
- try:
- if name in self._markers:
- return
- except AttributeError:
- pass
- self._markers = values = set()
- for line in self._config.getini("markers"):
- marker, _ = line.split(":", 1)
- marker = marker.rstrip()
- x = marker.split("(", 1)[0]
- values.add(x)
- if name not in self._markers:
- raise AttributeError("%r not a registered marker" % (name,))
-
-
-def istestfunc(func):
- return hasattr(func, "__call__") and \
- getattr(func, "__name__", "<lambda>") != "<lambda>"
-
-
-class MarkDecorator:
- """ A decorator for test functions and test classes. When applied
- it will create :class:`MarkInfo` objects which may be
- :ref:`retrieved by hooks as item keywords <excontrolskip>`.
- MarkDecorator instances are often created like this::
-
- mark1 = pytest.mark.NAME # simple MarkDecorator
- mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator
-
- and can then be applied as decorators to test functions::
-
- @mark2
- def test_function():
- pass
-
- When a MarkDecorator instance is called it does the following:
- 1. If called with a single class as its only positional argument and no
- additional keyword arguments, it attaches itself to the class so it
- gets applied automatically to all test cases found in that class.
- 2. If called with a single function as its only positional argument and
- no additional keyword arguments, it attaches a MarkInfo object to the
- function, containing all the arguments already stored internally in
- the MarkDecorator.
- 3. When called in any other case, it performs a 'fake construction' call,
- i.e. it returns a new MarkDecorator instance with the original
- MarkDecorator's content updated with the arguments passed to this
- call.
-
- Note: The rules above prevent MarkDecorator objects from storing only a
- single function or class reference as their positional argument with no
- additional keyword or positional arguments.
-
- """
-
- def __init__(self, mark):
- assert isinstance(mark, Mark), repr(mark)
- self.mark = mark
-
- name = alias('mark.name')
- args = alias('mark.args')
- kwargs = alias('mark.kwargs')
-
- @property
- def markname(self):
- return self.name # for backward-compat (2.4.1 had this attr)
-
- def __eq__(self, other):
- return self.mark == other.mark if isinstance(other, MarkDecorator) else False
-
- def __repr__(self):
- return "<MarkDecorator %r>" % (self.mark,)
-
- def with_args(self, *args, **kwargs):
- """ return a MarkDecorator with extra arguments added
-
- unlike call this can be used even if the sole argument is a callable/class
-
- :return: MarkDecorator
- """
-
- mark = Mark(self.name, args, kwargs)
- return self.__class__(self.mark.combined_with(mark))
-
- def __call__(self, *args, **kwargs):
- """ if passed a single callable argument: decorate it with mark info.
- otherwise add *args/**kwargs in-place to mark information. """
- if args and not kwargs:
- func = args[0]
- is_class = inspect.isclass(func)
- if len(args) == 1 and (istestfunc(func) or is_class):
- if is_class:
- store_mark(func, self.mark)
- else:
- store_legacy_markinfo(func, self.mark)
- store_mark(func, self.mark)
- return func
- return self.with_args(*args, **kwargs)
-
-
-def get_unpacked_marks(obj):
- """
-    obtain the unpacked marks that are stored on an object
- """
- mark_list = getattr(obj, 'pytestmark', [])
-
- if not isinstance(mark_list, list):
- mark_list = [mark_list]
- return [
- getattr(mark, 'mark', mark) # unpack MarkDecorator
- for mark in mark_list
- ]
-
-
-def store_mark(obj, mark):
- """store a Mark on a object
- this is used to implement the Mark declarations/decorators correctly
- """
- assert isinstance(mark, Mark), mark
- # always reassign name to avoid updating pytestmark
- # in a reference that was only borrowed
- obj.pytestmark = get_unpacked_marks(obj) + [mark]
-
-
-def store_legacy_markinfo(func, mark):
- """create the legacy MarkInfo objects and put them onto the function
- """
- if not isinstance(mark, Mark):
- raise TypeError("got {mark!r} instead of a Mark".format(mark=mark))
- holder = getattr(func, mark.name, None)
- if holder is None:
- holder = MarkInfo(mark)
- setattr(func, mark.name, holder)
- else:
- holder.add_mark(mark)
-
-
-class Mark(namedtuple('Mark', 'name, args, kwargs')):
-
- def combined_with(self, other):
- assert self.name == other.name
- return Mark(
- self.name, self.args + other.args,
- dict(self.kwargs, **other.kwargs))
-
-
-class MarkInfo(object):
- """ Marking object created by :class:`MarkDecorator` instances. """
-
- def __init__(self, mark):
- assert isinstance(mark, Mark), repr(mark)
- self.combined = mark
- self._marks = [mark]
-
- name = alias('combined.name')
- args = alias('combined.args')
- kwargs = alias('combined.kwargs')
-
- def __repr__(self):
- return "<MarkInfo {0!r}>".format(self.combined)
-
- def add_mark(self, mark):
- """ add a MarkInfo with the given args and kwargs. """
- self._marks.append(mark)
- self.combined = self.combined.combined_with(mark)
-
- def __iter__(self):
- """ yield MarkInfo objects each relating to a marking-call. """
- return imap(MarkInfo, self._marks)
-
-
-MARK_GEN = MarkGenerator()
-
-
-def _marked(func, mark):
- """ Returns True if :func: is already marked with :mark:, False otherwise.
- This can happen if marker is applied to class and the test file is
- invoked more than once.
- """
- try:
- func_mark = getattr(func, mark.name)
- except AttributeError:
- return False
- return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
-
-
-def transfer_markers(funcobj, cls, mod):
- """
- this function transfers class level markers and module level markers
- into function level markinfo objects
-
- this is the main reason why marks are so broken
- the resolution will involve phasing out function level MarkInfo objects
-
- """
- for obj in (cls, mod):
- for mark in get_unpacked_marks(obj):
- if not _marked(funcobj, mark):
- store_legacy_markinfo(funcobj, mark)
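
``matchkeyword()``/``matchmark()`` above work by ``eval()``-ing the ``-k``/``-m`` expression against a mapping whose item lookup decides whether a single word matches; for ``-k`` that means substring membership in the collected names. A rough standalone sketch of that mechanism (the names below are invented)::

    class SubstringMapping:
        """Name lookup for eval(): True if the word is a substring of any name."""

        def __init__(self, names):
            self._names = names

        def __getitem__(self, subname):
            return any(subname in name for name in self._names)

    names = {"TestClient", "test_login", "test_logout"}
    mapping = SubstringMapping(names)

    assert eval("login or logout", {}, mapping) is True
    assert eval("login and not payment", {}, mapping) is True
    assert eval("payment", {}, mapping) is False
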
diff --git a/lib/spack/external/pytest-fallback/_pytest/monkeypatch.py b/lib/spack/external/pytest-fallback/_pytest/monkeypatch.py
deleted file mode 100644
index 39ac770135..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/monkeypatch.py
+++ /dev/null
@@ -1,259 +0,0 @@
-""" monkeypatching and mocking functionality. """
-from __future__ import absolute_import, division, print_function
-
-import os
-import sys
-import re
-
-from py.builtin import _basestring
-from _pytest.fixtures import fixture
-
-RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")
-
-
-@fixture
-def monkeypatch():
- """The returned ``monkeypatch`` fixture provides these
- helper methods to modify objects, dictionaries or os.environ::
-
- monkeypatch.setattr(obj, name, value, raising=True)
- monkeypatch.delattr(obj, name, raising=True)
- monkeypatch.setitem(mapping, name, value)
- monkeypatch.delitem(obj, name, raising=True)
- monkeypatch.setenv(name, value, prepend=False)
-        monkeypatch.delenv(name, raising=True)
- monkeypatch.syspath_prepend(path)
- monkeypatch.chdir(path)
-
- All modifications will be undone after the requesting
- test function or fixture has finished. The ``raising``
- parameter determines if a KeyError or AttributeError
- will be raised if the set/deletion operation has no target.
- """
- mpatch = MonkeyPatch()
- yield mpatch
- mpatch.undo()
-
-
-def resolve(name):
- # simplified from zope.dottedname
- parts = name.split('.')
-
- used = parts.pop(0)
- found = __import__(used)
- for part in parts:
- used += '.' + part
- try:
- found = getattr(found, part)
- except AttributeError:
- pass
- else:
- continue
- # we use explicit un-nesting of the handling block in order
- # to avoid nested exceptions on python 3
- try:
- __import__(used)
- except ImportError as ex:
- # str is used for py2 vs py3
- expected = str(ex).split()[-1]
- if expected == used:
- raise
- else:
- raise ImportError(
- 'import error in %s: %s' % (used, ex)
- )
- found = annotated_getattr(found, part, used)
- return found
-
-
-def annotated_getattr(obj, name, ann):
- try:
- obj = getattr(obj, name)
- except AttributeError:
- raise AttributeError(
- '%r object at %s has no attribute %r' % (
- type(obj).__name__, ann, name
- )
- )
- return obj
-
-
-def derive_importpath(import_path, raising):
- if not isinstance(import_path, _basestring) or "." not in import_path:
- raise TypeError("must be absolute import path string, not %r" %
- (import_path,))
- module, attr = import_path.rsplit('.', 1)
- target = resolve(module)
- if raising:
- annotated_getattr(target, attr, ann=module)
- return attr, target
-
-
-class Notset:
- def __repr__(self):
- return "<notset>"
-
-
-notset = Notset()
-
-
-class MonkeyPatch:
- """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes.
- """
-
- def __init__(self):
- self._setattr = []
- self._setitem = []
- self._cwd = None
- self._savesyspath = None
-
- def setattr(self, target, name, value=notset, raising=True):
- """ Set attribute value on target, memorizing the old value.
- By default raise AttributeError if the attribute did not exist.
-
- For convenience you can specify a string as ``target`` which
- will be interpreted as a dotted import path, with the last part
- being the attribute name. Example:
- ``monkeypatch.setattr("os.getcwd", lambda x: "/")``
- would set the ``getcwd`` function of the ``os`` module.
-
- The ``raising`` value determines if the setattr should fail
- if the attribute is not already present (defaults to True
- which means it will raise).
- """
- __tracebackhide__ = True
- import inspect
-
- if value is notset:
- if not isinstance(target, _basestring):
- raise TypeError("use setattr(target, name, value) or "
- "setattr(target, value) with target being a dotted "
- "import string")
- value = name
- name, target = derive_importpath(target, raising)
-
- oldval = getattr(target, name, notset)
- if raising and oldval is notset:
- raise AttributeError("%r has no attribute %r" % (target, name))
-
- # avoid class descriptors like staticmethod/classmethod
- if inspect.isclass(target):
- oldval = target.__dict__.get(name, notset)
- self._setattr.append((target, name, oldval))
- setattr(target, name, value)
-
- def delattr(self, target, name=notset, raising=True):
- """ Delete attribute ``name`` from ``target``, by default raise
-        AttributeError if the attribute did not previously exist.
-
- If no ``name`` is specified and ``target`` is a string
- it will be interpreted as a dotted import path with the
- last part being the attribute name.
-
- If ``raising`` is set to False, no exception will be raised if the
- attribute is missing.
- """
- __tracebackhide__ = True
- if name is notset:
- if not isinstance(target, _basestring):
- raise TypeError("use delattr(target, name) or "
- "delattr(target) with target being a dotted "
- "import string")
- name, target = derive_importpath(target, raising)
-
- if not hasattr(target, name):
- if raising:
- raise AttributeError(name)
- else:
- self._setattr.append((target, name, getattr(target, name, notset)))
- delattr(target, name)
-
- def setitem(self, dic, name, value):
- """ Set dictionary entry ``name`` to value. """
- self._setitem.append((dic, name, dic.get(name, notset)))
- dic[name] = value
-
- def delitem(self, dic, name, raising=True):
- """ Delete ``name`` from dict. Raise KeyError if it doesn't exist.
-
- If ``raising`` is set to False, no exception will be raised if the
- key is missing.
- """
- if name not in dic:
- if raising:
- raise KeyError(name)
- else:
- self._setitem.append((dic, name, dic.get(name, notset)))
- del dic[name]
-
- def setenv(self, name, value, prepend=None):
- """ Set environment variable ``name`` to ``value``. If ``prepend``
- is a character, read the current environment variable value
- and prepend the ``value`` adjoined with the ``prepend`` character."""
- value = str(value)
- if prepend and name in os.environ:
- value = value + prepend + os.environ[name]
- self.setitem(os.environ, name, value)
-
- def delenv(self, name, raising=True):
- """ Delete ``name`` from the environment. Raise KeyError it does not
- exist.
-
- If ``raising`` is set to False, no exception will be raised if the
- environment variable is missing.
- """
- self.delitem(os.environ, name, raising=raising)
-
- def syspath_prepend(self, path):
- """ Prepend ``path`` to ``sys.path`` list of import locations. """
- if self._savesyspath is None:
- self._savesyspath = sys.path[:]
- sys.path.insert(0, str(path))
-
- def chdir(self, path):
- """ Change the current working directory to the specified path.
- Path can be a string or a py.path.local object.
- """
- if self._cwd is None:
- self._cwd = os.getcwd()
- if hasattr(path, "chdir"):
- path.chdir()
- else:
- os.chdir(path)
-
- def undo(self):
- """ Undo previous changes. This call consumes the
- undo stack. Calling it a second time has no effect unless
- you do more monkeypatching after the undo call.
-
- There is generally no need to call `undo()`, since it is
- called automatically during tear-down.
-
- Note that the same `monkeypatch` fixture is used across a
- single test function invocation. If `monkeypatch` is used both by
- the test function itself and one of the test fixtures,
- calling `undo()` will undo all of the changes made in
- both functions.
- """
- for obj, name, value in reversed(self._setattr):
- if value is not notset:
- setattr(obj, name, value)
- else:
- delattr(obj, name)
- self._setattr[:] = []
- for dictionary, name, value in reversed(self._setitem):
- if value is notset:
- try:
- del dictionary[name]
- except KeyError:
- pass # was already deleted, so we have the desired state
- else:
- dictionary[name] = value
- self._setitem[:] = []
- if self._savesyspath is not None:
- sys.path[:] = self._savesyspath
- self._savesyspath = None
-
- if self._cwd is not None:
- os.chdir(self._cwd)
- self._cwd = None
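
``MonkeyPatch.undo()`` above is a plain undo stack: every ``setattr``/``setitem`` records ``(obj, name, old_value)`` and the records are replayed in reverse at teardown. A short usage sketch of the fixture as documented above (the patched ``getcwd`` and the ``FAKE_KEY`` variable are made up for the example)::

    import os

    def test_fake_cwd(monkeypatch):
        monkeypatch.setattr(os, "getcwd", lambda: "/tmp/fake")
        monkeypatch.setenv("FAKE_KEY", "1")
        assert os.getcwd() == "/tmp/fake"
        assert os.environ["FAKE_KEY"] == "1"
        # both changes are rolled back automatically when the test finishes
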
diff --git a/lib/spack/external/pytest-fallback/_pytest/nodes.py b/lib/spack/external/pytest-fallback/_pytest/nodes.py
deleted file mode 100644
index ad3af2ce67..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/nodes.py
+++ /dev/null
@@ -1,37 +0,0 @@
-SEP = "/"
-
-
-def _splitnode(nodeid):
- """Split a nodeid into constituent 'parts'.
-
- Node IDs are strings, and can be things like:
- ''
- 'testing/code'
- 'testing/code/test_excinfo.py'
- 'testing/code/test_excinfo.py::TestFormattedExcinfo::()'
-
- Return values are lists e.g.
- []
- ['testing', 'code']
- ['testing', 'code', 'test_excinfo.py']
- ['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()']
- """
- if nodeid == '':
- # If there is no root node at all, return an empty list so the caller's logic can remain sane
- return []
- parts = nodeid.split(SEP)
- # Replace single last element 'test_foo.py::Bar::()' with multiple elements 'test_foo.py', 'Bar', '()'
- parts[-1:] = parts[-1].split("::")
- return parts
-
-
-def ischildnode(baseid, nodeid):
- """Return True if the nodeid is a child node of the baseid.
-
- E.g. 'foo/bar::Baz::()' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp'
- """
- base_parts = _splitnode(baseid)
- node_parts = _splitnode(nodeid)
- if len(node_parts) < len(base_parts):
- return False
- return node_parts[:len(base_parts)] == base_parts
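
A quick check of the helpers above, assuming the vendored ``_pytest.nodes`` module is importable; the expected results follow directly from the docstrings::

    from _pytest.nodes import _splitnode, ischildnode

    assert _splitnode("foo/bar::Baz::()") == ["foo", "bar", "Baz", "()"]
    assert ischildnode("foo/bar", "foo/bar::Baz::()") is True
    assert ischildnode("foo/blorp", "foo/bar::Baz::()") is False
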
diff --git a/lib/spack/external/pytest-fallback/_pytest/nose.py b/lib/spack/external/pytest-fallback/_pytest/nose.py
deleted file mode 100644
index d246c5603d..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/nose.py
+++ /dev/null
@@ -1,73 +0,0 @@
-""" run test suites written for nose. """
-from __future__ import absolute_import, division, print_function
-
-import sys
-
-import py
-from _pytest import unittest, runner, python
-from _pytest.config import hookimpl
-
-
-def get_skip_exceptions():
- skip_classes = set()
- for module_name in ('unittest', 'unittest2', 'nose'):
- mod = sys.modules.get(module_name)
- if hasattr(mod, 'SkipTest'):
- skip_classes.add(mod.SkipTest)
- return tuple(skip_classes)
-
-
-def pytest_runtest_makereport(item, call):
- if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
- # let's substitute the excinfo with a pytest.skip one
- call2 = call.__class__(
- lambda: runner.skip(str(call.excinfo.value)), call.when)
- call.excinfo = call2.excinfo
-
-
-@hookimpl(trylast=True)
-def pytest_runtest_setup(item):
- if is_potential_nosetest(item):
- if isinstance(item.parent, python.Generator):
- gen = item.parent
- if not hasattr(gen, '_nosegensetup'):
- call_optional(gen.obj, 'setup')
- if isinstance(gen.parent, python.Instance):
- call_optional(gen.parent.obj, 'setup')
- gen._nosegensetup = True
- if not call_optional(item.obj, 'setup'):
- # call module level setup if there is no object level one
- call_optional(item.parent.obj, 'setup')
- # XXX this implies we only call teardown when setup worked
- item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
-
-
-def teardown_nose(item):
- if is_potential_nosetest(item):
- if not call_optional(item.obj, 'teardown'):
- call_optional(item.parent.obj, 'teardown')
- # if hasattr(item.parent, '_nosegensetup'):
- # #call_optional(item._nosegensetup, 'teardown')
- # del item.parent._nosegensetup
-
-
-def pytest_make_collect_report(collector):
- if isinstance(collector, python.Generator):
- call_optional(collector.obj, 'setup')
-
-
-def is_potential_nosetest(item):
- # extra check needed since we do not do nose style setup/teardown
- # on direct unittest style classes
- return isinstance(item, python.Function) and \
- not isinstance(item, unittest.TestCaseFunction)
-
-
-def call_optional(obj, name):
- method = getattr(obj, name, None)
- isfixture = hasattr(method, "_pytestfixturefunction")
- if method is not None and not isfixture and py.builtin.callable(method):
- # If there's any problems allow the exception to raise rather than
- # silently ignoring them
- method()
- return True
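
``call_optional()`` above implements a simple "call the hook only if the object actually defines it" pattern (the real version additionally skips fixture functions). A standalone sketch of the core idea, with an invented ``FakeModule``::

    def call_optional(obj, name):
        method = getattr(obj, name, None)
        if callable(method):
            method()          # let any exception propagate, as in the code above
            return True
        return False

    class FakeModule:
        @staticmethod
        def setup():
            print("module-level setup ran")

    assert call_optional(FakeModule, "setup") is True      # prints and returns True
    assert call_optional(FakeModule, "teardown") is False  # silently skipped
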
diff --git a/lib/spack/external/pytest-fallback/_pytest/outcomes.py b/lib/spack/external/pytest-fallback/_pytest/outcomes.py
deleted file mode 100644
index ff5ef756d9..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/outcomes.py
+++ /dev/null
@@ -1,140 +0,0 @@
-"""
-exception classes and constants handling test outcomes
-as well as functions creating them
-"""
-from __future__ import absolute_import, division, print_function
-import py
-import sys
-
-
-class OutcomeException(BaseException):
- """ OutcomeException and its subclass instances indicate and
- contain info about test and collection outcomes.
- """
- def __init__(self, msg=None, pytrace=True):
- BaseException.__init__(self, msg)
- self.msg = msg
- self.pytrace = pytrace
-
- def __repr__(self):
- if self.msg:
- val = self.msg
- if isinstance(val, bytes):
- val = py._builtin._totext(val, errors='replace')
- return val
- return "<%s instance>" % (self.__class__.__name__,)
- __str__ = __repr__
-
-
-TEST_OUTCOME = (OutcomeException, Exception)
-
-
-class Skipped(OutcomeException):
- # XXX hackish: on 3k we fake to live in the builtins
- # in order to have Skipped exception printing shorter/nicer
- __module__ = 'builtins'
-
- def __init__(self, msg=None, pytrace=True, allow_module_level=False):
- OutcomeException.__init__(self, msg=msg, pytrace=pytrace)
- self.allow_module_level = allow_module_level
-
-
-class Failed(OutcomeException):
- """ raised from an explicit call to pytest.fail() """
- __module__ = 'builtins'
-
-
-class Exit(KeyboardInterrupt):
- """ raised for immediate program exits (no tracebacks/summaries)"""
- def __init__(self, msg="unknown reason"):
- self.msg = msg
- KeyboardInterrupt.__init__(self, msg)
-
-# exposed helper methods
-
-
-def exit(msg):
- """ exit testing process as if KeyboardInterrupt was triggered. """
- __tracebackhide__ = True
- raise Exit(msg)
-
-
-exit.Exception = Exit
-
-
-def skip(msg=""):
- """ skip an executing test with the given message. Note: it's usually
- better to use the pytest.mark.skipif marker to declare a test to be
- skipped under certain conditions like mismatching platforms or
- dependencies. See the pytest_skipping plugin for details.
- """
- __tracebackhide__ = True
- raise Skipped(msg=msg)
-
-
-skip.Exception = Skipped
-
-
-def fail(msg="", pytrace=True):
- """ explicitly fail an currently-executing test with the given Message.
-
- :arg pytrace: if false the msg represents the full failure information
- and no python traceback will be reported.
- """
- __tracebackhide__ = True
- raise Failed(msg=msg, pytrace=pytrace)
-
-
-fail.Exception = Failed
-
-
-class XFailed(fail.Exception):
- """ raised from an explicit call to pytest.xfail() """
-
-
-def xfail(reason=""):
- """ xfail an executing test or setup functions with the given reason."""
- __tracebackhide__ = True
- raise XFailed(reason)
-
-
-xfail.Exception = XFailed
-
-
-def importorskip(modname, minversion=None):
- """ return imported module if it has at least "minversion" as its
-    __version__ attribute. If no minversion is specified then a skip
-    is only triggered if the module cannot be imported.
- """
- import warnings
- __tracebackhide__ = True
- compile(modname, '', 'eval') # to catch syntaxerrors
- should_skip = False
-
- with warnings.catch_warnings():
- # make sure to ignore ImportWarnings that might happen because
- # of existing directories with the same name we're trying to
- # import but without a __init__.py file
- warnings.simplefilter('ignore')
- try:
- __import__(modname)
- except ImportError:
-            # Do not raise chained exception here (#1485)
- should_skip = True
- if should_skip:
- raise Skipped("could not import %r" % (modname,), allow_module_level=True)
- mod = sys.modules[modname]
- if minversion is None:
- return mod
- verattr = getattr(mod, '__version__', None)
- if minversion is not None:
- try:
- from pkg_resources import parse_version as pv
- except ImportError:
- raise Skipped("we have a required version for %r but can not import "
- "pkg_resources to parse version strings." % (modname,),
- allow_module_level=True)
- if verattr is None or pv(verattr) < pv(minversion):
- raise Skipped("module %r has __version__ %r, required is: %r" % (
- modname, verattr, minversion), allow_module_level=True)
- return mod
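
``importorskip()`` above is usually called at module import time so that the whole test module is skipped when an optional dependency is missing or too old; a usage sketch (``docutils`` and the version are just examples)::

    import pytest

    docutils = pytest.importorskip("docutils", minversion="0.12")

    def test_uses_docutils():
        # only reached when docutils >= 0.12 could be imported
        assert docutils is not None
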
diff --git a/lib/spack/external/pytest-fallback/_pytest/pastebin.py b/lib/spack/external/pytest-fallback/_pytest/pastebin.py
deleted file mode 100644
index 9d689819f0..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/pastebin.py
+++ /dev/null
@@ -1,100 +0,0 @@
-""" submit failure or test session information to a pastebin service. """
-from __future__ import absolute_import, division, print_function
-
-import pytest
-import sys
-import tempfile
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("terminal reporting")
- group._addoption('--pastebin', metavar="mode",
- action='store', dest="pastebin", default=None,
- choices=['failed', 'all'],
- help="send failed|all info to bpaste.net pastebin service.")
-
-
-@pytest.hookimpl(trylast=True)
-def pytest_configure(config):
- import py
- if config.option.pastebin == "all":
- tr = config.pluginmanager.getplugin('terminalreporter')
- # if no terminal reporter plugin is present, nothing we can do here;
- # this can happen when this function executes in a slave node
- # when using pytest-xdist, for example
- if tr is not None:
- # pastebin file will be utf-8 encoded binary file
- config._pastebinfile = tempfile.TemporaryFile('w+b')
- oldwrite = tr._tw.write
-
- def tee_write(s, **kwargs):
- oldwrite(s, **kwargs)
- if py.builtin._istext(s):
- s = s.encode('utf-8')
- config._pastebinfile.write(s)
-
- tr._tw.write = tee_write
-
-
-def pytest_unconfigure(config):
- if hasattr(config, '_pastebinfile'):
- # get terminal contents and delete file
- config._pastebinfile.seek(0)
- sessionlog = config._pastebinfile.read()
- config._pastebinfile.close()
- del config._pastebinfile
- # undo our patching in the terminal reporter
- tr = config.pluginmanager.getplugin('terminalreporter')
- del tr._tw.__dict__['write']
- # write summary
- tr.write_sep("=", "Sending information to Paste Service")
- pastebinurl = create_new_paste(sessionlog)
- tr.write_line("pastebin session-log: %s\n" % pastebinurl)
-
-
-def create_new_paste(contents):
- """
- Creates a new paste using bpaste.net service.
-
- :contents: paste contents as utf-8 encoded bytes
- :returns: url to the pasted contents
- """
- import re
- if sys.version_info < (3, 0):
- from urllib import urlopen, urlencode
- else:
- from urllib.request import urlopen
- from urllib.parse import urlencode
-
- params = {
- 'code': contents,
- 'lexer': 'python3' if sys.version_info[0] == 3 else 'python',
- 'expiry': '1week',
- }
- url = 'https://bpaste.net'
- response = urlopen(url, data=urlencode(params).encode('ascii')).read()
- m = re.search(r'href="/raw/(\w+)"', response.decode('utf-8'))
- if m:
- return '%s/show/%s' % (url, m.group(1))
- else:
- return 'bad response: ' + response
-
-
-def pytest_terminal_summary(terminalreporter):
- import _pytest.config
- if terminalreporter.config.option.pastebin != "failed":
- return
- tr = terminalreporter
- if 'failed' in tr.stats:
- terminalreporter.write_sep("=", "Sending information to Paste Service")
- for rep in terminalreporter.stats.get('failed'):
- try:
- msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
- except AttributeError:
- msg = tr._getfailureheadline(rep)
- tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True)
- rep.toterminal(tw)
- s = tw.stringio.getvalue()
- assert len(s)
- pastebinurl = create_new_paste(s)
- tr.write_line("%s --> %s" % (msg, pastebinurl))
diff --git a/lib/spack/external/pytest-fallback/_pytest/pytester.py b/lib/spack/external/pytest-fallback/_pytest/pytester.py
deleted file mode 100644
index 82aa00e0d2..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/pytester.py
+++ /dev/null
@@ -1,1167 +0,0 @@
-""" (disabled by default) support for testing pytest and pytest plugins. """
-from __future__ import absolute_import, division, print_function
-
-import codecs
-import gc
-import os
-import platform
-import re
-import subprocess
-import sys
-import time
-import traceback
-from fnmatch import fnmatch
-
-from weakref import WeakKeyDictionary
-
-from _pytest.capture import MultiCapture, SysCapture
-from _pytest._code import Source
-import py
-import pytest
-from _pytest.main import Session, EXIT_OK
-from _pytest.assertion.rewrite import AssertionRewritingHook
-
-
-def pytest_addoption(parser):
- # group = parser.getgroup("pytester", "pytester (self-tests) options")
- parser.addoption('--lsof',
- action="store_true", dest="lsof", default=False,
- help=("run FD checks if lsof is available"))
-
- parser.addoption('--runpytest', default="inprocess", dest="runpytest",
- choices=("inprocess", "subprocess", ),
- help=("run pytest sub runs in tests using an 'inprocess' "
- "or 'subprocess' (python -m main) method"))
-
-
-def pytest_configure(config):
- # This might be called multiple times. Only take the first.
- global _pytest_fullpath
- try:
- _pytest_fullpath
- except NameError:
- _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
- _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
-
- if config.getvalue("lsof"):
- checker = LsofFdLeakChecker()
- if checker.matching_platform():
- config.pluginmanager.register(checker)
-
-
-class LsofFdLeakChecker(object):
- def get_open_files(self):
- out = self._exec_lsof()
- open_files = self._parse_lsof_output(out)
- return open_files
-
- def _exec_lsof(self):
- pid = os.getpid()
- return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
-
- def _parse_lsof_output(self, out):
- def isopen(line):
- return line.startswith('f') and ("deleted" not in line and
- 'mem' not in line and "txt" not in line and 'cwd' not in line)
-
- open_files = []
-
- for line in out.split("\n"):
- if isopen(line):
- fields = line.split('\0')
- fd = fields[0][1:]
- filename = fields[1][1:]
- if filename.startswith('/'):
- open_files.append((fd, filename))
-
- return open_files
-
- def matching_platform(self):
- try:
- py.process.cmdexec("lsof -v")
- except (py.process.cmdexec.Error, UnicodeDecodeError):
- # cmdexec may raise UnicodeDecodeError on Windows systems
- # with locale other than english:
- # https://bitbucket.org/pytest-dev/py/issues/66
- return False
- else:
- return True
-
- @pytest.hookimpl(hookwrapper=True, tryfirst=True)
- def pytest_runtest_protocol(self, item):
- lines1 = self.get_open_files()
- yield
- if hasattr(sys, "pypy_version_info"):
- gc.collect()
- lines2 = self.get_open_files()
-
- new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
- leaked_files = [t for t in lines2 if t[0] in new_fds]
- if leaked_files:
- error = []
- error.append("***** %s FD leakage detected" % len(leaked_files))
- error.extend([str(f) for f in leaked_files])
- error.append("*** Before:")
- error.extend([str(f) for f in lines1])
- error.append("*** After:")
- error.extend([str(f) for f in lines2])
- error.append(error[0])
- error.append("*** function %s:%s: %s " % item.location)
- error.append("See issue #2366")
- item.warn('', "\n".join(error))
-
-
-# XXX copied from execnet's conftest.py - needs to be merged
-winpymap = {
- 'python2.7': r'C:\Python27\python.exe',
- 'python2.6': r'C:\Python26\python.exe',
- 'python3.1': r'C:\Python31\python.exe',
- 'python3.2': r'C:\Python32\python.exe',
- 'python3.3': r'C:\Python33\python.exe',
- 'python3.4': r'C:\Python34\python.exe',
- 'python3.5': r'C:\Python35\python.exe',
-}
-
-
-def getexecutable(name, cache={}):
- try:
- return cache[name]
- except KeyError:
- executable = py.path.local.sysfind(name)
- if executable:
- import subprocess
- popen = subprocess.Popen([str(executable), "--version"],
- universal_newlines=True, stderr=subprocess.PIPE)
- out, err = popen.communicate()
- if name == "jython":
- if not err or "2.5" not in err:
- executable = None
- if "2.5.2" in err:
- executable = None # http://bugs.jython.org/issue1790
- elif popen.returncode != 0:
- # Handle pyenv's 127.
- executable = None
- cache[name] = executable
- return executable
-
-
-@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
- 'pypy', 'pypy3'])
-def anypython(request):
- name = request.param
- executable = getexecutable(name)
- if executable is None:
- if sys.platform == "win32":
- executable = winpymap.get(name, None)
- if executable:
- executable = py.path.local(executable)
- if executable.check():
- return executable
- pytest.skip("no suitable %s found" % (name,))
- return executable
-
-# used at least by pytest-xdist plugin
-
-
-@pytest.fixture
-def _pytest(request):
- """ Return a helper which offers a gethookrecorder(hook)
- method which returns a HookRecorder instance which helps
- to make assertions about called hooks.
- """
- return PytestArg(request)
-
-
-class PytestArg:
- def __init__(self, request):
- self.request = request
-
- def gethookrecorder(self, hook):
- hookrecorder = HookRecorder(hook._pm)
- self.request.addfinalizer(hookrecorder.finish_recording)
- return hookrecorder
-
-
-def get_public_names(values):
- """Only return names from iterator values without a leading underscore."""
- return [x for x in values if x[0] != "_"]
-
-
-class ParsedCall:
- def __init__(self, name, kwargs):
- self.__dict__.update(kwargs)
- self._name = name
-
- def __repr__(self):
- d = self.__dict__.copy()
- del d['_name']
- return "<ParsedCall %r(**%r)>" % (self._name, d)
-
-
-class HookRecorder:
- """Record all hooks called in a plugin manager.
-
- This wraps all the hook calls in the plugin manager, recording
- each call before propagating the normal calls.
-
- """
-
- def __init__(self, pluginmanager):
- self._pluginmanager = pluginmanager
- self.calls = []
-
- def before(hook_name, hook_impls, kwargs):
- self.calls.append(ParsedCall(hook_name, kwargs))
-
- def after(outcome, hook_name, hook_impls, kwargs):
- pass
-
- self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
-
- def finish_recording(self):
- self._undo_wrapping()
-
- def getcalls(self, names):
- if isinstance(names, str):
- names = names.split()
- return [call for call in self.calls if call._name in names]
-
- def assert_contains(self, entries):
- __tracebackhide__ = True
- i = 0
- entries = list(entries)
- backlocals = sys._getframe(1).f_locals
- while entries:
- name, check = entries.pop(0)
- for ind, call in enumerate(self.calls[i:]):
- if call._name == name:
- print("NAMEMATCH", name, call)
- if eval(check, backlocals, call.__dict__):
- print("CHECKERMATCH", repr(check), "->", call)
- else:
- print("NOCHECKERMATCH", repr(check), "-", call)
- continue
- i += ind + 1
- break
- print("NONAMEMATCH", name, "with", call)
- else:
- pytest.fail("could not find %r check %r" % (name, check))
-
- def popcall(self, name):
- __tracebackhide__ = True
- for i, call in enumerate(self.calls):
- if call._name == name:
- del self.calls[i]
- return call
- lines = ["could not find call %r, in:" % (name,)]
- lines.extend([" %s" % str(x) for x in self.calls])
- pytest.fail("\n".join(lines))
-
- def getcall(self, name):
- values = self.getcalls(name)
- assert len(values) == 1, (name, values)
- return values[0]
-
- # functionality for test reports
-
- def getreports(self,
- names="pytest_runtest_logreport pytest_collectreport"):
- return [x.report for x in self.getcalls(names)]
-
- def matchreport(self, inamepart="",
- names="pytest_runtest_logreport pytest_collectreport", when=None):
- """ return a testreport whose dotted import path matches """
- values = []
- for rep in self.getreports(names=names):
- try:
- if not when and rep.when != "call" and rep.passed:
- # setup/teardown passing reports - let's ignore those
- continue
- except AttributeError:
- pass
- if when and getattr(rep, 'when', None) != when:
- continue
- if not inamepart or inamepart in rep.nodeid.split("::"):
- values.append(rep)
- if not values:
- raise ValueError("could not find test report matching %r: "
- "no test reports at all!" % (inamepart,))
- if len(values) > 1:
- raise ValueError(
- "found 2 or more testreports matching %r: %s" % (inamepart, values))
- return values[0]
-
- def getfailures(self,
- names='pytest_runtest_logreport pytest_collectreport'):
- return [rep for rep in self.getreports(names) if rep.failed]
-
- def getfailedcollections(self):
- return self.getfailures('pytest_collectreport')
-
- def listoutcomes(self):
- passed = []
- skipped = []
- failed = []
- for rep in self.getreports(
- "pytest_collectreport pytest_runtest_logreport"):
- if rep.passed:
- if getattr(rep, "when", None) == "call":
- passed.append(rep)
- elif rep.skipped:
- skipped.append(rep)
- elif rep.failed:
- failed.append(rep)
- return passed, skipped, failed
-
- def countoutcomes(self):
- return [len(x) for x in self.listoutcomes()]
-
- def assertoutcome(self, passed=0, skipped=0, failed=0):
- realpassed, realskipped, realfailed = self.listoutcomes()
- assert passed == len(realpassed)
- assert skipped == len(realskipped)
- assert failed == len(realfailed)
-
- def clear(self):
- self.calls[:] = []
-
-
-@pytest.fixture
-def linecomp(request):
- return LineComp()
-
-
-@pytest.fixture(name='LineMatcher')
-def LineMatcher_fixture(request):
- return LineMatcher
-
-
-@pytest.fixture
-def testdir(request, tmpdir_factory):
- return Testdir(request, tmpdir_factory)
-
-
-rex_outcome = re.compile(r"(\d+) ([\w-]+)")
-
-
-class RunResult:
- """The result of running a command.
-
- Attributes:
-
- :ret: The return value.
- :outlines: List of lines captured from stdout.
-    :errlines: List of lines captured from stderr.
- :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
- reconstruct stdout or the commonly used
- ``stdout.fnmatch_lines()`` method.
-    :stderr: :py:class:`LineMatcher` of stderr.
- :duration: Duration in seconds.
-
- """
-
- def __init__(self, ret, outlines, errlines, duration):
- self.ret = ret
- self.outlines = outlines
- self.errlines = errlines
- self.stdout = LineMatcher(outlines)
- self.stderr = LineMatcher(errlines)
- self.duration = duration
-
- def parseoutcomes(self):
- """ Return a dictionary of outcomestring->num from parsing
- the terminal output that the test process produced."""
- for line in reversed(self.outlines):
- if 'seconds' in line:
- outcomes = rex_outcome.findall(line)
- if outcomes:
- d = {}
- for num, cat in outcomes:
- d[cat] = int(num)
- return d
- raise ValueError("Pytest terminal report not found")
-
- def assert_outcomes(self, passed=0, skipped=0, failed=0, error=0):
- """ assert that the specified outcomes appear with the respective
- numbers (0 means it didn't occur) in the text output from a test run."""
- d = self.parseoutcomes()
- obtained = {
- 'passed': d.get('passed', 0),
- 'skipped': d.get('skipped', 0),
- 'failed': d.get('failed', 0),
- 'error': d.get('error', 0),
- }
- assert obtained == dict(passed=passed, skipped=skipped, failed=failed, error=error)
-
-
-class Testdir:
- """Temporary test directory with tools to test/run pytest itself.
-
- This is based on the ``tmpdir`` fixture but provides a number of
- methods which aid with testing pytest itself. Unless
- :py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as
- current working directory.
-
- Attributes:
-
- :tmpdir: The :py:class:`py.path.local` instance of the temporary
- directory.
-
- :plugins: A list of plugins to use with :py:meth:`parseconfig` and
- :py:meth:`runpytest`. Initially this is an empty list but
- plugins can be added to the list. The type of items to add to
- the list depend on the method which uses them so refer to them
- for details.
-
- """
-
- def __init__(self, request, tmpdir_factory):
- self.request = request
- self._mod_collections = WeakKeyDictionary()
- # XXX remove duplication with tmpdir plugin
- basetmp = tmpdir_factory.ensuretemp("testdir")
- name = request.function.__name__
- for i in range(100):
- try:
- tmpdir = basetmp.mkdir(name + str(i))
- except py.error.EEXIST:
- continue
- break
- self.tmpdir = tmpdir
- self.plugins = []
- self._savesyspath = (list(sys.path), list(sys.meta_path))
- self._savemodulekeys = set(sys.modules)
- self.chdir() # always chdir
- self.request.addfinalizer(self.finalize)
- method = self.request.config.getoption("--runpytest")
- if method == "inprocess":
- self._runpytest_method = self.runpytest_inprocess
- elif method == "subprocess":
- self._runpytest_method = self.runpytest_subprocess
-
- def __repr__(self):
- return "<Testdir %r>" % (self.tmpdir,)
-
- def finalize(self):
- """Clean up global state artifacts.
-
- Some methods modify the global interpreter state and this
- tries to clean this up. It does not remove the temporary
- directory however so it can be looked at after the test run
- has finished.
-
- """
- sys.path[:], sys.meta_path[:] = self._savesyspath
- if hasattr(self, '_olddir'):
- self._olddir.chdir()
- self.delete_loaded_modules()
-
- def delete_loaded_modules(self):
- """Delete modules that have been loaded during a test.
-
- This allows the interpreter to catch module changes in case
- the module is re-imported.
- """
- for name in set(sys.modules).difference(self._savemodulekeys):
- # some zope modules used by twisted-related tests keep internal
- # state and can't be deleted; we had some trouble in the past
- # with zope.interface for example
- if not name.startswith("zope"):
- del sys.modules[name]
-
- def make_hook_recorder(self, pluginmanager):
- """Create a new :py:class:`HookRecorder` for a PluginManager."""
- assert not hasattr(pluginmanager, "reprec")
- pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
- self.request.addfinalizer(reprec.finish_recording)
- return reprec
-
- def chdir(self):
- """Cd into the temporary directory.
-
- This is done automatically upon instantiation.
-
- """
- old = self.tmpdir.chdir()
- if not hasattr(self, '_olddir'):
- self._olddir = old
-
- def _makefile(self, ext, args, kwargs, encoding="utf-8"):
- items = list(kwargs.items())
- if args:
- source = py.builtin._totext("\n").join(
- map(py.builtin._totext, args)) + py.builtin._totext("\n")
- basename = self.request.function.__name__
- items.insert(0, (basename, source))
- ret = None
- for name, value in items:
- p = self.tmpdir.join(name).new(ext=ext)
- p.dirpath().ensure_dir()
- source = Source(value)
-
- def my_totext(s, encoding="utf-8"):
- if py.builtin._isbytes(s):
- s = py.builtin._totext(s, encoding=encoding)
- return s
-
- source_unicode = "\n".join([my_totext(line) for line in source.lines])
- source = py.builtin._totext(source_unicode)
- content = source.strip().encode(encoding) # + "\n"
- # content = content.rstrip() + "\n"
- p.write(content, "wb")
- if ret is None:
- ret = p
- return ret
-
- def makefile(self, ext, *args, **kwargs):
- """Create a new file in the testdir.
-
- ext: The extension the file should use, including the dot.
- E.g. ".py".
-
- args: All args will be treated as strings and joined using
- newlines. The result will be written as contents to the
- file. The name of the file will be based on the test
- function requesting this fixture.
- E.g. "testdir.makefile('.txt', 'line1', 'line2')"
-
- kwargs: Each keyword is the name of a file, while the value of
- it will be written as contents of the file.
- E.g. "testdir.makefile('.ini', pytest='[pytest]\naddopts=-rs\n')"
-
- """
- return self._makefile(ext, args, kwargs)
-
- def makeconftest(self, source):
- """Write a contest.py file with 'source' as contents."""
- return self.makepyfile(conftest=source)
-
- def makeini(self, source):
- """Write a tox.ini file with 'source' as contents."""
- return self.makefile('.ini', tox=source)
-
- def getinicfg(self, source):
- """Return the pytest section from the tox.ini config file."""
- p = self.makeini(source)
- return py.iniconfig.IniConfig(p)['pytest']
-
- def makepyfile(self, *args, **kwargs):
- """Shortcut for .makefile() with a .py extension."""
- return self._makefile('.py', args, kwargs)
-
- def maketxtfile(self, *args, **kwargs):
- """Shortcut for .makefile() with a .txt extension."""
- return self._makefile('.txt', args, kwargs)
-
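- # A short sketch of the file-creation helpers above; the file contents
- # shown are hypothetical::
- #
- #     testdir.makeconftest("collect_ignore = ['helper.py']")
- #     testdir.makeini("[pytest]\naddopts = -rs\n")
- #     testdir.makepyfile(test_example="def test_one(): pass")
- #     testdir.maketxtfile(notes="free-form text data")
-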
- def syspathinsert(self, path=None):
- """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
-
- This is undone automatically after the test.
- """
- if path is None:
- path = self.tmpdir
- sys.path.insert(0, str(path))
- # a call to syspathinsert() usually means that the caller
- # wants to import some dynamically created files.
- # with python3 we thus invalidate import caches.
- self._possibly_invalidate_import_caches()
-
- def _possibly_invalidate_import_caches(self):
- # invalidate caches if we can (py33 and above)
- try:
- import importlib
- except ImportError:
- pass
- else:
- if hasattr(importlib, "invalidate_caches"):
- importlib.invalidate_caches()
-
- def mkdir(self, name):
- """Create a new (sub)directory."""
- return self.tmpdir.mkdir(name)
-
- def mkpydir(self, name):
- """Create a new python package.
-
- This creates a (sub)directory with an empty ``__init__.py``
- file so that it is recognised as a python package.
-
- """
- p = self.mkdir(name)
- p.ensure("__init__.py")
- return p
-
- Session = Session
-
- def getnode(self, config, arg):
- """Return the collection node of a file.
-
- :param config: :py:class:`_pytest.config.Config` instance, see
- :py:meth:`parseconfig` and :py:meth:`parseconfigure` to
- create the configuration.
-
- :param arg: A :py:class:`py.path.local` instance of the file.
-
- """
- session = Session(config)
- assert '::' not in str(arg)
- p = py.path.local(arg)
- config.hook.pytest_sessionstart(session=session)
- res = session.perform_collect([str(p)], genitems=False)[0]
- config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
- return res
-
- def getpathnode(self, path):
- """Return the collection node of a file.
-
- This is like :py:meth:`getnode` but uses
- :py:meth:`parseconfigure` to create the (configured) pytest
- Config instance.
-
- :param path: A :py:class:`py.path.local` instance of the file.
-
- """
- config = self.parseconfigure(path)
- session = Session(config)
- x = session.fspath.bestrelpath(path)
- config.hook.pytest_sessionstart(session=session)
- res = session.perform_collect([x], genitems=False)[0]
- config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
- return res
-
- def genitems(self, colitems):
- """Generate all test items from a collection node.
-
- This recurses into the collection node and returns a list of
- all the test items contained within.
-
- """
- session = colitems[0].session
- result = []
- for colitem in colitems:
- result.extend(session.genitems(colitem))
- return result
-
- def runitem(self, source):
- """Run the "test_func" Item.
-
- The calling test instance (the class which contains the test
- method) must provide a ``.getrunner()`` method which should
- return a runner which can run the test protocol for a single
- item, like e.g. :py:func:`_pytest.runner.runtestprotocol`.
-
- """
- # used from runner functional tests
- item = self.getitem(source)
- # the test class where we are called from wants to provide the runner
- testclassinstance = self.request.instance
- runner = testclassinstance.getrunner()
- return runner(item)
-
- def inline_runsource(self, source, *cmdlineargs):
- """Run a test module in process using ``pytest.main()``.
-
- This writes "source" into a temporary file and runs
- ``pytest.main()`` on it, returning a :py:class:`HookRecorder`
- instance for the result.
-
- :param source: The source code of the test module.
-
- :param cmdlineargs: Any extra command line arguments to use.
-
- :return: :py:class:`HookRecorder` instance of the result.
-
- """
- p = self.makepyfile(source)
- values = list(cmdlineargs) + [p]
- return self.inline_run(*values)
-
- def inline_genitems(self, *args):
- """Run ``pytest.main(['--collectonly'])`` in-process.
-
- Returns a tuple of the collected items and a
- :py:class:`HookRecorder` instance.
-
- This runs the :py:func:`pytest.main` function to run all of
- pytest inside the test process itself like
- :py:meth:`inline_run`. However the return value is a tuple of
- the collection items and a :py:class:`HookRecorder` instance.
-
- """
- rec = self.inline_run("--collect-only", *args)
- items = [x.item for x in rec.getcalls("pytest_itemcollected")]
- return items, rec
-
- def inline_run(self, *args, **kwargs):
- """Run ``pytest.main()`` in-process, returning a HookRecorder.
-
- This runs the :py:func:`pytest.main` function to run all of
- pytest inside the test process itself. This means it can
- return a :py:class:`HookRecorder` instance which gives more
- detailed results from the run than can be obtained by matching
- stdout/stderr from :py:meth:`runpytest`.
-
- :param args: Any command line arguments to pass to
- :py:func:`pytest.main`.
-
- :param plugin: (keyword-only) Extra plugin instances the
- ``pytest.main()`` instance should use.
-
- :return: A :py:class:`HookRecorder` instance.
- """
- # When running pytest inline, any plugins active in the main
- # test process are already imported, so this disables the
- # warning that they can no longer be rewritten, which is fine
- # because they have already been rewritten.
- orig_warn = AssertionRewritingHook._warn_already_imported
-
- def revert():
- AssertionRewritingHook._warn_already_imported = orig_warn
-
- self.request.addfinalizer(revert)
- AssertionRewritingHook._warn_already_imported = lambda *a: None
-
- rec = []
-
- class Collect:
- def pytest_configure(x, config):
- rec.append(self.make_hook_recorder(config.pluginmanager))
-
- plugins = kwargs.get("plugins") or []
- plugins.append(Collect())
- ret = pytest.main(list(args), plugins=plugins)
- self.delete_loaded_modules()
- if len(rec) == 1:
- reprec = rec.pop()
- else:
- class reprec:
- pass
- reprec.ret = ret
-
- # typically we reraise keyboard interrupts from the child run
- # because it's our user requesting interruption of the testing
- if ret == 2 and not kwargs.get("no_reraise_ctrlc"):
- calls = reprec.getcalls("pytest_keyboard_interrupt")
- if calls and calls[-1].excinfo.type == KeyboardInterrupt:
- raise KeyboardInterrupt()
- return reprec
-
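- # A minimal sketch of an in-process run; the test module is hypothetical::
- #
- #     p = testdir.makepyfile("def test_pass(): pass")
- #     reprec = testdir.inline_run(p)
- #     reprec.assertoutcome(passed=1)
-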
- def runpytest_inprocess(self, *args, **kwargs):
- """ Return result of running pytest in-process, providing a similar
- interface to what self.runpytest() provides. """
- if kwargs.get("syspathinsert"):
- self.syspathinsert()
- now = time.time()
- capture = MultiCapture(Capture=SysCapture)
- capture.start_capturing()
- try:
- try:
- reprec = self.inline_run(*args, **kwargs)
- except SystemExit as e:
-
- class reprec:
- ret = e.args[0]
-
- except Exception:
- traceback.print_exc()
-
- class reprec:
- ret = 3
- finally:
- out, err = capture.readouterr()
- capture.stop_capturing()
- sys.stdout.write(out)
- sys.stderr.write(err)
-
- res = RunResult(reprec.ret,
- out.split("\n"), err.split("\n"),
- time.time() - now)
- res.reprec = reprec
- return res
-
- def runpytest(self, *args, **kwargs):
- """ Run pytest inline or in a subprocess, depending on the command line
- option "--runpytest" and return a :py:class:`RunResult`.
-
- """
- args = self._ensure_basetemp(args)
- return self._runpytest_method(*args, **kwargs)
-
- def _ensure_basetemp(self, args):
- args = [str(x) for x in args]
- for x in args:
- if str(x).startswith('--basetemp'):
- # print("basedtemp exists: %s" %(args,))
- break
- else:
- args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
- # print("added basetemp: %s" %(args,))
- return args
-
- def parseconfig(self, *args):
- """Return a new pytest Config instance from given commandline args.
-
- This invokes the pytest bootstrapping code in _pytest.config
- to create a new :py:class:`_pytest.core.PluginManager` and
- call the pytest_cmdline_parse hook to create a new
- :py:class:`_pytest.config.Config` instance.
-
- If :py:attr:`plugins` has been populated they should be plugin
- modules which will be registered with the PluginManager.
-
- """
- args = self._ensure_basetemp(args)
-
- import _pytest.config
- config = _pytest.config._prepareconfig(args, self.plugins)
- # we don't know what the test will do with this half-setup config
- # object and thus we make sure it gets unconfigured properly in any
- # case (otherwise capturing could still be active, for example)
- self.request.addfinalizer(config._ensure_unconfigure)
- return config
-
- def parseconfigure(self, *args):
- """Return a new pytest configured Config instance.
-
- This returns a new :py:class:`_pytest.config.Config` instance
- like :py:meth:`parseconfig`, but also calls the
- pytest_configure hook.
-
- """
- config = self.parseconfig(*args)
- config._do_configure()
- self.request.addfinalizer(config._ensure_unconfigure)
- return config
-
- def getitem(self, source, funcname="test_func"):
- """Return the test item for a test function.
-
- This writes the source to a python file and runs pytest's
- collection on the resulting module, returning the test item
- for the requested function name.
-
- :param source: The module source.
-
- :param funcname: The name of the test function for which the
- Item must be returned.
-
- """
- items = self.getitems(source)
- for item in items:
- if item.name == funcname:
- return item
- assert 0, "%r item not found in module:\n%s\nitems: %s" % (
- funcname, source, items)
-
- def getitems(self, source):
- """Return all test items collected from the module.
-
- This writes the source to a python file and runs pytest's
- collection on the resulting module, returning all test items
- contained within.
-
- """
- modcol = self.getmodulecol(source)
- return self.genitems([modcol])
-
- def getmodulecol(self, source, configargs=(), withinit=False):
- """Return the module collection node for ``source``.
-
- This writes ``source`` to a file using :py:meth:`makepyfile`
- and then runs the pytest collection on it, returning the
- collection node for the test module.
-
- :param source: The source code of the module to collect.
-
- :param configargs: Any extra arguments to pass to
- :py:meth:`parseconfigure`.
-
- :param withinit: Whether to also write a ``__init__.py`` file
- to the temporary directory to ensure it is a package.
-
- """
- kw = {self.request.function.__name__: Source(source).strip()}
- path = self.makepyfile(**kw)
- if withinit:
- self.makepyfile(__init__="#")
- self.config = config = self.parseconfigure(path, *configargs)
- node = self.getnode(config, path)
-
- return node
-
- def collect_by_name(self, modcol, name):
- """Return the collection node for name from the module collection.
-
- This will search a module collection node for a collection
- node matching the given name.
-
- :param modcol: A module collection node, see
- :py:meth:`getmodulecol`.
-
- :param name: The name of the node to return.
-
- """
- if modcol not in self._mod_collections:
- self._mod_collections[modcol] = list(modcol.collect())
- for colitem in self._mod_collections[modcol]:
- if colitem.name == name:
- return colitem
-
- def popen(self, cmdargs, stdout, stderr, **kw):
- """Invoke subprocess.Popen.
-
- This calls subprocess.Popen making sure the current working
- directory is on the PYTHONPATH.
-
- You probably want to use :py:meth:`run` instead.
-
- """
- env = os.environ.copy()
- env['PYTHONPATH'] = os.pathsep.join(filter(None, [
- str(os.getcwd()), env.get('PYTHONPATH', '')]))
- kw['env'] = env
-
- popen = subprocess.Popen(cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw)
- popen.stdin.close()
-
- return popen
-
- def run(self, *cmdargs):
- """Run a command with arguments.
-
- Run a process using subprocess.Popen saving the stdout and
- stderr.
-
- Returns a :py:class:`RunResult`.
-
- """
- return self._run(*cmdargs)
-
- def _run(self, *cmdargs):
- cmdargs = [str(x) for x in cmdargs]
- p1 = self.tmpdir.join("stdout")
- p2 = self.tmpdir.join("stderr")
- print("running:", ' '.join(cmdargs))
- print(" in:", str(py.path.local()))
- f1 = codecs.open(str(p1), "w", encoding="utf8")
- f2 = codecs.open(str(p2), "w", encoding="utf8")
- try:
- now = time.time()
- popen = self.popen(cmdargs, stdout=f1, stderr=f2,
- close_fds=(sys.platform != "win32"))
- ret = popen.wait()
- finally:
- f1.close()
- f2.close()
- f1 = codecs.open(str(p1), "r", encoding="utf8")
- f2 = codecs.open(str(p2), "r", encoding="utf8")
- try:
- out = f1.read().splitlines()
- err = f2.read().splitlines()
- finally:
- f1.close()
- f2.close()
- self._dump_lines(out, sys.stdout)
- self._dump_lines(err, sys.stderr)
- return RunResult(ret, out, err, time.time() - now)
-
- def _dump_lines(self, lines, fp):
- try:
- for line in lines:
- print(line, file=fp)
- except UnicodeEncodeError:
- print("couldn't print to %s because of encoding" % (fp,))
-
- def _getpytestargs(self):
- # we cannot use "(sys.executable,script)"
- # because on windows the script is e.g. a pytest.exe
- return (sys.executable, _pytest_fullpath,) # noqa
-
- def runpython(self, script):
- """Run a python script using sys.executable as interpreter.
-
- Returns a :py:class:`RunResult`.
- """
- return self.run(sys.executable, script)
-
- def runpython_c(self, command):
- """Run python -c "command", return a :py:class:`RunResult`."""
- return self.run(sys.executable, "-c", command)
-
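- # Sketch of running an arbitrary command and matching its output; the
- # command shown is only an example::
- #
- #     result = testdir.run(sys.executable, "-c", "print('hello')")
- #     assert result.ret == 0
- #     result.stdout.fnmatch_lines(["hello"])
-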
- def runpytest_subprocess(self, *args, **kwargs):
- """Run pytest as a subprocess with given arguments.
-
- Any plugins added to the :py:attr:`plugins` list will be added
- using the ``-p`` command line option. Additionally
- ``--basetemp`` is used to put any temporary files and directories
- in a numbered directory prefixed with "runpytest-" so they do
- not conflict with the normal numbered pytest location for
- temporary files and directories.
-
- Returns a :py:class:`RunResult`.
-
- """
- p = py.path.local.make_numbered_dir(prefix="runpytest-",
- keep=None, rootdir=self.tmpdir)
- args = ('--basetemp=%s' % p, ) + args
- # for x in args:
- # if '--confcutdir' in str(x):
- # break
- # else:
- # pass
- # args = ('--confcutdir=.',) + args
- plugins = [x for x in self.plugins if isinstance(x, str)]
- if plugins:
- args = ('-p', plugins[0]) + args
- args = self._getpytestargs() + args
- return self.run(*args)
-
- def spawn_pytest(self, string, expect_timeout=10.0):
- """Run pytest using pexpect.
-
- This makes sure to use the right pytest and sets up the
- temporary directory locations.
-
- The pexpect child is returned.
-
- """
- basetemp = self.tmpdir.mkdir("temp-pexpect")
- invoke = " ".join(map(str, self._getpytestargs()))
- cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
- return self.spawn(cmd, expect_timeout=expect_timeout)
-
- def spawn(self, cmd, expect_timeout=10.0):
- """Run a command using pexpect.
-
- The pexpect child is returned.
- """
- pexpect = pytest.importorskip("pexpect", "3.0")
- if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
- pytest.skip("pypy-64 bit not supported")
- if sys.platform.startswith("freebsd"):
- pytest.xfail("pexpect does not work reliably on freebsd")
- logfile = self.tmpdir.join("spawn.out").open("wb")
- child = pexpect.spawn(cmd, logfile=logfile)
- self.request.addfinalizer(logfile.close)
- child.timeout = expect_timeout
- return child
-
-
-def getdecoded(out):
- try:
- return out.decode("utf-8")
- except UnicodeDecodeError:
- return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
- py.io.saferepr(out),)
-
-
-class LineComp:
- def __init__(self):
- self.stringio = py.io.TextIO()
-
- def assert_contains_lines(self, lines2):
- """ assert that lines2 are contained (linearly) in lines1.
- return a list of extralines found.
- """
- __tracebackhide__ = True
- val = self.stringio.getvalue()
- self.stringio.truncate(0)
- self.stringio.seek(0)
- lines1 = val.split("\n")
- return LineMatcher(lines1).fnmatch_lines(lines2)
-
-
-class LineMatcher:
- """Flexible matching of text.
-
- This is a convenience class to test large texts like the output of
- commands.
-
- The constructor takes a list of lines without their trailing
- newlines, i.e. ``text.splitlines()``.
-
- """
-
- def __init__(self, lines):
- self.lines = lines
- self._log_output = []
-
- def str(self):
- """Return the entire original text."""
- return "\n".join(self.lines)
-
- def _getlines(self, lines2):
- if isinstance(lines2, str):
- lines2 = Source(lines2)
- if isinstance(lines2, Source):
- lines2 = lines2.strip().lines
- return lines2
-
- def fnmatch_lines_random(self, lines2):
- """Check lines exist in the output.
-
- The argument is a list of lines which have to occur in the
- output, in any order. Each line can contain glob wildcards.
-
- """
- lines2 = self._getlines(lines2)
- for line in lines2:
- for x in self.lines:
- if line == x or fnmatch(x, line):
- self._log("matched: ", repr(line))
- break
- else:
- self._log("line %r not found in output" % line)
- raise ValueError(self._log_text)
-
- def get_lines_after(self, fnline):
- """Return all lines following the given line in the text.
-
- The given line can contain glob wildcards.
- """
- for i, line in enumerate(self.lines):
- if fnline == line or fnmatch(line, fnline):
- return self.lines[i + 1:]
- raise ValueError("line %r not found in output" % fnline)
-
- def _log(self, *args):
- self._log_output.append(' '.join((str(x) for x in args)))
-
- @property
- def _log_text(self):
- return '\n'.join(self._log_output)
-
- def fnmatch_lines(self, lines2):
- """Search the text for matching lines.
-
- The argument is a list of lines which have to match and can
- use glob wildcards. If they do not match, pytest.fail() is
- called. The matches and non-matches are also printed on
- stdout.
-
- """
- lines2 = self._getlines(lines2)
- lines1 = self.lines[:]
- nextline = None
- extralines = []
- __tracebackhide__ = True
- for line in lines2:
- nomatchprinted = False
- while lines1:
- nextline = lines1.pop(0)
- if line == nextline:
- self._log("exact match:", repr(line))
- break
- elif fnmatch(nextline, line):
- self._log("fnmatch:", repr(line))
- self._log(" with:", repr(nextline))
- break
- else:
- if not nomatchprinted:
- self._log("nomatch:", repr(line))
- nomatchprinted = True
- self._log(" and:", repr(nextline))
- extralines.append(nextline)
- else:
- self._log("remains unmatched: %r" % (line,))
- pytest.fail(self._log_text)
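-
-
- # A minimal sketch of using LineMatcher directly; the captured lines are
- # hypothetical.
- def _example_linematcher():
-     lm = LineMatcher(["collected 3 items", "3 passed in 0.12 seconds"])
-     lm.fnmatch_lines(["*collected 3 items*", "*passed*"])
-     assert lm.get_lines_after("collected *") == ["3 passed in 0.12 seconds"]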
diff --git a/lib/spack/external/pytest-fallback/_pytest/python.py b/lib/spack/external/pytest-fallback/_pytest/python.py
deleted file mode 100644
index 41fd2bdb7f..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/python.py
+++ /dev/null
@@ -1,1173 +0,0 @@
-""" Python test discovery, setup and run of test functions. """
-from __future__ import absolute_import, division, print_function
-
-import fnmatch
-import inspect
-import sys
-import os
-import collections
-from textwrap import dedent
-from itertools import count
-
-import py
-from _pytest.mark import MarkerError
-from _pytest.config import hookimpl
-
-import _pytest
-import _pytest._pluggy as pluggy
-from _pytest import fixtures
-from _pytest import main
-from _pytest.compat import (
- isclass, isfunction, is_generator, _ascii_escaped,
- REGEX_TYPE, STRING_TYPES, NoneType, NOTSET,
- get_real_func, getfslineno, safe_getattr,
- safe_str, getlocation, enum,
-)
-from _pytest.outcomes import fail
-from _pytest.mark import transfer_markers
-
-cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
-cutdir2 = py.path.local(_pytest.__file__).dirpath()
-cutdir3 = py.path.local(py.__file__).dirpath()
-
-
-def filter_traceback(entry):
- """Return True if a TracebackEntry instance should be removed from tracebacks:
- * dynamically generated code (no code to show up for it);
- * internal traceback from pytest or its internal libraries, py and pluggy.
- """
- # entry.path might sometimes return a str object when the entry
- # points to dynamically generated code
- # see https://bitbucket.org/pytest-dev/py/issues/71
- raw_filename = entry.frame.code.raw.co_filename
- is_generated = '<' in raw_filename and '>' in raw_filename
- if is_generated:
- return False
- # entry.path might point to a non-existing file, in which case it will
- # also return a str object. see #1133
- p = py.path.local(entry.path)
- return p != cutdir1 and not p.relto(cutdir2) and not p.relto(cutdir3)
-
-
-def pyobj_property(name):
- def get(self):
- node = self.getparent(getattr(__import__('pytest'), name))
- if node is not None:
- return node.obj
- doc = "python %s object this node was collected from (can be None)." % (
- name.lower(),)
- return property(get, None, None, doc)
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("general")
- group.addoption('--fixtures', '--funcargs',
- action="store_true", dest="showfixtures", default=False,
- help="show available fixtures, sorted by plugin appearance")
- group.addoption(
- '--fixtures-per-test',
- action="store_true",
- dest="show_fixtures_per_test",
- default=False,
- help="show fixtures per test",
- )
- parser.addini("usefixtures", type="args", default=[],
- help="list of default fixtures to be used with this project")
- parser.addini("python_files", type="args",
- default=['test_*.py', '*_test.py'],
- help="glob-style file patterns for Python test module discovery")
- parser.addini("python_classes", type="args", default=["Test", ],
- help="prefixes or glob names for Python test class discovery")
- parser.addini("python_functions", type="args", default=["test", ],
- help="prefixes or glob names for Python test function and "
- "method discovery")
-
- group.addoption("--import-mode", default="prepend",
- choices=["prepend", "append"], dest="importmode",
- help="prepend/append to sys.path when importing test modules, "
- "default is to prepend.")
-
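- # Sketch of the ini options registered above, as they might appear in a
- # project's configuration (values are hypothetical)::
- #
- #     [pytest]
- #     python_files = check_*.py
- #     python_classes = Check
- #     python_functions = check_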
-
-def pytest_cmdline_main(config):
- if config.option.showfixtures:
- showfixtures(config)
- return 0
- if config.option.show_fixtures_per_test:
- show_fixtures_per_test(config)
- return 0
-
-
-def pytest_generate_tests(metafunc):
- # those alternative spellings are common - raise a specific error to alert
- # the user
- alt_spellings = ['parameterize', 'parametrise', 'parameterise']
- for attr in alt_spellings:
- if hasattr(metafunc.function, attr):
- msg = "{0} has '{1}', spelling should be 'parametrize'"
- raise MarkerError(msg.format(metafunc.function.__name__, attr))
- try:
- markers = metafunc.function.parametrize
- except AttributeError:
- return
- for marker in markers:
- metafunc.parametrize(*marker.args, **marker.kwargs)
-
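- # Sketch of what the hook above consumes: a ``parametrize`` marker on a user
- # test function (hypothetical example), whose args/kwargs are forwarded to
- # ``metafunc.parametrize``::
- #
- #     @pytest.mark.parametrize("n,expected", [(1, 2), (3, 4)])
- #     def test_increment(n, expected):
- #         assert n + 1 == expected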
-
-def pytest_configure(config):
- config.addinivalue_line("markers",
- "parametrize(argnames, argvalues): call a test function multiple "
- "times passing in different arguments in turn. argvalues generally "
- "needs to be a list of values if argnames specifies only one name "
- "or a list of tuples of values if argnames specifies multiple names. "
- "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
- "decorated test function, one with arg1=1 and another with arg1=2."
- "see http://pytest.org/latest/parametrize.html for more info and "
- "examples."
- )
- config.addinivalue_line("markers",
- "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
- "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
- )
-
-
-@hookimpl(trylast=True)
-def pytest_pyfunc_call(pyfuncitem):
- testfunction = pyfuncitem.obj
- if pyfuncitem._isyieldedfunction():
- testfunction(*pyfuncitem._args)
- else:
- funcargs = pyfuncitem.funcargs
- testargs = {}
- for arg in pyfuncitem._fixtureinfo.argnames:
- testargs[arg] = funcargs[arg]
- testfunction(**testargs)
- return True
-
-
-def pytest_collect_file(path, parent):
- ext = path.ext
- if ext == ".py":
- if not parent.session.isinitpath(path):
- for pat in parent.config.getini('python_files'):
- if path.fnmatch(pat):
- break
- else:
- return
- ihook = parent.session.gethookproxy(path)
- return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
-
-
-def pytest_pycollect_makemodule(path, parent):
- return Module(path, parent)
-
-
-@hookimpl(hookwrapper=True)
-def pytest_pycollect_makeitem(collector, name, obj):
- outcome = yield
- res = outcome.get_result()
- if res is not None:
- return
- # nothing was collected elsewhere, let's do it here
- if isclass(obj):
- if collector.istestclass(obj, name):
- Class = collector._getcustomclass("Class")
- outcome.force_result(Class(name, parent=collector))
- elif collector.istestfunction(obj, name):
- # mock seems to store unbound methods (issue473), normalize it
- obj = getattr(obj, "__func__", obj)
- # We need to try and unwrap the function if it's a functools.partial
- # or a functools.wrapped.
- # We mustn't if it's been wrapped with mock.patch (python 2 only)
- if not (isfunction(obj) or isfunction(get_real_func(obj))):
- collector.warn(code="C2", message="cannot collect %r because it is not a function."
- % name, )
- elif getattr(obj, "__test__", True):
- if is_generator(obj):
- res = Generator(name, parent=collector)
- else:
- res = list(collector._genfunctions(name, obj))
- outcome.force_result(res)
-
-
-def pytest_make_parametrize_id(config, val, argname=None):
- return None
-
-
-class PyobjContext(object):
- module = pyobj_property("Module")
- cls = pyobj_property("Class")
- instance = pyobj_property("Instance")
-
-
-class PyobjMixin(PyobjContext):
- def obj():
- def fget(self):
- obj = getattr(self, '_obj', None)
- if obj is None:
- self._obj = obj = self._getobj()
- return obj
-
- def fset(self, value):
- self._obj = value
-
- return property(fget, fset, None, "underlying python object")
-
- obj = obj()
-
- def _getobj(self):
- return getattr(self.parent.obj, self.name)
-
- def getmodpath(self, stopatmodule=True, includemodule=False):
- """ return python path relative to the containing module. """
- chain = self.listchain()
- chain.reverse()
- parts = []
- for node in chain:
- if isinstance(node, Instance):
- continue
- name = node.name
- if isinstance(node, Module):
- name = os.path.splitext(name)[0]
- if stopatmodule:
- if includemodule:
- parts.append(name)
- break
- parts.append(name)
- parts.reverse()
- s = ".".join(parts)
- return s.replace(".[", "[")
-
- def _getfslineno(self):
- return getfslineno(self.obj)
-
- def reportinfo(self):
- # XXX caching?
- obj = self.obj
- compat_co_firstlineno = getattr(obj, 'compat_co_firstlineno', None)
- if isinstance(compat_co_firstlineno, int):
- # nose compatibility
- fspath = sys.modules[obj.__module__].__file__
- if fspath.endswith(".pyc"):
- fspath = fspath[:-1]
- lineno = compat_co_firstlineno
- else:
- fspath, lineno = getfslineno(obj)
- modpath = self.getmodpath()
- assert isinstance(lineno, int)
- return fspath, lineno, modpath
-
-
-class PyCollector(PyobjMixin, main.Collector):
-
- def funcnamefilter(self, name):
- return self._matches_prefix_or_glob_option('python_functions', name)
-
- def isnosetest(self, obj):
- """ Look for the __test__ attribute, which is applied by the
- @nose.tools.istest decorator
- """
- # We explicitly check for "is True" here to not mistakenly treat
- # classes with a custom __getattr__ returning something truthy (like a
- # function) as test classes.
- return safe_getattr(obj, '__test__', False) is True
-
- def classnamefilter(self, name):
- return self._matches_prefix_or_glob_option('python_classes', name)
-
- def istestfunction(self, obj, name):
- if self.funcnamefilter(name) or self.isnosetest(obj):
- if isinstance(obj, staticmethod):
- # static methods need to be unwrapped
- obj = safe_getattr(obj, '__func__', False)
- if obj is False:
- # Python 2.6 wraps in a different way that we won't try to handle
- msg = "cannot collect static method %r because " \
- "it is not a function (always the case in Python 2.6)"
- self.warn(
- code="C2", message=msg % name)
- return False
- return (
- safe_getattr(obj, "__call__", False) and fixtures.getfixturemarker(obj) is None
- )
- else:
- return False
-
- def istestclass(self, obj, name):
- return self.classnamefilter(name) or self.isnosetest(obj)
-
- def _matches_prefix_or_glob_option(self, option_name, name):
- """
- checks if the given name matches the prefix or glob-pattern defined
- in ini configuration.
- """
- for option in self.config.getini(option_name):
- if name.startswith(option):
- return True
- # check that name looks like a glob-string before calling fnmatch
- # because this is called for every name in each collected module,
- # and fnmatch is somewhat expensive to call
- elif ('*' in option or '?' in option or '[' in option) and \
- fnmatch.fnmatch(name, option):
- return True
- return False
-
- def collect(self):
- if not getattr(self.obj, "__test__", True):
- return []
-
- # NB. we avoid random getattrs and peek in the __dict__ instead
- # (XXX originally introduced from a PyPy need, still true?)
- dicts = [getattr(self.obj, '__dict__', {})]
- for basecls in inspect.getmro(self.obj.__class__):
- dicts.append(basecls.__dict__)
- seen = {}
- values = []
- for dic in dicts:
- for name, obj in list(dic.items()):
- if name in seen:
- continue
- seen[name] = True
- res = self.makeitem(name, obj)
- if res is None:
- continue
- if not isinstance(res, list):
- res = [res]
- values.extend(res)
- values.sort(key=lambda item: item.reportinfo()[:2])
- return values
-
- def makeitem(self, name, obj):
- # assert self.ihook.fspath == self.fspath, self
- return self.ihook.pytest_pycollect_makeitem(
- collector=self, name=name, obj=obj)
-
- def _genfunctions(self, name, funcobj):
- module = self.getparent(Module).obj
- clscol = self.getparent(Class)
- cls = clscol and clscol.obj or None
- transfer_markers(funcobj, cls, module)
- fm = self.session._fixturemanager
- fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
- metafunc = Metafunc(funcobj, fixtureinfo, self.config,
- cls=cls, module=module)
- methods = []
- if hasattr(module, "pytest_generate_tests"):
- methods.append(module.pytest_generate_tests)
- if hasattr(cls, "pytest_generate_tests"):
- methods.append(cls().pytest_generate_tests)
- if methods:
- self.ihook.pytest_generate_tests.call_extra(methods,
- dict(metafunc=metafunc))
- else:
- self.ihook.pytest_generate_tests(metafunc=metafunc)
-
- Function = self._getcustomclass("Function")
- if not metafunc._calls:
- yield Function(name, parent=self, fixtureinfo=fixtureinfo)
- else:
- # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
- fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)
-
- for callspec in metafunc._calls:
- subname = "%s[%s]" % (name, callspec.id)
- yield Function(name=subname, parent=self,
- callspec=callspec, callobj=funcobj,
- fixtureinfo=fixtureinfo,
- keywords={callspec.id: True},
- originalname=name,
- )
-
-
-class Module(main.File, PyCollector):
- """ Collector for test classes and functions. """
-
- def _getobj(self):
- return self._importtestmodule()
-
- def collect(self):
- self.session._fixturemanager.parsefactories(self)
- return super(Module, self).collect()
-
- def _importtestmodule(self):
- # we assume we are only called once per module
- importmode = self.config.getoption("--import-mode")
- try:
- mod = self.fspath.pyimport(ensuresyspath=importmode)
- except SyntaxError:
- raise self.CollectError(
- _pytest._code.ExceptionInfo().getrepr(style="short"))
- except self.fspath.ImportMismatchError:
- e = sys.exc_info()[1]
- raise self.CollectError(
- "import file mismatch:\n"
- "imported module %r has this __file__ attribute:\n"
- " %s\n"
- "which is not the same as the test file we want to collect:\n"
- " %s\n"
- "HINT: remove __pycache__ / .pyc files and/or use a "
- "unique basename for your test file modules"
- % e.args
- )
- except ImportError:
- from _pytest._code.code import ExceptionInfo
- exc_info = ExceptionInfo()
- if self.config.getoption('verbose') < 2:
- exc_info.traceback = exc_info.traceback.filter(filter_traceback)
- exc_repr = exc_info.getrepr(style='short') if exc_info.traceback else exc_info.exconly()
- formatted_tb = safe_str(exc_repr)
- raise self.CollectError(
- "ImportError while importing test module '{fspath}'.\n"
- "Hint: make sure your test modules/packages have valid Python names.\n"
- "Traceback:\n"
- "{traceback}".format(fspath=self.fspath, traceback=formatted_tb)
- )
- except _pytest.runner.Skipped as e:
- if e.allow_module_level:
- raise
- raise self.CollectError(
- "Using pytest.skip outside of a test is not allowed. "
- "To decorate a test function, use the @pytest.mark.skip "
- "or @pytest.mark.skipif decorators instead, and to skip a "
- "module use `pytestmark = pytest.mark.{skip,skipif}."
- )
- self.config.pluginmanager.consider_module(mod)
- return mod
-
- def setup(self):
- setup_module = _get_xunit_setup_teardown(self.obj, "setUpModule")
- if setup_module is None:
- setup_module = _get_xunit_setup_teardown(self.obj, "setup_module")
- if setup_module is not None:
- setup_module()
-
- teardown_module = _get_xunit_setup_teardown(self.obj, 'tearDownModule')
- if teardown_module is None:
- teardown_module = _get_xunit_setup_teardown(self.obj, 'teardown_module')
- if teardown_module is not None:
- self.addfinalizer(teardown_module)
-
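- # Sketch of the xunit-style module hooks that Module.setup() above discovers
- # (hypothetical user test module)::
- #
- #     def setup_module(module):
- #         module.records = []
- #
- #     def teardown_module(module):
- #         del module.records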
-
-def _get_xunit_setup_teardown(holder, attr_name, param_obj=None):
- """
- Return a callable to perform xunit-style setup or teardown if
- the function exists in the ``holder`` object.
- The ``param_obj`` parameter is the parameter which will be passed to the function
- when the callable is called without arguments, defaults to the ``holder`` object.
- Return ``None`` if a suitable callable is not found.
- """
- param_obj = param_obj if param_obj is not None else holder
- result = _get_xunit_func(holder, attr_name)
- if result is not None:
- arg_count = result.__code__.co_argcount
- if inspect.ismethod(result):
- arg_count -= 1
- if arg_count:
- return lambda: result(param_obj)
- else:
- return result
-
-
-def _get_xunit_func(obj, name):
- """Return the attribute from the given object to be used as a setup/teardown
- xunit-style function, but only if not marked as a fixture to
- avoid calling it twice.
- """
- meth = getattr(obj, name, None)
- if fixtures.getfixturemarker(meth) is None:
- return meth
-
-
-class Class(PyCollector):
- """ Collector for test methods. """
-
- def collect(self):
- if not safe_getattr(self.obj, "__test__", True):
- return []
- if hasinit(self.obj):
- self.warn("C1", "cannot collect test class %r because it has a "
- "__init__ constructor" % self.obj.__name__)
- return []
- elif hasnew(self.obj):
- self.warn("C1", "cannot collect test class %r because it has a "
- "__new__ constructor" % self.obj.__name__)
- return []
- return [self._getcustomclass("Instance")(name="()", parent=self)]
-
- def setup(self):
- setup_class = _get_xunit_func(self.obj, 'setup_class')
- if setup_class is not None:
- setup_class = getattr(setup_class, 'im_func', setup_class)
- setup_class = getattr(setup_class, '__func__', setup_class)
- setup_class(self.obj)
-
- fin_class = getattr(self.obj, 'teardown_class', None)
- if fin_class is not None:
- fin_class = getattr(fin_class, 'im_func', fin_class)
- fin_class = getattr(fin_class, '__func__', fin_class)
- self.addfinalizer(lambda: fin_class(self.obj))
-
-
-class Instance(PyCollector):
- def _getobj(self):
- return self.parent.obj()
-
- def collect(self):
- self.session._fixturemanager.parsefactories(self)
- return super(Instance, self).collect()
-
- def newinstance(self):
- self.obj = self._getobj()
- return self.obj
-
-
-class FunctionMixin(PyobjMixin):
- """ mixin for the code common to Function and Generator.
- """
-
- def setup(self):
- """ perform setup for this test function. """
- if hasattr(self, '_preservedparent'):
- obj = self._preservedparent
- elif isinstance(self.parent, Instance):
- obj = self.parent.newinstance()
- self.obj = self._getobj()
- else:
- obj = self.parent.obj
- if inspect.ismethod(self.obj):
- setup_name = 'setup_method'
- teardown_name = 'teardown_method'
- else:
- setup_name = 'setup_function'
- teardown_name = 'teardown_function'
- setup_func_or_method = _get_xunit_setup_teardown(obj, setup_name, param_obj=self.obj)
- if setup_func_or_method is not None:
- setup_func_or_method()
- teardown_func_or_method = _get_xunit_setup_teardown(obj, teardown_name, param_obj=self.obj)
- if teardown_func_or_method is not None:
- self.addfinalizer(teardown_func_or_method)
-
- def _prunetraceback(self, excinfo):
- if hasattr(self, '_obj') and not self.config.option.fulltrace:
- code = _pytest._code.Code(get_real_func(self.obj))
- path, firstlineno = code.path, code.firstlineno
- traceback = excinfo.traceback
- ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
- if ntraceback == traceback:
- ntraceback = ntraceback.cut(path=path)
- if ntraceback == traceback:
- # ntraceback = ntraceback.cut(excludepath=cutdir2)
- ntraceback = ntraceback.filter(filter_traceback)
- if not ntraceback:
- ntraceback = traceback
-
- excinfo.traceback = ntraceback.filter()
- # issue364: mark all but first and last frames to
- # only show a single-line message for each frame
- if self.config.option.tbstyle == "auto":
- if len(excinfo.traceback) > 2:
- for entry in excinfo.traceback[1:-1]:
- entry.set_repr_style('short')
-
- def _repr_failure_py(self, excinfo, style="long"):
- if excinfo.errisinstance(fail.Exception):
- if not excinfo.value.pytrace:
- return py._builtin._totext(excinfo.value)
- return super(FunctionMixin, self)._repr_failure_py(excinfo,
- style=style)
-
- def repr_failure(self, excinfo, outerr=None):
- assert outerr is None, "XXX outerr usage is deprecated"
- style = self.config.option.tbstyle
- if style == "auto":
- style = "long"
- return self._repr_failure_py(excinfo, style=style)
-
-
-class Generator(FunctionMixin, PyCollector):
- def collect(self):
- # test generators are seen as collectors but they also
- # invoke setup/teardown on popular request
- # (induced by the common "test_*" naming shared with normal tests)
- from _pytest import deprecated
- self.session._setupstate.prepare(self)
- # see FunctionMixin.setup and test_setupstate_is_preserved_134
- self._preservedparent = self.parent.obj
- values = []
- seen = {}
- for i, x in enumerate(self.obj()):
- name, call, args = self.getcallargs(x)
- if not callable(call):
- raise TypeError("%r yielded non callable test %r" % (self.obj, call,))
- if name is None:
- name = "[%d]" % i
- else:
- name = "['%s']" % name
- if name in seen:
- raise ValueError("%r generated tests with non-unique name %r" % (self, name))
- seen[name] = True
- values.append(self.Function(name, self, args=args, callobj=call))
- self.warn('C1', deprecated.YIELD_TESTS)
- return values
-
- def getcallargs(self, obj):
- if not isinstance(obj, (tuple, list)):
- obj = (obj,)
- # explicit naming
- if isinstance(obj[0], py.builtin._basestring):
- name = obj[0]
- obj = obj[1:]
- else:
- name = None
- call, args = obj[0], obj[1:]
- return name, call, args
-
-
-def hasinit(obj):
- init = getattr(obj, '__init__', None)
- if init:
- return init != object.__init__
-
-
-def hasnew(obj):
- new = getattr(obj, '__new__', None)
- if new:
- return new != object.__new__
-
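- # Sketch: test classes like the following are skipped by Class.collect()
- # above because hasinit() detects a user-defined constructor (hypothetical
- # example)::
- #
- #     class TestWithConstructor(object):
- #         def __init__(self):          # triggers the "C1" collection warning
- #             self.value = 1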
-
-class CallSpec2(object):
- def __init__(self, metafunc):
- self.metafunc = metafunc
- self.funcargs = {}
- self._idlist = []
- self.params = {}
- self._globalid = NOTSET
- self._globalid_args = set()
- self._globalparam = NOTSET
- self._arg2scopenum = {} # used for sorting parametrized resources
- self.keywords = {}
- self.indices = {}
-
- def copy(self, metafunc):
- cs = CallSpec2(self.metafunc)
- cs.funcargs.update(self.funcargs)
- cs.params.update(self.params)
- cs.keywords.update(self.keywords)
- cs.indices.update(self.indices)
- cs._arg2scopenum.update(self._arg2scopenum)
- cs._idlist = list(self._idlist)
- cs._globalid = self._globalid
- cs._globalid_args = self._globalid_args
- cs._globalparam = self._globalparam
- return cs
-
- def _checkargnotcontained(self, arg):
- if arg in self.params or arg in self.funcargs:
- raise ValueError("duplicate %r" % (arg,))
-
- def getparam(self, name):
- try:
- return self.params[name]
- except KeyError:
- if self._globalparam is NOTSET:
- raise ValueError(name)
- return self._globalparam
-
- @property
- def id(self):
- return "-".join(map(str, filter(None, self._idlist)))
-
- def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
- param_index):
- for arg, val in zip(argnames, valset):
- self._checkargnotcontained(arg)
- valtype_for_arg = valtypes[arg]
- getattr(self, valtype_for_arg)[arg] = val
- self.indices[arg] = param_index
- self._arg2scopenum[arg] = scopenum
- self._idlist.append(id)
- self.keywords.update(keywords)
-
- def setall(self, funcargs, id, param):
- for x in funcargs:
- self._checkargnotcontained(x)
- self.funcargs.update(funcargs)
- if id is not NOTSET:
- self._idlist.append(id)
- if param is not NOTSET:
- assert self._globalparam is NOTSET
- self._globalparam = param
- for arg in funcargs:
- self._arg2scopenum[arg] = fixtures.scopenum_function
-
-
-class Metafunc(fixtures.FuncargnamesCompatAttr):
- """
- Metafunc objects are passed to the ``pytest_generate_tests`` hook.
- They help to inspect a test function and to generate tests according to
- test configuration or values specified in the class or module where a
- test function is defined.
- """
-
- def __init__(self, function, fixtureinfo, config, cls=None, module=None):
- #: access to the :class:`_pytest.config.Config` object for the test session
- self.config = config
-
- #: the module object where the test function is defined in.
- self.module = module
-
- #: underlying python test function
- self.function = function
-
- #: set of fixture names required by the test function
- self.fixturenames = fixtureinfo.names_closure
-
- #: class object where the test function is defined in or ``None``.
- self.cls = cls
-
- self._calls = []
- self._ids = py.builtin.set()
- self._arg2fixturedefs = fixtureinfo.name2fixturedefs
-
- def parametrize(self, argnames, argvalues, indirect=False, ids=None,
- scope=None):
- """ Add new invocations to the underlying test function using the list
- of argvalues for the given argnames. Parametrization is performed
- during the collection phase. If you need to set up expensive resources,
- consider setting ``indirect`` to do it at test setup time instead of at
- collection time.
-
- :arg argnames: a comma-separated string denoting one or more argument
- names, or a list/tuple of argument strings.
-
- :arg argvalues: The list of argvalues determines how often a
- test is invoked with different argument values. If only one
- argname was specified argvalues is a list of values. If N
- argnames were specified, argvalues must be a list of N-tuples,
- where each tuple-element specifies a value for its respective
- argname.
-
- :arg indirect: The list of argnames or boolean. A list of arguments'
- names (subset of argnames). If True the list contains all names from
- the argnames. Each argvalue corresponding to an argname in this list will
- be passed as request.param to its respective argname fixture
- function so that it can perform more expensive setups during the
- setup phase of a test rather than at collection time.
-
- :arg ids: list of string ids, or a callable.
- If strings, each corresponds to an argvalue so that it becomes
- part of the test id. If None is given as the id of a specific test, the
- automatically generated id for that argument will be used.
- If callable, it should take one argument (a single argvalue) and return
- a string or return None. If None, the automatically generated id for that
- argument will be used.
- If no ids are provided they will be generated automatically from
- the argvalues.
-
- :arg scope: if specified it denotes the scope of the parameters.
- The scope is used for grouping tests by parameter instances.
- It will also override any fixture-function defined scope, allowing
- a dynamic scope to be set using test context or configuration.
- """
- from _pytest.fixtures import scope2index
- from _pytest.mark import MARK_GEN, ParameterSet
- from py.io import saferepr
-
- if not isinstance(argnames, (tuple, list)):
- argnames = [x.strip() for x in argnames.split(",") if x.strip()]
- force_tuple = len(argnames) == 1
- else:
- force_tuple = False
- parameters = [
- ParameterSet.extract_from(x, legacy_force_tuple=force_tuple)
- for x in argvalues]
- del argvalues
-
- if not parameters:
- fs, lineno = getfslineno(self.function)
- reason = "got empty parameter set %r, function %s at %s:%d" % (
- argnames, self.function.__name__, fs, lineno)
- mark = MARK_GEN.skip(reason=reason)
- parameters.append(ParameterSet(
- values=(NOTSET,) * len(argnames),
- marks=[mark],
- id=None,
- ))
-
- if scope is None:
- scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)
-
- scopenum = scope2index(scope, descr='call to {0}'.format(self.parametrize))
- valtypes = {}
- for arg in argnames:
- if arg not in self.fixturenames:
- if isinstance(indirect, (tuple, list)):
- name = 'fixture' if arg in indirect else 'argument'
- else:
- name = 'fixture' if indirect else 'argument'
- raise ValueError(
- "%r uses no %s %r" % (
- self.function, name, arg))
-
- if indirect is True:
- valtypes = dict.fromkeys(argnames, "params")
- elif indirect is False:
- valtypes = dict.fromkeys(argnames, "funcargs")
- elif isinstance(indirect, (tuple, list)):
- valtypes = dict.fromkeys(argnames, "funcargs")
- for arg in indirect:
- if arg not in argnames:
- raise ValueError("indirect given to %r: fixture %r doesn't exist" % (
- self.function, arg))
- valtypes[arg] = "params"
- idfn = None
- if callable(ids):
- idfn = ids
- ids = None
- if ids:
- if len(ids) != len(parameters):
- raise ValueError('%d tests specified with %d ids' % (
- len(parameters), len(ids)))
- for id_value in ids:
- if id_value is not None and not isinstance(id_value, py.builtin._basestring):
- msg = 'ids must be list of strings, found: %s (type: %s)'
- raise ValueError(msg % (saferepr(id_value), type(id_value).__name__))
- ids = idmaker(argnames, parameters, idfn, ids, self.config)
- newcalls = []
- for callspec in self._calls or [CallSpec2(self)]:
- elements = zip(ids, parameters, count())
- for a_id, param, param_index in elements:
- if len(param.values) != len(argnames):
- raise ValueError(
- 'In "parametrize" the number of values ({0}) must be '
- 'equal to the number of names ({1})'.format(
- param.values, argnames))
- newcallspec = callspec.copy(self)
- newcallspec.setmulti(valtypes, argnames, param.values, a_id,
- param.deprecated_arg_dict, scopenum, param_index)
- newcalls.append(newcallspec)
- self._calls = newcalls
-
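- # Sketch of driving parametrize() from a ``pytest_generate_tests`` hook in a
- # user conftest.py; the ``db_backend`` fixture name is hypothetical::
- #
- #     def pytest_generate_tests(metafunc):
- #         if "db_backend" in metafunc.fixturenames:
- #             metafunc.parametrize("db_backend", ["sqlite", "postgres"],
- #                                  indirect=True, ids=["lite", "pg"])
-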
- def addcall(self, funcargs=None, id=NOTSET, param=NOTSET):
- """ (deprecated, use parametrize) Add a new call to the underlying
- test function during the collection phase of a test run. Note that
- addcall() is called during the test collection phase, prior to and
- independently of actual test execution. You should only use addcall()
- if you need to specify multiple arguments of a test function.
-
- :arg funcargs: argument keyword dictionary used when invoking
- the test function.
-
- :arg id: used for reporting and identification purposes. If you
- don't supply an `id` an automatic unique id will be generated.
-
- :arg param: a parameter which will be exposed to a later fixture function
- invocation through the ``request.param`` attribute.
- """
- assert funcargs is None or isinstance(funcargs, dict)
- if funcargs is not None:
- for name in funcargs:
- if name not in self.fixturenames:
- fail("funcarg %r not used in this function." % name)
- else:
- funcargs = {}
- if id is None:
- raise ValueError("id=None not allowed")
- if id is NOTSET:
- id = len(self._calls)
- id = str(id)
- if id in self._ids:
- raise ValueError("duplicate id %r" % id)
- self._ids.add(id)
-
- cs = CallSpec2(self)
- cs.setall(funcargs, id, param)
- self._calls.append(cs)
-
-
-def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):
- """Find the most appropriate scope for a parametrized call based on its arguments.
-
- When there's at least one direct argument, always use "function" scope.
-
- When a test function is parametrized and all its arguments are indirect
- (e.g. fixtures), return the most narrow scope based on the fixtures used.
-
- Related to issue #1832, based on code posted by @Kingdread.
- """
- from _pytest.fixtures import scopes
- indirect_as_list = isinstance(indirect, (list, tuple))
- all_arguments_are_fixtures = indirect is True or \
- indirect_as_list and len(indirect) == len(argnames)
- if all_arguments_are_fixtures:
- fixturedefs = arg2fixturedefs or {}
- used_scopes = [fixturedef[0].scope for name, fixturedef in fixturedefs.items()]
- if used_scopes:
- # Takes the most narrow scope from used fixtures
- for scope in reversed(scopes):
- if scope in used_scopes:
- return scope
-
- return 'function'
-
-
-def _idval(val, argname, idx, idfn, config=None):
- if idfn:
- s = None
- try:
- s = idfn(val)
- except Exception:
- # See issue https://github.com/pytest-dev/pytest/issues/2169
- import warnings
- msg = "Raised while trying to determine id of parameter %s at position %d." % (argname, idx)
- msg += '\nUpdate your code as this will raise an error in pytest-4.0.'
- warnings.warn(msg, DeprecationWarning)
- if s:
- return _ascii_escaped(s)
-
- if config:
- hook_id = config.hook.pytest_make_parametrize_id(
- config=config, val=val, argname=argname)
- if hook_id:
- return hook_id
-
- if isinstance(val, STRING_TYPES):
- return _ascii_escaped(val)
- elif isinstance(val, (float, int, bool, NoneType)):
- return str(val)
- elif isinstance(val, REGEX_TYPE):
- return _ascii_escaped(val.pattern)
- elif enum is not None and isinstance(val, enum.Enum):
- return str(val)
- elif isclass(val) and hasattr(val, '__name__'):
- return val.__name__
- return str(argname) + str(idx)
-
-
-def _idvalset(idx, parameterset, argnames, idfn, ids, config=None):
- if parameterset.id is not None:
- return parameterset.id
- if ids is None or (idx >= len(ids) or ids[idx] is None):
- this_id = [_idval(val, argname, idx, idfn, config)
- for val, argname in zip(parameterset.values, argnames)]
- return "-".join(this_id)
- else:
- return _ascii_escaped(ids[idx])
-
-
-def idmaker(argnames, parametersets, idfn=None, ids=None, config=None):
- ids = [_idvalset(valindex, parameterset, argnames, idfn, ids, config)
- for valindex, parameterset in enumerate(parametersets)]
- if len(set(ids)) != len(ids):
- # The ids are not unique
- duplicates = [testid for testid in ids if ids.count(testid) > 1]
- counters = collections.defaultdict(lambda: 0)
- for index, testid in enumerate(ids):
- if testid in duplicates:
- ids[index] = testid + str(counters[testid])
- counters[testid] += 1
- return ids
-
-
-def show_fixtures_per_test(config):
- from _pytest.main import wrap_session
- return wrap_session(config, _show_fixtures_per_test)
-
-
-def _show_fixtures_per_test(config, session):
- import _pytest.config
- session.perform_collect()
- curdir = py.path.local()
- tw = _pytest.config.create_terminal_writer(config)
- verbose = config.getvalue("verbose")
-
- def get_best_relpath(func):
- loc = getlocation(func, curdir)
- return curdir.bestrelpath(loc)
-
- def write_fixture(fixture_def):
- argname = fixture_def.argname
- if verbose <= 0 and argname.startswith("_"):
- return
- if verbose > 0:
- bestrel = get_best_relpath(fixture_def.func)
- funcargspec = "{0} -- {1}".format(argname, bestrel)
- else:
- funcargspec = argname
- tw.line(funcargspec, green=True)
- fixture_doc = fixture_def.func.__doc__
- if fixture_doc:
- write_docstring(tw, fixture_doc)
- else:
- tw.line(' no docstring available', red=True)
-
- def write_item(item):
- try:
- info = item._fixtureinfo
- except AttributeError:
- # doctests items have no _fixtureinfo attribute
- return
- if not info.name2fixturedefs:
- # this test item does not use any fixtures
- return
- tw.line()
- tw.sep('-', 'fixtures used by {0}'.format(item.name))
- tw.sep('-', '({0})'.format(get_best_relpath(item.function)))
- # dict key not used in loop but needed for sorting
- for _, fixturedefs in sorted(info.name2fixturedefs.items()):
- assert fixturedefs is not None
- if not fixturedefs:
- continue
- # last item is expected to be the one used by the test item
- write_fixture(fixturedefs[-1])
-
- for session_item in session.items:
- write_item(session_item)
-
-
-def showfixtures(config):
- from _pytest.main import wrap_session
- return wrap_session(config, _showfixtures_main)
-
-
-def _showfixtures_main(config, session):
- import _pytest.config
- session.perform_collect()
- curdir = py.path.local()
- tw = _pytest.config.create_terminal_writer(config)
- verbose = config.getvalue("verbose")
-
- fm = session._fixturemanager
-
- available = []
- seen = set()
-
- for argname, fixturedefs in fm._arg2fixturedefs.items():
- assert fixturedefs is not None
- if not fixturedefs:
- continue
- for fixturedef in fixturedefs:
- loc = getlocation(fixturedef.func, curdir)
- if (fixturedef.argname, loc) in seen:
- continue
- seen.add((fixturedef.argname, loc))
- available.append((len(fixturedef.baseid),
- fixturedef.func.__module__,
- curdir.bestrelpath(loc),
- fixturedef.argname, fixturedef))
-
- available.sort()
- currentmodule = None
- for baseid, module, bestrel, argname, fixturedef in available:
- if currentmodule != module:
- if not module.startswith("_pytest."):
- tw.line()
- tw.sep("-", "fixtures defined from %s" % (module,))
- currentmodule = module
- if verbose <= 0 and argname[0] == "_":
- continue
- if verbose > 0:
- funcargspec = "%s -- %s" % (argname, bestrel,)
- else:
- funcargspec = argname
- tw.line(funcargspec, green=True)
- loc = getlocation(fixturedef.func, curdir)
- doc = fixturedef.func.__doc__ or ""
- if doc:
- write_docstring(tw, doc)
- else:
- tw.line(" %s: no docstring available" % (loc,),
- red=True)
-
-
-def write_docstring(tw, doc):
- INDENT = " "
- doc = doc.rstrip()
- if "\n" in doc:
- firstline, rest = doc.split("\n", 1)
- else:
- firstline, rest = doc, ""
-
- if firstline.strip():
- tw.line(INDENT + firstline.strip())
-
- if rest:
- for line in dedent(rest).split("\n"):
- tw.write(INDENT + line + "\n")
-
-
-class Function(FunctionMixin, main.Item, fixtures.FuncargnamesCompatAttr):
- """ a Function Item is responsible for setting up and executing a
- Python test function.
- """
- _genid = None
-
- def __init__(self, name, parent, args=None, config=None,
- callspec=None, callobj=NOTSET, keywords=None, session=None,
- fixtureinfo=None, originalname=None):
- super(Function, self).__init__(name, parent, config=config,
- session=session)
- self._args = args
- if callobj is not NOTSET:
- self.obj = callobj
-
- self.keywords.update(self.obj.__dict__)
- if callspec:
- self.callspec = callspec
- self.keywords.update(callspec.keywords)
- if keywords:
- self.keywords.update(keywords)
-
- if fixtureinfo is None:
- fixtureinfo = self.session._fixturemanager.getfixtureinfo(
- self.parent, self.obj, self.cls,
- funcargs=not self._isyieldedfunction())
- self._fixtureinfo = fixtureinfo
- self.fixturenames = fixtureinfo.names_closure
- self._initrequest()
-
- #: original function name, without any decorations (for example
- #: parametrization adds a ``"[...]"`` suffix to function names).
- #:
- #: .. versionadded:: 3.0
- self.originalname = originalname
-
- def _initrequest(self):
- self.funcargs = {}
- if self._isyieldedfunction():
- assert not hasattr(self, "callspec"), (
- "yielded functions (deprecated) cannot have funcargs")
- else:
- if hasattr(self, "callspec"):
- callspec = self.callspec
- assert not callspec.funcargs
- self._genid = callspec.id
- if hasattr(callspec, "param"):
- self.param = callspec.param
- self._request = fixtures.FixtureRequest(self)
-
- @property
- def function(self):
- "underlying python 'function' object"
- return getattr(self.obj, 'im_func', self.obj)
-
- def _getobj(self):
- name = self.name
- i = name.find("[") # parametrization
- if i != -1:
- name = name[:i]
- return getattr(self.parent.obj, name)
-
- @property
- def _pyfuncitem(self):
- "(compatonly) for code expecting pytest-2.2 style request objects"
- return self
-
- def _isyieldedfunction(self):
- return getattr(self, "_args", None) is not None
-
- def runtest(self):
- """ execute the underlying test function. """
- self.ihook.pytest_pyfunc_call(pyfuncitem=self)
-
- def setup(self):
- super(Function, self).setup()
- fixtures.fillfixtures(self)
diff --git a/lib/spack/external/pytest-fallback/_pytest/python_api.py b/lib/spack/external/pytest-fallback/_pytest/python_api.py
deleted file mode 100644
index a931b4d2c7..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/python_api.py
+++ /dev/null
@@ -1,629 +0,0 @@
-import math
-import sys
-
-import py
-
-from _pytest.compat import isclass, izip
-from _pytest.outcomes import fail
-import _pytest._code
-
-
-def _cmp_raises_type_error(self, other):
- """__cmp__ implementation which raises TypeError. Used
- by Approx base classes to implement only == and != and raise a
- TypeError for other comparisons.
-
-    Needed in Python 2 only; in Python 3 it is enough simply not to
-    implement the other comparison operators at all.
- """
- __tracebackhide__ = True
- raise TypeError('Comparison operators other than == and != not supported by approx objects')
-
-
-# builtin pytest.approx helper
-
-
-class ApproxBase(object):
- """
- Provide shared utilities for making approximate comparisons between numbers
- or sequences of numbers.
- """
-
- def __init__(self, expected, rel=None, abs=None, nan_ok=False):
- self.expected = expected
- self.abs = abs
- self.rel = rel
- self.nan_ok = nan_ok
-
- def __repr__(self):
- raise NotImplementedError
-
- def __eq__(self, actual):
- return all(
- a == self._approx_scalar(x)
- for a, x in self._yield_comparisons(actual))
-
- __hash__ = None
-
- def __ne__(self, actual):
- return not (actual == self)
-
- if sys.version_info[0] == 2:
- __cmp__ = _cmp_raises_type_error
-
- def _approx_scalar(self, x):
- return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
-
- def _yield_comparisons(self, actual):
- """
- Yield all the pairs of numbers to be compared. This is used to
- implement the `__eq__` method.
- """
- raise NotImplementedError
-
-
-class ApproxNumpy(ApproxBase):
- """
- Perform approximate comparisons for numpy arrays.
- """
-
-    # Tell numpy to use our `__eq__` operator instead of its own.
- __array_priority__ = 100
-
- def __repr__(self):
- # It might be nice to rewrite this function to account for the
- # shape of the array...
- return "approx({0!r})".format(list(
- self._approx_scalar(x) for x in self.expected))
-
- if sys.version_info[0] == 2:
- __cmp__ = _cmp_raises_type_error
-
- def __eq__(self, actual):
- import numpy as np
-
- try:
- actual = np.asarray(actual)
- except: # noqa
- raise TypeError("cannot compare '{0}' to numpy.ndarray".format(actual))
-
- if actual.shape != self.expected.shape:
- return False
-
- return ApproxBase.__eq__(self, actual)
-
- def _yield_comparisons(self, actual):
- import numpy as np
-
- # We can be sure that `actual` is a numpy array, because it's
-        # cast in `__eq__` before being passed to `ApproxBase.__eq__`,
- # which is the only method that calls this one.
- for i in np.ndindex(self.expected.shape):
- yield actual[i], self.expected[i]
-
-
-class ApproxMapping(ApproxBase):
- """
- Perform approximate comparisons for mappings where the values are numbers
- (the keys can be anything).
- """
-
- def __repr__(self):
- return "approx({0!r})".format(dict(
- (k, self._approx_scalar(v))
- for k, v in self.expected.items()))
-
- def __eq__(self, actual):
- if set(actual.keys()) != set(self.expected.keys()):
- return False
-
- return ApproxBase.__eq__(self, actual)
-
- def _yield_comparisons(self, actual):
- for k in self.expected.keys():
- yield actual[k], self.expected[k]
-
-
-class ApproxSequence(ApproxBase):
- """
- Perform approximate comparisons for sequences of numbers.
- """
-
-    # Tell numpy to use our `__eq__` operator instead of its own.
- __array_priority__ = 100
-
- def __repr__(self):
- seq_type = type(self.expected)
- if seq_type not in (tuple, list, set):
- seq_type = list
- return "approx({0!r})".format(seq_type(
- self._approx_scalar(x) for x in self.expected))
-
- def __eq__(self, actual):
- if len(actual) != len(self.expected):
- return False
- return ApproxBase.__eq__(self, actual)
-
- def _yield_comparisons(self, actual):
- return izip(actual, self.expected)
-
-
-class ApproxScalar(ApproxBase):
- """
- Perform approximate comparisons for single numbers only.
- """
-
- def __repr__(self):
- """
- Return a string communicating both the expected value and the tolerance
- for the comparison being made, e.g. '1.0 +- 1e-6'. Use the unicode
- plus/minus symbol if this is python3 (it's too hard to get right for
- python2).
- """
- if isinstance(self.expected, complex):
- return str(self.expected)
-
- # Infinities aren't compared using tolerances, so don't show a
- # tolerance.
- if math.isinf(self.expected):
- return str(self.expected)
-
- # If a sensible tolerance can't be calculated, self.tolerance will
- # raise a ValueError. In this case, display '???'.
- try:
- vetted_tolerance = '{:.1e}'.format(self.tolerance)
- except ValueError:
- vetted_tolerance = '???'
-
- if sys.version_info[0] == 2:
- return '{0} +- {1}'.format(self.expected, vetted_tolerance)
- else:
- return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance)
-
- def __eq__(self, actual):
- """
- Return true if the given value is equal to the expected value within
- the pre-specified tolerance.
- """
-
- # Short-circuit exact equality.
- if actual == self.expected:
- return True
-
- # Allow the user to control whether NaNs are considered equal to each
- # other or not. The abs() calls are for compatibility with complex
- # numbers.
- if math.isnan(abs(self.expected)):
- return self.nan_ok and math.isnan(abs(actual))
-
- # Infinity shouldn't be approximately equal to anything but itself, but
- # if there's a relative tolerance, it will be infinite and infinity
- # will seem approximately equal to everything. The equal-to-itself
- # case would have been short circuited above, so here we can just
- # return false if the expected value is infinite. The abs() call is
- # for compatibility with complex numbers.
- if math.isinf(abs(self.expected)):
- return False
-
- # Return true if the two numbers are within the tolerance.
- return abs(self.expected - actual) <= self.tolerance
-
- __hash__ = None
-
- @property
- def tolerance(self):
- """
- Return the tolerance for the comparison. This could be either an
- absolute tolerance or a relative tolerance, depending on what the user
- specified or which would be larger.
- """
- def set_default(x, default):
- return x if x is not None else default
-
- # Figure out what the absolute tolerance should be. ``self.abs`` is
- # either None or a value specified by the user.
- absolute_tolerance = set_default(self.abs, 1e-12)
-
- if absolute_tolerance < 0:
- raise ValueError("absolute tolerance can't be negative: {0}".format(absolute_tolerance))
- if math.isnan(absolute_tolerance):
- raise ValueError("absolute tolerance can't be NaN.")
-
- # If the user specified an absolute tolerance but not a relative one,
- # just return the absolute tolerance.
- if self.rel is None:
- if self.abs is not None:
- return absolute_tolerance
-
- # Figure out what the relative tolerance should be. ``self.rel`` is
- # either None or a value specified by the user. This is done after
- # we've made sure the user didn't ask for an absolute tolerance only,
- # because we don't want to raise errors about the relative tolerance if
- # we aren't even going to use it.
- relative_tolerance = set_default(self.rel, 1e-6) * abs(self.expected)
-
- if relative_tolerance < 0:
- raise ValueError("relative tolerance can't be negative: {0}".format(absolute_tolerance))
- if math.isnan(relative_tolerance):
- raise ValueError("relative tolerance can't be NaN.")
-
- # Return the larger of the relative and absolute tolerances.
- return max(relative_tolerance, absolute_tolerance)
-
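A hedged worked restatement of the tolerance rule above (larger of the relative and absolute tolerances, with ``1e-12`` as the absolute floor; the helper name is illustrative)::

    def tolerance(expected, rel=None, abs_=None):
        # If only an absolute tolerance is given, the relative part is
        # skipped entirely; otherwise the larger of the two wins.
        absolute = abs_ if abs_ is not None else 1e-12
        if rel is None and abs_ is not None:
            return absolute
        relative = (rel if rel is not None else 1e-6) * abs(expected)
        return max(relative, absolute)

    assert tolerance(1, abs_=1e-3) == 1e-3   # abs only: rel ignored
    assert tolerance(0.0) == 1e-12           # near zero: absolute floor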
-
-def approx(expected, rel=None, abs=None, nan_ok=False):
- """
- Assert that two numbers (or two sets of numbers) are equal to each other
- within some tolerance.
-
- Due to the `intricacies of floating-point arithmetic`__, numbers that we
- would intuitively expect to be equal are not always so::
-
- >>> 0.1 + 0.2 == 0.3
- False
-
- __ https://docs.python.org/3/tutorial/floatingpoint.html
-
- This problem is commonly encountered when writing tests, e.g. when making
- sure that floating-point values are what you expect them to be. One way to
- deal with this problem is to assert that two floating-point numbers are
- equal to within some appropriate tolerance::
-
- >>> abs((0.1 + 0.2) - 0.3) < 1e-6
- True
-
- However, comparisons like this are tedious to write and difficult to
- understand. Furthermore, absolute comparisons like the one above are
- usually discouraged because there's no tolerance that works well for all
- situations. ``1e-6`` is good for numbers around ``1``, but too small for
- very big numbers and too big for very small ones. It's better to express
- the tolerance as a fraction of the expected value, but relative comparisons
- like that are even more difficult to write correctly and concisely.
-
- The ``approx`` class performs floating-point comparisons using a syntax
- that's as intuitive as possible::
-
- >>> from pytest import approx
- >>> 0.1 + 0.2 == approx(0.3)
- True
-
- The same syntax also works for sequences of numbers::
-
- >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
- True
-
- Dictionary *values*::
-
- >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
- True
-
- And ``numpy`` arrays::
-
- >>> import numpy as np # doctest: +SKIP
- >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP
- True
-
- By default, ``approx`` considers numbers within a relative tolerance of
- ``1e-6`` (i.e. one part in a million) of its expected value to be equal.
- This treatment would lead to surprising results if the expected value was
- ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
- To handle this case less surprisingly, ``approx`` also considers numbers
- within an absolute tolerance of ``1e-12`` of its expected value to be
- equal. Infinity and NaN are special cases. Infinity is only considered
- equal to itself, regardless of the relative tolerance. NaN is not
- considered equal to anything by default, but you can make it be equal to
- itself by setting the ``nan_ok`` argument to True. (This is meant to
- facilitate comparing arrays that use NaN to mean "no data".)
-
- Both the relative and absolute tolerances can be changed by passing
- arguments to the ``approx`` constructor::
-
- >>> 1.0001 == approx(1)
- False
- >>> 1.0001 == approx(1, rel=1e-3)
- True
- >>> 1.0001 == approx(1, abs=1e-3)
- True
-
- If you specify ``abs`` but not ``rel``, the comparison will not consider
- the relative tolerance at all. In other words, two numbers that are within
- the default relative tolerance of ``1e-6`` will still be considered unequal
- if they exceed the specified absolute tolerance. If you specify both
- ``abs`` and ``rel``, the numbers will be considered equal if either
- tolerance is met::
-
- >>> 1 + 1e-8 == approx(1)
- True
- >>> 1 + 1e-8 == approx(1, abs=1e-12)
- False
- >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
- True
-
- If you're thinking about using ``approx``, then you might want to know how
- it compares to other good ways of comparing floating-point numbers. All of
- these algorithms are based on relative and absolute tolerances and should
- agree for the most part, but they do have meaningful differences:
-
- - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
- tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
- tolerance is met. Because the relative tolerance is calculated w.r.t.
- both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
- ``b`` is a "reference value"). You have to specify an absolute tolerance
- if you want to compare to ``0.0`` because there is no tolerance by
- default. Only available in python>=3.5. `More information...`__
-
- __ https://docs.python.org/3/library/math.html#math.isclose
-
- - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
-      between ``a`` and ``b`` is less than the sum of the relative tolerance
- w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
- is only calculated w.r.t. ``b``, this test is asymmetric and you can
- think of ``b`` as the reference value. Support for comparing sequences
- is provided by ``numpy.allclose``. `More information...`__
-
- __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
-
- - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
- are within an absolute tolerance of ``1e-7``. No relative tolerance is
- considered and the absolute tolerance cannot be changed, so this function
- is not appropriate for very large or very small numbers. Also, it's only
- available in subclasses of ``unittest.TestCase`` and it's ugly because it
- doesn't follow PEP8. `More information...`__
-
- __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
-
- - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
- tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
- Because the relative tolerance is only calculated w.r.t. ``b``, this test
- is asymmetric and you can think of ``b`` as the reference value. In the
- special case that you explicitly specify an absolute tolerance but not a
- relative tolerance, only the absolute tolerance is considered.
-
- .. warning::
-
- .. versionchanged:: 3.2
-
- In order to avoid inconsistent behavior, ``TypeError`` is
- raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
- The example below illustrates the problem::
-
- assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10)
- assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10)
-
- In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
-        to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used
-        for the comparison. This is because the call hierarchy of rich comparisons
- follows a fixed behavior. `More information...`__
-
- __ https://docs.python.org/3/reference/datamodel.html#object.__ge__
- """
-
- if sys.version_info >= (3, 3):
- from collections.abc import Mapping, Sequence
- else:
- from collections import Mapping, Sequence
- from _pytest.compat import STRING_TYPES as String
-
- # Delegate the comparison to a class that knows how to deal with the type
- # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
- #
- # This architecture is really driven by the need to support numpy arrays.
- # The only way to override `==` for arrays without requiring that approx be
- # the left operand is to inherit the approx object from `numpy.ndarray`.
- # But that can't be a general solution, because it requires (1) numpy to be
- # installed and (2) the expected value to be a numpy array. So the general
- # solution is to delegate each type of expected value to a different class.
- #
- # This has the advantage that it made it easy to support mapping types
- # (i.e. dict). The old code accepted mapping types, but would only compare
- # their keys, which is probably not what most people would expect.
-
- if _is_numpy_array(expected):
- cls = ApproxNumpy
- elif isinstance(expected, Mapping):
- cls = ApproxMapping
- elif isinstance(expected, Sequence) and not isinstance(expected, String):
- cls = ApproxSequence
- else:
- cls = ApproxScalar
-
- return cls(expected, rel, abs, nan_ok)
-
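The dispatch above is exercised through the public ``pytest.approx`` helper; a short usage sketch covering the scalar, sequence and mapping cases::

    import pytest

    assert 0.1 + 0.2 == pytest.approx(0.3)                      # ApproxScalar
    assert [0.1 + 0.2, 0.2 + 0.4] == pytest.approx([0.3, 0.6])  # ApproxSequence
    assert {"a": 0.1 + 0.2} == pytest.approx({"a": 0.3})        # ApproxMapping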
-
-def _is_numpy_array(obj):
- """
- Return true if the given object is a numpy array. Make a special effort to
- avoid importing numpy unless it's really necessary.
- """
- import inspect
-
- for cls in inspect.getmro(type(obj)):
- if cls.__module__ == 'numpy':
- try:
- import numpy as np
- return isinstance(obj, np.ndarray)
- except ImportError:
- pass
-
- return False
-
-
-# builtin pytest.raises helper
-
-def raises(expected_exception, *args, **kwargs):
- """
- Assert that a code block/function call raises ``expected_exception``
- and raise a failure exception otherwise.
-
-    This helper produces an ``ExceptionInfo()`` object (see below).
-
- If using Python 2.5 or above, you may use this function as a
- context manager::
-
- >>> with raises(ZeroDivisionError):
- ... 1/0
-
- .. versionchanged:: 2.10
-
- In the context manager form you may use the keyword argument
- ``message`` to specify a custom failure message::
-
- >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"):
- ... pass
- Traceback (most recent call last):
- ...
- Failed: Expecting ZeroDivisionError
-
- .. note::
-
- When using ``pytest.raises`` as a context manager, it's worthwhile to
- note that normal context manager rules apply and that the exception
- raised *must* be the final line in the scope of the context manager.
-        Lines of code after that, within the scope of the context manager, will
- not be executed. For example::
-
- >>> value = 15
- >>> with raises(ValueError) as exc_info:
- ... if value > 10:
- ... raise ValueError("value must be <= 10")
- ... assert exc_info.type == ValueError # this will not execute
-
- Instead, the following approach must be taken (note the difference in
- scope)::
-
- >>> with raises(ValueError) as exc_info:
- ... if value > 10:
- ... raise ValueError("value must be <= 10")
- ...
- >>> assert exc_info.type == ValueError
-
-
- Since version ``3.1`` you can use the keyword argument ``match`` to assert that the
- exception matches a text or regex::
-
- >>> with raises(ValueError, match='must be 0 or None'):
- ... raise ValueError("value must be 0 or None")
-
- >>> with raises(ValueError, match=r'must be \d+$'):
- ... raise ValueError("value must be 42")
-
- **Legacy forms**
-
- The forms below are fully supported but are discouraged for new code because the
- context manager form is regarded as more readable and less error-prone.
-
- It is possible to specify a callable by passing a to-be-called lambda::
-
- >>> raises(ZeroDivisionError, lambda: 1/0)
- <ExceptionInfo ...>
-
- or you can specify an arbitrary callable with arguments::
-
- >>> def f(x): return 1/x
- ...
- >>> raises(ZeroDivisionError, f, 0)
- <ExceptionInfo ...>
- >>> raises(ZeroDivisionError, f, x=0)
- <ExceptionInfo ...>
-
- It is also possible to pass a string to be evaluated at runtime::
-
- >>> raises(ZeroDivisionError, "f(0)")
- <ExceptionInfo ...>
-
- The string will be evaluated using the same ``locals()`` and ``globals()``
- at the moment of the ``raises`` call.
-
- .. autoclass:: _pytest._code.ExceptionInfo
- :members:
-
- .. note::
- Similar to caught exception objects in Python, explicitly clearing
- local references to returned ``ExceptionInfo`` objects can
- help the Python interpreter speed up its garbage collection.
-
- Clearing those references breaks a reference cycle
- (``ExceptionInfo`` --> caught exception --> frame stack raising
- the exception --> current frame stack --> local variables -->
- ``ExceptionInfo``) which makes Python keep all objects referenced
- from that cycle (including all local variables in the current
- frame) alive until the next cyclic garbage collection run. See the
- official Python ``try`` statement documentation for more detailed
- information.
-
- """
- __tracebackhide__ = True
- msg = ("exceptions must be old-style classes or"
- " derived from BaseException, not %s")
- if isinstance(expected_exception, tuple):
- for exc in expected_exception:
- if not isclass(exc):
- raise TypeError(msg % type(exc))
- elif not isclass(expected_exception):
- raise TypeError(msg % type(expected_exception))
-
- message = "DID NOT RAISE {0}".format(expected_exception)
- match_expr = None
-
- if not args:
- if "message" in kwargs:
- message = kwargs.pop("message")
- if "match" in kwargs:
- match_expr = kwargs.pop("match")
- message += " matching '{0}'".format(match_expr)
- return RaisesContext(expected_exception, message, match_expr)
- elif isinstance(args[0], str):
- code, = args
- assert isinstance(code, str)
- frame = sys._getframe(1)
- loc = frame.f_locals.copy()
- loc.update(kwargs)
- # print "raises frame scope: %r" % frame.f_locals
- try:
- code = _pytest._code.Source(code).compile()
- py.builtin.exec_(code, frame.f_globals, loc)
-        # XXX didn't f_globals == f_locals mean something special?
- # this is destroyed here ...
- except expected_exception:
- return _pytest._code.ExceptionInfo()
- else:
- func = args[0]
- try:
- func(*args[1:], **kwargs)
- except expected_exception:
- return _pytest._code.ExceptionInfo()
- fail(message)
-
-
-raises.Exception = fail.Exception
-
-
-class RaisesContext(object):
- def __init__(self, expected_exception, message, match_expr):
- self.expected_exception = expected_exception
- self.message = message
- self.match_expr = match_expr
- self.excinfo = None
-
- def __enter__(self):
- self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
- return self.excinfo
-
- def __exit__(self, *tp):
- __tracebackhide__ = True
- if tp[0] is None:
- fail(self.message)
- if sys.version_info < (2, 7):
- # py26: on __exit__() exc_value often does not contain the
- # exception value.
- # http://bugs.python.org/issue7853
- if not isinstance(tp[1], BaseException):
- exc_type, value, traceback = tp
- tp = exc_type, exc_type(value), traceback
- self.excinfo.__init__(tp)
- suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
- if sys.version_info[0] == 2 and suppress_exception:
- sys.exc_clear()
- if self.match_expr:
- self.excinfo.match(self.match_expr)
- return suppress_exception
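The ``__exit__`` above follows the usual expected-exception context-manager pattern: fail when nothing was raised, suppress only the expected type. A reduced standalone sketch of that pattern (not the pytest implementation itself)::

    class ExpectedException(object):
        def __init__(self, expected):
            self.expected = expected

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, tb):
            # No exception at all means the block under test did not raise.
            if exc_type is None:
                raise AssertionError("DID NOT RAISE %r" % (self.expected,))
            # Returning True tells Python to swallow the expected exception.
            return issubclass(exc_type, self.expected)

    with ExpectedException(ZeroDivisionError):
        1 / 0  # suppressed; any other exception type would propagate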
diff --git a/lib/spack/external/pytest-fallback/_pytest/recwarn.py b/lib/spack/external/pytest-fallback/_pytest/recwarn.py
deleted file mode 100644
index c9fa872c07..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/recwarn.py
+++ /dev/null
@@ -1,205 +0,0 @@
-""" recording warnings during test function execution. """
-from __future__ import absolute_import, division, print_function
-
-import inspect
-
-import _pytest._code
-import py
-import sys
-import warnings
-
-from _pytest.fixtures import yield_fixture
-from _pytest.outcomes import fail
-
-
-@yield_fixture
-def recwarn():
- """Return a WarningsRecorder instance that provides these methods:
-
- * ``pop(category=None)``: return last warning matching the category.
- * ``clear()``: clear list of warnings
-
- See http://docs.python.org/library/warnings.html for information
- on warning categories.
- """
- wrec = WarningsRecorder()
- with wrec:
- warnings.simplefilter('default')
- yield wrec
-
-
-def deprecated_call(func=None, *args, **kwargs):
- """context manager that can be used to ensure a block of code triggers a
- ``DeprecationWarning`` or ``PendingDeprecationWarning``::
-
- >>> import warnings
- >>> def api_call_v2():
- ... warnings.warn('use v3 of this api', DeprecationWarning)
- ... return 200
-
- >>> with deprecated_call():
- ... assert api_call_v2() == 200
-
- ``deprecated_call`` can also be used by passing a function and ``*args`` and ``*kwargs``,
-    in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warning
- types above.
- """
- if not func:
- return _DeprecatedCallContext()
- else:
- __tracebackhide__ = True
- with _DeprecatedCallContext():
- return func(*args, **kwargs)
-
-
-class _DeprecatedCallContext(object):
- """Implements the logic to capture deprecation warnings as a context manager."""
-
- def __enter__(self):
- self._captured_categories = []
- self._old_warn = warnings.warn
- self._old_warn_explicit = warnings.warn_explicit
- warnings.warn_explicit = self._warn_explicit
- warnings.warn = self._warn
-
- def _warn_explicit(self, message, category, *args, **kwargs):
- self._captured_categories.append(category)
-
- def _warn(self, message, category=None, *args, **kwargs):
- if isinstance(message, Warning):
- self._captured_categories.append(message.__class__)
- else:
- self._captured_categories.append(category)
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- warnings.warn_explicit = self._old_warn_explicit
- warnings.warn = self._old_warn
-
- if exc_type is None:
- deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
- if not any(issubclass(c, deprecation_categories) for c in self._captured_categories):
- __tracebackhide__ = True
- msg = "Did not produce DeprecationWarning or PendingDeprecationWarning"
- raise AssertionError(msg)
-
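Both calling conventions supported above, shown through the public ``pytest.deprecated_call`` entry point (function names are illustrative)::

    import warnings
    import pytest

    def old_api():
        warnings.warn("use new_api() instead", DeprecationWarning)
        return 42

    with pytest.deprecated_call():                 # context-manager form
        old_api()
    assert pytest.deprecated_call(old_api) == 42   # direct-call form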
-
-def warns(expected_warning, *args, **kwargs):
- """Assert that code raises a particular class of warning.
-
- Specifically, the input @expected_warning can be a warning class or
-    tuple of warning classes, and the code must issue that warning
- (if a single class) or one of those warnings (if a tuple).
-
- This helper produces a list of ``warnings.WarningMessage`` objects,
- one for each warning raised.
-
- This function can be used as a context manager, or any of the other ways
- ``pytest.raises`` can be used::
-
- >>> with warns(RuntimeWarning):
- ... warnings.warn("my warning", RuntimeWarning)
- """
- wcheck = WarningsChecker(expected_warning)
- if not args:
- return wcheck
- elif isinstance(args[0], str):
- code, = args
- assert isinstance(code, str)
- frame = sys._getframe(1)
- loc = frame.f_locals.copy()
- loc.update(kwargs)
-
- with wcheck:
- code = _pytest._code.Source(code).compile()
- py.builtin.exec_(code, frame.f_globals, loc)
- else:
- func = args[0]
- with wcheck:
- return func(*args[1:], **kwargs)
-
-
-class WarningsRecorder(warnings.catch_warnings):
- """A context manager to record raised warnings.
-
- Adapted from `warnings.catch_warnings`.
- """
-
- def __init__(self):
- super(WarningsRecorder, self).__init__(record=True)
- self._entered = False
- self._list = []
-
- @property
- def list(self):
- """The list of recorded warnings."""
- return self._list
-
- def __getitem__(self, i):
- """Get a recorded warning by index."""
- return self._list[i]
-
- def __iter__(self):
- """Iterate through the recorded warnings."""
- return iter(self._list)
-
- def __len__(self):
- """The number of recorded warnings."""
- return len(self._list)
-
- def pop(self, cls=Warning):
- """Pop the first recorded warning, raise exception if not exists."""
- for i, w in enumerate(self._list):
- if issubclass(w.category, cls):
- return self._list.pop(i)
- __tracebackhide__ = True
- raise AssertionError("%r not found in warning list" % cls)
-
- def clear(self):
- """Clear the list of recorded warnings."""
- self._list[:] = []
-
- def __enter__(self):
- if self._entered:
- __tracebackhide__ = True
- raise RuntimeError("Cannot enter %r twice" % self)
- self._list = super(WarningsRecorder, self).__enter__()
- warnings.simplefilter('always')
- return self
-
- def __exit__(self, *exc_info):
- if not self._entered:
- __tracebackhide__ = True
- raise RuntimeError("Cannot exit %r without entering first" % self)
- super(WarningsRecorder, self).__exit__(*exc_info)
-
-
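A short usage sketch of the recorder via the ``recwarn`` fixture defined above (``pop`` and ``clear`` as listed in its docstring)::

    import warnings

    def test_emits_user_warning(recwarn):
        warnings.warn("something odd", UserWarning)
        assert len(recwarn) == 1
        w = recwarn.pop(UserWarning)   # AssertionError if no match
        assert "something odd" in str(w.message)
        recwarn.clear()
        assert len(recwarn) == 0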
-class WarningsChecker(WarningsRecorder):
- def __init__(self, expected_warning=None):
- super(WarningsChecker, self).__init__()
-
- msg = ("exceptions must be old-style classes or "
- "derived from Warning, not %s")
- if isinstance(expected_warning, tuple):
- for exc in expected_warning:
- if not inspect.isclass(exc):
- raise TypeError(msg % type(exc))
- elif inspect.isclass(expected_warning):
- expected_warning = (expected_warning,)
- elif expected_warning is not None:
- raise TypeError(msg % type(expected_warning))
-
- self.expected_warning = expected_warning
-
- def __exit__(self, *exc_info):
- super(WarningsChecker, self).__exit__(*exc_info)
-
- # only check if we're not currently handling an exception
- if all(a is None for a in exc_info):
- if self.expected_warning is not None:
- if not any(issubclass(r.category, self.expected_warning)
- for r in self):
- __tracebackhide__ = True
- fail("DID NOT WARN. No warnings of type {0} was emitted. "
- "The list of emitted warnings is: {1}.".format(
- self.expected_warning,
- [each.message for each in self]))
diff --git a/lib/spack/external/pytest-fallback/_pytest/resultlog.py b/lib/spack/external/pytest-fallback/_pytest/resultlog.py
deleted file mode 100644
index 9f9c2d1f65..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/resultlog.py
+++ /dev/null
@@ -1,113 +0,0 @@
-""" log machine-parseable test session result information in a plain
-text file.
-"""
-from __future__ import absolute_import, division, print_function
-
-import py
-import os
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("terminal reporting", "resultlog plugin options")
- group.addoption('--resultlog', '--result-log', action="store",
- metavar="path", default=None,
- help="DEPRECATED path for machine-readable result log.")
-
-
-def pytest_configure(config):
- resultlog = config.option.resultlog
- # prevent opening resultlog on slave nodes (xdist)
- if resultlog and not hasattr(config, 'slaveinput'):
- dirname = os.path.dirname(os.path.abspath(resultlog))
- if not os.path.isdir(dirname):
- os.makedirs(dirname)
- logfile = open(resultlog, 'w', 1) # line buffered
- config._resultlog = ResultLog(config, logfile)
- config.pluginmanager.register(config._resultlog)
-
- from _pytest.deprecated import RESULT_LOG
- config.warn('C1', RESULT_LOG)
-
-
-def pytest_unconfigure(config):
- resultlog = getattr(config, '_resultlog', None)
- if resultlog:
- resultlog.logfile.close()
- del config._resultlog
- config.pluginmanager.unregister(resultlog)
-
-
-def generic_path(item):
- chain = item.listchain()
- gpath = [chain[0].name]
- fspath = chain[0].fspath
- fspart = False
- for node in chain[1:]:
- newfspath = node.fspath
- if newfspath == fspath:
- if fspart:
- gpath.append(':')
- fspart = False
- else:
- gpath.append('.')
- else:
- gpath.append('/')
- fspart = True
- name = node.name
- if name[0] in '([':
- gpath.pop()
- gpath.append(name)
- fspath = newfspath
- return ''.join(gpath)
-
-
-class ResultLog(object):
- def __init__(self, config, logfile):
- self.config = config
- self.logfile = logfile # preferably line buffered
-
- def write_log_entry(self, testpath, lettercode, longrepr):
- print("%s %s" % (lettercode, testpath), file=self.logfile)
- for line in longrepr.splitlines():
- print(" %s" % line, file=self.logfile)
-
- def log_outcome(self, report, lettercode, longrepr):
- testpath = getattr(report, 'nodeid', None)
- if testpath is None:
- testpath = report.fspath
- self.write_log_entry(testpath, lettercode, longrepr)
-
- def pytest_runtest_logreport(self, report):
- if report.when != "call" and report.passed:
- return
- res = self.config.hook.pytest_report_teststatus(report=report)
- code = res[1]
- if code == 'x':
- longrepr = str(report.longrepr)
- elif code == 'X':
- longrepr = ''
- elif report.passed:
- longrepr = ""
- elif report.failed:
- longrepr = str(report.longrepr)
- elif report.skipped:
- longrepr = str(report.longrepr[2])
- self.log_outcome(report, code, longrepr)
-
- def pytest_collectreport(self, report):
- if not report.passed:
- if report.failed:
- code = "F"
- longrepr = str(report.longrepr)
- else:
- assert report.skipped
- code = "S"
- longrepr = "%s:%d: %s" % report.longrepr
- self.log_outcome(report, code, longrepr)
-
- def pytest_internalerror(self, excrepr):
- reprcrash = getattr(excrepr, 'reprcrash', None)
- path = getattr(reprcrash, "path", None)
- if path is None:
- path = "cwd:%s" % py.path.local()
- self.write_log_entry(path, '!', str(excrepr))
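Each entry written by ``write_log_entry`` above is one ``<lettercode> <testpath>`` status line followed by the longrepr indented by a single space; a reduced sketch of that layout (buffer and values are hypothetical)::

    import io

    def write_log_entry(logfile, testpath, lettercode, longrepr):
        print("%s %s" % (lettercode, testpath), file=logfile)
        for line in longrepr.splitlines():
            print(" %s" % line, file=logfile)

    buf = io.StringIO()
    write_log_entry(buf, "test_mod.py::test_fails", "F", "assert 0")
    assert buf.getvalue() == "F test_mod.py::test_fails\n assert 0\n"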
diff --git a/lib/spack/external/pytest-fallback/_pytest/runner.py b/lib/spack/external/pytest-fallback/_pytest/runner.py
deleted file mode 100644
index b643fa3c91..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/runner.py
+++ /dev/null
@@ -1,508 +0,0 @@
-""" basic collect and runtest protocol implementations """
-from __future__ import absolute_import, division, print_function
-
-import bdb
-import os
-import sys
-from time import time
-
-import py
-from _pytest.compat import _PY2
-from _pytest._code.code import TerminalRepr, ExceptionInfo
-from _pytest.outcomes import skip, Skipped, TEST_OUTCOME
-
-#
-# pytest plugin hooks
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("terminal reporting", "reporting", after="general")
- group.addoption('--durations',
- action="store", type=int, default=None, metavar="N",
- help="show N slowest setup/test durations (N=0 for all)."),
-
-
-def pytest_terminal_summary(terminalreporter):
- durations = terminalreporter.config.option.durations
- if durations is None:
- return
- tr = terminalreporter
- dlist = []
- for replist in tr.stats.values():
- for rep in replist:
- if hasattr(rep, 'duration'):
- dlist.append(rep)
- if not dlist:
- return
- dlist.sort(key=lambda x: x.duration)
- dlist.reverse()
- if not durations:
- tr.write_sep("=", "slowest test durations")
- else:
- tr.write_sep("=", "slowest %s test durations" % durations)
- dlist = dlist[:durations]
-
- for rep in dlist:
- nodeid = rep.nodeid.replace("::()::", "::")
- tr.write_line("%02.2fs %-8s %s" %
- (rep.duration, rep.when, nodeid))
-
-
-def pytest_sessionstart(session):
- session._setupstate = SetupState()
-
-
-def pytest_sessionfinish(session):
- session._setupstate.teardown_all()
-
-
-def pytest_runtest_protocol(item, nextitem):
- item.ihook.pytest_runtest_logstart(
- nodeid=item.nodeid, location=item.location,
- )
- runtestprotocol(item, nextitem=nextitem)
- return True
-
-
-def runtestprotocol(item, log=True, nextitem=None):
- hasrequest = hasattr(item, "_request")
- if hasrequest and not item._request:
- item._initrequest()
- rep = call_and_report(item, "setup", log)
- reports = [rep]
- if rep.passed:
- if item.config.option.setupshow:
- show_test_item(item)
- if not item.config.option.setuponly:
- reports.append(call_and_report(item, "call", log))
- reports.append(call_and_report(item, "teardown", log,
- nextitem=nextitem))
- # after all teardown hooks have been called
- # want funcargs and request info to go away
- if hasrequest:
- item._request = False
- item.funcargs = None
- return reports
-
-
-def show_test_item(item):
- """Show test function, parameters and the fixtures of the test item."""
- tw = item.config.get_terminal_writer()
- tw.line()
- tw.write(' ' * 8)
- tw.write(item._nodeid)
- used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())
- if used_fixtures:
- tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures)))
-
-
-def pytest_runtest_setup(item):
- _update_current_test_var(item, 'setup')
- item.session._setupstate.prepare(item)
-
-
-def pytest_runtest_call(item):
- _update_current_test_var(item, 'call')
- try:
- item.runtest()
- except Exception:
- # Store trace info to allow postmortem debugging
- type, value, tb = sys.exc_info()
- tb = tb.tb_next # Skip *this* frame
- sys.last_type = type
- sys.last_value = value
- sys.last_traceback = tb
- del tb # Get rid of it in this namespace
- raise
-
-
-def pytest_runtest_teardown(item, nextitem):
- _update_current_test_var(item, 'teardown')
- item.session._setupstate.teardown_exact(item, nextitem)
- _update_current_test_var(item, None)
-
-
-def _update_current_test_var(item, when):
- """
- Update PYTEST_CURRENT_TEST to reflect the current item and stage.
-
- If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment.
- """
- var_name = 'PYTEST_CURRENT_TEST'
- if when:
- value = '{0} ({1})'.format(item.nodeid, when)
- if _PY2:
- # python 2 doesn't like null bytes on environment variables (see #2644)
- value = value.replace('\x00', '(null)')
- os.environ[var_name] = value
- else:
- os.environ.pop(var_name)
-
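While a stage is active, the variable set above holds ``"<nodeid> (<stage>)"``; it can be read from within the test or from a child process. A small sketch::

    import os

    def test_reports_current_stage():
        # e.g. "test_module.py::test_reports_current_stage (call)"
        current = os.environ.get("PYTEST_CURRENT_TEST", "")
        assert current.endswith("(call)")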
-
-def pytest_report_teststatus(report):
- if report.when in ("setup", "teardown"):
- if report.failed:
- # category, shortletter, verbose-word
- return "error", "E", "ERROR"
- elif report.skipped:
- return "skipped", "s", "SKIPPED"
- else:
- return "", "", ""
-
-
-#
-# Implementation
-
-def call_and_report(item, when, log=True, **kwds):
- call = call_runtest_hook(item, when, **kwds)
- hook = item.ihook
- report = hook.pytest_runtest_makereport(item=item, call=call)
- if log:
- hook.pytest_runtest_logreport(report=report)
- if check_interactive_exception(call, report):
- hook.pytest_exception_interact(node=item, call=call, report=report)
- return report
-
-
-def check_interactive_exception(call, report):
- return call.excinfo and not (
- hasattr(report, "wasxfail") or
- call.excinfo.errisinstance(skip.Exception) or
- call.excinfo.errisinstance(bdb.BdbQuit))
-
-
-def call_runtest_hook(item, when, **kwds):
- hookname = "pytest_runtest_" + when
- ihook = getattr(item.ihook, hookname)
- return CallInfo(lambda: ihook(item=item, **kwds), when=when)
-
-
-class CallInfo:
- """ Result/Exception info a function invocation. """
- #: None or ExceptionInfo object.
- excinfo = None
-
- def __init__(self, func, when):
- #: context of invocation: one of "setup", "call",
- #: "teardown", "memocollect"
- self.when = when
- self.start = time()
- try:
- self.result = func()
- except KeyboardInterrupt:
- self.stop = time()
- raise
- except: # noqa
- self.excinfo = ExceptionInfo()
- self.stop = time()
-
- def __repr__(self):
- if self.excinfo:
- status = "exception: %s" % str(self.excinfo.value)
- else:
- status = "result: %r" % (self.result,)
- return "<CallInfo when=%r %s>" % (self.when, status)
-
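``CallInfo`` above records either a result or the exception info, plus start/stop timestamps, around a single hook invocation; the pattern reduced to a standalone sketch (class name is illustrative)::

    import sys
    from time import time

    class Call(object):
        def __init__(self, func):
            self.excinfo = None
            self.start = time()
            try:
                self.result = func()
            except Exception:
                # Keep the exception info instead of letting it propagate.
                self.excinfo = sys.exc_info()
            self.stop = time()

    ok, bad = Call(lambda: 3), Call(lambda: 1 / 0)
    assert ok.result == 3 and ok.excinfo is None
    assert bad.excinfo[0] is ZeroDivisionError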
-
-def getslaveinfoline(node):
- try:
- return node._slaveinfocache
- except AttributeError:
- d = node.slaveinfo
- ver = "%s.%s.%s" % d['version_info'][:3]
- node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
- d['id'], d['sysplatform'], ver, d['executable'])
- return s
-
-
-class BaseReport(object):
-
- def __init__(self, **kw):
- self.__dict__.update(kw)
-
- def toterminal(self, out):
- if hasattr(self, 'node'):
- out.line(getslaveinfoline(self.node))
-
- longrepr = self.longrepr
- if longrepr is None:
- return
-
- if hasattr(longrepr, 'toterminal'):
- longrepr.toterminal(out)
- else:
- try:
- out.line(longrepr)
- except UnicodeEncodeError:
- out.line("<unprintable longrepr>")
-
- def get_sections(self, prefix):
- for name, content in self.sections:
- if name.startswith(prefix):
- yield prefix, content
-
- @property
- def longreprtext(self):
- """
- Read-only property that returns the full string representation
- of ``longrepr``.
-
- .. versionadded:: 3.0
- """
- tw = py.io.TerminalWriter(stringio=True)
- tw.hasmarkup = False
- self.toterminal(tw)
- exc = tw.stringio.getvalue()
- return exc.strip()
-
- @property
- def capstdout(self):
- """Return captured text from stdout, if capturing is enabled
-
- .. versionadded:: 3.0
- """
- return ''.join(content for (prefix, content) in self.get_sections('Captured stdout'))
-
- @property
- def capstderr(self):
- """Return captured text from stderr, if capturing is enabled
-
- .. versionadded:: 3.0
- """
- return ''.join(content for (prefix, content) in self.get_sections('Captured stderr'))
-
- passed = property(lambda x: x.outcome == "passed")
- failed = property(lambda x: x.outcome == "failed")
- skipped = property(lambda x: x.outcome == "skipped")
-
- @property
- def fspath(self):
- return self.nodeid.split("::")[0]
-
-
-def pytest_runtest_makereport(item, call):
- when = call.when
- duration = call.stop - call.start
- keywords = dict([(x, 1) for x in item.keywords])
- excinfo = call.excinfo
- sections = []
- if not call.excinfo:
- outcome = "passed"
- longrepr = None
- else:
- if not isinstance(excinfo, ExceptionInfo):
- outcome = "failed"
- longrepr = excinfo
- elif excinfo.errisinstance(skip.Exception):
- outcome = "skipped"
- r = excinfo._getreprcrash()
- longrepr = (str(r.path), r.lineno, r.message)
- else:
- outcome = "failed"
- if call.when == "call":
- longrepr = item.repr_failure(excinfo)
- else: # exception in setup or teardown
- longrepr = item._repr_failure_py(excinfo,
- style=item.config.option.tbstyle)
- for rwhen, key, content in item._report_sections:
- sections.append(("Captured %s %s" % (key, rwhen), content))
- return TestReport(item.nodeid, item.location,
- keywords, outcome, longrepr, when,
- sections, duration)
-
-
-class TestReport(BaseReport):
- """ Basic test report object (also used for setup and teardown calls if
- they fail).
- """
-
- def __init__(self, nodeid, location, keywords, outcome,
- longrepr, when, sections=(), duration=0, **extra):
- #: normalized collection node id
- self.nodeid = nodeid
-
- #: a (filesystempath, lineno, domaininfo) tuple indicating the
- #: actual location of a test item - it might be different from the
- #: collected one e.g. if a method is inherited from a different module.
- self.location = location
-
- #: a name -> value dictionary containing all keywords and
- #: markers associated with a test invocation.
- self.keywords = keywords
-
- #: test outcome, always one of "passed", "failed", "skipped".
- self.outcome = outcome
-
- #: None or a failure representation.
- self.longrepr = longrepr
-
- #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
- self.when = when
-
- #: list of pairs ``(str, str)`` of extra information which needs to
-        #: be marshallable. Used by pytest to add captured text
- #: from ``stdout`` and ``stderr``, but may be used by other plugins
- #: to add arbitrary information to reports.
- self.sections = list(sections)
-
- #: time it took to run just the test
- self.duration = duration
-
- self.__dict__.update(extra)
-
- def __repr__(self):
- return "<TestReport %r when=%r outcome=%r>" % (
- self.nodeid, self.when, self.outcome)
-
-
-class TeardownErrorReport(BaseReport):
- outcome = "failed"
- when = "teardown"
-
- def __init__(self, longrepr, **extra):
- self.longrepr = longrepr
- self.sections = []
- self.__dict__.update(extra)
-
-
-def pytest_make_collect_report(collector):
- call = CallInfo(
- lambda: list(collector.collect()),
- 'collect')
- longrepr = None
- if not call.excinfo:
- outcome = "passed"
- else:
- from _pytest import nose
- skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
- if call.excinfo.errisinstance(skip_exceptions):
- outcome = "skipped"
- r = collector._repr_failure_py(call.excinfo, "line").reprcrash
- longrepr = (str(r.path), r.lineno, r.message)
- else:
- outcome = "failed"
- errorinfo = collector.repr_failure(call.excinfo)
- if not hasattr(errorinfo, "toterminal"):
- errorinfo = CollectErrorRepr(errorinfo)
- longrepr = errorinfo
- rep = CollectReport(collector.nodeid, outcome, longrepr,
- getattr(call, 'result', None))
- rep.call = call # see collect_one_node
- return rep
-
-
-class CollectReport(BaseReport):
- def __init__(self, nodeid, outcome, longrepr, result,
- sections=(), **extra):
- self.nodeid = nodeid
- self.outcome = outcome
- self.longrepr = longrepr
- self.result = result or []
- self.sections = list(sections)
- self.__dict__.update(extra)
-
- @property
- def location(self):
- return (self.fspath, None, self.fspath)
-
- def __repr__(self):
- return "<CollectReport %r lenresult=%s outcome=%r>" % (
- self.nodeid, len(self.result), self.outcome)
-
-
-class CollectErrorRepr(TerminalRepr):
- def __init__(self, msg):
- self.longrepr = msg
-
- def toterminal(self, out):
- out.line(self.longrepr, red=True)
-
-
-class SetupState(object):
- """ shared state for setting up/tearing down test items or collectors. """
-
- def __init__(self):
- self.stack = []
- self._finalizers = {}
-
- def addfinalizer(self, finalizer, colitem):
- """ attach a finalizer to the given colitem.
- if colitem is None, this will add a finalizer that
- is called at the end of teardown_all().
- """
- assert colitem and not isinstance(colitem, tuple)
- assert py.builtin.callable(finalizer)
- # assert colitem in self.stack # some unit tests don't setup stack :/
- self._finalizers.setdefault(colitem, []).append(finalizer)
-
- def _pop_and_teardown(self):
- colitem = self.stack.pop()
- self._teardown_with_finalization(colitem)
-
- def _callfinalizers(self, colitem):
- finalizers = self._finalizers.pop(colitem, None)
- exc = None
- while finalizers:
- fin = finalizers.pop()
- try:
- fin()
- except TEST_OUTCOME:
- # XXX Only first exception will be seen by user,
- # ideally all should be reported.
- if exc is None:
- exc = sys.exc_info()
- if exc:
- py.builtin._reraise(*exc)
-
- def _teardown_with_finalization(self, colitem):
- self._callfinalizers(colitem)
- if hasattr(colitem, "teardown"):
- colitem.teardown()
- for colitem in self._finalizers:
- assert colitem is None or colitem in self.stack \
- or isinstance(colitem, tuple)
-
- def teardown_all(self):
- while self.stack:
- self._pop_and_teardown()
- for key in list(self._finalizers):
- self._teardown_with_finalization(key)
- assert not self._finalizers
-
- def teardown_exact(self, item, nextitem):
- needed_collectors = nextitem and nextitem.listchain() or []
- self._teardown_towards(needed_collectors)
-
- def _teardown_towards(self, needed_collectors):
- while self.stack:
- if self.stack == needed_collectors[:len(self.stack)]:
- break
- self._pop_and_teardown()
-
- def prepare(self, colitem):
- """ setup objects along the collector chain to the test-method
- and teardown previously setup objects."""
- needed_collectors = colitem.listchain()
- self._teardown_towards(needed_collectors)
-
- # check if the last collection node has raised an error
- for col in self.stack:
- if hasattr(col, '_prepare_exc'):
- py.builtin._reraise(*col._prepare_exc)
- for col in needed_collectors[len(self.stack):]:
- self.stack.append(col)
- try:
- col.setup()
- except TEST_OUTCOME:
- col._prepare_exc = sys.exc_info()
- raise
-
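The stack discipline above tears collectors down until the remaining stack is a prefix of the chain needed by the next item; a small worked sketch of that comparison (collector names are illustrative)::

    def teardown_towards(stack, needed):
        torn_down = []
        # Pop from the top until the stack is a prefix of `needed`.
        while stack and stack != needed[:len(stack)]:
            torn_down.append(stack.pop())
        return torn_down

    stack = ["session", "test_a.py", "TestClass"]
    needed = ["session", "test_b.py"]
    assert teardown_towards(stack, needed) == ["TestClass", "test_a.py"]
    assert stack == ["session"]   # only the shared collector stays set up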
-
-def collect_one_node(collector):
- ihook = collector.ihook
- ihook.pytest_collectstart(collector=collector)
- rep = ihook.pytest_make_collect_report(collector=collector)
- call = rep.__dict__.pop("call", None)
- if call and check_interactive_exception(call, rep):
- ihook.pytest_exception_interact(node=collector, call=call, report=rep)
- return rep
diff --git a/lib/spack/external/pytest-fallback/_pytest/setuponly.py b/lib/spack/external/pytest-fallback/_pytest/setuponly.py
deleted file mode 100644
index 15e195ad5a..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/setuponly.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from __future__ import absolute_import, division, print_function
-
-import pytest
-import sys
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("debugconfig")
- group.addoption('--setuponly', '--setup-only', action="store_true",
- help="only setup fixtures, do not execute tests.")
- group.addoption('--setupshow', '--setup-show', action="store_true",
- help="show setup of fixtures while executing tests.")
-
-
-@pytest.hookimpl(hookwrapper=True)
-def pytest_fixture_setup(fixturedef, request):
- yield
- config = request.config
- if config.option.setupshow:
- if hasattr(request, 'param'):
- # Save the fixture parameter so ._show_fixture_action() can
- # display it now and during the teardown (in .finish()).
- if fixturedef.ids:
- if callable(fixturedef.ids):
- fixturedef.cached_param = fixturedef.ids(request.param)
- else:
- fixturedef.cached_param = fixturedef.ids[
- request.param_index]
- else:
- fixturedef.cached_param = request.param
- _show_fixture_action(fixturedef, 'SETUP')
-
-
-def pytest_fixture_post_finalizer(fixturedef):
- if hasattr(fixturedef, "cached_result"):
- config = fixturedef._fixturemanager.config
- if config.option.setupshow:
- _show_fixture_action(fixturedef, 'TEARDOWN')
- if hasattr(fixturedef, "cached_param"):
- del fixturedef.cached_param
-
-
-def _show_fixture_action(fixturedef, msg):
- config = fixturedef._fixturemanager.config
- capman = config.pluginmanager.getplugin('capturemanager')
- if capman:
- out, err = capman.suspendcapture()
-
- tw = config.get_terminal_writer()
- tw.line()
- tw.write(' ' * 2 * fixturedef.scopenum)
- tw.write('{step} {scope} {fixture}'.format(
- step=msg.ljust(8), # align the output to TEARDOWN
- scope=fixturedef.scope[0].upper(),
- fixture=fixturedef.argname))
-
- if msg == 'SETUP':
- deps = sorted(arg for arg in fixturedef.argnames if arg != 'request')
- if deps:
- tw.write(' (fixtures used: {0})'.format(', '.join(deps)))
-
- if hasattr(fixturedef, 'cached_param'):
- tw.write('[{0}]'.format(fixturedef.cached_param))
-
- if capman:
- capman.resumecapture()
- sys.stdout.write(out)
- sys.stderr.write(err)
-
-
-@pytest.hookimpl(tryfirst=True)
-def pytest_cmdline_main(config):
- if config.option.setuponly:
- config.option.setupshow = True
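With ``--setup-show`` (or ``--setup-only``, which implies it per ``pytest_cmdline_main`` above), ``_show_fixture_action`` prints one line per fixture: two spaces of indent per scope level, the step padded to eight characters, the scope's first letter, then the fixture name. A sketch of that layout (the fixture name is illustrative)::

    def format_fixture_line(step, scope, fixture, depth=0):
        return ' ' * 2 * depth + '{step} {scope} {fixture}'.format(
            step=step.ljust(8), scope=scope[0].upper(), fixture=fixture)

    line = format_fixture_line('SETUP', 'session', 'tmpdir_factory')
    assert line == 'SETUP    S tmpdir_factory'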
diff --git a/lib/spack/external/pytest-fallback/_pytest/setupplan.py b/lib/spack/external/pytest-fallback/_pytest/setupplan.py
deleted file mode 100644
index e11bd40698..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/setupplan.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from __future__ import absolute_import, division, print_function
-
-import pytest
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("debugconfig")
- group.addoption('--setupplan', '--setup-plan', action="store_true",
- help="show what fixtures and tests would be executed but "
- "don't execute anything.")
-
-
-@pytest.hookimpl(tryfirst=True)
-def pytest_fixture_setup(fixturedef, request):
- # Will return a dummy fixture if the setuponly option is provided.
- if request.config.option.setupplan:
- fixturedef.cached_result = (None, None, None)
- return fixturedef.cached_result
-
-
-@pytest.hookimpl(tryfirst=True)
-def pytest_cmdline_main(config):
- if config.option.setupplan:
- config.option.setuponly = True
- config.option.setupshow = True
diff --git a/lib/spack/external/pytest-fallback/_pytest/skipping.py b/lib/spack/external/pytest-fallback/_pytest/skipping.py
deleted file mode 100644
index b92800d10b..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/skipping.py
+++ /dev/null
@@ -1,372 +0,0 @@
-""" support for skip/xfail functions and markers. """
-from __future__ import absolute_import, division, print_function
-
-import os
-import sys
-import traceback
-
-import py
-from _pytest.config import hookimpl
-from _pytest.mark import MarkInfo, MarkDecorator
-from _pytest.outcomes import fail, skip, xfail, TEST_OUTCOME
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("general")
- group.addoption('--runxfail',
- action="store_true", dest="runxfail", default=False,
- help="run tests even if they are marked xfail")
-
- parser.addini("xfail_strict", "default for the strict parameter of xfail "
- "markers when not given explicitly (default: "
- "False)",
- default=False,
- type="bool")
-
-
-def pytest_configure(config):
- if config.option.runxfail:
- # yay a hack
- import pytest
- old = pytest.xfail
- config._cleanup.append(lambda: setattr(pytest, "xfail", old))
-
- def nop(*args, **kwargs):
- pass
-
- nop.Exception = xfail.Exception
- setattr(pytest, "xfail", nop)
-
- config.addinivalue_line("markers",
- "skip(reason=None): skip the given test function with an optional reason. "
- "Example: skip(reason=\"no way of currently testing this\") skips the "
- "test."
- )
- config.addinivalue_line("markers",
- "skipif(condition): skip the given test function if eval(condition) "
- "results in a True value. Evaluation happens within the "
- "module global context. Example: skipif('sys.platform == \"win32\"') "
- "skips the test if we are on the win32 platform. see "
- "http://pytest.org/latest/skipping.html"
- )
- config.addinivalue_line("markers",
- "xfail(condition, reason=None, run=True, raises=None, strict=False): "
- "mark the test function as an expected failure if eval(condition) "
- "has a True value. Optionally specify a reason for better reporting "
- "and run=False if you don't even want to execute the test function. "
- "If only specific exception(s) are expected, you can list them in "
- "raises, and if the test fails in other ways, it will be reported as "
- "a true failure. See http://pytest.org/latest/skipping.html"
- )
-
-
-class MarkEvaluator:
- def __init__(self, item, name):
- self.item = item
- self.name = name
-
- @property
- def holder(self):
- return self.item.keywords.get(self.name)
-
- def __bool__(self):
- return bool(self.holder)
- __nonzero__ = __bool__
-
- def wasvalid(self):
- return not hasattr(self, 'exc')
-
- def invalidraise(self, exc):
- raises = self.get('raises')
- if not raises:
- return
- return not isinstance(exc, raises)
-
- def istrue(self):
- try:
- return self._istrue()
- except TEST_OUTCOME:
- self.exc = sys.exc_info()
- if isinstance(self.exc[1], SyntaxError):
- msg = [" " * (self.exc[1].offset + 4) + "^", ]
- msg.append("SyntaxError: invalid syntax")
- else:
- msg = traceback.format_exception_only(*self.exc[:2])
- fail("Error evaluating %r expression\n"
- " %s\n"
- "%s"
- % (self.name, self.expr, "\n".join(msg)),
- pytrace=False)
-
- def _getglobals(self):
- d = {'os': os, 'sys': sys, 'config': self.item.config}
- if hasattr(self.item, 'obj'):
- d.update(self.item.obj.__globals__)
- return d
-
- def _istrue(self):
- if hasattr(self, 'result'):
- return self.result
- if self.holder:
- if self.holder.args or 'condition' in self.holder.kwargs:
- self.result = False
- # "holder" might be a MarkInfo or a MarkDecorator; only
- # MarkInfo keeps track of all parameters it received in an
- # _arglist attribute
- marks = getattr(self.holder, '_marks', None) \
- or [self.holder.mark]
- for _, args, kwargs in marks:
- if 'condition' in kwargs:
- args = (kwargs['condition'],)
- for expr in args:
- self.expr = expr
- if isinstance(expr, py.builtin._basestring):
- d = self._getglobals()
- result = cached_eval(self.item.config, expr, d)
- else:
- if "reason" not in kwargs:
- # XXX better be checked at collection time
- msg = "you need to specify reason=STRING " \
- "when using booleans as conditions."
- fail(msg)
- result = bool(expr)
- if result:
- self.result = True
- self.reason = kwargs.get('reason', None)
- self.expr = expr
- return self.result
- else:
- self.result = True
- return getattr(self, 'result', False)
-
- def get(self, attr, default=None):
- return self.holder.kwargs.get(attr, default)
-
- def getexplanation(self):
- expl = getattr(self, 'reason', None) or self.get('reason', None)
- if not expl:
- if not hasattr(self, 'expr'):
- return ""
- else:
- return "condition: " + str(self.expr)
- return expl
-
-
-@hookimpl(tryfirst=True)
-def pytest_runtest_setup(item):
- # Check if skip or skipif are specified as pytest marks
-
- skipif_info = item.keywords.get('skipif')
- if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
- eval_skipif = MarkEvaluator(item, 'skipif')
- if eval_skipif.istrue():
- item._evalskip = eval_skipif
- skip(eval_skipif.getexplanation())
-
- skip_info = item.keywords.get('skip')
- if isinstance(skip_info, (MarkInfo, MarkDecorator)):
- item._evalskip = True
- if 'reason' in skip_info.kwargs:
- skip(skip_info.kwargs['reason'])
- elif skip_info.args:
- skip(skip_info.args[0])
- else:
- skip("unconditional skip")
-
- item._evalxfail = MarkEvaluator(item, 'xfail')
- check_xfail_no_run(item)
-
-
-@hookimpl(hookwrapper=True)
-def pytest_pyfunc_call(pyfuncitem):
- check_xfail_no_run(pyfuncitem)
- outcome = yield
- passed = outcome.excinfo is None
- if passed:
- check_strict_xfail(pyfuncitem)
-
-
-def check_xfail_no_run(item):
- """check xfail(run=False)"""
- if not item.config.option.runxfail:
- evalxfail = item._evalxfail
- if evalxfail.istrue():
- if not evalxfail.get('run', True):
- xfail("[NOTRUN] " + evalxfail.getexplanation())
-
-
-def check_strict_xfail(pyfuncitem):
- """check xfail(strict=True) for the given PASSING test"""
- evalxfail = pyfuncitem._evalxfail
- if evalxfail.istrue():
- strict_default = pyfuncitem.config.getini('xfail_strict')
- is_strict_xfail = evalxfail.get('strict', strict_default)
- if is_strict_xfail:
- del pyfuncitem._evalxfail
- explanation = evalxfail.getexplanation()
- fail('[XPASS(strict)] ' + explanation, pytrace=False)
-
-
-@hookimpl(hookwrapper=True)
-def pytest_runtest_makereport(item, call):
- outcome = yield
- rep = outcome.get_result()
- evalxfail = getattr(item, '_evalxfail', None)
- evalskip = getattr(item, '_evalskip', None)
-    # unittest special case, see setting of _unexpectedsuccess
- if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
- from _pytest.compat import _is_unittest_unexpected_success_a_failure
- if item._unexpectedsuccess:
- rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
- else:
- rep.longrepr = "Unexpected success"
- if _is_unittest_unexpected_success_a_failure():
- rep.outcome = "failed"
- else:
- rep.outcome = "passed"
- rep.wasxfail = rep.longrepr
- elif item.config.option.runxfail:
-        pass  # don't interfere
- elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
- rep.wasxfail = "reason: " + call.excinfo.value.msg
- rep.outcome = "skipped"
- elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
- evalxfail.istrue():
- if call.excinfo:
- if evalxfail.invalidraise(call.excinfo.value):
- rep.outcome = "failed"
- else:
- rep.outcome = "skipped"
- rep.wasxfail = evalxfail.getexplanation()
- elif call.when == "call":
- strict_default = item.config.getini('xfail_strict')
- is_strict_xfail = evalxfail.get('strict', strict_default)
- explanation = evalxfail.getexplanation()
- if is_strict_xfail:
- rep.outcome = "failed"
- rep.longrepr = "[XPASS(strict)] {0}".format(explanation)
- else:
- rep.outcome = "passed"
- rep.wasxfail = explanation
- elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
- # skipped by mark.skipif; change the location of the failure
- # to point to the item definition, otherwise it will display
- # the location of where the skip exception was raised within pytest
- filename, line, reason = rep.longrepr
- filename, line = item.location[:2]
- rep.longrepr = filename, line, reason
-
-# called by terminalreporter progress reporting
-
-
-def pytest_report_teststatus(report):
- if hasattr(report, "wasxfail"):
- if report.skipped:
- return "xfailed", "x", "xfail"
- elif report.passed:
- return "xpassed", "X", ("XPASS", {'yellow': True})
-
-# called by the terminalreporter instance/plugin
-
-
-def pytest_terminal_summary(terminalreporter):
- tr = terminalreporter
- if not tr.reportchars:
- # for name in "xfailed skipped failed xpassed":
- # if not tr.stats.get(name, 0):
- # tr.write_line("HINT: use '-r' option to see extra "
- # "summary info about tests")
- # break
- return
-
- lines = []
- for char in tr.reportchars:
- if char == "x":
- show_xfailed(terminalreporter, lines)
- elif char == "X":
- show_xpassed(terminalreporter, lines)
- elif char in "fF":
- show_simple(terminalreporter, lines, 'failed', "FAIL %s")
- elif char in "sS":
- show_skipped(terminalreporter, lines)
- elif char == "E":
- show_simple(terminalreporter, lines, 'error', "ERROR %s")
- elif char == 'p':
- show_simple(terminalreporter, lines, 'passed', "PASSED %s")
-
- if lines:
- tr._tw.sep("=", "short test summary info")
- for line in lines:
- tr._tw.line(line)
-
-
-def show_simple(terminalreporter, lines, stat, format):
- failed = terminalreporter.stats.get(stat)
- if failed:
- for rep in failed:
- pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
- lines.append(format % (pos,))
-
-
-def show_xfailed(terminalreporter, lines):
- xfailed = terminalreporter.stats.get("xfailed")
- if xfailed:
- for rep in xfailed:
- pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
- reason = rep.wasxfail
- lines.append("XFAIL %s" % (pos,))
- if reason:
- lines.append(" " + str(reason))
-
-
-def show_xpassed(terminalreporter, lines):
- xpassed = terminalreporter.stats.get("xpassed")
- if xpassed:
- for rep in xpassed:
- pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
- reason = rep.wasxfail
- lines.append("XPASS %s %s" % (pos, reason))
-
-
-def cached_eval(config, expr, d):
- if not hasattr(config, '_evalcache'):
- config._evalcache = {}
- try:
- return config._evalcache[expr]
- except KeyError:
- import _pytest._code
- exprcode = _pytest._code.compile(expr, mode="eval")
- config._evalcache[expr] = x = eval(exprcode, d)
- return x
-
-
-def folded_skips(skipped):
- d = {}
- for event in skipped:
- key = event.longrepr
- assert len(key) == 3, (event, key)
- d.setdefault(key, []).append(event)
- values = []
- for key, events in d.items():
- values.append((len(events),) + key)
- return values
-
-
-def show_skipped(terminalreporter, lines):
- tr = terminalreporter
- skipped = tr.stats.get('skipped', [])
- if skipped:
- # if not tr.hasopt('skipped'):
- # tr.write_line(
- # "%d skipped tests, specify -rs for more info" %
- # len(skipped))
- # return
- fskips = folded_skips(skipped)
- if fskips:
- # tr.write_sep("_", "skipped test summary")
- for num, fspath, lineno, reason in fskips:
- if reason.startswith("Skipped: "):
- reason = reason[9:]
- lines.append(
- "SKIP [%d] %s:%d: %s" %
- (num, fspath, lineno + 1, reason))
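
For orientation, here is a minimal sketch (not part of the deleted module) of the skip/skipif/xfail markers that the MarkEvaluator above is responsible for evaluating; the test names and reasons are illustrative.

```python
import sys
import pytest


@pytest.mark.skip(reason="no way of currently testing this")
def test_always_skipped():
    assert False  # never executed


@pytest.mark.skipif('sys.platform == "win32"', reason="not supported on win32")
def test_not_on_windows():
    assert True


# boolean conditions require an explicit reason (see _istrue above)
@pytest.mark.xfail(sys.version_info < (3, 0), reason="expected to fail on py2")
def test_expected_failure_on_py2():
    assert sys.version_info >= (3, 0)
```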
diff --git a/lib/spack/external/pytest-fallback/_pytest/terminal.py b/lib/spack/external/pytest-fallback/_pytest/terminal.py
deleted file mode 100644
index 9da94d0c91..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/terminal.py
+++ /dev/null
@@ -1,650 +0,0 @@
-""" terminal reporting of the full testing process.
-
-This is a good source for looking at the various reporting hooks.
-"""
-from __future__ import absolute_import, division, print_function
-
-import itertools
-from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
- EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
-import pytest
-import py
-import sys
-import time
-import platform
-
-from _pytest import nodes
-import _pytest._pluggy as pluggy
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("terminal reporting", "reporting", after="general")
- group._addoption('-v', '--verbose', action="count",
- dest="verbose", default=0, help="increase verbosity."),
- group._addoption('-q', '--quiet', action="count",
- dest="quiet", default=0, help="decrease verbosity."),
- group._addoption('-r',
- action="store", dest="reportchars", default='', metavar="chars",
- help="show extra test summary info as specified by chars (f)ailed, "
- "(E)error, (s)skipped, (x)failed, (X)passed, "
- "(p)passed, (P)passed with output, (a)all except pP. "
- "Warnings are displayed at all times except when "
- "--disable-warnings is set")
- group._addoption('--disable-warnings', '--disable-pytest-warnings', default=False,
- dest='disable_warnings', action='store_true',
- help='disable warnings summary')
- group._addoption('-l', '--showlocals',
- action="store_true", dest="showlocals", default=False,
- help="show locals in tracebacks (disabled by default).")
- group._addoption('--tb', metavar="style",
- action="store", dest="tbstyle", default='auto',
- choices=['auto', 'long', 'short', 'no', 'line', 'native'],
- help="traceback print mode (auto/long/short/line/native/no).")
- group._addoption('--fulltrace', '--full-trace',
- action="store_true", default=False,
- help="don't cut any tracebacks (default is to cut).")
- group._addoption('--color', metavar="color",
- action="store", dest="color", default='auto',
- choices=['yes', 'no', 'auto'],
- help="color terminal output (yes/no/auto).")
-
-
-def pytest_configure(config):
- config.option.verbose -= config.option.quiet
- reporter = TerminalReporter(config, sys.stdout)
- config.pluginmanager.register(reporter, 'terminalreporter')
- if config.option.debug or config.option.traceconfig:
- def mywriter(tags, args):
- msg = " ".join(map(str, args))
- reporter.write_line("[traceconfig] " + msg)
- config.trace.root.setprocessor("pytest:config", mywriter)
-
-
-def getreportopt(config):
- reportopts = ""
- reportchars = config.option.reportchars
- if not config.option.disable_warnings and 'w' not in reportchars:
- reportchars += 'w'
- elif config.option.disable_warnings and 'w' in reportchars:
- reportchars = reportchars.replace('w', '')
- if reportchars:
- for char in reportchars:
- if char not in reportopts and char != 'a':
- reportopts += char
- elif char == 'a':
- reportopts = 'fEsxXw'
- return reportopts
-
-
-def pytest_report_teststatus(report):
- if report.passed:
- letter = "."
- elif report.skipped:
- letter = "s"
- elif report.failed:
- letter = "F"
- if report.when != "call":
- letter = "f"
- return report.outcome, letter, report.outcome.upper()
-
-
-class WarningReport:
- """
- Simple structure to hold warnings information captured by ``pytest_logwarning``.
- """
-
- def __init__(self, code, message, nodeid=None, fslocation=None):
- """
- :param code: unused
- :param str message: user friendly message about the warning
- :param str|None nodeid: node id that generated the warning (see ``get_location``).
- :param tuple|py.path.local fslocation:
- file system location of the source of the warning (see ``get_location``).
- """
- self.code = code
- self.message = message
- self.nodeid = nodeid
- self.fslocation = fslocation
-
- def get_location(self, config):
- """
-        Returns the most user-friendly information available about the
-        location of a warning, or None.
- """
- if self.nodeid:
- return self.nodeid
- if self.fslocation:
- if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:
- filename, linenum = self.fslocation[:2]
- relpath = py.path.local(filename).relto(config.invocation_dir)
- return '%s:%s' % (relpath, linenum)
- else:
- return str(self.fslocation)
- return None
-
-
-class TerminalReporter:
- def __init__(self, config, file=None):
- import _pytest.config
- self.config = config
- self.verbosity = self.config.option.verbose
- self.showheader = self.verbosity >= 0
- self.showfspath = self.verbosity >= 0
- self.showlongtestinfo = self.verbosity > 0
- self._numcollected = 0
-
- self.stats = {}
- self.startdir = py.path.local()
- if file is None:
- file = sys.stdout
- self._tw = self.writer = _pytest.config.create_terminal_writer(config,
- file)
- self.currentfspath = None
- self.reportchars = getreportopt(config)
- self.hasmarkup = self._tw.hasmarkup
- self.isatty = file.isatty()
-
- def hasopt(self, char):
- char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
- return char in self.reportchars
-
- def write_fspath_result(self, nodeid, res):
- fspath = self.config.rootdir.join(nodeid.split("::")[0])
- if fspath != self.currentfspath:
- self.currentfspath = fspath
- fspath = self.startdir.bestrelpath(fspath)
- self._tw.line()
- self._tw.write(fspath + " ")
- self._tw.write(res)
-
- def write_ensure_prefix(self, prefix, extra="", **kwargs):
- if self.currentfspath != prefix:
- self._tw.line()
- self.currentfspath = prefix
- self._tw.write(prefix)
- if extra:
- self._tw.write(extra, **kwargs)
- self.currentfspath = -2
-
- def ensure_newline(self):
- if self.currentfspath:
- self._tw.line()
- self.currentfspath = None
-
- def write(self, content, **markup):
- self._tw.write(content, **markup)
-
- def write_line(self, line, **markup):
- if not py.builtin._istext(line):
- line = py.builtin.text(line, errors="replace")
- self.ensure_newline()
- self._tw.line(line, **markup)
-
- def rewrite(self, line, **markup):
- """
- Rewinds the terminal cursor to the beginning and writes the given line.
-
- :kwarg erase: if True, will also add spaces until the full terminal width to ensure
- previous lines are properly erased.
-
- The rest of the keyword arguments are markup instructions.
- """
- erase = markup.pop('erase', False)
- if erase:
- fill_count = self._tw.fullwidth - len(line)
- fill = ' ' * fill_count
- else:
- fill = ''
- line = str(line)
- self._tw.write("\r" + line + fill, **markup)
-
- def write_sep(self, sep, title=None, **markup):
- self.ensure_newline()
- self._tw.sep(sep, title, **markup)
-
- def section(self, title, sep="=", **kw):
- self._tw.sep(sep, title, **kw)
-
- def line(self, msg, **kw):
- self._tw.line(msg, **kw)
-
- def pytest_internalerror(self, excrepr):
- for line in py.builtin.text(excrepr).split("\n"):
- self.write_line("INTERNALERROR> " + line)
- return 1
-
- def pytest_logwarning(self, code, fslocation, message, nodeid):
- warnings = self.stats.setdefault("warnings", [])
- warning = WarningReport(code=code, fslocation=fslocation,
- message=message, nodeid=nodeid)
- warnings.append(warning)
-
- def pytest_plugin_registered(self, plugin):
- if self.config.option.traceconfig:
- msg = "PLUGIN registered: %s" % (plugin,)
- # XXX this event may happen during setup/teardown time
- # which unfortunately captures our output here
- # which garbles our output if we use self.write_line
- self.write_line(msg)
-
- def pytest_deselected(self, items):
- self.stats.setdefault('deselected', []).extend(items)
-
- def pytest_runtest_logstart(self, nodeid, location):
- # ensure that the path is printed before the
- # 1st test of a module starts running
- if self.showlongtestinfo:
- line = self._locationline(nodeid, *location)
- self.write_ensure_prefix(line, "")
- elif self.showfspath:
- fsid = nodeid.split("::")[0]
- self.write_fspath_result(fsid, "")
-
- def pytest_runtest_logreport(self, report):
- rep = report
- res = self.config.hook.pytest_report_teststatus(report=rep)
- cat, letter, word = res
- self.stats.setdefault(cat, []).append(rep)
- self._tests_ran = True
- if not letter and not word:
- # probably passed setup/teardown
- return
- if self.verbosity <= 0:
- if not hasattr(rep, 'node') and self.showfspath:
- self.write_fspath_result(rep.nodeid, letter)
- else:
- self._tw.write(letter)
- else:
- if isinstance(word, tuple):
- word, markup = word
- else:
- if rep.passed:
- markup = {'green': True}
- elif rep.failed:
- markup = {'red': True}
- elif rep.skipped:
- markup = {'yellow': True}
- line = self._locationline(rep.nodeid, *rep.location)
- if not hasattr(rep, 'node'):
- self.write_ensure_prefix(line, word, **markup)
- # self._tw.write(word, **markup)
- else:
- self.ensure_newline()
- if hasattr(rep, 'node'):
- self._tw.write("[%s] " % rep.node.gateway.id)
- self._tw.write(word, **markup)
- self._tw.write(" " + line)
- self.currentfspath = -2
-
- def pytest_collection(self):
- if not self.isatty and self.config.option.verbose >= 1:
- self.write("collecting ... ", bold=True)
-
- def pytest_collectreport(self, report):
- if report.failed:
- self.stats.setdefault("error", []).append(report)
- elif report.skipped:
- self.stats.setdefault("skipped", []).append(report)
- items = [x for x in report.result if isinstance(x, pytest.Item)]
- self._numcollected += len(items)
- if self.isatty:
- # self.write_fspath_result(report.nodeid, 'E')
- self.report_collect()
-
- def report_collect(self, final=False):
- if self.config.option.verbose < 0:
- return
-
- errors = len(self.stats.get('error', []))
- skipped = len(self.stats.get('skipped', []))
- if final:
- line = "collected "
- else:
- line = "collecting "
- line += str(self._numcollected) + " item" + ('' if self._numcollected == 1 else 's')
- if errors:
- line += " / %d errors" % errors
- if skipped:
- line += " / %d skipped" % skipped
- if self.isatty:
- self.rewrite(line, bold=True, erase=True)
- if final:
- self.write('\n')
- else:
- self.write_line(line)
-
- def pytest_collection_modifyitems(self):
- self.report_collect(True)
-
- @pytest.hookimpl(trylast=True)
- def pytest_sessionstart(self, session):
- self._sessionstarttime = time.time()
- if not self.showheader:
- return
- self.write_sep("=", "test session starts", bold=True)
- verinfo = platform.python_version()
- msg = "platform %s -- Python %s" % (sys.platform, verinfo)
- if hasattr(sys, 'pypy_version_info'):
- verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
- msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
- msg += ", pytest-%s, py-%s, pluggy-%s" % (
- pytest.__version__, py.__version__, pluggy.__version__)
- if self.verbosity > 0 or self.config.option.debug or \
- getattr(self.config.option, 'pastebin', None):
- msg += " -- " + str(sys.executable)
- self.write_line(msg)
- lines = self.config.hook.pytest_report_header(
- config=self.config, startdir=self.startdir)
- self._write_report_lines_from_hooks(lines)
-
- def _write_report_lines_from_hooks(self, lines):
- lines.reverse()
- for line in flatten(lines):
- self.write_line(line)
-
- def pytest_report_header(self, config):
- inifile = ""
- if config.inifile:
- inifile = " " + config.rootdir.bestrelpath(config.inifile)
- lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)]
-
- plugininfo = config.pluginmanager.list_plugin_distinfo()
- if plugininfo:
-
- lines.append(
- "plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
- return lines
-
- def pytest_collection_finish(self, session):
- if self.config.option.collectonly:
- self._printcollecteditems(session.items)
- if self.stats.get('failed'):
- self._tw.sep("!", "collection failures")
- for rep in self.stats.get('failed'):
- rep.toterminal(self._tw)
- return 1
- return 0
- lines = self.config.hook.pytest_report_collectionfinish(
- config=self.config, startdir=self.startdir, items=session.items)
- self._write_report_lines_from_hooks(lines)
-
- def _printcollecteditems(self, items):
- # to print out items and their parent collectors
- # we take care to leave out Instances aka ()
- # because later versions are going to get rid of them anyway
- if self.config.option.verbose < 0:
- if self.config.option.verbose < -1:
- counts = {}
- for item in items:
- name = item.nodeid.split('::', 1)[0]
- counts[name] = counts.get(name, 0) + 1
- for name, count in sorted(counts.items()):
- self._tw.line("%s: %d" % (name, count))
- else:
- for item in items:
- nodeid = item.nodeid
- nodeid = nodeid.replace("::()::", "::")
- self._tw.line(nodeid)
- return
- stack = []
- indent = ""
- for item in items:
- needed_collectors = item.listchain()[1:] # strip root node
- while stack:
- if stack == needed_collectors[:len(stack)]:
- break
- stack.pop()
- for col in needed_collectors[len(stack):]:
- stack.append(col)
- # if col.name == "()":
- # continue
- indent = (len(stack) - 1) * " "
- self._tw.line("%s%s" % (indent, col))
-
- @pytest.hookimpl(hookwrapper=True)
- def pytest_sessionfinish(self, exitstatus):
- outcome = yield
- outcome.get_result()
- self._tw.line("")
- summary_exit_codes = (
- EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
- EXIT_NOTESTSCOLLECTED)
- if exitstatus in summary_exit_codes:
- self.config.hook.pytest_terminal_summary(terminalreporter=self,
- exitstatus=exitstatus)
- self.summary_errors()
- self.summary_failures()
- self.summary_warnings()
- self.summary_passes()
- if exitstatus == EXIT_INTERRUPTED:
- self._report_keyboardinterrupt()
- del self._keyboardinterrupt_memo
- self.summary_deselected()
- self.summary_stats()
-
- def pytest_keyboard_interrupt(self, excinfo):
- self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
-
- def pytest_unconfigure(self):
- if hasattr(self, '_keyboardinterrupt_memo'):
- self._report_keyboardinterrupt()
-
- def _report_keyboardinterrupt(self):
- excrepr = self._keyboardinterrupt_memo
- msg = excrepr.reprcrash.message
- self.write_sep("!", msg)
- if "KeyboardInterrupt" in msg:
- if self.config.option.fulltrace:
- excrepr.toterminal(self._tw)
- else:
- self._tw.line("to show a full traceback on KeyboardInterrupt use --fulltrace", yellow=True)
- excrepr.reprcrash.toterminal(self._tw)
-
- def _locationline(self, nodeid, fspath, lineno, domain):
- def mkrel(nodeid):
- line = self.config.cwd_relative_nodeid(nodeid)
- if domain and line.endswith(domain):
- line = line[:-len(domain)]
- values = domain.split("[")
- values[0] = values[0].replace('.', '::') # don't replace '.' in params
- line += "[".join(values)
- return line
- # collect_fspath comes from testid which has a "/"-normalized path
-
- if fspath:
- res = mkrel(nodeid).replace("::()", "") # parens-normalization
- if nodeid.split("::")[0] != fspath.replace("\\", nodes.SEP):
- res += " <- " + self.startdir.bestrelpath(fspath)
- else:
- res = "[location]"
- return res + " "
-
- def _getfailureheadline(self, rep):
- if hasattr(rep, 'location'):
- fspath, lineno, domain = rep.location
- return domain
- else:
- return "test session" # XXX?
-
- def _getcrashline(self, rep):
- try:
- return str(rep.longrepr.reprcrash)
- except AttributeError:
- try:
- return str(rep.longrepr)[:50]
- except AttributeError:
- return ""
-
- #
- # summaries for sessionfinish
- #
- def getreports(self, name):
- values = []
- for x in self.stats.get(name, []):
- if not hasattr(x, '_pdbshown'):
- values.append(x)
- return values
-
- def summary_warnings(self):
- if self.hasopt("w"):
- all_warnings = self.stats.get("warnings")
- if not all_warnings:
- return
-
- grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config))
-
- self.write_sep("=", "warnings summary", yellow=True, bold=False)
- for location, warnings in grouped:
- self._tw.line(str(location) or '<undetermined location>')
- for w in warnings:
- lines = w.message.splitlines()
- indented = '\n'.join(' ' + x for x in lines)
- self._tw.line(indented)
- self._tw.line()
- self._tw.line('-- Docs: http://doc.pytest.org/en/latest/warnings.html')
-
- def summary_passes(self):
- if self.config.option.tbstyle != "no":
- if self.hasopt("P"):
- reports = self.getreports('passed')
- if not reports:
- return
- self.write_sep("=", "PASSES")
- for rep in reports:
- msg = self._getfailureheadline(rep)
- self.write_sep("_", msg)
- self._outrep_summary(rep)
-
- def print_teardown_sections(self, rep):
- for secname, content in rep.sections:
- if 'teardown' in secname:
- self._tw.sep('-', secname)
- if content[-1:] == "\n":
- content = content[:-1]
- self._tw.line(content)
-
- def summary_failures(self):
- if self.config.option.tbstyle != "no":
- reports = self.getreports('failed')
- if not reports:
- return
- self.write_sep("=", "FAILURES")
- for rep in reports:
- if self.config.option.tbstyle == "line":
- line = self._getcrashline(rep)
- self.write_line(line)
- else:
- msg = self._getfailureheadline(rep)
- markup = {'red': True, 'bold': True}
- self.write_sep("_", msg, **markup)
- self._outrep_summary(rep)
- for report in self.getreports(''):
- if report.nodeid == rep.nodeid and report.when == 'teardown':
- self.print_teardown_sections(report)
-
- def summary_errors(self):
- if self.config.option.tbstyle != "no":
- reports = self.getreports('error')
- if not reports:
- return
- self.write_sep("=", "ERRORS")
- for rep in self.stats['error']:
- msg = self._getfailureheadline(rep)
- if not hasattr(rep, 'when'):
- # collect
- msg = "ERROR collecting " + msg
- elif rep.when == "setup":
- msg = "ERROR at setup of " + msg
- elif rep.when == "teardown":
- msg = "ERROR at teardown of " + msg
- self.write_sep("_", msg)
- self._outrep_summary(rep)
-
- def _outrep_summary(self, rep):
- rep.toterminal(self._tw)
- for secname, content in rep.sections:
- self._tw.sep("-", secname)
- if content[-1:] == "\n":
- content = content[:-1]
- self._tw.line(content)
-
- def summary_stats(self):
- session_duration = time.time() - self._sessionstarttime
- (line, color) = build_summary_stats_line(self.stats)
- msg = "%s in %.2f seconds" % (line, session_duration)
- markup = {color: True, 'bold': True}
-
- if self.verbosity >= 0:
- self.write_sep("=", msg, **markup)
- if self.verbosity == -1:
- self.write_line(msg, **markup)
-
- def summary_deselected(self):
- if 'deselected' in self.stats:
- self.write_sep("=", "%d tests deselected" % (
- len(self.stats['deselected'])), bold=True)
-
-
-def repr_pythonversion(v=None):
- if v is None:
- v = sys.version_info
- try:
- return "%s.%s.%s-%s-%s" % v
- except (TypeError, ValueError):
- return str(v)
-
-
-def flatten(values):
- for x in values:
- if isinstance(x, (list, tuple)):
- for y in flatten(x):
- yield y
- else:
- yield x
-
-
-def build_summary_stats_line(stats):
- keys = ("failed passed skipped deselected "
- "xfailed xpassed warnings error").split()
- unknown_key_seen = False
- for key in stats.keys():
- if key not in keys:
- if key: # setup/teardown reports have an empty key, ignore them
- keys.append(key)
- unknown_key_seen = True
- parts = []
- for key in keys:
- val = stats.get(key, None)
- if val:
- parts.append("%d %s" % (len(val), key))
-
- if parts:
- line = ", ".join(parts)
- else:
- line = "no tests ran"
-
- if 'failed' in stats or 'error' in stats:
- color = 'red'
- elif 'warnings' in stats or unknown_key_seen:
- color = 'yellow'
- elif 'passed' in stats:
- color = 'green'
- else:
- color = 'yellow'
-
- return (line, color)
-
-
-def _plugin_nameversions(plugininfo):
- values = []
- for plugin, dist in plugininfo:
- # gets us name and version!
- name = '{dist.project_name}-{dist.version}'.format(dist=dist)
- # questionable convenience, but it keeps things short
- if name.startswith("pytest-"):
- name = name[7:]
- # we decided to print python package names
- # they can have more than one plugin
- if name not in values:
- values.append(name)
- return values
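
As a rough illustration of the summary logic above, the following sketch shows how build_summary_stats_line would fold a stats dictionary into the final session line, assuming the deleted module were still importable as _pytest.terminal; the stats entries are placeholder objects.

```python
from _pytest.terminal import build_summary_stats_line

# each stats value only needs a length, so plain placeholders suffice here
stats = {
    "failed": [object()],
    "passed": [object()] * 3,
    "skipped": [object()] * 2,
}

line, color = build_summary_stats_line(stats)
assert line == "1 failed, 3 passed, 2 skipped"
assert color == "red"  # any failure or error turns the summary line red
```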
diff --git a/lib/spack/external/pytest-fallback/_pytest/tmpdir.py b/lib/spack/external/pytest-fallback/_pytest/tmpdir.py
deleted file mode 100644
index da1b032237..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/tmpdir.py
+++ /dev/null
@@ -1,126 +0,0 @@
-""" support for providing temporary directories to test functions. """
-from __future__ import absolute_import, division, print_function
-
-import re
-
-import pytest
-import py
-from _pytest.monkeypatch import MonkeyPatch
-
-
-class TempdirFactory:
- """Factory for temporary directories under the common base temp directory.
-
- The base directory can be configured using the ``--basetemp`` option.
- """
-
- def __init__(self, config):
- self.config = config
- self.trace = config.trace.get("tmpdir")
-
- def ensuretemp(self, string, dir=1):
-        """ (deprecated) return a temporary directory path with
-        the given string as the trailing part. It is usually
-        better to use the 'tmpdir' function argument, which
-        provides a unique, empty, per-test-invocation directory.
-        """
- # py.log._apiwarn(">1.1", "use tmpdir function argument")
- return self.getbasetemp().ensure(string, dir=dir)
-
- def mktemp(self, basename, numbered=True):
- """Create a subdirectory of the base temporary directory and return it.
- If ``numbered``, ensure the directory is unique by adding a number
- prefix greater than any existing one.
- """
- basetemp = self.getbasetemp()
- if not numbered:
- p = basetemp.mkdir(basename)
- else:
- p = py.path.local.make_numbered_dir(prefix=basename,
- keep=0, rootdir=basetemp, lock_timeout=None)
- self.trace("mktemp", p)
- return p
-
- def getbasetemp(self):
- """ return base temporary directory. """
- try:
- return self._basetemp
- except AttributeError:
- basetemp = self.config.option.basetemp
- if basetemp:
- basetemp = py.path.local(basetemp)
- if basetemp.check():
- basetemp.remove()
- basetemp.mkdir()
- else:
- temproot = py.path.local.get_temproot()
- user = get_user()
- if user:
- # use a sub-directory in the temproot to speed-up
- # make_numbered_dir() call
- rootdir = temproot.join('pytest-of-%s' % user)
- else:
- rootdir = temproot
- rootdir.ensure(dir=1)
- basetemp = py.path.local.make_numbered_dir(prefix='pytest-',
- rootdir=rootdir)
- self._basetemp = t = basetemp.realpath()
- self.trace("new basetemp", t)
- return t
-
- def finish(self):
- self.trace("finish")
-
-
-def get_user():
- """Return the current user name, or None if getuser() does not work
- in the current environment (see #1010).
- """
- import getpass
- try:
- return getpass.getuser()
- except (ImportError, KeyError):
- return None
-
-
-# backward compatibility
-TempdirHandler = TempdirFactory
-
-
-def pytest_configure(config):
- """Create a TempdirFactory and attach it to the config object.
-
- This is to comply with existing plugins which expect the handler to be
- available at pytest_configure time, but ideally should be moved entirely
- to the tmpdir_factory session fixture.
- """
- mp = MonkeyPatch()
- t = TempdirFactory(config)
- config._cleanup.extend([mp.undo, t.finish])
- mp.setattr(config, '_tmpdirhandler', t, raising=False)
- mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False)
-
-
-@pytest.fixture(scope='session')
-def tmpdir_factory(request):
- """Return a TempdirFactory instance for the test session.
- """
- return request.config._tmpdirhandler
-
-
-@pytest.fixture
-def tmpdir(request, tmpdir_factory):
- """Return a temporary directory path object
- which is unique to each test function invocation,
- created as a sub directory of the base temporary
- directory. The returned object is a `py.path.local`_
- path object.
- """
- name = request.node.name
- name = re.sub(r"[\W]", "_", name)
- MAXVAL = 30
- if len(name) > MAXVAL:
- name = name[:MAXVAL]
- x = tmpdir_factory.mktemp(name, numbered=True)
- return x
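
A short usage sketch of the two fixtures defined above; the test names are illustrative, and both fixtures return `py.path.local` objects.

```python
def test_write_and_read(tmpdir):
    # tmpdir is a fresh, unique directory for this test invocation
    target = tmpdir.join("hello.txt")
    target.write("content")
    assert target.read() == "content"


def test_shared_data_dir(tmpdir_factory):
    # tmpdir_factory.mktemp() creates a numbered subdirectory of the base temp dir
    data_dir = tmpdir_factory.mktemp("data")
    assert data_dir.check(dir=1)
```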
diff --git a/lib/spack/external/pytest-fallback/_pytest/unittest.py b/lib/spack/external/pytest-fallback/_pytest/unittest.py
deleted file mode 100644
index 52c9813e8b..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/unittest.py
+++ /dev/null
@@ -1,239 +0,0 @@
-""" discovery and running of std-library "unittest" style tests. """
-from __future__ import absolute_import, division, print_function
-
-import sys
-import traceback
-
-# for transferring markers
-import _pytest._code
-from _pytest.config import hookimpl
-from _pytest.outcomes import fail, skip, xfail
-from _pytest.python import transfer_markers, Class, Module, Function
-from _pytest.skipping import MarkEvaluator
-
-
-def pytest_pycollect_makeitem(collector, name, obj):
- # has unittest been imported and is obj a subclass of its TestCase?
- try:
- if not issubclass(obj, sys.modules["unittest"].TestCase):
- return
- except Exception:
- return
- # yes, so let's collect it
- return UnitTestCase(name, parent=collector)
-
-
-class UnitTestCase(Class):
-    # marker for fixturemanager.getfixtureinfo()
- # to declare that our children do not support funcargs
- nofuncargs = True
-
- def setup(self):
- cls = self.obj
- if getattr(cls, '__unittest_skip__', False):
- return # skipped
- setup = getattr(cls, 'setUpClass', None)
- if setup is not None:
- setup()
- teardown = getattr(cls, 'tearDownClass', None)
- if teardown is not None:
- self.addfinalizer(teardown)
- super(UnitTestCase, self).setup()
-
- def collect(self):
- from unittest import TestLoader
- cls = self.obj
- if not getattr(cls, "__test__", True):
- return
- self.session._fixturemanager.parsefactories(self, unittest=True)
- loader = TestLoader()
- module = self.getparent(Module).obj
- foundsomething = False
- for name in loader.getTestCaseNames(self.obj):
- x = getattr(self.obj, name)
- if not getattr(x, '__test__', True):
- continue
- funcobj = getattr(x, 'im_func', x)
- transfer_markers(funcobj, cls, module)
- yield TestCaseFunction(name, parent=self)
- foundsomething = True
-
- if not foundsomething:
- runtest = getattr(self.obj, 'runTest', None)
- if runtest is not None:
- ut = sys.modules.get("twisted.trial.unittest", None)
- if ut is None or runtest != ut.TestCase.runTest:
- yield TestCaseFunction('runTest', parent=self)
-
-
-class TestCaseFunction(Function):
- _excinfo = None
-
- def setup(self):
- self._testcase = self.parent.obj(self.name)
- self._fix_unittest_skip_decorator()
- self._obj = getattr(self._testcase, self.name)
- if hasattr(self._testcase, 'setup_method'):
- self._testcase.setup_method(self._obj)
- if hasattr(self, "_request"):
- self._request._fillfixtures()
-
- def _fix_unittest_skip_decorator(self):
- """
-        The @unittest.skip decorator calls functools.wraps(self._testcase).
-        The call to functools.wraps() fails unless self._testcase
-        has a __name__ attribute. This is usually supplied automatically
-        if the test is a function or method, but we need to add it
-        manually here.
-
- See issue #1169
- """
- if sys.version_info[0] == 2:
- setattr(self._testcase, "__name__", self.name)
-
- def teardown(self):
- if hasattr(self._testcase, 'teardown_method'):
- self._testcase.teardown_method(self._obj)
- # Allow garbage collection on TestCase instance attributes.
- self._testcase = None
- self._obj = None
-
- def startTest(self, testcase):
- pass
-
- def _addexcinfo(self, rawexcinfo):
- # unwrap potential exception info (see twisted trial support below)
- rawexcinfo = getattr(rawexcinfo, '_rawexcinfo', rawexcinfo)
- try:
- excinfo = _pytest._code.ExceptionInfo(rawexcinfo)
- except TypeError:
- try:
- try:
- values = traceback.format_exception(*rawexcinfo)
- values.insert(0, "NOTE: Incompatible Exception Representation, "
- "displaying natively:\n\n")
- fail("".join(values), pytrace=False)
- except (fail.Exception, KeyboardInterrupt):
- raise
- except: # noqa
- fail("ERROR: Unknown Incompatible Exception "
- "representation:\n%r" % (rawexcinfo,), pytrace=False)
- except KeyboardInterrupt:
- raise
- except fail.Exception:
- excinfo = _pytest._code.ExceptionInfo()
- self.__dict__.setdefault('_excinfo', []).append(excinfo)
-
- def addError(self, testcase, rawexcinfo):
- self._addexcinfo(rawexcinfo)
-
- def addFailure(self, testcase, rawexcinfo):
- self._addexcinfo(rawexcinfo)
-
- def addSkip(self, testcase, reason):
- try:
- skip(reason)
- except skip.Exception:
- self._evalskip = MarkEvaluator(self, 'SkipTest')
- self._evalskip.result = True
- self._addexcinfo(sys.exc_info())
-
- def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
- try:
- xfail(str(reason))
- except xfail.Exception:
- self._addexcinfo(sys.exc_info())
-
- def addUnexpectedSuccess(self, testcase, reason=""):
- self._unexpectedsuccess = reason
-
- def addSuccess(self, testcase):
- pass
-
- def stopTest(self, testcase):
- pass
-
- def _handle_skip(self):
-        # implements the skipping machinery (see #2137),
-        # analogous to Python's Lib/unittest/case.py:run
- testMethod = getattr(self._testcase, self._testcase._testMethodName)
- if (getattr(self._testcase.__class__, "__unittest_skip__", False) or
- getattr(testMethod, "__unittest_skip__", False)):
- # If the class or method was skipped.
- skip_why = (getattr(self._testcase.__class__, '__unittest_skip_why__', '') or
- getattr(testMethod, '__unittest_skip_why__', ''))
- try: # PY3, unittest2 on PY2
- self._testcase._addSkip(self, self._testcase, skip_why)
- except TypeError: # PY2
- if sys.version_info[0] != 2:
- raise
- self._testcase._addSkip(self, skip_why)
- return True
- return False
-
- def runtest(self):
- if self.config.pluginmanager.get_plugin("pdbinvoke") is None:
- self._testcase(result=self)
- else:
- # disables tearDown and cleanups for post mortem debugging (see #1890)
- if self._handle_skip():
- return
- self._testcase.debug()
-
- def _prunetraceback(self, excinfo):
- Function._prunetraceback(self, excinfo)
- traceback = excinfo.traceback.filter(
- lambda x: not x.frame.f_globals.get('__unittest'))
- if traceback:
- excinfo.traceback = traceback
-
-
-@hookimpl(tryfirst=True)
-def pytest_runtest_makereport(item, call):
- if isinstance(item, TestCaseFunction):
- if item._excinfo:
- call.excinfo = item._excinfo.pop(0)
- try:
- del call.result
- except AttributeError:
- pass
-
-# twisted trial support
-
-
-@hookimpl(hookwrapper=True)
-def pytest_runtest_protocol(item):
- if isinstance(item, TestCaseFunction) and \
- 'twisted.trial.unittest' in sys.modules:
- ut = sys.modules['twisted.python.failure']
- Failure__init__ = ut.Failure.__init__
- check_testcase_implements_trial_reporter()
-
- def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
- captureVars=None):
- if exc_value is None:
- self._rawexcinfo = sys.exc_info()
- else:
- if exc_type is None:
- exc_type = type(exc_value)
- self._rawexcinfo = (exc_type, exc_value, exc_tb)
- try:
- Failure__init__(self, exc_value, exc_type, exc_tb,
- captureVars=captureVars)
- except TypeError:
- Failure__init__(self, exc_value, exc_type, exc_tb)
-
- ut.Failure.__init__ = excstore
- yield
- ut.Failure.__init__ = Failure__init__
- else:
- yield
-
-
-def check_testcase_implements_trial_reporter(done=[]):
- if done:
- return
- from zope.interface import classImplements
- from twisted.trial.itrial import IReporter
- classImplements(TestCaseFunction, IReporter)
- done.append(1)
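
For context, a minimal standard-library TestCase of the kind the collection hook above turns into a UnitTestCase with TestCaseFunction children; the class and method names are illustrative.

```python
import unittest


class TestArithmetic(unittest.TestCase):
    def setUp(self):
        self.value = 41

    def test_increment(self):
        self.assertEqual(self.value + 1, 42)

    @unittest.skip("reported through addSkip() above")
    def test_skipped(self):
        self.fail("never reached")
```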
diff --git a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/README.md b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/README.md
deleted file mode 100644
index b5fe6febb0..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-This directory vendors the `pluggy` module.
-
-For a more detailed discussion of the reasons for vendoring this
-package, please see [this issue](https://github.com/pytest-dev/pytest/issues/944).
-
-To update the current version, execute:
-
-```
-$ pip install -U pluggy==<version> --no-compile --target=_pytest/vendored_packages
-```
-
-And commit the modified files. The `pluggy-<version>.dist-info` directory
-created by `pip` should be added as well.
diff --git a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/__init__.py b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/__init__.py
+++ /dev/null
diff --git a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst
deleted file mode 100644
index da0e7a6ed7..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-
-Plugin registration and hook calling for Python
-===============================================
-
-This is the plugin manager as used by pytest but stripped
-of pytest specific details.
-
-During the 0.x series this plugin does not have much documentation
-except extensive docstrings in the pluggy.py module.
-
-
diff --git a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER
deleted file mode 100644
index a1b589e38a..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt
deleted file mode 100644
index 121017d086..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 holger krekel (rather uses bitbucket/hpk42)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA
deleted file mode 100644
index bd88517c94..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA
+++ /dev/null
@@ -1,40 +0,0 @@
-Metadata-Version: 2.0
-Name: pluggy
-Version: 0.4.0
-Summary: plugin and hook calling mechanisms for python
-Home-page: https://github.com/pytest-dev/pluggy
-Author: Holger Krekel
-Author-email: holger at merlinux.eu
-License: MIT license
-Platform: unix
-Platform: linux
-Platform: osx
-Platform: win32
-Classifier: Development Status :: 4 - Beta
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: POSIX
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Operating System :: MacOS :: MacOS X
-Classifier: Topic :: Software Development :: Testing
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Utilities
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-
-
-Plugin registration and hook calling for Python
-===============================================
-
-This is the plugin manager as used by pytest but stripped
-of pytest specific details.
-
-During the 0.x series this plugin does not have much documentation
-except extensive docstrings in the pluggy.py module.
-
-
diff --git a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD
deleted file mode 100644
index 3003a3bf2b..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD
+++ /dev/null
@@ -1,9 +0,0 @@
-pluggy.py,sha256=u0oG9cv-oLOkNvEBlwnnu8pp1AyxpoERgUO00S3rvpQ,31543
-pluggy-0.4.0.dist-info/DESCRIPTION.rst,sha256=ltvjkFd40LW_xShthp6RRVM6OB_uACYDFR3kTpKw7o4,307
-pluggy-0.4.0.dist-info/LICENSE.txt,sha256=ruwhUOyV1HgE9F35JVL9BCZ9vMSALx369I4xq9rhpkM,1134
-pluggy-0.4.0.dist-info/METADATA,sha256=pe2hbsqKFaLHC6wAQPpFPn0KlpcPfLBe_BnS4O70bfk,1364
-pluggy-0.4.0.dist-info/RECORD,,
-pluggy-0.4.0.dist-info/WHEEL,sha256=9Z5Xm-eel1bTS7e6ogYiKz0zmPEqDwIypurdHN1hR40,116
-pluggy-0.4.0.dist-info/metadata.json,sha256=T3go5L2qOa_-H-HpCZi3EoVKb8sZ3R-fOssbkWo2nvM,1119
-pluggy-0.4.0.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7
-pluggy-0.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
diff --git a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL
deleted file mode 100644
index 8b6dd1b5a8..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.29.0)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json
deleted file mode 100644
index cde22aff02..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X", "Topic :: Software Development :: Testing", "Topic :: Software Development :: Libraries", "Topic :: Utilities", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"], "extensions": {"python.details": {"contacts": [{"email": "holger at merlinux.eu", "name": "Holger Krekel", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "https://github.com/pytest-dev/pluggy"}}}, "generator": "bdist_wheel (0.29.0)", "license": "MIT license", "metadata_version": "2.0", "name": "pluggy", "platform": "unix", "summary": "plugin and hook calling mechanisms for python", "version": "0.4.0"} \ No newline at end of file
diff --git a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt
deleted file mode 100644
index 11bdb5c1f5..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-pluggy
diff --git a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy.py b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy.py
deleted file mode 100644
index 6f26552d73..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy.py
+++ /dev/null
@@ -1,782 +0,0 @@
-"""
-PluginManager, basic initialization and tracing.
-
-pluggy is the crystallized core of plugin management as used
-by some 150 plugins for pytest.
-
-Pluggy uses semantic versioning. Breaking changes are only foreseen for
-Major releases (incremented X in "X.Y.Z"). If you want to use pluggy in
-your project you should thus use a dependency restriction like
-"pluggy>=0.1.0,<1.0" to avoid surprises.
-
-pluggy is concerned with hook specification, hook implementations and hook
-calling. For any given hook specification a hook call invokes up to N implementations.
-A hook implementation can influence its position and type of execution:
-if attributed "tryfirst" or "trylast" it will be executed
-first or last. However, if attributed "hookwrapper" an implementation
-can wrap all calls to non-hookwrapper implementations. A hookwrapper
-can thus execute some code before and after the execution of other hooks.
-
-Hook specification is done by way of a regular python function where
-both the function name and the names of all its arguments are significant.
-Each hook implementation function is verified against the original specification
-function, including the names of all its arguments. To allow for hook specifications
-to evolve over the lifetime of a project, hook implementations can
-accept fewer arguments. One can thus add new arguments and semantics to
-a hook specification by adding another argument, typically without breaking
-existing hook implementations.
-
-The chosen approach is meant to let a hook designer think carefully about
-which objects are needed by an extension writer. By contrast, subclass-based
-extension mechanisms often expose a lot more state and behaviour than needed,
-thus restricting future developments.
-
-Pluggy currently consists of functionality for:
-
-- a way to register new hook specifications. Without a hook
- specification no hook calling can be performed.
-
-- a registry of plugins which contain hook implementation functions. It
- is possible to register plugins for which a hook specification is not yet
- known and validate all hooks when the system is in a more referentially
-  consistent state. Setting an "optionalhook" attribute on a hook
-  implementation will avoid PluginValidationErrors if a specification
-  is missing. This allows for optional integration between plugins.
-
-- a "hook" relay object from which you can launch 1:N calls to
- registered hook implementation functions
-
-- a mechanism for ordering hook implementation functions
-
-- mechanisms for two different types of 1:N calls: "firstresult" for when
- the call should stop when the first implementation returns a non-None result.
- And the other (default) way of guaranteeing that all hook implementations
- will be called and their non-None result collected.
-
-- mechanisms for "historic" extension points such that all newly
- registered functions will receive all hook calls that happened
- before their registration.
-
-- a mechanism for discovering plugin objects which are based on
- setuptools based entry points.
-
-- a simple tracing mechanism, including tracing of plugin calls and
- their arguments.
-
-"""
-import sys
-import inspect
-
-__version__ = '0.4.0'
-
-__all__ = ["PluginManager", "PluginValidationError", "HookCallError",
- "HookspecMarker", "HookimplMarker"]
-
-_py3 = sys.version_info > (3, 0)
-
-
-class HookspecMarker:
- """ Decorator helper class for marking functions as hook specifications.
-
- You can instantiate it with a project_name to get a decorator.
- Calling PluginManager.add_hookspecs later will discover all marked functions
- if the PluginManager uses the same project_name.
- """
-
- def __init__(self, project_name):
- self.project_name = project_name
-
- def __call__(self, function=None, firstresult=False, historic=False):
- """ if passed a function, directly sets attributes on the function
- which will make it discoverable to add_hookspecs(). If passed no
- function, returns a decorator which can be applied to a function
- later using the attributes supplied.
-
- If firstresult is True the 1:N hook call (N being the number of registered
- hook implementation functions) will stop at I<=N when the I'th function
- returns a non-None result.
-
- If historic is True calls to a hook will be memorized and replayed
- on later registered plugins.
-
- """
- def setattr_hookspec_opts(func):
- if historic and firstresult:
- raise ValueError("cannot have a historic firstresult hook")
- setattr(func, self.project_name + "_spec",
- dict(firstresult=firstresult, historic=historic))
- return func
-
- if function is not None:
- return setattr_hookspec_opts(function)
- else:
- return setattr_hookspec_opts
-
-
-class HookimplMarker:
- """ Decorator helper class for marking functions as hook implementations.
-
- You can instantiate with a project_name to get a decorator.
- Calling PluginManager.register later will discover all marked functions
- if the PluginManager uses the same project_name.
- """
- def __init__(self, project_name):
- self.project_name = project_name
-
- def __call__(self, function=None, hookwrapper=False, optionalhook=False,
- tryfirst=False, trylast=False):
-
- """ if passed a function, directly sets attributes on the function
- which will make it discoverable to register(). If passed no function,
- returns a decorator which can be applied to a function later using
- the attributes supplied.
-
- If optionalhook is True a missing matching hook specification will not result
- in an error (by default it is an error if no matching spec is found).
-
- If tryfirst is True this hook implementation will run as early as possible
-        in the chain of N hook implementations for a specification.
-
- If trylast is True this hook implementation will run as late as possible
- in the chain of N hook implementations.
-
-        If hookwrapper is True the hook implementation needs to execute exactly
-        one "yield". The code before the yield is run early, before any non-hookwrapper
-        function is run. The code after the yield is run after all non-hookwrapper
-        functions have run. The yield receives an ``_CallOutcome`` object representing
- the exception or result outcome of the inner calls (including other hookwrapper
- calls).
-
- """
- def setattr_hookimpl_opts(func):
- setattr(func, self.project_name + "_impl",
- dict(hookwrapper=hookwrapper, optionalhook=optionalhook,
- tryfirst=tryfirst, trylast=trylast))
- return func
-
- if function is None:
- return setattr_hookimpl_opts
- else:
- return setattr_hookimpl_opts(function)
-
-
-def normalize_hookimpl_opts(opts):
- opts.setdefault("tryfirst", False)
- opts.setdefault("trylast", False)
- opts.setdefault("hookwrapper", False)
- opts.setdefault("optionalhook", False)
-
-
-class _TagTracer:
- def __init__(self):
- self._tag2proc = {}
- self.writer = None
- self.indent = 0
-
- def get(self, name):
- return _TagTracerSub(self, (name,))
-
- def format_message(self, tags, args):
- if isinstance(args[-1], dict):
- extra = args[-1]
- args = args[:-1]
- else:
- extra = {}
-
- content = " ".join(map(str, args))
- indent = " " * self.indent
-
- lines = [
- "%s%s [%s]\n" % (indent, content, ":".join(tags))
- ]
-
- for name, value in extra.items():
- lines.append("%s %s: %s\n" % (indent, name, value))
- return lines
-
- def processmessage(self, tags, args):
- if self.writer is not None and args:
- lines = self.format_message(tags, args)
- self.writer(''.join(lines))
- try:
- self._tag2proc[tags](tags, args)
- except KeyError:
- pass
-
- def setwriter(self, writer):
- self.writer = writer
-
- def setprocessor(self, tags, processor):
- if isinstance(tags, str):
- tags = tuple(tags.split(":"))
- else:
- assert isinstance(tags, tuple)
- self._tag2proc[tags] = processor
-
-
-class _TagTracerSub:
- def __init__(self, root, tags):
- self.root = root
- self.tags = tags
-
- def __call__(self, *args):
- self.root.processmessage(self.tags, args)
-
- def setmyprocessor(self, processor):
- self.root.setprocessor(self.tags, processor)
-
- def get(self, name):
- return self.__class__(self.root, self.tags + (name,))
-
-
-def _raise_wrapfail(wrap_controller, msg):
- co = wrap_controller.gi_code
- raise RuntimeError("wrap_controller at %r %s:%d %s" %
- (co.co_name, co.co_filename, co.co_firstlineno, msg))
-
-
-def _wrapped_call(wrap_controller, func):
- """ Wrap calling to a function with a generator which needs to yield
- exactly once. The yield point will trigger calling the wrapped function
- and return its _CallOutcome to the yield point. The generator then needs
- to finish (raise StopIteration) in order for the wrapped call to complete.
- """
- try:
- next(wrap_controller) # first yield
- except StopIteration:
- _raise_wrapfail(wrap_controller, "did not yield")
- call_outcome = _CallOutcome(func)
- try:
- wrap_controller.send(call_outcome)
- _raise_wrapfail(wrap_controller, "has second yield")
- except StopIteration:
- pass
- return call_outcome.get_result()
-
-
-class _CallOutcome:
- """ Outcome of a function call, either an exception or a proper result.
- Calling the ``get_result`` method will return the result or reraise
- the exception raised when the function was called. """
- excinfo = None
-
- def __init__(self, func):
- try:
- self.result = func()
- except BaseException:
- self.excinfo = sys.exc_info()
-
- def force_result(self, result):
- self.result = result
- self.excinfo = None
-
- def get_result(self):
- if self.excinfo is None:
- return self.result
- else:
- ex = self.excinfo
- if _py3:
- raise ex[1].with_traceback(ex[2])
- _reraise(*ex) # noqa
-
-if not _py3:
- exec("""
-def _reraise(cls, val, tb):
- raise cls, val, tb
-""")
-
-
-class _TracedHookExecution:
- def __init__(self, pluginmanager, before, after):
- self.pluginmanager = pluginmanager
- self.before = before
- self.after = after
- self.oldcall = pluginmanager._inner_hookexec
- assert not isinstance(self.oldcall, _TracedHookExecution)
- self.pluginmanager._inner_hookexec = self
-
- def __call__(self, hook, hook_impls, kwargs):
- self.before(hook.name, hook_impls, kwargs)
- outcome = _CallOutcome(lambda: self.oldcall(hook, hook_impls, kwargs))
- self.after(outcome, hook.name, hook_impls, kwargs)
- return outcome.get_result()
-
- def undo(self):
- self.pluginmanager._inner_hookexec = self.oldcall
-
-
-class PluginManager(object):
- """ Core Pluginmanager class which manages registration
- of plugin objects and 1:N hook calling.
-
- You can register new hooks by calling ``add_hookspecs(module_or_class)``.
- You can register plugin objects (which contain hook implementations) by
- calling ``register(plugin)``. The PluginManager is initialized with a
- prefix (``implprefix``); attributes of registered plugin objects whose
- names start with that prefix are recognized as hook implementations.
-
- For debugging purposes you can call ``enable_tracing()``
- which will subsequently send debug information to the trace helper.
- """
-
- def __init__(self, project_name, implprefix=None):
- """ if implprefix is given implementation functions
- will be recognized if their name matches the implprefix. """
- self.project_name = project_name
- self._name2plugin = {}
- self._plugin2hookcallers = {}
- self._plugin_distinfo = []
- self.trace = _TagTracer().get("pluginmanage")
- self.hook = _HookRelay(self.trace.root.get("hook"))
- self._implprefix = implprefix
- self._inner_hookexec = lambda hook, methods, kwargs: \
- _MultiCall(methods, kwargs, hook.spec_opts).execute()
-
- def _hookexec(self, hook, methods, kwargs):
- # called from all hookcaller instances.
- # enable_tracing will set its own wrapping function at self._inner_hookexec
- return self._inner_hookexec(hook, methods, kwargs)
-
- def register(self, plugin, name=None):
- """ Register a plugin and return its canonical name or None if the name
- is blocked from registering. Raise a ValueError if the plugin is already
- registered. """
- plugin_name = name or self.get_canonical_name(plugin)
-
- if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers:
- if self._name2plugin.get(plugin_name, -1) is None:
- return # blocked plugin, return None to indicate no registration
- raise ValueError("Plugin already registered: %s=%s\n%s" %
- (plugin_name, plugin, self._name2plugin))
-
- # XXX if an error happens we should make sure no state has been
- # changed at point of return
- self._name2plugin[plugin_name] = plugin
-
- # register matching hook implementations of the plugin
- self._plugin2hookcallers[plugin] = hookcallers = []
- for name in dir(plugin):
- hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
- if hookimpl_opts is not None:
- normalize_hookimpl_opts(hookimpl_opts)
- method = getattr(plugin, name)
- hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
- hook = getattr(self.hook, name, None)
- if hook is None:
- hook = _HookCaller(name, self._hookexec)
- setattr(self.hook, name, hook)
- elif hook.has_spec():
- self._verify_hook(hook, hookimpl)
- hook._maybe_apply_history(hookimpl)
- hook._add_hookimpl(hookimpl)
- hookcallers.append(hook)
- return plugin_name
-
- def parse_hookimpl_opts(self, plugin, name):
- method = getattr(plugin, name)
- try:
- res = getattr(method, self.project_name + "_impl", None)
- except Exception:
- res = {}
- if res is not None and not isinstance(res, dict):
- # false positive
- res = None
- elif res is None and self._implprefix and name.startswith(self._implprefix):
- res = {}
- return res
-
- def unregister(self, plugin=None, name=None):
- """ unregister a plugin object and all its contained hook implementations
- from internal data structures. """
- if name is None:
- assert plugin is not None, "one of name or plugin needs to be specified"
- name = self.get_name(plugin)
-
- if plugin is None:
- plugin = self.get_plugin(name)
-
- # if self._name2plugin[name] == None registration was blocked: ignore
- if self._name2plugin.get(name):
- del self._name2plugin[name]
-
- for hookcaller in self._plugin2hookcallers.pop(plugin, []):
- hookcaller._remove_plugin(plugin)
-
- return plugin
-
- def set_blocked(self, name):
- """ block registrations of the given name, unregister if already registered. """
- self.unregister(name=name)
- self._name2plugin[name] = None
-
- def is_blocked(self, name):
- """ return True if the name blogs registering plugins of that name. """
- return name in self._name2plugin and self._name2plugin[name] is None
-
- def add_hookspecs(self, module_or_class):
- """ add new hook specifications defined in the given module_or_class.
- Functions are recognized if they have been decorated accordingly. """
- names = []
- for name in dir(module_or_class):
- spec_opts = self.parse_hookspec_opts(module_or_class, name)
- if spec_opts is not None:
- hc = getattr(self.hook, name, None)
- if hc is None:
- hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts)
- setattr(self.hook, name, hc)
- else:
- # plugins registered this hook without knowing the spec
- hc.set_specification(module_or_class, spec_opts)
- for hookfunction in (hc._wrappers + hc._nonwrappers):
- self._verify_hook(hc, hookfunction)
- names.append(name)
-
- if not names:
- raise ValueError("did not find any %r hooks in %r" %
- (self.project_name, module_or_class))
-
- def parse_hookspec_opts(self, module_or_class, name):
- method = getattr(module_or_class, name)
- return getattr(method, self.project_name + "_spec", None)
-
- def get_plugins(self):
- """ return the set of registered plugins. """
- return set(self._plugin2hookcallers)
-
- def is_registered(self, plugin):
- """ Return True if the plugin is already registered. """
- return plugin in self._plugin2hookcallers
-
- def get_canonical_name(self, plugin):
- """ Return canonical name for a plugin object. Note that a plugin
- may be registered under a different name which was specified
- by the caller of register(plugin, name). To obtain the name
- of a registered plugin use ``get_name(plugin)`` instead."""
- return getattr(plugin, "__name__", None) or str(id(plugin))
-
- def get_plugin(self, name):
- """ Return a plugin or None for the given name. """
- return self._name2plugin.get(name)
-
- def has_plugin(self, name):
- """ Return True if a plugin with the given name is registered. """
- return self.get_plugin(name) is not None
-
- def get_name(self, plugin):
- """ Return name for registered plugin or None if not registered. """
- for name, val in self._name2plugin.items():
- if plugin == val:
- return name
-
- def _verify_hook(self, hook, hookimpl):
- if hook.is_historic() and hookimpl.hookwrapper:
- raise PluginValidationError(
- "Plugin %r\nhook %r\nhistoric incompatible to hookwrapper" %
- (hookimpl.plugin_name, hook.name))
-
- for arg in hookimpl.argnames:
- if arg not in hook.argnames:
- raise PluginValidationError(
- "Plugin %r\nhook %r\nargument %r not available\n"
- "plugin definition: %s\n"
- "available hookargs: %s" %
- (hookimpl.plugin_name, hook.name, arg,
- _formatdef(hookimpl.function), ", ".join(hook.argnames)))
-
- def check_pending(self):
- """ Verify that all hooks which have not been verified against
- a hook specification are optional, otherwise raise PluginValidationError"""
- for name in self.hook.__dict__:
- if name[0] != "_":
- hook = getattr(self.hook, name)
- if not hook.has_spec():
- for hookimpl in (hook._wrappers + hook._nonwrappers):
- if not hookimpl.optionalhook:
- raise PluginValidationError(
- "unknown hook %r in plugin %r" %
- (name, hookimpl.plugin))
-
- def list_plugin_distinfo(self):
- """ return list of distinfo/plugin tuples for all setuptools registered
- plugins. """
- return list(self._plugin_distinfo)
-
- def list_name_plugin(self):
- """ return list of name/plugin pairs. """
- return list(self._name2plugin.items())
-
- def get_hookcallers(self, plugin):
- """ get all hook callers for the specified plugin. """
- return self._plugin2hookcallers.get(plugin)
-
- def add_hookcall_monitoring(self, before, after):
- """ add before/after tracing functions for all hooks
- and return an undo function which, when called,
- will remove the added tracers.
-
- ``before(hook_name, hook_impls, kwargs)`` will be called ahead
- of all hook calls and receives the hook name, a list
- of HookImpl instances and the keyword arguments for the hook call.
-
- ``after(outcome, hook_name, hook_impls, kwargs)`` receives the
- same arguments as ``before`` but also a :py:class:`_CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` object
- which represents the result of the overall hook call.
- """
- return _TracedHookExecution(self, before, after).undo
-
- def enable_tracing(self):
- """ enable tracing of hook calls and return an undo function. """
- hooktrace = self.hook._trace
-
- def before(hook_name, methods, kwargs):
- hooktrace.root.indent += 1
- hooktrace(hook_name, kwargs)
-
- def after(outcome, hook_name, methods, kwargs):
- if outcome.excinfo is None:
- hooktrace("finish", hook_name, "-->", outcome.result)
- hooktrace.root.indent -= 1
-
- return self.add_hookcall_monitoring(before, after)
-
- def subset_hook_caller(self, name, remove_plugins):
- """ Return a new _HookCaller instance for the named method
- which manages calls to all registered plugins except the
- ones from remove_plugins. """
- orig = getattr(self.hook, name)
- plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)]
- if plugins_to_remove:
- hc = _HookCaller(orig.name, orig._hookexec, orig._specmodule_or_class,
- orig.spec_opts)
- for hookimpl in (orig._wrappers + orig._nonwrappers):
- plugin = hookimpl.plugin
- if plugin not in plugins_to_remove:
- hc._add_hookimpl(hookimpl)
- # we also keep track of this hook caller so it
- # gets properly removed on plugin unregistration
- self._plugin2hookcallers.setdefault(plugin, []).append(hc)
- return hc
- return orig
-
-
-class _MultiCall:
- """ execute a call into multiple python functions/methods. """
-
- # XXX note that the __multicall__ argument is supported only
- # for pytest compatibility reasons. It was never officially
- # supported there and has been explicitly deprecated since 2.8,
- # so we can remove it soon, allowing us to avoid the recursion below
- # in execute() and to simplify/speed up the execute loop.
-
- def __init__(self, hook_impls, kwargs, specopts={}):
- self.hook_impls = hook_impls
- self.kwargs = kwargs
- self.kwargs["__multicall__"] = self
- self.specopts = specopts
-
- def execute(self):
- all_kwargs = self.kwargs
- self.results = results = []
- firstresult = self.specopts.get("firstresult")
-
- while self.hook_impls:
- hook_impl = self.hook_impls.pop()
- try:
- args = [all_kwargs[argname] for argname in hook_impl.argnames]
- except KeyError:
- for argname in hook_impl.argnames:
- if argname not in all_kwargs:
- raise HookCallError(
- "hook call must provide argument %r" % (argname,))
- if hook_impl.hookwrapper:
- return _wrapped_call(hook_impl.function(*args), self.execute)
- res = hook_impl.function(*args)
- if res is not None:
- if firstresult:
- return res
- results.append(res)
-
- if not firstresult:
- return results
-
- def __repr__(self):
- status = "%d meths" % (len(self.hook_impls),)
- if hasattr(self, "results"):
- status = ("%d results, " % len(self.results)) + status
- return "<_MultiCall %s, kwargs=%r>" % (status, self.kwargs)
-
-
-def varnames(func, startindex=None):
- """ return argument name tuple for a function, method, class or callable.
-
- In case of a class, its "__init__" method is considered.
- For methods the "self" parameter is not included unless you are passing
- an unbound method with Python 3 (which has no support for unbound methods).
- """
- cache = getattr(func, "__dict__", {})
- try:
- return cache["_varnames"]
- except KeyError:
- pass
- if inspect.isclass(func):
- try:
- func = func.__init__
- except AttributeError:
- return ()
- startindex = 1
- else:
- if not inspect.isfunction(func) and not inspect.ismethod(func):
- try:
- func = getattr(func, '__call__', func)
- except Exception:
- return ()
- if startindex is None:
- startindex = int(inspect.ismethod(func))
-
- try:
- rawcode = func.__code__
- except AttributeError:
- return ()
- try:
- x = rawcode.co_varnames[startindex:rawcode.co_argcount]
- except AttributeError:
- x = ()
- else:
- defaults = func.__defaults__
- if defaults:
- x = x[:-len(defaults)]
- try:
- cache["_varnames"] = x
- except TypeError:
- pass
- return x
-
-
-class _HookRelay:
- """ hook holder object for performing 1:N hook calls where N is the number
- of registered plugins.
-
- """
-
- def __init__(self, trace):
- self._trace = trace
-
-
-class _HookCaller(object):
- def __init__(self, name, hook_execute, specmodule_or_class=None, spec_opts=None):
- self.name = name
- self._wrappers = []
- self._nonwrappers = []
- self._hookexec = hook_execute
- if specmodule_or_class is not None:
- assert spec_opts is not None
- self.set_specification(specmodule_or_class, spec_opts)
-
- def has_spec(self):
- return hasattr(self, "_specmodule_or_class")
-
- def set_specification(self, specmodule_or_class, spec_opts):
- assert not self.has_spec()
- self._specmodule_or_class = specmodule_or_class
- specfunc = getattr(specmodule_or_class, self.name)
- argnames = varnames(specfunc, startindex=inspect.isclass(specmodule_or_class))
- assert "self" not in argnames # sanity check
- self.argnames = ["__multicall__"] + list(argnames)
- self.spec_opts = spec_opts
- if spec_opts.get("historic"):
- self._call_history = []
-
- def is_historic(self):
- return hasattr(self, "_call_history")
-
- def _remove_plugin(self, plugin):
- def remove(wrappers):
- for i, method in enumerate(wrappers):
- if method.plugin == plugin:
- del wrappers[i]
- return True
- if remove(self._wrappers) is None:
- if remove(self._nonwrappers) is None:
- raise ValueError("plugin %r not found" % (plugin,))
-
- def _add_hookimpl(self, hookimpl):
- if hookimpl.hookwrapper:
- methods = self._wrappers
- else:
- methods = self._nonwrappers
-
- if hookimpl.trylast:
- methods.insert(0, hookimpl)
- elif hookimpl.tryfirst:
- methods.append(hookimpl)
- else:
- # find last non-tryfirst method
- i = len(methods) - 1
- while i >= 0 and methods[i].tryfirst:
- i -= 1
- methods.insert(i + 1, hookimpl)
-
- def __repr__(self):
- return "<_HookCaller %r>" % (self.name,)
-
- def __call__(self, **kwargs):
- assert not self.is_historic()
- return self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)
-
- def call_historic(self, proc=None, kwargs=None):
- self._call_history.append((kwargs or {}, proc))
- # historizing hooks don't return results
- self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)
-
- def call_extra(self, methods, kwargs):
- """ Call the hook with some additional temporarily participating
- methods using the specified kwargs as call parameters. """
- old = list(self._nonwrappers), list(self._wrappers)
- for method in methods:
- opts = dict(hookwrapper=False, trylast=False, tryfirst=False)
- hookimpl = HookImpl(None, "<temp>", method, opts)
- self._add_hookimpl(hookimpl)
- try:
- return self(**kwargs)
- finally:
- self._nonwrappers, self._wrappers = old
-
- def _maybe_apply_history(self, method):
- if self.is_historic():
- for kwargs, proc in self._call_history:
- res = self._hookexec(self, [method], kwargs)
- if res and proc is not None:
- proc(res[0])
-
-
-class HookImpl:
- def __init__(self, plugin, plugin_name, function, hook_impl_opts):
- self.function = function
- self.argnames = varnames(self.function)
- self.plugin = plugin
- self.opts = hook_impl_opts
- self.plugin_name = plugin_name
- self.__dict__.update(hook_impl_opts)
-
-
-class PluginValidationError(Exception):
- """ plugin failed validation. """
-
-
-class HookCallError(Exception):
- """ Hook was called wrongly. """
-
-
-if hasattr(inspect, 'signature'):
- def _formatdef(func):
- return "%s%s" % (
- func.__name__,
- str(inspect.signature(func))
- )
-else:
- def _formatdef(func):
- return "%s%s" % (
- func.__name__,
- inspect.formatargspec(*inspect.getargspec(func))
- )
diff --git a/lib/spack/external/pytest-fallback/_pytest/warnings.py b/lib/spack/external/pytest-fallback/_pytest/warnings.py
deleted file mode 100644
index 926b1f5811..0000000000
--- a/lib/spack/external/pytest-fallback/_pytest/warnings.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from __future__ import absolute_import, division, print_function
-
-import warnings
-from contextlib import contextmanager
-
-import pytest
-
-from _pytest import compat
-
-
-def _setoption(wmod, arg):
- """
- Copy of the warnings._setoption function, but does not escape arguments.
- """
- parts = arg.split(':')
- if len(parts) > 5:
- raise wmod._OptionError("too many fields (max 5): %r" % (arg,))
- while len(parts) < 5:
- parts.append('')
- action, message, category, module, lineno = [s.strip()
- for s in parts]
- action = wmod._getaction(action)
- category = wmod._getcategory(category)
- if lineno:
- try:
- lineno = int(lineno)
- if lineno < 0:
- raise ValueError
- except (ValueError, OverflowError):
- raise wmod._OptionError("invalid lineno %r" % (lineno,))
- else:
- lineno = 0
- wmod.filterwarnings(action, message, category, module, lineno)
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("pytest-warnings")
- group.addoption(
- '-W', '--pythonwarnings', action='append',
- help="set which warnings to report, see -W option of python itself.")
- parser.addini("filterwarnings", type="linelist",
- help="Each line specifies a pattern for "
- "warnings.filterwarnings. "
- "Processed after -W and --pythonwarnings.")
-
-
-@contextmanager
-def catch_warnings_for_item(item):
- """
- Catches the warnings generated during setup/call/teardown execution
- of the given item and, after it is done, posts them as warnings to this
- item.
- """
- args = item.config.getoption('pythonwarnings') or []
- inifilters = item.config.getini("filterwarnings")
- with warnings.catch_warnings(record=True) as log:
- for arg in args:
- warnings._setoption(arg)
-
- for arg in inifilters:
- _setoption(warnings, arg)
-
- mark = item.get_marker('filterwarnings')
- if mark:
- for arg in mark.args:
- warnings._setoption(arg)
-
- yield
-
- for warning in log:
- warn_msg = warning.message
- unicode_warning = False
-
- if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args):
- new_args = [compat.safe_str(m) for m in warn_msg.args]
- unicode_warning = warn_msg.args != new_args
- warn_msg.args = new_args
-
- msg = warnings.formatwarning(
- warn_msg, warning.category,
- warning.filename, warning.lineno, warning.line)
- item.warn("unused", msg)
-
- if unicode_warning:
- warnings.warn(
- "Warning is using unicode non convertible to ascii, "
- "converting to a safe representation:\n %s" % msg,
- UnicodeWarning)
-
-
-@pytest.hookimpl(hookwrapper=True)
-def pytest_runtest_protocol(item):
- with catch_warnings_for_item(item):
- yield
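A minimal, hypothetical test module showing the filterwarnings marker consumed by catch_warnings_for_item() above (not part of the deleted source); the test name is made up, and the filter string uses the standard "action:message:category:module:lineno" syntax parsed by _setoption:

    # Hypothetical test module illustrating the marker handled by the removed plugin.
    import warnings

    import pytest

    @pytest.mark.filterwarnings("error::DeprecationWarning")
    def test_deprecation_becomes_error():
        # the "error" action turns the matching warning into a raised exception
        with pytest.raises(DeprecationWarning):
            warnings.warn("old api", DeprecationWarning)

The same filter strings can also be listed under the filterwarnings ini option registered in pytest_addoption() above, which is processed after -W/--pythonwarnings.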