Diffstat (limited to 'lib/spack/external/_pytest/skipping.py')
-rw-r--r-- | lib/spack/external/_pytest/skipping.py | 147
1 file changed, 72 insertions, 75 deletions
diff --git a/lib/spack/external/_pytest/skipping.py b/lib/spack/external/_pytest/skipping.py
index a8eaea98aa..b92800d10b 100644
--- a/lib/spack/external/_pytest/skipping.py
+++ b/lib/spack/external/_pytest/skipping.py
@@ -1,18 +1,21 @@
 """ support for skip/xfail functions and markers. """
+from __future__ import absolute_import, division, print_function
+
 import os
 import sys
 import traceback
 
 import py
-import pytest
+from _pytest.config import hookimpl
 from _pytest.mark import MarkInfo, MarkDecorator
+from _pytest.outcomes import fail, skip, xfail, TEST_OUTCOME
 
 
 def pytest_addoption(parser):
     group = parser.getgroup("general")
     group.addoption('--runxfail',
-           action="store_true", dest="runxfail", default=False,
-           help="run tests even if they are marked xfail")
+                    action="store_true", dest="runxfail", default=False,
+                    help="run tests even if they are marked xfail")
 
     parser.addini("xfail_strict", "default for the strict parameter of xfail "
                                   "markers when not given explicitly (default: "
@@ -23,53 +26,38 @@ def pytest_addoption(parser):
 
 def pytest_configure(config):
     if config.option.runxfail:
+        # yay a hack
+        import pytest
         old = pytest.xfail
         config._cleanup.append(lambda: setattr(pytest, "xfail", old))
 
         def nop(*args, **kwargs):
             pass
 
-        nop.Exception = XFailed
+        nop.Exception = xfail.Exception
         setattr(pytest, "xfail", nop)
 
     config.addinivalue_line("markers",
-        "skip(reason=None): skip the given test function with an optional reason. "
-        "Example: skip(reason=\"no way of currently testing this\") skips the "
-        "test."
-    )
+                            "skip(reason=None): skip the given test function with an optional reason. "
+                            "Example: skip(reason=\"no way of currently testing this\") skips the "
+                            "test."
+                            )
     config.addinivalue_line("markers",
-        "skipif(condition): skip the given test function if eval(condition) "
-        "results in a True value. Evaluation happens within the "
-        "module global context. Example: skipif('sys.platform == \"win32\"') "
-        "skips the test if we are on the win32 platform. see "
-        "http://pytest.org/latest/skipping.html"
-    )
+                            "skipif(condition): skip the given test function if eval(condition) "
+                            "results in a True value. Evaluation happens within the "
+                            "module global context. Example: skipif('sys.platform == \"win32\"') "
+                            "skips the test if we are on the win32 platform. see "
+                            "http://pytest.org/latest/skipping.html"
+                            )
     config.addinivalue_line("markers",
-        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
-        "mark the the test function as an expected failure if eval(condition) "
-        "has a True value. Optionally specify a reason for better reporting "
-        "and run=False if you don't even want to execute the test function. "
-        "If only specific exception(s) are expected, you can list them in "
-        "raises, and if the test fails in other ways, it will be reported as "
-        "a true failure. See http://pytest.org/latest/skipping.html"
-    )
-
-
-def pytest_namespace():
-    return dict(xfail=xfail)
-
-
-class XFailed(pytest.fail.Exception):
-    """ raised from an explicit call to pytest.xfail() """
-
-
-def xfail(reason=""):
-    """ xfail an executing test or setup functions with the given reason."""
-    __tracebackhide__ = True
-    raise XFailed(reason)
-
-
-xfail.Exception = XFailed
+                            "xfail(condition, reason=None, run=True, raises=None, strict=False): "
+                            "mark the test function as an expected failure if eval(condition) "
+                            "has a True value. Optionally specify a reason for better reporting "
+                            "and run=False if you don't even want to execute the test function. "
+                            "If only specific exception(s) are expected, you can list them in "
+                            "raises, and if the test fails in other ways, it will be reported as "
+                            "a true failure. See http://pytest.org/latest/skipping.html"
+                            )
 
 
 class MarkEvaluator:
@@ -97,51 +85,50 @@ class MarkEvaluator:
     def istrue(self):
         try:
             return self._istrue()
-        except Exception:
+        except TEST_OUTCOME:
             self.exc = sys.exc_info()
             if isinstance(self.exc[1], SyntaxError):
-                msg = [" " * (self.exc[1].offset + 4) + "^",]
+                msg = [" " * (self.exc[1].offset + 4) + "^", ]
                 msg.append("SyntaxError: invalid syntax")
             else:
                 msg = traceback.format_exception_only(*self.exc[:2])
-            pytest.fail("Error evaluating %r expression\n"
-                        "    %s\n"
-                        "%s"
-                        %(self.name, self.expr, "\n".join(msg)),
-                        pytrace=False)
+            fail("Error evaluating %r expression\n"
+                 "    %s\n"
+                 "%s"
+                 % (self.name, self.expr, "\n".join(msg)),
+                 pytrace=False)
 
     def _getglobals(self):
         d = {'os': os, 'sys': sys, 'config': self.item.config}
-        d.update(self.item.obj.__globals__)
+        if hasattr(self.item, 'obj'):
+            d.update(self.item.obj.__globals__)
         return d
 
     def _istrue(self):
         if hasattr(self, 'result'):
             return self.result
         if self.holder:
-            d = self._getglobals()
             if self.holder.args or 'condition' in self.holder.kwargs:
                 self.result = False
                 # "holder" might be a MarkInfo or a MarkDecorator; only
                 # MarkInfo keeps track of all parameters it received in an
                 # _arglist attribute
-                if hasattr(self.holder, '_arglist'):
-                    arglist = self.holder._arglist
-                else:
-                    arglist = [(self.holder.args, self.holder.kwargs)]
-                for args, kwargs in arglist:
+                marks = getattr(self.holder, '_marks', None) \
+                    or [self.holder.mark]
+                for _, args, kwargs in marks:
                     if 'condition' in kwargs:
                         args = (kwargs['condition'],)
                     for expr in args:
                         self.expr = expr
                         if isinstance(expr, py.builtin._basestring):
+                            d = self._getglobals()
                             result = cached_eval(self.item.config, expr, d)
                         else:
                             if "reason" not in kwargs:
                                 # XXX better be checked at collection time
                                 msg = "you need to specify reason=STRING " \
                                       "when using booleans as conditions."
-                                pytest.fail(msg)
+                                fail(msg)
                             result = bool(expr)
                         if result:
                             self.result = True
@@ -165,7 +152,7 @@ class MarkEvaluator:
         return expl
 
 
-@pytest.hookimpl(tryfirst=True)
+@hookimpl(tryfirst=True)
 def pytest_runtest_setup(item):
     # Check if skip or skipif are specified as pytest marks
 
@@ -174,23 +161,23 @@ def pytest_runtest_setup(item):
         eval_skipif = MarkEvaluator(item, 'skipif')
         if eval_skipif.istrue():
             item._evalskip = eval_skipif
-            pytest.skip(eval_skipif.getexplanation())
+            skip(eval_skipif.getexplanation())
 
     skip_info = item.keywords.get('skip')
     if isinstance(skip_info, (MarkInfo, MarkDecorator)):
         item._evalskip = True
         if 'reason' in skip_info.kwargs:
-            pytest.skip(skip_info.kwargs['reason'])
+            skip(skip_info.kwargs['reason'])
         elif skip_info.args:
-            pytest.skip(skip_info.args[0])
+            skip(skip_info.args[0])
         else:
-            pytest.skip("unconditional skip")
+            skip("unconditional skip")
 
     item._evalxfail = MarkEvaluator(item, 'xfail')
     check_xfail_no_run(item)
 
 
-@pytest.mark.hookwrapper
+@hookimpl(hookwrapper=True)
 def pytest_pyfunc_call(pyfuncitem):
     check_xfail_no_run(pyfuncitem)
     outcome = yield
@@ -205,7 +192,7 @@ def check_xfail_no_run(item):
         evalxfail = item._evalxfail
         if evalxfail.istrue():
             if not evalxfail.get('run', True):
-                pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
+                xfail("[NOTRUN] " + evalxfail.getexplanation())
 
 
 def check_strict_xfail(pyfuncitem):
@@ -217,10 +204,10 @@ def check_strict_xfail(pyfuncitem):
         if is_strict_xfail:
             del pyfuncitem._evalxfail
             explanation = evalxfail.getexplanation()
-            pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
+            fail('[XPASS(strict)] ' + explanation, pytrace=False)
 
 
-@pytest.hookimpl(hookwrapper=True)
+@hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     outcome = yield
     rep = outcome.get_result()
@@ -240,11 +227,11 @@ def pytest_runtest_makereport(item, call):
         rep.wasxfail = rep.longrepr
     elif item.config.option.runxfail:
         pass   # don't interefere
-    elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
+    elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
         rep.wasxfail = "reason: " + call.excinfo.value.msg
         rep.outcome = "skipped"
     elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
-        evalxfail.istrue():
+            evalxfail.istrue():
         if call.excinfo:
             if evalxfail.invalidraise(call.excinfo.value):
                 rep.outcome = "failed"
@@ -270,6 +257,8 @@ def pytest_runtest_makereport(item, call):
             rep.longrepr = filename, line, reason
 
 # called by terminalreporter progress reporting
+
+
 def pytest_report_teststatus(report):
     if hasattr(report, "wasxfail"):
         if report.skipped:
@@ -278,10 +267,12 @@ def pytest_report_teststatus(report):
             return "xpassed", "X", ("XPASS", {'yellow': True})
 
 # called by the terminalreporter instance/plugin
+
+
 def pytest_terminal_summary(terminalreporter):
     tr = terminalreporter
     if not tr.reportchars:
-        #for name in "xfailed skipped failed xpassed":
+        # for name in "xfailed skipped failed xpassed":
         #    if not tr.stats.get(name, 0):
         #        tr.write_line("HINT: use '-r' option to see extra "
         #              "summary info about tests")
@@ -308,12 +299,14 @@ def pytest_terminal_summary(terminalreporter):
         for line in lines:
             tr._tw.line(line)
 
+
 def show_simple(terminalreporter, lines, stat, format):
     failed = terminalreporter.stats.get(stat)
     if failed:
         for rep in failed:
             pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
-            lines.append(format %(pos,))
+            lines.append(format % (pos,))
+
 
 def show_xfailed(terminalreporter, lines):
     xfailed = terminalreporter.stats.get("xfailed")
     if xfailed:
@@ -325,13 +318,15 @@ def show_xfailed(terminalreporter, lines):
             if reason:
                 lines.append("  " + str(reason))
 
+
 def show_xpassed(terminalreporter, lines):
     xpassed = terminalreporter.stats.get("xpassed")
     if xpassed:
         for rep in xpassed:
             pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
             reason = rep.wasxfail
-            lines.append("XPASS %s %s" %(pos, reason))
+            lines.append("XPASS %s %s" % (pos, reason))
+
 
 def cached_eval(config, expr, d):
     if not hasattr(config, '_evalcache'):
@@ -351,25 +346,27 @@ def folded_skips(skipped):
         key = event.longrepr
         assert len(key) == 3, (event, key)
         d.setdefault(key, []).append(event)
-    l = []
+    values = []
     for key, events in d.items():
-        l.append((len(events),) + key)
-    return l
+        values.append((len(events),) + key)
+    return values
+
 
 def show_skipped(terminalreporter, lines):
     tr = terminalreporter
     skipped = tr.stats.get('skipped', [])
     if skipped:
-        #if not tr.hasopt('skipped'):
+        # if not tr.hasopt('skipped'):
         #    tr.write_line(
         #        "%d skipped tests, specify -rs for more info" %
         #        len(skipped))
         #    return
         fskips = folded_skips(skipped)
         if fskips:
-            #tr.write_sep("_", "skipped test summary")
+            # tr.write_sep("_", "skipped test summary")
             for num, fspath, lineno, reason in fskips:
                 if reason.startswith("Skipped: "):
                     reason = reason[9:]
-                lines.append("SKIP [%d] %s:%d: %s" %
-                    (num, fspath, lineno, reason))
+                lines.append(
+                    "SKIP [%d] %s:%d: %s" %
+                    (num, fspath, lineno + 1, reason))
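For context, the file touched here implements pytest's built-in handling of the skip, skipif, and xfail markers: pytest_runtest_setup() evaluates the skip conditions before a test runs, and pytest_runtest_makereport() turns an expected failure into an "xfailed" report. The sketch below is illustrative only and not part of this commit (the test names are hypothetical); it shows the public pytest.mark API that this plugin evaluates:

    # Illustrative test module exercising the markers handled by _pytest/skipping.py.
    import sys

    import pytest


    @pytest.mark.skip(reason="no way of currently testing this")
    def test_unconditional_skip():
        assert False  # never executed; reported as "skipped"


    @pytest.mark.skipif(sys.platform == "win32", reason="POSIX-only behavior")
    def test_posix_only():
        assert True  # skipped on win32, run everywhere else


    @pytest.mark.xfail(raises=ZeroDivisionError, strict=True)
    def test_known_bug():
        1 / 0  # expected failure; reported as "xfailed", any other outcome fails the run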