""" support for skip/xfail functions and markers. """
from __future__ import absolute_import, division, print_function

from _pytest.config import hookimpl
from _pytest.mark.evaluate import MarkEvaluator
from _pytest.outcomes import fail, skip, xfail


def pytest_addoption(parser):
    group = parser.getgroup("general")
    group.addoption(
        "--runxfail",
        action="store_true",
        dest="runxfail",
        default=False,
        help="run tests even if they are marked xfail",
    )

    parser.addini(
        "xfail_strict",
        "default for the strict parameter of xfail "
        "markers when not given explicitly (default: False)",
        default=False,
        type="bool",
    )
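
# Illustrative usage of the option and ini value registered above (the flag
# and ini name are real; the project configuration shown is hypothetical):
#
#     pytest --runxfail        # run xfail-marked tests as if they were unmarked
#
#     # pytest.ini
#     [pytest]
#     xfail_strict = true      # unexpected passes become hard failures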


def pytest_configure(config):
    if config.option.runxfail:
        # yay a hack
        import pytest

        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = xfail.Exception
        setattr(pytest, "xfail", nop)

    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition): skip the given test function if eval(condition) "
        "results in a True value. Evaluation happens within the "
        "module global context. Example: skipif('sys.platform == \"win32\"') "
        "skips the test if we are on the win32 platform. See "
        "https://docs.pytest.org/en/latest/skipping.html",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
        "mark the test function as an expected failure if eval(condition) "
        "has a True value. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/latest/skipping.html",
    )
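
# A minimal sketch of a test module exercising the three markers registered
# above (hypothetical tests, not part of this plugin):
#
#     import sys
#     import pytest
#
#     @pytest.mark.skip(reason="no way of currently testing this")
#     def test_skipped():
#         ...
#
#     @pytest.mark.skipif('sys.platform == "win32"')
#     def test_not_on_windows():
#         ...
#
#     @pytest.mark.xfail(raises=IndexError)
#     def test_known_bug():
#         [][0]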


@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    # Check if skip or skipif are specified as pytest marks
    item._skipped_by_mark = False
    eval_skipif = MarkEvaluator(item, "skipif")
    if eval_skipif.istrue():
        item._skipped_by_mark = True
        skip(eval_skipif.getexplanation())

    for skip_info in item.iter_markers(name="skip"):
        item._skipped_by_mark = True
        if "reason" in skip_info.kwargs:
            skip(skip_info.kwargs["reason"])
        elif skip_info.args:
            skip(skip_info.args[0])
        else:
            skip("unconditional skip")

    item._evalxfail = MarkEvaluator(item, "xfail")
    check_xfail_no_run(item)


@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
    check_xfail_no_run(pyfuncitem)
    outcome = yield
    passed = outcome.excinfo is None
    if passed:
        check_strict_xfail(pyfuncitem)


def check_xfail_no_run(item):
    """check xfail(run=False)"""
    if not item.config.option.runxfail:
        evalxfail = item._evalxfail
        if evalxfail.istrue():
            if not evalxfail.get("run", True):
                xfail("[NOTRUN] " + evalxfail.getexplanation())
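
# For example, a test marked like this (hypothetical) is reported as xfailed
# with a "[NOTRUN]" prefix and is never executed:
#
#     @pytest.mark.xfail(run=False, reason="would segfault the interpreter")
#     def test_crasher():
#         ...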


def check_strict_xfail(pyfuncitem):
    """check xfail(strict=True) for the given PASSING test"""
    evalxfail = pyfuncitem._evalxfail
    if evalxfail.istrue():
        strict_default = pyfuncitem.config.getini("xfail_strict")
        is_strict_xfail = evalxfail.get("strict", strict_default)
        if is_strict_xfail:
            del pyfuncitem._evalxfail
            explanation = evalxfail.getexplanation()
            fail("[XPASS(strict)] " + explanation, pytrace=False)
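
# Sketch of the strict behaviour (hypothetical test): with strict=True, or
# with xfail_strict = true in the ini file, a *passing* xfail-marked test is
# reported as "[XPASS(strict)]" and fails the run instead of xpassing:
#
#     @pytest.mark.xfail(strict=True, reason="flaky on CI")
#     def test_sometimes_passes():
#         assert True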


@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    evalxfail = getattr(item, "_evalxfail", None)
    # unittest special case, see setting of _unexpectedsuccess
    if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
        from _pytest.compat import _is_unittest_unexpected_success_a_failure

        if item._unexpectedsuccess:
            rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
        else:
            rep.longrepr = "Unexpected success"
        if _is_unittest_unexpected_success_a_failure():
            rep.outcome = "failed"
        else:
            rep.outcome = "passed"
        rep.wasxfail = rep.longrepr
    elif item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
        if call.excinfo:
            if evalxfail.invalidraise(call.excinfo.value):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = evalxfail.getexplanation()
        elif call.when == "call":
            strict_default = item.config.getini("xfail_strict")
            is_strict_xfail = evalxfail.get("strict", strict_default)
            explanation = evalxfail.getexplanation()
            if is_strict_xfail:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] {}".format(explanation)
            else:
                rep.outcome = "passed"
                rep.wasxfail = explanation
    elif (
        getattr(item, "_skipped_by_mark", False)
        and rep.skipped
        and type(rep.longrepr) is tuple
    ):
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest
        filename, line, reason = rep.longrepr
        filename, line = item.location[:2]
        rep.longrepr = filename, line, reason
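
# The raises= branch above means a test like this (hypothetical) is only
# reported as xfailed when it raises IndexError; any other exception is
# flagged by evalxfail.invalidraise() and reported as a true failure:
#
#     @pytest.mark.xfail(raises=IndexError)
#     def test_wrong_exception():
#         raise TypeError()  # reported as failed, not xfailed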


# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "xfail"
        elif report.passed:
            return "xpassed", "X", ("XPASS", {"yellow": True})
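
# The triple returned above follows the pytest_report_teststatus contract:
# (category used for terminalreporter.stats, short progress letter, verbose word).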


# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
    tr = terminalreporter
    if not tr.reportchars:
        # for name in "xfailed skipped failed xpassed":
        #     if not tr.stats.get(name, 0):
        #         tr.write_line("HINT: use '-r' option to see extra "
        #                       "summary info about tests")
        #         break
        return

    lines = []
    for char in tr.reportchars:
        action = REPORTCHAR_ACTIONS.get(char, lambda tr, lines: None)
        action(terminalreporter, lines)

    if lines:
        tr._tw.sep("=", "short test summary info")
        for line in lines:
            tr._tw.line(line)


def show_simple(terminalreporter, lines, stat, format):
    reports = terminalreporter.stats.get(stat)
    if reports:
        for rep in reports:
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            lines.append(format % (pos,))


def show_xfailed(terminalreporter, lines):
    xfailed = terminalreporter.stats.get("xfailed")
    if xfailed:
        for rep in xfailed:
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            reason = rep.wasxfail
            lines.append("XFAIL %s" % (pos,))
            if reason:
                lines.append("  " + str(reason))


def show_xpassed(terminalreporter, lines):
    xpassed = terminalreporter.stats.get("xpassed")
    if xpassed:
        for rep in xpassed:
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            reason = rep.wasxfail
            lines.append("XPASS %s %s" % (pos, reason))


def folded_skips(skipped):
    d = {}
    for event in skipped:
        key = event.longrepr
        assert len(key) == 3, (event, key)
        keywords = getattr(event, "keywords", {})
        # folding reports with global pytestmark variable
        # this is a workaround, because for now we cannot identify the scope of a skip marker
        # TODO: revisit once the scoping of marks is fixed
        when = getattr(event, "when", None)
        if when == "setup" and "skip" in keywords and "pytestmark" not in keywords:
            key = (key[0], None, key[2])
        d.setdefault(key, []).append(event)
    values = []
    for key, events in d.items():
        values.append((len(events),) + key)
    return values
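
# Illustrative example: three skip reports that share the longrepr
# ("test_mod.py", 12, "Skipped: not ready") fold into the single entry
# (3, "test_mod.py", 12, "Skipped: not ready").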


def show_skipped(terminalreporter, lines):
    tr = terminalreporter
    skipped = tr.stats.get("skipped", [])
    if skipped:
        # if not tr.hasopt('skipped'):
        #     tr.write_line(
        #         "%d skipped tests, specify -rs for more info" %
        #         len(skipped))
        #     return
        fskips = folded_skips(skipped)
        if fskips:
            # tr.write_sep("_", "skipped test summary")
            for num, fspath, lineno, reason in fskips:
                if reason.startswith("Skipped: "):
                    reason = reason[9:]
                if lineno is not None:
                    lines.append(
                        "SKIP [%d] %s:%d: %s" % (num, fspath, lineno + 1, reason)
                    )
                else:
                    lines.append("SKIP [%d] %s: %s" % (num, fspath, reason))


def shower(stat, format):

    def show_(terminalreporter, lines):
        return show_simple(terminalreporter, lines, stat, format)

    return show_


REPORTCHAR_ACTIONS = {
    "x": show_xfailed,
    "X": show_xpassed,
    "f": shower("failed", "FAIL %s"),
    "F": shower("failed", "FAIL %s"),
    "s": show_skipped,
    "S": show_skipped,
    "p": shower("passed", "PASSED %s"),
    "E": shower("error", "ERROR %s"),
}
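
# The keys above are the characters understood by pytest's -r option; e.g.
# "pytest -rxXs" prints the xfailed, xpassed, and skipped summaries.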