  1. """Utilities for processing .test files containing test case descriptions."""
  2. from __future__ import annotations
  3. import os
  4. import os.path
  5. import posixpath
  6. import re
  7. import shutil
  8. import sys
  9. import tempfile
  10. from abc import abstractmethod
  11. from dataclasses import dataclass
  12. from pathlib import Path
  13. from typing import Any, Final, Iterator, NamedTuple, NoReturn, Pattern, Union
  14. from typing_extensions import TypeAlias as _TypeAlias
  15. import pytest
  16. from mypy import defaults
  17. from mypy.test.config import PREFIX, test_data_prefix, test_temp_dir
  18. root_dir = os.path.normpath(PREFIX)
  19. # Debuggers that we support for debugging mypyc run tests
  20. # implementation of using each of these debuggers is in test_run.py
  21. # TODO: support more debuggers
  22. SUPPORTED_DEBUGGERS: Final = ["gdb", "lldb"]
  23. # File modify/create operation: copy module contents from source_path.
  24. class UpdateFile(NamedTuple):
  25. module: str
  26. content: str
  27. target_path: str
  28. # File delete operation: delete module file.
  29. class DeleteFile(NamedTuple):
  30. module: str
  31. path: str
  32. FileOperation: _TypeAlias = Union[UpdateFile, DeleteFile]
  33. def _file_arg_to_module(filename: str) -> str:
  34. filename, _ = os.path.splitext(filename)
  35. parts = filename.split("/") # not os.sep since it comes from test data
  36. if parts[-1] == "__init__":
  37. parts.pop()
  38. return ".".join(parts)
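
# Illustrative examples (inputs are hypothetical; test data always uses forward slashes):
#     _file_arg_to_module("pkg/mod.py")      -> "pkg.mod"
#     _file_arg_to_module("pkg/__init__.py") -> "pkg"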


def parse_test_case(case: DataDrivenTestCase) -> None:
    """Parse and prepare a single case from suite with test case descriptions.

    This method is part of the setup phase, just before the test case is run.
    """
    test_items = parse_test_data(case.data, case.name)
    base_path = case.suite.base_path
    if case.suite.native_sep:
        join = os.path.join
    else:
        join = posixpath.join

    out_section_missing = case.suite.required_out_section
    normalize_output = True

    files: list[tuple[str, str]] = []  # path and contents
    output_files: list[tuple[str, str | Pattern[str]]] = []  # output path and contents
    output: list[str] = []  # Regular output errors
    output2: dict[int, list[str]] = {}  # Output errors for incremental, runs 2+
    deleted_paths: dict[int, set[str]] = {}  # from run number to paths
    stale_modules: dict[int, set[str]] = {}  # from run number to module names
    rechecked_modules: dict[int, set[str]] = {}  # from run number to module names
    triggered: list[str] = []  # Active triggers (one line per incremental step)
    targets: dict[int, list[str]] = {}  # Fine-grained targets (per fine-grained update)
    test_modules: list[str] = []  # Modules which are deemed "test" (vs "fixture")

    def _case_fail(msg: str) -> NoReturn:
        pytest.fail(f"{case.file}:{case.line}: {msg}", pytrace=False)

    # Process the parsed items. Each item has a header of form [id args],
    # optionally followed by lines of text.
    item = first_item = test_items[0]
    test_modules.append("__main__")
    for item in test_items[1:]:

        def _item_fail(msg: str) -> NoReturn:
            item_abs_line = case.line + item.line - 2
            pytest.fail(f"{case.file}:{item_abs_line}: {msg}", pytrace=False)

        if item.id in {"file", "fixture", "outfile", "outfile-re"}:
            # Record an extra file needed for the test case.
            assert item.arg is not None
            contents = expand_variables("\n".join(item.data))
            path = join(base_path, item.arg)
            if item.id != "fixture":
                test_modules.append(_file_arg_to_module(item.arg))
            if item.id in {"file", "fixture"}:
                files.append((path, contents))
            elif item.id == "outfile-re":
                output_files.append((path, re.compile(contents.rstrip(), re.S)))
            elif item.id == "outfile":
                output_files.append((path, contents))
        elif item.id == "builtins":
            # Use an alternative stub file for the builtins module.
            assert item.arg is not None
            mpath = join(os.path.dirname(case.file), item.arg)
            with open(mpath, encoding="utf8") as f:
                files.append((join(base_path, "builtins.pyi"), f.read()))
        elif item.id == "typing":
            # Use an alternative stub file for the typing module.
            assert item.arg is not None
            src_path = join(os.path.dirname(case.file), item.arg)
            with open(src_path, encoding="utf8") as f:
                files.append((join(base_path, "typing.pyi"), f.read()))
        elif item.id == "_typeshed":
            # Use an alternative stub file for the _typeshed module.
            assert item.arg is not None
            src_path = join(os.path.dirname(case.file), item.arg)
            with open(src_path, encoding="utf8") as f:
                files.append((join(base_path, "_typeshed.pyi"), f.read()))
        elif re.match(r"stale[0-9]*$", item.id):
            passnum = 1 if item.id == "stale" else int(item.id[len("stale") :])
            assert passnum > 0
            modules = set() if item.arg is None else {t.strip() for t in item.arg.split(",")}
            stale_modules[passnum] = modules
        elif re.match(r"rechecked[0-9]*$", item.id):
            passnum = 1 if item.id == "rechecked" else int(item.id[len("rechecked") :])
            assert passnum > 0
            modules = set() if item.arg is None else {t.strip() for t in item.arg.split(",")}
            rechecked_modules[passnum] = modules
        elif re.match(r"targets[0-9]*$", item.id):
            passnum = 1 if item.id == "targets" else int(item.id[len("targets") :])
            assert passnum > 0
            reprocessed = [] if item.arg is None else [t.strip() for t in item.arg.split(",")]
            targets[passnum] = reprocessed
        elif item.id == "delete":
            # File/directory to delete during a multi-step test case
            assert item.arg is not None
            m = re.match(r"(.*)\.([0-9]+)$", item.arg)
            if m is None:
                _item_fail(f"Invalid delete section {item.arg!r}")
            num = int(m.group(2))
            if num < 2:
                _item_fail(f"Can't delete during step {num}")
            full = join(base_path, m.group(1))
            deleted_paths.setdefault(num, set()).add(full)
        elif re.match(r"out[0-9]*$", item.id):
            if item.arg is None:
                args = []
            else:
                args = item.arg.split(",")

            version_check = True
            for arg in args:
                if arg == "skip-path-normalization":
                    normalize_output = False
                if arg.startswith("version"):
                    compare_op = arg[7:9]
                    if compare_op not in {">=", "=="}:
                        _item_fail("Only >= and == version checks are currently supported")
                    version_str = arg[9:]
                    try:
                        version = tuple(int(x) for x in version_str.split("."))
                    except ValueError:
                        _item_fail(f"{version_str!r} is not a valid python version")

                    if compare_op == ">=":
                        if version <= defaults.PYTHON3_VERSION:
                            _item_fail(
                                f"{arg} always true since minimum runtime version is {defaults.PYTHON3_VERSION}"
                            )
                        version_check = sys.version_info >= version
                    elif compare_op == "==":
                        if version < defaults.PYTHON3_VERSION:
                            _item_fail(
                                f"{arg} always false since minimum runtime version is {defaults.PYTHON3_VERSION}"
                            )
                        if not 1 < len(version) < 4:
                            _item_fail(
                                f'Only minor or patch version checks are currently supported with "==": {version_str!r}'
                            )
                        version_check = sys.version_info[: len(version)] == version
            if version_check:
                tmp_output = [expand_variables(line) for line in item.data]
                if os.path.sep == "\\" and normalize_output:
                    tmp_output = [fix_win_path(line) for line in tmp_output]
                if item.id == "out" or item.id == "out1":
                    output = tmp_output
                else:
                    passnum = int(item.id[len("out") :])
                    assert passnum > 1
                    output2[passnum] = tmp_output
                out_section_missing = False
        elif item.id == "triggered" and item.arg is None:
            triggered = item.data
        else:
            section_str = item.id + (f" {item.arg}" if item.arg else "")
            _item_fail(f"Invalid section header [{section_str}] in case {case.name!r}")

    if out_section_missing:
        _case_fail(f"Required output section not found in case {case.name!r}")

    for passnum in stale_modules.keys():
        if passnum not in rechecked_modules:
            # If the set of rechecked modules isn't specified, make it the same as the set
            # of modules with a stale public interface.
            rechecked_modules[passnum] = stale_modules[passnum]
        if (
            passnum in stale_modules
            and passnum in rechecked_modules
            and not stale_modules[passnum].issubset(rechecked_modules[passnum])
        ):
            _case_fail(f"Stale modules after pass {passnum} must be a subset of rechecked modules")

    output_inline_start = len(output)
    input = first_item.data
    expand_errors(input, output, "main")
    for file_path, contents in files:
        expand_errors(contents.split("\n"), output, file_path)

    seen_files = set()
    for file, _ in files:
        if file in seen_files:
            _case_fail(f"Duplicated filename {file}. Did you include it multiple times?")
        seen_files.add(file)

    case.input = input
    case.output = output
    case.output_inline_start = output_inline_start
    case.output2 = output2
    case.last_line = case.line + item.line + len(item.data) - 2
    case.files = files
    case.output_files = output_files
    case.expected_stale_modules = stale_modules
    case.expected_rechecked_modules = rechecked_modules
    case.deleted_paths = deleted_paths
    case.triggered = triggered or []
    case.normalize_output = normalize_output
    case.expected_fine_grained_targets = targets
    case.test_modules = test_modules
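
# For reference, parse_test_case() consumes sections like the following illustrative
# sketch of the .test syntax (not a real test case): [file X] adds an extra file,
# a ".N" suffix on a file name marks it as the content for incremental step N,
# [stale]/[rechecked]/[targets] give expected module sets or targets per step,
# and [out]/[out2] hold the expected output per run:
#
#     [case testExample]
#     import a
#     [file a.py]
#     x = 1
#     [file a.py.2]
#     x = 2
#     [stale a]
#     [out]
#     main:1: error: <expected message for run 1>
#     [out2]
#     main:1: error: <expected message for run 2>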


class DataDrivenTestCase(pytest.Item):
    """Holds parsed data-driven test cases, and handles directory setup and teardown."""

    # Override parent member type
    parent: DataSuiteCollector

    input: list[str]
    output: list[str]  # Output for the first pass
    output_inline_start: int
    output2: dict[int, list[str]]  # Output for runs 2+, indexed by run number

    # full path of test suite
    file = ""
    line = 0

    # (file path, file content) tuples
    files: list[tuple[str, str]]
    # Modules which are to be considered "test" rather than "fixture"
    test_modules: list[str]
    expected_stale_modules: dict[int, set[str]]
    expected_rechecked_modules: dict[int, set[str]]
    expected_fine_grained_targets: dict[int, list[str]]

    # Whether or not we should normalize the output to standardize things like
    # forward vs backward slashes in file paths for Windows vs Linux.
    normalize_output = True

    # Extra attributes used by some tests.
    last_line: int
    output_files: list[tuple[str, str | Pattern[str]]]  # Path and contents for output files
    deleted_paths: dict[int, set[str]]  # Mapping run number -> paths
    triggered: list[str]  # Active triggers (one line per incremental step)

    def __init__(
        self,
        parent: DataSuiteCollector,
        suite: DataSuite,
        file: str,
        name: str,
        writescache: bool,
        only_when: str,
        platform: str | None,
        skip: bool,
        xfail: bool,
        data: str,
        line: int,
    ) -> None:
        super().__init__(name, parent)
        self.suite = suite
        self.file = file
        self.writescache = writescache
        self.only_when = only_when
        if (platform == "windows" and sys.platform != "win32") or (
            platform == "posix" and sys.platform == "win32"
        ):
            skip = True
        self.skip = skip
        self.xfail = xfail
        self.data = data
        self.line = line
        self.old_cwd: str | None = None
        self.tmpdir: tempfile.TemporaryDirectory[str] | None = None

    def runtest(self) -> None:
        if self.skip:
            pytest.skip()
        # TODO: add a better error message for when someone uses skip and xfail at the same time
        elif self.xfail:
            self.add_marker(pytest.mark.xfail)
        parent = self.getparent(DataSuiteCollector)
        assert parent is not None, "Should not happen"
        suite = parent.obj()
        suite.setup()
        try:
            suite.run_case(self)
        except Exception:
            # As a debugging aid, support copying the contents of the tmp directory somewhere
            save_dir: str | None = self.config.getoption("--save-failures-to", None)
            if save_dir:
                assert self.tmpdir is not None
                target_dir = os.path.join(save_dir, os.path.basename(self.tmpdir.name))
                print(f"Copying data from test {self.name} to {target_dir}")
                if not os.path.isabs(target_dir):
                    assert self.old_cwd
                    target_dir = os.path.join(self.old_cwd, target_dir)
                shutil.copytree(self.tmpdir.name, target_dir)
            raise

    def setup(self) -> None:
        parse_test_case(case=self)
        self.old_cwd = os.getcwd()
        self.tmpdir = tempfile.TemporaryDirectory(prefix="mypy-test-")
        os.chdir(self.tmpdir.name)
        os.mkdir(test_temp_dir)

        # Precalculate steps for find_steps()
        steps: dict[int, list[FileOperation]] = {}

        for path, content in self.files:
            m = re.match(r".*\.([0-9]+)$", path)
            if m:
                # Skip writing subsequent incremental steps - rather
                # store them as operations.
                num = int(m.group(1))
                assert num >= 2
                target_path = re.sub(r"\.[0-9]+$", "", path)
                module = module_from_path(target_path)
                operation = UpdateFile(module, content, target_path)
                steps.setdefault(num, []).append(operation)
            else:
                # Write the first incremental steps
                dir = os.path.dirname(path)
                os.makedirs(dir, exist_ok=True)
                with open(path, "w", encoding="utf8") as f:
                    f.write(content)

        for num, paths in self.deleted_paths.items():
            assert num >= 2
            for path in paths:
                module = module_from_path(path)
                steps.setdefault(num, []).append(DeleteFile(module, path))
        max_step = max(steps) if steps else 2
        self.steps = [steps.get(num, []) for num in range(2, max_step + 1)]

    def teardown(self) -> None:
        if self.old_cwd is not None:
            os.chdir(self.old_cwd)
        if self.tmpdir is not None:
            try:
                self.tmpdir.cleanup()
            except OSError:
                pass
        self.old_cwd = None
        self.tmpdir = None

    def reportinfo(self) -> tuple[str, int, str]:
        return self.file, self.line, self.name

    def repr_failure(
        self, excinfo: pytest.ExceptionInfo[BaseException], style: Any | None = None
    ) -> str:
        excrepr: object
        if isinstance(excinfo.value, SystemExit):
            # We assume that before doing exit() (which raises SystemExit) we've printed
            # enough context about what happened so that a stack trace is not useful.
            # In particular, uncaught exceptions during semantic analysis or type checking
            # call exit() and they already print out a stack trace.
            excrepr = excinfo.exconly()
        elif isinstance(excinfo.value, pytest.fail.Exception) and not excinfo.value.pytrace:
            excrepr = excinfo.exconly()
        else:
            excinfo.traceback = self.parent._traceback_filter(excinfo)
            excrepr = excinfo.getrepr(style="short")

        return f"data: {self.file}:{self.line}:\n{excrepr}"

    def find_steps(self) -> list[list[FileOperation]]:
        """Return a list of descriptions of file operations for each incremental step.

        The first list item corresponds to the first incremental step, the second to the
        second step, etc. Each operation can either be a file modification/creation
        (UpdateFile) or deletion (DeleteFile).

        Defaults to having two steps if there aren't any operations.
        """
        return self.steps


def module_from_path(path: str) -> str:
    path = re.sub(r"\.pyi?$", "", path)
    # We can have a mix of Unix-style and Windows-style separators.
    parts = re.split(r"[/\\]", path)
    del parts[0]
    module = ".".join(parts)
    module = re.sub(r"\.__init__$", "", module)
    return module
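
# Illustrative examples (hypothetical inputs): the leading directory component
# (typically the temp dir) is dropped before building the module name:
#     module_from_path("tmp/pkg/mod.py")      -> "pkg.mod"
#     module_from_path("tmp/pkg/__init__.py") -> "pkg"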


@dataclass
class TestItem:
    """Parsed test case item.

    An item is of the form
      [id arg]
      .. data ..
    """

    id: str
    arg: str | None
    # Processed, collapsed text data
    data: list[str]
    # Start line: 1-based, inclusive, relative to testcase
    line: int
    # End line: 1-based, exclusive, relative to testcase; not same as
    # `line + len(test_item.data)` due to collapsing
    end_line: int

    @property
    def trimmed_newlines(self) -> int:  # compensates for strip_list
        return self.end_line - self.line - len(self.data)


def parse_test_data(raw_data: str, name: str) -> list[TestItem]:
    """Parse a list of lines that represent a sequence of test items."""
    lines = ["", "[case " + name + "]"] + raw_data.split("\n")
    ret: list[TestItem] = []
    data: list[str] = []
    id: str | None = None
    arg: str | None = None

    i = 0
    i0 = 0
    while i < len(lines):
        s = lines[i].strip()

        if lines[i].startswith("[") and s.endswith("]"):
            if id:
                data = collapse_line_continuation(data)
                data = strip_list(data)
                ret.append(TestItem(id, arg, data, i0 + 1, i))

            i0 = i
            id = s[1:-1]
            arg = None
            if " " in id:
                arg = id[id.index(" ") + 1 :]
                id = id[: id.index(" ")]
            data = []
        elif lines[i].startswith("\\["):
            data.append(lines[i][1:])
        elif not lines[i].startswith("--"):
            data.append(lines[i])
        elif lines[i].startswith("----"):
            data.append(lines[i][2:])
        i += 1

    # Process the last item.
    if id:
        data = collapse_line_continuation(data)
        data = strip_list(data)
        ret.append(TestItem(id, arg, data, i0 + 1, i - 1))

    return ret
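
# Illustrative example (hypothetical inputs): a synthetic [case <name>] header is
# prepended, so the first item always has id 'case':
#     >>> items = parse_test_data("x = 1\n[out]\nmain:1: error: oops", "testExample")
#     >>> [(it.id, it.arg) for it in items]
#     [('case', 'testExample'), ('out', None)]
#     >>> items[0].data
#     ['x = 1']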


def strip_list(l: list[str]) -> list[str]:
    """Return a stripped copy of l.

    Strip whitespace at the end of all lines, and strip all empty
    lines from the end of the array.
    """
    r: list[str] = []
    for s in l:
        # Strip spaces at end of line
        r.append(re.sub(r"\s+$", "", s))

    while r and r[-1] == "":
        r.pop()

    return r
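
# Illustrative example (hypothetical input): interior blank lines are kept,
# trailing whitespace and trailing blank lines are dropped:
#     strip_list(["x = 1  ", "", "y = 2", "", ""]) -> ["x = 1", "", "y = 2"]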


def collapse_line_continuation(l: list[str]) -> list[str]:
    r: list[str] = []
    cont = False
    for s in l:
        ss = re.sub(r"\\$", "", s)
        if cont:
            r[-1] += re.sub("^ +", "", ss)
        else:
            r.append(ss)
        cont = s.endswith("\\")
    return r
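
# Illustrative example (hypothetical input): a trailing backslash joins a line
# with the next one, dropping the continuation line's leading spaces:
#     collapse_line_continuation(["x = (1 +\\", "       2)"]) -> ["x = (1 +2)"]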


def expand_variables(s: str) -> str:
    return s.replace("<ROOT>", root_dir)


def expand_errors(input: list[str], output: list[str], fnam: str) -> None:
    """Transform comments such as '# E: message' or
    '# E:3: message' in input.

    The result is lines like 'fnam:line: error: message'.
    """
    for i in range(len(input)):
        # The first element of the split isn't a comment
        for possible_err_comment in input[i].split(" # ")[1:]:
            m = re.search(
                r"^([ENW]):((?P<col>\d+):)? (?P<message>.*)$", possible_err_comment.strip()
            )
            if m:
                if m.group(1) == "E":
                    severity = "error"
                elif m.group(1) == "N":
                    severity = "note"
                elif m.group(1) == "W":
                    severity = "warning"
                col = m.group("col")
                message = m.group("message")
                message = message.replace("\\#", "#")  # adds back escaped # character
                if col is None:
                    output.append(f"{fnam}:{i + 1}: {severity}: {message}")
                else:
                    output.append(f"{fnam}:{i + 1}:{col}: {severity}: {message}")
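
# Illustrative example (hypothetical input): an inline error comment becomes an
# expected-output line, e.g. with fnam="main" on the first input line:
#     input line:  x = 1 + ''  # E:5: Unsupported operand types
#     output line: main:1:5: error: Unsupported operand types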


def fix_win_path(line: str) -> str:
    r"""Changes Windows paths to Linux paths in error messages.

    E.g. foo\bar.py -> foo/bar.py.
    """
    line = line.replace(root_dir, root_dir.replace("\\", "/"))
    m = re.match(r"^([\S/]+):(\d+:)?(\s+.*)", line)
    if not m:
        return line
    else:
        filename, lineno, message = m.groups()
        return "{}:{}{}".format(filename.replace("\\", "/"), lineno or "", message)
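
# Illustrative example (hypothetical input):
#     fix_win_path(r"pkg\mod.py:10: error: unsupported") -> "pkg/mod.py:10: error: unsupported"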


def fix_cobertura_filename(line: str) -> str:
    r"""Changes filename paths to Linux paths in Cobertura output files.

    E.g. filename="pkg\subpkg\a.py" -> filename="pkg/subpkg/a.py".
    """
    m = re.search(r'<class .* filename="(?P<filename>.*?)"', line)
    if not m:
        return line
    return "{}{}{}".format(
        line[: m.start(1)], m.group("filename").replace("\\", "/"), line[m.end(1) :]
    )
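
# Illustrative example (hypothetical input):
#     fix_cobertura_filename('<class name="a" filename="pkg\\sub\\a.py">')
#         -> '<class name="a" filename="pkg/sub/a.py">'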


##
#
# pytest setup
#
##


# This function name is special to pytest. See
# https://docs.pytest.org/en/latest/reference.html#initialization-hooks
def pytest_addoption(parser: Any) -> None:
    group = parser.getgroup("mypy")
    group.addoption(
        "--update-data",
        action="store_true",
        default=False,
        help="Update test data to reflect actual output (supported only for certain tests)",
    )
    group.addoption(
        "--save-failures-to",
        default=None,
        help="Copy the temp directories from failing tests to a target directory",
    )
    group.addoption(
        "--mypy-verbose", action="count", help="Set the verbose flag when creating mypy Options"
    )
    group.addoption(
        "--mypyc-showc",
        action="store_true",
        default=False,
        help="Display C code on mypyc test failures",
    )
    group.addoption(
        "--mypyc-debug",
        default=None,
        dest="debugger",
        choices=SUPPORTED_DEBUGGERS,
        help="Run the first mypyc run test with the specified debugger",
    )


def pytest_configure(config: pytest.Config) -> None:
    if config.getoption("--update-data") and config.getoption("--numprocesses", default=1) > 1:
        raise pytest.UsageError(
            "--update-data incompatible with parallelized tests; re-run with -n 1"
        )


# This function name is special to pytest. See
# https://doc.pytest.org/en/latest/how-to/writing_plugins.html#collection-hooks
def pytest_pycollect_makeitem(collector: Any, name: str, obj: object) -> Any | None:
    """Called by pytest on each object in modules configured in conftest.py files.

    collector is pytest.Collector, returns Optional[pytest.Class]
    """
    if isinstance(obj, type):
        # Only classes derived from DataSuite contain test cases, not the DataSuite class itself
        if issubclass(obj, DataSuite) and obj is not DataSuite:
            # Non-None result means this obj is a test case.
            # The collect method of the returned DataSuiteCollector instance will be called later,
            # with self.obj being obj.
            return DataSuiteCollector.from_parent(  # type: ignore[no-untyped-call]
                parent=collector, name=name
            )
    return None


_case_name_pattern = re.compile(
    r"(?P<name>[a-zA-Z_0-9]+)"
    r"(?P<writescache>-writescache)?"
    r"(?P<only_when>-only_when_cache|-only_when_nocache)?"
    r"(-(?P<platform>posix|windows))?"
    r"(?P<skip>-skip)?"
    r"(?P<xfail>-xfail)?"
)
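
# Illustrative case ids accepted by the pattern above (hypothetical names):
#     "testFoo"                  -> name="testFoo"
#     "testFoo-writescache-skip" -> name="testFoo", writescache and skip set
#     "testBar-posix"            -> name="testBar", platform="posix"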


def split_test_cases(
    parent: DataFileCollector, suite: DataSuite, file: str
) -> Iterator[DataDrivenTestCase]:
    """Iterate over raw test cases in file, at collection time, ignoring sub items.

    The collection phase is slow, so any heavy processing should be deferred to after
    uninteresting tests are filtered (when using -k PATTERN switch).
    """
    with open(file, encoding="utf-8") as f:
        data = f.read()
    cases = re.split(r"^\[case ([^]]+)\][ \t]*$\n", data, flags=re.DOTALL | re.MULTILINE)
    cases_iter = iter(cases)
    line_no = next(cases_iter).count("\n") + 1
    test_names = set()
    for case_id in cases_iter:
        data = next(cases_iter)

        m = _case_name_pattern.fullmatch(case_id)
        if not m:
            raise RuntimeError(f"Invalid testcase id {case_id!r}")
        name = m.group("name")
        if name in test_names:
            raise RuntimeError(
                'Found a duplicate test name "{}" in {} on line {}'.format(
                    name, parent.name, line_no
                )
            )
        yield DataDrivenTestCase.from_parent(
            parent=parent,
            suite=suite,
            file=file,
            name=add_test_name_suffix(name, suite.test_name_suffix),
            writescache=bool(m.group("writescache")),
            only_when=m.group("only_when"),
            platform=m.group("platform"),
            skip=bool(m.group("skip")),
            xfail=bool(m.group("xfail")),
            data=data,
            line=line_no,
        )
        line_no += data.count("\n") + 1

        # Record existing tests to prevent duplicates:
        test_names.update({name})


class DataSuiteCollector(pytest.Class):
    def collect(self) -> Iterator[DataFileCollector]:
        """Called by pytest on each of the objects returned from pytest_pycollect_makeitem"""
        # obj is the object for which pytest_pycollect_makeitem returned self.
        suite: DataSuite = self.obj

        assert os.path.isdir(
            suite.data_prefix
        ), f"Test data prefix ({suite.data_prefix}) not set correctly"

        for data_file in suite.files:
            yield DataFileCollector.from_parent(parent=self, name=data_file)


class DataFileFix(NamedTuple):
    lineno: int  # 1-offset, inclusive
    end_lineno: int  # 1-offset, exclusive
    lines: list[str]


class DataFileCollector(pytest.Collector):
    """Represents a single `.test` data driven test file.

    More context: https://github.com/python/mypy/issues/11662
    """

    parent: DataSuiteCollector

    _fixes: list[DataFileFix]

    @classmethod  # We have to fight with pytest here:
    def from_parent(
        cls, parent: DataSuiteCollector, *, name: str  # type: ignore[override]
    ) -> DataFileCollector:
        collector = super().from_parent(parent, name=name)
        assert isinstance(collector, DataFileCollector)
        return collector

    def collect(self) -> Iterator[DataDrivenTestCase]:
        yield from split_test_cases(
            parent=self,
            suite=self.parent.obj,
            file=os.path.join(self.parent.obj.data_prefix, self.name),
        )

    def setup(self) -> None:
        super().setup()
        self._fixes = []

    def teardown(self) -> None:
        super().teardown()
        self._apply_fixes()

    def enqueue_fix(self, fix: DataFileFix) -> None:
        self._fixes.append(fix)

    def _apply_fixes(self) -> None:
        if not self._fixes:
            return
        data_path = Path(self.parent.obj.data_prefix) / self.name
        lines = data_path.read_text().split("\n")
        # start from end to prevent line offsets from shifting as we update
        for fix in sorted(self._fixes, reverse=True):
            lines[fix.lineno - 1 : fix.end_lineno - 1] = fix.lines
        data_path.write_text("\n".join(lines))


def add_test_name_suffix(name: str, suffix: str) -> str:
    # Find magic suffix of form "-foobar" (used for things like "-skip").
    m = re.search(r"-[-A-Za-z0-9]+$", name)
    if m:
        # Insert suite-specific test name suffix before the magic suffix
        # which must be the last thing in the test case name since we
        # are using endswith() checks.
        magic_suffix = m.group(0)
        return name[: -len(magic_suffix)] + suffix + magic_suffix
    else:
        return name + suffix
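
# Illustrative examples (hypothetical names): the suite suffix is inserted
# before any trailing magic suffix such as "-skip":
#     add_test_name_suffix("testFoo", "_cached")      -> "testFoo_cached"
#     add_test_name_suffix("testFoo-skip", "_cached") -> "testFoo_cached-skip"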


def is_incremental(testcase: DataDrivenTestCase) -> bool:
    return "incremental" in testcase.name.lower() or "incremental" in testcase.file


def has_stable_flags(testcase: DataDrivenTestCase) -> bool:
    if any(re.match(r"# flags[2-9]:", line) for line in testcase.input):
        return False
    for filename, contents in testcase.files:
        if os.path.basename(filename).startswith("mypy.ini."):
            return False
    return True


class DataSuite:
    # option fields - class variables
    files: list[str]

    base_path = test_temp_dir

    # Allow external users of the test code to override the data prefix
    data_prefix = test_data_prefix

    required_out_section = False
    native_sep = False

    # Name suffix automatically added to each test case in the suite (can be
    # used to distinguish test cases in suites that share data files)
    test_name_suffix = ""

    def setup(self) -> None:
        """Setup fixtures (ad-hoc)"""

    @abstractmethod
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        raise NotImplementedError