# testcheck.py
  1. """Type checker test cases"""
  2. from __future__ import annotations
  3. import os
  4. import re
  5. import sys
  6. from mypy import build
  7. from mypy.build import Graph
  8. from mypy.errors import CompileError
  9. from mypy.modulefinder import BuildSource, FindModuleCache, SearchPaths
  10. from mypy.options import TYPE_VAR_TUPLE, UNPACK
  11. from mypy.test.config import test_data_prefix, test_temp_dir
  12. from mypy.test.data import DataDrivenTestCase, DataSuite, FileOperation, module_from_path
  13. from mypy.test.helpers import (
  14. assert_module_equivalence,
  15. assert_string_arrays_equal,
  16. assert_target_equivalence,
  17. check_test_output_files,
  18. find_test_files,
  19. normalize_error_messages,
  20. parse_options,
  21. perform_file_operations,
  22. )
  23. from mypy.test.update_data import update_testcase_output
  24. try:
  25. import lxml # type: ignore[import]
  26. except ImportError:
  27. lxml = None
  28. import pytest
  29. # List of files that contain test case descriptions.
  30. # Includes all check-* files with the .test extension in the test-data/unit directory
  31. typecheck_files = find_test_files(pattern="check-*.test")
  32. # Tests that use Python 3.8-only AST features (like expression-scoped ignores):
  33. if sys.version_info < (3, 8):
  34. typecheck_files.remove("check-python38.test")
  35. if sys.version_info < (3, 9):
  36. typecheck_files.remove("check-python39.test")
  37. if sys.version_info < (3, 10):
  38. typecheck_files.remove("check-python310.test")
  39. if sys.version_info < (3, 11):
  40. typecheck_files.remove("check-python311.test")
  41. # Special tests for platforms with case-insensitive filesystems.
  42. if sys.platform not in ("darwin", "win32"):
  43. typecheck_files.remove("check-modules-case.test")
  44. class TypeCheckSuite(DataSuite):
  45. files = typecheck_files
  46. def run_case(self, testcase: DataDrivenTestCase) -> None:
  47. if lxml is None and os.path.basename(testcase.file) == "check-reports.test":
  48. pytest.skip("Cannot import lxml. Is it installed?")
  49. incremental = (
  50. "incremental" in testcase.name.lower()
  51. or "incremental" in testcase.file
  52. or "serialize" in testcase.file
  53. )
  54. if incremental:
  55. # Incremental tests are run once with a cold cache, once with a warm cache.
  56. # Expect success on first run, errors from testcase.output (if any) on second run.
  57. num_steps = max([2] + list(testcase.output2.keys()))
  58. # Check that there are no file changes beyond the last run (they would be ignored).
  59. for dn, dirs, files in os.walk(os.curdir):
  60. for file in files:
  61. m = re.search(r"\.([2-9])$", file)
  62. if m and int(m.group(1)) > num_steps:
  63. raise ValueError(
  64. "Output file {} exists though test case only has {} runs".format(
  65. file, num_steps
  66. )
  67. )
  68. steps = testcase.find_steps()
  69. for step in range(1, num_steps + 1):
  70. idx = step - 2
  71. ops = steps[idx] if idx < len(steps) and idx >= 0 else []
  72. self.run_case_once(testcase, ops, step)
  73. else:
  74. self.run_case_once(testcase)
  75. def _sort_output_if_needed(self, testcase: DataDrivenTestCase, a: list[str]) -> None:
  76. idx = testcase.output_inline_start
  77. if not testcase.files or idx == len(testcase.output):
  78. return
  79. def _filename(_msg: str) -> str:
  80. return _msg.partition(":")[0]
  81. file_weights = {file: idx for idx, file in enumerate(_filename(msg) for msg in a)}
  82. testcase.output[idx:] = sorted(
  83. testcase.output[idx:], key=lambda msg: file_weights.get(_filename(msg), -1)
  84. )
  85. def run_case_once(
  86. self,
  87. testcase: DataDrivenTestCase,
  88. operations: list[FileOperation] = [],
  89. incremental_step: int = 0,
  90. ) -> None:
  91. original_program_text = "\n".join(testcase.input)
  92. module_data = self.parse_module(original_program_text, incremental_step)
  93. # Unload already loaded plugins, they may be updated.
  94. for file, _ in testcase.files:
  95. module = module_from_path(file)
  96. if module.endswith("_plugin") and module in sys.modules:
  97. del sys.modules[module]
  98. if incremental_step == 0 or incremental_step == 1:
  99. # In run 1, copy program text to program file.
  100. for module_name, program_path, program_text in module_data:
  101. if module_name == "__main__":
  102. with open(program_path, "w", encoding="utf8") as f:
  103. f.write(program_text)
  104. break
  105. elif incremental_step > 1:
  106. # In runs 2+, copy *.[num] files to * files.
  107. perform_file_operations(operations)
  108. # Parse options after moving files (in case mypy.ini is being moved).
  109. options = parse_options(original_program_text, testcase, incremental_step)
  110. options.use_builtins_fixtures = True
  111. if not testcase.name.endswith("_no_incomplete"):
  112. options.enable_incomplete_feature = [TYPE_VAR_TUPLE, UNPACK]
  113. options.show_traceback = True
  114. # Enable some options automatically based on test file name.
  115. if "optional" in testcase.file:
  116. options.strict_optional = True
  117. if "columns" in testcase.file:
  118. options.show_column_numbers = True
  119. if "errorcodes" in testcase.file:
  120. options.hide_error_codes = False
  121. if "abstract" not in testcase.file:
  122. options.allow_empty_bodies = not testcase.name.endswith("_no_empty")
  123. if "lowercase" not in testcase.file:
  124. options.force_uppercase_builtins = True
  125. if "union-error" not in testcase.file:
  126. options.force_union_syntax = True
  127. if incremental_step and options.incremental:
  128. # Don't overwrite # flags: --no-incremental in incremental test cases
  129. options.incremental = True
  130. else:
  131. options.incremental = False
  132. # Don't waste time writing cache unless we are specifically looking for it
  133. if not testcase.writescache:
  134. options.cache_dir = os.devnull
  135. sources = []
  136. for module_name, program_path, program_text in module_data:
  137. # Always set to none so we're forced to reread the module in incremental mode
  138. sources.append(
  139. BuildSource(program_path, module_name, None if incremental_step else program_text)
  140. )
  141. plugin_dir = os.path.join(test_data_prefix, "plugins")
  142. sys.path.insert(0, plugin_dir)
  143. res = None
  144. try:
  145. res = build.build(sources=sources, options=options, alt_lib_path=test_temp_dir)
  146. a = res.errors
  147. except CompileError as e:
  148. a = e.messages
  149. finally:
  150. assert sys.path[0] == plugin_dir
  151. del sys.path[0]
  152. if testcase.normalize_output:
  153. a = normalize_error_messages(a)
  154. # Make sure error messages match
  155. if incremental_step < 2:
  156. if incremental_step == 1:
  157. msg = "Unexpected type checker output in incremental, run 1 ({}, line {})"
  158. else:
  159. assert incremental_step == 0
  160. msg = "Unexpected type checker output ({}, line {})"
  161. self._sort_output_if_needed(testcase, a)
  162. output = testcase.output
  163. else:
  164. msg = (
  165. f"Unexpected type checker output in incremental, run {incremental_step}"
  166. + " ({}, line {})"
  167. )
  168. output = testcase.output2.get(incremental_step, [])
  169. if output != a and testcase.config.getoption("--update-data", False):
  170. update_testcase_output(testcase, a, incremental_step=incremental_step)
  171. assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))
  172. if res:
  173. if options.cache_dir != os.devnull:
  174. self.verify_cache(module_data, res.errors, res.manager, res.graph)
  175. name = "targets"
  176. if incremental_step:
  177. name += str(incremental_step + 1)
  178. expected = testcase.expected_fine_grained_targets.get(incremental_step + 1)
  179. actual = [
  180. target
  181. for module, target in res.manager.processed_targets
  182. if module in testcase.test_modules
  183. ]
  184. if expected is not None:
  185. assert_target_equivalence(name, expected, actual)
  186. if incremental_step > 1:
  187. suffix = "" if incremental_step == 2 else str(incremental_step - 1)
  188. expected_rechecked = testcase.expected_rechecked_modules.get(incremental_step - 1)
  189. if expected_rechecked is not None:
  190. assert_module_equivalence(
  191. "rechecked" + suffix, expected_rechecked, res.manager.rechecked_modules
  192. )
  193. expected_stale = testcase.expected_stale_modules.get(incremental_step - 1)
  194. if expected_stale is not None:
  195. assert_module_equivalence(
  196. "stale" + suffix, expected_stale, res.manager.stale_modules
  197. )
  198. if testcase.output_files:
  199. check_test_output_files(testcase, incremental_step, strip_prefix="tmp/")
  200. def verify_cache(
  201. self,
  202. module_data: list[tuple[str, str, str]],
  203. a: list[str],
  204. manager: build.BuildManager,
  205. graph: Graph,
  206. ) -> None:
  207. # There should be valid cache metadata for each module except
  208. # for those that had an error in themselves or one of their
  209. # dependencies.
  210. error_paths = self.find_error_message_paths(a)
  211. busted_paths = {m.path for id, m in manager.modules.items() if graph[id].transitive_error}
  212. modules = self.find_module_files(manager)
  213. modules.update({module_name: path for module_name, path, text in module_data})
  214. missing_paths = self.find_missing_cache_files(modules, manager)
  215. # We would like to assert error_paths.issubset(busted_paths)
  216. # but this runs into trouble because while some 'notes' are
  217. # really errors that cause an error to be marked, many are
  218. # just notes attached to other errors.
  219. assert error_paths or not busted_paths, "Some modules reported error despite no errors"
  220. if not missing_paths == busted_paths:
  221. raise AssertionError(f"cache data discrepancy {missing_paths} != {busted_paths}")
  222. assert os.path.isfile(os.path.join(manager.options.cache_dir, ".gitignore"))
  223. cachedir_tag = os.path.join(manager.options.cache_dir, "CACHEDIR.TAG")
  224. assert os.path.isfile(cachedir_tag)
  225. with open(cachedir_tag) as f:
  226. assert f.read().startswith("Signature: 8a477f597d28d172789f06886806bc55")
  227. def find_error_message_paths(self, a: list[str]) -> set[str]:
  228. hits = set()
  229. for line in a:
  230. m = re.match(r"([^\s:]+):(\d+:)?(\d+:)? (error|warning|note):", line)
  231. if m:
  232. p = m.group(1)
  233. hits.add(p)
  234. return hits
  235. def find_module_files(self, manager: build.BuildManager) -> dict[str, str]:
  236. return {id: module.path for id, module in manager.modules.items()}
  237. def find_missing_cache_files(
  238. self, modules: dict[str, str], manager: build.BuildManager
  239. ) -> set[str]:
  240. ignore_errors = True
  241. missing = {}
  242. for id, path in modules.items():
  243. meta = build.find_cache_meta(id, path, manager)
  244. if not build.validate_meta(meta, id, path, ignore_errors, manager):
  245. missing[id] = path
  246. return set(missing.values())
  247. def parse_module(
  248. self, program_text: str, incremental_step: int = 0
  249. ) -> list[tuple[str, str, str]]:
  250. """Return the module and program names for a test case.
  251. Normally, the unit tests will parse the default ('__main__')
  252. module and follow all the imports listed there. You can override
  253. this behavior and instruct the tests to check multiple modules
  254. by using a comment like this in the test case input:
  255. # cmd: mypy -m foo.bar foo.baz
  256. You can also use `# cmdN:` to have a different cmd for incremental
  257. step N (2, 3, ...).
  258. Return a list of tuples (module name, file name, program text).
  259. """
  260. m = re.search("# cmd: mypy -m ([a-zA-Z0-9_. ]+)$", program_text, flags=re.MULTILINE)
  261. if incremental_step > 1:
  262. alt_regex = f"# cmd{incremental_step}: mypy -m ([a-zA-Z0-9_. ]+)$"
  263. alt_m = re.search(alt_regex, program_text, flags=re.MULTILINE)
  264. if alt_m is not None:
  265. # Optionally return a different command if in a later step
  266. # of incremental mode, otherwise default to reusing the
  267. # original cmd.
  268. m = alt_m
  269. if m:
  270. # The test case wants to use a non-default main
  271. # module. Look up the module and give it as the thing to
  272. # analyze.
  273. module_names = m.group(1)
  274. out = []
  275. search_paths = SearchPaths((test_temp_dir,), (), (), ())
  276. cache = FindModuleCache(search_paths, fscache=None, options=None)
  277. for module_name in module_names.split(" "):
  278. path = cache.find_module(module_name)
  279. assert isinstance(path, str), f"Can't find ad hoc case file: {module_name}"
  280. with open(path, encoding="utf8") as f:
  281. program_text = f.read()
  282. out.append((module_name, path, program_text))
  283. return out
  284. else:
  285. return [("__main__", "main", program_text)]