# processor.py
  1. """Module containing our file processor that tokenizes a file for checks."""
  2. from __future__ import annotations
  3. import argparse
  4. import ast
  5. import logging
  6. import tokenize
  7. from typing import Any
  8. from typing import Generator
  9. from typing import List
  10. from typing import Tuple
  11. from flake8 import defaults
  12. from flake8 import utils
  13. from flake8._compat import FSTRING_END
  14. from flake8._compat import FSTRING_MIDDLE
  15. from flake8.plugins.finder import LoadedPlugin
LOG = logging.getLogger(__name__)

#: Token types that terminate a physical line.
NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
#: Token types that are skipped while assembling a logical line.
SKIP_TOKENS = frozenset(
    [tokenize.NL, tokenize.NEWLINE, tokenize.INDENT, tokenize.DEDENT]
)

#: Pairs of (offset into the logical line, (row, column) in the physical
#: file) used to map positions on the logical line back to the source.
_LogicalMapping = List[Tuple[int, Tuple[int, int]]]
#: Triple of (comments, logical-line fragments, logical mapping).
_Logical = Tuple[List[str], List[str], _LogicalMapping]
class FileProcessor:
    """Processes a file and holds state.

    This processes a file by generating tokens, logical and physical lines,
    and AST trees. This also provides a way of passing state about the file
    to checks expecting that state. Any public attribute on this object can
    be requested by a plugin. The known public attributes are:

    - :attr:`blank_before`
    - :attr:`blank_lines`
    - :attr:`checker_state`
    - :attr:`indent_char`
    - :attr:`indent_level`
    - :attr:`line_number`
    - :attr:`logical_line`
    - :attr:`max_line_length`
    - :attr:`max_doc_length`
    - :attr:`multiline`
    - :attr:`noqa`
    - :attr:`previous_indent_level`
    - :attr:`previous_logical`
    - :attr:`previous_unindented_logical_line`
    - :attr:`tokens`
    - :attr:`file_tokens`
    - :attr:`total_lines`
    - :attr:`verbose`
    """

    #: always ``False``, included for compatibility
    noqa = False

    def __init__(
        self,
        filename: str,
        options: argparse.Namespace,
        lines: list[str] | None = None,
    ) -> None:
        """Initialize our file processor.

        :param filename: Name of the file to process
        :param options: Parsed flake8 options namespace.
        :param lines: Optional pre-read lines; when ``None`` the file (or
            stdin, for ``"-"``) is read via :meth:`read_lines`.
        """
        self.options = options
        self.filename = filename
        self.lines = lines if lines is not None else self.read_lines()
        self.strip_utf_bom()

        # Defaults for public attributes
        #: Number of preceding blank lines
        self.blank_before = 0
        #: Number of blank lines
        self.blank_lines = 0
        #: Checker states for each plugin?
        self._checker_states: dict[str, dict[Any, Any]] = {}
        #: Current checker state
        self.checker_state: dict[Any, Any] = {}
        #: User provided option for hang closing
        self.hang_closing = options.hang_closing
        #: Character used for indentation
        self.indent_char: str | None = None
        #: Current level of indentation
        self.indent_level = 0
        #: Number of spaces used for indentation
        self.indent_size = options.indent_size
        #: Line number in the file
        self.line_number = 0
        #: Current logical line
        self.logical_line = ""
        #: Maximum line length as configured by the user
        self.max_line_length = options.max_line_length
        #: Maximum docstring / comment line length as configured by the user
        self.max_doc_length = options.max_doc_length
        #: Whether the current physical line is multiline
        self.multiline = False
        #: Previous level of indentation
        self.previous_indent_level = 0
        #: Previous logical line
        self.previous_logical = ""
        #: Previous unindented (i.e. top-level) logical line
        self.previous_unindented_logical_line = ""
        #: Current set of tokens
        self.tokens: list[tokenize.TokenInfo] = []
        #: Total number of lines in the file
        self.total_lines = len(self.lines)
        #: Verbosity level of Flake8
        self.verbose = options.verbose
        #: Statistics dictionary
        self.statistics = {"logical lines": 0}
        # Lazily-computed full token stream (see :attr:`file_tokens`).
        self._file_tokens: list[tokenize.TokenInfo] | None = None
        # map from line number to the line we'll search for `noqa` in
        self._noqa_line_mapping: dict[int, str] | None = None
        # Line number where the most recent f-string started; -1 when not
        # inside an f-string.  Set by :meth:`fstring_start`.
        self._fstring_start = -1

    @property
    def file_tokens(self) -> list[tokenize.TokenInfo]:
        """Return the complete set of tokens for a file."""
        if self._file_tokens is None:
            # Tokenize from the already-read lines rather than re-reading
            # the file, so in-memory `lines` overrides are respected.
            line_iter = iter(self.lines)
            self._file_tokens = list(
                tokenize.generate_tokens(lambda: next(line_iter))
            )
        return self._file_tokens

    def fstring_start(self, lineno: int) -> None:
        """Signal the beginning of an fstring."""
        self._fstring_start = lineno

    def multiline_string(
        self, token: tokenize.TokenInfo
    ) -> Generator[str, None, None]:
        """Iterate through the lines of a multiline string.

        :param token: The token that ends the multiline string (either a
            STRING token or an FSTRING_END token).
        """
        if token.type == FSTRING_END:
            # f-strings tokenize in pieces; the recorded start line is the
            # one noted by :meth:`fstring_start`, not this token's start.
            start = self._fstring_start
        else:
            start = token.start[0]

        self.multiline = True
        self.line_number = start
        # intentionally don't include the last line, that line will be
        # terminated later by a future end-of-line
        for _ in range(start, token.end[0]):
            yield self.lines[self.line_number - 1]
            self.line_number += 1
        self.multiline = False

    def reset_blank_before(self) -> None:
        """Reset the blank_before attribute to zero."""
        self.blank_before = 0

    def delete_first_token(self) -> None:
        """Delete the first token in the list of tokens."""
        del self.tokens[0]

    def visited_new_blank_line(self) -> None:
        """Note that we visited a new blank line."""
        self.blank_lines += 1

    def update_state(self, mapping: _LogicalMapping) -> None:
        """Update the indent level based on the logical line mapping."""
        (start_row, start_col) = mapping[0][1]
        start_line = self.lines[start_row - 1]
        self.indent_level = expand_indent(start_line[:start_col])
        if self.blank_before < self.blank_lines:
            self.blank_before = self.blank_lines

    def update_checker_state_for(self, plugin: LoadedPlugin) -> None:
        """Update the checker_state attribute for the plugin.

        Only plugins that declare a ``checker_state`` parameter get a
        (per-plugin, persistent) state dictionary.
        """
        if "checker_state" in plugin.parameters:
            self.checker_state = self._checker_states.setdefault(
                plugin.entry_name, {}
            )

    def next_logical_line(self) -> None:
        """Record the previous logical line.

        This also resets the tokens list and the blank_lines count.
        """
        if self.logical_line:
            self.previous_indent_level = self.indent_level
            self.previous_logical = self.logical_line
            if not self.indent_level:
                self.previous_unindented_logical_line = self.logical_line
        self.blank_lines = 0
        self.tokens = []

    def build_logical_line_tokens(self) -> _Logical:  # noqa: C901
        """Build the mapping, comments, and logical line lists."""
        logical = []
        comments = []
        mapping: _LogicalMapping = []
        length = 0
        previous_row = previous_column = None
        for token_type, text, start, end, line in self.tokens:
            if token_type in SKIP_TOKENS:
                continue
            if not mapping:
                mapping = [(0, start)]
            if token_type == tokenize.COMMENT:
                comments.append(text)
                continue
            if token_type == tokenize.STRING:
                # Replace string contents with 'xxx' so checks don't match
                # syntax inside literals.
                text = mutate_string(text)
            elif token_type == FSTRING_MIDDLE:
                text = "x" * len(text)
            if previous_row:
                (start_row, start_column) = start
                if previous_row != start_row:
                    # Token continues on a new physical line: join with a
                    # single space unless bracket context makes it redundant.
                    row_index = previous_row - 1
                    column_index = previous_column - 1
                    previous_text = self.lines[row_index][column_index]
                    if previous_text == "," or (
                        previous_text not in "{[(" and text not in "}])"
                    ):
                        text = f" {text}"
                elif previous_column != start_column:
                    # Preserve intra-line whitespace between tokens.
                    text = line[previous_column:start_column] + text
            logical.append(text)
            length += len(text)
            mapping.append((length, end))
            (previous_row, previous_column) = end
        return comments, logical, mapping

    def build_ast(self) -> ast.AST:
        """Build an abstract syntax tree from the list of lines."""
        return ast.parse("".join(self.lines))

    def build_logical_line(self) -> tuple[str, str, _LogicalMapping]:
        """Build a logical line from the current tokens list."""
        comments, logical, mapping_list = self.build_logical_line_tokens()
        joined_comments = "".join(comments)
        self.logical_line = "".join(logical)
        self.statistics["logical lines"] += 1
        return joined_comments, self.logical_line, mapping_list

    def keyword_arguments_for(
        self,
        parameters: dict[str, bool],
        arguments: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate the keyword arguments for a list of parameters.

        :param parameters: Mapping of parameter name to whether it is
            required.
        :param arguments: Arguments already provided; these are skipped.
        :raises AttributeError: If a *required* parameter is not an
            attribute of this processor.
        """
        ret = {}
        for param, required in parameters.items():
            if param in arguments:
                continue
            try:
                ret[param] = getattr(self, param)
            except AttributeError:
                if required:
                    raise
                else:
                    LOG.warning(
                        'Plugin requested optional parameter "%s" '
                        "but this is not an available parameter.",
                        param,
                    )
        return ret

    def generate_tokens(self) -> Generator[tokenize.TokenInfo, None, None]:
        """Tokenize the file and yield the tokens."""
        for token in tokenize.generate_tokens(self.next_line):
            # Stop once tokens report positions past the end of the file.
            if token[2][0] > self.total_lines:
                break
            self.tokens.append(token)
            yield token

    def _noqa_line_range(self, min_line: int, max_line: int) -> dict[int, str]:
        # Map every line number in [min_line, max_line] to the joined text
        # of those lines, so a `noqa` anywhere in a multiline construct
        # applies to the whole construct.
        line_range = range(min_line, max_line + 1)
        joined = "".join(self.lines[min_line - 1 : max_line])
        return dict.fromkeys(line_range, joined)

    def noqa_line_for(self, line_number: int) -> str | None:
        """Retrieve the line which will be used to determine noqa."""
        if self._noqa_line_mapping is None:
            try:
                file_tokens = self.file_tokens
            except (tokenize.TokenError, SyntaxError):
                # if we failed to parse the file tokens, we'll always fail in
                # the future, so set this so the code does not try again
                self._noqa_line_mapping = {}
            else:
                ret = {}

                # Track the physical span of the current logical line;
                # len(lines) + 2 / -1 are sentinels that any real token
                # position will replace.
                min_line = len(self.lines) + 2
                max_line = -1
                for tp, _, (s_line, _), (e_line, _), _ in file_tokens:
                    if tp == tokenize.ENDMARKER:
                        break

                    min_line = min(min_line, s_line)
                    max_line = max(max_line, e_line)

                    if tp in (tokenize.NL, tokenize.NEWLINE):
                        ret.update(self._noqa_line_range(min_line, max_line))

                        min_line = len(self.lines) + 2
                        max_line = -1

                # in newer versions of python, a `NEWLINE` token is inserted
                # at the end of the file even if it doesn't have one.
                # on old pythons, they will not have hit a `NEWLINE`
                if max_line != -1:
                    ret.update(self._noqa_line_range(min_line, max_line))

                self._noqa_line_mapping = ret

        # NOTE(sigmavirus24): Some plugins choose to report errors for empty
        # files on Line 1. In those cases, we shouldn't bother trying to
        # retrieve a physical line (since none exist).
        return self._noqa_line_mapping.get(line_number)

    def next_line(self) -> str:
        """Get the next line from the list."""
        if self.line_number >= self.total_lines:
            return ""
        # line_number is the *previous* line's 1-based number, which is the
        # 0-based index of the next line.
        line = self.lines[self.line_number]
        self.line_number += 1
        if self.indent_char is None and line[:1] in defaults.WHITESPACE:
            self.indent_char = line[0]
        return line

    def read_lines(self) -> list[str]:
        """Read the lines for this file checker."""
        if self.filename == "-":
            self.filename = self.options.stdin_display_name or "stdin"
            lines = self.read_lines_from_stdin()
        else:
            lines = self.read_lines_from_filename()
        return lines

    def read_lines_from_filename(self) -> list[str]:
        """Read the lines for a file."""
        try:
            with tokenize.open(self.filename) as fd:
                return fd.readlines()
        except (SyntaxError, UnicodeError):
            # If we can't detect the codec with tokenize.detect_encoding, or
            # the detected encoding is incorrect, just fallback to latin-1.
            with open(self.filename, encoding="latin-1") as fd:
                return fd.readlines()

    def read_lines_from_stdin(self) -> list[str]:
        """Read the lines from standard in."""
        return utils.stdin_get_lines()

    def should_ignore_file(self) -> bool:
        """Check if ``flake8: noqa`` is in the file to be ignored.

        :returns:
            True if a line matches :attr:`defaults.NOQA_FILE`,
            otherwise False
        """
        if not self.options.disable_noqa and any(
            defaults.NOQA_FILE.match(line) for line in self.lines
        ):
            return True
        elif any(defaults.NOQA_FILE.search(line) for line in self.lines):
            # `search` (vs `match` above) hits when the marker shares a line
            # with code; warn but do not ignore the file.
            LOG.warning(
                "Detected `flake8: noqa` on line with code. To ignore an "
                "error on a line use `noqa` instead."
            )
            return False
        else:
            return False

    def strip_utf_bom(self) -> None:
        """Strip the UTF bom from the lines of the file."""
        if not self.lines:
            # If we have nothing to analyze quit early
            return

        first_byte = ord(self.lines[0][0])
        if first_byte not in (0xEF, 0xFEFF):
            return

        # If the first byte of the file is a UTF-8 BOM, strip it
        if first_byte == 0xFEFF:
            # Properly decoded BOM: a single U+FEFF character.
            self.lines[0] = self.lines[0][1:]
        elif self.lines[0][:3] == "\xEF\xBB\xBF":
            # Raw UTF-8 BOM bytes read via the latin-1 fallback.
            self.lines[0] = self.lines[0][3:]
  341. def is_eol_token(token: tokenize.TokenInfo) -> bool:
  342. """Check if the token is an end-of-line token."""
  343. return token[0] in NEWLINE or token[4][token[3][1] :].lstrip() == "\\\n"
  344. def is_multiline_string(token: tokenize.TokenInfo) -> bool:
  345. """Check if this is a multiline string."""
  346. return token.type == FSTRING_END or (
  347. token.type == tokenize.STRING and "\n" in token.string
  348. )
  349. def token_is_newline(token: tokenize.TokenInfo) -> bool:
  350. """Check if the token type is a newline token type."""
  351. return token[0] in NEWLINE
  352. def count_parentheses(current_parentheses_count: int, token_text: str) -> int:
  353. """Count the number of parentheses."""
  354. if token_text in "([{": # nosec
  355. return current_parentheses_count + 1
  356. elif token_text in "}])": # nosec
  357. return current_parentheses_count - 1
  358. return current_parentheses_count
  359. def expand_indent(line: str) -> int:
  360. r"""Return the amount of indentation.
  361. Tabs are expanded to the next multiple of 8.
  362. >>> expand_indent(' ')
  363. 4
  364. >>> expand_indent('\t')
  365. 8
  366. >>> expand_indent(' \t')
  367. 8
  368. >>> expand_indent(' \t')
  369. 16
  370. """
  371. return len(line.expandtabs(8))
  372. # NOTE(sigmavirus24): This was taken wholesale from
  373. # https://github.com/PyCQA/pycodestyle. The in-line comments were edited to be
  374. # more descriptive.
  375. def mutate_string(text: str) -> str:
  376. """Replace contents with 'xxx' to prevent syntax matching.
  377. >>> mutate_string('"abc"')
  378. '"xxx"'
  379. >>> mutate_string("'''abc'''")
  380. "'''xxx'''"
  381. >>> mutate_string("r'abc'")
  382. "r'xxx'"
  383. """
  384. # NOTE(sigmavirus24): If there are string modifiers (e.g., b, u, r)
  385. # use the last "character" to determine if we're using single or double
  386. # quotes and then find the first instance of it
  387. start = text.index(text[-1]) + 1
  388. end = len(text) - 1
  389. # Check for triple-quoted strings
  390. if text[-3:] in ('"""', "'''"):
  391. start += 2
  392. end -= 2
  393. return text[:start] + "x" * (end - start) + text[end:]