# format.py
  1. # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
  2. # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
  3. # Copyright (c) https://github.com/PyCQA/pylint/blob/main/CONTRIBUTORS.txt
  4. """Python code format's checker.
  5. By default, try to follow Guido's style guide :
  6. https://www.python.org/doc/essays/styleguide/
  7. Some parts of the process_token method is based from The Tab Nanny std module.
  8. """
from __future__ import annotations

import sys
import tokenize
from functools import reduce
from re import Match
from typing import TYPE_CHECKING

from astroid import nodes

from pylint.checkers import BaseRawFileChecker, BaseTokenChecker
from pylint.checkers.utils import only_required_for_messages
from pylint.constants import WarningScope
from pylint.interfaces import HIGH
from pylint.typing import MessageDefinitionTuple
from pylint.utils.pragma_parser import OPTION_PO, PragmaParserError, parse_pragma

# Imported only for type annotations; avoids a runtime import cycle with pylint.lint.
if TYPE_CHECKING:
    from pylint.lint import PyLinter

# typing.Literal exists from Python 3.8; older interpreters need typing_extensions.
if sys.version_info >= (3, 8):
    from typing import Literal
else:
    from typing_extensions import Literal
  28. _KEYWORD_TOKENS = {
  29. "assert",
  30. "del",
  31. "elif",
  32. "except",
  33. "for",
  34. "if",
  35. "in",
  36. "not",
  37. "raise",
  38. "return",
  39. "while",
  40. "yield",
  41. "with",
  42. "=",
  43. ":=",
  44. }
  45. _JUNK_TOKENS = {tokenize.COMMENT, tokenize.NL}
  46. MSGS: dict[str, MessageDefinitionTuple] = {
  47. "C0301": (
  48. "Line too long (%s/%s)",
  49. "line-too-long",
  50. "Used when a line is longer than a given number of characters.",
  51. ),
  52. "C0302": (
  53. "Too many lines in module (%s/%s)", # was W0302
  54. "too-many-lines",
  55. "Used when a module has too many lines, reducing its readability.",
  56. ),
  57. "C0303": (
  58. "Trailing whitespace",
  59. "trailing-whitespace",
  60. "Used when there is whitespace between the end of a line and the newline.",
  61. ),
  62. "C0304": (
  63. "Final newline missing",
  64. "missing-final-newline",
  65. "Used when the last line in a file is missing a newline.",
  66. ),
  67. "C0305": (
  68. "Trailing newlines",
  69. "trailing-newlines",
  70. "Used when there are trailing blank lines in a file.",
  71. ),
  72. "W0311": (
  73. "Bad indentation. Found %s %s, expected %s",
  74. "bad-indentation",
  75. "Used when an unexpected number of indentation's tabulations or "
  76. "spaces has been found.",
  77. ),
  78. "W0301": (
  79. "Unnecessary semicolon", # was W0106
  80. "unnecessary-semicolon",
  81. 'Used when a statement is ended by a semi-colon (";"), which '
  82. "isn't necessary (that's python, not C ;).",
  83. ),
  84. "C0321": (
  85. "More than one statement on a single line",
  86. "multiple-statements",
  87. "Used when more than on statement are found on the same line.",
  88. {"scope": WarningScope.NODE},
  89. ),
  90. "C0325": (
  91. "Unnecessary parens after %r keyword",
  92. "superfluous-parens",
  93. "Used when a single item in parentheses follows an if, for, or "
  94. "other keyword.",
  95. ),
  96. "C0327": (
  97. "Mixed line endings LF and CRLF",
  98. "mixed-line-endings",
  99. "Used when there are mixed (LF and CRLF) newline signs in a file.",
  100. ),
  101. "C0328": (
  102. "Unexpected line ending format. There is '%s' while it should be '%s'.",
  103. "unexpected-line-ending-format",
  104. "Used when there is different newline than expected.",
  105. ),
  106. }
  107. def _last_token_on_line_is(tokens: TokenWrapper, line_end: int, token: str) -> bool:
  108. return (
  109. line_end > 0
  110. and tokens.token(line_end - 1) == token
  111. or line_end > 1
  112. and tokens.token(line_end - 2) == token
  113. and tokens.type(line_end - 1) == tokenize.COMMENT
  114. )
  115. class TokenWrapper:
  116. """A wrapper for readable access to token information."""
  117. def __init__(self, tokens: list[tokenize.TokenInfo]) -> None:
  118. self._tokens = tokens
  119. def token(self, idx: int) -> str:
  120. return self._tokens[idx][1]
  121. def type(self, idx: int) -> int:
  122. return self._tokens[idx][0]
  123. def start_line(self, idx: int) -> int:
  124. return self._tokens[idx][2][0]
  125. def start_col(self, idx: int) -> int:
  126. return self._tokens[idx][2][1]
  127. def line(self, idx: int) -> str:
  128. return self._tokens[idx][4]
class FormatChecker(BaseTokenChecker, BaseRawFileChecker):
    """Formatting checker.

    Checks for :
    * unauthorized constructions
    * strict indentation
    * line length
    """

    # configuration section name
    name = "format"
    # messages
    msgs = MSGS
    # configuration options
    # for available dict keys/values see the optik parser 'add_option' method
    options = (
        (
            "max-line-length",
            {
                "default": 100,
                "type": "int",
                "metavar": "<int>",
                "help": "Maximum number of characters on a single line.",
            },
        ),
        (
            "ignore-long-lines",
            {
                "type": "regexp",
                "metavar": "<regexp>",
                "default": r"^\s*(# )?<?https?://\S+>?$",
                "help": (
                    "Regexp for a line that is allowed to be longer than the limit."
                ),
            },
        ),
        (
            "single-line-if-stmt",
            {
                "default": False,
                "type": "yn",
                "metavar": "<y or n>",
                "help": (
                    "Allow the body of an if to be on the same "
                    "line as the test if there is no else."
                ),
            },
        ),
        (
            "single-line-class-stmt",
            {
                "default": False,
                "type": "yn",
                "metavar": "<y or n>",
                "help": (
                    "Allow the body of a class to be on the same "
                    "line as the declaration if body contains "
                    "single statement."
                ),
            },
        ),
        (
            "max-module-lines",
            {
                "default": 1000,
                "type": "int",
                "metavar": "<int>",
                "help": "Maximum number of lines in a module.",
            },
        ),
        (
            "indent-string",
            {
                "default": "    ",
                "type": "non_empty_string",
                "metavar": "<string>",
                "help": "String used as indentation unit. This is usually "
                '"    " (4 spaces) or "\\t" (1 tab).',
            },
        ),
        (
            "indent-after-paren",
            {
                "type": "int",
                "metavar": "<int>",
                "default": 4,
                "help": "Number of spaces of indent required inside a hanging "
                "or continued line.",
            },
        ),
        (
            "expected-line-ending-format",
            {
                "type": "choice",
                "metavar": "<empty or LF or CRLF>",
                "default": "",
                "choices": ["", "LF", "CRLF"],
                "help": (
                    "Expected format of line ending, "
                    "e.g. empty (any line ending), LF or CRLF."
                ),
            },
        ),
    )

    def __init__(self, linter: PyLinter) -> None:
        super().__init__(linter)
        # Physical source lines indexed by line number, filled by new_line().
        self._lines: dict[int, str] = {}
        # Lines already examined by visit_default(): 1 = seen,
        # 2 = multiple-statements already emitted for that line.
        self._visited_lines: dict[int, Literal[1, 2]] = {}

    def new_line(self, tokens: TokenWrapper, line_end: int, line_start: int) -> None:
        """A new line has been encountered, process it if necessary."""
        if _last_token_on_line_is(tokens, line_end, ";"):
            self.add_message("unnecessary-semicolon", line=tokens.start_line(line_end))

        line_num = tokens.start_line(line_start)
        line = tokens.line(line_start)
        if tokens.type(line_start) not in _JUNK_TOKENS:
            # Remember only the first physical line of the token's text.
            self._lines[line_num] = line.split("\n")[0]
        self.check_lines(tokens, line_start, line, line_num)

    def process_module(self, node: nodes.Module) -> None:
        # No per-module raw processing needed; all work happens token-by-token
        # in process_tokens().
        pass

    # pylint: disable-next = too-many-return-statements, too-many-branches
    def _check_keyword_parentheses(
        self, tokens: list[tokenize.TokenInfo], start: int
    ) -> None:
        """Check that there are not unnecessary parentheses after a keyword.

        Parens are unnecessary if there is exactly one balanced outer pair on a
        line and contains no commas (i.e. is not a tuple).

        Args:
            tokens: The entire list of Tokens.
            start: The position of the keyword in the token list.
        """
        # If the next token is not a paren, we're fine.
        if tokens[start + 1].string != "(":
            return
        if (
            tokens[start].string == "not"
            and start > 0
            and tokens[start - 1].string == "is"
        ):
            # If this is part of an `is not` expression, we have a binary operator
            # so the parentheses are not necessarily redundant.
            return
        found_and_or = False
        contains_walrus_operator = False
        walrus_operator_depth = 0
        contains_double_parens = 0
        depth = 0
        keyword_token = str(tokens[start].string)
        line_num = tokens[start].start[0]
        for i in range(start, len(tokens) - 1):
            token = tokens[i]
            # If we hit a newline, then assume any parens were for continuation.
            if token.type == tokenize.NL:
                return
            # Since the walrus operator doesn't exist below python3.8, the tokenizer
            # generates independent tokens
            if (
                token.string == ":="  # <-- python3.8+ path
                or token.string + tokens[i + 1].string == ":="
            ):
                contains_walrus_operator = True
                walrus_operator_depth = depth
            if token.string == "(":
                depth += 1
                if tokens[i + 1].string == "(":
                    contains_double_parens = 1
            elif token.string == ")":
                depth -= 1
                if depth:
                    if contains_double_parens and tokens[i + 1].string == ")":
                        # For walrus operators in `if (not)` conditions and comprehensions
                        if keyword_token in {"in", "if", "not"}:
                            continue
                        return
                    contains_double_parens -= 1
                    continue
                # ')' can't happen after if (foo), since it would be a syntax error.
                if tokens[i + 1].string in {":", ")", "]", "}", "in"} or tokens[
                    i + 1
                ].type in {tokenize.NEWLINE, tokenize.ENDMARKER, tokenize.COMMENT}:
                    if contains_walrus_operator and walrus_operator_depth - 1 == depth:
                        return
                    # The empty tuple () is always accepted.
                    if i == start + 2:
                        return
                    if found_and_or:
                        return
                    if keyword_token == "in":
                        # This special case was added in https://github.com/PyCQA/pylint/pull/4948
                        # but it could be removed in the future. Avoid churn for now.
                        return
                    self.add_message(
                        "superfluous-parens", line=line_num, args=keyword_token
                    )
                return
            elif depth == 1:
                # This is a tuple, which is always acceptable.
                if token[1] == ",":
                    return
                # 'and' and 'or' are the only boolean operators with lower precedence
                # than 'not', so parens are only required when they are found.
                if token[1] in {"and", "or"}:
                    found_and_or = True
                # A yield inside an expression must always be in parentheses,
                # quit early without error.
                elif token[1] == "yield":
                    return
                # A generator expression always has a 'for' token in it, and
                # the 'for' token is only legal inside parens when it is in a
                # generator expression. The parens are necessary here, so bail
                # without an error.
                elif token[1] == "for":
                    return
                # A generator expression can have an 'else' token in it.
                # We check the rest of the tokens to see if any problems occur after
                # the 'else'.
                elif token[1] == "else":
                    if "(" in (i.string for i in tokens[i:]):
                        self._check_keyword_parentheses(tokens[i:], 0)
                    return

    def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
        """Process tokens and search for:

        - too long lines (i.e. longer than <max_chars>)
        - optionally bad construct (if given, bad_construct must be a compiled
          regular expression).
        """
        indents = [0]
        check_equal = False
        line_num = 0
        self._lines = {}
        self._visited_lines = {}
        self._last_line_ending: str | None = None
        last_blank_line_num = 0
        for idx, (tok_type, string, start, _, line) in enumerate(tokens):
            if start[0] != line_num:
                line_num = start[0]
                # A tokenizer oddity: if an indented line contains a multi-line
                # docstring, the line member of the INDENT token does not contain
                # the full line; therefore we check the next token on the line.
                if tok_type == tokenize.INDENT:
                    self.new_line(TokenWrapper(tokens), idx - 1, idx + 1)
                else:
                    self.new_line(TokenWrapper(tokens), idx - 1, idx)

            if tok_type == tokenize.NEWLINE:
                # a program statement, or ENDMARKER, will eventually follow,
                # after some (possibly empty) run of tokens of the form
                #     (NL | COMMENT)* (INDENT | DEDENT+)?
                # If an INDENT appears, setting check_equal is wrong, and will
                # be undone when we see the INDENT.
                check_equal = True
                self._check_line_ending(string, line_num)
            elif tok_type == tokenize.INDENT:
                check_equal = False
                self.check_indent_level(string, indents[-1] + 1, line_num)
                indents.append(indents[-1] + 1)
            elif tok_type == tokenize.DEDENT:
                # there's nothing we need to check here! what's important is
                # that when the run of DEDENTs ends, the indentation of the
                # program statement (or ENDMARKER) that triggered the run is
                # equal to what's left at the top of the indents stack
                check_equal = True
                if len(indents) > 1:
                    del indents[-1]
            elif tok_type == tokenize.NL:
                if not line.strip("\r\n"):
                    last_blank_line_num = line_num
            elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
                # This is the first concrete token following a NEWLINE, so it
                # must be the first token of the next program statement, or an
                # ENDMARKER; the "line" argument exposes the leading white-space
                # for this statement; in the case of ENDMARKER, line is an empty
                # string, so will properly match the empty string with which the
                # "indents" stack was seeded
                if check_equal:
                    check_equal = False
                    self.check_indent_level(line, indents[-1], line_num)

            # NOTE(review): "lowercase-l-suffix" is not declared in MSGS above;
            # a Python 3 tokenizer rejects a trailing "l" before this runs, so
            # this branch looks unreachable — but it would raise if reached.
            # Confirm whether it should be removed or the message re-declared.
            if tok_type == tokenize.NUMBER and string.endswith("l"):
                self.add_message("lowercase-l-suffix", line=line_num)

            if string in _KEYWORD_TOKENS:
                self._check_keyword_parentheses(tokens, idx)

        line_num -= 1  # to be ok with "wc -l"
        if line_num > self.linter.config.max_module_lines:
            # Get the line where the too-many-lines (or its message id)
            # was disabled or default to 1.
            message_definition = self.linter.msgs_store.get_message_definitions(
                "too-many-lines"
            )[0]
            names = (message_definition.msgid, "too-many-lines")
            lineno = next(
                filter(None, (self.linter._pragma_lineno.get(name) for name in names)),
                1,
            )
            self.add_message(
                "too-many-lines",
                args=(line_num, self.linter.config.max_module_lines),
                line=lineno,
            )

        # See if there are any trailing lines. Do not complain about empty
        # files like __init__.py markers.
        if line_num == last_blank_line_num and line_num > 0:
            self.add_message("trailing-newlines", line=line_num)

    def _check_line_ending(self, line_ending: str, line_num: int) -> None:
        """Emit mixed/unexpected line-ending messages for one physical newline."""
        # check if line endings are mixed
        if self._last_line_ending is not None:
            # line_ending == "" indicates a synthetic newline added at
            # the end of a file that does not, in fact, end with a
            # newline.
            if line_ending and line_ending != self._last_line_ending:
                self.add_message("mixed-line-endings", line=line_num)

        self._last_line_ending = line_ending

        # check if line ending is as expected
        expected = self.linter.config.expected_line_ending_format
        if expected:
            # reduce multiple \n\n\n\n to one \n
            line_ending = reduce(lambda x, y: x + y if x != y else x, line_ending, "")
            line_ending = "LF" if line_ending == "\n" else "CRLF"
            if line_ending != expected:
                self.add_message(
                    "unexpected-line-ending-format",
                    args=(line_ending, expected),
                    line=line_num,
                )

    @only_required_for_messages("multiple-statements")
    def visit_default(self, node: nodes.NodeNG) -> None:
        """Check the node line number and check it if not yet done."""
        if not node.is_statement:
            return
        if not node.root().pure_python:
            return
        prev_sibl = node.previous_sibling()
        if prev_sibl is not None:
            prev_line = prev_sibl.fromlineno
        # The line on which a 'finally': occurs in a 'try/finally'
        # is not directly represented in the AST. We infer it
        # by taking the last line of the body and adding 1, which
        # should be the line of finally:
        elif (
            isinstance(node.parent, nodes.TryFinally) and node in node.parent.finalbody
        ):
            prev_line = node.parent.body[0].tolineno + 1
        elif isinstance(node.parent, nodes.Module):
            prev_line = 0
        else:
            prev_line = node.parent.statement(future=True).fromlineno
        line = node.fromlineno
        assert line, node
        if prev_line == line and self._visited_lines.get(line) != 2:
            self._check_multi_statement_line(node, line)
            return
        if line in self._visited_lines:
            return
        try:
            tolineno = node.blockstart_tolineno
        except AttributeError:
            tolineno = node.tolineno
        assert tolineno, node
        # NOTE(review): `lines` is built below but never used afterwards;
        # the loop's real effect is marking the range in _visited_lines.
        # Looks vestigial — confirm before removing.
        lines: list[str] = []
        for line in range(line, tolineno + 1):  # noqa: B020
            self._visited_lines[line] = 1
            try:
                lines.append(self._lines[line].rstrip())
            except KeyError:
                lines.append("")

    def _check_multi_statement_line(self, node: nodes.NodeNG, line: int) -> None:
        """Check for lines containing multiple statements."""
        # Do not warn about multiple nested context managers
        # in with statements.
        if isinstance(node, nodes.With):
            return
        # For try... except... finally..., the two nodes
        # appear to be on the same line due to how the AST is built.
        if isinstance(node, nodes.TryExcept) and isinstance(
            node.parent, nodes.TryFinally
        ):
            return
        if (
            isinstance(node.parent, nodes.If)
            and not node.parent.orelse
            and self.linter.config.single_line_if_stmt
        ):
            return
        if (
            isinstance(node.parent, nodes.ClassDef)
            and len(node.parent.body) == 1
            and self.linter.config.single_line_class_stmt
        ):
            return
        # Functions stubs with ``Ellipsis`` as body are exempted.
        if (
            isinstance(node.parent, nodes.FunctionDef)
            and isinstance(node, nodes.Expr)
            and isinstance(node.value, nodes.Const)
            and node.value.value is Ellipsis
        ):
            return
        self.add_message("multiple-statements", node=node)
        self._visited_lines[line] = 2

    def check_trailing_whitespace_ending(self, line: str, i: int) -> None:
        """Check that there is no trailing white-space."""
        # exclude \f (formfeed) from the rstrip
        stripped_line = line.rstrip("\t\n\r\v ")
        # Whatever was stripped must be exactly the newline, otherwise the
        # line carried trailing whitespace before it.
        if line[len(stripped_line) :] not in ("\n", "\r\n"):
            self.add_message(
                "trailing-whitespace",
                line=i,
                col_offset=len(stripped_line),
                confidence=HIGH,
            )

    def check_line_length(self, line: str, i: int, checker_off: bool) -> None:
        """Check that the line length is less than the authorized value."""
        max_chars = self.linter.config.max_line_length
        ignore_long_line = self.linter.config.ignore_long_lines
        line = line.rstrip()
        if len(line) > max_chars and not ignore_long_line.search(line):
            if checker_off:
                # Still record the would-be message so `useless-suppression`
                # style reporting stays accurate.
                self.linter.add_ignored_message("line-too-long", i)
            else:
                self.add_message("line-too-long", line=i, args=(len(line), max_chars))

    @staticmethod
    def remove_pylint_option_from_lines(options_pattern_obj: Match[str]) -> str:
        """Remove the `# pylint ...` pattern from lines."""
        lines = options_pattern_obj.string
        purged_lines = (
            lines[: options_pattern_obj.start(1)].rstrip()
            + lines[options_pattern_obj.end(1) :]
        )
        return purged_lines

    @staticmethod
    def is_line_length_check_activated(pylint_pattern_match_object: Match[str]) -> bool:
        """Return True if the line length check is activated."""
        try:
            for pragma in parse_pragma(pylint_pattern_match_object.group(2)):
                if pragma.action == "disable" and "line-too-long" in pragma.messages:
                    return False
        except PragmaParserError:
            # Printing useful information dealing with this error is done in the lint package
            pass
        return True

    @staticmethod
    def specific_splitlines(lines: str) -> list[str]:
        """Split lines according to universal newlines except those in a specific
        sets.
        """
        # Characters str.splitlines() treats as line boundaries but which
        # should NOT split a logical line here; lines ending in one of these
        # are buffered and merged with the following chunk.
        unsplit_ends = {
            "\x0b",  # synonym of \v
            "\x0c",  # synonym of \f
            "\x1c",
            "\x1d",
            "\x1e",
            "\x85",
            "\u2028",
            "\u2029",
        }
        res: list[str] = []
        buffer = ""
        for atomic_line in lines.splitlines(True):
            if atomic_line[-1] not in unsplit_ends:
                res.append(buffer + atomic_line)
                buffer = ""
            else:
                buffer += atomic_line
        return res

    def check_lines(
        self, tokens: TokenWrapper, line_start: int, lines: str, lineno: int
    ) -> None:
        """Check given lines for potential messages.

        Check if lines have:
        - a final newline
        - no trailing white-space
        - less than a maximum number of characters
        """
        # we're first going to do a rough check whether any lines in this set
        # go over the line limit. If none of them do, then we don't need to
        # parse out the pylint options later on and can just assume that these
        # lines are clean

        # we'll also handle the line ending check here to avoid double-iteration
        # unless the line lengths are suspect
        max_chars = self.linter.config.max_line_length

        split_lines = self.specific_splitlines(lines)

        for offset, line in enumerate(split_lines):
            if not line.endswith("\n"):
                self.add_message("missing-final-newline", line=lineno + offset)
                continue
            # We don't test for trailing whitespaces in strings
            # See https://github.com/PyCQA/pylint/issues/6936
            # and https://github.com/PyCQA/pylint/issues/3822
            if tokens.type(line_start) != tokenize.STRING:
                self.check_trailing_whitespace_ending(line, lineno + offset)

        # This check is purposefully simple and doesn't rstrip since this is running
        # on every line you're checking it's advantageous to avoid doing a lot of work
        potential_line_length_warning = any(
            len(line) > max_chars for line in split_lines
        )

        # if there were no lines passing the max_chars config, we don't bother
        # running the full line check (as we've met an even more strict condition)
        if not potential_line_length_warning:
            return

        # Line length check may be deactivated through `pylint: disable` comment
        mobj = OPTION_PO.search(lines)
        checker_off = False
        if mobj:
            if not self.is_line_length_check_activated(mobj):
                checker_off = True
            # The 'pylint: disable whatever' should not be taken into account for line length count
            lines = self.remove_pylint_option_from_lines(mobj)

        # here we re-run specific_splitlines since we have filtered out pylint options above
        for offset, line in enumerate(self.specific_splitlines(lines)):
            self.check_line_length(line, lineno + offset, checker_off)

    def check_indent_level(self, string: str, expected: int, line_num: int) -> None:
        """Check that *string* is indented *expected* indent-units deep.

        Emits ``bad-indentation`` when the measured level differs or when
        stray spaces/tabs follow the whole indent units.
        """
        indent = self.linter.config.indent_string
        if indent == "\\t":  # \t is not interpreted in the configuration file
            indent = "\t"
        level = 0
        unit_size = len(indent)
        # Count whole indentation units at the start of the line.
        while string[:unit_size] == indent:
            string = string[unit_size:]
            level += 1
        # Any leftover whitespace means the indentation is inconsistent.
        suppl = ""
        while string and string[0] in " \t":
            suppl += string[0]
            string = string[1:]
        if level != expected or suppl:
            i_type = "spaces"
            if indent[0] == "\t":
                i_type = "tabs"
            self.add_message(
                "bad-indentation",
                line=line_num,
                args=(level * unit_size + len(suppl), i_type, expected * unit_size),
            )
  657. def register(linter: PyLinter) -> None:
  658. linter.register_checker(FormatChecker(linter))