# unicode.py
  1. # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
  2. # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
  3. # Copyright (c) https://github.com/PyCQA/pylint/blob/main/CONTRIBUTORS.txt
"""Unicode and some other ASCII characters can be used to create programs that run
much different compared to what a human reader would expect from them.

PEP 672 lists some examples.
See: https://www.python.org/dev/peps/pep-0672/

The following checkers are intended to make users aware of these issues.
"""
  10. from __future__ import annotations
  11. import codecs
  12. import contextlib
  13. import io
  14. import re
  15. from collections import OrderedDict
  16. from collections.abc import Iterable
  17. from functools import lru_cache
  18. from tokenize import detect_encoding
  19. from typing import NamedTuple, TypeVar
  20. from astroid import nodes
  21. import pylint.interfaces
  22. import pylint.lint
  23. from pylint import checkers
  24. _StrLike = TypeVar("_StrLike", str, bytes)
  25. # Based on:
  26. # https://golangexample.com/go-linter-which-checks-for-dangerous-unicode-character-sequences/
  27. # We use '\u' because it doesn't require a map lookup and is therefore faster
  28. BIDI_UNICODE = [
  29. "\u202A", # \N{LEFT-TO-RIGHT EMBEDDING}
  30. "\u202B", # \N{RIGHT-TO-LEFT EMBEDDING}
  31. "\u202C", # \N{POP DIRECTIONAL FORMATTING}
  32. "\u202D", # \N{LEFT-TO-RIGHT OVERRIDE}
  33. "\u202E", # \N{RIGHT-TO-LEFT OVERRIDE}
  34. "\u2066", # \N{LEFT-TO-RIGHT ISOLATE}
  35. "\u2067", # \N{RIGHT-TO-LEFT ISOLATE}
  36. "\u2068", # \N{FIRST STRONG ISOLATE}
  37. "\u2069", # \N{POP DIRECTIONAL ISOLATE}
  38. # The following was part of PEP 672:
  39. # https://www.python.org/dev/peps/pep-0672/
  40. # so the list above might not be complete
  41. "\u200F", # \n{RIGHT-TO-LEFT MARK}
  42. # We don't use
  43. # "\u200E" # \n{LEFT-TO-RIGHT MARK}
  44. # as this is the default for latin files and can't be used
  45. # to hide code
  46. ]
  47. class _BadChar(NamedTuple):
  48. """Representation of an ASCII char considered bad."""
  49. name: str
  50. unescaped: str
  51. escaped: str
  52. code: str
  53. help_text: str
  54. def description(self) -> str:
  55. """Used for the detailed error message description."""
  56. return (
  57. f"Invalid unescaped character {self.name}, "
  58. f'use "{self.escaped}" instead.'
  59. )
  60. def human_code(self) -> str:
  61. """Used to generate the human readable error message."""
  62. return f"invalid-character-{self.name}"
  63. # Based on https://www.python.org/dev/peps/pep-0672/
  64. BAD_CHARS = [
  65. _BadChar(
  66. "backspace",
  67. "\b",
  68. "\\b",
  69. "E2510",
  70. (
  71. "Moves the cursor back, so the character after it will overwrite the "
  72. "character before."
  73. ),
  74. ),
  75. _BadChar(
  76. "carriage-return",
  77. "\r",
  78. "\\r",
  79. "E2511",
  80. (
  81. "Moves the cursor to the start of line, subsequent characters overwrite "
  82. "the start of the line."
  83. ),
  84. ),
  85. _BadChar(
  86. "sub",
  87. "\x1A",
  88. "\\x1A",
  89. "E2512",
  90. (
  91. 'Ctrl+Z "End of text" on Windows. Some programs (such as type) ignore '
  92. "the rest of the file after it."
  93. ),
  94. ),
  95. _BadChar(
  96. "esc",
  97. "\x1B",
  98. "\\x1B",
  99. "E2513",
  100. (
  101. "Commonly initiates escape codes which allow arbitrary control "
  102. "of the terminal."
  103. ),
  104. ),
  105. _BadChar(
  106. "nul",
  107. "\0",
  108. "\\0",
  109. "E2514",
  110. "Mostly end of input for python.",
  111. ),
  112. _BadChar(
  113. # Zero Width with Space. At the time of writing not accepted by Python.
  114. # But used in Trojan Source Examples, so still included and tested for.
  115. "zero-width-space",
  116. "\u200B", # \n{ZERO WIDTH SPACE}
  117. "\\u200B",
  118. "E2515",
  119. "Invisible space character could hide real code execution.",
  120. ),
  121. ]
  122. BAD_ASCII_SEARCH_DICT = {char.unescaped: char for char in BAD_CHARS}
  123. def _line_length(line: _StrLike, codec: str) -> int:
  124. """Get the length of a string like line as displayed in an editor."""
  125. if isinstance(line, bytes):
  126. decoded = _remove_bom(line, codec).decode(codec, "replace")
  127. else:
  128. decoded = line
  129. stripped = decoded.rstrip("\n")
  130. if stripped != decoded:
  131. stripped = stripped.rstrip("\r")
  132. return len(stripped)
  133. def _map_positions_to_result(
  134. line: _StrLike,
  135. search_dict: dict[_StrLike, _BadChar],
  136. new_line: _StrLike,
  137. byte_str_length: int = 1,
  138. ) -> dict[int, _BadChar]:
  139. """Get all occurrences of search dict keys within line.
  140. Ignores Windows end of line and can handle bytes as well as string.
  141. Also takes care of encodings for which the length of an encoded code point does not
  142. default to 8 Bit.
  143. """
  144. result: dict[int, _BadChar] = {}
  145. for search_for, char in search_dict.items():
  146. if search_for not in line:
  147. continue
  148. # Special Handling for Windows '\r\n'
  149. if char.unescaped == "\r" and line.endswith(new_line):
  150. ignore_pos = len(line) - 2 * byte_str_length
  151. else:
  152. ignore_pos = None
  153. start = 0
  154. pos = line.find(search_for, start)
  155. while pos > 0:
  156. if pos != ignore_pos:
  157. # Calculate the column
  158. col = int(pos / byte_str_length)
  159. result[col] = char
  160. start = pos + 1
  161. pos = line.find(search_for, start)
  162. return result
  163. UNICODE_BOMS = {
  164. "utf-8": codecs.BOM_UTF8,
  165. "utf-16": codecs.BOM_UTF16,
  166. "utf-32": codecs.BOM_UTF32,
  167. "utf-16le": codecs.BOM_UTF16_LE,
  168. "utf-16be": codecs.BOM_UTF16_BE,
  169. "utf-32le": codecs.BOM_UTF32_LE,
  170. "utf-32be": codecs.BOM_UTF32_BE,
  171. }
  172. BOM_SORTED_TO_CODEC = OrderedDict(
  173. # Sorted by length of BOM of each codec
  174. (UNICODE_BOMS[codec], codec)
  175. for codec in ("utf-32le", "utf-32be", "utf-8", "utf-16le", "utf-16be")
  176. )
  177. UTF_NAME_REGEX_COMPILED = re.compile(
  178. "utf[ -]?(8|16|32)[ -]?(le|be|)?(sig)?", flags=re.IGNORECASE
  179. )
  180. def _normalize_codec_name(codec: str) -> str:
  181. """Make sure the codec name is always given as defined in the BOM dict."""
  182. return UTF_NAME_REGEX_COMPILED.sub(r"utf-\1\2", codec).lower()
  183. def _remove_bom(encoded: bytes, encoding: str) -> bytes:
  184. """Remove the bom if given from a line."""
  185. if encoding not in UNICODE_BOMS:
  186. return encoded
  187. bom = UNICODE_BOMS[encoding]
  188. if encoded.startswith(bom):
  189. return encoded[len(bom) :]
  190. return encoded
  191. def _encode_without_bom(string: str, encoding: str) -> bytes:
  192. """Encode a string but remove the BOM."""
  193. return _remove_bom(string.encode(encoding), encoding)
  194. def _byte_to_str_length(codec: str) -> int:
  195. """Return how many byte are usually(!) a character point."""
  196. if codec.startswith("utf-32"):
  197. return 4
  198. if codec.startswith("utf-16"):
  199. return 2
  200. return 1
  201. @lru_cache(maxsize=1000)
  202. def _cached_encode_search(string: str, encoding: str) -> bytes:
  203. """A cached version of encode used for search pattern."""
  204. return _encode_without_bom(string, encoding)
  205. def _fix_utf16_32_line_stream(steam: Iterable[bytes], codec: str) -> Iterable[bytes]:
  206. """Handle line ending for UTF16 and UTF32 correctly.
  207. Currently, Python simply strips the required zeros after \n after the
  208. line ending. Leading to lines that can't be decoded properly
  209. """
  210. if not codec.startswith("utf-16") and not codec.startswith("utf-32"):
  211. yield from steam
  212. else:
  213. # First we get all the bytes in memory
  214. content = b"".join(line for line in steam)
  215. new_line = _cached_encode_search("\n", codec)
  216. # Now we split the line by the real new line in the correct encoding
  217. # we can't use split as it would strip the \n that we need
  218. start = 0
  219. while True:
  220. pos = content.find(new_line, start)
  221. if pos >= 0:
  222. yield content[start : pos + len(new_line)]
  223. else:
  224. # Yield the rest and finish
  225. if content[start:]:
  226. yield content[start:]
  227. break
  228. start = pos + len(new_line)
  229. def extract_codec_from_bom(first_line: bytes) -> str:
  230. """Try to extract the codec (unicode only) by checking for the BOM.
  231. For details about BOM see https://unicode.org/faq/utf_bom.html#BOM
  232. Args:
  233. first_line: the first line of a file
  234. Returns:
  235. a codec name
  236. Raises:
  237. ValueError: if no codec was found
  238. """
  239. for bom, codec in BOM_SORTED_TO_CODEC.items():
  240. if first_line.startswith(bom):
  241. return codec
  242. raise ValueError("No BOM found. Could not detect Unicode codec.")
  243. class UnicodeChecker(checkers.BaseRawFileChecker):
  244. """Check characters that could be used to hide bad code to humans.
  245. This includes:
  246. - Bidirectional Unicode (see https://trojansource.codes/)
  247. - Bad ASCII characters (see PEP672)
  248. If a programmer requires to use such a character they should use the escaped
  249. version, that is also much easier to read and does not depend on the editor used.
  250. The Checker also includes a check that UTF-16 and UTF-32 are not used to encode
  251. Python files.
  252. At the time of writing Python supported only UTF-8. See
  253. https://stackoverflow.com/questions/69897842/ and https://bugs.python.org/issue1503789
  254. for background.
  255. """
  256. name = "unicode_checker"
  257. msgs = {
  258. "E2501": (
  259. # This error will be only displayed to users once Python Supports
  260. # UTF-16/UTF-32 (if at all)
  261. "UTF-16 and UTF-32 aren't backward compatible. Use UTF-8 instead",
  262. "invalid-unicode-codec",
  263. (
  264. "For compatibility use UTF-8 instead of UTF-16/UTF-32. "
  265. "See also https://bugs.python.org/issue1503789 for a history "
  266. "of this issue. And "
  267. "https://softwareengineering.stackexchange.com/questions/102205/ "
  268. "for some possible problems when using UTF-16 for instance."
  269. ),
  270. ),
  271. "E2502": (
  272. (
  273. "Contains control characters that can permit obfuscated code "
  274. "executed differently than displayed"
  275. ),
  276. "bidirectional-unicode",
  277. (
  278. "bidirectional unicode are typically not displayed characters required "
  279. "to display right-to-left (RTL) script "
  280. "(i.e. Chinese, Japanese, Arabic, Hebrew, ...) correctly. "
  281. "So can you trust this code? "
  282. "Are you sure it displayed correctly in all editors? "
  283. "If you did not write it or your language is not RTL,"
  284. " remove the special characters, as they could be used to trick you into "
  285. "executing code, "
  286. "that does something else than what it looks like.\n"
  287. "More Information:\n"
  288. "https://en.wikipedia.org/wiki/Bidirectional_text\n"
  289. "https://trojansource.codes/"
  290. ),
  291. ),
  292. "C2503": (
  293. "PEP8 recommends UTF-8 as encoding for Python files",
  294. "bad-file-encoding",
  295. (
  296. "PEP8 recommends UTF-8 default encoding for Python files. See "
  297. "https://peps.python.org/pep-0008/#source-file-encoding"
  298. ),
  299. ),
  300. **{
  301. bad_char.code: (
  302. bad_char.description(),
  303. bad_char.human_code(),
  304. bad_char.help_text,
  305. )
  306. for bad_char in BAD_CHARS
  307. },
  308. }
  309. @staticmethod
  310. def _is_invalid_codec(codec: str) -> bool:
  311. return codec.startswith("utf-16") or codec.startswith("utf-32")
  312. @staticmethod
  313. def _is_unicode(codec: str) -> bool:
  314. return codec.startswith("utf")
  315. @classmethod
  316. def _find_line_matches(cls, line: bytes, codec: str) -> dict[int, _BadChar]:
  317. """Find all matches of BAD_CHARS within line.
  318. Args:
  319. line: the input
  320. codec: that will be used to convert line/or search string into
  321. Return:
  322. A dictionary with the column offset and the BadASCIIChar
  323. """
  324. # We try to decode in Unicode to get the correct column offset
  325. # if we would use bytes, it could be off because UTF-8 has no fixed length
  326. try:
  327. line_search = line.decode(codec, errors="strict")
  328. search_dict = BAD_ASCII_SEARCH_DICT
  329. return _map_positions_to_result(line_search, search_dict, "\n")
  330. except UnicodeDecodeError:
  331. # If we can't decode properly, we simply use bytes, even so the column offsets
  332. # might be wrong a bit, but it is still better then nothing
  333. line_search_byte = line
  334. search_dict_byte: dict[bytes, _BadChar] = {}
  335. for char in BAD_CHARS:
  336. # Some characters might not exist in all encodings
  337. with contextlib.suppress(UnicodeDecodeError):
  338. search_dict_byte[
  339. _cached_encode_search(char.unescaped, codec)
  340. ] = char
  341. return _map_positions_to_result(
  342. line_search_byte,
  343. search_dict_byte,
  344. _cached_encode_search("\n", codec),
  345. byte_str_length=_byte_to_str_length(codec),
  346. )
  347. @staticmethod
  348. def _determine_codec(stream: io.BytesIO) -> tuple[str, int]:
  349. """Determine the codec from the given stream.
  350. first tries https://www.python.org/dev/peps/pep-0263/
  351. and if this fails also checks for BOMs of UTF-16 and UTF-32
  352. to be future-proof.
  353. Args:
  354. stream: The byte stream to analyse
  355. Returns: A tuple consisting of:
  356. - normalized codec name
  357. - the line in which the codec was found
  358. Raises:
  359. SyntaxError: if failing to detect codec
  360. """
  361. try:
  362. # First try to detect encoding with PEP 263
  363. # Doesn't work with UTF-16/32 at the time of writing
  364. # see https://bugs.python.org/issue1503789
  365. codec, lines = detect_encoding(stream.readline)
  366. # lines are empty if UTF-8 BOM is found
  367. codec_definition_line = len(lines) or 1
  368. except SyntaxError as e:
  369. # Codec could not be detected by Python, we try manually to check for
  370. # UTF 16/32 BOMs, which aren't supported by Python at the time of writing.
  371. # This is only included to be future save and handle these codecs as well
  372. stream.seek(0)
  373. try:
  374. codec = extract_codec_from_bom(stream.readline())
  375. codec_definition_line = 1
  376. except ValueError as ve:
  377. # Failed to detect codec, so the syntax error originated not from
  378. # UTF16/32 codec usage. So simply raise the error again.
  379. raise e from ve
  380. return _normalize_codec_name(codec), codec_definition_line
  381. def _check_codec(self, codec: str, codec_definition_line: int) -> None:
  382. """Check validity of the codec."""
  383. if codec != "utf-8":
  384. msg = "bad-file-encoding"
  385. if self._is_invalid_codec(codec):
  386. msg = "invalid-unicode-codec"
  387. self.add_message(
  388. msg,
  389. # Currently Nodes will lead to crashes of pylint
  390. # node=node,
  391. line=codec_definition_line,
  392. end_lineno=codec_definition_line,
  393. confidence=pylint.interfaces.HIGH,
  394. col_offset=None,
  395. end_col_offset=None,
  396. )
  397. def _check_invalid_chars(self, line: bytes, lineno: int, codec: str) -> None:
  398. """Look for chars considered bad."""
  399. matches = self._find_line_matches(line, codec)
  400. for col, char in matches.items():
  401. self.add_message(
  402. char.human_code(),
  403. # Currently Nodes will lead to crashes of pylint
  404. # node=node,
  405. line=lineno,
  406. end_lineno=lineno,
  407. confidence=pylint.interfaces.HIGH,
  408. col_offset=col + 1,
  409. end_col_offset=col + len(char.unescaped) + 1,
  410. )
  411. def _check_bidi_chars(self, line: bytes, lineno: int, codec: str) -> None:
  412. """Look for Bidirectional Unicode, if we use unicode."""
  413. if not self._is_unicode(codec):
  414. return
  415. for dangerous in BIDI_UNICODE:
  416. if _cached_encode_search(dangerous, codec) in line:
  417. # Note that we don't add a col_offset on purpose:
  418. # Using these unicode characters it depends on the editor
  419. # how it displays the location of characters in the line.
  420. # So we mark the complete line.
  421. self.add_message(
  422. "bidirectional-unicode",
  423. # Currently Nodes will lead to crashes of pylint
  424. # node=node,
  425. line=lineno,
  426. end_lineno=lineno,
  427. # We mark the complete line, as bidi controls make it hard
  428. # to determine the correct cursor position within an editor
  429. col_offset=0,
  430. end_col_offset=_line_length(line, codec),
  431. confidence=pylint.interfaces.HIGH,
  432. )
  433. # We look for bidirectional unicode only once per line
  434. # as we mark the complete line anyway
  435. break
  436. def process_module(self, node: nodes.Module) -> None:
  437. """Perform the actual check by checking module stream."""
  438. with node.stream() as stream:
  439. codec, codec_line = self._determine_codec(stream)
  440. self._check_codec(codec, codec_line)
  441. stream.seek(0)
  442. # Check for invalid content (controls/chars)
  443. for lineno, line in enumerate(
  444. _fix_utf16_32_line_stream(stream, codec), start=1
  445. ):
  446. if lineno == 1:
  447. line = _remove_bom(line, codec)
  448. self._check_bidi_chars(line, lineno, codec)
  449. self._check_invalid_chars(line, lineno, codec)
  450. def register(linter: pylint.lint.PyLinter) -> None:
  451. linter.register_checker(UnicodeChecker(linter))