  1. """
  2. pygments.lexers.textfmts
  3. ~~~~~~~~~~~~~~~~~~~~~~~~
  4. Lexers for various text formats.
  5. :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
  6. :license: BSD, see LICENSE for details.
  7. """
  8. import re
  9. from pygments.lexers import guess_lexer, get_lexer_by_name
  10. from pygments.lexer import RegexLexer, bygroups, default, include
  11. from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
  12. Number, Generic, Literal, Punctuation
  13. from pygments.util import ClassNotFound
  14. __all__ = ['IrcLogsLexer', 'TodotxtLexer', 'HttpLexer', 'GettextLexer',
  15. 'NotmuchLexer', 'KernelLogLexer']
class IrcLogsLexer(RegexLexer):
    """
    Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.

    A log line is an optional timestamp followed by a nick marker and the
    message text; plain lines that match none of the rules fall through as
    ``Text``.
    """

    name = 'IRC logs'
    aliases = ['irc']
    filenames = ['*.weechatlog']
    mimetypes = ['text/x-irclog']

    # VERBOSE lets the timestamp pattern below carry inline comments;
    # MULTILINE anchors ^/$ at each log line.
    flags = re.VERBOSE | re.MULTILINE

    # Optional leading timestamp, covering the three supported log styles.
    # Note: whitespace and "#" comments inside this string are ignored by
    # the regex engine because of re.VERBOSE.
    timestamp = r"""
        (
          # irssi / xchat and others
          (?: \[|\()?                  # Opening bracket or paren for the timestamp
            (?: # Timestamp
                (?: (?:\d{1,4} [-/])*  # Date as - or /-separated groups of digits
                    (?:\d{1,4})
                 [T ])?                # Date/time separator: T or space
                (?: \d?\d [:.])*       # Time as :/.-separated groups of 1 or 2 digits
                    (?: \d?\d)
            )
          (?: \]|\))?\s+               # Closing bracket or paren for the timestamp
        |
          # weechat
          \d{4}\s\w{3}\s\d{2}\s        # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        |
          # xchat
          \w{3}\s\d{2}\s               # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        )?
    """

    tokens = {
        'root': [
            # log start/end
            (r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
            # hack: a line that is nothing but a <nick> marker
            ("^" + timestamp + r'(\s*<[^>]*>\s*)$',
             bygroups(Comment.Preproc, Name.Tag)),
            # normal msgs: "<nick> message" — message handled in 'msg' state
            ("^" + timestamp + r"""
                (\s*<.*?>\s*)          # Nick """,
             bygroups(Comment.Preproc, Name.Tag), 'msg'),
            # /me msgs: "* nick does something"
            ("^" + timestamp + r"""
                (\s*[*]\s+)            # Star
                (\S+\s+.*?\n)          # Nick + rest of message """,
             bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
            # join/part msgs: "*** nick ..." or arrow markers like "-->"
            ("^" + timestamp + r"""
                (\s*(?:\*{3}|<?-[!@=P]?->?)\s*)  # Star(s) or symbols
                (\S+\s+)                         # Nick + Space
                (.*?\n)                          # Rest of message """,
             bygroups(Comment.Preproc, Keyword, String, Comment)),
            # anything else: pass the line through unhighlighted
            (r"^.*?\n", Text),
        ],
        'msg': [
            # "word:" prefixes (addressing a nick), but not URLs like "http://"
            (r"\S+:(?!//)", Name.Attribute),  # Prefix
            (r".*\n", Text, '#pop'),
        ],
    }
class GettextLexer(RegexLexer):
    """
    Lexer for Gettext catalog files.

    Highlights ``#,`` flag and ``#:`` reference comments, plain comments,
    header entries inside the first msgstr, and the msgid/msgstr keywords.

    .. versionadded:: 0.9
    """

    name = 'Gettext Catalog'
    aliases = ['pot', 'po']
    filenames = ['*.pot', '*.po']
    mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']

    tokens = {
        'root': [
            # "#," flag comments and "#:" source-reference comments
            (r'^#,\s.*?$', Keyword.Type),
            (r'^#:\s.*?$', Keyword.Declaration),
            # (r'^#$', Comment),
            # remaining comment forms: "#", "#.", "#|", "#~", "# ..."
            (r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
            # header fields such as "Content-Type: ..." inside a string
            (r'^(")([A-Za-z-]+:)(.*")$',
             bygroups(String, Name.Property, String)),
            # continuation string lines
            (r'^".*"$', String),
            (r'^(msgid|msgid_plural|msgstr|msgctxt)(\s+)(".*")$',
             bygroups(Name.Variable, Text, String)),
            # plural forms: msgstr[0], msgstr[1], ...
            (r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
             bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
        ]
    }
  99. class HttpLexer(RegexLexer):
  100. """
  101. Lexer for HTTP sessions.
  102. .. versionadded:: 1.5
  103. """
  104. name = 'HTTP'
  105. aliases = ['http']
  106. flags = re.DOTALL
  107. def get_tokens_unprocessed(self, text, stack=('root',)):
  108. """Reset the content-type state."""
  109. self.content_type = None
  110. return RegexLexer.get_tokens_unprocessed(self, text, stack)
  111. def header_callback(self, match):
  112. if match.group(1).lower() == 'content-type':
  113. content_type = match.group(5).strip()
  114. if ';' in content_type:
  115. content_type = content_type[:content_type.find(';')].strip()
  116. self.content_type = content_type
  117. yield match.start(1), Name.Attribute, match.group(1)
  118. yield match.start(2), Text, match.group(2)
  119. yield match.start(3), Operator, match.group(3)
  120. yield match.start(4), Text, match.group(4)
  121. yield match.start(5), Literal, match.group(5)
  122. yield match.start(6), Text, match.group(6)
  123. def continuous_header_callback(self, match):
  124. yield match.start(1), Text, match.group(1)
  125. yield match.start(2), Literal, match.group(2)
  126. yield match.start(3), Text, match.group(3)
  127. def content_callback(self, match):
  128. content_type = getattr(self, 'content_type', None)
  129. content = match.group()
  130. offset = match.start()
  131. if content_type:
  132. from pygments.lexers import get_lexer_for_mimetype
  133. possible_lexer_mimetypes = [content_type]
  134. if '+' in content_type:
  135. # application/calendar+xml can be treated as application/xml
  136. # if there's not a better match.
  137. general_type = re.sub(r'^(.*)/.*\+(.*)$', r'\1/\2',
  138. content_type)
  139. possible_lexer_mimetypes.append(general_type)
  140. for i in possible_lexer_mimetypes:
  141. try:
  142. lexer = get_lexer_for_mimetype(i)
  143. except ClassNotFound:
  144. pass
  145. else:
  146. for idx, token, value in lexer.get_tokens_unprocessed(content):
  147. yield offset + idx, token, value
  148. return
  149. yield offset, Text, content
  150. tokens = {
  151. 'root': [
  152. (r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH|CONNECT)( +)([^ ]+)( +)'
  153. r'(HTTP)(/)(1\.[01]|2(?:\.0)?|3)(\r?\n|\Z)',
  154. bygroups(Name.Function, Text, Name.Namespace, Text,
  155. Keyword.Reserved, Operator, Number, Text),
  156. 'headers'),
  157. (r'(HTTP)(/)(1\.[01]|2(?:\.0)?|3)( +)(\d{3})(?:( +)([^\r\n]*))?(\r?\n|\Z)',
  158. bygroups(Keyword.Reserved, Operator, Number, Text, Number, Text,
  159. Name.Exception, Text),
  160. 'headers'),
  161. ],
  162. 'headers': [
  163. (r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|\Z)', header_callback),
  164. (r'([\t ]+)([^\r\n]+)(\r?\n|\Z)', continuous_header_callback),
  165. (r'\r?\n', Text, 'content')
  166. ],
  167. 'content': [
  168. (r'.+', content_callback)
  169. ]
  170. }
  171. def analyse_text(text):
  172. return text.startswith(('GET /', 'POST /', 'PUT /', 'DELETE /', 'HEAD /',
  173. 'OPTIONS /', 'TRACE /', 'PATCH /', 'CONNECT '))
class TodotxtLexer(RegexLexer):
    """
    Lexer for Todo.txt todo list format.

    Each line is one task; a task is routed to the 'complete' or
    'incomplete' state based on its leading marker (completion "x",
    priority, date, context, or project).

    .. versionadded:: 2.0
    """

    name = 'Todotxt'
    url = 'http://todotxt.com/'
    aliases = ['todotxt']
    # *.todotxt is not a standard extension for Todo.txt files; including it
    # makes testing easier, and also makes autodetecting file type easier.
    filenames = ['todo.txt', '*.todotxt']
    mimetypes = ['text/x-todo']

    # Aliases mapping standard token types of Todo.txt format concepts
    CompleteTaskText = Operator  # Chosen to de-emphasize complete tasks
    IncompleteTaskText = Text    # Incomplete tasks should look like plain text

    # Priority should have most emphasis to indicate importance of tasks
    Priority = Generic.Heading
    # Dates should have next most emphasis because time is important
    Date = Generic.Subheading

    # Project and context should have equal weight, and be in different colors
    Project = Generic.Error
    Context = String

    # If tag functionality is added, it should have the same weight as Project
    # and Context, and a different color. Generic.Traceback would work well.

    # Regex patterns for building up rules; dates, priorities, projects, and
    # contexts are all atomic
    # TODO: Make date regex more ISO 8601 compliant
    date_regex = r'\d{4,}-\d{2}-\d{2}'
    priority_regex = r'\([A-Z]\)'
    project_regex = r'\+\S+'
    context_regex = r'@\S+'

    # Compound regex expressions
    complete_one_date_regex = r'(x )(' + date_regex + r')'
    complete_two_date_regex = (complete_one_date_regex + r'( )(' +
                               date_regex + r')')
    priority_date_regex = r'(' + priority_regex + r')( )(' + date_regex + r')'

    tokens = {
        # Should parse starting at beginning of line; each line is a task
        'root': [
            # Complete task entry points: two total:
            # 1. Complete task with two dates
            (complete_two_date_regex, bygroups(CompleteTaskText, Date,
                                               CompleteTaskText, Date),
             'complete'),
            # 2. Complete task with one date
            (complete_one_date_regex, bygroups(CompleteTaskText, Date),
             'complete'),
            # Incomplete task entry points: six total:
            # 1. Priority plus date
            (priority_date_regex, bygroups(Priority, IncompleteTaskText, Date),
             'incomplete'),
            # 2. Priority only
            (priority_regex, Priority, 'incomplete'),
            # 3. Leading date
            (date_regex, Date, 'incomplete'),
            # 4. Leading context
            (context_regex, Context, 'incomplete'),
            # 5. Leading project
            (project_regex, Project, 'incomplete'),
            # 6. Non-whitespace catch-all
            (r'\S+', IncompleteTaskText, 'incomplete'),
        ],

        # Parse a complete task
        'complete': [
            # Newline indicates end of task, should return to root
            (r'\s*\n', CompleteTaskText, '#pop'),
            # Tokenize contexts and projects
            (context_regex, Context),
            (project_regex, Project),
            # Tokenize non-whitespace text
            (r'\S+', CompleteTaskText),
            # Tokenize whitespace not containing a newline
            (r'\s+', CompleteTaskText),
        ],

        # Parse an incomplete task
        'incomplete': [
            # Newline indicates end of task, should return to root
            (r'\s*\n', IncompleteTaskText, '#pop'),
            # Tokenize contexts and projects
            (context_regex, Context),
            (project_regex, Project),
            # Tokenize non-whitespace text
            (r'\S+', IncompleteTaskText),
            # Tokenize whitespace not containing a newline
            (r'\s+', IncompleteTaskText),
        ],
    }
  261. class NotmuchLexer(RegexLexer):
  262. """
  263. For Notmuch email text format.
  264. .. versionadded:: 2.5
  265. Additional options accepted:
  266. `body_lexer`
  267. If given, highlight the contents of the message body with the specified
  268. lexer, else guess it according to the body content (default: ``None``).
  269. """
  270. name = 'Notmuch'
  271. url = 'https://notmuchmail.org/'
  272. aliases = ['notmuch']
  273. def _highlight_code(self, match):
  274. code = match.group(1)
  275. try:
  276. if self.body_lexer:
  277. lexer = get_lexer_by_name(self.body_lexer)
  278. else:
  279. lexer = guess_lexer(code.strip())
  280. except ClassNotFound:
  281. lexer = get_lexer_by_name('text')
  282. yield from lexer.get_tokens_unprocessed(code)
  283. tokens = {
  284. 'root': [
  285. (r'\fmessage\{\s*', Keyword, ('message', 'message-attr')),
  286. ],
  287. 'message-attr': [
  288. (r'(\s*id:\s*)(\S+)', bygroups(Name.Attribute, String)),
  289. (r'(\s*(?:depth|match|excluded):\s*)(\d+)',
  290. bygroups(Name.Attribute, Number.Integer)),
  291. (r'(\s*filename:\s*)(.+\n)',
  292. bygroups(Name.Attribute, String)),
  293. default('#pop'),
  294. ],
  295. 'message': [
  296. (r'\fmessage\}\n', Keyword, '#pop'),
  297. (r'\fheader\{\n', Keyword, 'header'),
  298. (r'\fbody\{\n', Keyword, 'body'),
  299. ],
  300. 'header': [
  301. (r'\fheader\}\n', Keyword, '#pop'),
  302. (r'((?:Subject|From|To|Cc|Date):\s*)(.*\n)',
  303. bygroups(Name.Attribute, String)),
  304. (r'(.*)(\s*\(.*\))(\s*\(.*\)\n)',
  305. bygroups(Generic.Strong, Literal, Name.Tag)),
  306. ],
  307. 'body': [
  308. (r'\fpart\{\n', Keyword, 'part'),
  309. (r'\f(part|attachment)\{\s*', Keyword, ('part', 'part-attr')),
  310. (r'\fbody\}\n', Keyword, '#pop'),
  311. ],
  312. 'part-attr': [
  313. (r'(ID:\s*)(\d+)', bygroups(Name.Attribute, Number.Integer)),
  314. (r'(,\s*)((?:Filename|Content-id):\s*)([^,]+)',
  315. bygroups(Punctuation, Name.Attribute, String)),
  316. (r'(,\s*)(Content-type:\s*)(.+\n)',
  317. bygroups(Punctuation, Name.Attribute, String)),
  318. default('#pop'),
  319. ],
  320. 'part': [
  321. (r'\f(?:part|attachment)\}\n', Keyword, '#pop'),
  322. (r'\f(?:part|attachment)\{\s*', Keyword, ('#push', 'part-attr')),
  323. (r'^Non-text part: .*\n', Comment),
  324. (r'(?s)(.*?(?=\f(?:part|attachment)\}\n))', _highlight_code),
  325. ],
  326. }
  327. def analyse_text(text):
  328. return 1.0 if text.startswith('\fmessage{') else 0.0
  329. def __init__(self, **options):
  330. self.body_lexer = options.get('body_lexer', None)
  331. RegexLexer.__init__(self, **options)
class KernelLogLexer(RegexLexer):
    """
    For Linux Kernel log ("dmesg") output.

    Lines carrying an explicit "facility:level" prefix are routed to a
    matching severity state; bare "[timestamp]" lines go through the
    'unknown' state, which guesses the severity from keywords in the text.

    .. versionadded:: 2.6
    """

    name = 'Kernel log'
    aliases = ['kmsg', 'dmesg']
    filenames = ['*.kmsg', '*.dmesg']

    tokens = {
        'root': [
            # explicit syslog-style severity prefixes; the lookahead keeps
            # the "[timestamp]" for the severity state to tokenize
            (r'^[^:]+:debug : (?=\[)', Text, 'debug'),
            (r'^[^:]+:info : (?=\[)', Text, 'info'),
            (r'^[^:]+:warn : (?=\[)', Text, 'warn'),
            (r'^[^:]+:notice: (?=\[)', Text, 'warn'),
            (r'^[^:]+:err : (?=\[)', Text, 'error'),
            (r'^[^:]+:crit : (?=\[)', Text, 'error'),
            # no severity prefix: decide in 'unknown'
            (r'^(?=\[)', Text, 'unknown'),
        ],
        'unknown': [
            # zero-width matches: classify by keywords in the line, then
            # hand over to the corresponding severity state
            (r'^(?=.+(warning|notice|audit|deprecated))', Text, 'warn'),
            (r'^(?=.+(error|critical|fail|Bug))', Text, 'error'),
            default('info'),
        ],
        'base': [
            # "[  12.345678] " timestamp
            (r'\[[0-9. ]+\] ', Number),
            # "subsystem:" prefix right after the timestamp
            (r'(?<=\] ).+?:', Keyword),
            (r'\n', Text, '#pop'),
        ],
        'debug': [
            include('base'),
            (r'.+\n', Comment, '#pop')
        ],
        'info': [
            include('base'),
            (r'.+\n', Text, '#pop')
        ],
        'warn': [
            include('base'),
            (r'.+\n', Generic.Strong, '#pop')
        ],
        'error': [
            include('base'),
            (r'.+\n', Generic.Error, '#pop')
        ]
    }