parser.py

from __future__ import unicode_literals

import warnings

import bleach
import markdown
from bs4 import BeautifulSoup
from htmlmin.minify import html_minify
from markdown.extensions.fenced_code import FencedCodeExtension

from django.http import Http404
from django.urls import resolve
from django.utils import six

from .bbcode import blocks, inline
from .md.shortimgs import ShortImagesExtension
from .md.striketrough import StriketroughExtension
from .mentions import add_mentions
from .pipeline import pipeline


MISAGO_ATTACHMENT_VIEWS = ('misago:attachment', 'misago:attachment-thumbnail')


def parse(text, request, poster, allow_mentions=True, allow_links=True,
          allow_images=True, allow_blocks=True, force_shva=False, minify=True):
    """
    Message parser

    Utility for flavours to call

    Breaks text into paragraphs; supports code, spoiler and quote blocks,
    headers, lists, images and text styles

    Returns a dict with the parsing results
    """
    md = md_factory(
        allow_links=allow_links,
        allow_images=allow_images,
        allow_blocks=allow_blocks,
    )

    parsing_result = {
        'original_text': text,
        'parsed_text': '',
        'markdown': md,
        'mentions': [],
        'images': [],
        'outgoing_links': [],
        'inside_links': [],
    }

    # Parse text
    parsed_text = md.convert(text)

    # Clean and store parsed text
    parsing_result['parsed_text'] = parsed_text.strip()

    if allow_links:
        linkify_paragraphs(parsing_result)

    parsing_result = pipeline.process_result(parsing_result)

    if allow_mentions:
        add_mentions(request, parsing_result)

    if allow_links or allow_images:
        clean_links(request, parsing_result, force_shva)

    if minify:
        minify_result(parsing_result)

    return parsing_result
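

# A minimal usage sketch for parse(). The call site is an assumption: `request`
# and `poster` come from the caller, and the exact HTML shown is illustrative,
# not guaranteed output:
#
#   result = parse('Hello, [b]world[/b]!', request, poster, minify=False)
#   result['parsed_text']     # e.g. '<p>Hello, <strong>world</strong>!</p>'
#   result['outgoing_links']  # hrefs pointing outside request.get_host()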


def md_factory(allow_links=True, allow_images=True, allow_blocks=True):
    """
    Create and configure markdown object
    """
    md = markdown.Markdown(safe_mode='escape', extensions=['nl2br'])

    # Remove references
    del md.preprocessors['reference']
    del md.inlinePatterns['reference']
    del md.inlinePatterns['image_reference']
    del md.inlinePatterns['short_reference']

    # Add [b], [i], [u]
    md.inlinePatterns.add('bb_b', inline.bold, '<strong')
    md.inlinePatterns.add('bb_i', inline.italics, '<emphasis')
    md.inlinePatterns.add('bb_u', inline.underline, '<emphasis2')

    # Add ~~deleted~~
    striketrough_md = StriketroughExtension()
    striketrough_md.extendMarkdown(md)

    if not allow_links:
        # Remove links
        del md.inlinePatterns['link']
        del md.inlinePatterns['autolink']
        del md.inlinePatterns['automail']

    if allow_images:
        # Add [img]
        short_images_md = ShortImagesExtension()
        short_images_md.extendMarkdown(md)
    else:
        # Remove images
        del md.inlinePatterns['image_link']

    if allow_blocks:
        # Add [hr] and [quote] blocks
        md.parser.blockprocessors.add('bb_hr', blocks.BBCodeHRProcessor(md.parser), '>hr')

        fenced_code = FencedCodeExtension()
        fenced_code.extendMarkdown(md, None)

        code_bbcode = blocks.CodeBlockExtension()
        code_bbcode.extendMarkdown(md)

        quote_bbcode = blocks.QuoteExtension()
        quote_bbcode.extendMarkdown(md)
    else:
        # Remove blocks
        del md.parser.blockprocessors['hashheader']
        del md.parser.blockprocessors['setextheader']
        del md.parser.blockprocessors['code']
        del md.parser.blockprocessors['quote']
        del md.parser.blockprocessors['hr']
        del md.parser.blockprocessors['olist']
        del md.parser.blockprocessors['ulist']

    return pipeline.extend_markdown(md)
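

# An assumed example of building a restricted markdown instance, e.g. for
# contexts that should not allow block-level markup (the call site is
# illustrative, not part of this module):
#
#   md = md_factory(allow_links=True, allow_images=False, allow_blocks=False)
#   md.convert('# hello')  # hashheader is removed, so this renders as an
#                          # ordinary paragraph rather than a header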


def linkify_paragraphs(result):
    result['parsed_text'] = bleach.linkify(
        result['parsed_text'], skip_pre=True, parse_email=True)

    # dirty fix: linkify may still wrap urls inside <code> blocks in <a> tags,
    # so strip those links back out of code samples
    if '<code>' in result['parsed_text'] and '<a' in result['parsed_text']:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            soup = BeautifulSoup(result['parsed_text'], 'html5lib')
        for link in soup.select('code > a'):
            link.replace_with(BeautifulSoup(link.string, 'html.parser'))

        # [6:-7] trims <body></body> wrap
        result['parsed_text'] = six.text_type(soup.body)[6:-7]


def clean_links(request, result, force_shva=False):
    host = request.get_host()

    soup = BeautifulSoup(result['parsed_text'], 'html5lib')
    for link in soup.find_all('a'):
        if is_internal_link(link['href'], host):
            link['href'] = clean_internal_link(link['href'], host)
            result['inside_links'].append(link['href'])
            link['href'] = clean_attachment_link(link['href'], force_shva)
        else:
            result['outgoing_links'].append(link['href'])
        if link.string:
            link.string = clean_link_prefix(link.string)

    for img in soup.find_all('img'):
        img['alt'] = clean_link_prefix(img['alt'])
        if is_internal_link(img['src'], host):
            img['src'] = clean_internal_link(img['src'], host)
            result['images'].append(img['src'])
            img['src'] = clean_attachment_link(img['src'], force_shva)
        else:
            result['images'].append(img['src'])

    # [6:-7] trims <body></body> wrap
    result['parsed_text'] = six.text_type(soup.body)[6:-7]


def is_internal_link(link, host):
    if link.startswith('/') and not link.startswith('//'):
        return True

    # compare with the "www." prefix removed from both sides
    # (str.lstrip('www.') would strip a character set and mangle the host)
    link = clean_link_prefix(link).lower()
    link = link[4:] if link.startswith('www.') else link
    host = host.lower()
    host = host[4:] if host.startswith('www.') else host
    return link.startswith(host)


def clean_link_prefix(link):
    if link.lower().startswith('https:'):
        link = link[6:]
    if link.lower().startswith('http:'):
        link = link[5:]
    if link.startswith('//'):
        link = link[2:]
    return link


def clean_internal_link(link, host):
    link = clean_link_prefix(link)

    if link.lower().startswith('www.'):
        link = link[4:]
    if host.lower().startswith('www.'):
        host = host[4:]

    if link.lower().startswith(host):
        link = link[len(host):]

    return link or '/'
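

# Illustrative behaviour of the link helpers, assuming a hypothetical host
# 'example.com' (the domain and paths are examples, not fixtures):
#
#   is_internal_link('/user/bob/', 'example.com')                    # True
#   is_internal_link('http://other.org/', 'example.com')             # False
#   clean_link_prefix('https://example.com/t/123/')                  # 'example.com/t/123/'
#   clean_internal_link('http://example.com/t/123/', 'example.com')  # '/t/123/'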


def clean_attachment_link(link, force_shva=False):
    try:
        resolution = resolve(link)
        url_name = ':'.join(resolution.namespaces + [resolution.url_name])
    except (Http404, ValueError):
        return link

    if url_name in MISAGO_ATTACHMENT_VIEWS:
        if force_shva:
            link = '{}?shva=1'.format(link)
        elif link.endswith('?shva=1'):
            link = link[:-7]
    return link


def minify_result(result):
    # [25:-14] trims <html><head></head><body> and </body></html>
    result['parsed_text'] = html_minify(result['parsed_text'].encode('utf-8'))
    result['parsed_text'] = result['parsed_text'][25:-14]