parser.py

from __future__ import unicode_literals

import bleach
import markdown
from bs4 import BeautifulSoup
from django.core.urlresolvers import resolve
from django.http import Http404
from django.utils import six
from htmlmin.minify import html_minify

from .bbcode import blocks, inline
from .md.shortimgs import ShortImagesExtension
from .mentions import add_mentions
from .pipeline import pipeline

__all__ = ['parse']


def parse(text, request, poster, allow_mentions=True, allow_links=True,
          allow_images=True, allow_blocks=True, force_shva=False,
          minify=True):
    """
    Message parser

    Utility for flavours to call

    Breaks text into paragraphs; supports code, spoiler and quote blocks,
    headers, lists, images and text styles.

    Returns dict object
    """
    md = md_factory(
        allow_links=allow_links,
        allow_images=allow_images,
        allow_blocks=allow_blocks,
    )

    parsing_result = {
        'original_text': text,
        'parsed_text': '',
        'markdown': md,
        'mentions': [],
        'images': [],
        'outgoing_links': [],
        'inside_links': [],
    }

    # Parse text
    parsed_text = md.convert(text)

    # Clean and store parsed text
    parsing_result['parsed_text'] = parsed_text.strip()

    if allow_links:
        linkify_paragraphs(parsing_result)

    parsing_result = pipeline.process_result(parsing_result)

    if allow_mentions:
        add_mentions(request, parsing_result)

    if allow_links or allow_images:
        clean_links(request, parsing_result, force_shva)

    if minify:
        minify_result(parsing_result)

    return parsing_result


def md_factory(allow_links=True, allow_images=True, allow_blocks=True):
    """
    Create and configure markdown object
    """
    md = markdown.Markdown(safe_mode='escape', extensions=['nl2br'])

    # Remove references
    del md.preprocessors['reference']
    del md.inlinePatterns['reference']
    del md.inlinePatterns['image_reference']
    del md.inlinePatterns['short_reference']
    # Add [b], [i], [u]
    md.inlinePatterns.add('bb_b', inline.bold, '<strong')
    md.inlinePatterns.add('bb_i', inline.italics, '<emphasis')
    md.inlinePatterns.add('bb_u', inline.underline, '<emphasis2')

    if not allow_links:
        # Remove links
        del md.inlinePatterns['link']
        del md.inlinePatterns['autolink']
        del md.inlinePatterns['automail']

    if allow_images:
        # Add [img]
        short_images_md = ShortImagesExtension()
        short_images_md.extendMarkdown(md)
    else:
        # Remove images
        del md.inlinePatterns['image_link']

    if allow_blocks:
        # Add [hr] block
        md.parser.blockprocessors.add(
            'bb_hr', blocks.BBCodeHRProcessor(md.parser), '>hr')
    else:
        # Remove blocks
        del md.parser.blockprocessors['hashheader']
        del md.parser.blockprocessors['setextheader']
        del md.parser.blockprocessors['code']
        del md.parser.blockprocessors['quote']
        del md.parser.blockprocessors['hr']
        del md.parser.blockprocessors['olist']
        del md.parser.blockprocessors['ulist']

    return pipeline.extend_markdown(md)


def linkify_paragraphs(result):
    # skip_pre leaves the contents of <pre> (code blocks) unlinkified
    result['parsed_text'] = bleach.linkify(
        result['parsed_text'], skip_pre=True, parse_email=True)


def clean_links(request, result, force_shva=False):
    host = request.get_host()

    soup = BeautifulSoup(result['parsed_text'], 'html5lib')
    for link in soup.find_all('a'):
        if is_internal_link(link['href'], host):
            link['href'] = clean_internal_link(link['href'], host)
            if force_shva:
                try:
                    # Assert that the cleaned link resolves to a view
                    resolve(link['href'])
                except (Http404, ValueError):
                    pass
            result['inside_links'].append(link['href'])
        else:
            result['outgoing_links'].append(link['href'])
        if link.string:
            link.string = clean_link_prefix(link.string)

    for img in soup.find_all('img'):
        img['alt'] = clean_link_prefix(img['alt'])
        if is_internal_link(img['src'], host):
            img['src'] = clean_internal_link(img['src'], host)
        result['images'].append(img['src'])

    # [6:-7] trims <body></body> wrap
    result['parsed_text'] = six.text_type(soup.body)[6:-7]
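# After clean_links runs, result['inside_links'] holds root-relative paths
# (e.g. '/thread/1/') while result['outgoing_links'] keeps external URLs
# as they appeared in the text (paths here are illustrative).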


def is_internal_link(link, host):
    if link.startswith('/') and not link.startswith('//'):
        return True

    # Compare against the host with the protocol and www. prefix stripped
    link = clean_link_prefix(link).lower()
    if link.startswith('www.'):
        link = link[4:]

    host = host.lower()
    if host.startswith('www.'):
        host = host[4:]

    return link.startswith(host)
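# e.g. is_internal_link('/t/thread/123/', 'example.com') is True, as is
# is_internal_link('https://www.example.com/t/', 'example.com'), while
# is_internal_link('http://other.org/', 'example.com') is False
# (hosts are illustrative).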


def clean_link_prefix(link):
    if link.lower().startswith('https:'):
        link = link[6:]
    if link.lower().startswith('http:'):
        link = link[5:]
    if link.startswith('//'):
        link = link[2:]
    return link
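# e.g. clean_link_prefix('https://example.com/a/') == 'example.com/a/' and
# clean_link_prefix('//example.com/a/') == 'example.com/a/'
# (example.com is an illustrative host).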


def clean_internal_link(link, host):
    link = clean_link_prefix(link)

    if link.lower().startswith('www.'):
        link = link[4:]
    if host.lower().startswith('www.'):
        host = host[4:]

    if link.lower().startswith(host.lower()):
        link = link[len(host):]

    return link or '/'
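# e.g. clean_internal_link('www.example.com/thread/1/', 'example.com')
# returns '/thread/1/' and clean_internal_link('example.com', 'example.com')
# returns '/' (illustrative host).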


def minify_result(result):
    result['parsed_text'] = html_minify(result['parsed_text'].encode('utf-8'))
    # [25:-14] trims the <html><head></head><body> and </body></html> wrap
    # that html_minify adds around the fragment
    result['parsed_text'] = result['parsed_text'][25:-14]