parser.py

import markdown
import bleach
from bs4 import BeautifulSoup
from django.utils import six
from htmlmin.minify import html_minify

from .bbcode import blocks, inline
from .md.shortimgs import ShortImagesExtension
from .mentions import add_mentions
from .pipeline import pipeline

__all__ = ['parse']


def parse(text, request, poster, allow_mentions=True, allow_links=True,
          allow_images=True, allow_blocks=True, minify=True):
    """
    Message parser

    Utility for flavours to call

    Breaks text into paragraphs; supports code, spoiler and quote blocks,
    headers, lists, images and text styles

    Returns dict object
    """
    md = md_factory(
        allow_links=allow_links,
        allow_images=allow_images,
        allow_blocks=allow_blocks,
    )

    parsing_result = {
        'original_text': text,
        'parsed_text': '',
        'markdown': md,
        'mentions': [],
        'images': [],
        'outgoing_links': [],
        'inside_links': []
    }

    # Parse text
    parsed_text = md.convert(text)

    # Clean and store parsed text
    parsing_result['parsed_text'] = parsed_text.strip()

    if allow_links:
        linkify_paragraphs(parsing_result)

    parsing_result = pipeline.process_result(parsing_result)

    if allow_mentions:
        add_mentions(request, parsing_result)

    if allow_links or allow_images:
        clean_links(request, parsing_result)

    if minify:
        minify_result(parsing_result)

    return parsing_result
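

# A minimal usage sketch (an illustration only; 'post' is a hypothetical
# model instance, not part of this module): a flavour passes the raw message
# plus the current request and poster, then reads the cleaned HTML and the
# extracted metadata from the returned dict:
#
#     result = parse(post.original, request, post.poster)
#     post.parsed = result['parsed_text']
#     mentioned_users = result['mentions']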


def md_factory(allow_links=True, allow_images=True, allow_blocks=True):
    """
    Create and configure markdown object
    """
    md = markdown.Markdown(safe_mode='escape',
                           extensions=['nl2br'])

    # Remove references
    del md.preprocessors['reference']
    del md.inlinePatterns['reference']
    del md.inlinePatterns['image_reference']
    del md.inlinePatterns['short_reference']
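
    # In Markdown 2.x, inlinePatterns is an OrderedDict whose add() takes a
    # position string: '<strong' inserts the new pattern just before the
    # built-in 'strong' pattern, so the BBCode tags are matched first.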
    # Add [b], [i], [u]
    md.inlinePatterns.add('bb_b', inline.bold, '<strong')
    md.inlinePatterns.add('bb_i', inline.italics, '<emphasis')
    md.inlinePatterns.add('bb_u', inline.underline, '<emphasis2')

    if allow_links:
        # Add [url]
        pass
    else:
        # Remove links
        del md.inlinePatterns['link']
        del md.inlinePatterns['autolink']
        del md.inlinePatterns['automail']

    if allow_images:
        # Add [img]
        short_images_md = ShortImagesExtension()
        short_images_md.extendMarkdown(md)
    else:
        # Remove images
        del md.inlinePatterns['image_link']

    if allow_blocks:
        # Add [hr], [quote], [spoiler], [list] and [code] blocks
        md.parser.blockprocessors.add('bb_hr', blocks.BBCodeHRProcessor(md.parser), '>hr')
    else:
        # Remove blocks
        del md.parser.blockprocessors['hashheader']
        del md.parser.blockprocessors['setextheader']
        del md.parser.blockprocessors['code']
        del md.parser.blockprocessors['quote']
        del md.parser.blockprocessors['hr']
        del md.parser.blockprocessors['olist']
        del md.parser.blockprocessors['ulist']

    return pipeline.extend_markdown(md)
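

# bleach.linkify() turns bare URLs in text into <a> tags; in the bleach
# version this code was written against, skip_pre=True leaves the contents
# of <pre> (code) blocks unlinked and parse_email=True also links e-mail
# addresses.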
def linkify_paragraphs(result):
    result['parsed_text'] = bleach.linkify(
        result['parsed_text'], skip_pre=True, parse_email=True)
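

# clean_links() rewrites links that point back at this site to relative
# paths, collecting them in 'inside_links'; every other link is recorded in
# 'outgoing_links'. It also collects image sources and strips the visible
# http(s):// prefix from link labels and image alt text.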
def clean_links(request, result):
    site_address = '%s://%s' % (request.scheme, request.get_host())

    soup = BeautifulSoup(result['parsed_text'], 'html5lib')
    for link in soup.find_all('a'):
        if link['href'].lower().startswith(site_address):
            result['inside_links'].append(link['href'])
            if link['href'].lower() == site_address:
                link['href'] = '/'
            else:
                link['href'] = link['href'].lower()[len(site_address):]
        else:
            result['outgoing_links'].append(link['href'])

        # Guard: link.string is None when the anchor wraps other markup
        if link.string and link.string.startswith('http://'):
            link.string.replace_with(link.string[7:].strip())
        if link.string and link.string.startswith('https://'):
            link.string.replace_with(link.string[8:].strip())

    for img in soup.find_all('img'):
        result['images'].append(img['src'])
        if img['src'].lower().startswith(site_address):
            if img['src'].lower() == site_address:
                img['src'] = '/'
            else:
                img['src'] = img['src'].lower()[len(site_address):]

        if img['alt'].startswith('http://'):
            img['alt'] = img['alt'][7:].strip()
        if img['alt'].startswith('https://'):
            img['alt'] = img['alt'][8:].strip()

    # [6:-7] trims the <body></body> wrap html5lib adds
    result['parsed_text'] = six.text_type(soup.body)[6:-7]
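

# html_minify() returns a complete HTML document, so the slice below drops
# the wrapper it adds: len('<html><head></head><body>') == 25 and
# len('</body></html>') == 14.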
def minify_result(result):
    # [25:-14] trims <html><head></head><body> and </body></html>
    result['parsed_text'] = html_minify(result['parsed_text'].encode('utf-8'))
    result['parsed_text'] = result['parsed_text'][25:-14]