parser.py

import bleach
from bs4 import BeautifulSoup
from htmlmin.minify import html_minify
import markdown

from misago.markup.bbcode import inline, blocks
from misago.markup.md.shortimgs import ShortImagesExtension
from misago.markup.pipeline import pipeline


__all__ = ['parse']


def parse(text, request, poster, allow_mentions=True, allow_links=True,
          allow_images=True, allow_blocks=True, minify=True):
    """
    Message parser

    Utility for flavours to call

    Breaks text into paragraphs; supports code, spoiler and quote blocks,
    headers, lists, images and text styles

    Returns dict object
    """
    md = md_factory(allow_links=allow_links, allow_images=allow_images,
                    allow_blocks=allow_blocks)

    parsing_result = {
        'original_text': text,
        'parsed_text': '',
        'markdown': md,
        'mentions': [],
        'images': [],
        'outgoing_links': [],
        'inside_links': []
    }

    # Parse text
    parsed_text = md.convert(text)

    # Clean and store parsed text
    parsing_result['parsed_text'] = parsed_text.strip()

    if allow_links:
        linkify_paragraphs(parsing_result)

    parsing_result = pipeline.process_result(parsing_result)

    if allow_links or allow_images:
        clean_links(parsing_result, request)

    if minify:
        minify_result(parsing_result)

    return parsing_result
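

# A minimal usage sketch (illustration only, not part of the original
# module): `request` is assumed to be a Django HttpRequest and `poster`
# a user instance; both are only forwarded to the pipeline and the
# link-cleaning step.
#
#     result = parse('Hello, [b]world[/b]!', request, request.user)
#     result['parsed_text']
#     # -> roughly '<p>Hello, <strong>world</strong>!</p>'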


def md_factory(allow_links=True, allow_images=True, allow_blocks=True):
    """
    Create and configure markdown object
    """
    md = markdown.Markdown(safe_mode='escape',
                           extensions=['nl2br'])

    # Remove reference-style links and their definitions ([id]: url)
    del md.preprocessors['reference']
    del md.inlinePatterns['reference']
    del md.inlinePatterns['image_reference']
    del md.inlinePatterns['short_reference']

    # Add [b], [i], [u]
    md.inlinePatterns.add('bb_b', inline.bold, '<strong')
    md.inlinePatterns.add('bb_i', inline.italics, '<emphasis')
    md.inlinePatterns.add('bb_u', inline.underline, '<emphasis2')
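
    # Note: in python-markdown 2.x a position string such as '<strong'
    # inserts the new pattern just before the existing pattern of that
    # name, so the BBCode patterns run ahead of **bold** and *emphasis*
    # (and '>hr' below means "just after the 'hr' processor").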

    if allow_links:
        # Add [url]
        pass
    else:
        # Remove links
        del md.inlinePatterns['link']
        del md.inlinePatterns['autolink']
        del md.inlinePatterns['automail']

    if allow_images:
        # Add [img]
        short_images_md = ShortImagesExtension()
        short_images_md.extendMarkdown(md)
    else:
        # Remove images
        del md.inlinePatterns['image_link']

    if allow_blocks:
        # Add [hr], [quote], [spoiler], [list] and [code] blocks
        md.parser.blockprocessors.add('bb_hr',
                                      blocks.BBCodeHRProcessor(md.parser),
                                      '>hr')
    else:
        # Remove blocks
        del md.parser.blockprocessors['hashheader']
        del md.parser.blockprocessors['setextheader']
        del md.parser.blockprocessors['code']
        del md.parser.blockprocessors['quote']
        del md.parser.blockprocessors['hr']
        del md.parser.blockprocessors['olist']
        del md.parser.blockprocessors['ulist']

    return pipeline.extend_markdown(md)
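

# Example of the factory's effect (illustration only): with blocks
# disabled, Markdown heading syntax is treated as plain paragraph text.
#
#     md = md_factory(allow_blocks=False)
#     md.convert('# not a heading')
#     # -> roughly '<p># not a heading</p>'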


def linkify_paragraphs(result):
    result['parsed_text'] = bleach.linkify(
        result['parsed_text'], skip_pre=True, parse_email=True)
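

# bleach.linkify turns bare URLs (and, with parse_email=True, e-mail
# addresses) in the already-parsed HTML into anchor tags, while
# skip_pre=True leaves the contents of <pre> blocks untouched.
# For example (illustration only; exact output depends on bleach version):
#
#     bleach.linkify('visit example.com')
#     # -> roughly 'visit <a href="http://example.com" rel="nofollow">example.com</a>'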


def clean_links(result, request):
    site_address = '%s://%s' % (request.scheme, request.get_host())

    # Explicit parser silences bs4's "no parser was explicitly
    # specified" warning
    soup = BeautifulSoup(result['parsed_text'], 'html.parser')
    for link in soup.find_all('a'):
        if link['href'].lower().startswith(site_address):
            result['inside_links'].append(link['href'])
            if link['href'].lower() == site_address:
                link['href'] = '/'
            else:
                link['href'] = link['href'].lower()[len(site_address):]
        else:
            result['outgoing_links'].append(link['href'])

        # Strip the protocol from displayed link text
        if link.string.startswith('http://'):
            link.string = link.string[7:].strip()
        if link.string.startswith('https://'):
            link.string = link.string[8:].strip()

    for img in soup.find_all('img'):
        result['images'].append(img['src'])
        if img['src'].lower().startswith(site_address):
            if img['src'].lower() == site_address:
                img['src'] = '/'
            else:
                img['src'] = img['src'].lower()[len(site_address):]

        # Strip the protocol from alt text as well
        if img['alt'].startswith('http://'):
            img['alt'] = img['alt'][7:].strip()
        if img['alt'].startswith('https://'):
            img['alt'] = img['alt'][8:].strip()

    if result['outgoing_links'] or result['inside_links'] or result['images']:
        result['parsed_text'] = soup.prettify()
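

# Example of the rewrite above (illustration only): with the site running
# at http://example.com, a post linking to http://example.com/t/thread-1/
# gets recorded in result['inside_links'] and its href rewritten to the
# relative '/t/thread-1/', while links to other hosts are collected
# unchanged in result['outgoing_links'].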


def minify_result(result):
    # [25:-14] trims <html><head></head><body> and </body></html>
    result['parsed_text'] = html_minify(result['parsed_text'])[25:-14]
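

# The slice works because html_minify returns a full document, as the
# comment above notes: len('<html><head></head><body>') == 25 and
# len('</body></html>') == 14, e.g. html_minify('<p>Hi!</p>') is roughly
# '<html><head></head><body><p>Hi!</p></body></html>'.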