logo

oasis-root

Compiled tree of Oasis Linux, based on my own branch at <https://hacktivis.me/git/oasis/>. Clone with: git clone https://anongit.hacktivis.me/git/oasis-root.git

substack.py (5802B)


  1. import re
  2. import urllib.parse
  3. from .common import InfoExtractor
  4. from ..networking import HEADRequest
  5. from ..utils import (
  6. determine_ext,
  7. js_to_json,
  8. str_or_none,
  9. )
  10. from ..utils.traversal import traverse_obj
  11. class SubstackIE(InfoExtractor):
  12. _VALID_URL = r'https?://(?P<username>[\w-]+)\.substack\.com/p/(?P<id>[\w-]+)'
  13. _TESTS = [{
  14. 'url': 'https://haleynahman.substack.com/p/i-made-a-vlog?s=r',
  15. 'md5': 'f27e4fc6252001d48d479f45e65cdfd5',
  16. 'info_dict': {
  17. 'id': '47660949',
  18. 'ext': 'mp4',
  19. 'title': 'I MADE A VLOG',
  20. 'description': 'md5:9248af9a759321e1027226f988f54d96',
  21. 'thumbnail': 'md5:bec758a34d8ee9142d43bcebdf33af18',
  22. 'uploader': 'Maybe Baby',
  23. 'uploader_id': '33628',
  24. },
  25. }, {
  26. 'url': 'https://haleynahman.substack.com/p/-dear-danny-i-found-my-boyfriends?s=r',
  27. 'md5': '0a63eacec877a1171a62cfa69710fcea',
  28. 'info_dict': {
  29. 'id': '51045592',
  30. 'ext': 'mpga',
  31. 'title': "🎧 Dear Danny: I found my boyfriend's secret Twitter account",
  32. 'description': 'md5:a57f2439319e56e0af92dd0c95d75797',
  33. 'thumbnail': 'md5:daa40b6b79249417c14ff8103db29639',
  34. 'uploader': 'Maybe Baby',
  35. 'uploader_id': '33628',
  36. },
  37. }, {
  38. 'url': 'https://andrewzimmern.substack.com/p/mussels-with-black-bean-sauce-recipe',
  39. 'md5': 'fd3c07077b02444ff0130715b5f632bb',
  40. 'info_dict': {
  41. 'id': '47368578',
  42. 'ext': 'mp4',
  43. 'title': 'Mussels with Black Bean Sauce: Recipe of the Week #7',
  44. 'description': 'md5:b96234a2906c7d854d5229818d889515',
  45. 'thumbnail': 'md5:e30bfaa9da40e82aa62354263a9dd232',
  46. 'uploader': "Andrew Zimmern's Spilled Milk ",
  47. 'uploader_id': '577659',
  48. },
  49. }, {
  50. # Podcast that needs its file extension resolved to mp3
  51. 'url': 'https://persuasion1.substack.com/p/summers',
  52. 'md5': '1456a755d46084744facdfac9edf900f',
  53. 'info_dict': {
  54. 'id': '141970405',
  55. 'ext': 'mp3',
  56. 'title': 'Larry Summers on What Went Wrong on Campus',
  57. 'description': 'Yascha Mounk and Larry Summers also discuss the promise and perils of artificial intelligence.',
  58. 'thumbnail': r're:https://substackcdn\.com/image/.+\.jpeg',
  59. 'uploader': 'Persuasion',
  60. 'uploader_id': '61579',
  61. },
  62. }]
  63. @classmethod
  64. def _extract_embed_urls(cls, url, webpage):
  65. if not re.search(r'<script[^>]+src=["\']https://substackcdn.com/[^"\']+\.js', webpage):
  66. return
  67. mobj = re.search(r'{[^}]*\\?["\']subdomain\\?["\']\s*:\s*\\?["\'](?P<subdomain>[^\\"\']+)', webpage)
  68. if mobj:
  69. parsed = urllib.parse.urlparse(url)
  70. yield parsed._replace(netloc=f'{mobj.group("subdomain")}.substack.com').geturl()
  71. raise cls.StopExtraction
  72. def _extract_video_formats(self, video_id, url):
  73. formats, subtitles = [], {}
  74. for video_format in ('hls', 'mp4'):
  75. video_url = urllib.parse.urljoin(url, f'/api/v1/video/upload/{video_id}/src?type={video_format}')
  76. if video_format == 'hls':
  77. fmts, subs = self._extract_m3u8_formats_and_subtitles(video_url, video_id, 'mp4', fatal=False)
  78. formats.extend(fmts)
  79. self._merge_subtitles(subs, target=subtitles)
  80. else:
  81. formats.append({
  82. 'url': video_url,
  83. 'ext': video_format,
  84. })
  85. return formats, subtitles
  86. def _real_extract(self, url):
  87. display_id, username = self._match_valid_url(url).group('id', 'username')
  88. webpage = self._download_webpage(url, display_id)
  89. webpage_info = self._parse_json(self._search_json(
  90. r'window\._preloads\s*=\s*JSON\.parse\(', webpage, 'json string',
  91. display_id, transform_source=js_to_json, contains_pattern=r'"{(?s:.+)}"'), display_id)
  92. canonical_url = url
  93. domain = traverse_obj(webpage_info, ('domainInfo', 'customDomain', {str}))
  94. if domain:
  95. canonical_url = urllib.parse.urlparse(url)._replace(netloc=domain).geturl()
  96. post_type = webpage_info['post']['type']
  97. formats, subtitles = [], {}
  98. if post_type == 'podcast':
  99. fmt = {'url': webpage_info['post']['podcast_url']}
  100. if not determine_ext(fmt['url'], default_ext=None):
  101. # The redirected format URL expires but the original URL doesn't,
  102. # so we only want to extract the extension from this request
  103. fmt['ext'] = determine_ext(self._request_webpage(
  104. HEADRequest(fmt['url']), display_id,
  105. 'Resolving podcast file extension',
  106. 'Podcast URL is invalid').url)
  107. formats.append(fmt)
  108. elif post_type == 'video':
  109. formats, subtitles = self._extract_video_formats(webpage_info['post']['videoUpload']['id'], canonical_url)
  110. else:
  111. self.raise_no_formats(f'Page type "{post_type}" is not supported')
  112. return {
  113. 'id': str(webpage_info['post']['id']),
  114. 'formats': formats,
  115. 'subtitles': subtitles,
  116. 'title': traverse_obj(webpage_info, ('post', 'title')),
  117. 'description': traverse_obj(webpage_info, ('post', 'description')),
  118. 'thumbnail': traverse_obj(webpage_info, ('post', 'cover_image')),
  119. 'uploader': traverse_obj(webpage_info, ('pub', 'name')),
  120. 'uploader_id': str_or_none(traverse_obj(webpage_info, ('post', 'publication_id'))),
  121. 'webpage_url': canonical_url,
  122. }