
oasis-root

Compiled tree of Oasis Linux, based on its own branch at <https://hacktivis.me/git/oasis/>

git clone https://anongit.hacktivis.me/git/oasis-root.git

tbsjp.py (7078B)


from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    clean_html,
    int_or_none,
    str_or_none,
    unified_timestamp,
    urljoin,
)
from ..utils.traversal import find_element, traverse_obj


class TBSJPEpisodeIE(InfoExtractor):
    _VALID_URL = r'https?://cu\.tbs\.co\.jp/episode/(?P<id>[\d_]+)'
    _GEO_BYPASS = False
    _TESTS = [{
        'url': 'https://cu.tbs.co.jp/episode/23613_2044134_1000049010',
        'skip': 'streams geo-restricted, Japan only. Also, will likely expire eventually',
        'info_dict': {
            'title': 'VIVANT 第三話 誤送金完結へ!絶体絶命の反撃開始',
            'id': '23613_2044134_1000049010',
            'ext': 'mp4',
            'upload_date': '20230728',
            'duration': 3517,
            'release_timestamp': 1691118230,
            'episode': '第三話 誤送金完結へ!絶体絶命の反撃開始',
            'release_date': '20230804',
            'categories': 'count:11',
            'episode_number': 3,
            'timestamp': 1690522538,
            'description': 'md5:2b796341af1ef772034133174ba4a895',
            'series': 'VIVANT',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Episode metadata is embedded in the page as a Falcor cache under window.app
        meta = self._search_json(r'window\.app\s*=', webpage, 'episode info', video_id, fatal=False)
        episode = traverse_obj(meta, ('falcorCache', 'catalog', 'episode', video_id, 'value'))

        # The playback API URL and key live in a separate tf.*.js config script
        tf_path = self._search_regex(
            r'<script[^>]+src=["\'](/assets/tf\.[^"\']+\.js)["\']', webpage, 'stream API config')
        tf_js = self._download_webpage(urljoin(url, tf_path), video_id, note='Downloading stream API config')
        video_url = self._search_regex(r'videoPlaybackUrl:\s*[\'"]([^\'"]+)[\'"]', tf_js, 'stream API url')
        api_key = self._search_regex(r'api_key:\s*[\'"]([^\'"]+)[\'"]', tf_js, 'stream API key')

        try:
            source_meta = self._download_json(f'{video_url}ref:{video_id}', video_id,
                                              headers={'X-Streaks-Api-Key': api_key},
                                              note='Downloading stream metadata')
        except ExtractorError as e:
            # A 403 from the playback API means the stream is only available from Japan
            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                self.raise_geo_restricted(countries=['JP'])
            raise

        formats, subtitles = [], {}
        for src in traverse_obj(source_meta, ('sources', ..., 'src')):
            fmts, subs = self._extract_m3u8_formats_and_subtitles(src, video_id, fatal=False)
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)

        return {
            'title': traverse_obj(webpage, ({find_element(tag='h3')}, {clean_html})),
            'id': video_id,
            **traverse_obj(episode, {
                'categories': ('keywords', {list}),
                'id': ('content_id', {str}),
                'description': ('description', 0, 'value'),
                'timestamp': ('created_at', {unified_timestamp}),
                'release_timestamp': ('pub_date', {unified_timestamp}),
                'duration': ('tv_episode_info', 'duration', {int_or_none}),
                'episode_number': ('tv_episode_info', 'episode_number', {int_or_none}),
                'episode': ('title', lambda _, v: not v.get('is_phonetic'), 'value'),
                'series': ('custom_data', 'program_name'),
            }, get_all=False),
            'formats': formats,
            'subtitles': subtitles,
        }


class TBSJPProgramIE(InfoExtractor):
    _VALID_URL = r'https?://cu\.tbs\.co\.jp/program/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://cu.tbs.co.jp/program/23601',
        'playlist_mincount': 4,
        'info_dict': {
            'id': '23601',
            'categories': ['エンタメ', 'ミライカプセル', '会社', '働く', 'バラエティ', '動画'],
            'description': '幼少期の夢は大人になって、どう成長したのだろうか?\nそしてその夢は今後、どのように広がっていくのか?\nいま話題の会社で働く人の「夢の成長」を描く',
            'series': 'ミライカプセル -I have a dream-',
            'title': 'ミライカプセル -I have a dream-',
        },
    }]

    def _real_extract(self, url):
        programme_id = self._match_id(url)
        webpage = self._download_webpage(url, programme_id)
        meta = self._search_json(r'window\.app\s*=', webpage, 'programme info', programme_id)
        programme = traverse_obj(meta, ('falcorCache', 'catalog', 'program', programme_id, 'false', 'value'))

        return {
            '_type': 'playlist',
            'entries': [self.url_result(f'https://cu.tbs.co.jp/episode/{video_id}', TBSJPEpisodeIE, video_id)
                        for video_id in traverse_obj(programme, ('custom_data', 'seriesList', 'episodeCode', ...))],
            'id': programme_id,
            **traverse_obj(programme, {
                'categories': ('keywords', ...),
                'id': ('tv_episode_info', 'show_content_id', {str_or_none}),
                'description': ('custom_data', 'program_description'),
                'series': ('custom_data', 'program_name'),
                'title': ('custom_data', 'program_name'),
            }),
        }


class TBSJPPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://cu\.tbs\.co\.jp/playlist/(?P<id>[\da-f]+)'
    _TESTS = [{
        'url': 'https://cu.tbs.co.jp/playlist/184f9970e7ba48e4915f1b252c55015e',
        'playlist_mincount': 4,
        'info_dict': {
            'title': 'まもなく配信終了',
            'id': '184f9970e7ba48e4915f1b252c55015e',
        },
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        page = self._download_webpage(url, playlist_id)
        meta = self._search_json(r'window\.app\s*=', page, 'playlist info', playlist_id)
        playlist = traverse_obj(meta, ('falcorCache', 'playList', playlist_id))

        def entries():
            for entry in traverse_obj(playlist, ('catalogs', 'value', lambda _, v: v['content_id'])):
                # TODO: it's likely possible to get all metadata from the playlist page json instead
                content_id = entry['content_id']
                content_type = entry.get('content_type')
                if content_type == 'tv_show':
                    yield self.url_result(
                        f'https://cu.tbs.co.jp/program/{content_id}', TBSJPProgramIE, content_id)
                elif content_type == 'tv_episode':
                    yield self.url_result(
                        f'https://cu.tbs.co.jp/episode/{content_id}', TBSJPEpisodeIE, content_id)
                else:
                    self.report_warning(f'Skipping "{content_id}" with unsupported content_type "{content_type}"')

        return self.playlist_result(entries(), playlist_id, traverse_obj(playlist, ('display_name', 'value')))
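
These three classes follow yt-dlp's InfoExtractor interface, so the usual way to exercise them is through yt-dlp itself. Below is a minimal sketch using yt-dlp's Python API: it assumes yt-dlp is installed, reuses the test URLs from the file above (which are geo-restricted to Japan and may have expired), and only prints metadata instead of downloading.

import yt_dlp

urls = [
    'https://cu.tbs.co.jp/episode/23613_2044134_1000049010',           # TBSJPEpisodeIE
    'https://cu.tbs.co.jp/program/23601',                              # TBSJPProgramIE
    'https://cu.tbs.co.jp/playlist/184f9970e7ba48e4915f1b252c55015e',  # TBSJPPlaylistIE
]

with yt_dlp.YoutubeDL({'quiet': True}) as ydl:
    for url in urls:
        # extract_info() matches the URL against each extractor's _VALID_URL and
        # dispatches to its _real_extract(); outside Japan the episode URL will
        # fail with a geo-restriction error (see raise_geo_restricted above)
        info = ydl.extract_info(url, download=False)
        print(info.get('id'), info.get('title'))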