
youtube-dl

[mirror] Download/Watch videos from video hosters
git clone https://hacktivis.me/git/mirror/youtube-dl.git

yahoo.py (22870B)


# coding: utf-8
from __future__ import unicode_literals

import hashlib
import itertools
import re

from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
    compat_str,
    compat_urllib_parse,
)
from ..utils import (
    clean_html,
    ExtractorError,
    int_or_none,
    mimetype2ext,
    parse_iso8601,
    smuggle_url,
    try_get,
    url_or_none,
)

from .brightcove import BrightcoveNewIE


class YahooIE(InfoExtractor):
    IE_DESC = 'Yahoo screen and movies'
    _VALID_URL = r'(?P<url>https?://(?:(?P<country>[a-zA-Z]{2}(?:-[a-zA-Z]{2})?|malaysia)\.)?(?:[\da-zA-Z_-]+\.)?yahoo\.com/(?:[^/]+/)*(?P<id>[^?&#]*-[0-9]+(?:-[a-z]+)?)\.html)'
    _TESTS = [{
        'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
        'info_dict': {
            'id': '2d25e626-2378-391f-ada0-ddaf1417e588',
            'ext': 'mp4',
            'title': 'Julian Smith & Travis Legg Watch Julian Smith',
            'description': 'Julian and Travis watch Julian Smith',
            'duration': 6863,
            'timestamp': 1369812016,
            'upload_date': '20130529',
        },
    }, {
        'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed',
        'md5': '7993e572fac98e044588d0b5260f4352',
        'info_dict': {
            'id': '4fe78544-8d48-39d8-97cd-13f205d9fcdb',
            'ext': 'mp4',
            'title': "Yahoo Saves 'Community'",
            'description': 'md5:4d4145af2fd3de00cbb6c1d664105053',
            'duration': 170,
            'timestamp': 1406838636,
            'upload_date': '20140731',
        },
    }, {
        'url': 'https://uk.screen.yahoo.com/editor-picks/cute-raccoon-freed-drain-using-091756545.html',
        'md5': '71298482f7c64cbb7fa064e4553ff1c1',
        'info_dict': {
            'id': 'b3affa53-2e14-3590-852b-0e0db6cd1a58',
            'ext': 'webm',
            'title': 'Cute Raccoon Freed From Drain\u00a0Using Angle Grinder',
            'description': 'md5:f66c890e1490f4910a9953c941dee944',
            'duration': 97,
            'timestamp': 1414489862,
            'upload_date': '20141028',
        }
    }, {
        'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
        'md5': '88e209b417f173d86186bef6e4d1f160',
        'info_dict': {
            'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521',
            'ext': 'mp4',
            'title': 'China Moses Is Crazy About the Blues',
            'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
            'duration': 128,
            'timestamp': 1385722202,
            'upload_date': '20131129',
        }
    }, {
        'url': 'https://www.yahoo.com/movies/v/true-story-trailer-173000497.html',
        'md5': '2a9752f74cb898af5d1083ea9f661b58',
        'info_dict': {
            'id': '071c4013-ce30-3a93-a5b2-e0413cd4a9d1',
            'ext': 'mp4',
            'title': '\'True Story\' Trailer',
            'description': 'True Story',
            'duration': 150,
            'timestamp': 1418919206,
            'upload_date': '20141218',
        },
    }, {
        'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html',
        'only_matching': True,
    }, {
        'note': 'NBC Sports embeds',
        'url': 'http://sports.yahoo.com/blogs/ncaab-the-dagger/tyler-kalinoski-s-buzzer-beater-caps-davidson-s-comeback-win-185609842.html?guid=nbc_cbk_davidsonbuzzerbeater_150313',
        'info_dict': {
            'id': '9CsDKds0kvHI',
            'ext': 'flv',
            'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d',
            'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson',
            'upload_date': '20150313',
            'uploader': 'NBCU-SPORTS',
            'timestamp': 1426270238,
        },
    }, {
        'url': 'https://tw.news.yahoo.com/-100120367.html',
        'only_matching': True,
    }, {
        # Query result is embedded in webpage, but explicit request to video API fails with geo restriction
        'url': 'https://screen.yahoo.com/community/communitary-community-episode-1-ladders-154501237.html',
        'md5': '4fbafb9c9b6f07aa8f870629f6671b35',
        'info_dict': {
            'id': '1f32853c-a271-3eef-8cb6-f6d6872cb504',
            'ext': 'mp4',
            'title': 'Communitary - Community Episode 1: Ladders',
            'description': 'md5:8fc39608213295748e1e289807838c97',
            'duration': 1646,
            'timestamp': 1440436550,
            'upload_date': '20150824',
            'series': 'Communitary',
            'season_number': 6,
            'episode_number': 1,
        },
    }, {
        # ytwnews://cavideo/
        'url': 'https://tw.video.yahoo.com/movie-tw/單車天使-中文版預-092316541.html',
        'info_dict': {
            'id': 'ba133ff2-0793-3510-b636-59dfe9ff6cff',
            'ext': 'mp4',
            'title': '單車天使 - 中文版預',
            'description': '中文版預',
            'timestamp': 1476696196,
            'upload_date': '20161017',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # Contains both a Yahoo hosted video and multiple Youtube embeds
        'url': 'https://www.yahoo.com/entertainment/gwen-stefani-reveals-the-pop-hit-she-passed-on-assigns-it-to-her-voice-contestant-instead-033045672.html',
        'info_dict': {
            'id': '46c5d95a-528f-3d03-b732-732fcadd51de',
            'title': 'Gwen Stefani reveals the pop hit she passed on, assigns it to her \'Voice\' contestant instead',
            'description': 'Gwen decided not to record this hit herself, but she decided it was the perfect fit for Kyndall Inskeep.',
        },
        'playlist': [{
            'info_dict': {
                'id': '966d4262-4fd1-3aaa-b45b-049ca6e38ba6',
                'ext': 'mp4',
                'title': 'Gwen Stefani reveals she turned down one of Sia\'s best songs',
                'description': 'On "The Voice" Tuesday, Gwen Stefani told Taylor Swift which Sia hit was almost hers.',
                'timestamp': 1572406500,
                'upload_date': '20191030',
            },
        }, {
            'info_dict': {
                'id': '352CFDOQrKg',
                'ext': 'mp4',
                'title': 'Kyndal Inskeep "Performs the Hell Out of" Sia\'s "Elastic Heart" - The Voice Knockouts 2019',
                'description': 'md5:35b61e94c2ae214bc965ff4245f80d11',
                'uploader': 'The Voice',
                'uploader_id': 'NBCTheVoice',
                'upload_date': '20191029',
            },
        }],
        'params': {
            'playlistend': 2,
        },
        'expected_warnings': ['HTTP Error 404'],
    }, {
        'url': 'https://malaysia.news.yahoo.com/video/bystanders-help-ontario-policeman-bust-190932818.html',
        'only_matching': True,
    }, {
        'url': 'https://es-us.noticias.yahoo.com/es-la-puerta-irrompible-que-110539379.html',
        'only_matching': True,
    }, {
        'url': 'https://www.yahoo.com/entertainment/v/longtime-cbs-news-60-minutes-032036500-cbs.html',
        'only_matching': True,
    }]
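
    # Queries Yahoo's VideoService API for video metadata, then the
    # video-api.yql streams endpoint per container format to collect
    # formats and closed captions.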
    def _extract_yahoo_video(self, video_id, country):
        video = self._download_json(
            'https://%s.yahoo.com/_td/api/resource/VideoService.videos;view=full;video_ids=["%s"]' % (country, video_id),
            video_id, 'Downloading video JSON metadata')[0]
        title = video['title']

        if country == 'malaysia':
            country = 'my'

        is_live = video.get('live_state') == 'live'
        fmts = ('m3u8',) if is_live else ('webm', 'mp4')

        urls = []
        formats = []
        subtitles = {}
        for fmt in fmts:
            media_obj = self._download_json(
                'https://video-api.yql.yahoo.com/v1/video/sapi/streams/' + video_id,
                video_id, 'Downloading %s JSON metadata' % fmt,
                headers=self.geo_verification_headers(), query={
                    'format': fmt,
                    'region': country.upper(),
                })['query']['results']['mediaObj'][0]
            msg = media_obj.get('status', {}).get('msg')

            for s in media_obj.get('streams', []):
                host = s.get('host')
                path = s.get('path')
                if not host or not path:
                    continue
                s_url = host + path
                if s.get('format') == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        s_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
                    continue
                tbr = int_or_none(s.get('bitrate'))
                formats.append({
                    'url': s_url,
                    'format_id': fmt + ('-%d' % tbr if tbr else ''),
                    'width': int_or_none(s.get('width')),
                    'height': int_or_none(s.get('height')),
                    'tbr': tbr,
                    'fps': int_or_none(s.get('framerate')),
                })

            for cc in media_obj.get('closedcaptions', []):
                cc_url = cc.get('url')
                if not cc_url or cc_url in urls:
                    continue
                urls.append(cc_url)
                subtitles.setdefault(cc.get('lang') or 'en-US', []).append({
                    'url': cc_url,
                    'ext': mimetype2ext(cc.get('content_type')),
                })

        streaming_url = video.get('streaming_url')
        if streaming_url and not is_live:
            formats.extend(self._extract_m3u8_formats(
                streaming_url, video_id, 'mp4',
                'm3u8_native', m3u8_id='hls', fatal=False))

        if not formats and msg == 'geo restricted':
            self.raise_geo_restricted()

        self._sort_formats(formats)

        thumbnails = []
        for thumb in video.get('thumbnails', []):
            thumb_url = thumb.get('url')
            if not thumb_url:
                continue
            thumbnails.append({
                'id': thumb.get('tag'),
                'url': thumb.get('url'),
                'width': int_or_none(thumb.get('width')),
                'height': int_or_none(thumb.get('height')),
            })

        series_info = video.get('series_info') or {}

        return {
            'id': video_id,
            'title': self._live_title(title) if is_live else title,
            'formats': formats,
            'thumbnails': thumbnails,
            'description': clean_html(video.get('description')),
            'timestamp': parse_iso8601(video.get('publish_time')),
            'subtitles': subtitles,
            'duration': int_or_none(video.get('duration')),
            'view_count': int_or_none(video.get('view_count')),
            'is_live': is_live,
            'series': video.get('show_name'),
            'season_number': int_or_none(series_info.get('season_number')),
            'episode_number': int_or_none(series_info.get('episode_number')),
        }
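
    # Resolves the page through the caas content API; article pages become a
    # playlist of embedded videos, plain video pages go to _extract_yahoo_video.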
    def _real_extract(self, url):
        url, country, display_id = re.match(self._VALID_URL, url).groups()
        if not country:
            country = 'us'
        else:
            country = country.split('-')[0]

        item = self._download_json(
            'https://%s.yahoo.com/caas/content/article' % country, display_id,
            'Downloading content JSON metadata', query={
                'url': url
            })['items'][0]['data']['partnerData']

        if item.get('type') != 'video':
            entries = []

            cover = item.get('cover') or {}
            if cover.get('type') == 'yvideo':
                cover_url = cover.get('url')
                if cover_url:
                    entries.append(self.url_result(
                        cover_url, 'Yahoo', cover.get('uuid')))

            for e in (item.get('body') or []):
                if e.get('type') == 'videoIframe':
                    iframe_url = e.get('url')
                    if not iframe_url:
                        continue
                    entries.append(self.url_result(iframe_url))

            return self.playlist_result(
                entries, item.get('uuid'),
                item.get('title'), item.get('summary'))

        info = self._extract_yahoo_video(item['uuid'], country)
        info['display_id'] = display_id
        return info


class YahooSearchIE(SearchInfoExtractor):
    IE_DESC = 'Yahoo screen search'
    _MAX_RESULTS = 1000
    IE_NAME = 'screen.yahoo:search'
    _SEARCH_KEY = 'yvsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        entries = []
        for pagenum in itertools.count(0):
            result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
            info = self._download_json(result_url, query,
                                       note='Downloading results page ' + str(pagenum + 1))
            m = info['m']
            results = info['results']

            for (i, r) in enumerate(results):
                if (pagenum * 30) + i >= n:
                    break
                mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
                e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
                entries.append(e)
            if (pagenum * 30 + i >= n) or (m['last'] >= (m['total'] - 1)):
                break

        return {
            '_type': 'playlist',
            'id': query,
            'entries': entries,
        }


class YahooGyaOPlayerIE(InfoExtractor):
    IE_NAME = 'yahoo:gyao:player'
    _VALID_URL = r'https?://(?:gyao\.yahoo\.co\.jp/(?:player|episode/[^/]+)|streaming\.yahoo\.co\.jp/c/y)/(?P<id>\d+/v\d+/v\d+|[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
    _TESTS = [{
        'url': 'https://gyao.yahoo.co.jp/player/00998/v00818/v0000000000000008564/',
        'info_dict': {
            'id': '5993125228001',
            'ext': 'mp4',
            'title': 'フューリー 【字幕版】',
            'description': 'md5:21e691c798a15330eda4db17a8fe45a5',
            'uploader_id': '4235717419001',
            'upload_date': '20190124',
            'timestamp': 1548294365,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'https://streaming.yahoo.co.jp/c/y/01034/v00133/v0000000000000000706/',
        'only_matching': True,
    }, {
        'url': 'https://gyao.yahoo.co.jp/episode/%E3%81%8D%E3%81%AE%E3%81%86%E4%BD%95%E9%A3%9F%E3%81%B9%E3%81%9F%EF%BC%9F%20%E7%AC%AC2%E8%A9%B1%202019%2F4%2F12%E6%94%BE%E9%80%81%E5%88%86/5cb02352-b725-409e-9f8d-88f947a9f682',
        'only_matching': True,
    }]
    _GEO_BYPASS = False
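
    # Looks up the Brightcove delivery id via the GyaO playback GraphQL API
    # and delegates extraction to BrightcoveNewIE.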
    def _real_extract(self, url):
        video_id = self._match_id(url).replace('/', ':')
        headers = self.geo_verification_headers()
        headers['Accept'] = 'application/json'
        resp = self._download_json(
            'https://gyao.yahoo.co.jp/apis/playback/graphql', video_id, query={
                'appId': 'dj00aiZpPUNJeDh2cU1RazU3UCZzPWNvbnN1bWVyc2VjcmV0Jng9NTk-',
                'query': '''{
  content(parameter: {contentId: "%s", logicaAgent: PC_WEB}) {
    video {
      delivery {
        id
      }
      title
    }
  }
}''' % video_id,
            }, headers=headers)
        content = resp['data']['content']
        if not content:
            msg = resp['errors'][0]['message']
            if msg == 'not in japan':
                self.raise_geo_restricted(countries=['JP'])
            raise ExtractorError(msg)
        video = content['video']
        return {
            '_type': 'url_transparent',
            'id': video_id,
            'title': video['title'],
            'url': smuggle_url(
                'http://players.brightcove.net/4235717419001/SyG5P0gjb_default/index.html?videoId=' + video['delivery']['id'],
                {'geo_countries': ['JP']}),
            'ie_key': BrightcoveNewIE.ie_key(),
        }


class YahooGyaOIE(InfoExtractor):
    IE_NAME = 'yahoo:gyao'
    _VALID_URL = r'https?://(?:gyao\.yahoo\.co\.jp/(?:p|title(?:/[^/]+)?)|streaming\.yahoo\.co\.jp/p/y)/(?P<id>\d+/v\d+|[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
    _TESTS = [{
        'url': 'https://gyao.yahoo.co.jp/p/00449/v03102/',
        'info_dict': {
            'id': '00449:v03102',
        },
        'playlist_count': 2,
    }, {
        'url': 'https://streaming.yahoo.co.jp/p/y/01034/v00133/',
        'only_matching': True,
    }, {
        'url': 'https://gyao.yahoo.co.jp/title/%E3%81%97%E3%82%83%E3%81%B9%E3%81%8F%E3%82%8A007/5b025a49-b2e5-4dc7-945c-09c6634afacf',
        'only_matching': True,
    }, {
        'url': 'https://gyao.yahoo.co.jp/title/5b025a49-b2e5-4dc7-945c-09c6634afacf',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        program_id = self._match_id(url).replace('/', ':')
        videos = self._download_json(
            'https://gyao.yahoo.co.jp/api/programs/%s/videos' % program_id, program_id)['videos']
        entries = []
        for video in videos:
            video_id = video.get('id')
            if not video_id:
                continue
            entries.append(self.url_result(
                'https://gyao.yahoo.co.jp/player/%s/' % video_id.replace(':', '/'),
                YahooGyaOPlayerIE.ie_key(), video_id))
        return self.playlist_result(entries, program_id)


class YahooJapanNewsIE(InfoExtractor):
    IE_NAME = 'yahoo:japannews'
    IE_DESC = 'Yahoo! Japan News'
    _VALID_URL = r'https?://(?P<host>(?:news|headlines)\.yahoo\.co\.jp)[^\d]*(?P<id>\d[\d-]*\d)?'
    _GEO_COUNTRIES = ['JP']
    _TESTS = [{
        'url': 'https://headlines.yahoo.co.jp/videonews/ann?a=20190716-00000071-ann-int',
        'info_dict': {
            'id': '1736242',
            'ext': 'mp4',
            'title': 'ムン大統領が対日批判を強化“現金化”効果は?(テレビ朝日系(ANN)) - Yahoo!ニュース',
            'description': '韓国の元徴用工らを巡る裁判の原告が弁護士が差し押さえた三菱重工業の資産を売却して - Yahoo!ニュース(テレビ朝日系(ANN))',
            'thumbnail': r're:^https?://.*\.[a-zA-Z\d]{3,4}$',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # geo restricted
        'url': 'https://headlines.yahoo.co.jp/hl?a=20190721-00000001-oxv-l04',
        'only_matching': True,
    }, {
        'url': 'https://headlines.yahoo.co.jp/videonews/',
        'only_matching': True,
    }, {
        'url': 'https://news.yahoo.co.jp',
        'only_matching': True,
    }, {
        'url': 'https://news.yahoo.co.jp/byline/hashimotojunji/20190628-00131977/',
        'only_matching': True,
    }, {
        'url': 'https://news.yahoo.co.jp/feature/1356',
        'only_matching': True
    }]
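
    # Builds formats from the feapi-yvpub VideoUrlSet payload (HLS or
    # progressive HTTP entries).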
    def _extract_formats(self, json_data, content_id):
        formats = []

        video_data = try_get(
            json_data,
            lambda x: x['ResultSet']['Result'][0]['VideoUrlSet']['VideoUrl'],
            list)
        for vid in video_data or []:
            delivery = vid.get('delivery')
            url = url_or_none(vid.get('Url'))
            if not delivery or not url:
                continue
            elif delivery == 'hls':
                formats.extend(
                    self._extract_m3u8_formats(
                        url, content_id, 'mp4', 'm3u8_native',
                        m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'url': url,
                    'format_id': 'http-%s' % compat_str(vid.get('bitrate', '')),
                    'height': int_or_none(vid.get('height')),
                    'width': int_or_none(vid.get('width')),
                    'tbr': int_or_none(vid.get('bitrate')),
                })
        self._remove_duplicate_formats(formats)
        self._sort_formats(formats)

        return formats
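
    # Video-news index pages become a playlist of Brightcove playlists;
    # article pages are resolved through the feapi-yvpub content API.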
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        host = mobj.group('host')
        display_id = mobj.group('id') or host

        webpage = self._download_webpage(url, display_id)

        title = self._html_search_meta(
            ['og:title', 'twitter:title'], webpage, 'title', default=None
        ) or self._html_search_regex('<title>([^<]+)</title>', webpage, 'title')

        if display_id == host:
            # Headline page (w/ multiple BC playlists) ('news.yahoo.co.jp', 'headlines.yahoo.co.jp/videonews/', ...)
            stream_plists = re.findall(r'plist=(\d+)', webpage) or re.findall(r'plist["\']:\s*["\']([^"\']+)', webpage)
            entries = [
                self.url_result(
                    smuggle_url(
                        'http://players.brightcove.net/5690807595001/HyZNerRl7_default/index.html?playlistId=%s' % plist_id,
                        {'geo_countries': ['JP']}),
                    ie='BrightcoveNew', video_id=plist_id)
                for plist_id in stream_plists]
            return self.playlist_result(entries, playlist_title=title)

        # Article page
        description = self._html_search_meta(
            ['og:description', 'description', 'twitter:description'],
            webpage, 'description', default=None)
        thumbnail = self._og_search_thumbnail(
            webpage, default=None) or self._html_search_meta(
            'twitter:image', webpage, 'thumbnail', default=None)
        space_id = self._search_regex([
            r'<script[^>]+class=["\']yvpub-player["\'][^>]+spaceid=([^&"\']+)',
            r'YAHOO\.JP\.srch\.\w+link\.onLoad[^;]+spaceID["\' ]*:["\' ]+([^"\']+)',
            r'<!--\s+SpaceID=(\d+)'
        ], webpage, 'spaceid')

        content_id = self._search_regex(
            r'<script[^>]+class=["\']yvpub-player["\'][^>]+contentid=(?P<contentid>[^&"\']+)',
            webpage, 'contentid', group='contentid')

        json_data = self._download_json(
            'https://feapi-yvpub.yahooapis.jp/v1/content/%s' % content_id,
            content_id,
            query={
                'appid': 'dj0zaiZpPVZMTVFJR0FwZWpiMyZzPWNvbnN1bWVyc2VjcmV0Jng9YjU-',
                'output': 'json',
                'space_id': space_id,
                'domain': host,
                'ak': hashlib.md5('_'.join((space_id, host)).encode()).hexdigest(),
                'device_type': '1100',
            })
        formats = self._extract_formats(json_data, content_id)

        return {
            'id': content_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'formats': formats,
        }