logo

youtube-dl

[mirror] Download/Watch videos from video hosters
git clone https://hacktivis.me/git/mirror/youtube-dl.git

youtube.py (169316B)


  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import collections
  4. import itertools
  5. import json
  6. import os.path
  7. import random
  8. import re
  9. import traceback
  10. from .common import InfoExtractor, SearchInfoExtractor
  11. from ..compat import (
  12. compat_chr,
  13. compat_HTTPError,
  14. compat_map as map,
  15. compat_str,
  16. compat_urllib_parse,
  17. compat_urllib_parse_parse_qs as compat_parse_qs,
  18. compat_urllib_parse_unquote_plus,
  19. compat_urllib_parse_urlparse,
  20. compat_zip as zip,
  21. )
  22. from ..jsinterp import JSInterpreter
  23. from ..utils import (
  24. clean_html,
  25. dict_get,
  26. error_to_compat_str,
  27. ExtractorError,
  28. float_or_none,
  29. extract_attributes,
  30. get_element_by_attribute,
  31. int_or_none,
  32. join_nonempty,
  33. js_to_json,
  34. LazyList,
  35. merge_dicts,
  36. mimetype2ext,
  37. NO_DEFAULT,
  38. parse_codecs,
  39. parse_count,
  40. parse_duration,
  41. parse_qs,
  42. qualities,
  43. remove_start,
  44. smuggle_url,
  45. str_or_none,
  46. str_to_int,
  47. T,
  48. traverse_obj,
  49. try_call,
  50. try_get,
  51. txt_or_none,
  52. unescapeHTML,
  53. unified_strdate,
  54. unsmuggle_url,
  55. update_url,
  56. update_url_query,
  57. url_or_none,
  58. urlencode_postdata,
  59. urljoin,
  60. )
  61. class YoutubeBaseInfoExtractor(InfoExtractor):
  62. """Provide base functions for Youtube extractors"""
  63. _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
  64. _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
  65. _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
  66. _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
  67. _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
  68. _NETRC_MACHINE = 'youtube'
  69. # If True it will raise an error if no login info is provided
  70. _LOGIN_REQUIRED = False
  71. _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM)'
  72. def _login(self):
  73. """
  74. Attempt to log in to YouTube.
  75. True is returned if successful or skipped.
  76. False is returned if login failed.
  77. If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
  78. """
  79. username, password = self._get_login_info()
  80. # No authentication to be performed
  81. if username is None:
  82. if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
  83. raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
  84. return True
  85. login_page = self._download_webpage(
  86. self._LOGIN_URL, None,
  87. note='Downloading login page',
  88. errnote='unable to fetch login page', fatal=False)
  89. if login_page is False:
  90. return
  91. login_form = self._hidden_inputs(login_page)
  92. def req(url, f_req, note, errnote):
  93. data = login_form.copy()
  94. data.update({
  95. 'pstMsg': 1,
  96. 'checkConnection': 'youtube',
  97. 'checkedDomains': 'youtube',
  98. 'hl': 'en',
  99. 'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
  100. 'f.req': json.dumps(f_req),
  101. 'flowName': 'GlifWebSignIn',
  102. 'flowEntry': 'ServiceLogin',
  103. # TODO: reverse actual botguard identifier generation algo
  104. 'bgRequest': '["identifier",""]',
  105. })
  106. return self._download_json(
  107. url, None, note=note, errnote=errnote,
  108. transform_source=lambda s: re.sub(r'^[^[]*', '', s),
  109. fatal=False,
  110. data=urlencode_postdata(data), headers={
  111. 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
  112. 'Google-Accounts-XSRF': 1,
  113. })
  114. def warn(message):
  115. self._downloader.report_warning(message)
  116. lookup_req = [
  117. username,
  118. None, [], None, 'US', None, None, 2, False, True,
  119. [
  120. None, None,
  121. [2, 1, None, 1,
  122. 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
  123. None, [], 4],
  124. 1, [None, None, []], None, None, None, True
  125. ],
  126. username,
  127. ]
  128. lookup_results = req(
  129. self._LOOKUP_URL, lookup_req,
  130. 'Looking up account info', 'Unable to look up account info')
  131. if lookup_results is False:
  132. return False
  133. user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
  134. if not user_hash:
  135. warn('Unable to extract user hash')
  136. return False
  137. challenge_req = [
  138. user_hash,
  139. None, 1, None, [1, None, None, None, [password, None, True]],
  140. [
  141. None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
  142. 1, [None, None, []], None, None, None, True
  143. ]]
  144. challenge_results = req(
  145. self._CHALLENGE_URL, challenge_req,
  146. 'Logging in', 'Unable to log in')
  147. if challenge_results is False:
  148. return
  149. login_res = try_get(challenge_results, lambda x: x[0][5], list)
  150. if login_res:
  151. login_msg = try_get(login_res, lambda x: x[5], compat_str)
  152. warn(
  153. 'Unable to login: %s' % 'Invalid password'
  154. if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
  155. return False
  156. res = try_get(challenge_results, lambda x: x[0][-1], list)
  157. if not res:
  158. warn('Unable to extract result entry')
  159. return False
  160. login_challenge = try_get(res, lambda x: x[0][0], list)
  161. if login_challenge:
  162. challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
  163. if challenge_str == 'TWO_STEP_VERIFICATION':
  164. # SEND_SUCCESS - TFA code has been successfully sent to phone
  165. # QUOTA_EXCEEDED - reached the limit of TFA codes
  166. status = try_get(login_challenge, lambda x: x[5], compat_str)
  167. if status == 'QUOTA_EXCEEDED':
  168. warn('Exceeded the limit of TFA codes, try later')
  169. return False
  170. tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
  171. if not tl:
  172. warn('Unable to extract TL')
  173. return False
  174. tfa_code = self._get_tfa_info('2-step verification code')
  175. if not tfa_code:
  176. warn(
  177. 'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
  178. '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
  179. return False
  180. tfa_code = remove_start(tfa_code, 'G-')
  181. tfa_req = [
  182. user_hash, None, 2, None,
  183. [
  184. 9, None, None, None, None, None, None, None,
  185. [None, tfa_code, True, 2]
  186. ]]
  187. tfa_results = req(
  188. self._TFA_URL.format(tl), tfa_req,
  189. 'Submitting TFA code', 'Unable to submit TFA code')
  190. if tfa_results is False:
  191. return False
  192. tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
  193. if tfa_res:
  194. tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
  195. warn(
  196. 'Unable to finish TFA: %s' % 'Invalid TFA code'
  197. if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
  198. return False
  199. check_cookie_url = try_get(
  200. tfa_results, lambda x: x[0][-1][2], compat_str)
  201. else:
  202. CHALLENGES = {
  203. 'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
  204. 'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
  205. 'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
  206. }
  207. challenge = CHALLENGES.get(
  208. challenge_str,
  209. '%s returned error %s.' % (self.IE_NAME, challenge_str))
  210. warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
  211. return False
  212. else:
  213. check_cookie_url = try_get(res, lambda x: x[2], compat_str)
  214. if not check_cookie_url:
  215. warn('Unable to extract CheckCookie URL')
  216. return False
  217. check_cookie_results = self._download_webpage(
  218. check_cookie_url, None, 'Checking cookie', fatal=False)
  219. if check_cookie_results is False:
  220. return False
  221. if 'https://myaccount.google.com/' not in check_cookie_results:
  222. warn('Unable to log in')
  223. return False
  224. return True
  225. def _initialize_consent(self):
  226. cookies = self._get_cookies('https://www.youtube.com/')
  227. if cookies.get('__Secure-3PSID'):
  228. return
  229. socs = cookies.get('SOCS')
  230. if socs and not socs.value.startswith('CAA'): # not consented
  231. return
  232. self._set_cookie('.youtube.com', 'SOCS', 'CAI', secure=True) # accept all (required for mixes)
  233. def _real_initialize(self):
  234. self._initialize_consent()
  235. if self._downloader is None:
  236. return
  237. if not self._login():
  238. return
  239. _DEFAULT_API_DATA = {
  240. 'context': {
  241. 'client': {
  242. 'clientName': 'WEB',
  243. 'clientVersion': '2.20201021.03.00',
  244. }
  245. },
  246. }
  247. _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
  248. _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
  249. _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
  250. def _call_api(self, ep, query, video_id, fatal=True, headers=None):
  251. data = self._DEFAULT_API_DATA.copy()
  252. data.update(query)
  253. real_headers = {'content-type': 'application/json'}
  254. if headers:
  255. real_headers.update(headers)
  256. return self._download_json(
  257. 'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
  258. note='Downloading API JSON', errnote='Unable to download API page',
  259. data=json.dumps(data).encode('utf8'), fatal=fatal,
  260. headers=real_headers,
  261. query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})
  262. def _extract_yt_initial_data(self, video_id, webpage):
  263. return self._parse_json(
  264. self._search_regex(
  265. (r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
  266. self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
  267. video_id)
  268. def _extract_ytcfg(self, video_id, webpage):
  269. return self._parse_json(
  270. self._search_regex(
  271. r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
  272. default='{}'), video_id, fatal=False) or {}
  273. def _extract_video(self, renderer):
  274. video_id = renderer['videoId']
  275. title = try_get(
  276. renderer,
  277. (lambda x: x['title']['runs'][0]['text'],
  278. lambda x: x['title']['simpleText'],
  279. lambda x: x['headline']['simpleText']), compat_str)
  280. description = try_get(
  281. renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
  282. compat_str)
  283. duration = parse_duration(try_get(
  284. renderer, lambda x: x['lengthText']['simpleText'], compat_str))
  285. view_count_text = try_get(
  286. renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
  287. view_count = str_to_int(self._search_regex(
  288. r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
  289. 'view count', default=None))
  290. uploader = try_get(
  291. renderer,
  292. (lambda x: x['ownerText']['runs'][0]['text'],
  293. lambda x: x['shortBylineText']['runs'][0]['text']), compat_str)
  294. return {
  295. '_type': 'url',
  296. 'ie_key': YoutubeIE.ie_key(),
  297. 'id': video_id,
  298. 'url': video_id,
  299. 'title': title,
  300. 'description': description,
  301. 'duration': duration,
  302. 'view_count': view_count,
  303. 'uploader': uploader,
  304. }
  305. def _search_results(self, query, params):
  306. data = {
  307. 'context': {
  308. 'client': {
  309. 'clientName': 'WEB',
  310. 'clientVersion': '2.20201021.03.00',
  311. }
  312. },
  313. 'query': query,
  314. }
  315. if params:
  316. data['params'] = params
  317. for page_num in itertools.count(1):
  318. search = self._download_json(
  319. 'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
  320. video_id='query "%s"' % query,
  321. note='Downloading page %s' % page_num,
  322. errnote='Unable to download API page', fatal=False,
  323. data=json.dumps(data).encode('utf8'),
  324. headers={'content-type': 'application/json'})
  325. if not search:
  326. break
  327. slr_contents = try_get(
  328. search,
  329. (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
  330. lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
  331. list)
  332. if not slr_contents:
  333. break
  334. for slr_content in slr_contents:
  335. isr_contents = try_get(
  336. slr_content,
  337. lambda x: x['itemSectionRenderer']['contents'],
  338. list)
  339. if not isr_contents:
  340. continue
  341. for content in isr_contents:
  342. if not isinstance(content, dict):
  343. continue
  344. video = content.get('videoRenderer')
  345. if not isinstance(video, dict):
  346. continue
  347. video_id = video.get('videoId')
  348. if not video_id:
  349. continue
  350. yield self._extract_video(video)
  351. token = try_get(
  352. slr_contents,
  353. lambda x: x[-1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
  354. compat_str)
  355. if not token:
  356. break
  357. data['continuation'] = token
  358. @staticmethod
  359. def _owner_endpoints_path():
  360. return [
  361. Ellipsis,
  362. lambda k, _: k.endswith('SecondaryInfoRenderer'),
  363. ('owner', 'videoOwner'), 'videoOwnerRenderer', 'title',
  364. 'runs', Ellipsis]
  365. def _extract_channel_id(self, webpage, videodetails={}, metadata={}, renderers=[]):
  366. channel_id = None
  367. if any((videodetails, metadata, renderers)):
  368. channel_id = (
  369. traverse_obj(videodetails, 'channelId')
  370. or traverse_obj(metadata, 'externalChannelId', 'externalId')
  371. or traverse_obj(renderers,
  372. self._owner_endpoints_path() + [
  373. 'navigationEndpoint', 'browseEndpoint', 'browseId'],
  374. get_all=False)
  375. )
  376. return channel_id or self._html_search_meta(
  377. 'channelId', webpage, 'channel id', default=None)
  378. def _extract_author_var(self, webpage, var_name,
  379. videodetails={}, metadata={}, renderers=[]):
  380. result = None
  381. paths = {
  382. # (HTML, videodetails, metadata, renderers)
  383. 'name': ('content', 'author', (('ownerChannelName', None), 'title'), ['text']),
  384. 'url': ('href', 'ownerProfileUrl', 'vanityChannelUrl',
  385. ['navigationEndpoint', 'browseEndpoint', 'canonicalBaseUrl'])
  386. }
  387. if any((videodetails, metadata, renderers)):
  388. result = (
  389. traverse_obj(videodetails, paths[var_name][1], get_all=False)
  390. or traverse_obj(metadata, paths[var_name][2], get_all=False)
  391. or traverse_obj(renderers,
  392. self._owner_endpoints_path() + paths[var_name][3],
  393. get_all=False)
  394. )
  395. return result or traverse_obj(
  396. extract_attributes(self._search_regex(
  397. r'''(?s)(<link\b[^>]+\bitemprop\s*=\s*("|')%s\2[^>]*>)'''
  398. % re.escape(var_name),
  399. get_element_by_attribute('itemprop', 'author', webpage or '') or '',
  400. 'author link', default='')),
  401. paths[var_name][0])
  402. @staticmethod
  403. def _yt_urljoin(url_or_path):
  404. return urljoin('https://www.youtube.com', url_or_path)
  405. def _extract_uploader_id(self, uploader_url):
  406. return self._search_regex(
  407. r'/(?:(?:channel|user)/|(?=@))([^/?&#]+)', uploader_url or '',
  408. 'uploader id', default=None)
  409. class YoutubeIE(YoutubeBaseInfoExtractor):
  410. IE_DESC = 'YouTube.com'
  411. _INVIDIOUS_SITES = (
  412. # invidious-redirect websites
  413. r'(?:www\.)?redirect\.invidious\.io',
  414. r'(?:(?:www|dev)\.)?invidio\.us',
  415. # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
  416. r'(?:(?:www|no)\.)?invidiou\.sh',
  417. r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
  418. r'(?:www\.)?invidious\.kabi\.tk',
  419. r'(?:www\.)?invidious\.13ad\.de',
  420. r'(?:www\.)?invidious\.mastodon\.host',
  421. r'(?:www\.)?invidious\.zapashcanon\.fr',
  422. r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
  423. r'(?:www\.)?invidious\.tinfoil-hat\.net',
  424. r'(?:www\.)?invidious\.himiko\.cloud',
  425. r'(?:www\.)?invidious\.reallyancient\.tech',
  426. r'(?:www\.)?invidious\.tube',
  427. r'(?:www\.)?invidiou\.site',
  428. r'(?:www\.)?invidious\.site',
  429. r'(?:www\.)?invidious\.xyz',
  430. r'(?:www\.)?invidious\.nixnet\.xyz',
  431. r'(?:www\.)?invidious\.048596\.xyz',
  432. r'(?:www\.)?invidious\.drycat\.fr',
  433. r'(?:www\.)?inv\.skyn3t\.in',
  434. r'(?:www\.)?tube\.poal\.co',
  435. r'(?:www\.)?tube\.connect\.cafe',
  436. r'(?:www\.)?vid\.wxzm\.sx',
  437. r'(?:www\.)?vid\.mint\.lgbt',
  438. r'(?:www\.)?vid\.puffyan\.us',
  439. r'(?:www\.)?yewtu\.be',
  440. r'(?:www\.)?yt\.elukerio\.org',
  441. r'(?:www\.)?yt\.lelux\.fi',
  442. r'(?:www\.)?invidious\.ggc-project\.de',
  443. r'(?:www\.)?yt\.maisputain\.ovh',
  444. r'(?:www\.)?ytprivate\.com',
  445. r'(?:www\.)?invidious\.13ad\.de',
  446. r'(?:www\.)?invidious\.toot\.koeln',
  447. r'(?:www\.)?invidious\.fdn\.fr',
  448. r'(?:www\.)?watch\.nettohikari\.com',
  449. r'(?:www\.)?invidious\.namazso\.eu',
  450. r'(?:www\.)?invidious\.silkky\.cloud',
  451. r'(?:www\.)?invidious\.exonip\.de',
  452. r'(?:www\.)?invidious\.riverside\.rocks',
  453. r'(?:www\.)?invidious\.blamefran\.net',
  454. r'(?:www\.)?invidious\.moomoo\.de',
  455. r'(?:www\.)?ytb\.trom\.tf',
  456. r'(?:www\.)?yt\.cyberhost\.uk',
  457. r'(?:www\.)?kgg2m7yk5aybusll\.onion',
  458. r'(?:www\.)?qklhadlycap4cnod\.onion',
  459. r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
  460. r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
  461. r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
  462. r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
  463. r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
  464. r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
  465. r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
  466. r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
  467. r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
  468. r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
  469. )
  470. _VALID_URL = r"""(?x)^
  471. (
  472. (?:https?://|//) # http(s):// or protocol-independent URL
  473. (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
  474. (?:www\.)?deturl\.com/www\.youtube\.com|
  475. (?:www\.)?pwnyoutube\.com|
  476. (?:www\.)?hooktube\.com|
  477. (?:www\.)?yourepeat\.com|
  478. tube\.majestyc\.net|
  479. %(invidious)s|
  480. youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
  481. (?:.*?\#/)? # handle anchor (#/) redirect urls
  482. (?: # the various things that can precede the ID:
  483. (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
  484. |shorts/
  485. |(?: # or the v= param in all its forms
  486. (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
  487. (?:\?|\#!?) # the params delimiter ? or # or #!
  488. (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
  489. v=
  490. )
  491. ))
  492. |(?:
  493. youtu\.be| # just youtu.be/xxxx
  494. vid\.plus| # or vid.plus/xxxx
  495. zwearz\.com/watch| # or zwearz.com/watch/xxxx
  496. %(invidious)s
  497. )/
  498. |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
  499. )
  500. )? # all until now is optional -> you can pass the naked ID
  501. (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
  502. (?(1).+)? # if we found the ID, everything can follow
  503. $""" % {
  504. 'invidious': '|'.join(_INVIDIOUS_SITES),
  505. }
  506. _PLAYER_INFO_RE = (
  507. r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
  508. r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
  509. r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
  510. )
  511. _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
  512. _GEO_BYPASS = False
  513. IE_NAME = 'youtube'
  514. _TESTS = [
  515. {
  516. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
  517. 'info_dict': {
  518. 'id': 'BaW_jenozKc',
  519. 'ext': 'mp4',
  520. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  521. 'uploader': 'Philipp Hagemeister',
  522. 'uploader_id': '@PhilippHagemeister',
  523. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@PhilippHagemeister',
  524. 'channel': 'Philipp Hagemeister',
  525. 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
  526. 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
  527. 'upload_date': '20121002',
  528. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  529. 'categories': ['Science & Technology'],
  530. 'tags': ['youtube-dl'],
  531. 'duration': 10,
  532. 'view_count': int,
  533. 'like_count': int,
  534. 'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
  535. 'start_time': 1,
  536. 'end_time': 9,
  537. },
  538. },
  539. {
  540. 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
  541. 'note': 'Embed-only video (#1746)',
  542. 'info_dict': {
  543. 'id': 'yZIXLfi8CZQ',
  544. 'ext': 'mp4',
  545. 'upload_date': '20120608',
  546. 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
  547. 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
  548. 'uploader': 'SET India',
  549. 'uploader_id': 'setindia',
  550. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
  551. 'age_limit': 18,
  552. },
  553. 'skip': 'Private video',
  554. },
  555. {
  556. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
  557. 'note': 'Use the first video ID in the URL',
  558. 'info_dict': {
  559. 'id': 'BaW_jenozKc',
  560. 'ext': 'mp4',
  561. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  562. 'uploader': 'Philipp Hagemeister',
  563. 'uploader_id': '@PhilippHagemeister',
  564. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@PhilippHagemeister',
  565. 'upload_date': '20121002',
  566. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  567. 'categories': ['Science & Technology'],
  568. 'tags': ['youtube-dl'],
  569. 'duration': 10,
  570. 'view_count': int,
  571. 'like_count': int,
  572. },
  573. 'params': {
  574. 'skip_download': True,
  575. },
  576. },
  577. {
  578. 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
  579. 'note': '256k DASH audio (format 141) via DASH manifest',
  580. 'info_dict': {
  581. 'id': 'a9LDPn-MO4I',
  582. 'ext': 'm4a',
  583. 'upload_date': '20121002',
  584. 'uploader_id': '8KVIDEO',
  585. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
  586. 'description': '',
  587. 'uploader': '8KVIDEO',
  588. 'title': 'UHDTV TEST 8K VIDEO.mp4'
  589. },
  590. 'params': {
  591. 'youtube_include_dash_manifest': True,
  592. 'format': '141',
  593. },
  594. 'skip': 'format 141 not served any more',
  595. },
  596. # DASH manifest with encrypted signature
  597. {
  598. 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
  599. 'info_dict': {
  600. 'id': 'IB3lcPjvWLA',
  601. 'ext': 'm4a',
  602. 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
  603. 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
  604. 'duration': 244,
  605. 'uploader': 'AfrojackVEVO',
  606. 'uploader_id': '@AfrojackVEVO',
  607. 'upload_date': '20131011',
  608. 'abr': 129.495,
  609. },
  610. 'params': {
  611. 'youtube_include_dash_manifest': True,
  612. 'format': '141/bestaudio[ext=m4a]',
  613. },
  614. },
  615. # Controversy video
  616. {
  617. 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
  618. 'info_dict': {
  619. 'id': 'T4XJQO3qol8',
  620. 'ext': 'mp4',
  621. 'duration': 219,
  622. 'upload_date': '20100909',
  623. 'uploader': 'Amazing Atheist',
  624. 'uploader_id': '@theamazingatheist',
  625. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@theamazingatheist',
  626. 'title': 'Burning Everyone\'s Koran',
  627. 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms \r\n\r\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
  628. }
  629. },
  630. # Age-gated videos
  631. {
  632. 'note': 'Age-gated video (No vevo, embed allowed)',
  633. 'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
  634. 'info_dict': {
  635. 'id': 'HtVdAasjOgU',
  636. 'ext': 'mp4',
  637. 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
  638. 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
  639. 'duration': 142,
  640. 'uploader': 'The Witcher',
  641. 'uploader_id': '@thewitcher',
  642. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@thewitcher',
  643. 'upload_date': '20140605',
  644. 'thumbnail': 'https://i.ytimg.com/vi/HtVdAasjOgU/maxresdefault.jpg',
  645. 'age_limit': 18,
  646. 'categories': ['Gaming'],
  647. 'tags': 'count:17',
  648. 'channel': 'The Witcher',
  649. 'channel_url': 'https://www.youtube.com/channel/UCzybXLxv08IApdjdN0mJhEg',
  650. 'channel_id': 'UCzybXLxv08IApdjdN0mJhEg',
  651. 'view_count': int,
  652. 'like_count': int,
  653. },
  654. },
  655. {
  656. 'note': 'Age-gated video with embed allowed in public site',
  657. 'url': 'https://youtube.com/watch?v=HsUATh_Nc2U',
  658. 'info_dict': {
  659. 'id': 'HsUATh_Nc2U',
  660. 'ext': 'mp4',
  661. 'title': 'Godzilla 2 (Official Video)',
  662. 'description': 'md5:bf77e03fcae5529475e500129b05668a',
  663. 'duration': 177,
  664. 'uploader': 'FlyingKitty',
  665. 'uploader_id': '@FlyingKitty900',
  666. 'upload_date': '20200408',
  667. 'thumbnail': 'https://i.ytimg.com/vi/HsUATh_Nc2U/maxresdefault.jpg',
  668. 'age_limit': 18,
  669. 'categories': ['Entertainment'],
  670. 'tags': ['Flyingkitty', 'godzilla 2'],
  671. 'channel': 'FlyingKitty',
  672. 'channel_url': 'https://www.youtube.com/channel/UCYQT13AtrJC0gsM1far_zJg',
  673. 'channel_id': 'UCYQT13AtrJC0gsM1far_zJg',
  674. 'view_count': int,
  675. 'like_count': int,
  676. },
  677. },
  678. {
  679. 'note': 'Age-gated video embeddable only with clientScreen=EMBED',
  680. 'url': 'https://youtube.com/watch?v=Tq92D6wQ1mg',
  681. 'info_dict': {
  682. 'id': 'Tq92D6wQ1mg',
  683. 'ext': 'mp4',
  684. 'title': '[MMD] Adios - EVERGLOW [+Motion DL]',
  685. 'description': 'md5:17eccca93a786d51bc67646756894066',
  686. 'duration': 106,
  687. 'uploader': 'Projekt Melody',
  688. 'uploader_id': '@ProjektMelody',
  689. 'upload_date': '20191227',
  690. 'age_limit': 18,
  691. 'thumbnail': 'https://i.ytimg.com/vi/Tq92D6wQ1mg/sddefault.jpg',
  692. 'tags': ['mmd', 'dance', 'mikumikudance', 'kpop', 'vtuber'],
  693. 'categories': ['Entertainment'],
  694. 'channel': 'Projekt Melody',
  695. 'channel_url': 'https://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
  696. 'channel_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
  697. 'view_count': int,
  698. 'like_count': int,
  699. },
  700. },
  701. {
  702. 'note': 'Non-Age-gated non-embeddable video',
  703. 'url': 'https://youtube.com/watch?v=MeJVWBSsPAY',
  704. 'info_dict': {
  705. 'id': 'MeJVWBSsPAY',
  706. 'ext': 'mp4',
  707. 'title': 'OOMPH! - Such Mich Find Mich (Lyrics)',
  708. 'description': 'Fan Video. Music & Lyrics by OOMPH!.',
  709. 'duration': 210,
  710. 'upload_date': '20130730',
  711. 'uploader': 'Herr Lurik',
  712. 'uploader_id': '@HerrLurik',
  713. 'uploader_url': 'http://www.youtube.com/@HerrLurik',
  714. 'age_limit': 0,
  715. 'thumbnail': 'https://i.ytimg.com/vi/MeJVWBSsPAY/hqdefault.jpg',
  716. 'tags': ['oomph', 'such mich find mich', 'lyrics', 'german industrial', 'musica industrial'],
  717. 'categories': ['Music'],
  718. 'channel': 'Herr Lurik',
  719. 'channel_url': 'https://www.youtube.com/channel/UCdR3RSDPqub28LjZx0v9-aA',
  720. 'channel_id': 'UCdR3RSDPqub28LjZx0v9-aA',
  721. 'artist': 'OOMPH!',
  722. 'view_count': int,
  723. 'like_count': int,
  724. },
  725. },
  726. {
  727. 'note': 'Non-bypassable age-gated video',
  728. 'url': 'https://youtube.com/watch?v=Cr381pDsSsA',
  729. 'only_matching': True,
  730. },
  731. {
  732. 'note': 'Age-gated video only available with authentication (not via embed workaround)',
  733. 'url': 'XgnwCQzjau8',
  734. 'only_matching': True,
  735. 'skip': '''This video has been removed for violating YouTube's Community Guidelines''',
  736. },
  737. # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
  738. # YouTube Red ad is not captured for creator
  739. {
  740. 'url': '__2ABJjxzNo',
  741. 'info_dict': {
  742. 'id': '__2ABJjxzNo',
  743. 'ext': 'mp4',
  744. 'duration': 266,
  745. 'upload_date': '20100430',
  746. 'uploader_id': '@deadmau5',
  747. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@deadmau5',
  748. 'creator': 'deadmau5',
  749. 'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
  750. 'uploader': 'deadmau5',
  751. 'title': 'Deadmau5 - Some Chords (HD)',
  752. 'alt_title': 'Some Chords',
  753. },
  754. 'expected_warnings': [
  755. 'DASH manifest missing',
  756. ]
  757. },
  758. # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
  759. {
  760. 'url': 'lqQg6PlCWgI',
  761. 'info_dict': {
  762. 'id': 'lqQg6PlCWgI',
  763. 'ext': 'mp4',
  764. 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
  765. 'description': r're:(?s)(?:.+\s)?HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games\s*',
  766. 'duration': 6085,
  767. 'upload_date': '20150827',
  768. 'uploader_id': '@Olympics',
  769. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@Olympics',
  770. 'uploader': r're:Olympics?',
  771. 'age_limit': 0,
  772. 'thumbnail': 'https://i.ytimg.com/vi/lqQg6PlCWgI/maxresdefault.jpg',
  773. 'categories': ['Sports'],
  774. 'tags': ['Hockey', '2012-07-31', '31 July 2012', 'Riverbank Arena', 'Session', 'Olympics', 'Olympic Games', 'London 2012', '2012 Summer Olympics', 'Summer Games'],
  775. 'channel': 'Olympics',
  776. 'channel_url': 'https://www.youtube.com/channel/UCTl3QQTvqHFjurroKxexy2Q',
  777. 'channel_id': 'UCTl3QQTvqHFjurroKxexy2Q',
  778. 'view_count': int,
  779. 'like_count': int,
  780. },
  781. },
  782. # Non-square pixels
  783. {
  784. 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
  785. 'info_dict': {
  786. 'id': '_b-2C3KPAM0',
  787. 'ext': 'mp4',
  788. 'stretched_ratio': 16 / 9.,
  789. 'duration': 85,
  790. 'upload_date': '20110310',
  791. 'uploader_id': '@AllenMeow',
  792. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@AllenMeow',
  793. 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
  794. 'uploader': '孫ᄋᄅ',
  795. 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
  796. },
  797. },
  798. # url_encoded_fmt_stream_map is empty string
  799. {
  800. 'url': 'qEJwOuvDf7I',
  801. 'info_dict': {
  802. 'id': 'qEJwOuvDf7I',
  803. 'ext': 'webm',
  804. 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
  805. 'description': '',
  806. 'upload_date': '20150404',
  807. 'uploader_id': 'spbelect',
  808. 'uploader': 'Наблюдатели Петербурга',
  809. },
  810. 'params': {
  811. 'skip_download': 'requires avconv',
  812. },
  813. 'skip': 'This live event has ended.',
  814. },
  815. # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
  816. {
  817. 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
  818. 'info_dict': {
  819. 'id': 'FIl7x6_3R5Y',
  820. 'ext': 'webm',
  821. 'title': 'md5:7b81415841e02ecd4313668cde88737a',
  822. 'description': 'md5:116377fd2963b81ec4ce64b542173306',
  823. 'duration': 220,
  824. 'upload_date': '20150625',
  825. 'uploader_id': 'dorappi2000',
  826. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
  827. 'uploader': 'dorappi2000',
  828. 'formats': 'mincount:31',
  829. },
  830. 'skip': 'not actual any more',
  831. },
  832. # DASH manifest with segment_list
  833. {
  834. 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
  835. 'md5': '8ce563a1d667b599d21064e982ab9e31',
  836. 'info_dict': {
  837. 'id': 'CsmdDsKjzN8',
  838. 'ext': 'mp4',
  839. 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
  840. 'uploader': 'Airtek',
  841. 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
  842. 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
  843. 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
  844. },
  845. 'params': {
  846. 'youtube_include_dash_manifest': True,
  847. 'format': '135', # bestvideo
  848. },
  849. 'skip': 'This live event has ended.',
  850. },
  851. {
  852. # Multifeed videos (multiple cameras), URL is for Main Camera
  853. 'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
  854. 'info_dict': {
  855. 'id': 'jvGDaLqkpTg',
  856. 'title': 'Tom Clancy Free Weekend Rainbow Whatever',
  857. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  858. },
  859. 'playlist': [{
  860. 'info_dict': {
  861. 'id': 'jvGDaLqkpTg',
  862. 'ext': 'mp4',
  863. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
  864. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  865. 'duration': 10643,
  866. 'upload_date': '20161111',
  867. 'uploader': 'Team PGP',
  868. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  869. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  870. },
  871. }, {
  872. 'info_dict': {
  873. 'id': '3AKt1R1aDnw',
  874. 'ext': 'mp4',
  875. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
  876. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  877. 'duration': 10991,
  878. 'upload_date': '20161111',
  879. 'uploader': 'Team PGP',
  880. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  881. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  882. },
  883. }, {
  884. 'info_dict': {
  885. 'id': 'RtAMM00gpVc',
  886. 'ext': 'mp4',
  887. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
  888. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  889. 'duration': 10995,
  890. 'upload_date': '20161111',
  891. 'uploader': 'Team PGP',
  892. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  893. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  894. },
  895. }, {
  896. 'info_dict': {
  897. 'id': '6N2fdlP3C5U',
  898. 'ext': 'mp4',
  899. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
  900. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  901. 'duration': 10990,
  902. 'upload_date': '20161111',
  903. 'uploader': 'Team PGP',
  904. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  905. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  906. },
  907. }],
  908. 'params': {
  909. 'skip_download': True,
  910. },
  911. 'skip': 'Not multifeed any more',
  912. },
  913. {
  914. # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
  915. 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
  916. 'info_dict': {
  917. 'id': 'gVfLd0zydlo',
  918. 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
  919. },
  920. 'playlist_count': 2,
  921. 'skip': 'Not multifeed any more',
  922. },
  923. {
  924. 'url': 'https://vid.plus/FlRa-iH7PGw',
  925. 'only_matching': True,
  926. },
  927. {
  928. 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
  929. 'only_matching': True,
  930. },
  931. {
  932. # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  933. # Also tests cut-off URL expansion in video description (see
  934. # https://github.com/ytdl-org/youtube-dl/issues/1892,
  935. # https://github.com/ytdl-org/youtube-dl/issues/8164)
  936. 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
  937. 'info_dict': {
  938. 'id': 'lsguqyKfVQg',
  939. 'ext': 'mp4',
  940. 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
  941. 'alt_title': 'Dark Walk',
  942. 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
  943. 'duration': 133,
  944. 'upload_date': '20151119',
  945. 'uploader_id': '@IronSoulElf',
  946. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@IronSoulElf',
  947. 'uploader': 'IronSoulElf',
  948. 'creator': r're:Todd Haberman[;,]\s+Daniel Law Heath and Aaron Kaplan',
  949. 'track': 'Dark Walk',
  950. 'artist': r're:Todd Haberman[;,]\s+Daniel Law Heath and Aaron Kaplan',
  951. 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
  952. },
  953. 'params': {
  954. 'skip_download': True,
  955. },
  956. },
  957. {
  958. # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  959. 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
  960. 'only_matching': True,
  961. },
  962. {
  963. # Video with yt:stretch=17:0
  964. 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
  965. 'info_dict': {
  966. 'id': 'Q39EVAstoRM',
  967. 'ext': 'mp4',
  968. 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
  969. 'description': 'md5:ee18a25c350637c8faff806845bddee9',
  970. 'upload_date': '20151107',
  971. 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
  972. 'uploader': 'CH GAMER DROID',
  973. },
  974. 'params': {
  975. 'skip_download': True,
  976. },
  977. 'skip': 'This video does not exist.',
  978. },
  979. {
  980. # Video with incomplete 'yt:stretch=16:'
  981. 'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
  982. 'only_matching': True,
  983. },
  984. {
  985. # Video licensed under Creative Commons
  986. 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
  987. 'info_dict': {
  988. 'id': 'M4gD1WSo5mA',
  989. 'ext': 'mp4',
  990. 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
  991. 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
  992. 'duration': 721,
  993. 'upload_date': '20150127',
  994. 'uploader_id': '@BKCHarvard',
  995. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@BKCHarvard',
  996. 'uploader': 'The Berkman Klein Center for Internet & Society',
  997. 'license': 'Creative Commons Attribution license (reuse allowed)',
  998. },
  999. 'params': {
  1000. 'skip_download': True,
  1001. },
  1002. },
  1003. {
  1004. # Channel-like uploader_url
  1005. 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
  1006. 'info_dict': {
  1007. 'id': 'eQcmzGIKrzg',
  1008. 'ext': 'mp4',
  1009. 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
  1010. 'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
  1011. 'duration': 4060,
  1012. 'upload_date': '20151119',
  1013. 'uploader': 'Bernie Sanders',
  1014. 'uploader_id': '@BernieSanders',
  1015. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@BernieSanders',
  1016. 'license': 'Creative Commons Attribution license (reuse allowed)',
  1017. },
  1018. 'params': {
  1019. 'skip_download': True,
  1020. },
  1021. },
  1022. {
  1023. 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
  1024. 'only_matching': True,
  1025. },
  1026. {
  1027. # YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
  1028. 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
  1029. 'only_matching': True,
  1030. },
  1031. {
  1032. # Rental video preview
  1033. 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
  1034. 'info_dict': {
  1035. 'id': 'uGpuVWrhIzE',
  1036. 'ext': 'mp4',
  1037. 'title': 'Piku - Trailer',
  1038. 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
  1039. 'upload_date': '20150811',
  1040. 'uploader': 'FlixMatrix',
  1041. 'uploader_id': 'FlixMatrixKaravan',
  1042. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
  1043. 'license': 'Standard YouTube License',
  1044. },
  1045. 'params': {
  1046. 'skip_download': True,
  1047. },
  1048. 'skip': 'This video is not available.',
  1049. },
  1050. {
  1051. # YouTube Red video with episode data
  1052. 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
  1053. 'info_dict': {
  1054. 'id': 'iqKdEhx-dD4',
  1055. 'ext': 'mp4',
  1056. 'title': 'Isolation - Mind Field (Ep 1)',
  1057. 'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
  1058. 'duration': 2085,
  1059. 'upload_date': '20170118',
  1060. 'uploader': 'Vsauce',
  1061. 'uploader_id': '@Vsauce',
  1062. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@Vsauce',
  1063. 'series': 'Mind Field',
  1064. 'season_number': 1,
  1065. 'episode_number': 1,
  1066. },
  1067. 'params': {
  1068. 'skip_download': True,
  1069. },
  1070. 'expected_warnings': [
  1071. 'Skipping DASH manifest',
  1072. ],
  1073. },
  1074. {
  1075. # The following content has been identified by the YouTube community
  1076. # as inappropriate or offensive to some audiences.
  1077. 'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
  1078. 'info_dict': {
  1079. 'id': '6SJNVb0GnPI',
  1080. 'ext': 'mp4',
  1081. 'title': 'Race Differences in Intelligence',
  1082. 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
  1083. 'duration': 965,
  1084. 'upload_date': '20140124',
  1085. 'uploader': 'New Century Foundation',
  1086. 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
  1087. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
  1088. },
  1089. 'params': {
  1090. 'skip_download': True,
  1091. },
  1092. 'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
  1093. },
  1094. {
  1095. # itag 212
  1096. 'url': '1t24XAntNCY',
  1097. 'only_matching': True,
  1098. },
  1099. {
  1100. # geo restricted to JP
  1101. 'url': 'sJL6WA-aGkQ',
  1102. 'only_matching': True,
  1103. },
  1104. {
  1105. 'url': 'https://invidio.us/watch?v=BaW_jenozKc',
  1106. 'only_matching': True,
  1107. },
  1108. {
  1109. 'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
  1110. 'only_matching': True,
  1111. },
  1112. {
  1113. # from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
  1114. 'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
  1115. 'only_matching': True,
  1116. },
  1117. {
  1118. # DRM protected
  1119. 'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
  1120. 'only_matching': True,
  1121. },
  1122. {
  1123. # Video with unsupported adaptive stream type formats
  1124. 'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
  1125. 'info_dict': {
  1126. 'id': 'Z4Vy8R84T1U',
  1127. 'ext': 'mp4',
  1128. 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
  1129. 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
  1130. 'duration': 433,
  1131. 'upload_date': '20130923',
  1132. 'uploader': 'Amelia Putri Harwita',
  1133. 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
  1134. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
  1135. 'formats': 'maxcount:10',
  1136. },
  1137. 'params': {
  1138. 'skip_download': True,
  1139. 'youtube_include_dash_manifest': False,
  1140. },
  1141. 'skip': 'not actual any more',
  1142. },
  1143. {
  1144. # Youtube Music Auto-generated description
  1145. 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
  1146. 'info_dict': {
  1147. 'id': 'MgNrAu2pzNs',
  1148. 'ext': 'mp4',
  1149. 'title': 'Voyeur Girl',
  1150. 'description': 'md5:7ae382a65843d6df2685993e90a8628f',
  1151. 'upload_date': '20190312',
  1152. 'uploader': 'Stephen - Topic',
  1153. 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
  1154. 'artist': 'Stephen',
  1155. 'track': 'Voyeur Girl',
  1156. 'album': 'it\'s too much love to know my dear',
  1157. 'release_date': '20190313',
  1158. 'release_year': 2019,
  1159. },
  1160. 'params': {
  1161. 'skip_download': True,
  1162. },
  1163. },
  1164. {
  1165. 'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
  1166. 'only_matching': True,
  1167. },
  1168. {
  1169. # invalid -> valid video id redirection
  1170. 'url': 'DJztXj2GPfl',
  1171. 'info_dict': {
  1172. 'id': 'DJztXj2GPfk',
  1173. 'ext': 'mp4',
  1174. 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
  1175. 'description': 'md5:bf577a41da97918e94fa9798d9228825',
  1176. 'upload_date': '20090125',
  1177. 'uploader': 'Prochorowka',
  1178. 'uploader_id': 'Prochorowka',
  1179. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
  1180. 'artist': 'Panjabi MC',
  1181. 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
  1182. 'album': 'Beware of the Boys (Mundian To Bach Ke)',
  1183. },
  1184. 'params': {
  1185. 'skip_download': True,
  1186. },
  1187. 'skip': 'Video unavailable',
  1188. },
  1189. {
  1190. # empty description results in an empty string
  1191. 'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
  1192. 'info_dict': {
  1193. 'id': 'x41yOUIvK2k',
  1194. 'ext': 'mp4',
  1195. 'title': 'IMG 3456',
  1196. 'description': '',
  1197. 'upload_date': '20170613',
  1198. 'uploader': "l'Or Vert asbl",
  1199. 'uploader_id': '@ElevageOrVert',
  1200. },
  1201. 'params': {
  1202. 'skip_download': True,
  1203. },
  1204. },
  1205. {
  1206. # with '};' inside yt initial data (see [1])
  1207. # see [2] for an example with '};' inside ytInitialPlayerResponse
  1208. # 1. https://github.com/ytdl-org/youtube-dl/issues/27093
  1209. # 2. https://github.com/ytdl-org/youtube-dl/issues/27216
  1210. 'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
  1211. 'info_dict': {
  1212. 'id': 'CHqg6qOn4no',
  1213. 'ext': 'mp4',
  1214. 'title': 'Part 77 Sort a list of simple types in c#',
  1215. 'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
  1216. 'upload_date': '20130831',
  1217. 'uploader': 'kudvenkat',
  1218. 'uploader_id': '@Csharp-video-tutorialsBlogspot',
  1219. },
  1220. 'params': {
  1221. 'skip_download': True,
  1222. },
  1223. },
  1224. {
  1225. # another example of '};' in ytInitialData
  1226. 'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
  1227. 'only_matching': True,
  1228. },
  1229. {
  1230. 'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
  1231. 'only_matching': True,
  1232. },
  1233. {
  1234. # https://github.com/ytdl-org/youtube-dl/pull/28094
  1235. 'url': 'OtqTfy26tG0',
  1236. 'info_dict': {
  1237. 'id': 'OtqTfy26tG0',
  1238. 'ext': 'mp4',
  1239. 'title': 'Burn Out',
  1240. 'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
  1241. 'upload_date': '20141120',
  1242. 'uploader': 'The Cinematic Orchestra - Topic',
  1243. 'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
  1244. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
  1245. 'artist': 'The Cinematic Orchestra',
  1246. 'track': 'Burn Out',
  1247. 'album': 'Every Day',
  1248. 'release_data': None,
  1249. 'release_year': None,
  1250. },
  1251. 'params': {
  1252. 'skip_download': True,
  1253. },
  1254. },
  1255. {
  1256. # controversial video, only works with bpctr when authenticated with cookies
  1257. 'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
  1258. 'only_matching': True,
  1259. },
  1260. {
  1261. # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
  1262. 'url': 'cBvYw8_A0vQ',
  1263. 'info_dict': {
  1264. 'id': 'cBvYw8_A0vQ',
  1265. 'ext': 'mp4',
  1266. 'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
  1267. 'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
  1268. 'upload_date': '20201120',
  1269. 'uploader': 'Walk around Japan',
  1270. 'uploader_id': '@walkaroundjapan7124',
  1271. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@walkaroundjapan7124',
  1272. },
  1273. 'params': {
  1274. 'skip_download': True,
  1275. },
  1276. },
  1277. {
  1278. # YT 'Shorts'
  1279. 'url': 'https://youtube.com/shorts/4L2J27mJ3Dc',
  1280. 'info_dict': {
  1281. 'id': '4L2J27mJ3Dc',
  1282. 'ext': 'mp4',
  1283. 'title': 'Midwest Squid Game #Shorts',
  1284. 'description': 'md5:976512b8a29269b93bbd8a61edc45a6d',
  1285. 'upload_date': '20211025',
  1286. 'uploader': 'Charlie Berens',
  1287. 'uploader_id': '@CharlieBerens',
  1288. },
  1289. 'params': {
  1290. 'skip_download': True,
  1291. },
  1292. },
  1293. ]
    # Known itag -> hard-coded format attributes, used to fill in metadata
    # that the player response may omit.  Negative 'preference' values
    # deprioritise 3D and HLS variants against the plain progressive/DASH ones.
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
        '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
        '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
        '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
        '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
        '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},

        # 3D videos
        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},

        # Apple HTTP Live Streaming
        '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},

        # DASH mp4 video
        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'},  # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
        '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
        '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},

        # Dash mp4 audio
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
        '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
        '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
        '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
        '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},

        # Dash webm
        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},

        # Dash webm audio
        '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
        '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},

        # Dash webm audio with opus inside
        '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
        '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
        '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},

        # RTMP (unnamed)
        '_rtmp': {'protocol': 'rtmp'},

        # av01 video only formats sometimes served with "unknown" codecs
        '394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
        '395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
        '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
        '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
    }
  1389. @classmethod
  1390. def suitable(cls, url):
  1391. if parse_qs(url).get('list', [None])[0]:
  1392. return False
  1393. return super(YoutubeIE, cls).suitable(url)
    def __init__(self, *args, **kwargs):
        super(YoutubeIE, self).__init__(*args, **kwargs)
        # Per-instance caches:
        # downloaded player JS source, keyed by player ID (see _load_player)
        self._code_cache = {}
        # results (or captured exceptions) of player-function extraction,
        # keyed by an arbitrary cache id tuple (see _cached)
        self._player_cache = {}
    # *ytcfgs, webpage=None
    def _extract_player_url(self, *ytcfgs, **kw_webpage):
        """Return the absolute URL of the player JS, or None if not found.

        Accepts any number of ytcfg dicts; if the first positional argument
        is not a dict it is treated as webpage HTML to scrape a player URL
        from (the signature emulates `*ytcfgs, webpage=None`, which cannot
        be written directly in Python 2 compatible code).
        """
        if ytcfgs and not isinstance(ytcfgs[0], dict):
            webpage = kw_webpage.get('webpage') or ytcfgs[0]
            if webpage:
                player_url = self._search_regex(
                    r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"',
                    webpage or '', 'player URL', fatal=False)
                if player_url:
                    # present the scraped URL as one more ytcfg candidate
                    ytcfgs = ytcfgs + ({'PLAYER_JS_URL': player_url},)

        # first matching path wins; relative URLs are resolved against
        # https://www.youtube.com by the expected_type callback
        return traverse_obj(
            ytcfgs, (Ellipsis, 'PLAYER_JS_URL'), (Ellipsis, 'WEB_PLAYER_CONTEXT_CONFIGS', Ellipsis, 'jsUrl'),
            get_all=False, expected_type=lambda u: urljoin('https://www.youtube.com', u))
  1411. def _download_player_url(self, video_id, fatal=False):
  1412. res = self._download_webpage(
  1413. 'https://www.youtube.com/iframe_api',
  1414. note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
  1415. player_version = self._search_regex(
  1416. r'player\\?/([0-9a-fA-F]{8})\\?/', res or '', 'player version', fatal=fatal,
  1417. default=NO_DEFAULT if res else None)
  1418. if player_version:
  1419. return 'https://www.youtube.com/s/player/{0}/player_ias.vflset/en_US/base.js'.format(player_version)
  1420. def _signature_cache_id(self, example_sig):
  1421. """ Return a string representation of a signature """
  1422. return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
  1423. @classmethod
  1424. def _extract_player_info(cls, player_url):
  1425. for player_re in cls._PLAYER_INFO_RE:
  1426. id_m = re.search(player_re, player_url)
  1427. if id_m:
  1428. break
  1429. else:
  1430. raise ExtractorError('Cannot identify player %r' % player_url)
  1431. return id_m.group('id')
  1432. def _load_player(self, video_id, player_url, fatal=True, player_id=None):
  1433. if not player_id:
  1434. player_id = self._extract_player_info(player_url)
  1435. if player_id not in self._code_cache:
  1436. code = self._download_webpage(
  1437. player_url, video_id, fatal=fatal,
  1438. note='Downloading player ' + player_id,
  1439. errnote='Download of %s failed' % player_url)
  1440. if code:
  1441. self._code_cache[player_id] = code
  1442. return self._code_cache[player_id] if fatal else self._code_cache.get(player_id)
  1443. def _extract_signature_function(self, video_id, player_url, example_sig):
  1444. player_id = self._extract_player_info(player_url)
  1445. # Read from filesystem cache
  1446. func_id = 'js_{0}_{1}'.format(
  1447. player_id, self._signature_cache_id(example_sig))
  1448. assert os.path.basename(func_id) == func_id
  1449. self.write_debug('Extracting signature function {0}'.format(func_id))
  1450. cache_spec, code = self.cache.load('youtube-sigfuncs', func_id), None
  1451. if not cache_spec:
  1452. code = self._load_player(video_id, player_url, player_id)
  1453. if code:
  1454. res = self._parse_sig_js(code)
  1455. test_string = ''.join(map(compat_chr, range(len(example_sig))))
  1456. cache_spec = [ord(c) for c in res(test_string)]
  1457. self.cache.store('youtube-sigfuncs', func_id, cache_spec)
  1458. return lambda s: ''.join(s[i] for i in cache_spec)
    def _print_sig_code(self, func, example_sig):
        """Print Python source equivalent to the extracted signature
        function (developer aid behind the youtube_print_sig_code option)."""
        if not self.get_param('youtube_print_sig_code'):
            return

        def gen_sig_code(idxs):
            # Render the index permutation `idxs` as compact expressions,
            # merging runs with constant step +1/-1 into s[a:b:c] slices.
            def _genslice(start, end, step):
                starts = '' if start == 0 else str(start)
                # end is inclusive here; extend by one step for slice syntax
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[{0}{1}{2}]'.format(starts, ends, steps)

            step = None
            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    # inside a run: keep extending it, or flush the slice
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    # two adjacent indices open a new sliceable run
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            # flush the final element or the still-open run
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)

        # record the permutation by running func on a string of distinct chars
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
    """
    Locate the signature-scrambling function in the player JS and return a
    Python callable (str -> str) that emulates it via JSInterpreter.

    Raises (via _search_regex) if none of the known patterns match.
    """
    funcname = self._search_regex(
        # Patterns ordered roughly newest-first; each captures the
        # scrambling function's name in the 'sig' group.
        (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
         r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
         r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)(?:;[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\))?',
         r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
         # Obsolete patterns
         r'("|\')signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
         r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
        jscode, 'Initial JS player signature function name', group='sig')

    jsi = JSInterpreter(jscode)
    initial_function = jsi.extract_function(funcname)
    # The JS function takes the signature as its single argument
    return lambda s: initial_function([s])
  1516. def _cached(self, func, *cache_id):
  1517. def inner(*args, **kwargs):
  1518. if cache_id not in self._player_cache:
  1519. try:
  1520. self._player_cache[cache_id] = func(*args, **kwargs)
  1521. except ExtractorError as e:
  1522. self._player_cache[cache_id] = e
  1523. except Exception as e:
  1524. self._player_cache[cache_id] = ExtractorError(traceback.format_exc(), cause=e)
  1525. ret = self._player_cache[cache_id]
  1526. if isinstance(ret, Exception):
  1527. raise ret
  1528. return ret
  1529. return inner
  1530. def _decrypt_signature(self, s, video_id, player_url):
  1531. """Turn the encrypted s field into a working signature"""
  1532. extract_sig = self._cached(
  1533. self._extract_signature_function, 'sig', player_url, self._signature_cache_id(s))
  1534. func = extract_sig(video_id, player_url, s)
  1535. self._print_sig_code(func, s)
  1536. return func(s)
  1537. # from yt-dlp
  1538. # See also:
  1539. # 1. https://github.com/ytdl-org/youtube-dl/issues/29326#issuecomment-894619419
  1540. # 2. https://code.videolan.org/videolan/vlc/-/blob/4fb284e5af69aa9ac2100ccbdd3b88debec9987f/share/lua/playlist/youtube.lua#L116
  1541. # 3. https://github.com/ytdl-org/youtube-dl/issues/30097#issuecomment-950157377
  1542. def _decrypt_nsig(self, n, video_id, player_url):
  1543. """Turn the encrypted n field into a working signature"""
  1544. if player_url is None:
  1545. raise ExtractorError('Cannot decrypt nsig without player_url')
  1546. try:
  1547. jsi, player_id, func_code = self._extract_n_function_code(video_id, player_url)
  1548. except ExtractorError as e:
  1549. raise ExtractorError('Unable to extract nsig jsi, player_id, func_codefunction code', cause=e)
  1550. if self.get_param('youtube_print_sig_code'):
  1551. self.to_screen('Extracted nsig function from {0}:\n{1}\n'.format(
  1552. player_id, func_code[1]))
  1553. try:
  1554. extract_nsig = self._cached(self._extract_n_function_from_code, 'nsig func', player_url)
  1555. ret = extract_nsig(jsi, func_code)(n)
  1556. except JSInterpreter.Exception as e:
  1557. self.report_warning(
  1558. '%s (%s %s)' % (
  1559. self.__ie_msg(
  1560. 'Unable to decode n-parameter: download likely to be throttled'),
  1561. error_to_compat_str(e),
  1562. traceback.format_exc()))
  1563. return
  1564. self.write_debug('Decrypted nsig {0} => {1}'.format(n, ret))
  1565. return ret
def _extract_n_function_name(self, jscode):
    """
    Return the name of the n-parameter descrambling function in jscode.

    The player either calls the function directly (b=nfunc(...)) or via an
    element of an array (b=nfunc[idx](...)); in the latter case the array
    literal is located and parsed as JSON to resolve the real name.
    """
    func_name, idx = self._search_regex(
        r'\.get\("n"\)\)&&\(b=(?P<nfunc>[a-zA-Z_$][\w$]*)(?:\[(?P<idx>\d+)\])?\([\w$]+\)',
        jscode, 'Initial JS player n function name', group=('nfunc', 'idx'))
    if not idx:
        # Direct call: the captured identifier is already the function name
        return func_name
    # Indirect call: look up element `idx` of the array `func_name`
    return self._parse_json(self._search_regex(
        r'var {0}\s*=\s*(\[.+?\])\s*[,;]'.format(re.escape(func_name)), jscode,
        'Initial JS player n function list ({0}.{1})'.format(func_name, idx)),
        func_name, transform_source=js_to_json)[int(idx)]
def _extract_n_function_code(self, video_id, player_url):
    """
    Return (jsi, player_id, func_code) for the n-descrambling function,
    where func_code is (argument names, function body) as accepted by
    JSInterpreter.extract_function_from_code(); cached per player_id.
    """
    player_id = self._extract_player_info(player_url)
    func_code = self.cache.load('youtube-nsig', player_id)
    # NOTE(review): on a cache hit, jscode is the cached func_code pair, so
    # the JSInterpreter below is built from that rather than player JS; the
    # returned jsi appears unused on that path - confirm before relying on it.
    jscode = func_code or self._load_player(video_id, player_url)
    jsi = JSInterpreter(jscode)

    if func_code:
        return jsi, player_id, func_code

    func_name = self._extract_n_function_name(jscode)

    # For redundancy
    func_code = self._search_regex(
        r'''(?xs)%s\s*=\s*function\s*\((?P<var>[\w$]+)\)\s*
        # NB: The end of the regex is intentionally kept strict
        {(?P<code>.+?}\s*return\ [\w$]+.join\(""\))};''' % func_name,
        jscode, 'nsig function', group=('var', 'code'), default=None)
    if func_code:
        # Normalise to ([argname], code) like extract_function_code() returns
        func_code = ([func_code[0]], func_code[1])
    else:
        # Fall back to the generic function extractor
        self.write_debug('Extracting nsig function with jsinterp')
        func_code = jsi.extract_function_code(func_name)

    self.cache.store('youtube-nsig', player_id, func_code)
    return jsi, player_id, func_code
  1597. def _extract_n_function_from_code(self, jsi, func_code):
  1598. func = jsi.extract_function_from_code(*func_code)
  1599. def extract_nsig(s):
  1600. try:
  1601. ret = func([s])
  1602. except JSInterpreter.Exception:
  1603. raise
  1604. except Exception as e:
  1605. raise JSInterpreter.Exception(traceback.format_exc(), cause=e)
  1606. if ret.startswith('enhanced_except_'):
  1607. raise JSInterpreter.Exception('Signature function returned an exception')
  1608. return ret
  1609. return extract_nsig
def _unthrottle_format_urls(self, video_id, player_url, *formats):
    """
    Replace the throttling 'n' query parameter of each format's URL in
    place with its decrypted value; formats without 'n' are skipped.
    """
    def decrypt_nsig(n):
        # Returns the _decrypt_nsig callable memoised per (n, player_url)
        return self._cached(self._decrypt_nsig, 'nsig', n, player_url)

    for fmt in formats:
        parsed_fmt_url = compat_urllib_parse.urlparse(fmt['url'])
        n_param = compat_parse_qs(parsed_fmt_url.query).get('n')
        if not n_param:
            continue
        # Use the last occurrence if the parameter is repeated
        n_param = n_param[-1]
        n_response = decrypt_nsig(n_param)(n_param, video_id, player_url)
        if n_response is None:
            # give up if descrambling failed
            break
        fmt['url'] = update_url_query(fmt['url'], {'n': n_response})
  1624. # from yt-dlp, with tweaks
  1625. def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
  1626. """
  1627. Extract signatureTimestamp (sts)
  1628. Required to tell API what sig/player version is in use.
  1629. """
  1630. sts = traverse_obj(ytcfg, 'STS', expected_type=int)
  1631. if not sts:
  1632. # Attempt to extract from player
  1633. if player_url is None:
  1634. error_msg = 'Cannot extract signature timestamp without player_url.'
  1635. if fatal:
  1636. raise ExtractorError(error_msg)
  1637. self.report_warning(error_msg)
  1638. return
  1639. code = self._load_player(video_id, player_url, fatal=fatal)
  1640. sts = int_or_none(self._search_regex(
  1641. r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code or '',
  1642. 'JS player signature timestamp', group='sts', fatal=fatal))
  1643. return sts
def _mark_watched(self, video_id, player_response):
    """
    Ping YouTube's playback-stats URL (best effort) so the video is
    recorded as watched on the account; silently returns if the
    tracking URL is missing.
    """
    playback_url = url_or_none(try_get(
        player_response,
        lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']))
    if not playback_url:
        return

    # cpn generation algorithm is reverse engineered from base.js.
    # In fact it works even with dummy cpn.
    CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
    cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16))

    # more consistent results setting it to right before the end
    qs = parse_qs(playback_url)
    video_length = '{0}'.format(float((qs.get('len') or ['1.5'])[0]) - 1)

    playback_url = update_url_query(
        playback_url, {
            'ver': '2',
            'cpn': cpn,
            'cmt': video_length,
            'el': 'detailpage',  # otherwise defaults to "shorts"
        })

    # fatal=False: marking watched is best-effort and must not abort extraction
    self._download_webpage(
        playback_url, video_id, 'Marking watched',
        'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
    """
    Return a list of YouTube embeds found in webpage HTML: full embed
    URLs for iframe/object/SWF players, bare video ids for the lazyYT
    and Wordpress "YouTube Video Importer" markup.
    """
    # Embedded YouTube player
    entries = [
        unescapeHTML(mobj.group('url'))
        for mobj in re.finditer(r'''(?x)
            (?:
                <iframe[^>]+?src=|
                data-video-url=|
                <embed[^>]+?src=|
                embedSWF\(?:\s*|
                <object[^>]+data=|
                new\s+SWFObject\(
            )
            (["\'])
            (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
            (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
            \1''', webpage)]

    # lazyYT YouTube embed
    entries.extend(list(map(
        unescapeHTML,
        re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))

    # Wordpress "YouTube Video Importer" plugin
    matches = re.findall(r'''(?x)<div[^>]+
        class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
        data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
    # Last group of each match is the captured video id
    entries.extend(m[-1] for m in matches)

    return entries
  1695. @staticmethod
  1696. def _extract_url(webpage):
  1697. urls = YoutubeIE._extract_urls(webpage)
  1698. return urls[0] if urls else None
  1699. @classmethod
  1700. def extract_id(cls, url):
  1701. mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
  1702. if mobj is None:
  1703. raise ExtractorError('Invalid URL: %s' % url)
  1704. video_id = mobj.group(2)
  1705. return video_id
  1706. def _extract_chapters_from_json(self, data, video_id, duration):
  1707. chapters_list = try_get(
  1708. data,
  1709. lambda x: x['playerOverlays']
  1710. ['playerOverlayRenderer']
  1711. ['decoratedPlayerBarRenderer']
  1712. ['decoratedPlayerBarRenderer']
  1713. ['playerBar']
  1714. ['chapteredPlayerBarRenderer']
  1715. ['chapters'],
  1716. list)
  1717. if not chapters_list:
  1718. return
  1719. def chapter_time(chapter):
  1720. return float_or_none(
  1721. try_get(
  1722. chapter,
  1723. lambda x: x['chapterRenderer']['timeRangeStartMillis'],
  1724. int),
  1725. scale=1000)
  1726. chapters = []
  1727. for next_num, chapter in enumerate(chapters_list, start=1):
  1728. start_time = chapter_time(chapter)
  1729. if start_time is None:
  1730. continue
  1731. end_time = (chapter_time(chapters_list[next_num])
  1732. if next_num < len(chapters_list) else duration)
  1733. if end_time is None:
  1734. continue
  1735. title = try_get(
  1736. chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
  1737. compat_str)
  1738. chapters.append({
  1739. 'start_time': start_time,
  1740. 'end_time': end_time,
  1741. 'title': title,
  1742. })
  1743. return chapters
  1744. def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
  1745. return self._parse_json(self._search_regex(
  1746. (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
  1747. regex), webpage, name, default='{}'), video_id, fatal=False)
  1748. def _real_extract(self, url):
  1749. url, smuggled_data = unsmuggle_url(url, {})
  1750. video_id = self._match_id(url)
  1751. base_url = self.http_scheme() + '//www.youtube.com/'
  1752. webpage_url = base_url + 'watch?v=' + video_id
  1753. webpage = self._download_webpage(
  1754. webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
  1755. player_response = None
  1756. player_url = None
  1757. if webpage:
  1758. player_response = self._extract_yt_initial_variable(
  1759. webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
  1760. video_id, 'initial player response')
  1761. if not player_response:
  1762. player_response = self._call_api(
  1763. 'player', {'videoId': video_id}, video_id)
  1764. def is_agegated(playability):
  1765. if not isinstance(playability, dict):
  1766. return
  1767. if playability.get('desktopLegacyAgeGateReason'):
  1768. return True
  1769. reasons = filter(None, (playability.get(r) for r in ('status', 'reason')))
  1770. AGE_GATE_REASONS = (
  1771. 'confirm your age', 'age-restricted', 'inappropriate', # reason
  1772. 'age_verification_required', 'age_check_required', # status
  1773. )
  1774. return any(expected in reason for expected in AGE_GATE_REASONS for reason in reasons)
  1775. def get_playability_status(response):
  1776. return try_get(response, lambda x: x['playabilityStatus'], dict) or {}
  1777. playability_status = get_playability_status(player_response)
  1778. if (is_agegated(playability_status)
  1779. and int_or_none(self._downloader.params.get('age_limit'), default=18) >= 18):
  1780. self.report_age_confirmation()
  1781. # Thanks: https://github.com/yt-dlp/yt-dlp/pull/3233
  1782. pb_context = {'html5Preference': 'HTML5_PREF_WANTS'}
  1783. # Use signatureTimestamp if available
  1784. # Thanks https://github.com/ytdl-org/youtube-dl/issues/31034#issuecomment-1160718026
  1785. player_url = self._extract_player_url(webpage)
  1786. ytcfg = self._extract_ytcfg(video_id, webpage)
  1787. sts = self._extract_signature_timestamp(video_id, player_url, ytcfg)
  1788. if sts:
  1789. pb_context['signatureTimestamp'] = sts
  1790. query = {
  1791. 'playbackContext': {'contentPlaybackContext': pb_context},
  1792. 'contentCheckOk': True,
  1793. 'racyCheckOk': True,
  1794. 'context': {
  1795. 'client': {'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER', 'clientVersion': '2.0', 'hl': 'en', 'clientScreen': 'EMBED'},
  1796. 'thirdParty': {'embedUrl': 'https://google.com'},
  1797. },
  1798. 'videoId': video_id,
  1799. }
  1800. headers = {
  1801. 'X-YouTube-Client-Name': '85',
  1802. 'X-YouTube-Client-Version': '2.0',
  1803. 'Origin': 'https://www.youtube.com'
  1804. }
  1805. video_info = self._call_api('player', query, video_id, fatal=False, headers=headers)
  1806. age_gate_status = get_playability_status(video_info)
  1807. if age_gate_status.get('status') == 'OK':
  1808. player_response = video_info
  1809. playability_status = age_gate_status
  1810. trailer_video_id = try_get(
  1811. playability_status,
  1812. lambda x: x['errorScreen']['playerLegacyDesktopYpcTrailerRenderer']['trailerVideoId'],
  1813. compat_str)
  1814. if trailer_video_id:
  1815. return self.url_result(
  1816. trailer_video_id, self.ie_key(), trailer_video_id)
  1817. def get_text(x):
  1818. if not x:
  1819. return
  1820. text = x.get('simpleText')
  1821. if text and isinstance(text, compat_str):
  1822. return text
  1823. runs = x.get('runs')
  1824. if not isinstance(runs, list):
  1825. return
  1826. return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])
  1827. search_meta = (
  1828. lambda x: self._html_search_meta(x, webpage, default=None)) \
  1829. if webpage else lambda x: None
  1830. video_details = player_response.get('videoDetails') or {}
  1831. microformat = try_get(
  1832. player_response,
  1833. lambda x: x['microformat']['playerMicroformatRenderer'],
  1834. dict) or {}
  1835. video_title = video_details.get('title') \
  1836. or get_text(microformat.get('title')) \
  1837. or search_meta(['og:title', 'twitter:title', 'title'])
  1838. video_description = video_details.get('shortDescription')
  1839. if not smuggled_data.get('force_singlefeed', False):
  1840. if not self._downloader.params.get('noplaylist'):
  1841. multifeed_metadata_list = try_get(
  1842. player_response,
  1843. lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
  1844. compat_str)
  1845. if multifeed_metadata_list:
  1846. entries = []
  1847. feed_ids = []
  1848. for feed in multifeed_metadata_list.split(','):
  1849. # Unquote should take place before split on comma (,) since textual
  1850. # fields may contain comma as well (see
  1851. # https://github.com/ytdl-org/youtube-dl/issues/8536)
  1852. feed_data = compat_parse_qs(
  1853. compat_urllib_parse_unquote_plus(feed))
  1854. def feed_entry(name):
  1855. return try_get(
  1856. feed_data, lambda x: x[name][0], compat_str)
  1857. feed_id = feed_entry('id')
  1858. if not feed_id:
  1859. continue
  1860. feed_title = feed_entry('title')
  1861. title = video_title
  1862. if feed_title:
  1863. title += ' (%s)' % feed_title
  1864. entries.append({
  1865. '_type': 'url_transparent',
  1866. 'ie_key': 'Youtube',
  1867. 'url': smuggle_url(
  1868. base_url + 'watch?v=' + feed_data['id'][0],
  1869. {'force_singlefeed': True}),
  1870. 'title': title,
  1871. })
  1872. feed_ids.append(feed_id)
  1873. self.to_screen(
  1874. 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
  1875. % (', '.join(feed_ids), video_id))
  1876. return self.playlist_result(
  1877. entries, video_id, video_title, video_description)
  1878. else:
  1879. self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
  1880. if not player_url:
  1881. player_url = self._extract_player_url(webpage)
  1882. formats = []
  1883. itags = collections.defaultdict(set)
  1884. itag_qualities = {}
  1885. q = qualities(['tiny', 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'])
  1886. CHUNK_SIZE = 10 << 20
  1887. streaming_data = player_response.get('streamingData') or {}
  1888. streaming_formats = streaming_data.get('formats') or []
  1889. streaming_formats.extend(streaming_data.get('adaptiveFormats') or [])
  1890. def build_fragments(f):
  1891. return LazyList({
  1892. 'url': update_url_query(f['url'], {
  1893. 'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize']))
  1894. })
  1895. } for range_start in range(0, f['filesize'], CHUNK_SIZE))
  1896. lower = lambda s: s.lower()
  1897. for fmt in streaming_formats:
  1898. if fmt.get('targetDurationSec'):
  1899. continue
  1900. itag = str_or_none(fmt.get('itag'))
  1901. audio_track = traverse_obj(fmt, ('audioTrack', T(dict))) or {}
  1902. quality = traverse_obj(fmt, ((
  1903. # The 3gp format (17) in android client has a quality of "small",
  1904. # but is actually worse than other formats
  1905. T(lambda _: 'tiny' if itag == 17 else None),
  1906. ('quality', T(lambda q: q if q and q != 'tiny' else None)),
  1907. ('audioQuality', T(lower)),
  1908. 'quality'), T(txt_or_none)), get_all=False)
  1909. if quality and itag:
  1910. itag_qualities[itag] = quality
  1911. # FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment
  1912. # (adding `&sq=0` to the URL) and parsing emsg box to determine the
  1913. # number of fragments that would subsequently be requested with (`&sq=N`)
  1914. if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
  1915. continue
  1916. fmt_url = fmt.get('url')
  1917. if not fmt_url:
  1918. sc = compat_parse_qs(fmt.get('signatureCipher'))
  1919. fmt_url = traverse_obj(sc, ('url', -1, T(url_or_none)))
  1920. encrypted_sig = traverse_obj(sc, ('s', -1))
  1921. if not (fmt_url and encrypted_sig):
  1922. continue
  1923. player_url = player_url or self._extract_player_url(webpage)
  1924. if not player_url:
  1925. continue
  1926. try:
  1927. fmt_url = update_url_query(fmt_url, {
  1928. traverse_obj(sc, ('sp', -1)) or 'signature':
  1929. [self._decrypt_signature(encrypted_sig, video_id, player_url)],
  1930. })
  1931. except ExtractorError as e:
  1932. self.report_warning('Signature extraction failed: Some formats may be missing',
  1933. video_id=video_id, only_once=True)
  1934. self.write_debug(error_to_compat_str(e), only_once=True)
  1935. continue
  1936. language_preference = (
  1937. 10 if audio_track.get('audioIsDefault')
  1938. else -10 if 'descriptive' in (traverse_obj(audio_track, ('displayName', T(lower))) or '')
  1939. else -1)
  1940. name = (
  1941. traverse_obj(fmt, ('qualityLabel', T(txt_or_none)))
  1942. or quality.replace('audio_quality_', ''))
  1943. dct = {
  1944. 'format_id': join_nonempty(itag, fmt.get('isDrc') and 'drc'),
  1945. 'url': fmt_url,
  1946. # Format 22 is likely to be damaged: see https://github.com/yt-dlp/yt-dlp/issues/3372
  1947. 'source_preference': ((-5 if itag == '22' else -1)
  1948. + (100 if 'Premium' in name else 0)),
  1949. 'quality': q(quality),
  1950. 'language': join_nonempty(audio_track.get('id', '').split('.')[0],
  1951. 'desc' if language_preference < -1 else '') or None,
  1952. 'language_preference': language_preference,
  1953. # Strictly de-prioritize 3gp formats
  1954. 'preference': -2 if itag == '17' else None,
  1955. }
  1956. if itag:
  1957. itags[itag].add(('https', dct.get('language')))
  1958. self._unthrottle_format_urls(video_id, player_url, dct)
  1959. dct.update(traverse_obj(fmt, {
  1960. 'asr': ('audioSampleRate', T(int_or_none)),
  1961. 'filesize': ('contentLength', T(int_or_none)),
  1962. 'format_note': ('qualityLabel', T(lambda x: x or quality)),
  1963. # for some formats, fps is wrongly returned as 1
  1964. 'fps': ('fps', T(int_or_none), T(lambda f: f if f > 1 else None)),
  1965. 'audio_channels': ('audioChannels', T(int_or_none)),
  1966. 'height': ('height', T(int_or_none)),
  1967. 'has_drm': ('drmFamilies', T(bool)),
  1968. 'tbr': (('averageBitrate', 'bitrate'), T(lambda t: float_or_none(t, 1000))),
  1969. 'width': ('width', T(int_or_none)),
  1970. '_duration_ms': ('approxDurationMs', T(int_or_none)),
  1971. }, get_all=False))
  1972. mime_mobj = re.match(
  1973. r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '')
  1974. if mime_mobj:
  1975. dct['ext'] = mimetype2ext(mime_mobj.group(1))
  1976. dct.update(parse_codecs(mime_mobj.group(2)))
  1977. single_stream = 'none' in (dct.get(c) for c in ('acodec', 'vcodec'))
  1978. if single_stream and dct.get('ext'):
  1979. dct['container'] = dct['ext'] + '_dash'
  1980. if single_stream or itag == '17':
  1981. # avoid Youtube throttling
  1982. dct.update({
  1983. 'protocol': 'http_dash_segments',
  1984. 'fragments': build_fragments(dct),
  1985. } if dct['filesize'] else {
  1986. 'downloader_options': {'http_chunk_size': CHUNK_SIZE} # No longer useful?
  1987. })
  1988. formats.append(dct)
  1989. def process_manifest_format(f, proto, client_name, itag, all_formats=False):
  1990. key = (proto, f.get('language'))
  1991. if not all_formats and key in itags[itag]:
  1992. return False
  1993. itags[itag].add(key)
  1994. if itag:
  1995. f['format_id'] = (
  1996. '{0}-{1}'.format(itag, proto)
  1997. if all_formats or any(p != proto for p, _ in itags[itag])
  1998. else itag)
  1999. if f.get('source_preference') is None:
  2000. f['source_preference'] = -1
  2001. if itag in ('616', '235'):
  2002. f['format_note'] = join_nonempty(f.get('format_note'), 'Premium', delim=' ')
  2003. f['source_preference'] += 100
  2004. f['quality'] = q(traverse_obj(f, (
  2005. 'format_id', T(lambda s: itag_qualities[s.split('-')[0]])), default=-1))
  2006. if try_call(lambda: f['fps'] <= 1):
  2007. del f['fps']
  2008. if proto == 'hls' and f.get('has_drm'):
  2009. f['has_drm'] = 'maybe'
  2010. f['source_preference'] -= 5
  2011. return True
  2012. hls_manifest_url = streaming_data.get('hlsManifestUrl')
  2013. if hls_manifest_url:
  2014. for f in self._extract_m3u8_formats(
  2015. hls_manifest_url, video_id, 'mp4', fatal=False):
  2016. if process_manifest_format(
  2017. f, 'hls', None, self._search_regex(
  2018. r'/itag/(\d+)', f['url'], 'itag', default=None)):
  2019. formats.append(f)
  2020. if self._downloader.params.get('youtube_include_dash_manifest', True):
  2021. dash_manifest_url = streaming_data.get('dashManifestUrl')
  2022. if dash_manifest_url:
  2023. for f in self._extract_mpd_formats(
  2024. dash_manifest_url, video_id, fatal=False):
  2025. if process_manifest_format(
  2026. f, 'dash', None, f['format_id']):
  2027. f['filesize'] = traverse_obj(f, (
  2028. ('fragment_base_url', 'url'), T(lambda u: self._search_regex(
  2029. r'/clen/(\d+)', u, 'file size', default=None)),
  2030. T(int_or_none)), get_all=False)
  2031. formats.append(f)
  2032. playable_formats = [f for f in formats if not f.get('has_drm')]
  2033. if formats and not playable_formats:
  2034. # If there are no formats that definitely don't have DRM, all have DRM
  2035. self.report_drm(video_id)
  2036. formats[:] = playable_formats
  2037. if not formats:
  2038. if streaming_data.get('licenseInfos'):
  2039. raise ExtractorError(
  2040. 'This video is DRM protected.', expected=True)
  2041. pemr = try_get(
  2042. playability_status,
  2043. lambda x: x['errorScreen']['playerErrorMessageRenderer'],
  2044. dict) or {}
  2045. reason = get_text(pemr.get('reason')) or playability_status.get('reason')
  2046. subreason = pemr.get('subreason')
  2047. if subreason:
  2048. subreason = clean_html(get_text(subreason))
  2049. if subreason == 'The uploader has not made this video available in your country.':
  2050. countries = microformat.get('availableCountries')
  2051. if not countries:
  2052. regions_allowed = search_meta('regionsAllowed')
  2053. countries = regions_allowed.split(',') if regions_allowed else None
  2054. self.raise_geo_restricted(
  2055. subreason, countries)
  2056. reason += '\n' + subreason
  2057. if reason:
  2058. raise ExtractorError(reason, expected=True)
  2059. self._sort_formats(formats)
  2060. keywords = video_details.get('keywords') or []
  2061. if not keywords and webpage:
  2062. keywords = [
  2063. unescapeHTML(m.group('content'))
  2064. for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
  2065. for keyword in keywords:
  2066. if keyword.startswith('yt:stretch='):
  2067. mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
  2068. if mobj:
  2069. # NB: float is intentional for forcing float division
  2070. w, h = (float(v) for v in mobj.groups())
  2071. if w > 0 and h > 0:
  2072. ratio = w / h
  2073. for f in formats:
  2074. if f.get('vcodec') != 'none':
  2075. f['stretched_ratio'] = ratio
  2076. break
  2077. thumbnails = []
  2078. for container in (video_details, microformat):
  2079. for thumbnail in try_get(
  2080. container,
  2081. lambda x: x['thumbnail']['thumbnails'], list) or []:
  2082. thumbnail_url = url_or_none(thumbnail.get('url'))
  2083. if not thumbnail_url:
  2084. continue
  2085. thumbnails.append({
  2086. 'height': int_or_none(thumbnail.get('height')),
  2087. 'url': update_url(thumbnail_url, query=None, fragment=None),
  2088. 'width': int_or_none(thumbnail.get('width')),
  2089. })
  2090. if thumbnails:
  2091. break
  2092. else:
  2093. thumbnail = search_meta(['og:image', 'twitter:image'])
  2094. if thumbnail:
  2095. thumbnails = [{'url': thumbnail}]
  2096. category = microformat.get('category') or search_meta('genre')
  2097. channel_id = self._extract_channel_id(
  2098. webpage, videodetails=video_details, metadata=microformat)
  2099. duration = int_or_none(
  2100. video_details.get('lengthSeconds')
  2101. or microformat.get('lengthSeconds')) \
  2102. or parse_duration(search_meta('duration'))
  2103. for f in formats:
  2104. # Some formats may have much smaller duration than others (possibly damaged during encoding)
  2105. # but avoid false positives with small duration differences.
  2106. # Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
  2107. if try_call(lambda x: float(x.pop('_duration_ms')) / duration < 500, args=(f,)):
  2108. self.report_warning(
  2109. '{0}: Some possibly damaged formats will be deprioritized'.format(video_id), only_once=True)
  2110. # Strictly de-prioritize damaged formats
  2111. f['preference'] = -10
  2112. is_live = video_details.get('isLive')
  2113. owner_profile_url = self._yt_urljoin(self._extract_author_var(
  2114. webpage, 'url', videodetails=video_details, metadata=microformat))
  2115. uploader = self._extract_author_var(
  2116. webpage, 'name', videodetails=video_details, metadata=microformat)
  2117. info = {
  2118. 'id': video_id,
  2119. 'title': self._live_title(video_title) if is_live else video_title,
  2120. 'formats': formats,
  2121. 'thumbnails': thumbnails,
  2122. 'description': video_description,
  2123. 'upload_date': unified_strdate(
  2124. microformat.get('uploadDate')
  2125. or search_meta('uploadDate')),
  2126. 'uploader': uploader,
  2127. 'channel_id': channel_id,
  2128. 'duration': duration,
  2129. 'view_count': int_or_none(
  2130. video_details.get('viewCount')
  2131. or microformat.get('viewCount')
  2132. or search_meta('interactionCount')),
  2133. 'average_rating': float_or_none(video_details.get('averageRating')),
  2134. 'age_limit': 18 if (
  2135. microformat.get('isFamilySafe') is False
  2136. or search_meta('isFamilyFriendly') == 'false'
  2137. or search_meta('og:restrictions:age') == '18+') else 0,
  2138. 'webpage_url': webpage_url,
  2139. 'categories': [category] if category else None,
  2140. 'tags': keywords,
  2141. 'is_live': is_live,
  2142. }
  2143. pctr = try_get(
  2144. player_response,
  2145. lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict)
  2146. if pctr:
  2147. def process_language(container, base_url, lang_code, query):
  2148. lang_subs = []
  2149. for fmt in self._SUBTITLE_FORMATS:
  2150. query.update({
  2151. 'fmt': fmt,
  2152. })
  2153. lang_subs.append({
  2154. 'ext': fmt,
  2155. 'url': update_url_query(base_url, query),
  2156. })
  2157. container[lang_code] = lang_subs
  2158. subtitles = {}
  2159. for caption_track in (pctr.get('captionTracks') or []):
  2160. base_url = caption_track.get('baseUrl')
  2161. if not base_url:
  2162. continue
  2163. if caption_track.get('kind') != 'asr':
  2164. lang_code = caption_track.get('languageCode')
  2165. if not lang_code:
  2166. continue
  2167. process_language(
  2168. subtitles, base_url, lang_code, {})
  2169. continue
  2170. automatic_captions = {}
  2171. for translation_language in (pctr.get('translationLanguages') or []):
  2172. translation_language_code = translation_language.get('languageCode')
  2173. if not translation_language_code:
  2174. continue
  2175. process_language(
  2176. automatic_captions, base_url, translation_language_code,
  2177. {'tlang': translation_language_code})
  2178. info['automatic_captions'] = automatic_captions
  2179. info['subtitles'] = subtitles
  2180. parsed_url = compat_urllib_parse_urlparse(url)
  2181. for component in [parsed_url.fragment, parsed_url.query]:
  2182. query = compat_parse_qs(component)
  2183. for k, v in query.items():
  2184. for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
  2185. d_k += '_time'
  2186. if d_k not in info and k in s_ks:
  2187. info[d_k] = parse_duration(query[k][0])
  2188. if video_description:
  2189. # Youtube Music Auto-generated description
  2190. mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
  2191. if mobj:
  2192. release_year = mobj.group('release_year')
  2193. release_date = mobj.group('release_date')
  2194. if release_date:
  2195. release_date = release_date.replace('-', '')
  2196. if not release_year:
  2197. release_year = release_date[:4]
  2198. info.update({
  2199. 'album': mobj.group('album'.strip()),
  2200. 'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
  2201. 'track': mobj.group('track').strip(),
  2202. 'release_date': release_date,
  2203. 'release_year': int_or_none(release_year),
  2204. })
  2205. initial_data = None
  2206. if webpage:
  2207. initial_data = self._extract_yt_initial_variable(
  2208. webpage, self._YT_INITIAL_DATA_RE, video_id,
  2209. 'yt initial data')
  2210. if not initial_data:
  2211. initial_data = self._call_api(
  2212. 'next', {'videoId': video_id}, video_id, fatal=False)
  2213. if initial_data:
  2214. chapters = self._extract_chapters_from_json(
  2215. initial_data, video_id, duration)
  2216. if not chapters:
  2217. for engagment_pannel in (initial_data.get('engagementPanels') or []):
  2218. contents = try_get(
  2219. engagment_pannel, lambda x: x['engagementPanelSectionListRenderer']['content']['macroMarkersListRenderer']['contents'],
  2220. list)
  2221. if not contents:
  2222. continue
  2223. def chapter_time(mmlir):
  2224. return parse_duration(
  2225. get_text(mmlir.get('timeDescription')))
  2226. chapters = []
  2227. for next_num, content in enumerate(contents, start=1):
  2228. mmlir = content.get('macroMarkersListItemRenderer') or {}
  2229. start_time = chapter_time(mmlir)
  2230. end_time = chapter_time(try_get(
  2231. contents, lambda x: x[next_num]['macroMarkersListItemRenderer'])) \
  2232. if next_num < len(contents) else duration
  2233. if start_time is None or end_time is None:
  2234. continue
  2235. chapters.append({
  2236. 'start_time': start_time,
  2237. 'end_time': end_time,
  2238. 'title': get_text(mmlir.get('title')),
  2239. })
  2240. if chapters:
  2241. break
  2242. if chapters:
  2243. info['chapters'] = chapters
  2244. contents = try_get(
  2245. initial_data,
  2246. lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'],
  2247. list) or []
  2248. if not info['channel_id']:
  2249. channel_id = self._extract_channel_id('', renderers=contents)
  2250. if not info['uploader']:
  2251. info['uploader'] = self._extract_author_var('', 'name', renderers=contents)
  2252. if not owner_profile_url:
  2253. owner_profile_url = self._yt_urljoin(self._extract_author_var('', 'url', renderers=contents))
  2254. for content in contents:
  2255. vpir = content.get('videoPrimaryInfoRenderer')
  2256. if vpir:
  2257. stl = vpir.get('superTitleLink')
  2258. if stl:
  2259. stl = get_text(stl)
  2260. if try_get(
  2261. vpir,
  2262. lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
  2263. info['location'] = stl
  2264. else:
  2265. # •? doesn't match, but [•]? does; \xa0 = non-breaking space
  2266. mobj = re.search(r'([^\xa0\s].*?)[\xa0\s]*S(\d+)[\xa0\s]*[•]?[\xa0\s]*E(\d+)', stl)
  2267. if mobj:
  2268. info.update({
  2269. 'series': mobj.group(1),
  2270. 'season_number': int(mobj.group(2)),
  2271. 'episode_number': int(mobj.group(3)),
  2272. })
  2273. for tlb in (try_get(
  2274. vpir,
  2275. lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
  2276. list) or []):
  2277. tbr = traverse_obj(tlb, ('segmentedLikeDislikeButtonRenderer', 'likeButton', 'toggleButtonRenderer'), 'toggleButtonRenderer') or {}
  2278. for getter, regex in [(
  2279. lambda x: x['defaultText']['accessibility']['accessibilityData'],
  2280. r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
  2281. lambda x: x['accessibility'],
  2282. lambda x: x['accessibilityData']['accessibilityData'],
  2283. ], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
  2284. label = (try_get(tbr, getter, dict) or {}).get('label')
  2285. if label:
  2286. mobj = re.match(regex, label)
  2287. if mobj:
  2288. info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
  2289. break
  2290. sbr_tooltip = try_get(
  2291. vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
  2292. if sbr_tooltip:
  2293. # however dislike_count was hidden by YT, as if there could ever be dislikable content on YT
  2294. like_count, dislike_count = sbr_tooltip.split(' / ')
  2295. info.update({
  2296. 'like_count': str_to_int(like_count),
  2297. 'dislike_count': str_to_int(dislike_count),
  2298. })
  2299. else:
  2300. info['like_count'] = traverse_obj(vpir, (
  2301. 'videoActions', 'menuRenderer', 'topLevelButtons', Ellipsis,
  2302. 'segmentedLikeDislikeButtonViewModel', 'likeButtonViewModel', 'likeButtonViewModel',
  2303. 'toggleButtonViewModel', 'toggleButtonViewModel', 'defaultButtonViewModel',
  2304. 'buttonViewModel', (('title', ('accessibilityText', T(lambda s: s.split()), Ellipsis))), T(parse_count)),
  2305. get_all=False)
  2306. vsir = content.get('videoSecondaryInfoRenderer')
  2307. if vsir:
  2308. rows = try_get(
  2309. vsir,
  2310. lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
  2311. list) or []
  2312. multiple_songs = False
  2313. for row in rows:
  2314. if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
  2315. multiple_songs = True
  2316. break
  2317. for row in rows:
  2318. mrr = row.get('metadataRowRenderer') or {}
  2319. mrr_title = mrr.get('title')
  2320. if not mrr_title:
  2321. continue
  2322. mrr_title = get_text(mrr['title'])
  2323. mrr_contents_text = get_text(mrr['contents'][0])
  2324. if mrr_title == 'License':
  2325. info['license'] = mrr_contents_text
  2326. elif not multiple_songs:
  2327. if mrr_title == 'Album':
  2328. info['album'] = mrr_contents_text
  2329. elif mrr_title == 'Artist':
  2330. info['artist'] = mrr_contents_text
  2331. elif mrr_title == 'Song':
  2332. info['track'] = mrr_contents_text
  2333. # this is not extraction but spelunking!
  2334. carousel_lockups = traverse_obj(
  2335. initial_data,
  2336. ('engagementPanels', Ellipsis, 'engagementPanelSectionListRenderer',
  2337. 'content', 'structuredDescriptionContentRenderer', 'items', Ellipsis,
  2338. 'videoDescriptionMusicSectionRenderer', 'carouselLockups', Ellipsis),
  2339. expected_type=dict) or []
  2340. # try to reproduce logic from metadataRowContainerRenderer above (if it still is)
  2341. fields = (('ALBUM', 'album'), ('ARTIST', 'artist'), ('SONG', 'track'), ('LICENSES', 'license'))
  2342. # multiple_songs ?
  2343. if len(carousel_lockups) > 1:
  2344. fields = fields[-1:]
  2345. for info_row in traverse_obj(
  2346. carousel_lockups,
  2347. (0, 'carouselLockupRenderer', 'infoRows', Ellipsis, 'infoRowRenderer'),
  2348. expected_type=dict):
  2349. row_title = traverse_obj(info_row, ('title', 'simpleText'))
  2350. row_text = traverse_obj(info_row, 'defaultMetadata', 'expandedMetadata', expected_type=get_text)
  2351. if not row_text:
  2352. continue
  2353. for name, field in fields:
  2354. if name == row_title and not info.get(field):
  2355. info[field] = row_text
  2356. for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
  2357. v = info.get(s_k)
  2358. if v:
  2359. info[d_k] = v
  2360. self.mark_watched(video_id, player_response)
  2361. return merge_dicts(
  2362. info, {
  2363. 'uploader_id': self._extract_uploader_id(owner_profile_url),
  2364. 'uploader_url': owner_profile_url,
  2365. 'channel_id': channel_id,
  2366. 'channel_url': channel_id and self._yt_urljoin('/channel/' + channel_id),
  2367. 'channel': info['uploader'],
  2368. })
  2369. class YoutubeTabIE(YoutubeBaseInfoExtractor):
  2370. IE_DESC = 'YouTube.com tab'
  2371. _VALID_URL = r'''(?x)
  2372. https?://
  2373. (?:\w+\.)?
  2374. (?:
  2375. youtube(?:kids)?\.com|
  2376. invidio\.us
  2377. )/
  2378. (?:
  2379. (?:channel|c|user|feed|hashtag)/|
  2380. (?:playlist|watch)\?.*?\blist=|
  2381. (?!(?:watch|embed|v|e|results)\b)
  2382. )
  2383. (?P<id>[^/?\#&]+)
  2384. '''
  2385. IE_NAME = 'youtube:tab'
  2386. _TESTS = [{
  2387. # Shorts
  2388. 'url': 'https://www.youtube.com/@SuperCooperShorts/shorts',
  2389. 'playlist_mincount': 5,
  2390. 'info_dict': {
  2391. 'description': 'Short clips from Super Cooper Sundays!',
  2392. 'id': 'UCKMA8kHZ8bPYpnMNaUSxfEQ',
  2393. 'title': 'Super Cooper Shorts - Shorts',
  2394. 'uploader': 'Super Cooper Shorts',
  2395. 'uploader_id': '@SuperCooperShorts',
  2396. }
  2397. }, {
  2398. # Channel that does not have a Shorts tab. Test should just download videos on Home tab instead
  2399. 'url': 'https://www.youtube.com/@emergencyawesome/shorts',
  2400. 'info_dict': {
  2401. 'description': 'md5:592c080c06fef4de3c902c4a8eecd850',
  2402. 'id': 'UCDiFRMQWpcp8_KD4vwIVicw',
  2403. 'title': 'Emergency Awesome - Home',
  2404. },
  2405. 'playlist_mincount': 5,
  2406. 'skip': 'new test page needed to replace `Emergency Awesome - Shorts`',
  2407. }, {
  2408. # playlists, multipage
  2409. 'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
  2410. 'playlist_mincount': 94,
  2411. 'info_dict': {
  2412. 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
  2413. 'title': r're:Igor Kleiner(?: Ph\.D\.)? - Playlists',
  2414. 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
  2415. 'uploader': 'Igor Kleiner',
  2416. 'uploader_id': '@IgorDataScience',
  2417. },
  2418. }, {
  2419. # playlists, multipage, different order
  2420. 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
  2421. 'playlist_mincount': 94,
  2422. 'info_dict': {
  2423. 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
  2424. 'title': r're:Igor Kleiner(?: Ph\.D\.)? - Playlists',
  2425. 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
  2426. 'uploader': 'Igor Kleiner',
  2427. 'uploader_id': '@IgorDataScience',
  2428. },
  2429. }, {
  2430. # playlists, series
  2431. 'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
  2432. 'playlist_mincount': 5,
  2433. 'info_dict': {
  2434. 'id': 'UCYO_jab_esuFRV4b17AJtAw',
  2435. 'title': '3Blue1Brown - Playlists',
  2436. 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
  2437. 'uploader': '3Blue1Brown',
  2438. 'uploader_id': '@3blue1brown',
  2439. },
  2440. }, {
  2441. # playlists, singlepage
  2442. 'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
  2443. 'playlist_mincount': 4,
  2444. 'info_dict': {
  2445. 'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
  2446. 'title': 'ThirstForScience - Playlists',
  2447. 'description': 'md5:609399d937ea957b0f53cbffb747a14c',
  2448. 'uploader': 'ThirstForScience',
  2449. 'uploader_id': '@ThirstForScience',
  2450. }
  2451. }, {
  2452. 'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
  2453. 'only_matching': True,
  2454. }, {
  2455. # basic, single video playlist
  2456. 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2457. 'info_dict': {
  2458. 'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2459. 'title': 'youtube-dl public playlist',
  2460. 'uploader': 'Sergey M.',
  2461. 'uploader_id': '@sergeym.6173',
  2462. 'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
  2463. },
  2464. 'playlist_count': 1,
  2465. }, {
  2466. # empty playlist
  2467. 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
  2468. 'info_dict': {
  2469. 'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
  2470. 'title': 'youtube-dl empty playlist',
  2471. 'uploader': 'Sergey M.',
  2472. 'uploader_id': '@sergeym.6173',
  2473. 'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
  2474. },
  2475. 'playlist_count': 0,
  2476. }, {
  2477. # Home tab
  2478. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
  2479. 'info_dict': {
  2480. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2481. 'title': 'lex will - Home',
  2482. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2483. 'uploader': 'lex will',
  2484. 'uploader_id': '@lexwill718',
  2485. },
  2486. 'playlist_mincount': 2,
  2487. }, {
  2488. # Videos tab
  2489. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
  2490. 'info_dict': {
  2491. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2492. 'title': 'lex will - Videos',
  2493. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2494. 'uploader': 'lex will',
  2495. 'uploader_id': '@lexwill718',
  2496. },
  2497. 'playlist_mincount': 975,
  2498. }, {
  2499. # Videos tab, sorted by popular
  2500. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
  2501. 'info_dict': {
  2502. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2503. 'title': 'lex will - Videos',
  2504. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2505. 'uploader': 'lex will',
  2506. 'uploader_id': '@lexwill718',
  2507. },
  2508. 'playlist_mincount': 199,
  2509. }, {
  2510. # Playlists tab
  2511. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
  2512. 'info_dict': {
  2513. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2514. 'title': 'lex will - Playlists',
  2515. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2516. 'uploader': 'lex will',
  2517. 'uploader_id': '@lexwill718',
  2518. },
  2519. 'playlist_mincount': 17,
  2520. }, {
  2521. # Community tab
  2522. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
  2523. 'info_dict': {
  2524. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2525. 'title': 'lex will - Community',
  2526. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2527. 'uploader': 'lex will',
  2528. 'uploader_id': '@lexwill718',
  2529. },
  2530. 'playlist_mincount': 18,
  2531. }, {
  2532. # Channels tab
  2533. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
  2534. 'info_dict': {
  2535. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2536. 'title': r're:lex will - (?:Home|Channels)',
  2537. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2538. 'uploader': 'lex will',
  2539. 'uploader_id': '@lexwill718',
  2540. },
  2541. 'playlist_mincount': 75,
  2542. }, {
  2543. # Releases tab
  2544. 'url': 'https://www.youtube.com/@daftpunk/releases',
  2545. 'info_dict': {
  2546. 'id': 'UC_kRDKYrUlrbtrSiyu5Tflg',
  2547. 'title': 'Daft Punk - Releases',
  2548. 'description': 'Daft Punk (1993 - 2021) - Official YouTube Channel',
  2549. 'uploader_id': '@daftpunk',
  2550. 'uploader': 'Daft Punk',
  2551. },
  2552. 'playlist_mincount': 36,
  2553. }, {
  2554. 'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2555. 'only_matching': True,
  2556. }, {
  2557. 'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2558. 'only_matching': True,
  2559. }, {
  2560. 'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2561. 'only_matching': True,
  2562. }, {
  2563. 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
  2564. 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
  2565. 'info_dict': {
  2566. 'title': '29C3: Not my department',
  2567. 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
  2568. 'uploader': 'Christiaan008',
  2569. 'uploader_id': '@ChRiStIaAn008',
  2570. 'channel_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
  2571. },
  2572. 'playlist_count': 96,
  2573. }, {
  2574. 'note': 'Large playlist',
  2575. 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
  2576. 'info_dict': {
  2577. 'title': 'Uploads from Cauchemar',
  2578. 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
  2579. 'uploader': 'Cauchemar',
  2580. 'uploader_id': '@Cauchemar89',
  2581. 'channel_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
  2582. },
  2583. 'playlist_mincount': 1123,
  2584. }, {
  2585. # even larger playlist, 8832 videos
  2586. 'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
  2587. 'only_matching': True,
  2588. }, {
  2589. 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
  2590. 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
  2591. 'info_dict': {
  2592. 'title': 'Uploads from Interstellar Movie',
  2593. 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
  2594. 'uploader': 'Interstellar Movie',
  2595. 'uploader_id': '@InterstellarMovie',
  2596. 'channel_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
  2597. },
  2598. 'playlist_mincount': 21,
  2599. }, {
  2600. # https://github.com/ytdl-org/youtube-dl/issues/21844
  2601. 'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
  2602. 'info_dict': {
  2603. 'title': 'Data Analysis with Dr Mike Pound',
  2604. 'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
  2605. 'uploader': 'Computerphile',
  2606. 'uploader_id': '@Computerphile',
  2607. 'channel_id': 'UC9-y-6csu5WGm29I7JiwpnA',
  2608. },
  2609. 'playlist_mincount': 11,
  2610. }, {
  2611. 'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2612. 'only_matching': True,
  2613. }, {
  2614. # Playlist URL that does not actually serve a playlist
  2615. 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
  2616. 'info_dict': {
  2617. 'id': 'FqZTN594JQw',
  2618. 'ext': 'webm',
  2619. 'title': "Smiley's People 01 detective, Adventure Series, Action",
  2620. 'uploader': 'STREEM',
  2621. 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
  2622. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
  2623. 'upload_date': '20150526',
  2624. 'license': 'Standard YouTube License',
  2625. 'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
  2626. 'categories': ['People & Blogs'],
  2627. 'tags': list,
  2628. 'view_count': int,
  2629. 'like_count': int,
  2630. },
  2631. 'params': {
  2632. 'skip_download': True,
  2633. },
  2634. 'skip': 'This video is not available.',
  2635. 'add_ie': [YoutubeIE.ie_key()],
  2636. }, {
  2637. 'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
  2638. 'only_matching': True,
  2639. }, {
  2640. 'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
  2641. 'only_matching': True,
  2642. }, {
  2643. 'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
  2644. 'info_dict': {
  2645. 'id': r're:[\da-zA-Z_-]{8,}',
  2646. 'ext': 'mp4',
  2647. 'title': r're:(?s)[A-Z].{20,}',
  2648. 'uploader': 'Sky News',
  2649. 'uploader_id': '@SkyNews',
  2650. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@SkyNews',
  2651. 'upload_date': r're:\d{8}',
  2652. 'description': r're:(?s)(?:.*\n)+SUBSCRIBE to our YouTube channel for more videos: http://www\.youtube\.com/skynews *\n.*',
  2653. 'categories': ['News & Politics'],
  2654. 'tags': list,
  2655. 'like_count': int,
  2656. },
  2657. 'params': {
  2658. 'skip_download': True,
  2659. },
  2660. }, {
  2661. 'url': 'https://www.youtube.com/user/TheYoungTurks/live',
  2662. 'info_dict': {
  2663. 'id': 'a48o2S1cPoo',
  2664. 'ext': 'mp4',
  2665. 'title': 'The Young Turks - Live Main Show',
  2666. 'uploader': 'The Young Turks',
  2667. 'uploader_id': 'TheYoungTurks',
  2668. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
  2669. 'upload_date': '20150715',
  2670. 'license': 'Standard YouTube License',
  2671. 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
  2672. 'categories': ['News & Politics'],
  2673. 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
  2674. 'like_count': int,
  2675. },
  2676. 'params': {
  2677. 'skip_download': True,
  2678. },
  2679. 'only_matching': True,
  2680. }, {
  2681. 'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
  2682. 'only_matching': True,
  2683. }, {
  2684. 'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
  2685. 'only_matching': True,
  2686. }, {
  2687. 'url': 'https://www.youtube.com/feed/trending',
  2688. 'only_matching': True,
  2689. }, {
  2690. # needs auth
  2691. 'url': 'https://www.youtube.com/feed/library',
  2692. 'only_matching': True,
  2693. }, {
  2694. # needs auth
  2695. 'url': 'https://www.youtube.com/feed/history',
  2696. 'only_matching': True,
  2697. }, {
  2698. # needs auth
  2699. 'url': 'https://www.youtube.com/feed/subscriptions',
  2700. 'only_matching': True,
  2701. }, {
  2702. # needs auth
  2703. 'url': 'https://www.youtube.com/feed/watch_later',
  2704. 'only_matching': True,
  2705. }, {
  2706. # no longer available?
  2707. 'url': 'https://www.youtube.com/feed/recommended',
  2708. 'only_matching': True,
  2709. }, {
  2710. # inline playlist with not always working continuations
  2711. 'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
  2712. 'only_matching': True,
  2713. }, {
  2714. 'url': 'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8',
  2715. 'only_matching': True,
  2716. }, {
  2717. 'url': 'https://www.youtube.com/course',
  2718. 'only_matching': True,
  2719. }, {
  2720. 'url': 'https://www.youtube.com/zsecurity',
  2721. 'only_matching': True,
  2722. }, {
  2723. 'url': 'http://www.youtube.com/NASAgovVideo/videos',
  2724. 'only_matching': True,
  2725. }, {
  2726. 'url': 'https://www.youtube.com/TheYoungTurks/live',
  2727. 'only_matching': True,
  2728. }, {
  2729. 'url': 'https://www.youtube.com/hashtag/cctv9',
  2730. 'info_dict': {
  2731. 'id': 'cctv9',
  2732. 'title': '#cctv9',
  2733. },
  2734. 'playlist_mincount': 350,
  2735. }, {
  2736. 'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
  2737. 'only_matching': True,
  2738. }, {
  2739. 'note': 'Search tab',
  2740. 'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
  2741. 'playlist_mincount': 20,
  2742. 'info_dict': {
  2743. 'id': 'UCYO_jab_esuFRV4b17AJtAw',
  2744. 'title': '3Blue1Brown - Search - linear algebra',
  2745. 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
  2746. 'uploader': '3Blue1Brown',
  2747. 'uploader_id': '@3blue1brown',
  2748. 'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
  2749. }
  2750. }]
  2751. @classmethod
  2752. def suitable(cls, url):
  2753. return not YoutubeIE.suitable(url) and super(
  2754. YoutubeTabIE, cls).suitable(url)
  2755. @staticmethod
  2756. def _extract_grid_item_renderer(item):
  2757. assert isinstance(item, dict)
  2758. for key, renderer in item.items():
  2759. if not key.startswith('grid') or not key.endswith('Renderer'):
  2760. continue
  2761. if not isinstance(renderer, dict):
  2762. continue
  2763. return renderer
  2764. @staticmethod
  2765. def _get_text(r, k):
  2766. return traverse_obj(
  2767. r, (k, 'runs', 0, 'text'), (k, 'simpleText'),
  2768. expected_type=txt_or_none)
  2769. def _grid_entries(self, grid_renderer):
  2770. for item in grid_renderer['items']:
  2771. if not isinstance(item, dict):
  2772. continue
  2773. renderer = self._extract_grid_item_renderer(item)
  2774. if not isinstance(renderer, dict):
  2775. continue
  2776. title = self._get_text(renderer, 'title')
  2777. # playlist
  2778. playlist_id = renderer.get('playlistId')
  2779. if playlist_id:
  2780. yield self.url_result(
  2781. 'https://www.youtube.com/playlist?list=%s' % playlist_id,
  2782. ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
  2783. video_title=title)
  2784. continue
  2785. # video
  2786. video_id = renderer.get('videoId')
  2787. if video_id:
  2788. yield self._extract_video(renderer)
  2789. continue
  2790. # channel
  2791. channel_id = renderer.get('channelId')
  2792. if channel_id:
  2793. title = self._get_text(renderer, 'title')
  2794. yield self.url_result(
  2795. 'https://www.youtube.com/channel/%s' % channel_id,
  2796. ie=YoutubeTabIE.ie_key(), video_title=title)
  2797. continue
  2798. # generic endpoint URL support
  2799. ep_url = urljoin('https://www.youtube.com/', try_get(
  2800. renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
  2801. compat_str))
  2802. if ep_url:
  2803. for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
  2804. if ie.suitable(ep_url):
  2805. yield self.url_result(
  2806. ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
  2807. break
  2808. def _shelf_entries_from_content(self, shelf_renderer):
  2809. content = shelf_renderer.get('content')
  2810. if not isinstance(content, dict):
  2811. return
  2812. renderer = content.get('gridRenderer')
  2813. if renderer:
  2814. # TODO: add support for nested playlists so each shelf is processed
  2815. # as separate playlist
  2816. # TODO: this includes only first N items
  2817. for entry in self._grid_entries(renderer):
  2818. yield entry
  2819. renderer = content.get('horizontalListRenderer')
  2820. if renderer:
  2821. # TODO
  2822. pass
  2823. def _shelf_entries(self, shelf_renderer, skip_channels=False):
  2824. ep = try_get(
  2825. shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
  2826. compat_str)
  2827. shelf_url = urljoin('https://www.youtube.com', ep)
  2828. if shelf_url:
  2829. # Skipping links to another channels, note that checking for
  2830. # endpoint.commandMetadata.webCommandMetadata.webPageTypwebPageType == WEB_PAGE_TYPE_CHANNEL
  2831. # will not work
  2832. if skip_channels and '/channels?' in shelf_url:
  2833. return
  2834. title = try_get(
  2835. shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
  2836. yield self.url_result(shelf_url, video_title=title)
  2837. # Shelf may not contain shelf URL, fallback to extraction from content
  2838. for entry in self._shelf_entries_from_content(shelf_renderer):
  2839. yield entry
  2840. def _playlist_entries(self, video_list_renderer):
  2841. for content in video_list_renderer['contents']:
  2842. if not isinstance(content, dict):
  2843. continue
  2844. renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
  2845. if not isinstance(renderer, dict):
  2846. continue
  2847. video_id = renderer.get('videoId')
  2848. if not video_id:
  2849. continue
  2850. yield self._extract_video(renderer)
  2851. def _video_entry(self, video_renderer):
  2852. video_id = video_renderer.get('videoId')
  2853. if video_id:
  2854. return self._extract_video(video_renderer)
  2855. def _post_thread_entries(self, post_thread_renderer):
  2856. post_renderer = try_get(
  2857. post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
  2858. if not post_renderer:
  2859. return
  2860. # video attachment
  2861. video_renderer = try_get(
  2862. post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
  2863. video_id = None
  2864. if video_renderer:
  2865. entry = self._video_entry(video_renderer)
  2866. if entry:
  2867. yield entry
  2868. # inline video links
  2869. runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
  2870. for run in runs:
  2871. if not isinstance(run, dict):
  2872. continue
  2873. ep_url = try_get(
  2874. run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
  2875. if not ep_url:
  2876. continue
  2877. if not YoutubeIE.suitable(ep_url):
  2878. continue
  2879. ep_video_id = YoutubeIE._match_id(ep_url)
  2880. if video_id == ep_video_id:
  2881. continue
  2882. yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
  2883. def _post_thread_continuation_entries(self, post_thread_continuation):
  2884. contents = post_thread_continuation.get('contents')
  2885. if not isinstance(contents, list):
  2886. return
  2887. for content in contents:
  2888. renderer = content.get('backstagePostThreadRenderer')
  2889. if not isinstance(renderer, dict):
  2890. continue
  2891. for entry in self._post_thread_entries(renderer):
  2892. yield entry
  2893. def _rich_grid_entries(self, contents):
  2894. for content in contents:
  2895. content = traverse_obj(
  2896. content, ('richItemRenderer', 'content'),
  2897. expected_type=dict) or {}
  2898. video_renderer = traverse_obj(
  2899. content, 'videoRenderer', 'reelItemRenderer',
  2900. expected_type=dict)
  2901. if video_renderer:
  2902. entry = self._video_entry(video_renderer)
  2903. if entry:
  2904. yield entry
  2905. # playlist
  2906. renderer = traverse_obj(
  2907. content, 'playlistRenderer', expected_type=dict) or {}
  2908. title = self._get_text(renderer, 'title')
  2909. playlist_id = renderer.get('playlistId')
  2910. if playlist_id:
  2911. yield self.url_result(
  2912. 'https://www.youtube.com/playlist?list=%s' % playlist_id,
  2913. ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
  2914. video_title=title)
  2915. @staticmethod
  2916. def _build_continuation_query(continuation, ctp=None):
  2917. query = {
  2918. 'ctoken': continuation,
  2919. 'continuation': continuation,
  2920. }
  2921. if ctp:
  2922. query['itct'] = ctp
  2923. return query
  2924. @staticmethod
  2925. def _extract_next_continuation_data(renderer):
  2926. next_continuation = try_get(
  2927. renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
  2928. if not next_continuation:
  2929. return
  2930. continuation = next_continuation.get('continuation')
  2931. if not continuation:
  2932. return
  2933. ctp = next_continuation.get('clickTrackingParams')
  2934. return YoutubeTabIE._build_continuation_query(continuation, ctp)
  2935. @classmethod
  2936. def _extract_continuation(cls, renderer):
  2937. next_continuation = cls._extract_next_continuation_data(renderer)
  2938. if next_continuation:
  2939. return next_continuation
  2940. contents = []
  2941. for key in ('contents', 'items'):
  2942. contents.extend(try_get(renderer, lambda x: x[key], list) or [])
  2943. for content in contents:
  2944. if not isinstance(content, dict):
  2945. continue
  2946. continuation_ep = try_get(
  2947. content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
  2948. dict)
  2949. if not continuation_ep:
  2950. continue
  2951. continuation = try_get(
  2952. continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
  2953. if not continuation:
  2954. continue
  2955. ctp = continuation_ep.get('clickTrackingParams')
  2956. return YoutubeTabIE._build_continuation_query(continuation, ctp)
    def _entries(self, tab, item_id, webpage):
        """Generate video/playlist entries for the selected tab, then keep
        following "continuation" (infinite-scroll) pages until exhausted.

        tab: selected tab renderer dict (from _extract_selected_tab)
        item_id: channel/playlist id, used for logging and ytcfg extraction
        webpage: already-downloaded HTML of the tab page
        """
        tab_content = try_get(tab, lambda x: x['content'], dict)
        if not tab_content:
            return
        slr_renderer = try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
        if slr_renderer:
            # Classic layout: sectionList -> itemSection -> typed renderers.
            is_channels_tab = tab.get('title') == 'Channels'
            continuation = None
            slr_contents = try_get(slr_renderer, lambda x: x['contents'], list) or []
            for slr_content in slr_contents:
                if not isinstance(slr_content, dict):
                    continue
                is_renderer = try_get(slr_content, lambda x: x['itemSectionRenderer'], dict)
                if not is_renderer:
                    continue
                isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
                for isr_content in isr_contents:
                    if not isinstance(isr_content, dict):
                        continue
                    # Each item holds exactly one of several renderer types;
                    # first match wins and may set the next continuation.
                    renderer = isr_content.get('playlistVideoListRenderer')
                    if renderer:
                        for entry in self._playlist_entries(renderer):
                            yield entry
                        continuation = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('gridRenderer')
                    if renderer:
                        for entry in self._grid_entries(renderer):
                            yield entry
                        continuation = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('shelfRenderer')
                    if renderer:
                        # On the Channels tab shelves are not expanded
                        # (second argument is skip_channels=False there)
                        for entry in self._shelf_entries(renderer, not is_channels_tab):
                            yield entry
                        continue
                    renderer = isr_content.get('backstagePostThreadRenderer')
                    if renderer:
                        for entry in self._post_thread_entries(renderer):
                            yield entry
                        continuation = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('videoRenderer')
                    if renderer:
                        entry = self._video_entry(renderer)
                        if entry:
                            yield entry
                # Fall back to section-level, then list-level continuations
                if not continuation:
                    continuation = self._extract_continuation(is_renderer)
            if not continuation:
                continuation = self._extract_continuation(slr_renderer)
        else:
            # Newer richGrid layout (e.g. a channel's Videos tab)
            rich_grid_renderer = tab_content.get('richGridRenderer')
            if not rich_grid_renderer:
                return
            for entry in self._rich_grid_entries(rich_grid_renderer.get('contents') or []):
                yield entry
            continuation = self._extract_continuation(rich_grid_renderer)
        # Build the innertube /browse request used for all continuation pages
        ytcfg = self._extract_ytcfg(item_id, webpage)
        client_version = try_get(
            ytcfg, lambda x: x['INNERTUBE_CLIENT_VERSION'], compat_str) or '2.20210407.08.00'
        headers = {
            'x-youtube-client-name': '1',
            'x-youtube-client-version': client_version,
            'content-type': 'application/json',
        }
        context = try_get(ytcfg, lambda x: x['INNERTUBE_CONTEXT'], dict) or {
            'client': {
                'clientName': 'WEB',
                'clientVersion': client_version,
            }
        }
        visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
        identity_token = self._extract_identity_token(ytcfg, webpage)
        if identity_token:
            headers['x-youtube-identity-token'] = identity_token
        data = {
            'context': context,
        }
        for page_num in itertools.count(1):
            if not continuation:
                break
            if visitor_data:
                headers['x-goog-visitor-id'] = visitor_data
            data['continuation'] = continuation['continuation']
            data['clickTracking'] = {
                'clickTrackingParams': continuation['itct']
            }
            count = 0
            retries = 3
            while count <= retries:
                try:
                    # Downloading page may result in intermittent 5xx HTTP error
                    # that is usually worked around with a retry
                    response = self._download_json(
                        'https://www.youtube.com/youtubei/v1/browse?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
                        None, 'Downloading page %d%s' % (page_num, ' (retry #%d)' % count if count else ''),
                        headers=headers, data=json.dumps(data).encode('utf8'))
                    break
                except ExtractorError as e:
                    if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
                        count += 1
                        if count <= retries:
                            continue
                    raise
            if not response:
                break
            # Carry visitorData forward so subsequent pages stay consistent
            visitor_data = try_get(
                response, lambda x: x['responseContext']['visitorData'], compat_str) or visitor_data
            # Older responses: continuation payload under continuationContents
            continuation_contents = try_get(
                response, lambda x: x['continuationContents'], dict)
            if continuation_contents:
                continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
                if continuation_renderer:
                    for entry in self._playlist_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('gridContinuation')
                if continuation_renderer:
                    for entry in self._grid_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('itemSectionContinuation')
                if continuation_renderer:
                    for entry in self._post_thread_continuation_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
            # Newer responses: items arrive via onResponseReceived* actions;
            # the type of the first item decides how the batch is parsed.
            on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
            continuation_items = try_get(
                on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
            if continuation_items:
                continuation_item = continuation_items[0]
                if not isinstance(continuation_item, dict):
                    continue
                renderer = self._extract_grid_item_renderer(continuation_item)
                if renderer:
                    grid_renderer = {'items': continuation_items}
                    for entry in self._grid_entries(grid_renderer):
                        yield entry
                    continuation = self._extract_continuation(grid_renderer)
                    continue
                renderer = continuation_item.get('playlistVideoRenderer') or continuation_item.get('itemSectionRenderer')
                if renderer:
                    video_list_renderer = {'contents': continuation_items}
                    for entry in self._playlist_entries(video_list_renderer):
                        yield entry
                    continuation = self._extract_continuation(video_list_renderer)
                    continue
                renderer = continuation_item.get('backstagePostThreadRenderer')
                if renderer:
                    continuation_renderer = {'contents': continuation_items}
                    for entry in self._post_thread_continuation_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                renderer = continuation_item.get('richItemRenderer')
                if renderer:
                    for entry in self._rich_grid_entries(continuation_items):
                        yield entry
                    continuation = self._extract_continuation({'contents': continuation_items})
                    continue
            # No recognizable continuation payload: stop paginating
            break
  3122. @staticmethod
  3123. def _extract_selected_tab(tabs):
  3124. for tab in tabs:
  3125. renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
  3126. if renderer.get('selected') is True:
  3127. return renderer
  3128. else:
  3129. raise ExtractorError('Unable to find selected tab')
  3130. def _extract_uploader(self, metadata, data):
  3131. uploader = {}
  3132. renderers = traverse_obj(data,
  3133. ('sidebar', 'playlistSidebarRenderer', 'items'))
  3134. uploader['channel_id'] = self._extract_channel_id('', metadata=metadata, renderers=renderers)
  3135. uploader['uploader'] = (
  3136. self._extract_author_var('', 'name', renderers=renderers)
  3137. or self._extract_author_var('', 'name', metadata=metadata))
  3138. uploader['uploader_url'] = self._yt_urljoin(
  3139. self._extract_author_var('', 'url', metadata=metadata, renderers=renderers))
  3140. uploader['uploader_id'] = self._extract_uploader_id(uploader['uploader_url'])
  3141. uploader['channel'] = uploader['uploader']
  3142. return uploader
  3143. @classmethod
  3144. def _extract_alert(cls, data):
  3145. alerts = []
  3146. for alert in traverse_obj(data, ('alerts', Ellipsis), expected_type=dict):
  3147. alert_text = traverse_obj(
  3148. alert, (None, lambda x: x['alertRenderer']['text']), get_all=False)
  3149. if not alert_text:
  3150. continue
  3151. text = cls._get_text(alert_text, 'text')
  3152. if text:
  3153. alerts.append(text)
  3154. return '\n'.join(alerts)
  3155. def _extract_from_tabs(self, item_id, webpage, data, tabs):
  3156. selected_tab = self._extract_selected_tab(tabs)
  3157. renderer = traverse_obj(data, ('metadata', 'channelMetadataRenderer'),
  3158. expected_type=dict) or {}
  3159. playlist_id = item_id
  3160. title = description = None
  3161. if renderer:
  3162. channel_title = txt_or_none(renderer.get('title')) or item_id
  3163. tab_title = txt_or_none(selected_tab.get('title'))
  3164. title = join_nonempty(
  3165. channel_title or item_id, tab_title,
  3166. txt_or_none(selected_tab.get('expandedText')),
  3167. delim=' - ')
  3168. description = txt_or_none(renderer.get('description'))
  3169. playlist_id = txt_or_none(renderer.get('externalId')) or playlist_id
  3170. else:
  3171. renderer = traverse_obj(data,
  3172. ('metadata', 'playlistMetadataRenderer'),
  3173. ('header', 'hashtagHeaderRenderer'),
  3174. expected_type=dict) or {}
  3175. title = traverse_obj(renderer, 'title', ('hashtag', 'simpleText'),
  3176. expected_type=txt_or_none)
  3177. playlist = self.playlist_result(
  3178. self._entries(selected_tab, item_id, webpage),
  3179. playlist_id=playlist_id, playlist_title=title,
  3180. playlist_description=description)
  3181. return merge_dicts(playlist, self._extract_uploader(renderer, data))
  3182. def _extract_from_playlist(self, item_id, url, data, playlist):
  3183. title = traverse_obj((playlist, data),
  3184. (0, 'title'), (1, 'titleText', 'simpleText'),
  3185. expected_type=txt_or_none)
  3186. playlist_id = txt_or_none(playlist.get('playlistId')) or item_id
  3187. # Inline playlist rendition continuation does not always work
  3188. # at Youtube side, so delegating regular tab-based playlist URL
  3189. # processing whenever possible.
  3190. playlist_url = urljoin(url, traverse_obj(
  3191. playlist, ('endpoint', 'commandMetadata', 'webCommandMetadata', 'url'),
  3192. expected_type=url_or_none))
  3193. if playlist_url and playlist_url != url:
  3194. return self.url_result(
  3195. playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
  3196. video_title=title)
  3197. return self.playlist_result(
  3198. self._playlist_entries(playlist), playlist_id=playlist_id,
  3199. playlist_title=title)
  3200. def _extract_identity_token(self, ytcfg, webpage):
  3201. if ytcfg:
  3202. token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
  3203. if token:
  3204. return token
  3205. return self._search_regex(
  3206. r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
  3207. 'identity token', default=None)
  3208. def _real_extract(self, url):
  3209. item_id = self._match_id(url)
  3210. url = update_url(url, netloc='www.youtube.com')
  3211. # Handle both video/playlist URLs
  3212. qs = parse_qs(url)
  3213. video_id = qs.get('v', [None])[0]
  3214. playlist_id = qs.get('list', [None])[0]
  3215. if video_id and playlist_id:
  3216. if self._downloader.params.get('noplaylist'):
  3217. self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
  3218. return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
  3219. self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
  3220. webpage = self._download_webpage(url, item_id)
  3221. data = self._extract_yt_initial_data(item_id, webpage)
  3222. tabs = try_get(
  3223. data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
  3224. if tabs:
  3225. return self._extract_from_tabs(item_id, webpage, data, tabs)
  3226. playlist = try_get(
  3227. data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
  3228. if playlist:
  3229. return self._extract_from_playlist(item_id, url, data, playlist)
  3230. # Fallback to video extraction if no playlist alike page is recognized.
  3231. # First check for the current video then try the v attribute of URL query.
  3232. video_id = try_get(
  3233. data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
  3234. compat_str) or video_id
  3235. if video_id:
  3236. return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
  3237. # Capture and output alerts
  3238. alert = self._extract_alert(data)
  3239. if alert:
  3240. raise ExtractorError(alert, expected=True)
  3241. # Failed to recognize
  3242. raise ExtractorError('Unable to recognize tab page')
  3243. class YoutubePlaylistIE(InfoExtractor):
  3244. IE_DESC = 'YouTube.com playlists'
  3245. _VALID_URL = r'''(?x)(?:
  3246. (?:https?://)?
  3247. (?:\w+\.)?
  3248. (?:
  3249. (?:
  3250. youtube(?:kids)?\.com|
  3251. invidio\.us
  3252. )
  3253. /.*?\?.*?\blist=
  3254. )?
  3255. (?P<id>%(playlist_id)s)
  3256. )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
  3257. IE_NAME = 'youtube:playlist'
  3258. _TESTS = [{
  3259. 'note': 'issue #673',
  3260. 'url': 'PLBB231211A4F62143',
  3261. 'info_dict': {
  3262. 'title': '[OLD]Team Fortress 2 (Class-based LP)',
  3263. 'id': 'PLBB231211A4F62143',
  3264. 'uploader': 'Wickman',
  3265. 'uploader_id': '@WickmanVT',
  3266. 'channel_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
  3267. },
  3268. 'playlist_mincount': 29,
  3269. }, {
  3270. 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
  3271. 'info_dict': {
  3272. 'title': 'YDL_safe_search',
  3273. 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
  3274. },
  3275. 'playlist_count': 2,
  3276. 'skip': 'This playlist is private',
  3277. }, {
  3278. 'note': 'embedded',
  3279. 'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
  3280. # TODO: full playlist requires _reload_with_unavailable_videos()
  3281. # 'playlist_count': 4,
  3282. 'playlist_mincount': 1,
  3283. 'info_dict': {
  3284. 'title': 'JODA15',
  3285. 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
  3286. 'uploader': 'milan',
  3287. 'uploader_id': '@milan5503',
  3288. 'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
  3289. }
  3290. }, {
  3291. 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
  3292. 'playlist_mincount': 455,
  3293. 'info_dict': {
  3294. 'title': '2018 Chinese New Singles (11/6 updated)',
  3295. 'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
  3296. 'uploader': 'LBK',
  3297. 'uploader_id': '@music_king',
  3298. 'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA',
  3299. }
  3300. }, {
  3301. 'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
  3302. 'only_matching': True,
  3303. }, {
  3304. # music album playlist
  3305. 'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
  3306. 'only_matching': True,
  3307. }]
  3308. @classmethod
  3309. def suitable(cls, url):
  3310. if YoutubeTabIE.suitable(url):
  3311. return False
  3312. if parse_qs(url).get('v', [None])[0]:
  3313. return False
  3314. return super(YoutubePlaylistIE, cls).suitable(url)
  3315. def _real_extract(self, url):
  3316. playlist_id = self._match_id(url)
  3317. qs = parse_qs(url)
  3318. if not qs:
  3319. qs = {'list': playlist_id}
  3320. return self.url_result(
  3321. update_url_query('https://www.youtube.com/playlist', qs),
  3322. ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
  3323. class YoutubeYtBeIE(InfoExtractor):
  3324. _VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
  3325. _TESTS = [{
  3326. 'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
  3327. 'info_dict': {
  3328. 'id': 'yeWKywCrFtk',
  3329. 'ext': 'mp4',
  3330. 'title': 'Small Scale Baler and Braiding Rugs',
  3331. 'uploader': 'Backus-Page House Museum',
  3332. 'uploader_id': '@backuspagemuseum',
  3333. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@backuspagemuseum',
  3334. 'upload_date': '20161008',
  3335. 'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
  3336. 'categories': ['Nonprofits & Activism'],
  3337. 'tags': list,
  3338. 'like_count': int,
  3339. },
  3340. 'params': {
  3341. 'noplaylist': True,
  3342. 'skip_download': True,
  3343. },
  3344. }, {
  3345. 'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
  3346. 'only_matching': True,
  3347. }]
  3348. def _real_extract(self, url):
  3349. mobj = re.match(self._VALID_URL, url)
  3350. video_id = mobj.group('id')
  3351. playlist_id = mobj.group('playlist_id')
  3352. return self.url_result(
  3353. update_url_query('https://www.youtube.com/watch', {
  3354. 'v': video_id,
  3355. 'list': playlist_id,
  3356. 'feature': 'youtu.be',
  3357. }), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
  3358. class YoutubeYtUserIE(InfoExtractor):
  3359. _VALID_URL = r'ytuser:(?P<id>.+)'
  3360. _TESTS = [{
  3361. 'url': 'ytuser:phihag',
  3362. 'only_matching': True,
  3363. }]
  3364. def _real_extract(self, url):
  3365. user_id = self._match_id(url)
  3366. return self.url_result(
  3367. 'https://www.youtube.com/user/%s' % user_id,
  3368. ie=YoutubeTabIE.ie_key(), video_id=user_id)
  3369. class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
  3370. IE_NAME = 'youtube:favorites'
  3371. IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
  3372. _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
  3373. _LOGIN_REQUIRED = True
  3374. _TESTS = [{
  3375. 'url': ':ytfav',
  3376. 'only_matching': True,
  3377. }, {
  3378. 'url': ':ytfavorites',
  3379. 'only_matching': True,
  3380. }]
  3381. def _real_extract(self, url):
  3382. return self.url_result(
  3383. 'https://www.youtube.com/playlist?list=LL',
  3384. ie=YoutubeTabIE.ie_key())
  3385. class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
  3386. IE_DESC = 'YouTube.com searches'
  3387. IE_NAME = 'youtube:search'
  3388. _SEARCH_KEY = 'ytsearch'
  3389. _SEARCH_PARAMS = 'EgIQAQ%3D%3D' # Videos only
  3390. _MAX_RESULTS = float('inf')
  3391. _TESTS = [{
  3392. 'url': 'ytsearch10:youtube-dl test video',
  3393. 'playlist_count': 10,
  3394. 'info_dict': {
  3395. 'id': 'youtube-dl test video',
  3396. 'title': 'youtube-dl test video',
  3397. }
  3398. }]
  3399. def _get_n_results(self, query, n):
  3400. """Get a specified number of results for a query"""
  3401. entries = itertools.islice(self._search_results(query, self._SEARCH_PARAMS), 0, None if n == float('inf') else n)
  3402. return self.playlist_result(entries, query, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
    """Same as youtube:search, but results are ordered newest-first."""
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
    _SEARCH_PARAMS = 'CAISAhAB'  # Videos only, sorted by date
    _TESTS = [{
        'url': 'ytsearchdate10:youtube-dl test video',
        'playlist_count': 10,
        'info_dict': {
            'id': 'youtube-dl test video',
            'title': 'youtube-dl test video',
        }
    }]
  3416. class YoutubeSearchURLIE(YoutubeBaseInfoExtractor):
  3417. IE_DESC = 'YouTube search URLs with sorting and filter support'
  3418. IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
  3419. _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?:[^&]+)(?:[&]|$)'
  3420. _TESTS = [{
  3421. 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
  3422. 'playlist_mincount': 5,
  3423. 'info_dict': {
  3424. 'id': 'youtube-dl test video',
  3425. 'title': 'youtube-dl test video',
  3426. },
  3427. 'params': {'playlistend': 5}
  3428. }, {
  3429. 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
  3430. 'only_matching': True,
  3431. }]
  3432. def _real_extract(self, url):
  3433. qs = parse_qs(url)
  3434. query = (qs.get('search_query') or qs.get('q'))[-1]
  3435. params = qs.get('sp', ('',))[-1]
  3436. return self.playlist_result(self._search_results(query, params), query, query)
  3437. class YoutubeFeedsInfoExtractor(YoutubeTabIE):
  3438. """
  3439. Base class for feed extractors
  3440. Subclasses must define the _FEED_NAME property.
  3441. """
  3442. _LOGIN_REQUIRED = True
  3443. @property
  3444. def IE_NAME(self):
  3445. return 'youtube:%s' % self._FEED_NAME
  3446. def _real_initialize(self):
  3447. self._login()
  3448. def _real_extract(self, url):
  3449. return self.url_result(
  3450. 'https://www.youtube.com/feed/%s' % self._FEED_NAME,
  3451. ie=YoutubeTabIE.ie_key())
  3452. class YoutubeWatchLaterIE(InfoExtractor):
  3453. IE_NAME = 'youtube:watchlater'
  3454. IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
  3455. _VALID_URL = r':ytwatchlater'
  3456. _TESTS = [{
  3457. 'url': ':ytwatchlater',
  3458. 'only_matching': True,
  3459. }]
  3460. def _real_extract(self, url):
  3461. return self.url_result(
  3462. 'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    """Feed of recommended videos for the logged-in account (/feed/recommended)."""
    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
    _VALID_URL = r':ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _TESTS = [{
        'url': ':ytrec',
        'only_matching': True,
    }, {
        'url': ':ytrecommended',
        'only_matching': True,
    }]
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    """Feed of the logged-in account's subscriptions (/feed/subscriptions)."""
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r':ytsubs(?:criptions)?'
    _FEED_NAME = 'subscriptions'
    _TESTS = [{
        'url': ':ytsubs',
        'only_matching': True,
    }, {
        'url': ':ytsubscriptions',
        'only_matching': True,
    }]
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    """Feed of the logged-in account's watch history (/feed/history)."""
    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
    _VALID_URL = r':ythistory'
    _FEED_NAME = 'history'
    _TESTS = [{
        'url': ':ythistory',
        'only_matching': True,
    }]
class YoutubeTruncatedURLIE(InfoExtractor):
    """Catches watch/attribution URLs lacking any v= parameter — usually the
    result of an unquoted '&' cutting the URL in the shell — and raises a
    helpful error instead of failing obscurely."""
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?
        (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
        (?:watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+|
            x-yt-cl=[0-9]+|
            hl=[^&]*|
            t=[0-9]+
        )?
        |
            attribution_link\?a=[^&]+
        )
        $
    '''
    _TESTS = [{
        'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?feature=foo',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?hl=en-GB',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?t=2372',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Always an error: there is nothing to extract from such a URL
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like youtube-dl '
            '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply youtube-dl BaW_jenozKc .',
            expected=True)
  3538. class YoutubeTruncatedIDIE(InfoExtractor):
  3539. IE_NAME = 'youtube:truncated_id'
  3540. IE_DESC = False # Do not list
  3541. _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
  3542. _TESTS = [{
  3543. 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
  3544. 'only_matching': True,
  3545. }]
  3546. def _real_extract(self, url):
  3547. video_id = self._match_id(url)
  3548. raise ExtractorError(
  3549. 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
  3550. expected=True)