logo

youtube-dl

[mirror] Download/Watch videos from video hosters. git clone https://hacktivis.me/git/mirror/youtube-dl.git

youtube.py (162702B)


  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import itertools
  4. import json
  5. import os.path
  6. import random
  7. import re
  8. import traceback
  9. from .common import InfoExtractor, SearchInfoExtractor
  10. from ..compat import (
  11. compat_chr,
  12. compat_HTTPError,
  13. compat_map as map,
  14. compat_str,
  15. compat_urllib_parse,
  16. compat_urllib_parse_parse_qs as compat_parse_qs,
  17. compat_urllib_parse_unquote_plus,
  18. compat_urllib_parse_urlparse,
  19. )
  20. from ..jsinterp import JSInterpreter
  21. from ..utils import (
  22. ExtractorError,
  23. clean_html,
  24. dict_get,
  25. error_to_compat_str,
  26. float_or_none,
  27. extract_attributes,
  28. get_element_by_attribute,
  29. int_or_none,
  30. js_to_json,
  31. merge_dicts,
  32. mimetype2ext,
  33. parse_codecs,
  34. parse_duration,
  35. parse_qs,
  36. qualities,
  37. remove_start,
  38. smuggle_url,
  39. str_or_none,
  40. str_to_int,
  41. traverse_obj,
  42. try_get,
  43. unescapeHTML,
  44. unified_strdate,
  45. unsmuggle_url,
  46. update_url,
  47. update_url_query,
  48. url_or_none,
  49. urlencode_postdata,
  50. urljoin,
  51. )
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""

    # Google account sign-in endpoints used by _login()
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
    _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
    _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
    # {0} is filled with the "TL" token extracted from the challenge response
    _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'

    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    # Matches the known YouTube playlist ID prefixes, or the literal 'RDMM' mix ID
    _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM)'

    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.

        NOTE(review): this drives Google's (reverse-engineered) web sign-in
        flow: lookup -> password challenge -> optional TFA -> CheckCookie.
        The f.req payloads are opaque positional JSON arrays whose exact
        shape and ordering Google defines; do not reorder or "clean up"
        the literals below.
        """
        username, password = self._get_login_info()
        # No authentication to be performed
        if username is None:
            # Cookies can substitute for credentials, so only error out when
            # neither is available and login is mandatory
            if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            return

        # Hidden <input> fields from the login page are carried through each
        # subsequent POST
        login_form = self._hidden_inputs(login_page)

        def req(url, f_req, note, errnote):
            # POST the opaque f_req structure to a sign-in endpoint and parse
            # the JSON reply (which is prefixed with junk that transform_source
            # strips); returns False on failure (fatal=False)
            data = login_form.copy()
            data.update({
                'pstMsg': 1,
                'checkConnection': 'youtube',
                'checkedDomains': 'youtube',
                'hl': 'en',
                'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
                'f.req': json.dumps(f_req),
                'flowName': 'GlifWebSignIn',
                'flowEntry': 'ServiceLogin',
                # TODO: reverse actual botguard identifier generation algo
                'bgRequest': '["identifier",""]',
            })
            return self._download_json(
                url, None, note=note, errnote=errnote,
                # strip everything before the first '[' (anti-XSSI prefix)
                transform_source=lambda s: re.sub(r'^[^[]*', '', s),
                fatal=False,
                data=urlencode_postdata(data), headers={
                    'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
                    'Google-Accounts-XSRF': 1,
                })

        def warn(message):
            self._downloader.report_warning(message)

        # Positional request payload for the account lookup step
        lookup_req = [
            username,
            None, [], None, 'US', None, None, 2, False, True,
            [
                None, None,
                [2, 1, None, 1,
                 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
                 None, [], 4],
                1, [None, None, []], None, None, None, True
            ],
            username,
        ]

        lookup_results = req(
            self._LOOKUP_URL, lookup_req,
            'Looking up account info', 'Unable to look up account info')

        if lookup_results is False:
            return False

        # The account identifier hash lives at [0][2] of the lookup reply
        user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
        if not user_hash:
            warn('Unable to extract user hash')
            return False

        # Positional payload for the password challenge step
        challenge_req = [
            user_hash,
            None, 1, None, [1, None, None, None, [password, None, True]],
            [
                None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
                1, [None, None, []], None, None, None, True
            ]]

        challenge_results = req(
            self._CHALLENGE_URL, challenge_req,
            'Logging in', 'Unable to log in')

        if challenge_results is False:
            return

        # A non-empty [0][5] entry signals a login error (e.g. bad password)
        login_res = try_get(challenge_results, lambda x: x[0][5], list)
        if login_res:
            login_msg = try_get(login_res, lambda x: x[5], compat_str)
            warn(
                'Unable to login: %s' % 'Invalid password'
                if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
            return False

        res = try_get(challenge_results, lambda x: x[0][-1], list)
        if not res:
            warn('Unable to extract result entry')
            return False

        # Presence of a challenge entry means additional verification is
        # required before the session is granted
        login_challenge = try_get(res, lambda x: x[0][0], list)
        if login_challenge:
            challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
            if challenge_str == 'TWO_STEP_VERIFICATION':
                # SEND_SUCCESS - TFA code has been successfully sent to phone
                # QUOTA_EXCEEDED - reached the limit of TFA codes
                status = try_get(login_challenge, lambda x: x[5], compat_str)
                if status == 'QUOTA_EXCEEDED':
                    warn('Exceeded the limit of TFA codes, try later')
                    return False

                # "TL" token identifying this challenge session, required in
                # the TFA submission URL
                tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
                if not tl:
                    warn('Unable to extract TL')
                    return False

                tfa_code = self._get_tfa_info('2-step verification code')

                if not tfa_code:
                    warn(
                        'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
                        '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                    return False

                # Codes are sometimes entered with the SMS 'G-' prefix
                tfa_code = remove_start(tfa_code, 'G-')

                # Positional payload for the TFA submission step
                tfa_req = [
                    user_hash, None, 2, None,
                    [
                        9, None, None, None, None, None, None, None,
                        [None, tfa_code, True, 2]
                    ]]

                tfa_results = req(
                    self._TFA_URL.format(tl), tfa_req,
                    'Submitting TFA code', 'Unable to submit TFA code')

                if tfa_results is False:
                    return False

                # Same error-reporting slot as the password step
                tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
                if tfa_res:
                    tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
                    warn(
                        'Unable to finish TFA: %s' % 'Invalid TFA code'
                        if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
                    return False

                check_cookie_url = try_get(
                    tfa_results, lambda x: x[0][-1][2], compat_str)
            else:
                # Interactive challenges cannot be solved here; point the user
                # at a browser
                CHALLENGES = {
                    'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
                    'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
                    'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
                }
                challenge = CHALLENGES.get(
                    challenge_str,
                    '%s returned error %s.' % (self.IE_NAME, challenge_str))
                warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
                return False
        else:
            check_cookie_url = try_get(res, lambda x: x[2], compat_str)

        if not check_cookie_url:
            warn('Unable to extract CheckCookie URL')
            return False

        # Fetching CheckCookie finalizes the session; success is indicated by
        # a redirect/content mentioning myaccount.google.com
        check_cookie_results = self._download_webpage(
            check_cookie_url, None, 'Checking cookie', fatal=False)

        if check_cookie_results is False:
            return False

        if 'https://myaccount.google.com/' not in check_cookie_results:
            warn('Unable to log in')
            return False

        return True

    def _initialize_consent(self):
        """Pre-set a CONSENT cookie so youtube.com does not redirect to the
        EU cookie-consent interstitial page."""
        cookies = self._get_cookies('https://www.youtube.com/')
        if cookies.get('__Secure-3PSID'):
            # Already authenticated - consent page is not shown
            return
        consent_id = None
        consent = cookies.get('CONSENT')
        if consent:
            if 'YES' in consent.value:
                # Consent already given
                return
            consent_id = self._search_regex(
                r'PENDING\+(\d+)', consent.value, 'consent', default=None)
        if not consent_id:
            consent_id = random.randint(100, 999)
        self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)

    def _real_initialize(self):
        """Set up consent cookie and perform login (if credentials given)
        before any extraction starts."""
        self._initialize_consent()
        if self._downloader is None:
            return
        if not self._login():
            return

    # Default request body for the Innertube (youtubei/v1) API, emulating the
    # desktop web client
    _DEFAULT_API_DATA = {
        'context': {
            'client': {
                'clientName': 'WEB',
                'clientVersion': '2.20201021.03.00',
            }
        },
    }

    # Regexes locating the inline JSON blobs embedded in watch/channel pages
    _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
    _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
    _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'

    def _call_api(self, ep, query, video_id, fatal=True, headers=None):
        """POST to the Innertube API endpoint `ep` with `query` merged over
        the default client context; returns the decoded JSON response."""
        data = self._DEFAULT_API_DATA.copy()
        data.update(query)

        real_headers = {'content-type': 'application/json'}
        if headers:
            real_headers.update(headers)

        return self._download_json(
            'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
            note='Downloading API JSON', errnote='Unable to download API page',
            data=json.dumps(data).encode('utf8'), fatal=fatal,
            headers=real_headers,
            # Public web-client API key (not a secret)
            query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})

    def _extract_yt_initial_data(self, video_id, webpage):
        """Extract and parse the ytInitialData JSON object from a page.

        First tries the stricter pattern anchored at a known boundary, then
        falls back to the bare pattern.
        """
        return self._parse_json(
            self._search_regex(
                (r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
                 self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
            video_id)

    def _extract_ytcfg(self, video_id, webpage):
        """Extract the ytcfg.set({...}) configuration object from a page;
        returns {} when absent or unparsable."""
        return self._parse_json(
            self._search_regex(
                r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
                default='{}'), video_id, fatal=False) or {}

    def _extract_video(self, renderer):
        """Build a url-type info dict from a videoRenderer object.

        Raises KeyError if the renderer has no 'videoId'; all other fields
        are extracted best-effort and may be None.
        """
        video_id = renderer['videoId']
        title = try_get(
            renderer,
            (lambda x: x['title']['runs'][0]['text'],
             lambda x: x['title']['simpleText'],
             lambda x: x['headline']['simpleText']), compat_str)
        description = try_get(
            renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
            compat_str)
        duration = parse_duration(try_get(
            renderer, lambda x: x['lengthText']['simpleText'], compat_str))
        view_count_text = try_get(
            renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
        view_count = str_to_int(self._search_regex(
            r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
            'view count', default=None))
        uploader = try_get(
            renderer,
            (lambda x: x['ownerText']['runs'][0]['text'],
             lambda x: x['shortBylineText']['runs'][0]['text']), compat_str)
        return {
            '_type': 'url',
            'ie_key': YoutubeIE.ie_key(),
            'id': video_id,
            'url': video_id,
            'title': title,
            'description': description,
            'duration': duration,
            'view_count': view_count,
            'uploader': uploader,
        }

    def _search_results(self, query, params):
        """Generator yielding search-result info dicts for `query`.

        Pages through the Innertube search endpoint, following continuation
        tokens until a page yields no token.
        """
        data = {
            'context': {
                'client': {
                    'clientName': 'WEB',
                    'clientVersion': '2.20201021.03.00',
                }
            },
            'query': query,
        }
        if params:
            data['params'] = params
        for page_num in itertools.count(1):
            search = self._download_json(
                'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
                video_id='query "%s"' % query,
                note='Downloading page %s' % page_num,
                errnote='Unable to download API page', fatal=False,
                data=json.dumps(data).encode('utf8'),
                headers={'content-type': 'application/json'})
            if not search:
                break
            # First page and continuation pages nest the results differently
            slr_contents = try_get(
                search,
                (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
                 lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
                list)
            if not slr_contents:
                break
            for slr_content in slr_contents:
                isr_contents = try_get(
                    slr_content,
                    lambda x: x['itemSectionRenderer']['contents'],
                    list)
                if not isr_contents:
                    continue
                for content in isr_contents:
                    if not isinstance(content, dict):
                        continue
                    video = content.get('videoRenderer')
                    if not isinstance(video, dict):
                        continue
                    video_id = video.get('videoId')
                    if not video_id:
                        continue
                    yield self._extract_video(video)
            # The continuation token (if any) is attached to the last section
            token = try_get(
                slr_contents,
                lambda x: x[-1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
                compat_str)
            if not token:
                break
            data['continuation'] = token

    @staticmethod
    def _owner_endpoints_path():
        """Return a traverse_obj() path to the channel-owner title runs
        inside any '...SecondaryInfoRenderer' entry."""
        return [
            Ellipsis,
            lambda k, _: k.endswith('SecondaryInfoRenderer'),
            ('owner', 'videoOwner'), 'videoOwnerRenderer', 'title',
            'runs', Ellipsis]

    # NOTE(review): the mutable default arguments ({} / []) below are safe as
    # written - they are only read, never mutated - but are a known Python
    # pitfall if that ever changes.
    def _extract_channel_id(self, webpage, videodetails={}, metadata={}, renderers=[]):
        """Get the channel ID, preferring structured API data (video details,
        metadata, renderers, in that order) over the page's <meta> tag."""
        channel_id = None
        if any((videodetails, metadata, renderers)):
            channel_id = (
                traverse_obj(videodetails, 'channelId')
                or traverse_obj(metadata, 'externalChannelId', 'externalId')
                or traverse_obj(renderers,
                                self._owner_endpoints_path() + [
                                    'navigationEndpoint', 'browseEndpoint', 'browseId'],
                                get_all=False)
            )
        return channel_id or self._html_search_meta(
            'channelId', webpage, 'channel id', default=None)

    def _extract_author_var(self, webpage, var_name,
                            videodetails={}, metadata={}, renderers=[]):
        """Get an uploader attribute (`var_name` is 'name' or 'url'),
        preferring structured API data and falling back to the itemprop
        author <link> element in the page HTML."""
        result = None
        paths = {
            # (HTML, videodetails, metadata, renderers)
            'name': ('content', 'author', (('ownerChannelName', None), 'title'), ['text']),
            'url': ('href', 'ownerProfileUrl', 'vanityChannelUrl',
                    ['navigationEndpoint', 'browseEndpoint', 'canonicalBaseUrl'])
        }
        if any((videodetails, metadata, renderers)):
            result = (
                traverse_obj(videodetails, paths[var_name][1], get_all=False)
                or traverse_obj(metadata, paths[var_name][2], get_all=False)
                or traverse_obj(renderers,
                                self._owner_endpoints_path() + paths[var_name][3],
                                get_all=False)
            )
        return result or traverse_obj(
            extract_attributes(self._search_regex(
                r'''(?s)(<link\b[^>]+\bitemprop\s*=\s*("|')%s\2[^>]*>)'''
                % re.escape(var_name),
                get_element_by_attribute('itemprop', 'author', webpage) or '',
                'author link', default='')),
            paths[var_name][0])

    @staticmethod
    def _yt_urljoin(url_or_path):
        """Resolve a path or URL against https://www.youtube.com."""
        return urljoin('https://www.youtube.com', url_or_path)

    def _extract_uploader_id(self, uploader_url):
        """Extract the uploader ID ('@handle', channel or user name) from an
        uploader URL; returns None when no match."""
        return self._search_regex(
            r'/(?:(?:channel|user)/|(?=@))([^/?&#]+)', uploader_url or '',
            'uploader id', default=None)
  406. class YoutubeIE(YoutubeBaseInfoExtractor):
  407. IE_DESC = 'YouTube.com'
  408. _INVIDIOUS_SITES = (
  409. # invidious-redirect websites
  410. r'(?:www\.)?redirect\.invidious\.io',
  411. r'(?:(?:www|dev)\.)?invidio\.us',
  412. # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
  413. r'(?:(?:www|no)\.)?invidiou\.sh',
  414. r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
  415. r'(?:www\.)?invidious\.kabi\.tk',
  416. r'(?:www\.)?invidious\.13ad\.de',
  417. r'(?:www\.)?invidious\.mastodon\.host',
  418. r'(?:www\.)?invidious\.zapashcanon\.fr',
  419. r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
  420. r'(?:www\.)?invidious\.tinfoil-hat\.net',
  421. r'(?:www\.)?invidious\.himiko\.cloud',
  422. r'(?:www\.)?invidious\.reallyancient\.tech',
  423. r'(?:www\.)?invidious\.tube',
  424. r'(?:www\.)?invidiou\.site',
  425. r'(?:www\.)?invidious\.site',
  426. r'(?:www\.)?invidious\.xyz',
  427. r'(?:www\.)?invidious\.nixnet\.xyz',
  428. r'(?:www\.)?invidious\.048596\.xyz',
  429. r'(?:www\.)?invidious\.drycat\.fr',
  430. r'(?:www\.)?inv\.skyn3t\.in',
  431. r'(?:www\.)?tube\.poal\.co',
  432. r'(?:www\.)?tube\.connect\.cafe',
  433. r'(?:www\.)?vid\.wxzm\.sx',
  434. r'(?:www\.)?vid\.mint\.lgbt',
  435. r'(?:www\.)?vid\.puffyan\.us',
  436. r'(?:www\.)?yewtu\.be',
  437. r'(?:www\.)?yt\.elukerio\.org',
  438. r'(?:www\.)?yt\.lelux\.fi',
  439. r'(?:www\.)?invidious\.ggc-project\.de',
  440. r'(?:www\.)?yt\.maisputain\.ovh',
  441. r'(?:www\.)?ytprivate\.com',
  442. r'(?:www\.)?invidious\.13ad\.de',
  443. r'(?:www\.)?invidious\.toot\.koeln',
  444. r'(?:www\.)?invidious\.fdn\.fr',
  445. r'(?:www\.)?watch\.nettohikari\.com',
  446. r'(?:www\.)?invidious\.namazso\.eu',
  447. r'(?:www\.)?invidious\.silkky\.cloud',
  448. r'(?:www\.)?invidious\.exonip\.de',
  449. r'(?:www\.)?invidious\.riverside\.rocks',
  450. r'(?:www\.)?invidious\.blamefran\.net',
  451. r'(?:www\.)?invidious\.moomoo\.de',
  452. r'(?:www\.)?ytb\.trom\.tf',
  453. r'(?:www\.)?yt\.cyberhost\.uk',
  454. r'(?:www\.)?kgg2m7yk5aybusll\.onion',
  455. r'(?:www\.)?qklhadlycap4cnod\.onion',
  456. r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
  457. r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
  458. r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
  459. r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
  460. r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
  461. r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
  462. r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
  463. r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
  464. r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
  465. r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
  466. )
  467. _VALID_URL = r"""(?x)^
  468. (
  469. (?:https?://|//) # http(s):// or protocol-independent URL
  470. (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
  471. (?:www\.)?deturl\.com/www\.youtube\.com|
  472. (?:www\.)?pwnyoutube\.com|
  473. (?:www\.)?hooktube\.com|
  474. (?:www\.)?yourepeat\.com|
  475. tube\.majestyc\.net|
  476. %(invidious)s|
  477. youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
  478. (?:.*?\#/)? # handle anchor (#/) redirect urls
  479. (?: # the various things that can precede the ID:
  480. (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
  481. |shorts/
  482. |(?: # or the v= param in all its forms
  483. (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
  484. (?:\?|\#!?) # the params delimiter ? or # or #!
  485. (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
  486. v=
  487. )
  488. ))
  489. |(?:
  490. youtu\.be| # just youtu.be/xxxx
  491. vid\.plus| # or vid.plus/xxxx
  492. zwearz\.com/watch| # or zwearz.com/watch/xxxx
  493. %(invidious)s
  494. )/
  495. |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
  496. )
  497. )? # all until now is optional -> you can pass the naked ID
  498. (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
  499. (?(1).+)? # if we found the ID, everything can follow
  500. $""" % {
  501. 'invidious': '|'.join(_INVIDIOUS_SITES),
  502. }
  503. _PLAYER_INFO_RE = (
  504. r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
  505. r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
  506. r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
  507. )
  508. _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
  509. _GEO_BYPASS = False
  510. IE_NAME = 'youtube'
  511. _TESTS = [
  512. {
  513. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
  514. 'info_dict': {
  515. 'id': 'BaW_jenozKc',
  516. 'ext': 'mp4',
  517. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  518. 'uploader': 'Philipp Hagemeister',
  519. 'uploader_id': '@PhilippHagemeister',
  520. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@PhilippHagemeister',
  521. 'channel': 'Philipp Hagemeister',
  522. 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
  523. 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
  524. 'upload_date': '20121002',
  525. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  526. 'categories': ['Science & Technology'],
  527. 'tags': ['youtube-dl'],
  528. 'duration': 10,
  529. 'view_count': int,
  530. 'like_count': int,
  531. 'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
  532. 'start_time': 1,
  533. 'end_time': 9,
  534. },
  535. },
  536. {
  537. 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
  538. 'note': 'Embed-only video (#1746)',
  539. 'info_dict': {
  540. 'id': 'yZIXLfi8CZQ',
  541. 'ext': 'mp4',
  542. 'upload_date': '20120608',
  543. 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
  544. 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
  545. 'uploader': 'SET India',
  546. 'uploader_id': 'setindia',
  547. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
  548. 'age_limit': 18,
  549. },
  550. 'skip': 'Private video',
  551. },
  552. {
  553. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
  554. 'note': 'Use the first video ID in the URL',
  555. 'info_dict': {
  556. 'id': 'BaW_jenozKc',
  557. 'ext': 'mp4',
  558. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  559. 'uploader': 'Philipp Hagemeister',
  560. 'uploader_id': '@PhilippHagemeister',
  561. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@PhilippHagemeister',
  562. 'upload_date': '20121002',
  563. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  564. 'categories': ['Science & Technology'],
  565. 'tags': ['youtube-dl'],
  566. 'duration': 10,
  567. 'view_count': int,
  568. 'like_count': int,
  569. },
  570. 'params': {
  571. 'skip_download': True,
  572. },
  573. },
  574. {
  575. 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
  576. 'note': '256k DASH audio (format 141) via DASH manifest',
  577. 'info_dict': {
  578. 'id': 'a9LDPn-MO4I',
  579. 'ext': 'm4a',
  580. 'upload_date': '20121002',
  581. 'uploader_id': '8KVIDEO',
  582. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
  583. 'description': '',
  584. 'uploader': '8KVIDEO',
  585. 'title': 'UHDTV TEST 8K VIDEO.mp4'
  586. },
  587. 'params': {
  588. 'youtube_include_dash_manifest': True,
  589. 'format': '141',
  590. },
  591. 'skip': 'format 141 not served any more',
  592. },
  593. # DASH manifest with encrypted signature
  594. {
  595. 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
  596. 'info_dict': {
  597. 'id': 'IB3lcPjvWLA',
  598. 'ext': 'm4a',
  599. 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
  600. 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
  601. 'duration': 244,
  602. 'uploader': 'AfrojackVEVO',
  603. 'uploader_id': '@AfrojackVEVO',
  604. 'upload_date': '20131011',
  605. 'abr': 129.495,
  606. },
  607. 'params': {
  608. 'youtube_include_dash_manifest': True,
  609. 'format': '141/bestaudio[ext=m4a]',
  610. },
  611. },
  612. # Controversy video
  613. {
  614. 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
  615. 'info_dict': {
  616. 'id': 'T4XJQO3qol8',
  617. 'ext': 'mp4',
  618. 'duration': 219,
  619. 'upload_date': '20100909',
  620. 'uploader': 'Amazing Atheist',
  621. 'uploader_id': '@theamazingatheist',
  622. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@theamazingatheist',
  623. 'title': 'Burning Everyone\'s Koran',
  624. 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms \r\n\r\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
  625. }
  626. },
  627. # Age-gated videos
  628. {
  629. 'note': 'Age-gated video (No vevo, embed allowed)',
  630. 'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
  631. 'info_dict': {
  632. 'id': 'HtVdAasjOgU',
  633. 'ext': 'mp4',
  634. 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
  635. 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
  636. 'duration': 142,
  637. 'uploader': 'The Witcher',
  638. 'uploader_id': '@thewitcher',
  639. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@thewitcher',
  640. 'upload_date': '20140605',
  641. 'thumbnail': 'https://i.ytimg.com/vi/HtVdAasjOgU/maxresdefault.jpg',
  642. 'age_limit': 18,
  643. 'categories': ['Gaming'],
  644. 'tags': 'count:17',
  645. 'channel': 'The Witcher',
  646. 'channel_url': 'https://www.youtube.com/channel/UCzybXLxv08IApdjdN0mJhEg',
  647. 'channel_id': 'UCzybXLxv08IApdjdN0mJhEg',
  648. 'view_count': int,
  649. 'like_count': int,
  650. },
  651. },
  652. {
  653. 'note': 'Age-gated video with embed allowed in public site',
  654. 'url': 'https://youtube.com/watch?v=HsUATh_Nc2U',
  655. 'info_dict': {
  656. 'id': 'HsUATh_Nc2U',
  657. 'ext': 'mp4',
  658. 'title': 'Godzilla 2 (Official Video)',
  659. 'description': 'md5:bf77e03fcae5529475e500129b05668a',
  660. 'duration': 177,
  661. 'uploader': 'FlyingKitty',
  662. 'uploader_id': '@FlyingKitty900',
  663. 'upload_date': '20200408',
  664. 'thumbnail': 'https://i.ytimg.com/vi/HsUATh_Nc2U/maxresdefault.jpg',
  665. 'age_limit': 18,
  666. 'categories': ['Entertainment'],
  667. 'tags': ['Flyingkitty', 'godzilla 2'],
  668. 'channel': 'FlyingKitty',
  669. 'channel_url': 'https://www.youtube.com/channel/UCYQT13AtrJC0gsM1far_zJg',
  670. 'channel_id': 'UCYQT13AtrJC0gsM1far_zJg',
  671. 'view_count': int,
  672. 'like_count': int,
  673. },
  674. },
  675. {
  676. 'note': 'Age-gated video embeddable only with clientScreen=EMBED',
  677. 'url': 'https://youtube.com/watch?v=Tq92D6wQ1mg',
  678. 'info_dict': {
  679. 'id': 'Tq92D6wQ1mg',
  680. 'ext': 'mp4',
  681. 'title': '[MMD] Adios - EVERGLOW [+Motion DL]',
  682. 'description': 'md5:17eccca93a786d51bc67646756894066',
  683. 'duration': 106,
  684. 'uploader': 'Projekt Melody',
  685. 'uploader_id': '@ProjektMelody',
  686. 'upload_date': '20191227',
  687. 'age_limit': 18,
  688. 'thumbnail': 'https://i.ytimg.com/vi/Tq92D6wQ1mg/sddefault.jpg',
  689. 'tags': ['mmd', 'dance', 'mikumikudance', 'kpop', 'vtuber'],
  690. 'categories': ['Entertainment'],
  691. 'channel': 'Projekt Melody',
  692. 'channel_url': 'https://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
  693. 'channel_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
  694. 'view_count': int,
  695. 'like_count': int,
  696. },
  697. },
  698. {
  699. 'note': 'Non-Age-gated non-embeddable video',
  700. 'url': 'https://youtube.com/watch?v=MeJVWBSsPAY',
  701. 'info_dict': {
  702. 'id': 'MeJVWBSsPAY',
  703. 'ext': 'mp4',
  704. 'title': 'OOMPH! - Such Mich Find Mich (Lyrics)',
  705. 'description': 'Fan Video. Music & Lyrics by OOMPH!.',
  706. 'duration': 210,
  707. 'upload_date': '20130730',
  708. 'uploader': 'Herr Lurik',
  709. 'uploader_id': '@HerrLurik',
  710. 'uploader_url': 'http://www.youtube.com/@HerrLurik',
  711. 'age_limit': 0,
  712. 'thumbnail': 'https://i.ytimg.com/vi/MeJVWBSsPAY/hqdefault.jpg',
  713. 'tags': ['oomph', 'such mich find mich', 'lyrics', 'german industrial', 'musica industrial'],
  714. 'categories': ['Music'],
  715. 'channel': 'Herr Lurik',
  716. 'channel_url': 'https://www.youtube.com/channel/UCdR3RSDPqub28LjZx0v9-aA',
  717. 'channel_id': 'UCdR3RSDPqub28LjZx0v9-aA',
  718. 'artist': 'OOMPH!',
  719. 'view_count': int,
  720. 'like_count': int,
  721. },
  722. },
  723. {
  724. 'note': 'Non-bypassable age-gated video',
  725. 'url': 'https://youtube.com/watch?v=Cr381pDsSsA',
  726. 'only_matching': True,
  727. },
  728. {
  729. 'note': 'Age-gated video only available with authentication (not via embed workaround)',
  730. 'url': 'XgnwCQzjau8',
  731. 'only_matching': True,
  732. 'skip': '''This video has been removed for violating YouTube's Community Guidelines''',
  733. },
  734. # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
  735. # YouTube Red ad is not captured for creator
  736. {
  737. 'url': '__2ABJjxzNo',
  738. 'info_dict': {
  739. 'id': '__2ABJjxzNo',
  740. 'ext': 'mp4',
  741. 'duration': 266,
  742. 'upload_date': '20100430',
  743. 'uploader_id': '@deadmau5',
  744. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@deadmau5',
  745. 'creator': 'deadmau5',
  746. 'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
  747. 'uploader': 'deadmau5',
  748. 'title': 'Deadmau5 - Some Chords (HD)',
  749. 'alt_title': 'Some Chords',
  750. },
  751. 'expected_warnings': [
  752. 'DASH manifest missing',
  753. ]
  754. },
  755. # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
  756. {
  757. 'url': 'lqQg6PlCWgI',
  758. 'info_dict': {
  759. 'id': 'lqQg6PlCWgI',
  760. 'ext': 'mp4',
  761. 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
  762. 'description': r're:(?s)(?:.+\s)?HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games\s*',
  763. 'duration': 6085,
  764. 'upload_date': '20150827',
  765. 'uploader_id': '@Olympics',
  766. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@Olympics',
  767. 'uploader': r're:Olympics?',
  768. 'age_limit': 0,
  769. 'thumbnail': 'https://i.ytimg.com/vi/lqQg6PlCWgI/maxresdefault.jpg',
  770. 'categories': ['Sports'],
  771. 'tags': ['Hockey', '2012-07-31', '31 July 2012', 'Riverbank Arena', 'Session', 'Olympics', 'Olympic Games', 'London 2012', '2012 Summer Olympics', 'Summer Games'],
  772. 'channel': 'Olympics',
  773. 'channel_url': 'https://www.youtube.com/channel/UCTl3QQTvqHFjurroKxexy2Q',
  774. 'channel_id': 'UCTl3QQTvqHFjurroKxexy2Q',
  775. 'view_count': int,
  776. 'like_count': int,
  777. },
  778. },
  779. # Non-square pixels
  780. {
  781. 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
  782. 'info_dict': {
  783. 'id': '_b-2C3KPAM0',
  784. 'ext': 'mp4',
  785. 'stretched_ratio': 16 / 9.,
  786. 'duration': 85,
  787. 'upload_date': '20110310',
  788. 'uploader_id': '@AllenMeow',
  789. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@AllenMeow',
  790. 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
  791. 'uploader': '孫ᄋᄅ',
  792. 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
  793. },
  794. },
  795. # url_encoded_fmt_stream_map is empty string
  796. {
  797. 'url': 'qEJwOuvDf7I',
  798. 'info_dict': {
  799. 'id': 'qEJwOuvDf7I',
  800. 'ext': 'webm',
  801. 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
  802. 'description': '',
  803. 'upload_date': '20150404',
  804. 'uploader_id': 'spbelect',
  805. 'uploader': 'Наблюдатели Петербурга',
  806. },
  807. 'params': {
  808. 'skip_download': 'requires avconv',
  809. },
  810. 'skip': 'This live event has ended.',
  811. },
  812. # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
  813. {
  814. 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
  815. 'info_dict': {
  816. 'id': 'FIl7x6_3R5Y',
  817. 'ext': 'webm',
  818. 'title': 'md5:7b81415841e02ecd4313668cde88737a',
  819. 'description': 'md5:116377fd2963b81ec4ce64b542173306',
  820. 'duration': 220,
  821. 'upload_date': '20150625',
  822. 'uploader_id': 'dorappi2000',
  823. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
  824. 'uploader': 'dorappi2000',
  825. 'formats': 'mincount:31',
  826. },
  827. 'skip': 'not actual any more',
  828. },
  829. # DASH manifest with segment_list
  830. {
  831. 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
  832. 'md5': '8ce563a1d667b599d21064e982ab9e31',
  833. 'info_dict': {
  834. 'id': 'CsmdDsKjzN8',
  835. 'ext': 'mp4',
  836. 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
  837. 'uploader': 'Airtek',
  838. 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
  839. 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
  840. 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
  841. },
  842. 'params': {
  843. 'youtube_include_dash_manifest': True,
  844. 'format': '135', # bestvideo
  845. },
  846. 'skip': 'This live event has ended.',
  847. },
  848. {
  849. # Multifeed videos (multiple cameras), URL is for Main Camera
  850. 'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
  851. 'info_dict': {
  852. 'id': 'jvGDaLqkpTg',
  853. 'title': 'Tom Clancy Free Weekend Rainbow Whatever',
  854. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  855. },
  856. 'playlist': [{
  857. 'info_dict': {
  858. 'id': 'jvGDaLqkpTg',
  859. 'ext': 'mp4',
  860. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
  861. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  862. 'duration': 10643,
  863. 'upload_date': '20161111',
  864. 'uploader': 'Team PGP',
  865. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  866. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  867. },
  868. }, {
  869. 'info_dict': {
  870. 'id': '3AKt1R1aDnw',
  871. 'ext': 'mp4',
  872. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
  873. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  874. 'duration': 10991,
  875. 'upload_date': '20161111',
  876. 'uploader': 'Team PGP',
  877. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  878. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  879. },
  880. }, {
  881. 'info_dict': {
  882. 'id': 'RtAMM00gpVc',
  883. 'ext': 'mp4',
  884. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
  885. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  886. 'duration': 10995,
  887. 'upload_date': '20161111',
  888. 'uploader': 'Team PGP',
  889. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  890. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  891. },
  892. }, {
  893. 'info_dict': {
  894. 'id': '6N2fdlP3C5U',
  895. 'ext': 'mp4',
  896. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
  897. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  898. 'duration': 10990,
  899. 'upload_date': '20161111',
  900. 'uploader': 'Team PGP',
  901. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  902. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  903. },
  904. }],
  905. 'params': {
  906. 'skip_download': True,
  907. },
  908. 'skip': 'Not multifeed any more',
  909. },
  910. {
  911. # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
  912. 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
  913. 'info_dict': {
  914. 'id': 'gVfLd0zydlo',
  915. 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
  916. },
  917. 'playlist_count': 2,
  918. 'skip': 'Not multifeed any more',
  919. },
  920. {
  921. 'url': 'https://vid.plus/FlRa-iH7PGw',
  922. 'only_matching': True,
  923. },
  924. {
  925. 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
  926. 'only_matching': True,
  927. },
  928. {
  929. # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  930. # Also tests cut-off URL expansion in video description (see
  931. # https://github.com/ytdl-org/youtube-dl/issues/1892,
  932. # https://github.com/ytdl-org/youtube-dl/issues/8164)
  933. 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
  934. 'info_dict': {
  935. 'id': 'lsguqyKfVQg',
  936. 'ext': 'mp4',
  937. 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
  938. 'alt_title': 'Dark Walk',
  939. 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
  940. 'duration': 133,
  941. 'upload_date': '20151119',
  942. 'uploader_id': '@IronSoulElf',
  943. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@IronSoulElf',
  944. 'uploader': 'IronSoulElf',
  945. 'creator': r're:Todd Haberman[;,]\s+Daniel Law Heath and Aaron Kaplan',
  946. 'track': 'Dark Walk',
  947. 'artist': r're:Todd Haberman[;,]\s+Daniel Law Heath and Aaron Kaplan',
  948. 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
  949. },
  950. 'params': {
  951. 'skip_download': True,
  952. },
  953. },
  954. {
  955. # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  956. 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
  957. 'only_matching': True,
  958. },
  959. {
  960. # Video with yt:stretch=17:0
  961. 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
  962. 'info_dict': {
  963. 'id': 'Q39EVAstoRM',
  964. 'ext': 'mp4',
  965. 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
  966. 'description': 'md5:ee18a25c350637c8faff806845bddee9',
  967. 'upload_date': '20151107',
  968. 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
  969. 'uploader': 'CH GAMER DROID',
  970. },
  971. 'params': {
  972. 'skip_download': True,
  973. },
  974. 'skip': 'This video does not exist.',
  975. },
  976. {
  977. # Video with incomplete 'yt:stretch=16:'
  978. 'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
  979. 'only_matching': True,
  980. },
  981. {
  982. # Video licensed under Creative Commons
  983. 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
  984. 'info_dict': {
  985. 'id': 'M4gD1WSo5mA',
  986. 'ext': 'mp4',
  987. 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
  988. 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
  989. 'duration': 721,
  990. 'upload_date': '20150127',
  991. 'uploader_id': '@BKCHarvard',
  992. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@BKCHarvard',
  993. 'uploader': 'The Berkman Klein Center for Internet & Society',
  994. 'license': 'Creative Commons Attribution license (reuse allowed)',
  995. },
  996. 'params': {
  997. 'skip_download': True,
  998. },
  999. },
  1000. {
  1001. # Channel-like uploader_url
  1002. 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
  1003. 'info_dict': {
  1004. 'id': 'eQcmzGIKrzg',
  1005. 'ext': 'mp4',
  1006. 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
  1007. 'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
  1008. 'duration': 4060,
  1009. 'upload_date': '20151119',
  1010. 'uploader': 'Bernie Sanders',
  1011. 'uploader_id': '@BernieSanders',
  1012. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@BernieSanders',
  1013. 'license': 'Creative Commons Attribution license (reuse allowed)',
  1014. },
  1015. 'params': {
  1016. 'skip_download': True,
  1017. },
  1018. },
  1019. {
  1020. 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
  1021. 'only_matching': True,
  1022. },
  1023. {
  1024. # YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
  1025. 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
  1026. 'only_matching': True,
  1027. },
  1028. {
  1029. # Rental video preview
  1030. 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
  1031. 'info_dict': {
  1032. 'id': 'uGpuVWrhIzE',
  1033. 'ext': 'mp4',
  1034. 'title': 'Piku - Trailer',
  1035. 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
  1036. 'upload_date': '20150811',
  1037. 'uploader': 'FlixMatrix',
  1038. 'uploader_id': 'FlixMatrixKaravan',
  1039. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
  1040. 'license': 'Standard YouTube License',
  1041. },
  1042. 'params': {
  1043. 'skip_download': True,
  1044. },
  1045. 'skip': 'This video is not available.',
  1046. },
  1047. {
  1048. # YouTube Red video with episode data
  1049. 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
  1050. 'info_dict': {
  1051. 'id': 'iqKdEhx-dD4',
  1052. 'ext': 'mp4',
  1053. 'title': 'Isolation - Mind Field (Ep 1)',
  1054. 'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
  1055. 'duration': 2085,
  1056. 'upload_date': '20170118',
  1057. 'uploader': 'Vsauce',
  1058. 'uploader_id': '@Vsauce',
  1059. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@Vsauce',
  1060. 'series': 'Mind Field',
  1061. 'season_number': 1,
  1062. 'episode_number': 1,
  1063. },
  1064. 'params': {
  1065. 'skip_download': True,
  1066. },
  1067. 'expected_warnings': [
  1068. 'Skipping DASH manifest',
  1069. ],
  1070. },
  1071. {
  1072. # The following content has been identified by the YouTube community
  1073. # as inappropriate or offensive to some audiences.
  1074. 'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
  1075. 'info_dict': {
  1076. 'id': '6SJNVb0GnPI',
  1077. 'ext': 'mp4',
  1078. 'title': 'Race Differences in Intelligence',
  1079. 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
  1080. 'duration': 965,
  1081. 'upload_date': '20140124',
  1082. 'uploader': 'New Century Foundation',
  1083. 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
  1084. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
  1085. },
  1086. 'params': {
  1087. 'skip_download': True,
  1088. },
  1089. 'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
  1090. },
  1091. {
  1092. # itag 212
  1093. 'url': '1t24XAntNCY',
  1094. 'only_matching': True,
  1095. },
  1096. {
  1097. # geo restricted to JP
  1098. 'url': 'sJL6WA-aGkQ',
  1099. 'only_matching': True,
  1100. },
  1101. {
  1102. 'url': 'https://invidio.us/watch?v=BaW_jenozKc',
  1103. 'only_matching': True,
  1104. },
  1105. {
  1106. 'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
  1107. 'only_matching': True,
  1108. },
  1109. {
  1110. # from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
  1111. 'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
  1112. 'only_matching': True,
  1113. },
  1114. {
  1115. # DRM protected
  1116. 'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
  1117. 'only_matching': True,
  1118. },
  1119. {
  1120. # Video with unsupported adaptive stream type formats
  1121. 'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
  1122. 'info_dict': {
  1123. 'id': 'Z4Vy8R84T1U',
  1124. 'ext': 'mp4',
  1125. 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
  1126. 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
  1127. 'duration': 433,
  1128. 'upload_date': '20130923',
  1129. 'uploader': 'Amelia Putri Harwita',
  1130. 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
  1131. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
  1132. 'formats': 'maxcount:10',
  1133. },
  1134. 'params': {
  1135. 'skip_download': True,
  1136. 'youtube_include_dash_manifest': False,
  1137. },
  1138. 'skip': 'not actual any more',
  1139. },
  1140. {
  1141. # Youtube Music Auto-generated description
  1142. 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
  1143. 'info_dict': {
  1144. 'id': 'MgNrAu2pzNs',
  1145. 'ext': 'mp4',
  1146. 'title': 'Voyeur Girl',
  1147. 'description': 'md5:7ae382a65843d6df2685993e90a8628f',
  1148. 'upload_date': '20190312',
  1149. 'uploader': 'Stephen - Topic',
  1150. 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
  1151. 'artist': 'Stephen',
  1152. 'track': 'Voyeur Girl',
  1153. 'album': 'it\'s too much love to know my dear',
  1154. 'release_date': '20190313',
  1155. 'release_year': 2019,
  1156. },
  1157. 'params': {
  1158. 'skip_download': True,
  1159. },
  1160. },
  1161. {
  1162. 'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
  1163. 'only_matching': True,
  1164. },
  1165. {
  1166. # invalid -> valid video id redirection
  1167. 'url': 'DJztXj2GPfl',
  1168. 'info_dict': {
  1169. 'id': 'DJztXj2GPfk',
  1170. 'ext': 'mp4',
  1171. 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
  1172. 'description': 'md5:bf577a41da97918e94fa9798d9228825',
  1173. 'upload_date': '20090125',
  1174. 'uploader': 'Prochorowka',
  1175. 'uploader_id': 'Prochorowka',
  1176. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
  1177. 'artist': 'Panjabi MC',
  1178. 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
  1179. 'album': 'Beware of the Boys (Mundian To Bach Ke)',
  1180. },
  1181. 'params': {
  1182. 'skip_download': True,
  1183. },
  1184. 'skip': 'Video unavailable',
  1185. },
  1186. {
  1187. # empty description results in an empty string
  1188. 'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
  1189. 'info_dict': {
  1190. 'id': 'x41yOUIvK2k',
  1191. 'ext': 'mp4',
  1192. 'title': 'IMG 3456',
  1193. 'description': '',
  1194. 'upload_date': '20170613',
  1195. 'uploader': 'ElevageOrVert',
  1196. 'uploader_id': '@ElevageOrVert',
  1197. },
  1198. 'params': {
  1199. 'skip_download': True,
  1200. },
  1201. },
  1202. {
  1203. # with '};' inside yt initial data (see [1])
  1204. # see [2] for an example with '};' inside ytInitialPlayerResponse
  1205. # 1. https://github.com/ytdl-org/youtube-dl/issues/27093
  1206. # 2. https://github.com/ytdl-org/youtube-dl/issues/27216
  1207. 'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
  1208. 'info_dict': {
  1209. 'id': 'CHqg6qOn4no',
  1210. 'ext': 'mp4',
  1211. 'title': 'Part 77 Sort a list of simple types in c#',
  1212. 'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
  1213. 'upload_date': '20130831',
  1214. 'uploader': 'kudvenkat',
  1215. 'uploader_id': '@Csharp-video-tutorialsBlogspot',
  1216. },
  1217. 'params': {
  1218. 'skip_download': True,
  1219. },
  1220. },
  1221. {
  1222. # another example of '};' in ytInitialData
  1223. 'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
  1224. 'only_matching': True,
  1225. },
  1226. {
  1227. 'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
  1228. 'only_matching': True,
  1229. },
  1230. {
  1231. # https://github.com/ytdl-org/youtube-dl/pull/28094
  1232. 'url': 'OtqTfy26tG0',
  1233. 'info_dict': {
  1234. 'id': 'OtqTfy26tG0',
  1235. 'ext': 'mp4',
  1236. 'title': 'Burn Out',
  1237. 'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
  1238. 'upload_date': '20141120',
  1239. 'uploader': 'The Cinematic Orchestra - Topic',
  1240. 'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
  1241. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
  1242. 'artist': 'The Cinematic Orchestra',
  1243. 'track': 'Burn Out',
  1244. 'album': 'Every Day',
  1245. 'release_data': None,
  1246. 'release_year': None,
  1247. },
  1248. 'params': {
  1249. 'skip_download': True,
  1250. },
  1251. },
  1252. {
  1253. # controversial video, only works with bpctr when authenticated with cookies
  1254. 'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
  1255. 'only_matching': True,
  1256. },
  1257. {
  1258. # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
  1259. 'url': 'cBvYw8_A0vQ',
  1260. 'info_dict': {
  1261. 'id': 'cBvYw8_A0vQ',
  1262. 'ext': 'mp4',
  1263. 'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
  1264. 'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
  1265. 'upload_date': '20201120',
  1266. 'uploader': 'Walk around Japan',
  1267. 'uploader_id': '@walkaroundjapan7124',
  1268. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@walkaroundjapan7124',
  1269. },
  1270. 'params': {
  1271. 'skip_download': True,
  1272. },
  1273. },
  1274. {
  1275. # YT 'Shorts'
  1276. 'url': 'https://youtube.com/shorts/4L2J27mJ3Dc',
  1277. 'info_dict': {
  1278. 'id': '4L2J27mJ3Dc',
  1279. 'ext': 'mp4',
  1280. 'title': 'Midwest Squid Game #Shorts',
  1281. 'description': 'md5:976512b8a29269b93bbd8a61edc45a6d',
  1282. 'upload_date': '20211025',
  1283. 'uploader': 'Charlie Berens',
  1284. 'uploader_id': '@CharlieBerens',
  1285. },
  1286. 'params': {
  1287. 'skip_download': True,
  1288. },
  1289. },
  1290. ]
    # Known characteristics of YouTube's numeric "itag" format IDs, used to
    # supplement or correct the metadata found in the player response.
    # 'preference' < 0 demotes a format (3D, HLS) below the plain streams.
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
        '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
        '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
        '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
        '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
        '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        # 3D videos
        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
        # Apple HTTP Live Streaming
        '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
        # DASH mp4 video
        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'},  # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
        '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
        '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
        # Dash mp4 audio
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
        '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
        '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
        '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
        '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
        # Dash webm
        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        # Dash webm audio
        '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
        '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
        # Dash webm audio with opus inside
        '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
        '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
        '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
        # RTMP (unnamed)
        '_rtmp': {'protocol': 'rtmp'},
        # av01 video only formats sometimes served with "unknown" codecs
        '394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
        '395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
        '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
        '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
    }
  1386. @classmethod
  1387. def suitable(cls, url):
  1388. if parse_qs(url).get('list', [None])[0]:
  1389. return False
  1390. return super(YoutubeIE, cls).suitable(url)
    def __init__(self, *args, **kwargs):
        super(YoutubeIE, self).__init__(*args, **kwargs)
        # player_id -> downloaded player JavaScript source (see _get_player_code)
        self._code_cache = {}
        # (player_url, signature fingerprint) -> extracted signature function
        # (see _decrypt_signature)
        self._player_cache = {}
  1395. def _signature_cache_id(self, example_sig):
  1396. """ Return a string representation of a signature """
  1397. return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
  1398. @classmethod
  1399. def _extract_player_info(cls, player_url):
  1400. for player_re in cls._PLAYER_INFO_RE:
  1401. id_m = re.search(player_re, player_url)
  1402. if id_m:
  1403. break
  1404. else:
  1405. raise ExtractorError('Cannot identify player %r' % player_url)
  1406. return id_m.group('id')
  1407. def _get_player_code(self, video_id, player_url, player_id=None):
  1408. if not player_id:
  1409. player_id = self._extract_player_info(player_url)
  1410. if player_id not in self._code_cache:
  1411. self._code_cache[player_id] = self._download_webpage(
  1412. player_url, video_id,
  1413. note='Downloading player ' + player_id,
  1414. errnote='Download of %s failed' % player_url)
  1415. return self._code_cache[player_id]
  1416. def _extract_signature_function(self, video_id, player_url, example_sig):
  1417. player_id = self._extract_player_info(player_url)
  1418. # Read from filesystem cache
  1419. func_id = 'js_%s_%s' % (
  1420. player_id, self._signature_cache_id(example_sig))
  1421. assert os.path.basename(func_id) == func_id
  1422. cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
  1423. if cache_spec is not None:
  1424. return lambda s: ''.join(s[i] for i in cache_spec)
  1425. code = self._get_player_code(video_id, player_url, player_id)
  1426. res = self._parse_sig_js(code)
  1427. test_string = ''.join(map(compat_chr, range(len(example_sig))))
  1428. cache_res = res(test_string)
  1429. cache_spec = [ord(c) for c in cache_res]
  1430. self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
  1431. return res
    def _print_sig_code(self, func, example_sig):
        """Print Python source equivalent to the extracted signature function.

        Runs func on a probe string, records the permutation it performs and
        pretty-prints it as a sum of slice/index expressions (used by the
        --youtube-print-sig-code debugging option).
        """
        def gen_sig_code(idxs):
            def _genslice(start, end, step):
                # Render s[start:end+step:step], omitting parts that match
                # Python's slice defaults.
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)
            step = None
            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            # Collapse runs of consecutive indices (step +1 or -1) into slices;
            # emit isolated indices as s[n].
            # NOTE(review): if idxs has fewer than 2 elements, `i` below is
            # unbound — callers appear to always pass longer signatures.
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            # Flush the final element or the still-open slice.
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                ' return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
    def _parse_sig_js(self, jscode):
        """Locate the signature-scrambling function in the player JS and wrap
        it in a Python callable via JSInterpreter.
        """
        # Regexes covering the many ways the player JS has referenced its
        # signature function over the years (newest patterns first).
        funcname = self._search_regex(
            (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
             r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
             r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
             r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
             r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
             # Obsolete patterns
             r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
             r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
            jscode, 'Initial JS player signature function name', group='sig')
        jsi = JSInterpreter(jscode)
        initial_function = jsi.extract_function(funcname)
        # JSInterpreter functions take their arguments as a list
        return lambda s: initial_function([s])
  1490. def _decrypt_signature(self, s, video_id, player_url):
  1491. """Turn the encrypted s field into a working signature"""
  1492. if player_url is None:
  1493. raise ExtractorError('Cannot decrypt signature without player_url')
  1494. try:
  1495. player_id = (player_url, self._signature_cache_id(s))
  1496. if player_id not in self._player_cache:
  1497. func = self._extract_signature_function(
  1498. video_id, player_url, s
  1499. )
  1500. self._player_cache[player_id] = func
  1501. func = self._player_cache[player_id]
  1502. if self._downloader.params.get('youtube_print_sig_code'):
  1503. self._print_sig_code(func, s)
  1504. return func(s)
  1505. except Exception as e:
  1506. tb = traceback.format_exc()
  1507. raise ExtractorError(
  1508. 'Signature extraction failed: ' + tb, cause=e)
  1509. def _extract_player_url(self, webpage):
  1510. player_url = self._search_regex(
  1511. r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"',
  1512. webpage or '', 'player URL', fatal=False)
  1513. if not player_url:
  1514. return
  1515. if player_url.startswith('//'):
  1516. player_url = 'https:' + player_url
  1517. elif not re.match(r'https?://', player_url):
  1518. player_url = compat_urllib_parse.urljoin(
  1519. 'https://www.youtube.com', player_url)
  1520. return player_url
    # from yt-dlp
    # See also:
    # 1. https://github.com/ytdl-org/youtube-dl/issues/29326#issuecomment-894619419
    # 2. https://code.videolan.org/videolan/vlc/-/blob/4fb284e5af69aa9ac2100ccbdd3b88debec9987f/share/lua/playlist/youtube.lua#L116
    # 3. https://github.com/ytdl-org/youtube-dl/issues/30097#issuecomment-950157377
    def _extract_n_function_name(self, jscode):
        """Find the name of the "n" parameter descrambling function in jscode.

        The call site looks like ``.get("n"))&&(b=NAME(x)`` or
        ``.get("n"))&&(b=NAME[IDX](x)``; in the indexed form NAME is a JS
        array of function names that has to be resolved to the real name.
        """
        target = r'(?P<nfunc>[a-zA-Z_$][\w$]*)(?:\[(?P<idx>\d+)\])?'
        nfunc_and_idx = self._search_regex(
            r'\.get\("n"\)\)&&\(b=(%s)\([\w$]+\)' % (target, ),
            jscode, 'Initial JS player n function name')
        nfunc, idx = re.match(target, nfunc_and_idx).group('nfunc', 'idx')
        if not idx:
            # plain function reference, no array indirection
            return nfunc
        if int_or_none(idx) == 0:
            # common case: single-element alias array `var NAME=[real];`
            real_nfunc = self._search_regex(
                r'var %s\s*=\s*\[([a-zA-Z_$][\w$]*)\];' % (re.escape(nfunc), ), jscode,
                'Initial JS player n function alias ({nfunc}[{idx}])'.format(**locals()))
            if real_nfunc:
                return real_nfunc
        # general case: parse the whole array literal and pick element idx
        return self._parse_json(self._search_regex(
            r'var %s\s*=\s*(\[.+?\]);' % (re.escape(nfunc), ), jscode,
            'Initial JS player n function name ({nfunc}[{idx}])'.format(**locals())), nfunc, transform_source=js_to_json)[int(idx)]
    def _extract_n_function(self, video_id, player_url):
        """Return a callable that computes the descrambled "n" value.

        The extracted function code is cached on disk keyed by the player ID,
        so the player JS is downloaded and parsed only once per player version.
        """
        player_id = self._extract_player_info(player_url)
        func_code = self._downloader.cache.load('youtube-nsig', player_id)

        if func_code:
            jsi = JSInterpreter(func_code)
        else:
            # cache miss: download the player and extract the function code
            jscode = self._get_player_code(video_id, player_url, player_id)
            funcname = self._extract_n_function_name(jscode)
            jsi = JSInterpreter(jscode)
            func_code = jsi.extract_function_code(funcname)
            self._downloader.cache.store('youtube-nsig', player_id, func_code)

        if self._downloader.params.get('youtube_print_sig_code'):
            # func_code[1] is the function body text
            self.to_screen('Extracted nsig function from {0}:\n{1}\n'.format(player_id, func_code[1]))

        # func_code is an (argnames, code) pair accepted by
        # extract_function_from_code; the built function takes an arg list
        return lambda s: jsi.extract_function_from_code(*func_code)([s])
    def _n_descramble(self, n_param, player_url, video_id):
        """Compute the response to YT's "n" parameter challenge,
        or None

        Args:
        n_param     -- challenge string that is the value of the
                       URL's "n" query parameter
        player_url  -- URL of YT player JS
        video_id
        """
        # memoise the descrambled value per n_param
        sig_id = ('nsig_value', n_param)
        if sig_id in self._player_cache:
            return self._player_cache[sig_id]

        try:
            # memoise the compiled descrambler per player URL
            player_id = ('nsig', player_url)
            if player_id not in self._player_cache:
                self._player_cache[player_id] = self._extract_n_function(video_id, player_url)
            func = self._player_cache[player_id]
            ret = func(n_param)
            # the JS interpreter signals an unsupported construct by
            # returning a string with this prefix instead of raising
            if ret.startswith('enhanced_except_'):
                raise ExtractorError('Unhandled exception in decode')
            self._player_cache[sig_id] = ret
            if self._downloader.params.get('verbose', False):
                self._downloader.to_screen('[debug] [%s] %s' % (self.IE_NAME, 'Decrypted nsig {0} => {1}'.format(n_param, self._player_cache[sig_id])))
            return self._player_cache[sig_id]
        except Exception as e:
            # best-effort: a failure only means throttled downloads, so
            # warn (with traceback) and return None rather than abort
            self._downloader.report_warning(
                '[%s] %s (%s %s)' % (
                    self.IE_NAME,
                    'Unable to decode n-parameter: download likely to be throttled',
                    error_to_compat_str(e),
                    traceback.format_exc()))
    def _unthrottle_format_urls(self, video_id, player_url, formats):
        # Replace the "n" query parameter of each format URL with its
        # descrambled value; without this YT throttles the transfer speed.
        for fmt in formats:
            parsed_fmt_url = compat_urllib_parse.urlparse(fmt['url'])
            n_param = compat_parse_qs(parsed_fmt_url.query).get('n')
            if not n_param:
                continue
            n_param = n_param[-1]
            n_response = self._n_descramble(n_param, player_url, video_id)
            if n_response is None:
                # give up if descrambling failed
                break
            # rewrite the top-level URL and, if present, every fragment URL
            for fmt_dct in traverse_obj(fmt, (None, (None, ('fragments', Ellipsis))), expected_type=dict):
                fmt_dct['url'] = update_url(
                    fmt_dct['url'], query_update={'n': [n_response]})
  1602. # from yt-dlp, with tweaks
  1603. def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
  1604. """
  1605. Extract signatureTimestamp (sts)
  1606. Required to tell API what sig/player version is in use.
  1607. """
  1608. sts = int_or_none(ytcfg.get('STS')) if isinstance(ytcfg, dict) else None
  1609. if not sts:
  1610. # Attempt to extract from player
  1611. if player_url is None:
  1612. error_msg = 'Cannot extract signature timestamp without player_url.'
  1613. if fatal:
  1614. raise ExtractorError(error_msg)
  1615. self._downloader.report_warning(error_msg)
  1616. return
  1617. code = self._get_player_code(video_id, player_url)
  1618. sts = int_or_none(self._search_regex(
  1619. r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code or '',
  1620. 'JS player signature timestamp', group='sts', fatal=fatal))
  1621. return sts
  1622. def _mark_watched(self, video_id, player_response):
  1623. playback_url = url_or_none(try_get(
  1624. player_response,
  1625. lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']))
  1626. if not playback_url:
  1627. return
  1628. # cpn generation algorithm is reverse engineered from base.js.
  1629. # In fact it works even with dummy cpn.
  1630. CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
  1631. cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
  1632. playback_url = update_url(
  1633. playback_url, query_update={
  1634. 'ver': ['2'],
  1635. 'cpn': [cpn],
  1636. })
  1637. self._download_webpage(
  1638. playback_url, video_id, 'Marking watched',
  1639. 'Unable to mark watched', fatal=False)
  1640. @staticmethod
  1641. def _extract_urls(webpage):
  1642. # Embedded YouTube player
  1643. entries = [
  1644. unescapeHTML(mobj.group('url'))
  1645. for mobj in re.finditer(r'''(?x)
  1646. (?:
  1647. <iframe[^>]+?src=|
  1648. data-video-url=|
  1649. <embed[^>]+?src=|
  1650. embedSWF\(?:\s*|
  1651. <object[^>]+data=|
  1652. new\s+SWFObject\(
  1653. )
  1654. (["\'])
  1655. (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
  1656. (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
  1657. \1''', webpage)]
  1658. # lazyYT YouTube embed
  1659. entries.extend(list(map(
  1660. unescapeHTML,
  1661. re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
  1662. # Wordpress "YouTube Video Importer" plugin
  1663. matches = re.findall(r'''(?x)<div[^>]+
  1664. class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
  1665. data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
  1666. entries.extend(m[-1] for m in matches)
  1667. return entries
  1668. @staticmethod
  1669. def _extract_url(webpage):
  1670. urls = YoutubeIE._extract_urls(webpage)
  1671. return urls[0] if urls else None
  1672. @classmethod
  1673. def extract_id(cls, url):
  1674. mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
  1675. if mobj is None:
  1676. raise ExtractorError('Invalid URL: %s' % url)
  1677. video_id = mobj.group(2)
  1678. return video_id
    def _extract_chapters_from_json(self, data, video_id, duration):
        """Build a chapters list from the player-overlay data, or None.

        Each chapter dict has start_time/end_time (seconds) and title; the
        end of one chapter is the start of the next, the last one ends at
        `duration`.
        """
        chapters_list = try_get(
            data,
            lambda x: x['playerOverlays']
                       ['playerOverlayRenderer']
                       ['decoratedPlayerBarRenderer']
                       ['decoratedPlayerBarRenderer']
                       ['playerBar']
                       ['chapteredPlayerBarRenderer']
                       ['chapters'],
            list)
        if not chapters_list:
            return

        def chapter_time(chapter):
            # start time in seconds (source value is milliseconds)
            return float_or_none(
                try_get(
                    chapter,
                    lambda x: x['chapterRenderer']['timeRangeStartMillis'],
                    int),
                scale=1000)

        chapters = []
        # next_num is the index of the following chapter (enumerate start=1)
        for next_num, chapter in enumerate(chapters_list, start=1):
            start_time = chapter_time(chapter)
            if start_time is None:
                continue
            # end = start of the next chapter, or total duration for the last
            end_time = (chapter_time(chapters_list[next_num])
                        if next_num < len(chapters_list) else duration)
            if end_time is None:
                continue
            title = try_get(
                chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
                compat_str)
            chapters.append({
                'start_time': start_time,
                'end_time': end_time,
                'title': title,
            })
        return chapters
  1717. def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
  1718. return self._parse_json(self._search_regex(
  1719. (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
  1720. regex), webpage, name, default='{}'), video_id, fatal=False)
  1721. def _real_extract(self, url):
  1722. url, smuggled_data = unsmuggle_url(url, {})
  1723. video_id = self._match_id(url)
  1724. base_url = self.http_scheme() + '//www.youtube.com/'
  1725. webpage_url = base_url + 'watch?v=' + video_id
  1726. webpage = self._download_webpage(
  1727. webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
  1728. player_response = None
  1729. player_url = None
  1730. if webpage:
  1731. player_response = self._extract_yt_initial_variable(
  1732. webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
  1733. video_id, 'initial player response')
  1734. if not player_response:
  1735. player_response = self._call_api(
  1736. 'player', {'videoId': video_id}, video_id)
  1737. def is_agegated(playability):
  1738. if not isinstance(playability, dict):
  1739. return
  1740. if playability.get('desktopLegacyAgeGateReason'):
  1741. return True
  1742. reasons = filter(None, (playability.get(r) for r in ('status', 'reason')))
  1743. AGE_GATE_REASONS = (
  1744. 'confirm your age', 'age-restricted', 'inappropriate', # reason
  1745. 'age_verification_required', 'age_check_required', # status
  1746. )
  1747. return any(expected in reason for expected in AGE_GATE_REASONS for reason in reasons)
  1748. def get_playability_status(response):
  1749. return try_get(response, lambda x: x['playabilityStatus'], dict) or {}
  1750. playability_status = get_playability_status(player_response)
  1751. if (is_agegated(playability_status)
  1752. and int_or_none(self._downloader.params.get('age_limit'), default=18) >= 18):
  1753. self.report_age_confirmation()
  1754. # Thanks: https://github.com/yt-dlp/yt-dlp/pull/3233
  1755. pb_context = {'html5Preference': 'HTML5_PREF_WANTS'}
  1756. # Use signatureTimestamp if available
  1757. # Thanks https://github.com/ytdl-org/youtube-dl/issues/31034#issuecomment-1160718026
  1758. player_url = self._extract_player_url(webpage)
  1759. ytcfg = self._extract_ytcfg(video_id, webpage)
  1760. sts = self._extract_signature_timestamp(video_id, player_url, ytcfg)
  1761. if sts:
  1762. pb_context['signatureTimestamp'] = sts
  1763. query = {
  1764. 'playbackContext': {'contentPlaybackContext': pb_context},
  1765. 'contentCheckOk': True,
  1766. 'racyCheckOk': True,
  1767. 'context': {
  1768. 'client': {'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER', 'clientVersion': '2.0', 'hl': 'en', 'clientScreen': 'EMBED'},
  1769. 'thirdParty': {'embedUrl': 'https://google.com'},
  1770. },
  1771. 'videoId': video_id,
  1772. }
  1773. headers = {
  1774. 'X-YouTube-Client-Name': '85',
  1775. 'X-YouTube-Client-Version': '2.0',
  1776. 'Origin': 'https://www.youtube.com'
  1777. }
  1778. video_info = self._call_api('player', query, video_id, fatal=False, headers=headers)
  1779. age_gate_status = get_playability_status(video_info)
  1780. if age_gate_status.get('status') == 'OK':
  1781. player_response = video_info
  1782. playability_status = age_gate_status
  1783. trailer_video_id = try_get(
  1784. playability_status,
  1785. lambda x: x['errorScreen']['playerLegacyDesktopYpcTrailerRenderer']['trailerVideoId'],
  1786. compat_str)
  1787. if trailer_video_id:
  1788. return self.url_result(
  1789. trailer_video_id, self.ie_key(), trailer_video_id)
  1790. def get_text(x):
  1791. if not x:
  1792. return
  1793. text = x.get('simpleText')
  1794. if text and isinstance(text, compat_str):
  1795. return text
  1796. runs = x.get('runs')
  1797. if not isinstance(runs, list):
  1798. return
  1799. return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])
  1800. search_meta = (
  1801. lambda x: self._html_search_meta(x, webpage, default=None)) \
  1802. if webpage else lambda x: None
  1803. video_details = player_response.get('videoDetails') or {}
  1804. microformat = try_get(
  1805. player_response,
  1806. lambda x: x['microformat']['playerMicroformatRenderer'],
  1807. dict) or {}
  1808. video_title = video_details.get('title') \
  1809. or get_text(microformat.get('title')) \
  1810. or search_meta(['og:title', 'twitter:title', 'title'])
  1811. video_description = video_details.get('shortDescription')
  1812. if not smuggled_data.get('force_singlefeed', False):
  1813. if not self._downloader.params.get('noplaylist'):
  1814. multifeed_metadata_list = try_get(
  1815. player_response,
  1816. lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
  1817. compat_str)
  1818. if multifeed_metadata_list:
  1819. entries = []
  1820. feed_ids = []
  1821. for feed in multifeed_metadata_list.split(','):
  1822. # Unquote should take place before split on comma (,) since textual
  1823. # fields may contain comma as well (see
  1824. # https://github.com/ytdl-org/youtube-dl/issues/8536)
  1825. feed_data = compat_parse_qs(
  1826. compat_urllib_parse_unquote_plus(feed))
  1827. def feed_entry(name):
  1828. return try_get(
  1829. feed_data, lambda x: x[name][0], compat_str)
  1830. feed_id = feed_entry('id')
  1831. if not feed_id:
  1832. continue
  1833. feed_title = feed_entry('title')
  1834. title = video_title
  1835. if feed_title:
  1836. title += ' (%s)' % feed_title
  1837. entries.append({
  1838. '_type': 'url_transparent',
  1839. 'ie_key': 'Youtube',
  1840. 'url': smuggle_url(
  1841. base_url + 'watch?v=' + feed_data['id'][0],
  1842. {'force_singlefeed': True}),
  1843. 'title': title,
  1844. })
  1845. feed_ids.append(feed_id)
  1846. self.to_screen(
  1847. 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
  1848. % (', '.join(feed_ids), video_id))
  1849. return self.playlist_result(
  1850. entries, video_id, video_title, video_description)
  1851. else:
  1852. self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
  1853. formats = []
  1854. itags = []
  1855. itag_qualities = {}
  1856. q = qualities(['tiny', 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'])
  1857. streaming_data = player_response.get('streamingData') or {}
  1858. streaming_formats = streaming_data.get('formats') or []
  1859. streaming_formats.extend(streaming_data.get('adaptiveFormats') or [])
  1860. for fmt in streaming_formats:
  1861. if fmt.get('targetDurationSec') or fmt.get('drmFamilies'):
  1862. continue
  1863. itag = str_or_none(fmt.get('itag'))
  1864. quality = fmt.get('quality')
  1865. if itag and quality:
  1866. itag_qualities[itag] = quality
  1867. # FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment
  1868. # (adding `&sq=0` to the URL) and parsing emsg box to determine the
  1869. # number of fragment that would subsequently requested with (`&sq=N`)
  1870. if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
  1871. continue
  1872. fmt_url = fmt.get('url')
  1873. if not fmt_url:
  1874. sc = compat_parse_qs(fmt.get('signatureCipher'))
  1875. fmt_url = url_or_none(try_get(sc, lambda x: x['url'][0]))
  1876. encrypted_sig = try_get(sc, lambda x: x['s'][0])
  1877. if not (sc and fmt_url and encrypted_sig):
  1878. continue
  1879. if not player_url:
  1880. player_url = self._extract_player_url(webpage)
  1881. if not player_url:
  1882. continue
  1883. signature = self._decrypt_signature(sc['s'][0], video_id, player_url)
  1884. sp = try_get(sc, lambda x: x['sp'][0]) or 'signature'
  1885. fmt_url += '&' + sp + '=' + signature
  1886. if itag:
  1887. itags.append(itag)
  1888. tbr = float_or_none(
  1889. fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
  1890. dct = {
  1891. 'asr': int_or_none(fmt.get('audioSampleRate')),
  1892. 'filesize': int_or_none(fmt.get('contentLength')),
  1893. 'format_id': itag,
  1894. 'format_note': fmt.get('qualityLabel') or quality,
  1895. 'fps': int_or_none(fmt.get('fps')),
  1896. 'height': int_or_none(fmt.get('height')),
  1897. 'quality': q(quality),
  1898. 'tbr': tbr,
  1899. 'url': fmt_url,
  1900. 'width': fmt.get('width'),
  1901. }
  1902. mimetype = fmt.get('mimeType')
  1903. if mimetype:
  1904. mobj = re.match(
  1905. r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', mimetype)
  1906. if mobj:
  1907. dct['ext'] = mimetype2ext(mobj.group(1))
  1908. dct.update(parse_codecs(mobj.group(2)))
  1909. no_audio = dct.get('acodec') == 'none'
  1910. no_video = dct.get('vcodec') == 'none'
  1911. if no_audio:
  1912. dct['vbr'] = tbr
  1913. if no_video:
  1914. dct['abr'] = tbr
  1915. if no_audio or no_video:
  1916. CHUNK_SIZE = 10 << 20
  1917. # avoid Youtube throttling
  1918. dct.update({
  1919. 'protocol': 'http_dash_segments',
  1920. 'fragments': [{
  1921. 'url': update_url_query(dct['url'], {
  1922. 'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, dct['filesize']))
  1923. })
  1924. } for range_start in range(0, dct['filesize'], CHUNK_SIZE)]
  1925. } if dct['filesize'] else {
  1926. 'downloader_options': {'http_chunk_size': CHUNK_SIZE} # No longer useful?
  1927. })
  1928. if dct.get('ext'):
  1929. dct['container'] = dct['ext'] + '_dash'
  1930. formats.append(dct)
  1931. hls_manifest_url = streaming_data.get('hlsManifestUrl')
  1932. if hls_manifest_url:
  1933. for f in self._extract_m3u8_formats(
  1934. hls_manifest_url, video_id, 'mp4', fatal=False):
  1935. itag = self._search_regex(
  1936. r'/itag/(\d+)', f['url'], 'itag', default=None)
  1937. if itag:
  1938. f['format_id'] = itag
  1939. formats.append(f)
  1940. if self._downloader.params.get('youtube_include_dash_manifest', True):
  1941. dash_manifest_url = streaming_data.get('dashManifestUrl')
  1942. if dash_manifest_url:
  1943. for f in self._extract_mpd_formats(
  1944. dash_manifest_url, video_id, fatal=False):
  1945. itag = f['format_id']
  1946. if itag in itags:
  1947. continue
  1948. if itag in itag_qualities:
  1949. f['quality'] = q(itag_qualities[itag])
  1950. filesize = int_or_none(self._search_regex(
  1951. r'/clen/(\d+)', f.get('fragment_base_url')
  1952. or f['url'], 'file size', default=None))
  1953. if filesize:
  1954. f['filesize'] = filesize
  1955. formats.append(f)
  1956. if not formats:
  1957. if streaming_data.get('licenseInfos'):
  1958. raise ExtractorError(
  1959. 'This video is DRM protected.', expected=True)
  1960. pemr = try_get(
  1961. playability_status,
  1962. lambda x: x['errorScreen']['playerErrorMessageRenderer'],
  1963. dict) or {}
  1964. reason = get_text(pemr.get('reason')) or playability_status.get('reason')
  1965. subreason = pemr.get('subreason')
  1966. if subreason:
  1967. subreason = clean_html(get_text(subreason))
  1968. if subreason == 'The uploader has not made this video available in your country.':
  1969. countries = microformat.get('availableCountries')
  1970. if not countries:
  1971. regions_allowed = search_meta('regionsAllowed')
  1972. countries = regions_allowed.split(',') if regions_allowed else None
  1973. self.raise_geo_restricted(
  1974. subreason, countries)
  1975. reason += '\n' + subreason
  1976. if reason:
  1977. raise ExtractorError(reason, expected=True)
  1978. self._sort_formats(formats)
  1979. keywords = video_details.get('keywords') or []
  1980. if not keywords and webpage:
  1981. keywords = [
  1982. unescapeHTML(m.group('content'))
  1983. for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
  1984. for keyword in keywords:
  1985. if keyword.startswith('yt:stretch='):
  1986. mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
  1987. if mobj:
  1988. # NB: float is intentional for forcing float division
  1989. w, h = (float(v) for v in mobj.groups())
  1990. if w > 0 and h > 0:
  1991. ratio = w / h
  1992. for f in formats:
  1993. if f.get('vcodec') != 'none':
  1994. f['stretched_ratio'] = ratio
  1995. break
  1996. thumbnails = []
  1997. for container in (video_details, microformat):
  1998. for thumbnail in try_get(
  1999. container,
  2000. lambda x: x['thumbnail']['thumbnails'], list) or []:
  2001. thumbnail_url = url_or_none(thumbnail.get('url'))
  2002. if not thumbnail_url:
  2003. continue
  2004. thumbnails.append({
  2005. 'height': int_or_none(thumbnail.get('height')),
  2006. 'url': update_url(thumbnail_url, query=None, fragment=None),
  2007. 'width': int_or_none(thumbnail.get('width')),
  2008. })
  2009. if thumbnails:
  2010. break
  2011. else:
  2012. thumbnail = search_meta(['og:image', 'twitter:image'])
  2013. if thumbnail:
  2014. thumbnails = [{'url': thumbnail}]
  2015. category = microformat.get('category') or search_meta('genre')
  2016. channel_id = self._extract_channel_id(
  2017. webpage, videodetails=video_details, metadata=microformat)
  2018. duration = int_or_none(
  2019. video_details.get('lengthSeconds')
  2020. or microformat.get('lengthSeconds')) \
  2021. or parse_duration(search_meta('duration'))
  2022. is_live = video_details.get('isLive')
  2023. owner_profile_url = self._yt_urljoin(self._extract_author_var(
  2024. webpage, 'url', videodetails=video_details, metadata=microformat))
  2025. uploader = self._extract_author_var(
  2026. webpage, 'name', videodetails=video_details, metadata=microformat)
  2027. if not player_url:
  2028. player_url = self._extract_player_url(webpage)
  2029. self._unthrottle_format_urls(video_id, player_url, formats)
  2030. info = {
  2031. 'id': video_id,
  2032. 'title': self._live_title(video_title) if is_live else video_title,
  2033. 'formats': formats,
  2034. 'thumbnails': thumbnails,
  2035. 'description': video_description,
  2036. 'upload_date': unified_strdate(
  2037. microformat.get('uploadDate')
  2038. or search_meta('uploadDate')),
  2039. 'uploader': uploader,
  2040. 'channel_id': channel_id,
  2041. 'duration': duration,
  2042. 'view_count': int_or_none(
  2043. video_details.get('viewCount')
  2044. or microformat.get('viewCount')
  2045. or search_meta('interactionCount')),
  2046. 'average_rating': float_or_none(video_details.get('averageRating')),
  2047. 'age_limit': 18 if (
  2048. microformat.get('isFamilySafe') is False
  2049. or search_meta('isFamilyFriendly') == 'false'
  2050. or search_meta('og:restrictions:age') == '18+') else 0,
  2051. 'webpage_url': webpage_url,
  2052. 'categories': [category] if category else None,
  2053. 'tags': keywords,
  2054. 'is_live': is_live,
  2055. }
  2056. pctr = try_get(
  2057. player_response,
  2058. lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict)
  2059. if pctr:
  2060. def process_language(container, base_url, lang_code, query):
  2061. lang_subs = []
  2062. for fmt in self._SUBTITLE_FORMATS:
  2063. query.update({
  2064. 'fmt': fmt,
  2065. })
  2066. lang_subs.append({
  2067. 'ext': fmt,
  2068. 'url': update_url_query(base_url, query),
  2069. })
  2070. container[lang_code] = lang_subs
  2071. subtitles = {}
  2072. for caption_track in (pctr.get('captionTracks') or []):
  2073. base_url = caption_track.get('baseUrl')
  2074. if not base_url:
  2075. continue
  2076. if caption_track.get('kind') != 'asr':
  2077. lang_code = caption_track.get('languageCode')
  2078. if not lang_code:
  2079. continue
  2080. process_language(
  2081. subtitles, base_url, lang_code, {})
  2082. continue
  2083. automatic_captions = {}
  2084. for translation_language in (pctr.get('translationLanguages') or []):
  2085. translation_language_code = translation_language.get('languageCode')
  2086. if not translation_language_code:
  2087. continue
  2088. process_language(
  2089. automatic_captions, base_url, translation_language_code,
  2090. {'tlang': translation_language_code})
  2091. info['automatic_captions'] = automatic_captions
  2092. info['subtitles'] = subtitles
  2093. parsed_url = compat_urllib_parse_urlparse(url)
  2094. for component in [parsed_url.fragment, parsed_url.query]:
  2095. query = compat_parse_qs(component)
  2096. for k, v in query.items():
  2097. for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
  2098. d_k += '_time'
  2099. if d_k not in info and k in s_ks:
  2100. info[d_k] = parse_duration(query[k][0])
  2101. if video_description:
  2102. # Youtube Music Auto-generated description
  2103. mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
  2104. if mobj:
  2105. release_year = mobj.group('release_year')
  2106. release_date = mobj.group('release_date')
  2107. if release_date:
  2108. release_date = release_date.replace('-', '')
  2109. if not release_year:
  2110. release_year = release_date[:4]
  2111. info.update({
  2112. 'album': mobj.group('album'.strip()),
  2113. 'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
  2114. 'track': mobj.group('track').strip(),
  2115. 'release_date': release_date,
  2116. 'release_year': int_or_none(release_year),
  2117. })
  2118. initial_data = None
  2119. if webpage:
  2120. initial_data = self._extract_yt_initial_variable(
  2121. webpage, self._YT_INITIAL_DATA_RE, video_id,
  2122. 'yt initial data')
  2123. if not initial_data:
  2124. initial_data = self._call_api(
  2125. 'next', {'videoId': video_id}, video_id, fatal=False)
  2126. if initial_data:
  2127. chapters = self._extract_chapters_from_json(
  2128. initial_data, video_id, duration)
  2129. if not chapters:
  2130. for engagment_pannel in (initial_data.get('engagementPanels') or []):
  2131. contents = try_get(
  2132. engagment_pannel, lambda x: x['engagementPanelSectionListRenderer']['content']['macroMarkersListRenderer']['contents'],
  2133. list)
  2134. if not contents:
  2135. continue
  2136. def chapter_time(mmlir):
  2137. return parse_duration(
  2138. get_text(mmlir.get('timeDescription')))
  2139. chapters = []
  2140. for next_num, content in enumerate(contents, start=1):
  2141. mmlir = content.get('macroMarkersListItemRenderer') or {}
  2142. start_time = chapter_time(mmlir)
  2143. end_time = chapter_time(try_get(
  2144. contents, lambda x: x[next_num]['macroMarkersListItemRenderer'])) \
  2145. if next_num < len(contents) else duration
  2146. if start_time is None or end_time is None:
  2147. continue
  2148. chapters.append({
  2149. 'start_time': start_time,
  2150. 'end_time': end_time,
  2151. 'title': get_text(mmlir.get('title')),
  2152. })
  2153. if chapters:
  2154. break
  2155. if chapters:
  2156. info['chapters'] = chapters
  2157. contents = try_get(
  2158. initial_data,
  2159. lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'],
  2160. list) or []
  2161. if not info['channel_id']:
  2162. channel_id = self._extract_channel_id('', renderers=contents)
  2163. if not info['uploader']:
  2164. info['uploader'] = self._extract_author_var('', 'name', renderers=contents)
  2165. if not owner_profile_url:
  2166. owner_profile_url = self._yt_urljoin(self._extract_author_var('', 'url', renderers=contents))
  2167. for content in contents:
  2168. vpir = content.get('videoPrimaryInfoRenderer')
  2169. if vpir:
  2170. stl = vpir.get('superTitleLink')
  2171. if stl:
  2172. stl = get_text(stl)
  2173. if try_get(
  2174. vpir,
  2175. lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
  2176. info['location'] = stl
  2177. else:
  2178. # •? doesn't match, but [•]? does; \xa0 = non-breaking space
  2179. mobj = re.search(r'([^\xa0\s].*?)[\xa0\s]*S(\d+)[\xa0\s]*[•]?[\xa0\s]*E(\d+)', stl)
  2180. if mobj:
  2181. info.update({
  2182. 'series': mobj.group(1),
  2183. 'season_number': int(mobj.group(2)),
  2184. 'episode_number': int(mobj.group(3)),
  2185. })
  2186. for tlb in (try_get(
  2187. vpir,
  2188. lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
  2189. list) or []):
  2190. tbr = traverse_obj(tlb, ('segmentedLikeDislikeButtonRenderer', 'likeButton', 'toggleButtonRenderer'), 'toggleButtonRenderer') or {}
  2191. for getter, regex in [(
  2192. lambda x: x['defaultText']['accessibility']['accessibilityData'],
  2193. r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
  2194. lambda x: x['accessibility'],
  2195. lambda x: x['accessibilityData']['accessibilityData'],
  2196. ], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
  2197. label = (try_get(tbr, getter, dict) or {}).get('label')
  2198. if label:
  2199. mobj = re.match(regex, label)
  2200. if mobj:
  2201. info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
  2202. break
  2203. sbr_tooltip = try_get(
  2204. vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
  2205. if sbr_tooltip:
  2206. # however dislike_count was hidden by YT, as if there could ever be dislikable content on YT
  2207. like_count, dislike_count = sbr_tooltip.split(' / ')
  2208. info.update({
  2209. 'like_count': str_to_int(like_count),
  2210. 'dislike_count': str_to_int(dislike_count),
  2211. })
  2212. vsir = content.get('videoSecondaryInfoRenderer')
  2213. if vsir:
  2214. rows = try_get(
  2215. vsir,
  2216. lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
  2217. list) or []
  2218. multiple_songs = False
  2219. for row in rows:
  2220. if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
  2221. multiple_songs = True
  2222. break
  2223. for row in rows:
  2224. mrr = row.get('metadataRowRenderer') or {}
  2225. mrr_title = mrr.get('title')
  2226. if not mrr_title:
  2227. continue
  2228. mrr_title = get_text(mrr['title'])
  2229. mrr_contents_text = get_text(mrr['contents'][0])
  2230. if mrr_title == 'License':
  2231. info['license'] = mrr_contents_text
  2232. elif not multiple_songs:
  2233. if mrr_title == 'Album':
  2234. info['album'] = mrr_contents_text
  2235. elif mrr_title == 'Artist':
  2236. info['artist'] = mrr_contents_text
  2237. elif mrr_title == 'Song':
  2238. info['track'] = mrr_contents_text
  2239. # this is not extraction but spelunking!
  2240. carousel_lockups = traverse_obj(
  2241. initial_data,
  2242. ('engagementPanels', Ellipsis, 'engagementPanelSectionListRenderer',
  2243. 'content', 'structuredDescriptionContentRenderer', 'items', Ellipsis,
  2244. 'videoDescriptionMusicSectionRenderer', 'carouselLockups', Ellipsis),
  2245. expected_type=dict) or []
  2246. # try to reproduce logic from metadataRowContainerRenderer above (if it still is)
  2247. fields = (('ALBUM', 'album'), ('ARTIST', 'artist'), ('SONG', 'track'), ('LICENSES', 'license'))
  2248. # multiple_songs ?
  2249. if len(carousel_lockups) > 1:
  2250. fields = fields[-1:]
  2251. for info_row in traverse_obj(
  2252. carousel_lockups,
  2253. (0, 'carouselLockupRenderer', 'infoRows', Ellipsis, 'infoRowRenderer'),
  2254. expected_type=dict):
  2255. row_title = traverse_obj(info_row, ('title', 'simpleText'))
  2256. row_text = traverse_obj(info_row, 'defaultMetadata', 'expandedMetadata', expected_type=get_text)
  2257. if not row_text:
  2258. continue
  2259. for name, field in fields:
  2260. if name == row_title and not info.get(field):
  2261. info[field] = row_text
  2262. for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
  2263. v = info.get(s_k)
  2264. if v:
  2265. info[d_k] = v
  2266. self.mark_watched(video_id, player_response)
  2267. return merge_dicts(
  2268. info, {
  2269. 'uploader_id': self._extract_uploader_id(owner_profile_url),
  2270. 'uploader_url': owner_profile_url,
  2271. 'channel_id': channel_id,
  2272. 'channel_url': channel_id and self._yt_urljoin('/channel/' + channel_id),
  2273. 'channel': info['uploader'],
  2274. })
class YoutubeTabIE(YoutubeBaseInfoExtractor):
    IE_DESC = 'YouTube.com tab'
    # Matches channel/c/user/feed/hashtag pages, playlist and watch URLs that
    # carry a ?list= parameter, and bare vanity paths on youtube(kids).com or
    # invidio.us; the negative lookahead keeps single-video endpoints
    # (watch/embed/v/e/results) for YoutubeIE.
    _VALID_URL = r'''(?x)
                    https?://
                        (?:\w+\.)?
                        (?:
                            youtube(?:kids)?\.com|
                            invidio\.us
                        )/
                        (?:
                            (?:channel|c|user|feed|hashtag)/|
                            (?:playlist|watch)\?.*?\blist=|
                            (?!(?:watch|embed|v|e|results)\b)
                        )
                        (?P<id>[^/?\#&]+)
                    '''
    IE_NAME = 'youtube:tab'
  2292. _TESTS = [{
  2293. # Shorts
  2294. 'url': 'https://www.youtube.com/@SuperCooperShorts/shorts',
  2295. 'playlist_mincount': 5,
  2296. 'info_dict': {
  2297. 'description': 'Short clips from Super Cooper Sundays!',
  2298. 'id': 'UCKMA8kHZ8bPYpnMNaUSxfEQ',
  2299. 'title': 'Super Cooper Shorts - Shorts',
  2300. 'uploader': 'Super Cooper Shorts',
  2301. 'uploader_id': '@SuperCooperShorts',
  2302. }
  2303. }, {
  2304. # Channel that does not have a Shorts tab. Test should just download videos on Home tab instead
  2305. 'url': 'https://www.youtube.com/@emergencyawesome/shorts',
  2306. 'info_dict': {
  2307. 'description': 'md5:592c080c06fef4de3c902c4a8eecd850',
  2308. 'id': 'UCDiFRMQWpcp8_KD4vwIVicw',
  2309. 'title': 'Emergency Awesome - Home',
  2310. },
  2311. 'playlist_mincount': 5,
  2312. 'skip': 'new test page needed to replace `Emergency Awesome - Shorts`',
  2313. }, {
  2314. # playlists, multipage
  2315. 'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
  2316. 'playlist_mincount': 94,
  2317. 'info_dict': {
  2318. 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
  2319. 'title': 'Igor Kleiner - Playlists',
  2320. 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
  2321. 'uploader': 'Igor Kleiner',
  2322. 'uploader_id': '@IgorDataScience',
  2323. },
  2324. }, {
  2325. # playlists, multipage, different order
  2326. 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
  2327. 'playlist_mincount': 94,
  2328. 'info_dict': {
  2329. 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
  2330. 'title': 'Igor Kleiner - Playlists',
  2331. 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
  2332. 'uploader': 'Igor Kleiner',
  2333. 'uploader_id': '@IgorDataScience',
  2334. },
  2335. }, {
  2336. # playlists, series
  2337. 'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
  2338. 'playlist_mincount': 5,
  2339. 'info_dict': {
  2340. 'id': 'UCYO_jab_esuFRV4b17AJtAw',
  2341. 'title': '3Blue1Brown - Playlists',
  2342. 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
  2343. 'uploader': '3Blue1Brown',
  2344. 'uploader_id': '@3blue1brown',
  2345. },
  2346. }, {
  2347. # playlists, singlepage
  2348. 'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
  2349. 'playlist_mincount': 4,
  2350. 'info_dict': {
  2351. 'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
  2352. 'title': 'ThirstForScience - Playlists',
  2353. 'description': 'md5:609399d937ea957b0f53cbffb747a14c',
  2354. 'uploader': 'ThirstForScience',
  2355. 'uploader_id': '@ThirstForScience',
  2356. }
  2357. }, {
  2358. 'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
  2359. 'only_matching': True,
  2360. }, {
  2361. # basic, single video playlist
  2362. 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2363. 'info_dict': {
  2364. 'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2365. 'title': 'youtube-dl public playlist',
  2366. 'uploader': 'Sergey M.',
  2367. 'uploader_id': '@sergeym.6173',
  2368. 'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
  2369. },
  2370. 'playlist_count': 1,
  2371. }, {
  2372. # empty playlist
  2373. 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
  2374. 'info_dict': {
  2375. 'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
  2376. 'title': 'youtube-dl empty playlist',
  2377. 'uploader': 'Sergey M.',
  2378. 'uploader_id': '@sergeym.6173',
  2379. 'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
  2380. },
  2381. 'playlist_count': 0,
  2382. }, {
  2383. # Home tab
  2384. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
  2385. 'info_dict': {
  2386. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2387. 'title': 'lex will - Home',
  2388. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2389. 'uploader': 'lex will',
  2390. 'uploader_id': '@lexwill718',
  2391. },
  2392. 'playlist_mincount': 2,
  2393. }, {
  2394. # Videos tab
  2395. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
  2396. 'info_dict': {
  2397. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2398. 'title': 'lex will - Videos',
  2399. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2400. 'uploader': 'lex will',
  2401. 'uploader_id': '@lexwill718',
  2402. },
  2403. 'playlist_mincount': 975,
  2404. }, {
  2405. # Videos tab, sorted by popular
  2406. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
  2407. 'info_dict': {
  2408. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2409. 'title': 'lex will - Videos',
  2410. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2411. 'uploader': 'lex will',
  2412. 'uploader_id': '@lexwill718',
  2413. },
  2414. 'playlist_mincount': 199,
  2415. }, {
  2416. # Playlists tab
  2417. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
  2418. 'info_dict': {
  2419. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2420. 'title': 'lex will - Playlists',
  2421. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2422. 'uploader': 'lex will',
  2423. 'uploader_id': '@lexwill718',
  2424. },
  2425. 'playlist_mincount': 17,
  2426. }, {
  2427. # Community tab
  2428. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
  2429. 'info_dict': {
  2430. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2431. 'title': 'lex will - Community',
  2432. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2433. 'uploader': 'lex will',
  2434. 'uploader_id': '@lexwill718',
  2435. },
  2436. 'playlist_mincount': 18,
  2437. }, {
  2438. # Channels tab
  2439. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
  2440. 'info_dict': {
  2441. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2442. 'title': 'lex will - Channels',
  2443. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2444. 'uploader': 'lex will',
  2445. 'uploader_id': '@lexwill718',
  2446. },
  2447. 'playlist_mincount': 75,
  2448. }, {
  2449. 'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2450. 'only_matching': True,
  2451. }, {
  2452. 'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2453. 'only_matching': True,
  2454. }, {
  2455. 'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2456. 'only_matching': True,
  2457. }, {
  2458. 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
  2459. 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
  2460. 'info_dict': {
  2461. 'title': '29C3: Not my department',
  2462. 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
  2463. 'uploader': 'Christiaan008',
  2464. 'uploader_id': '@ChRiStIaAn008',
  2465. 'channel_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
  2466. },
  2467. 'playlist_count': 96,
  2468. }, {
  2469. 'note': 'Large playlist',
  2470. 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
  2471. 'info_dict': {
  2472. 'title': 'Uploads from Cauchemar',
  2473. 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
  2474. 'uploader': 'Cauchemar',
  2475. 'uploader_id': '@Cauchemar89',
  2476. 'channel_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
  2477. },
  2478. 'playlist_mincount': 1123,
  2479. }, {
  2480. # even larger playlist, 8832 videos
  2481. 'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
  2482. 'only_matching': True,
  2483. }, {
  2484. 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
  2485. 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
  2486. 'info_dict': {
  2487. 'title': 'Uploads from Interstellar Movie',
  2488. 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
  2489. 'uploader': 'Interstellar Movie',
  2490. 'uploader_id': '@InterstellarMovie',
  2491. 'channel_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
  2492. },
  2493. 'playlist_mincount': 21,
  2494. }, {
  2495. # https://github.com/ytdl-org/youtube-dl/issues/21844
  2496. 'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
  2497. 'info_dict': {
  2498. 'title': 'Data Analysis with Dr Mike Pound',
  2499. 'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
  2500. 'uploader': 'Computerphile',
  2501. 'uploader_id': '@Computerphile',
  2502. 'channel_id': 'UC9-y-6csu5WGm29I7JiwpnA',
  2503. },
  2504. 'playlist_mincount': 11,
  2505. }, {
  2506. 'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2507. 'only_matching': True,
  2508. }, {
  2509. # Playlist URL that does not actually serve a playlist
  2510. 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
  2511. 'info_dict': {
  2512. 'id': 'FqZTN594JQw',
  2513. 'ext': 'webm',
  2514. 'title': "Smiley's People 01 detective, Adventure Series, Action",
  2515. 'uploader': 'STREEM',
  2516. 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
  2517. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
  2518. 'upload_date': '20150526',
  2519. 'license': 'Standard YouTube License',
  2520. 'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
  2521. 'categories': ['People & Blogs'],
  2522. 'tags': list,
  2523. 'view_count': int,
  2524. 'like_count': int,
  2525. },
  2526. 'params': {
  2527. 'skip_download': True,
  2528. },
  2529. 'skip': 'This video is not available.',
  2530. 'add_ie': [YoutubeIE.ie_key()],
  2531. }, {
  2532. 'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
  2533. 'only_matching': True,
  2534. }, {
  2535. 'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
  2536. 'only_matching': True,
  2537. }, {
  2538. 'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
  2539. 'info_dict': {
  2540. 'id': r're:[\da-zA-Z_-]{8,}',
  2541. 'ext': 'mp4',
  2542. 'title': r're:(?s)[A-Z].{20,}',
  2543. 'uploader': 'Sky News',
  2544. 'uploader_id': '@SkyNews',
  2545. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@SkyNews',
  2546. 'upload_date': r're:\d{8}',
  2547. 'description': r're:(?s)(?:.*\n)+SUBSCRIBE to our YouTube channel for more videos: http://www\.youtube\.com/skynews *\n.*',
  2548. 'categories': ['News & Politics'],
  2549. 'tags': list,
  2550. 'like_count': int,
  2551. },
  2552. 'params': {
  2553. 'skip_download': True,
  2554. },
  2555. }, {
  2556. 'url': 'https://www.youtube.com/user/TheYoungTurks/live',
  2557. 'info_dict': {
  2558. 'id': 'a48o2S1cPoo',
  2559. 'ext': 'mp4',
  2560. 'title': 'The Young Turks - Live Main Show',
  2561. 'uploader': 'The Young Turks',
  2562. 'uploader_id': 'TheYoungTurks',
  2563. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
  2564. 'upload_date': '20150715',
  2565. 'license': 'Standard YouTube License',
  2566. 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
  2567. 'categories': ['News & Politics'],
  2568. 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
  2569. 'like_count': int,
  2570. },
  2571. 'params': {
  2572. 'skip_download': True,
  2573. },
  2574. 'only_matching': True,
  2575. }, {
  2576. 'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
  2577. 'only_matching': True,
  2578. }, {
  2579. 'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
  2580. 'only_matching': True,
  2581. }, {
  2582. 'url': 'https://www.youtube.com/feed/trending',
  2583. 'only_matching': True,
  2584. }, {
  2585. # needs auth
  2586. 'url': 'https://www.youtube.com/feed/library',
  2587. 'only_matching': True,
  2588. }, {
  2589. # needs auth
  2590. 'url': 'https://www.youtube.com/feed/history',
  2591. 'only_matching': True,
  2592. }, {
  2593. # needs auth
  2594. 'url': 'https://www.youtube.com/feed/subscriptions',
  2595. 'only_matching': True,
  2596. }, {
  2597. # needs auth
  2598. 'url': 'https://www.youtube.com/feed/watch_later',
  2599. 'only_matching': True,
  2600. }, {
  2601. # no longer available?
  2602. 'url': 'https://www.youtube.com/feed/recommended',
  2603. 'only_matching': True,
  2604. }, {
  2605. # inline playlist with not always working continuations
  2606. 'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
  2607. 'only_matching': True,
  2608. }, {
  2609. 'url': 'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8',
  2610. 'only_matching': True,
  2611. }, {
  2612. 'url': 'https://www.youtube.com/course',
  2613. 'only_matching': True,
  2614. }, {
  2615. 'url': 'https://www.youtube.com/zsecurity',
  2616. 'only_matching': True,
  2617. }, {
  2618. 'url': 'http://www.youtube.com/NASAgovVideo/videos',
  2619. 'only_matching': True,
  2620. }, {
  2621. 'url': 'https://www.youtube.com/TheYoungTurks/live',
  2622. 'only_matching': True,
  2623. }, {
  2624. 'url': 'https://www.youtube.com/hashtag/cctv9',
  2625. 'info_dict': {
  2626. 'id': 'cctv9',
  2627. 'title': '#cctv9',
  2628. },
  2629. 'playlist_mincount': 350,
  2630. }, {
  2631. 'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
  2632. 'only_matching': True,
  2633. }, {
  2634. 'note': 'Search tab',
  2635. 'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
  2636. 'playlist_mincount': 20,
  2637. 'info_dict': {
  2638. 'id': 'UCYO_jab_esuFRV4b17AJtAw',
  2639. 'title': '3Blue1Brown - Search - linear algebra',
  2640. 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
  2641. 'uploader': '3Blue1Brown',
  2642. 'uploader_id': '@3blue1brown',
  2643. 'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
  2644. }
  2645. }]
  2646. @classmethod
  2647. def suitable(cls, url):
  2648. return not YoutubeIE.suitable(url) and super(
  2649. YoutubeTabIE, cls).suitable(url)
  2650. @staticmethod
  2651. def _extract_grid_item_renderer(item):
  2652. assert isinstance(item, dict)
  2653. for key, renderer in item.items():
  2654. if not key.startswith('grid') or not key.endswith('Renderer'):
  2655. continue
  2656. if not isinstance(renderer, dict):
  2657. continue
  2658. return renderer
    def _grid_entries(self, grid_renderer):
        """Yield entries for each item of a gridRenderer.

        Each grid item may be a playlist, a video, a channel, or a generic
        navigation endpoint; the first matching kind wins.
        """
        for item in grid_renderer['items']:
            if not isinstance(item, dict):
                continue
            renderer = self._extract_grid_item_renderer(item)
            if not isinstance(renderer, dict):
                continue
            # title may live in either runs[] or simpleText form
            title = try_get(
                renderer, (lambda x: x['title']['runs'][0]['text'],
                           lambda x: x['title']['simpleText']), compat_str)
            # playlist
            playlist_id = renderer.get('playlistId')
            if playlist_id:
                yield self.url_result(
                    'https://www.youtube.com/playlist?list=%s' % playlist_id,
                    ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
                    video_title=title)
                continue
            # video
            video_id = renderer.get('videoId')
            if video_id:
                yield self._extract_video(renderer)
                continue
            # channel
            channel_id = renderer.get('channelId')
            if channel_id:
                title = try_get(
                    renderer, lambda x: x['title']['simpleText'], compat_str)
                yield self.url_result(
                    'https://www.youtube.com/channel/%s' % channel_id,
                    ie=YoutubeTabIE.ie_key(), video_title=title)
                continue
            # generic endpoint URL support: resolve the navigation endpoint and
            # hand it to the first extractor class that accepts it
            ep_url = urljoin('https://www.youtube.com/', try_get(
                renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
                compat_str))
            if ep_url:
                for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
                    if ie.suitable(ep_url):
                        yield self.url_result(
                            ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
                        break
  2701. def _shelf_entries_from_content(self, shelf_renderer):
  2702. content = shelf_renderer.get('content')
  2703. if not isinstance(content, dict):
  2704. return
  2705. renderer = content.get('gridRenderer')
  2706. if renderer:
  2707. # TODO: add support for nested playlists so each shelf is processed
  2708. # as separate playlist
  2709. # TODO: this includes only first N items
  2710. for entry in self._grid_entries(renderer):
  2711. yield entry
  2712. renderer = content.get('horizontalListRenderer')
  2713. if renderer:
  2714. # TODO
  2715. pass
  2716. def _shelf_entries(self, shelf_renderer, skip_channels=False):
  2717. ep = try_get(
  2718. shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
  2719. compat_str)
  2720. shelf_url = urljoin('https://www.youtube.com', ep)
  2721. if shelf_url:
  2722. # Skipping links to another channels, note that checking for
  2723. # endpoint.commandMetadata.webCommandMetadata.webPageTypwebPageType == WEB_PAGE_TYPE_CHANNEL
  2724. # will not work
  2725. if skip_channels and '/channels?' in shelf_url:
  2726. return
  2727. title = try_get(
  2728. shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
  2729. yield self.url_result(shelf_url, video_title=title)
  2730. # Shelf may not contain shelf URL, fallback to extraction from content
  2731. for entry in self._shelf_entries_from_content(shelf_renderer):
  2732. yield entry
  2733. def _playlist_entries(self, video_list_renderer):
  2734. for content in video_list_renderer['contents']:
  2735. if not isinstance(content, dict):
  2736. continue
  2737. renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
  2738. if not isinstance(renderer, dict):
  2739. continue
  2740. video_id = renderer.get('videoId')
  2741. if not video_id:
  2742. continue
  2743. yield self._extract_video(renderer)
  2744. def _video_entry(self, video_renderer):
  2745. video_id = video_renderer.get('videoId')
  2746. if video_id:
  2747. return self._extract_video(video_renderer)
  2748. def _post_thread_entries(self, post_thread_renderer):
  2749. post_renderer = try_get(
  2750. post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
  2751. if not post_renderer:
  2752. return
  2753. # video attachment
  2754. video_renderer = try_get(
  2755. post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
  2756. video_id = None
  2757. if video_renderer:
  2758. entry = self._video_entry(video_renderer)
  2759. if entry:
  2760. yield entry
  2761. # inline video links
  2762. runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
  2763. for run in runs:
  2764. if not isinstance(run, dict):
  2765. continue
  2766. ep_url = try_get(
  2767. run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
  2768. if not ep_url:
  2769. continue
  2770. if not YoutubeIE.suitable(ep_url):
  2771. continue
  2772. ep_video_id = YoutubeIE._match_id(ep_url)
  2773. if video_id == ep_video_id:
  2774. continue
  2775. yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
  2776. def _post_thread_continuation_entries(self, post_thread_continuation):
  2777. contents = post_thread_continuation.get('contents')
  2778. if not isinstance(contents, list):
  2779. return
  2780. for content in contents:
  2781. renderer = content.get('backstagePostThreadRenderer')
  2782. if not isinstance(renderer, dict):
  2783. continue
  2784. for entry in self._post_thread_entries(renderer):
  2785. yield entry
  2786. def _rich_grid_entries(self, contents):
  2787. for content in contents:
  2788. video_renderer = try_get(
  2789. content,
  2790. (lambda x: x['richItemRenderer']['content']['videoRenderer'],
  2791. lambda x: x['richItemRenderer']['content']['reelItemRenderer']),
  2792. dict)
  2793. if video_renderer:
  2794. entry = self._video_entry(video_renderer)
  2795. if entry:
  2796. yield entry
  2797. @staticmethod
  2798. def _build_continuation_query(continuation, ctp=None):
  2799. query = {
  2800. 'ctoken': continuation,
  2801. 'continuation': continuation,
  2802. }
  2803. if ctp:
  2804. query['itct'] = ctp
  2805. return query
  2806. @staticmethod
  2807. def _extract_next_continuation_data(renderer):
  2808. next_continuation = try_get(
  2809. renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
  2810. if not next_continuation:
  2811. return
  2812. continuation = next_continuation.get('continuation')
  2813. if not continuation:
  2814. return
  2815. ctp = next_continuation.get('clickTrackingParams')
  2816. return YoutubeTabIE._build_continuation_query(continuation, ctp)
  2817. @classmethod
  2818. def _extract_continuation(cls, renderer):
  2819. next_continuation = cls._extract_next_continuation_data(renderer)
  2820. if next_continuation:
  2821. return next_continuation
  2822. contents = []
  2823. for key in ('contents', 'items'):
  2824. contents.extend(try_get(renderer, lambda x: x[key], list) or [])
  2825. for content in contents:
  2826. if not isinstance(content, dict):
  2827. continue
  2828. continuation_ep = try_get(
  2829. content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
  2830. dict)
  2831. if not continuation_ep:
  2832. continue
  2833. continuation = try_get(
  2834. continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
  2835. if not continuation:
  2836. continue
  2837. ctp = continuation_ep.get('clickTrackingParams')
  2838. return YoutubeTabIE._build_continuation_query(continuation, ctp)
    def _entries(self, tab, item_id, webpage):
        """Yield all entries of a tab, following continuations until exhausted.

        First drains the renderers embedded in the initial page (either a
        sectionListRenderer or a richGridRenderer), remembering the last
        continuation token seen; then repeatedly POSTs to the innertube
        browse endpoint until no further continuation is returned.
        """
        tab_content = try_get(tab, lambda x: x['content'], dict)
        if not tab_content:
            return
        slr_renderer = try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
        if slr_renderer:
            is_channels_tab = tab.get('title') == 'Channels'
            continuation = None
            slr_contents = try_get(slr_renderer, lambda x: x['contents'], list) or []
            for slr_content in slr_contents:
                if not isinstance(slr_content, dict):
                    continue
                is_renderer = try_get(slr_content, lambda x: x['itemSectionRenderer'], dict)
                if not is_renderer:
                    continue
                isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
                # dispatch on whichever renderer kind this section contains
                for isr_content in isr_contents:
                    if not isinstance(isr_content, dict):
                        continue
                    renderer = isr_content.get('playlistVideoListRenderer')
                    if renderer:
                        for entry in self._playlist_entries(renderer):
                            yield entry
                        continuation = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('gridRenderer')
                    if renderer:
                        for entry in self._grid_entries(renderer):
                            yield entry
                        continuation = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('shelfRenderer')
                    if renderer:
                        # channel links in shelves are skipped except on the Channels tab
                        for entry in self._shelf_entries(renderer, not is_channels_tab):
                            yield entry
                        continue
                    renderer = isr_content.get('backstagePostThreadRenderer')
                    if renderer:
                        for entry in self._post_thread_entries(renderer):
                            yield entry
                        continuation = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('videoRenderer')
                    if renderer:
                        entry = self._video_entry(renderer)
                        if entry:
                            yield entry
                # fall back to outer renderers for a continuation token
                if not continuation:
                    continuation = self._extract_continuation(is_renderer)
            if not continuation:
                continuation = self._extract_continuation(slr_renderer)
        else:
            rich_grid_renderer = tab_content.get('richGridRenderer')
            if not rich_grid_renderer:
                return
            for entry in self._rich_grid_entries(rich_grid_renderer.get('contents') or []):
                yield entry
            continuation = self._extract_continuation(rich_grid_renderer)
        # prepare headers/context for innertube continuation requests
        ytcfg = self._extract_ytcfg(item_id, webpage)
        client_version = try_get(
            ytcfg, lambda x: x['INNERTUBE_CLIENT_VERSION'], compat_str) or '2.20210407.08.00'
        headers = {
            'x-youtube-client-name': '1',
            'x-youtube-client-version': client_version,
            'content-type': 'application/json',
        }
        context = try_get(ytcfg, lambda x: x['INNERTUBE_CONTEXT'], dict) or {
            'client': {
                'clientName': 'WEB',
                'clientVersion': client_version,
            }
        }
        visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
        identity_token = self._extract_identity_token(ytcfg, webpage)
        if identity_token:
            headers['x-youtube-identity-token'] = identity_token
        data = {
            'context': context,
        }
        for page_num in itertools.count(1):
            if not continuation:
                break
            if visitor_data:
                headers['x-goog-visitor-id'] = visitor_data
            data['continuation'] = continuation['continuation']
            data['clickTracking'] = {
                'clickTrackingParams': continuation['itct']
            }
            count = 0
            retries = 3
            while count <= retries:
                try:
                    # Downloading page may result in intermittent 5xx HTTP error
                    # that is usually worked around with a retry
                    response = self._download_json(
                        'https://www.youtube.com/youtubei/v1/browse?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
                        None, 'Downloading page %d%s' % (page_num, ' (retry #%d)' % count if count else ''),
                        headers=headers, data=json.dumps(data).encode('utf8'))
                    break
                except ExtractorError as e:
                    if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
                        count += 1
                        if count <= retries:
                            continue
                    raise
            if not response:
                break
            # carry visitorData forward so subsequent pages stay consistent
            visitor_data = try_get(
                response, lambda x: x['responseContext']['visitorData'], compat_str) or visitor_data
            # old-style continuation response
            continuation_contents = try_get(
                response, lambda x: x['continuationContents'], dict)
            if continuation_contents:
                continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
                if continuation_renderer:
                    for entry in self._playlist_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('gridContinuation')
                if continuation_renderer:
                    for entry in self._grid_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('itemSectionContinuation')
                if continuation_renderer:
                    for entry in self._post_thread_continuation_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
            # new-style continuation response: dispatch on the first item's kind
            on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
            continuation_items = try_get(
                on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
            if continuation_items:
                continuation_item = continuation_items[0]
                if not isinstance(continuation_item, dict):
                    continue
                renderer = self._extract_grid_item_renderer(continuation_item)
                if renderer:
                    grid_renderer = {'items': continuation_items}
                    for entry in self._grid_entries(grid_renderer):
                        yield entry
                    continuation = self._extract_continuation(grid_renderer)
                    continue
                renderer = continuation_item.get('playlistVideoRenderer') or continuation_item.get('itemSectionRenderer')
                if renderer:
                    video_list_renderer = {'contents': continuation_items}
                    for entry in self._playlist_entries(video_list_renderer):
                        yield entry
                    continuation = self._extract_continuation(video_list_renderer)
                    continue
                renderer = continuation_item.get('backstagePostThreadRenderer')
                if renderer:
                    continuation_renderer = {'contents': continuation_items}
                    for entry in self._post_thread_continuation_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                renderer = continuation_item.get('richItemRenderer')
                if renderer:
                    for entry in self._rich_grid_entries(continuation_items):
                        yield entry
                    continuation = self._extract_continuation({'contents': continuation_items})
                    continue
            break
  3004. @staticmethod
  3005. def _extract_selected_tab(tabs):
  3006. for tab in tabs:
  3007. renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
  3008. if renderer.get('selected') is True:
  3009. return renderer
  3010. else:
  3011. raise ExtractorError('Unable to find selected tab')
  3012. def _extract_uploader(self, metadata, data):
  3013. uploader = {}
  3014. renderers = traverse_obj(data,
  3015. ('sidebar', 'playlistSidebarRenderer', 'items'))
  3016. uploader['channel_id'] = self._extract_channel_id('', metadata=metadata, renderers=renderers)
  3017. uploader['uploader'] = (
  3018. self._extract_author_var('', 'name', renderers=renderers)
  3019. or self._extract_author_var('', 'name', metadata=metadata))
  3020. uploader['uploader_url'] = self._yt_urljoin(
  3021. self._extract_author_var('', 'url', metadata=metadata, renderers=renderers))
  3022. uploader['uploader_id'] = self._extract_uploader_id(uploader['uploader_url'])
  3023. uploader['channel'] = uploader['uploader']
  3024. return uploader
  3025. @staticmethod
  3026. def _extract_alert(data):
  3027. alerts = []
  3028. for alert in try_get(data, lambda x: x['alerts'], list) or []:
  3029. if not isinstance(alert, dict):
  3030. continue
  3031. alert_text = try_get(
  3032. alert, lambda x: x['alertRenderer']['text'], dict)
  3033. if not alert_text:
  3034. continue
  3035. text = try_get(
  3036. alert_text,
  3037. (lambda x: x['simpleText'], lambda x: x['runs'][0]['text']),
  3038. compat_str)
  3039. if text:
  3040. alerts.append(text)
  3041. return '\n'.join(alerts)
    def _extract_from_tabs(self, item_id, webpage, data, tabs):
        """Build a playlist result for a tabbed page (channel, playlist, hashtag).

        Tries three metadata renderers in order; whichever one matched is
        also what is later handed to _extract_uploader, so `renderer` must
        keep its final value across the fallback chain.
        """
        selected_tab = self._extract_selected_tab(tabs)
        # 1st choice: channel page metadata.
        renderer = try_get(
            data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
        playlist_id = item_id
        title = description = None
        if renderer:
            # Channel pages: title is "<channel> - <tab>[ - <expanded>]".
            channel_title = renderer.get('title') or item_id
            tab_title = selected_tab.get('title')
            title = channel_title or item_id
            if tab_title:
                title += ' - %s' % tab_title
            if selected_tab.get('expandedText'):
                title += ' - %s' % selected_tab['expandedText']
            description = renderer.get('description')
            # Channel pages carry the canonical ID in externalId.
            playlist_id = renderer.get('externalId')
        else:
            # 2nd choice: plain playlist metadata.
            renderer = try_get(
                data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
            if renderer:
                title = renderer.get('title')
            else:
                # 3rd choice: hashtag page header.
                renderer = try_get(
                    data, lambda x: x['header']['hashtagHeaderRenderer'], dict)
                if renderer:
                    title = try_get(renderer, lambda x: x['hashtag']['simpleText'])
        playlist = self.playlist_result(
            self._entries(selected_tab, item_id, webpage),
            playlist_id=playlist_id, playlist_title=title,
            playlist_description=description)
        # NOTE: renderer may be None here if none of the three matched;
        # _extract_uploader tolerates that via its metadata fallbacks.
        return merge_dicts(playlist, self._extract_uploader(renderer, data))
  3073. def _extract_from_playlist(self, item_id, url, data, playlist):
  3074. title = playlist.get('title') or try_get(
  3075. data, lambda x: x['titleText']['simpleText'], compat_str)
  3076. playlist_id = playlist.get('playlistId') or item_id
  3077. # Inline playlist rendition continuation does not always work
  3078. # at Youtube side, so delegating regular tab-based playlist URL
  3079. # processing whenever possible.
  3080. playlist_url = urljoin(url, try_get(
  3081. playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
  3082. compat_str))
  3083. if playlist_url and playlist_url != url:
  3084. return self.url_result(
  3085. playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
  3086. video_title=title)
  3087. return self.playlist_result(
  3088. self._playlist_entries(playlist), playlist_id=playlist_id,
  3089. playlist_title=title)
  3090. def _extract_identity_token(self, ytcfg, webpage):
  3091. if ytcfg:
  3092. token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
  3093. if token:
  3094. return token
  3095. return self._search_regex(
  3096. r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
  3097. 'identity token', default=None)
    def _real_extract(self, url):
        """Dispatch a tab/playlist/watch URL to the matching extraction path.

        The fallback order (tabs -> inline playlist -> single video ->
        alerts -> error) is the contract of this method.
        """
        item_id = self._match_id(url)
        # Normalize host so consent/short/alternate domains behave the same.
        url = update_url(url, netloc='www.youtube.com')
        # Handle both video/playlist URLs
        qs = parse_qs(url)
        video_id = qs.get('v', [None])[0]
        playlist_id = qs.get('list', [None])[0]
        if video_id and playlist_id:
            # URL carries both: honour --no-playlist, otherwise keep going
            # and extract the playlist below.
            if self._downloader.params.get('noplaylist'):
                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
                return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
            self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
        webpage = self._download_webpage(url, item_id)
        data = self._extract_yt_initial_data(item_id, webpage)
        # Channel/user/hashtag/playlist pages render as a set of tabs.
        tabs = try_get(
            data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
        if tabs:
            return self._extract_from_tabs(item_id, webpage, data, tabs)
        # Watch pages with a side playlist render an inline playlist object.
        playlist = try_get(
            data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
        if playlist:
            return self._extract_from_playlist(item_id, url, data, playlist)
        # Fallback to video extraction if no playlist alike page is recognized.
        # First check for the current video then try the v attribute of URL query.
        video_id = try_get(
            data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
            compat_str) or video_id
        if video_id:
            return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
        # Capture and output alerts
        alert = self._extract_alert(data)
        if alert:
            raise ExtractorError(alert, expected=True)
        # Failed to recognize
        raise ExtractorError('Unable to recognize tab page')
class YoutubePlaylistIE(InfoExtractor):
    IE_DESC = 'YouTube.com playlists'
    # Matches either a full playlist URL (youtube/youtubekids/invidio.us
    # with a list= query parameter) or a bare playlist ID on its own.
    _VALID_URL = r'''(?x)(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        (?:
                            (?:
                                youtube(?:kids)?\.com|
                                invidio\.us
                            )
                            /.*?\?.*?\blist=
                        )?
                        (?P<id>%(playlist_id)s)
                     )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
    IE_NAME = 'youtube:playlist'
    _TESTS = [{
        'note': 'issue #673',
        'url': 'PLBB231211A4F62143',
        'info_dict': {
            'title': '[OLD]Team Fortress 2 (Class-based LP)',
            'id': 'PLBB231211A4F62143',
            'uploader': 'Wickman',
            'uploader_id': '@WickmanVT',
            'channel_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
        },
        'playlist_mincount': 29,
    }, {
        'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        'info_dict': {
            'title': 'YDL_safe_search',
            'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        },
        'playlist_count': 2,
        'skip': 'This playlist is private',
    }, {
        'note': 'embedded',
        'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
        # TODO: full playlist requires _reload_with_unavailable_videos()
        # 'playlist_count': 4,
        'playlist_mincount': 1,
        'info_dict': {
            'title': 'JODA15',
            'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
            'uploader': 'milan',
            'uploader_id': '@milan5503',
            'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
        }
    }, {
        'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
        'playlist_mincount': 455,
        'info_dict': {
            'title': '2018 Chinese New Singles (11/6 updated)',
            'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
            'uploader': 'LBK',
            'uploader_id': '@music_king',
            'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA',
        }
    }, {
        'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
        'only_matching': True,
    }, {
        # music album playlist
        'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to YoutubeTabIE for full tab URLs and to the video
        # extractor whenever a v= parameter is present.
        if YoutubeTabIE.suitable(url):
            return False
        if parse_qs(url).get('v', [None])[0]:
            return False
        return super(YoutubePlaylistIE, cls).suitable(url)

    def _real_extract(self, url):
        """Canonicalize to a /playlist URL and delegate to YoutubeTabIE."""
        playlist_id = self._match_id(url)
        qs = parse_qs(url)
        # Bare-ID inputs have no query string; synthesize list=<id>.
        if not qs:
            qs = {'list': playlist_id}
        return self.url_result(
            update_url_query('https://www.youtube.com/playlist', qs),
            ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtBeIE(InfoExtractor):
    # youtu.be short links that also carry a list= playlist parameter.
    _VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
    _TESTS = [{
        'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
        'info_dict': {
            'id': 'yeWKywCrFtk',
            'ext': 'mp4',
            'title': 'Small Scale Baler and Braiding Rugs',
            'uploader': 'Backus-Page House Museum',
            'uploader_id': '@backuspagemuseum',
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/@backuspagemuseum',
            'upload_date': '20161008',
            'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
            'categories': ['Nonprofits & Activism'],
            'tags': list,
            'like_count': int,
        },
        'params': {
            'noplaylist': True,
            'skip_download': True,
        },
    }, {
        'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Rewrite to a canonical watch URL so YoutubeTabIE can apply its
        video-vs-playlist (--no-playlist) handling."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        playlist_id = mobj.group('playlist_id')
        return self.url_result(
            update_url_query('https://www.youtube.com/watch', {
                'v': video_id,
                'list': playlist_id,
                'feature': 'youtu.be',
            }), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
  3248. class YoutubeYtUserIE(InfoExtractor):
  3249. _VALID_URL = r'ytuser:(?P<id>.+)'
  3250. _TESTS = [{
  3251. 'url': 'ytuser:phihag',
  3252. 'only_matching': True,
  3253. }]
  3254. def _real_extract(self, url):
  3255. user_id = self._match_id(url)
  3256. return self.url_result(
  3257. 'https://www.youtube.com/user/%s' % user_id,
  3258. ie=YoutubeTabIE.ie_key(), video_id=user_id)
  3259. class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
  3260. IE_NAME = 'youtube:favorites'
  3261. IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
  3262. _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
  3263. _LOGIN_REQUIRED = True
  3264. _TESTS = [{
  3265. 'url': ':ytfav',
  3266. 'only_matching': True,
  3267. }, {
  3268. 'url': ':ytfavorites',
  3269. 'only_matching': True,
  3270. }]
  3271. def _real_extract(self, url):
  3272. return self.url_result(
  3273. 'https://www.youtube.com/playlist?list=LL',
  3274. ie=YoutubeTabIE.ie_key())
  3275. class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
  3276. IE_DESC = 'YouTube.com searches'
  3277. IE_NAME = 'youtube:search'
  3278. _SEARCH_KEY = 'ytsearch'
  3279. _SEARCH_PARAMS = 'EgIQAQ%3D%3D' # Videos only
  3280. _MAX_RESULTS = float('inf')
  3281. _TESTS = [{
  3282. 'url': 'ytsearch10:youtube-dl test video',
  3283. 'playlist_count': 10,
  3284. 'info_dict': {
  3285. 'id': 'youtube-dl test video',
  3286. 'title': 'youtube-dl test video',
  3287. }
  3288. }]
  3289. def _get_n_results(self, query, n):
  3290. """Get a specified number of results for a query"""
  3291. entries = itertools.islice(self._search_results(query, self._SEARCH_PARAMS), 0, None if n == float('inf') else n)
  3292. return self.playlist_result(entries, query, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
    # Same as YoutubeSearchIE but with date-sorted search parameters.
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
    _SEARCH_PARAMS = 'CAISAhAB'  # Videos only, sorted by date
    _TESTS = [{
        'url': 'ytsearchdate10:youtube-dl test video',
        'playlist_count': 10,
        'info_dict': {
            'id': 'youtube-dl test video',
            'title': 'youtube-dl test video',
        }
    }]
  3306. class YoutubeSearchURLIE(YoutubeBaseInfoExtractor):
  3307. IE_DESC = 'YouTube search URLs with sorting and filter support'
  3308. IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
  3309. _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?:[^&]+)(?:[&]|$)'
  3310. _TESTS = [{
  3311. 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
  3312. 'playlist_mincount': 5,
  3313. 'info_dict': {
  3314. 'id': 'youtube-dl test video',
  3315. 'title': 'youtube-dl test video',
  3316. },
  3317. 'params': {'playlistend': 5}
  3318. }, {
  3319. 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
  3320. 'only_matching': True,
  3321. }]
  3322. def _real_extract(self, url):
  3323. qs = parse_qs(url)
  3324. query = (qs.get('search_query') or qs.get('q'))[-1]
  3325. params = qs.get('sp', ('',))[-1]
  3326. return self.playlist_result(self._search_results(query, params), query, query)
  3327. class YoutubeFeedsInfoExtractor(YoutubeTabIE):
  3328. """
  3329. Base class for feed extractors
  3330. Subclasses must define the _FEED_NAME property.
  3331. """
  3332. _LOGIN_REQUIRED = True
  3333. @property
  3334. def IE_NAME(self):
  3335. return 'youtube:%s' % self._FEED_NAME
  3336. def _real_initialize(self):
  3337. self._login()
  3338. def _real_extract(self, url):
  3339. return self.url_result(
  3340. 'https://www.youtube.com/feed/%s' % self._FEED_NAME,
  3341. ie=YoutubeTabIE.ie_key())
  3342. class YoutubeWatchLaterIE(InfoExtractor):
  3343. IE_NAME = 'youtube:watchlater'
  3344. IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
  3345. _VALID_URL = r':ytwatchlater'
  3346. _TESTS = [{
  3347. 'url': ':ytwatchlater',
  3348. 'only_matching': True,
  3349. }]
  3350. def _real_extract(self, url):
  3351. return self.url_result(
  3352. 'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    # Data-only subclass: extraction logic lives in YoutubeFeedsInfoExtractor.
    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
    _VALID_URL = r':ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _TESTS = [{
        'url': ':ytrec',
        'only_matching': True,
    }, {
        'url': ':ytrecommended',
        'only_matching': True,
    }]
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    # Data-only subclass: extraction logic lives in YoutubeFeedsInfoExtractor.
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r':ytsubs(?:criptions)?'
    _FEED_NAME = 'subscriptions'
    _TESTS = [{
        'url': ':ytsubs',
        'only_matching': True,
    }, {
        'url': ':ytsubscriptions',
        'only_matching': True,
    }]
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    # Data-only subclass: extraction logic lives in YoutubeFeedsInfoExtractor.
    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
    _VALID_URL = r':ythistory'
    _FEED_NAME = 'history'
    _TESTS = [{
        'url': ':ythistory',
        'only_matching': True,
    }]
class YoutubeTruncatedURLIE(InfoExtractor):
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    # Matches watch/attribution URLs that carry auxiliary parameters but no
    # v= video ID — the typical result of an unquoted URL being cut at '&'.
    _VALID_URL = r'''(?x)
        (?:https?://)?
        (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
        (?:watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+|
            x-yt-cl=[0-9]+|
            hl=[^&]*|
            t=[0-9]+
        )?
        |
            attribution_link\?a=[^&]+
        )
        $
    '''
    _TESTS = [{
        'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?feature=foo',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?hl=en-GB',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?t=2372',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Always raise: point the user at the likely shell-quoting mistake."""
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like youtube-dl '
            '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply youtube-dl BaW_jenozKc .',
            expected=True)
  3428. class YoutubeTruncatedIDIE(InfoExtractor):
  3429. IE_NAME = 'youtube:truncated_id'
  3430. IE_DESC = False # Do not list
  3431. _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
  3432. _TESTS = [{
  3433. 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
  3434. 'only_matching': True,
  3435. }]
  3436. def _real_extract(self, url):
  3437. video_id = self._match_id(url)
  3438. raise ExtractorError(
  3439. 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
  3440. expected=True)