logo

youtube-dl

[mirror] Download/Watch videos from video hosters. git clone https://hacktivis.me/git/mirror/youtube-dl.git

youtube.py (185719B)


  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import collections
  4. import hashlib
  5. import itertools
  6. import json
  7. import os.path
  8. import random
  9. import re
  10. import string
  11. import time
  12. import traceback
  13. from .common import InfoExtractor, SearchInfoExtractor
  14. from ..compat import (
  15. compat_chr,
  16. compat_HTTPError,
  17. compat_map as map,
  18. compat_str,
  19. compat_urllib_parse,
  20. compat_urllib_parse_parse_qs as compat_parse_qs,
  21. compat_urllib_parse_unquote_plus,
  22. compat_urllib_parse_urlparse,
  23. compat_zip as zip,
  24. )
  25. from ..jsinterp import JSInterpreter
  26. from ..utils import (
  27. bug_reports_message,
  28. clean_html,
  29. dict_get,
  30. error_to_compat_str,
  31. ExtractorError,
  32. filter_dict,
  33. float_or_none,
  34. get_first,
  35. extract_attributes,
  36. get_element_by_attribute,
  37. int_or_none,
  38. join_nonempty,
  39. js_to_json,
  40. LazyList,
  41. merge_dicts,
  42. mimetype2ext,
  43. NO_DEFAULT,
  44. parse_codecs,
  45. parse_count,
  46. parse_duration,
  47. parse_qs,
  48. qualities,
  49. remove_start,
  50. smuggle_url,
  51. str_or_none,
  52. str_to_int,
  53. T,
  54. traverse_obj,
  55. try_call,
  56. try_get,
  57. txt_or_none,
  58. unescapeHTML,
  59. unified_strdate,
  60. unsmuggle_url,
  61. update_url,
  62. update_url_query,
  63. url_or_none,
  64. urlencode_postdata,
  65. urljoin,
  66. variadic,
  67. )
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""

    # Endpoints of the (legacy) Google account sign-in flow used by _login()
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
    _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
    _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
    # {0} is filled with the 'TL' token extracted from the challenge response
    _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'

    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    # Matches playlist IDs (various known prefixes) or the special 'RDMM' mix id
    _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM)'

    # Per-client configuration for the youtubei ("Innertube") API.
    # Each entry carries the context sent in API request bodies plus flags
    # consumed elsewhere in this extractor (REQUIRE_JS_PLAYER, REQUIRE_PO_TOKEN,
    # SUPPORTS_COOKIES).
    _INNERTUBE_CLIENTS = {
        'ios': {
            'INNERTUBE_CONTEXT': {
                'client': {
                    'clientName': 'IOS',
                    'clientVersion': '20.10.4',
                    'deviceMake': 'Apple',
                    'deviceModel': 'iPhone16,2',
                    'userAgent': 'com.google.ios.youtube/20.10.4 (iPhone16,2; U; CPU iOS 18_3_2 like Mac OS X;)',
                    'osName': 'iPhone',
                    'osVersion': '18.3.2.22D82',
                },
            },
            'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
            'REQUIRE_JS_PLAYER': False,
            'REQUIRE_PO_TOKEN': True,
        },
        # mweb has 'ultralow' formats
        # See: https://github.com/yt-dlp/yt-dlp/pull/557
        'mweb': {
            'INNERTUBE_CONTEXT': {
                'client': {
                    'clientName': 'MWEB',
                    'clientVersion': '2.20250311.03.00',
                    # mweb previously did not require PO Token with this UA
                    'userAgent': 'Mozilla/5.0 (iPad; CPU OS 16_7_10 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1,gzip(gfe)',
                },
            },
            'INNERTUBE_CONTEXT_CLIENT_NAME': 2,
            'REQUIRE_PO_TOKEN': True,
            'SUPPORTS_COOKIES': True,
        },
        'tv': {
            'INNERTUBE_CONTEXT': {
                'client': {
                    'clientName': 'TVHTML5',
                    'clientVersion': '7.20250312.16.00',
                    'userAgent': 'Mozilla/5.0 (ChromiumStylePlatform) Cobalt/Version',
                },
            },
            'INNERTUBE_CONTEXT_CLIENT_NAME': 7,
            'SUPPORTS_COOKIES': True,
        },
        'web': {
            'INNERTUBE_CONTEXT': {
                'client': {
                    'clientName': 'WEB',
                    'clientVersion': '2.20250312.04.00',
                },
            },
            'INNERTUBE_CONTEXT_CLIENT_NAME': 1,
            'REQUIRE_PO_TOKEN': True,
            'SUPPORTS_COOKIES': True,
        },
    }
  134. def _login(self):
  135. """
  136. Attempt to log in to YouTube.
  137. True is returned if successful or skipped.
  138. False is returned if login failed.
  139. If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
  140. """
  141. username, password = self._get_login_info()
  142. # No authentication to be performed
  143. if username is None:
  144. if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
  145. raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
  146. return True
  147. login_page = self._download_webpage(
  148. self._LOGIN_URL, None,
  149. note='Downloading login page',
  150. errnote='unable to fetch login page', fatal=False)
  151. if login_page is False:
  152. return
  153. login_form = self._hidden_inputs(login_page)
  154. def req(url, f_req, note, errnote):
  155. data = login_form.copy()
  156. data.update({
  157. 'pstMsg': 1,
  158. 'checkConnection': 'youtube',
  159. 'checkedDomains': 'youtube',
  160. 'hl': 'en',
  161. 'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
  162. 'f.req': json.dumps(f_req),
  163. 'flowName': 'GlifWebSignIn',
  164. 'flowEntry': 'ServiceLogin',
  165. # TODO: reverse actual botguard identifier generation algo
  166. 'bgRequest': '["identifier",""]',
  167. })
  168. return self._download_json(
  169. url, None, note=note, errnote=errnote,
  170. transform_source=lambda s: re.sub(r'^[^[]*', '', s),
  171. fatal=False,
  172. data=urlencode_postdata(data), headers={
  173. 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
  174. 'Google-Accounts-XSRF': 1,
  175. })
  176. def warn(message):
  177. self._downloader.report_warning(message)
  178. lookup_req = [
  179. username,
  180. None, [], None, 'US', None, None, 2, False, True,
  181. [
  182. None, None,
  183. [2, 1, None, 1,
  184. 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
  185. None, [], 4],
  186. 1, [None, None, []], None, None, None, True,
  187. ],
  188. username,
  189. ]
  190. lookup_results = req(
  191. self._LOOKUP_URL, lookup_req,
  192. 'Looking up account info', 'Unable to look up account info')
  193. if lookup_results is False:
  194. return False
  195. user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
  196. if not user_hash:
  197. warn('Unable to extract user hash')
  198. return False
  199. challenge_req = [
  200. user_hash,
  201. None, 1, None, [1, None, None, None, [password, None, True]],
  202. [
  203. None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
  204. 1, [None, None, []], None, None, None, True,
  205. ]]
  206. challenge_results = req(
  207. self._CHALLENGE_URL, challenge_req,
  208. 'Logging in', 'Unable to log in')
  209. if challenge_results is False:
  210. return
  211. login_res = try_get(challenge_results, lambda x: x[0][5], list)
  212. if login_res:
  213. login_msg = try_get(login_res, lambda x: x[5], compat_str)
  214. warn(
  215. 'Unable to login: %s' % 'Invalid password'
  216. if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
  217. return False
  218. res = try_get(challenge_results, lambda x: x[0][-1], list)
  219. if not res:
  220. warn('Unable to extract result entry')
  221. return False
  222. login_challenge = try_get(res, lambda x: x[0][0], list)
  223. if login_challenge:
  224. challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
  225. if challenge_str == 'TWO_STEP_VERIFICATION':
  226. # SEND_SUCCESS - TFA code has been successfully sent to phone
  227. # QUOTA_EXCEEDED - reached the limit of TFA codes
  228. status = try_get(login_challenge, lambda x: x[5], compat_str)
  229. if status == 'QUOTA_EXCEEDED':
  230. warn('Exceeded the limit of TFA codes, try later')
  231. return False
  232. tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
  233. if not tl:
  234. warn('Unable to extract TL')
  235. return False
  236. tfa_code = self._get_tfa_info('2-step verification code')
  237. if not tfa_code:
  238. warn(
  239. 'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
  240. '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
  241. return False
  242. tfa_code = remove_start(tfa_code, 'G-')
  243. tfa_req = [
  244. user_hash, None, 2, None,
  245. [
  246. 9, None, None, None, None, None, None, None,
  247. [None, tfa_code, True, 2],
  248. ]]
  249. tfa_results = req(
  250. self._TFA_URL.format(tl), tfa_req,
  251. 'Submitting TFA code', 'Unable to submit TFA code')
  252. if tfa_results is False:
  253. return False
  254. tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
  255. if tfa_res:
  256. tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
  257. warn(
  258. 'Unable to finish TFA: %s' % 'Invalid TFA code'
  259. if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
  260. return False
  261. check_cookie_url = try_get(
  262. tfa_results, lambda x: x[0][-1][2], compat_str)
  263. else:
  264. CHALLENGES = {
  265. 'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
  266. 'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
  267. 'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
  268. }
  269. challenge = CHALLENGES.get(
  270. challenge_str,
  271. '%s returned error %s.' % (self.IE_NAME, challenge_str))
  272. warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
  273. return False
  274. else:
  275. check_cookie_url = try_get(res, lambda x: x[2], compat_str)
  276. if not check_cookie_url:
  277. warn('Unable to extract CheckCookie URL')
  278. return False
  279. check_cookie_results = self._download_webpage(
  280. check_cookie_url, None, 'Checking cookie', fatal=False)
  281. if check_cookie_results is False:
  282. return False
  283. if 'https://myaccount.google.com/' not in check_cookie_results:
  284. warn('Unable to log in')
  285. return False
  286. return True
  287. def _initialize_consent(self):
  288. cookies = self._get_cookies('https://www.youtube.com/')
  289. if cookies.get('__Secure-3PSID'):
  290. return
  291. socs = cookies.get('SOCS')
  292. if socs and not socs.value.startswith('CAA'): # not consented
  293. return
  294. self._set_cookie('.youtube.com', 'SOCS', 'CAI', secure=True) # accept all (required for mixes)
  295. def _real_initialize(self):
  296. self._initialize_consent()
  297. if self._downloader is None:
  298. return
  299. if not self._login():
  300. return
    # Default request body for youtubei API calls: the 'web' client's context
    _DEFAULT_API_DATA = {'context': _INNERTUBE_CLIENTS['web']['INNERTUBE_CONTEXT']}

    # Regexes locating the JSON blobs embedded in watch pages
    _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
    _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
    _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'

    # Cached SAPISID cookie value: None = not yet looked up, False = absent
    _SAPISID = None
  306. def _generate_sapisidhash_header(self, origin='https://www.youtube.com'):
  307. time_now = round(time.time())
  308. if self._SAPISID is None:
  309. yt_cookies = self._get_cookies('https://www.youtube.com')
  310. # Sometimes SAPISID cookie isn't present but __Secure-3PAPISID is.
  311. # See: https://github.com/yt-dlp/yt-dlp/issues/393
  312. sapisid_cookie = dict_get(
  313. yt_cookies, ('__Secure-3PAPISID', 'SAPISID'))
  314. if sapisid_cookie and sapisid_cookie.value:
  315. self._SAPISID = sapisid_cookie.value
  316. self.write_debug('Extracted SAPISID cookie')
  317. # SAPISID cookie is required if not already present
  318. if not yt_cookies.get('SAPISID'):
  319. self.write_debug('Copying __Secure-3PAPISID cookie to SAPISID cookie')
  320. self._set_cookie(
  321. '.youtube.com', 'SAPISID', self._SAPISID, secure=True, expire_time=time_now + 3600)
  322. else:
  323. self._SAPISID = False
  324. if not self._SAPISID:
  325. return None
  326. # SAPISIDHASH algorithm from https://stackoverflow.com/a/32065323
  327. sapisidhash = hashlib.sha1(
  328. '{0} {1} {2}'.format(time_now, self._SAPISID, origin).encode('utf-8')).hexdigest()
  329. return 'SAPISIDHASH {0}_{1}'.format(time_now, sapisidhash)
  330. def _call_api(self, ep, query, video_id, fatal=True, headers=None,
  331. note='Downloading API JSON'):
  332. data = self._DEFAULT_API_DATA.copy()
  333. data.update(query)
  334. real_headers = {'content-type': 'application/json'}
  335. if headers:
  336. real_headers.update(headers)
  337. # was: 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
  338. api_key = self.get_param('youtube_innertube_key')
  339. return self._download_json(
  340. 'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
  341. note=note, errnote='Unable to download API page',
  342. data=json.dumps(data).encode('utf8'), fatal=fatal,
  343. headers=real_headers, query=filter_dict({
  344. 'key': api_key,
  345. 'prettyPrint': 'false',
  346. }))
  347. def _extract_yt_initial_data(self, video_id, webpage):
  348. return self._parse_json(
  349. self._search_regex(
  350. (r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
  351. self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
  352. video_id)
  353. def _extract_visitor_data(self, *args):
  354. """
  355. Extract visitorData from an API response or ytcfg
  356. Appears to be used to track session state
  357. """
  358. visitor_data = self.get_param('youtube_visitor_data')
  359. if visitor_data:
  360. return visitor_data
  361. return get_first(
  362. args, (('VISITOR_DATA',
  363. ('INNERTUBE_CONTEXT', 'client', 'visitorData'),
  364. ('responseContext', 'visitorData')),
  365. T(compat_str)))
  366. def _extract_ytcfg(self, video_id, webpage):
  367. return self._parse_json(
  368. self._search_regex(
  369. r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
  370. default='{}'), video_id, fatal=False) or {}
  371. def _extract_video(self, renderer):
  372. video_id = renderer['videoId']
  373. title = try_get(
  374. renderer,
  375. (lambda x: x['title']['runs'][0]['text'],
  376. lambda x: x['title']['simpleText'],
  377. lambda x: x['headline']['simpleText']), compat_str)
  378. description = try_get(
  379. renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
  380. compat_str)
  381. duration = parse_duration(try_get(
  382. renderer, lambda x: x['lengthText']['simpleText'], compat_str))
  383. view_count_text = try_get(
  384. renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
  385. view_count = str_to_int(self._search_regex(
  386. r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
  387. 'view count', default=None))
  388. uploader = try_get(
  389. renderer,
  390. (lambda x: x['ownerText']['runs'][0]['text'],
  391. lambda x: x['shortBylineText']['runs'][0]['text']), compat_str)
  392. return {
  393. '_type': 'url',
  394. 'ie_key': YoutubeIE.ie_key(),
  395. 'id': video_id,
  396. 'url': video_id,
  397. 'title': title,
  398. 'description': description,
  399. 'duration': duration,
  400. 'view_count': view_count,
  401. 'uploader': uploader,
  402. }
    @staticmethod
    def _extract_thumbnails(data, *path_list, **kw_final_key):
        """
        Extract thumbnails from thumbnails dict
        @param path_list: path list to level that contains 'thumbnails' key
        """
        # Py2-compatible keyword-only argument: the key holding the list
        final_key = kw_final_key.get('final_key', 'thumbnails')

        # Branch into every supplied path (or the root when none given),
        # then into each entry of its final_key list, mapping each entry
        # to a {url, height, width} dict.
        return traverse_obj(data, ((
            tuple(variadic(path) + (final_key, Ellipsis)
                  for path in path_list or [()])), {
            'url': ('url', T(url_or_none),
                    # Sometimes youtube gives a wrong thumbnail URL. See:
                    # https://github.com/yt-dlp/yt-dlp/issues/233
                    # https://github.com/ytdl-org/youtube-dl/issues/28023
                    T(lambda u: update_url(u, query=None) if u and 'maxresdefault' in u else u)),
            'height': ('height', T(int_or_none)),
            'width': ('width', T(int_or_none)),
            # discard entries that ended up without a usable URL
        }, T(lambda t: t if t.get('url') else None)))
  421. def _search_results(self, query, params):
  422. data = {
  423. 'context': {
  424. 'client': {
  425. 'clientName': 'WEB',
  426. 'clientVersion': '2.20201021.03.00',
  427. },
  428. },
  429. 'query': query,
  430. }
  431. if params:
  432. data['params'] = params
  433. for page_num in itertools.count(1):
  434. search = self._download_json(
  435. 'https://www.youtube.com/youtubei/v1/search',
  436. video_id='query "%s"' % query,
  437. note='Downloading page %s' % page_num,
  438. errnote='Unable to download API page', fatal=False,
  439. data=json.dumps(data).encode('utf8'),
  440. query={
  441. # 'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
  442. 'prettyPrint': 'false',
  443. },
  444. headers={'content-type': 'application/json'})
  445. if not search:
  446. break
  447. slr_contents = try_get(
  448. search,
  449. (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
  450. lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
  451. list)
  452. if not slr_contents:
  453. break
  454. for slr_content in slr_contents:
  455. isr_contents = try_get(
  456. slr_content,
  457. lambda x: x['itemSectionRenderer']['contents'],
  458. list)
  459. if not isr_contents:
  460. continue
  461. for content in isr_contents:
  462. if not isinstance(content, dict):
  463. continue
  464. video = content.get('videoRenderer')
  465. if not isinstance(video, dict):
  466. continue
  467. video_id = video.get('videoId')
  468. if not video_id:
  469. continue
  470. yield self._extract_video(video)
  471. token = try_get(
  472. slr_contents,
  473. lambda x: x[-1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
  474. compat_str)
  475. if not token:
  476. break
  477. data['continuation'] = token
  478. @staticmethod
  479. def _owner_endpoints_path():
  480. return [
  481. Ellipsis,
  482. lambda k, _: k.endswith('SecondaryInfoRenderer'),
  483. ('owner', 'videoOwner'), 'videoOwnerRenderer', 'title',
  484. 'runs', Ellipsis]
  485. def _extract_channel_id(self, webpage, videodetails={}, metadata={}, renderers=[]):
  486. channel_id = None
  487. if any((videodetails, metadata, renderers)):
  488. channel_id = (
  489. traverse_obj(videodetails, 'channelId')
  490. or traverse_obj(metadata, 'externalChannelId', 'externalId')
  491. or traverse_obj(renderers,
  492. self._owner_endpoints_path() + [
  493. 'navigationEndpoint', 'browseEndpoint', 'browseId'],
  494. get_all=False)
  495. )
  496. return channel_id or self._html_search_meta(
  497. 'channelId', webpage, 'channel id', default=None)
  498. def _extract_author_var(self, webpage, var_name,
  499. videodetails={}, metadata={}, renderers=[]):
  500. result = None
  501. paths = {
  502. # (HTML, videodetails, metadata, renderers)
  503. 'name': ('content', 'author', (('ownerChannelName', None), 'title'), ['text']),
  504. 'url': ('href', 'ownerProfileUrl', 'vanityChannelUrl',
  505. ['navigationEndpoint', 'browseEndpoint', 'canonicalBaseUrl']),
  506. }
  507. if any((videodetails, metadata, renderers)):
  508. result = (
  509. traverse_obj(videodetails, paths[var_name][1], get_all=False)
  510. or traverse_obj(metadata, paths[var_name][2], get_all=False)
  511. or traverse_obj(renderers,
  512. self._owner_endpoints_path() + paths[var_name][3],
  513. get_all=False)
  514. )
  515. return result or traverse_obj(
  516. extract_attributes(self._search_regex(
  517. r'''(?s)(<link\b[^>]+\bitemprop\s*=\s*("|')%s\2[^>]*>)'''
  518. % re.escape(var_name),
  519. get_element_by_attribute('itemprop', 'author', webpage or '') or '',
  520. 'author link', default='')),
  521. paths[var_name][0])
  522. @staticmethod
  523. def _yt_urljoin(url_or_path):
  524. return urljoin('https://www.youtube.com', url_or_path)
  525. def _extract_uploader_id(self, uploader_url):
  526. return self._search_regex(
  527. r'/(?:(?:channel|user)/|(?=@))([^/?&#]+)', uploader_url or '',
  528. 'uploader id', default=None)
  529. class YoutubeIE(YoutubeBaseInfoExtractor):
  530. IE_DESC = 'YouTube.com'
  531. _INVIDIOUS_SITES = (
  532. # invidious-redirect websites
  533. r'(?:www\.)?redirect\.invidious\.io',
  534. r'(?:(?:www|dev)\.)?invidio\.us',
  535. # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
  536. r'(?:(?:www|no)\.)?invidiou\.sh',
  537. r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
  538. r'(?:www\.)?invidious\.kabi\.tk',
  539. r'(?:www\.)?invidious\.13ad\.de',
  540. r'(?:www\.)?invidious\.mastodon\.host',
  541. r'(?:www\.)?invidious\.zapashcanon\.fr',
  542. r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
  543. r'(?:www\.)?invidious\.tinfoil-hat\.net',
  544. r'(?:www\.)?invidious\.himiko\.cloud',
  545. r'(?:www\.)?invidious\.reallyancient\.tech',
  546. r'(?:www\.)?invidious\.tube',
  547. r'(?:www\.)?invidiou\.site',
  548. r'(?:www\.)?invidious\.site',
  549. r'(?:www\.)?invidious\.xyz',
  550. r'(?:www\.)?invidious\.nixnet\.xyz',
  551. r'(?:www\.)?invidious\.048596\.xyz',
  552. r'(?:www\.)?invidious\.drycat\.fr',
  553. r'(?:www\.)?inv\.skyn3t\.in',
  554. r'(?:www\.)?tube\.poal\.co',
  555. r'(?:www\.)?tube\.connect\.cafe',
  556. r'(?:www\.)?vid\.wxzm\.sx',
  557. r'(?:www\.)?vid\.mint\.lgbt',
  558. r'(?:www\.)?vid\.puffyan\.us',
  559. r'(?:www\.)?yewtu\.be',
  560. r'(?:www\.)?yt\.elukerio\.org',
  561. r'(?:www\.)?yt\.lelux\.fi',
  562. r'(?:www\.)?invidious\.ggc-project\.de',
  563. r'(?:www\.)?yt\.maisputain\.ovh',
  564. r'(?:www\.)?ytprivate\.com',
  565. r'(?:www\.)?invidious\.13ad\.de',
  566. r'(?:www\.)?invidious\.toot\.koeln',
  567. r'(?:www\.)?invidious\.fdn\.fr',
  568. r'(?:www\.)?watch\.nettohikari\.com',
  569. r'(?:www\.)?invidious\.namazso\.eu',
  570. r'(?:www\.)?invidious\.silkky\.cloud',
  571. r'(?:www\.)?invidious\.exonip\.de',
  572. r'(?:www\.)?invidious\.riverside\.rocks',
  573. r'(?:www\.)?invidious\.blamefran\.net',
  574. r'(?:www\.)?invidious\.moomoo\.de',
  575. r'(?:www\.)?ytb\.trom\.tf',
  576. r'(?:www\.)?yt\.cyberhost\.uk',
  577. r'(?:www\.)?kgg2m7yk5aybusll\.onion',
  578. r'(?:www\.)?qklhadlycap4cnod\.onion',
  579. r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
  580. r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
  581. r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
  582. r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
  583. r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
  584. r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
  585. r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
  586. r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
  587. r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
  588. r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
  589. )
  590. _VALID_URL = r"""(?x)^
  591. (
  592. (?:https?://|//) # http(s):// or protocol-independent URL
  593. (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
  594. (?:www\.)?deturl\.com/www\.youtube\.com|
  595. (?:www\.)?pwnyoutube\.com|
  596. (?:www\.)?hooktube\.com|
  597. (?:www\.)?yourepeat\.com|
  598. tube\.majestyc\.net|
  599. %(invidious)s|
  600. youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
  601. (?:.*?\#/)? # handle anchor (#/) redirect urls
  602. (?: # the various things that can precede the ID:
  603. (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
  604. |shorts/
  605. |(?: # or the v= param in all its forms
  606. (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
  607. (?:\?|\#!?) # the params delimiter ? or # or #!
  608. (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
  609. v=
  610. )
  611. ))
  612. |(?:
  613. youtu\.be| # just youtu.be/xxxx
  614. vid\.plus| # or vid.plus/xxxx
  615. zwearz\.com/watch| # or zwearz.com/watch/xxxx
  616. %(invidious)s
  617. )/
  618. |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
  619. )
  620. )? # all until now is optional -> you can pass the naked ID
  621. (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
  622. (?(1).+)? # if we found the ID, everything can follow
  623. $""" % {
  624. 'invidious': '|'.join(_INVIDIOUS_SITES),
  625. }
  626. _PLAYER_INFO_RE = (
  627. r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/(?:tv-)?player',
  628. r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias(?:_tce)?\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
  629. r'\b(?P<id>vfl[a-zA-Z0-9_-]{6,})\b.*?\.js$',
  630. )
  631. _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
  632. _GEO_BYPASS = False
  633. IE_NAME = 'youtube'
  634. _TESTS = [
  635. {
  636. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
  637. 'info_dict': {
  638. 'id': 'BaW_jenozKc',
  639. 'ext': 'mp4',
  640. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  641. 'uploader': 'Philipp Hagemeister',
  642. 'uploader_id': '@PhilippHagemeister',
  643. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@PhilippHagemeister',
  644. 'channel': 'Philipp Hagemeister',
  645. 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
  646. 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
  647. 'upload_date': '20121002',
  648. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  649. 'categories': ['Science & Technology'],
  650. 'tags': ['youtube-dl'],
  651. 'duration': 10,
  652. 'view_count': int,
  653. 'like_count': int,
  654. 'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
  655. 'start_time': 1,
  656. 'end_time': 9,
  657. },
  658. },
  659. {
  660. 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
  661. 'note': 'Embed-only video (#1746)',
  662. 'info_dict': {
  663. 'id': 'yZIXLfi8CZQ',
  664. 'ext': 'mp4',
  665. 'upload_date': '20120608',
  666. 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
  667. 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
  668. 'uploader': 'SET India',
  669. 'uploader_id': 'setindia',
  670. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
  671. 'age_limit': 18,
  672. },
  673. 'skip': 'Private video',
  674. },
  675. {
  676. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
  677. 'note': 'Use the first video ID in the URL',
  678. 'info_dict': {
  679. 'id': 'BaW_jenozKc',
  680. 'ext': 'mp4',
  681. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  682. 'uploader': 'Philipp Hagemeister',
  683. 'uploader_id': '@PhilippHagemeister',
  684. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@PhilippHagemeister',
  685. 'upload_date': '20121002',
  686. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  687. 'categories': ['Science & Technology'],
  688. 'tags': ['youtube-dl'],
  689. 'duration': 10,
  690. 'view_count': int,
  691. 'like_count': int,
  692. },
  693. 'params': {
  694. 'skip_download': True,
  695. },
  696. },
  697. {
  698. 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
  699. 'note': '256k DASH audio (format 141) via DASH manifest',
  700. 'info_dict': {
  701. 'id': 'a9LDPn-MO4I',
  702. 'ext': 'm4a',
  703. 'upload_date': '20121002',
  704. 'uploader_id': '8KVIDEO',
  705. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
  706. 'description': '',
  707. 'uploader': '8KVIDEO',
  708. 'title': 'UHDTV TEST 8K VIDEO.mp4',
  709. },
  710. 'params': {
  711. 'youtube_include_dash_manifest': True,
  712. 'format': '141',
  713. },
  714. 'skip': 'format 141 not served any more',
  715. },
  716. # DASH manifest with encrypted signature
  717. {
  718. 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
  719. 'info_dict': {
  720. 'id': 'IB3lcPjvWLA',
  721. 'ext': 'm4a',
  722. 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
  723. 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
  724. 'duration': 244,
  725. 'uploader': 'AfrojackVEVO',
  726. 'uploader_id': '@AfrojackVEVO',
  727. 'upload_date': '20131011',
  728. 'abr': 129.495,
  729. },
  730. 'params': {
  731. 'youtube_include_dash_manifest': True,
  732. 'format': '141/bestaudio[ext=m4a]',
  733. },
  734. },
  735. # Controversy video
  736. {
  737. 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
  738. 'info_dict': {
  739. 'id': 'T4XJQO3qol8',
  740. 'ext': 'mp4',
  741. 'duration': 219,
  742. 'upload_date': '20100909',
  743. 'uploader': 'Amazing Atheist',
  744. 'uploader_id': '@theamazingatheist',
  745. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@theamazingatheist',
  746. 'title': 'Burning Everyone\'s Koran',
  747. 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms \r\n\r\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
  748. },
  749. },
  750. # Age-gated videos
  751. {
  752. 'note': 'Age-gated video (No vevo, embed allowed)',
  753. 'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
  754. 'info_dict': {
  755. 'id': 'HtVdAasjOgU',
  756. 'ext': 'mp4',
  757. 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
  758. 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
  759. 'duration': 142,
  760. 'uploader': 'The Witcher',
  761. 'uploader_id': '@thewitcher',
  762. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@thewitcher',
  763. 'upload_date': '20140605',
  764. 'thumbnail': 'https://i.ytimg.com/vi/HtVdAasjOgU/maxresdefault.jpg',
  765. 'age_limit': 18,
  766. 'categories': ['Gaming'],
  767. 'tags': 'count:17',
  768. 'channel': 'The Witcher',
  769. 'channel_url': 'https://www.youtube.com/channel/UCzybXLxv08IApdjdN0mJhEg',
  770. 'channel_id': 'UCzybXLxv08IApdjdN0mJhEg',
  771. 'view_count': int,
  772. 'like_count': int,
  773. },
  774. },
  775. {
  776. 'note': 'Age-gated video with embed allowed in public site',
  777. 'url': 'https://youtube.com/watch?v=HsUATh_Nc2U',
  778. 'info_dict': {
  779. 'id': 'HsUATh_Nc2U',
  780. 'ext': 'mp4',
  781. 'title': 'Godzilla 2 (Official Video)',
  782. 'description': 'md5:bf77e03fcae5529475e500129b05668a',
  783. 'duration': 177,
  784. 'uploader': 'FlyingKitty',
  785. 'uploader_id': '@FlyingKitty900',
  786. 'upload_date': '20200408',
  787. 'thumbnail': 'https://i.ytimg.com/vi/HsUATh_Nc2U/maxresdefault.jpg',
  788. 'age_limit': 18,
  789. 'categories': ['Entertainment'],
  790. 'tags': ['Flyingkitty', 'godzilla 2'],
  791. 'channel': 'FlyingKitty',
  792. 'channel_url': 'https://www.youtube.com/channel/UCYQT13AtrJC0gsM1far_zJg',
  793. 'channel_id': 'UCYQT13AtrJC0gsM1far_zJg',
  794. 'view_count': int,
  795. 'like_count': int,
  796. },
  797. },
  798. {
  799. 'note': 'Age-gated video embeddable only with clientScreen=EMBED',
  800. 'url': 'https://youtube.com/watch?v=Tq92D6wQ1mg',
  801. 'info_dict': {
  802. 'id': 'Tq92D6wQ1mg',
  803. 'ext': 'mp4',
  804. 'title': '[MMD] Adios - EVERGLOW [+Motion DL]',
  805. 'description': 'md5:17eccca93a786d51bc67646756894066',
  806. 'duration': 106,
  807. 'uploader': 'Projekt Melody',
  808. 'uploader_id': '@ProjektMelody',
  809. 'upload_date': '20191227',
  810. 'age_limit': 18,
  811. 'thumbnail': 'https://i.ytimg.com/vi/Tq92D6wQ1mg/sddefault.jpg',
  812. 'tags': ['mmd', 'dance', 'mikumikudance', 'kpop', 'vtuber'],
  813. 'categories': ['Entertainment'],
  814. 'channel': 'Projekt Melody',
  815. 'channel_url': 'https://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
  816. 'channel_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
  817. 'view_count': int,
  818. 'like_count': int,
  819. },
  820. },
  821. {
  822. 'note': 'Non-Age-gated non-embeddable video',
  823. 'url': 'https://youtube.com/watch?v=MeJVWBSsPAY',
  824. 'info_dict': {
  825. 'id': 'MeJVWBSsPAY',
  826. 'ext': 'mp4',
  827. 'title': 'OOMPH! - Such Mich Find Mich (Lyrics)',
  828. 'description': 'Fan Video. Music & Lyrics by OOMPH!.',
  829. 'duration': 210,
  830. 'upload_date': '20130730',
  831. 'uploader': 'Herr Lurik',
  832. 'uploader_id': '@HerrLurik',
  833. 'uploader_url': 'http://www.youtube.com/@HerrLurik',
  834. 'age_limit': 0,
  835. 'thumbnail': 'https://i.ytimg.com/vi/MeJVWBSsPAY/hqdefault.jpg',
  836. 'tags': ['oomph', 'such mich find mich', 'lyrics', 'german industrial', 'musica industrial'],
  837. 'categories': ['Music'],
  838. 'channel': 'Herr Lurik',
  839. 'channel_url': 'https://www.youtube.com/channel/UCdR3RSDPqub28LjZx0v9-aA',
  840. 'channel_id': 'UCdR3RSDPqub28LjZx0v9-aA',
  841. 'artist': 'OOMPH!',
  842. 'view_count': int,
  843. 'like_count': int,
  844. },
  845. },
  846. {
  847. 'note': 'Non-bypassable age-gated video',
  848. 'url': 'https://youtube.com/watch?v=Cr381pDsSsA',
  849. 'only_matching': True,
  850. },
  851. {
  852. 'note': 'Age-gated video only available with authentication (not via embed workaround)',
  853. 'url': 'XgnwCQzjau8',
  854. 'only_matching': True,
  855. 'skip': '''This video has been removed for violating YouTube's Community Guidelines''',
  856. },
  857. # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
  858. # YouTube Red ad is not captured for creator
  859. {
  860. 'url': '__2ABJjxzNo',
  861. 'info_dict': {
  862. 'id': '__2ABJjxzNo',
  863. 'ext': 'mp4',
  864. 'duration': 266,
  865. 'upload_date': '20100430',
  866. 'uploader_id': '@deadmau5',
  867. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@deadmau5',
  868. 'creator': 'deadmau5',
  869. 'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
  870. 'uploader': 'deadmau5',
  871. 'title': 'Deadmau5 - Some Chords (HD)',
  872. 'alt_title': 'Some Chords',
  873. },
  874. 'expected_warnings': [
  875. 'DASH manifest missing',
  876. ],
  877. },
  878. # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
  879. {
  880. 'url': 'lqQg6PlCWgI',
  881. 'info_dict': {
  882. 'id': 'lqQg6PlCWgI',
  883. 'ext': 'mp4',
  884. 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
  885. 'description': r're:(?s)(?:.+\s)?HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games\s*',
  886. 'duration': 6085,
  887. 'upload_date': '20150827',
  888. 'uploader_id': '@Olympics',
  889. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@Olympics',
  890. 'uploader': r're:Olympics?',
  891. 'age_limit': 0,
  892. 'thumbnail': 'https://i.ytimg.com/vi/lqQg6PlCWgI/maxresdefault.jpg',
  893. 'categories': ['Sports'],
  894. 'tags': ['Hockey', '2012-07-31', '31 July 2012', 'Riverbank Arena', 'Session', 'Olympics', 'Olympic Games', 'London 2012', '2012 Summer Olympics', 'Summer Games'],
  895. 'channel': 'Olympics',
  896. 'channel_url': 'https://www.youtube.com/channel/UCTl3QQTvqHFjurroKxexy2Q',
  897. 'channel_id': 'UCTl3QQTvqHFjurroKxexy2Q',
  898. 'view_count': int,
  899. 'like_count': int,
  900. },
  901. },
  902. # Non-square pixels
  903. {
  904. 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
  905. 'info_dict': {
  906. 'id': '_b-2C3KPAM0',
  907. 'ext': 'mp4',
  908. 'stretched_ratio': 16 / 9.,
  909. 'duration': 85,
  910. 'upload_date': '20110310',
  911. 'uploader_id': '@AllenMeow',
  912. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@AllenMeow',
  913. 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
  914. 'uploader': '孫ᄋᄅ',
  915. 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
  916. },
  917. },
  918. # url_encoded_fmt_stream_map is empty string
  919. {
  920. 'url': 'qEJwOuvDf7I',
  921. 'info_dict': {
  922. 'id': 'qEJwOuvDf7I',
  923. 'ext': 'webm',
  924. 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
  925. 'description': '',
  926. 'upload_date': '20150404',
  927. 'uploader_id': 'spbelect',
  928. 'uploader': 'Наблюдатели Петербурга',
  929. },
  930. 'params': {
  931. 'skip_download': 'requires avconv',
  932. },
  933. 'skip': 'This live event has ended.',
  934. },
  935. # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
  936. {
  937. 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
  938. 'info_dict': {
  939. 'id': 'FIl7x6_3R5Y',
  940. 'ext': 'webm',
  941. 'title': 'md5:7b81415841e02ecd4313668cde88737a',
  942. 'description': 'md5:116377fd2963b81ec4ce64b542173306',
  943. 'duration': 220,
  944. 'upload_date': '20150625',
  945. 'uploader_id': 'dorappi2000',
  946. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
  947. 'uploader': 'dorappi2000',
  948. 'formats': 'mincount:31',
  949. },
  950. 'skip': 'not actual any more',
  951. },
  952. # DASH manifest with segment_list
  953. {
  954. 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
  955. 'md5': '8ce563a1d667b599d21064e982ab9e31',
  956. 'info_dict': {
  957. 'id': 'CsmdDsKjzN8',
  958. 'ext': 'mp4',
  959. 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
  960. 'uploader': 'Airtek',
  961. 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
  962. 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
  963. 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
  964. },
  965. 'params': {
  966. 'youtube_include_dash_manifest': True,
  967. 'format': '135', # bestvideo
  968. },
  969. 'skip': 'This live event has ended.',
  970. },
  971. {
  972. # Multifeed videos (multiple cameras), URL is for Main Camera
  973. 'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
  974. 'info_dict': {
  975. 'id': 'jvGDaLqkpTg',
  976. 'title': 'Tom Clancy Free Weekend Rainbow Whatever',
  977. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  978. },
  979. 'playlist': [{
  980. 'info_dict': {
  981. 'id': 'jvGDaLqkpTg',
  982. 'ext': 'mp4',
  983. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
  984. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  985. 'duration': 10643,
  986. 'upload_date': '20161111',
  987. 'uploader': 'Team PGP',
  988. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  989. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  990. },
  991. }, {
  992. 'info_dict': {
  993. 'id': '3AKt1R1aDnw',
  994. 'ext': 'mp4',
  995. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
  996. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  997. 'duration': 10991,
  998. 'upload_date': '20161111',
  999. 'uploader': 'Team PGP',
  1000. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  1001. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  1002. },
  1003. }, {
  1004. 'info_dict': {
  1005. 'id': 'RtAMM00gpVc',
  1006. 'ext': 'mp4',
  1007. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
  1008. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  1009. 'duration': 10995,
  1010. 'upload_date': '20161111',
  1011. 'uploader': 'Team PGP',
  1012. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  1013. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  1014. },
  1015. }, {
  1016. 'info_dict': {
  1017. 'id': '6N2fdlP3C5U',
  1018. 'ext': 'mp4',
  1019. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
  1020. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  1021. 'duration': 10990,
  1022. 'upload_date': '20161111',
  1023. 'uploader': 'Team PGP',
  1024. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  1025. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  1026. },
  1027. }],
  1028. 'params': {
  1029. 'skip_download': True,
  1030. },
  1031. 'skip': 'Not multifeed any more',
  1032. },
  1033. {
  1034. # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
  1035. 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
  1036. 'info_dict': {
  1037. 'id': 'gVfLd0zydlo',
  1038. 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
  1039. },
  1040. 'playlist_count': 2,
  1041. 'skip': 'Not multifeed any more',
  1042. },
  1043. {
  1044. 'url': 'https://vid.plus/FlRa-iH7PGw',
  1045. 'only_matching': True,
  1046. },
  1047. {
  1048. 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
  1049. 'only_matching': True,
  1050. },
  1051. {
  1052. # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  1053. # Also tests cut-off URL expansion in video description (see
  1054. # https://github.com/ytdl-org/youtube-dl/issues/1892,
  1055. # https://github.com/ytdl-org/youtube-dl/issues/8164)
  1056. 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
  1057. 'info_dict': {
  1058. 'id': 'lsguqyKfVQg',
  1059. 'ext': 'mp4',
  1060. 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
  1061. 'alt_title': 'Dark Walk',
  1062. 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
  1063. 'duration': 133,
  1064. 'upload_date': '20151119',
  1065. 'uploader_id': '@IronSoulElf',
  1066. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@IronSoulElf',
  1067. 'uploader': 'IronSoulElf',
  1068. 'creator': r're:Todd Haberman[;,]\s+Daniel Law Heath and Aaron Kaplan',
  1069. 'track': 'Dark Walk',
  1070. 'artist': r're:Todd Haberman[;,]\s+Daniel Law Heath and Aaron Kaplan',
  1071. 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
  1072. },
  1073. 'params': {
  1074. 'skip_download': True,
  1075. },
  1076. },
  1077. {
  1078. # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  1079. 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
  1080. 'only_matching': True,
  1081. },
  1082. {
  1083. # Video with yt:stretch=17:0
  1084. 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
  1085. 'info_dict': {
  1086. 'id': 'Q39EVAstoRM',
  1087. 'ext': 'mp4',
  1088. 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
  1089. 'description': 'md5:ee18a25c350637c8faff806845bddee9',
  1090. 'upload_date': '20151107',
  1091. 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
  1092. 'uploader': 'CH GAMER DROID',
  1093. },
  1094. 'params': {
  1095. 'skip_download': True,
  1096. },
  1097. 'skip': 'This video does not exist.',
  1098. },
  1099. {
  1100. # Video with incomplete 'yt:stretch=16:'
  1101. 'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
  1102. 'only_matching': True,
  1103. },
  1104. {
  1105. # Video licensed under Creative Commons
  1106. 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
  1107. 'info_dict': {
  1108. 'id': 'M4gD1WSo5mA',
  1109. 'ext': 'mp4',
  1110. 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
  1111. 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
  1112. 'duration': 721,
  1113. 'upload_date': '20150127',
  1114. 'uploader_id': '@BKCHarvard',
  1115. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@BKCHarvard',
  1116. 'uploader': 'The Berkman Klein Center for Internet & Society',
  1117. 'license': 'Creative Commons Attribution license (reuse allowed)',
  1118. },
  1119. 'params': {
  1120. 'skip_download': True,
  1121. },
  1122. },
  1123. {
  1124. # Channel-like uploader_url
  1125. 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
  1126. 'info_dict': {
  1127. 'id': 'eQcmzGIKrzg',
  1128. 'ext': 'mp4',
  1129. 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
  1130. 'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
  1131. 'duration': 4060,
  1132. 'upload_date': '20151119',
  1133. 'uploader': 'Bernie Sanders',
  1134. 'uploader_id': '@BernieSanders',
  1135. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@BernieSanders',
  1136. 'license': 'Creative Commons Attribution license (reuse allowed)',
  1137. },
  1138. 'params': {
  1139. 'skip_download': True,
  1140. },
  1141. },
  1142. {
  1143. 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
  1144. 'only_matching': True,
  1145. },
  1146. {
  1147. # YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
  1148. 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
  1149. 'only_matching': True,
  1150. },
  1151. {
  1152. # Rental video preview
  1153. 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
  1154. 'info_dict': {
  1155. 'id': 'uGpuVWrhIzE',
  1156. 'ext': 'mp4',
  1157. 'title': 'Piku - Trailer',
  1158. 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
  1159. 'upload_date': '20150811',
  1160. 'uploader': 'FlixMatrix',
  1161. 'uploader_id': 'FlixMatrixKaravan',
  1162. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
  1163. 'license': 'Standard YouTube License',
  1164. },
  1165. 'params': {
  1166. 'skip_download': True,
  1167. },
  1168. 'skip': 'This video is not available.',
  1169. },
  1170. {
  1171. # YouTube Red video with episode data
  1172. 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
  1173. 'info_dict': {
  1174. 'id': 'iqKdEhx-dD4',
  1175. 'ext': 'mp4',
  1176. 'title': 'Isolation - Mind Field (Ep 1)',
  1177. 'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
  1178. 'duration': 2085,
  1179. 'upload_date': '20170118',
  1180. 'uploader': 'Vsauce',
  1181. 'uploader_id': '@Vsauce',
  1182. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@Vsauce',
  1183. 'series': 'Mind Field',
  1184. 'season_number': 1,
  1185. 'episode_number': 1,
  1186. },
  1187. 'params': {
  1188. 'skip_download': True,
  1189. },
  1190. 'expected_warnings': [
  1191. 'Skipping DASH manifest',
  1192. ],
  1193. },
  1194. {
  1195. # The following content has been identified by the YouTube community
  1196. # as inappropriate or offensive to some audiences.
  1197. 'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
  1198. 'info_dict': {
  1199. 'id': '6SJNVb0GnPI',
  1200. 'ext': 'mp4',
  1201. 'title': 'Race Differences in Intelligence',
  1202. 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
  1203. 'duration': 965,
  1204. 'upload_date': '20140124',
  1205. 'uploader': 'New Century Foundation',
  1206. 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
  1207. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
  1208. },
  1209. 'params': {
  1210. 'skip_download': True,
  1211. },
  1212. 'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
  1213. },
  1214. {
  1215. # itag 212
  1216. 'url': '1t24XAntNCY',
  1217. 'only_matching': True,
  1218. },
  1219. {
  1220. # geo restricted to JP
  1221. 'url': 'sJL6WA-aGkQ',
  1222. 'only_matching': True,
  1223. },
  1224. {
  1225. 'url': 'https://invidio.us/watch?v=BaW_jenozKc',
  1226. 'only_matching': True,
  1227. },
  1228. {
  1229. 'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
  1230. 'only_matching': True,
  1231. },
  1232. {
  1233. # from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
  1234. 'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
  1235. 'only_matching': True,
  1236. },
  1237. {
  1238. # DRM protected
  1239. 'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
  1240. 'only_matching': True,
  1241. },
  1242. {
  1243. # Video with unsupported adaptive stream type formats
  1244. 'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
  1245. 'info_dict': {
  1246. 'id': 'Z4Vy8R84T1U',
  1247. 'ext': 'mp4',
  1248. 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
  1249. 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
  1250. 'duration': 433,
  1251. 'upload_date': '20130923',
  1252. 'uploader': 'Amelia Putri Harwita',
  1253. 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
  1254. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
  1255. 'formats': 'maxcount:10',
  1256. },
  1257. 'params': {
  1258. 'skip_download': True,
  1259. 'youtube_include_dash_manifest': False,
  1260. },
  1261. 'skip': 'not actual any more',
  1262. },
  1263. {
  1264. # Youtube Music Auto-generated description
  1265. 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
  1266. 'info_dict': {
  1267. 'id': 'MgNrAu2pzNs',
  1268. 'ext': 'mp4',
  1269. 'title': 'Voyeur Girl',
  1270. 'description': 'md5:7ae382a65843d6df2685993e90a8628f',
  1271. 'upload_date': '20190312',
  1272. 'uploader': 'Stephen - Topic',
  1273. 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
  1274. 'artist': 'Stephen',
  1275. 'track': 'Voyeur Girl',
  1276. 'album': 'it\'s too much love to know my dear',
  1277. 'release_date': '20190313',
  1278. 'release_year': 2019,
  1279. },
  1280. 'params': {
  1281. 'skip_download': True,
  1282. },
  1283. },
  1284. {
  1285. 'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
  1286. 'only_matching': True,
  1287. },
  1288. {
  1289. # invalid -> valid video id redirection
  1290. 'url': 'DJztXj2GPfl',
  1291. 'info_dict': {
  1292. 'id': 'DJztXj2GPfk',
  1293. 'ext': 'mp4',
  1294. 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
  1295. 'description': 'md5:bf577a41da97918e94fa9798d9228825',
  1296. 'upload_date': '20090125',
  1297. 'uploader': 'Prochorowka',
  1298. 'uploader_id': 'Prochorowka',
  1299. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
  1300. 'artist': 'Panjabi MC',
  1301. 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
  1302. 'album': 'Beware of the Boys (Mundian To Bach Ke)',
  1303. },
  1304. 'params': {
  1305. 'skip_download': True,
  1306. },
  1307. 'skip': 'Video unavailable',
  1308. },
  1309. {
  1310. # empty description results in an empty string
  1311. 'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
  1312. 'info_dict': {
  1313. 'id': 'x41yOUIvK2k',
  1314. 'ext': 'mp4',
  1315. 'title': 'IMG 3456',
  1316. 'description': '',
  1317. 'upload_date': '20170613',
  1318. 'uploader': "l'Or Vert asbl",
  1319. 'uploader_id': '@ElevageOrVert',
  1320. },
  1321. 'params': {
  1322. 'skip_download': True,
  1323. },
  1324. },
  1325. {
  1326. # with '};' inside yt initial data (see [1])
  1327. # see [2] for an example with '};' inside ytInitialPlayerResponse
  1328. # 1. https://github.com/ytdl-org/youtube-dl/issues/27093
  1329. # 2. https://github.com/ytdl-org/youtube-dl/issues/27216
  1330. 'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
  1331. 'info_dict': {
  1332. 'id': 'CHqg6qOn4no',
  1333. 'ext': 'mp4',
  1334. 'title': 'Part 77 Sort a list of simple types in c#',
  1335. 'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
  1336. 'upload_date': '20130831',
  1337. 'uploader': 'kudvenkat',
  1338. 'uploader_id': '@Csharp-video-tutorialsBlogspot',
  1339. },
  1340. 'params': {
  1341. 'skip_download': True,
  1342. },
  1343. },
  1344. {
  1345. # another example of '};' in ytInitialData
  1346. 'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
  1347. 'only_matching': True,
  1348. },
  1349. {
  1350. 'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
  1351. 'only_matching': True,
  1352. },
  1353. {
  1354. # https://github.com/ytdl-org/youtube-dl/pull/28094
  1355. 'url': 'OtqTfy26tG0',
  1356. 'info_dict': {
  1357. 'id': 'OtqTfy26tG0',
  1358. 'ext': 'mp4',
  1359. 'title': 'Burn Out',
  1360. 'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
  1361. 'upload_date': '20141120',
  1362. 'uploader': 'The Cinematic Orchestra - Topic',
  1363. 'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
  1364. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
  1365. 'artist': 'The Cinematic Orchestra',
  1366. 'track': 'Burn Out',
  1367. 'album': 'Every Day',
  1368. 'release_data': None,
  1369. 'release_year': None,
  1370. },
  1371. 'params': {
  1372. 'skip_download': True,
  1373. },
  1374. },
  1375. {
  1376. # controversial video, only works with bpctr when authenticated with cookies
  1377. 'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
  1378. 'only_matching': True,
  1379. },
  1380. {
  1381. # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
  1382. 'url': 'cBvYw8_A0vQ',
  1383. 'info_dict': {
  1384. 'id': 'cBvYw8_A0vQ',
  1385. 'ext': 'mp4',
  1386. 'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
  1387. 'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
  1388. 'upload_date': '20201120',
  1389. 'uploader': 'Walk around Japan',
  1390. 'uploader_id': '@walkaroundjapan7124',
  1391. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@walkaroundjapan7124',
  1392. },
  1393. 'params': {
  1394. 'skip_download': True,
  1395. },
  1396. },
  1397. {
  1398. # YT 'Shorts'
  1399. 'url': 'https://youtube.com/shorts/4L2J27mJ3Dc',
  1400. 'info_dict': {
  1401. 'id': '4L2J27mJ3Dc',
  1402. 'ext': 'mp4',
  1403. 'title': 'Midwest Squid Game #Shorts',
  1404. 'description': 'md5:976512b8a29269b93bbd8a61edc45a6d',
  1405. 'upload_date': '20211025',
  1406. 'uploader': 'Charlie Berens',
  1407. 'uploader_id': '@CharlieBerens',
  1408. },
  1409. 'params': {
  1410. 'skip_download': True,
  1411. },
  1412. },
  1413. ]
    # Known format attributes keyed by YouTube itag (format id).
    # Entries only list properties that are fixed for the given itag;
    # anything absent is presumably taken from the stream metadata at
    # extraction time (TODO confirm against the format-building code).
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
        '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
        '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
        '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
        '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
        '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        # 3D videos (de-prioritized via negative 'preference')
        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
        # Apple HTTP Live Streaming
        '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
        # DASH mp4 video
        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'},  # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
        '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
        '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
        # Dash mp4 audio
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
        '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
        '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
        '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
        '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
        # Dash webm
        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        # Dash webm audio
        '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
        '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
        # Dash webm audio with opus inside
        '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
        '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
        '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
        # RTMP (unnamed)
        '_rtmp': {'protocol': 'rtmp'},
        # av01 video only formats sometimes served with "unknown" codecs
        '394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
        '395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
        '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
        '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
    }
  1509. @classmethod
  1510. def suitable(cls, url):
  1511. if parse_qs(url).get('list', [None])[0]:
  1512. return False
  1513. return super(YoutubeIE, cls).suitable(url)
  1514. def __init__(self, *args, **kwargs):
  1515. super(YoutubeIE, self).__init__(*args, **kwargs)
  1516. self._code_cache = {}
  1517. self._player_cache = {}
  1518. # *ytcfgs, webpage=None
  1519. def _extract_player_url(self, *ytcfgs, **kw_webpage):
  1520. if ytcfgs and not isinstance(ytcfgs[0], dict):
  1521. webpage = kw_webpage.get('webpage') or ytcfgs[0]
  1522. if webpage:
  1523. player_url = self._search_regex(
  1524. r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"',
  1525. webpage or '', 'player URL', fatal=False)
  1526. if player_url:
  1527. ytcfgs = ytcfgs + ({'PLAYER_JS_URL': player_url},)
  1528. return traverse_obj(
  1529. ytcfgs, (Ellipsis, 'PLAYER_JS_URL'), (Ellipsis, 'WEB_PLAYER_CONTEXT_CONFIGS', Ellipsis, 'jsUrl'),
  1530. get_all=False, expected_type=lambda u: urljoin('https://www.youtube.com', u))
  1531. def _download_player_url(self, video_id, fatal=False):
  1532. res = self._download_webpage(
  1533. 'https://www.youtube.com/iframe_api',
  1534. note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
  1535. player_version = self._search_regex(
  1536. r'player\\?/([0-9a-fA-F]{8})\\?/', res or '', 'player version', fatal=fatal,
  1537. default=NO_DEFAULT if res else None)
  1538. if player_version:
  1539. return 'https://www.youtube.com/s/player/{0}/player_ias.vflset/en_US/base.js'.format(player_version)
  1540. def _signature_cache_id(self, example_sig):
  1541. """ Return a string representation of a signature """
  1542. return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
  1543. def _extract_player_info(self, player_url):
  1544. try:
  1545. return self._search_regex(
  1546. self._PLAYER_INFO_RE, player_url, 'player info', group='id')
  1547. except ExtractorError as e:
  1548. raise ExtractorError(
  1549. 'Cannot identify player %r' % (player_url,), cause=e)
  1550. def _load_player(self, video_id, player_url, fatal=True, player_id=None):
  1551. if not player_id:
  1552. player_id = self._extract_player_info(player_url)
  1553. if player_id not in self._code_cache:
  1554. code = self._download_webpage(
  1555. player_url, video_id, fatal=fatal,
  1556. note='Downloading player ' + player_id,
  1557. errnote='Download of %s failed' % player_url)
  1558. if code:
  1559. self._code_cache[player_id] = code
  1560. return self._code_cache[player_id] if fatal else self._code_cache.get(player_id)
  1561. def _extract_signature_function(self, video_id, player_url, example_sig):
  1562. player_id = self._extract_player_info(player_url)
  1563. # Read from filesystem cache
  1564. func_id = 'js_{0}_{1}'.format(
  1565. player_id, self._signature_cache_id(example_sig))
  1566. assert os.path.basename(func_id) == func_id
  1567. self.write_debug('Extracting signature function {0}'.format(func_id))
  1568. cache_spec, code = self.cache.load('youtube-sigfuncs', func_id, min_ver='2025.04.07'), None
  1569. if not cache_spec:
  1570. code = self._load_player(video_id, player_url, player_id)
  1571. if code:
  1572. res = self._parse_sig_js(code)
  1573. test_string = ''.join(map(compat_chr, range(len(example_sig))))
  1574. cache_spec = [ord(c) for c in res(test_string)]
  1575. self.cache.store('youtube-sigfuncs', func_id, cache_spec)
  1576. return lambda s: ''.join(s[i] for i in cache_spec)
    def _print_sig_code(self, func, example_sig):
        # Debug helper: print Python source equivalent to the signature
        # permutation (enabled by the youtube_print_sig_code param only).
        if not self.get_param('youtube_print_sig_code'):
            return

        def gen_sig_code(idxs):
            # Compress the index list into s[...] slice expressions wherever
            # consecutive indices step by +1/-1; single indices become s[i].
            def _genslice(start, end, step):
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[{0}{1}{2}]'.format(starts, ends, steps)

            step = None
            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            for i, prev in zip(idxs[1:], idxs[:-1]):
                # walk (current, previous) pairs, tracking an open run
                if step is not None:
                    if i - prev == step:
                        continue
                    # run broken: flush the finished slice
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    # start a new ascending/descending run
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            # NOTE(review): `i` here is the last loop value — assumes idxs has
            # at least two elements, which holds for real signatures
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)

        # probe func with a string of distinct characters to read off the
        # permutation it applies
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
  1615. def _extract_sig_fn(self, jsi, funcname):
  1616. var_ay = self._search_regex(
  1617. r'''(?x)
  1618. (?:\*/|\{|\n|^)\s*(?:'[^']+'\s*;\s*)
  1619. (var\s*[\w$]+\s*=\s*(?:
  1620. ('|")(?:\\\2|(?!\2).)+\2\s*\.\s*split\(\s*('|")\W+\3\s*\)|
  1621. \[\s*(?:('|")(?:\\\4|(?!\4).)*\4\s*(?:(?=\])|,\s*))+\]
  1622. ))(?=\s*[,;])
  1623. ''', jsi.code, 'useful values', default='')
  1624. sig_fn = jsi.extract_function_code(funcname)
  1625. if var_ay:
  1626. sig_fn = (sig_fn[0], ';\n'.join((var_ay, sig_fn[1])))
  1627. return sig_fn
  1628. def _parse_sig_js(self, jscode):
  1629. # Examples where `sig` is funcname:
  1630. # sig=function(a){a=a.split(""); ... ;return a.join("")};
  1631. # ;c&&(c=sig(decodeURIComponent(c)),a.set(b,encodeURIComponent(c)));return a};
  1632. # {var l=f,m=h.sp,n=sig(decodeURIComponent(h.s));l.set(m,encodeURIComponent(n))}
  1633. # sig=function(J){J=J.split(""); ... ;return J.join("")};
  1634. # ;N&&(N=sig(decodeURIComponent(N)),J.set(R,encodeURIComponent(N)));return J};
  1635. # {var H=u,k=f.sp,v=sig(decodeURIComponent(f.s));H.set(k,encodeURIComponent(v))}
  1636. funcname = self._search_regex(
  1637. (r'\b(?P<var>[\w$]+)&&\((?P=var)=(?P<sig>[\w$]{2,})\(decodeURIComponent\((?P=var)\)\)',
  1638. r'(?P<sig>[\w$]+)\s*=\s*function\(\s*(?P<arg>[\w$]+)\s*\)\s*{\s*(?P=arg)\s*=\s*(?P=arg)\.split\(\s*""\s*\)\s*;\s*[^}]+;\s*return\s+(?P=arg)\.join\(\s*""\s*\)',
  1639. r'(?:\b|[^\w$])(?P<sig>[\w$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)(?:;[\w$]{2}\.[\w$]{2}\(a,\d+\))?',
  1640. # Old patterns
  1641. r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[\w$]+)\(',
  1642. r'\b[\w]+\s*&&\s*[\w]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[\w$]+)\(',
  1643. r'\bm=(?P<sig>[\w$]{2,})\(decodeURIComponent\(h\.s\)\)',
  1644. # Obsolete patterns
  1645. r'("|\')signature\1\s*,\s*(?P<sig>[\w$]+)\(',
  1646. r'\.sig\|\|(?P<sig>[\w$]+)\(',
  1647. r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[\w$]+)\(',
  1648. r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[\w$]+)\(',
  1649. r'\bc\s*&&\s*[\w]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[\w$]+)\('),
  1650. jscode, 'Initial JS player signature function name', group='sig')
  1651. jsi = JSInterpreter(jscode)
  1652. initial_function = self._extract_sig_fn(jsi, funcname)
  1653. func = jsi.extract_function_from_code(*initial_function)
  1654. return lambda s: func([s])
  1655. def _cached(self, func, *cache_id):
  1656. def inner(*args, **kwargs):
  1657. if cache_id not in self._player_cache:
  1658. try:
  1659. self._player_cache[cache_id] = func(*args, **kwargs)
  1660. except ExtractorError as e:
  1661. self._player_cache[cache_id] = e
  1662. except Exception as e:
  1663. self._player_cache[cache_id] = ExtractorError(traceback.format_exc(), cause=e)
  1664. ret = self._player_cache[cache_id]
  1665. if isinstance(ret, Exception):
  1666. raise ret
  1667. return ret
  1668. return inner
  1669. def _decrypt_signature(self, s, video_id, player_url):
  1670. """Turn the encrypted s field into a working signature"""
  1671. extract_sig = self._cached(
  1672. self._extract_signature_function, 'sig', player_url, self._signature_cache_id(s))
  1673. func = extract_sig(video_id, player_url, s)
  1674. self._print_sig_code(func, s)
  1675. return func(s)
  1676. # from yt-dlp
  1677. # See also:
  1678. # 1. https://github.com/ytdl-org/youtube-dl/issues/29326#issuecomment-894619419
  1679. # 2. https://code.videolan.org/videolan/vlc/-/blob/4fb284e5af69aa9ac2100ccbdd3b88debec9987f/share/lua/playlist/youtube.lua#L116
  1680. # 3. https://github.com/ytdl-org/youtube-dl/issues/30097#issuecomment-950157377
  1681. def _decrypt_nsig(self, n, video_id, player_url):
  1682. """Turn the encrypted n field into a working signature"""
  1683. if player_url is None:
  1684. raise ExtractorError('Cannot decrypt nsig without player_url')
  1685. try:
  1686. jsi, player_id, func_code = self._extract_n_function_code(video_id, player_url)
  1687. except ExtractorError as e:
  1688. raise ExtractorError('Unable to extract nsig function code', cause=e)
  1689. if self.get_param('youtube_print_sig_code'):
  1690. self.to_screen('Extracted nsig function from {0}:\n{1}\n'.format(
  1691. player_id, func_code[1]))
  1692. try:
  1693. extract_nsig = self._cached(self._extract_n_function_from_code, 'nsig func', player_url)
  1694. ret = extract_nsig(jsi, func_code)(n)
  1695. except JSInterpreter.Exception as e:
  1696. self.report_warning(
  1697. '%s (%s %s)' % (
  1698. 'Unable to decode n-parameter: expect download to be blocked or throttled',
  1699. error_to_compat_str(e),
  1700. traceback.format_exc()),
  1701. video_id=video_id)
  1702. return
  1703. self.write_debug('Decrypted nsig {0} => {1}'.format(n, ret))
  1704. return ret
    def _extract_n_function_name(self, jscode):
        # Find the name of the function that descrambles the "n" parameter.
        # It may be referenced directly or via `narray[idx]`; in the latter
        # case the function-array literal is parsed and the element returned.
        func_name, idx = None, None

        # these special cases are redundant and probably obsolete (2025-04):
        # they make the tests run ~10% faster without fallback warnings
        r"""
        func_name, idx = self._search_regex(
            # (y=NuD(),Mw(k),q=k.Z[y]||null)&&(q=narray[idx](q),k.set(y,q),k.V||NuD(''))}};
            # (R="nn"[+J.Z],mW(J),N=J.K[R]||null)&&(N=narray[idx](N),J.set(R,N))}};
            # or: (b=String.fromCharCode(110),c=a.get(b))&&c=narray[idx](c)
            # or: (b="nn"[+a.D],c=a.get(b))&&(c=narray[idx](c)
            # or: (PL(a),b=a.j.n||null)&&(b=narray[idx](b)
            # or: (b="nn"[+a.D],vL(a),c=a.j[b]||null)&&(c=narray[idx](c),a.set(b,c),narray.length||nfunc("")
            # old: (b=a.get("n"))&&(b=narray[idx](b)(?P<c>[a-z])\s*=\s*[a-z]\s*
            # older: (b=a.get("n"))&&(b=nfunc(b)
            r'''(?x)
                # (expr, ...,
                \((?:(?:\s*[\w$]+\s*=)?(?:[\w$"+\.\s(\[]+(?:[)\]]\s*)?),)*
                # b=...
                (?P<b>[\w$]+)\s*=\s*(?!(?P=b)[^\w$])[\w$]+\s*(?:(?:
                    \.\s*[\w$]+ |
                    \[\s*[\w$]+\s*\] |
                    \.\s*get\s*\(\s*[\w$"]+\s*\)
                )\s*){,2}(?:\s*\|\|\s*null(?=\s*\)))?\s*
                \)\s*&&\s*\(  # ...)&&(
                # b = nfunc, b = narray[idx]
                (?P=b)\s*=\s*(?P<nfunc>[\w$]+)\s*
                (?:\[\s*(?P<idx>[\w$]+)\s*\]\s*)?
                # (...)
                \(\s*[\w$]+\s*\)
            ''', jscode, 'Initial JS player n function name', group=('nfunc', 'idx'),
            default=(None, None))
        """

        if not func_name:
            # nfunc=function(x){...}|function nfunc(x); ...
            # ... var y=[nfunc]|y[idx]=nfunc);
            # obvious REs hang, so use a two-stage tactic
            # Stage 1: find candidate names assigned into a var/array;
            # Stage 2: confirm the candidate is defined as a one-arg function
            # that does not immediately return (i.e. does real work).
            for m in re.finditer(r'''(?x)
                    [\n;]var\s(?:(?:(?!,).)+,|\s)*?(?!\d)[\w$]+(?:\[(?P<idx>\d+)\])?\s*=\s*
                        (?(idx)|\[\s*)(?P<nfunc>(?!\d)[\w$]+)(?(idx)|\s*\])
                    \s*?[;\n]
                    ''', jscode):
                func_name = self._search_regex(
                    r'[;,]\s*(function\s+)?({0})(?(1)|\s*=\s*function)\s*\((?!\d)[\w$]+\)\s*\{1}(?!\s*return\s)'.format(
                        re.escape(m.group('nfunc')), '{'),
                    jscode, 'Initial JS player n function name (2)', group=2, default=None)
                if func_name:
                    idx = m.group('idx')
                    break

        # thx bashonly: yt-dlp/yt-dlp/pull/10611
        if not func_name:
            # last resort: look for the function by its characteristic
            # failure markers instead of by reference
            self.report_warning('Falling back to generic n function search', only_once=True)
            return self._search_regex(
                r'''(?xs)
                    (?:(?<=[^\w$])|^)       # instead of \b, which ignores $
                    (?P<name>(?!\d)[a-zA-Z\d_$]+)\s*=\s*function\((?!\d)[a-zA-Z\d_$]+\)
                    \s*\{(?:(?!};).)+?(?:
                        ["']enhanced_except_ |
                        return\s*(?P<q>"|')[a-zA-Z\d-]+_w8_(?P=q)\s*\+\s*[\w$]+
                    )
                ''', jscode, 'Initial JS player n function name', group='name')

        if not idx:
            return func_name

        # the name refers to a function array: parse the array literal and
        # return the element at idx
        return self._search_json(
            r'(?<![\w-])var\s(?:(?:(?!,).)+,|\s)*?{0}\s*='.format(re.escape(func_name)), jscode,
            'Initial JS player n function list ({0}.{1})'.format(func_name, idx),
            func_name, contains_pattern=r'\[.+\]', end_pattern='[,;]',
            transform_source=js_to_json)[int(idx)]
    def _extract_n_function_code(self, video_id, player_url):
        # Return (jsi, player_id, func_code) for the n-descrambling function,
        # preferring the on-disk cache over re-downloading the player JS.
        player_id = self._extract_player_info(player_url)
        # cached value is the previously extracted function code, or None
        func_code = self.cache.load('youtube-nsig', player_id, min_ver='2025.04.07')
        jscode = func_code or self._load_player(video_id, player_url)
        # NOTE(review): on a cache hit, JSInterpreter is constructed over the
        # cached func_code rather than real player JS; callers only use jsi
        # via extract_function_from_code(*func_code), so this appears
        # intentional — confirm against jsinterp usage.
        jsi = JSInterpreter(jscode)
        if func_code:
            return jsi, player_id, func_code
        # cache miss: locate and extract the function from the player JS
        return self._extract_n_function_code_jsi(video_id, jsi, player_id)
  1780. def _extract_n_function_code_jsi(self, video_id, jsi, player_id=None):
  1781. func_name = self._extract_n_function_name(jsi.code)
  1782. func_code = self._extract_sig_fn(jsi, func_name)
  1783. if player_id:
  1784. self.cache.store('youtube-nsig', player_id, func_code)
  1785. return jsi, player_id, func_code
  1786. def _extract_n_function_from_code(self, jsi, func_code):
  1787. func = jsi.extract_function_from_code(*func_code)
  1788. def extract_nsig(s):
  1789. try:
  1790. ret = func([s], kwargs={'_ytdl_do_not_return': s})
  1791. except JSInterpreter.Exception:
  1792. raise
  1793. except Exception as e:
  1794. raise JSInterpreter.Exception(traceback.format_exc(), cause=e)
  1795. if ret.startswith('enhanced_except_') or ret.endswith(s):
  1796. raise JSInterpreter.Exception('Signature function returned an exception')
  1797. return ret
  1798. return extract_nsig
  1799. def _unthrottle_format_urls(self, video_id, player_url, *formats):
  1800. def decrypt_nsig(n):
  1801. return self._cached(self._decrypt_nsig, 'nsig', n, player_url)
  1802. for fmt in formats:
  1803. parsed_fmt_url = compat_urllib_parse.urlparse(fmt['url'])
  1804. n_param = compat_parse_qs(parsed_fmt_url.query).get('n')
  1805. if not n_param:
  1806. continue
  1807. n_param = n_param[-1]
  1808. n_response = decrypt_nsig(n_param)(n_param, video_id, player_url)
  1809. if n_response is None:
  1810. # give up if descrambling failed
  1811. break
  1812. fmt['url'] = update_url_query(fmt['url'], {'n': n_response})
  1813. # from yt-dlp, with tweaks
  1814. def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
  1815. """
  1816. Extract signatureTimestamp (sts)
  1817. Required to tell API what sig/player version is in use.
  1818. """
  1819. sts = traverse_obj(ytcfg, 'STS', expected_type=int)
  1820. if not sts:
  1821. # Attempt to extract from player
  1822. if player_url is None:
  1823. error_msg = 'Cannot extract signature timestamp without player_url.'
  1824. if fatal:
  1825. raise ExtractorError(error_msg)
  1826. self.report_warning(error_msg)
  1827. return
  1828. code = self._load_player(video_id, player_url, fatal=fatal)
  1829. sts = int_or_none(self._search_regex(
  1830. r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code or '',
  1831. 'JS player signature timestamp', group='sts', fatal=fatal))
  1832. return sts
  1833. def _mark_watched(self, video_id, player_response):
  1834. playback_url = url_or_none(try_get(
  1835. player_response,
  1836. lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']))
  1837. if not playback_url:
  1838. return
  1839. # cpn generation algorithm is reverse engineered from base.js.
  1840. # In fact it works even with dummy cpn.
  1841. CPN_ALPHABET = string.ascii_letters + string.digits + '-_'
  1842. cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(16))
  1843. # more consistent results setting it to right before the end
  1844. qs = parse_qs(playback_url)
  1845. video_length = '{0}'.format(float((qs.get('len') or ['1.5'])[0]) - 1)
  1846. playback_url = update_url_query(
  1847. playback_url, {
  1848. 'ver': '2',
  1849. 'cpn': cpn,
  1850. 'cmt': video_length,
  1851. 'el': 'detailpage', # otherwise defaults to "shorts"
  1852. })
  1853. self._download_webpage(
  1854. playback_url, video_id, 'Marking watched',
  1855. 'Unable to mark watched', fatal=False)
  1856. @staticmethod
  1857. def _extract_urls(webpage):
  1858. # Embedded YouTube player
  1859. entries = [
  1860. unescapeHTML(mobj.group('url'))
  1861. for mobj in re.finditer(r'''(?x)
  1862. (?:
  1863. <iframe[^>]+?src=|
  1864. data-video-url=|
  1865. <embed[^>]+?src=|
  1866. embedSWF\(?:\s*|
  1867. <object[^>]+data=|
  1868. new\s+SWFObject\(
  1869. )
  1870. (["\'])
  1871. (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
  1872. (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
  1873. \1''', webpage)]
  1874. # lazyYT YouTube embed
  1875. entries.extend(list(map(
  1876. unescapeHTML,
  1877. re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
  1878. # Wordpress "YouTube Video Importer" plugin
  1879. matches = re.findall(r'''(?x)<div[^>]+
  1880. class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
  1881. data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
  1882. entries.extend(m[-1] for m in matches)
  1883. return entries
  1884. @staticmethod
  1885. def _extract_url(webpage):
  1886. urls = YoutubeIE._extract_urls(webpage)
  1887. return urls[0] if urls else None
  1888. @classmethod
  1889. def extract_id(cls, url):
  1890. mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
  1891. if mobj is None:
  1892. raise ExtractorError('Invalid URL: %s' % url)
  1893. return mobj.group(2)
  1894. def _extract_chapters_from_json(self, data, video_id, duration):
  1895. chapters_list = try_get(
  1896. data,
  1897. lambda x: x['playerOverlays']
  1898. ['playerOverlayRenderer']
  1899. ['decoratedPlayerBarRenderer']
  1900. ['decoratedPlayerBarRenderer']
  1901. ['playerBar']
  1902. ['chapteredPlayerBarRenderer']
  1903. ['chapters'],
  1904. list)
  1905. if not chapters_list:
  1906. return
  1907. def chapter_time(chapter):
  1908. return float_or_none(
  1909. try_get(
  1910. chapter,
  1911. lambda x: x['chapterRenderer']['timeRangeStartMillis'],
  1912. int),
  1913. scale=1000)
  1914. chapters = []
  1915. for next_num, chapter in enumerate(chapters_list, start=1):
  1916. start_time = chapter_time(chapter)
  1917. if start_time is None:
  1918. continue
  1919. end_time = (chapter_time(chapters_list[next_num])
  1920. if next_num < len(chapters_list) else duration)
  1921. if end_time is None:
  1922. continue
  1923. title = try_get(
  1924. chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
  1925. compat_str)
  1926. chapters.append({
  1927. 'start_time': start_time,
  1928. 'end_time': end_time,
  1929. 'title': title,
  1930. })
  1931. return chapters
  1932. def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
  1933. return self._parse_json(self._search_regex(
  1934. (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
  1935. regex), webpage, name, default='{}'), video_id, fatal=False)
  1936. def _real_extract(self, url):
  1937. url, smuggled_data = unsmuggle_url(url, {})
  1938. video_id = self._match_id(url)
  1939. base_url = self.http_scheme() + '//www.youtube.com/'
  1940. webpage_url = base_url + 'watch?v=' + video_id
  1941. webpage = self._download_webpage(
  1942. webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
  1943. player_response = None
  1944. player_url = None
  1945. if webpage:
  1946. player_response = self._extract_yt_initial_variable(
  1947. webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
  1948. video_id, 'initial player response')
  1949. is_live = traverse_obj(player_response, ('videoDetails', 'isLive'))
  1950. if False and not player_response:
  1951. player_response = self._call_api(
  1952. 'player', {'videoId': video_id}, video_id)
  1953. if True or not player_response:
  1954. origin = 'https://www.youtube.com'
  1955. pb_context = {'html5Preference': 'HTML5_PREF_WANTS'}
  1956. player_url = self._extract_player_url(webpage)
  1957. ytcfg = self._extract_ytcfg(video_id, webpage or '')
  1958. sts = self._extract_signature_timestamp(video_id, player_url, ytcfg)
  1959. if sts:
  1960. pb_context['signatureTimestamp'] = sts
  1961. client_names = traverse_obj(self._INNERTUBE_CLIENTS, (
  1962. T(dict.items), lambda _, k_v: not k_v[1].get('REQUIRE_PO_TOKEN'),
  1963. 0))[:1]
  1964. if 'web' not in client_names:
  1965. # webpage links won't download: ignore links and playability
  1966. player_response = filter_dict(
  1967. player_response or {},
  1968. lambda k, _: k not in ('streamingData', 'playabilityStatus'))
  1969. if is_live and 'ios' not in client_names:
  1970. client_names.append('ios')
  1971. headers = {
  1972. 'Sec-Fetch-Mode': 'navigate',
  1973. 'Origin': origin,
  1974. 'X-Goog-Visitor-Id': self._extract_visitor_data(ytcfg) or '',
  1975. }
  1976. auth = self._generate_sapisidhash_header(origin)
  1977. if auth is not None:
  1978. headers['Authorization'] = auth
  1979. headers['X-Origin'] = origin
  1980. for client in traverse_obj(self._INNERTUBE_CLIENTS, (client_names, T(dict))):
  1981. query = {
  1982. 'playbackContext': {
  1983. 'contentPlaybackContext': pb_context,
  1984. },
  1985. 'contentCheckOk': True,
  1986. 'racyCheckOk': True,
  1987. 'context': {
  1988. 'client': merge_dicts(
  1989. traverse_obj(client, ('INNERTUBE_CONTEXT', 'client')), {
  1990. 'hl': 'en',
  1991. 'timeZone': 'UTC',
  1992. 'utcOffsetMinutes': 0,
  1993. }),
  1994. },
  1995. 'videoId': video_id,
  1996. }
  1997. api_headers = merge_dicts(headers, traverse_obj(client, {
  1998. 'X-YouTube-Client-Name': 'INNERTUBE_CONTEXT_CLIENT_NAME',
  1999. 'X-YouTube-Client-Version': (
  2000. 'INNERTUBE_CONTEXT', 'client', 'clientVersion'),
  2001. 'User-Agent': (
  2002. 'INNERTUBE_CONTEXT', 'client', 'userAgent'),
  2003. }))
  2004. api_player_response = self._call_api(
  2005. 'player', query, video_id, fatal=False, headers=api_headers,
  2006. note=join_nonempty(
  2007. 'Downloading', traverse_obj(query, (
  2008. 'context', 'client', 'clientName')),
  2009. 'API JSON', delim=' '))
  2010. hls = traverse_obj(
  2011. (player_response, api_player_response),
  2012. (Ellipsis, 'streamingData', 'hlsManifestUrl', T(url_or_none)))
  2013. if len(hls) == 2 and not hls[0] and hls[1]:
  2014. player_response['streamingData']['hlsManifestUrl'] = hls[1]
  2015. else:
  2016. video_details = merge_dicts(*traverse_obj(
  2017. (player_response, api_player_response),
  2018. (Ellipsis, 'videoDetails', T(dict))))
  2019. player_response.update(filter_dict(
  2020. api_player_response or {}, cndn=lambda k, _: k != 'captions'))
  2021. player_response['videoDetails'] = video_details
  2022. def is_agegated(playability):
  2023. if not isinstance(playability, dict):
  2024. return
  2025. if playability.get('desktopLegacyAgeGateReason'):
  2026. return True
  2027. reasons = filter(None, (playability.get(r) for r in ('status', 'reason')))
  2028. AGE_GATE_REASONS = (
  2029. 'confirm your age', 'age-restricted', 'inappropriate', # reason
  2030. 'age_verification_required', 'age_check_required', # status
  2031. )
  2032. return any(expected in reason for expected in AGE_GATE_REASONS for reason in reasons)
  2033. def get_playability_status(response):
  2034. return try_get(response, lambda x: x['playabilityStatus'], dict) or {}
  2035. playability_status = get_playability_status(player_response)
  2036. if (is_agegated(playability_status)
  2037. and int_or_none(self._downloader.params.get('age_limit'), default=18) >= 18):
  2038. self.report_age_confirmation()
  2039. # Thanks: https://github.com/yt-dlp/yt-dlp/pull/3233
  2040. pb_context = {'html5Preference': 'HTML5_PREF_WANTS'}
  2041. # Use signatureTimestamp if available
  2042. # Thanks https://github.com/ytdl-org/youtube-dl/issues/31034#issuecomment-1160718026
  2043. player_url = self._extract_player_url(webpage)
  2044. ytcfg = self._extract_ytcfg(video_id, webpage)
  2045. sts = self._extract_signature_timestamp(video_id, player_url, ytcfg)
  2046. if sts:
  2047. pb_context['signatureTimestamp'] = sts
  2048. query = {
  2049. 'playbackContext': {'contentPlaybackContext': pb_context},
  2050. 'contentCheckOk': True,
  2051. 'racyCheckOk': True,
  2052. 'context': {
  2053. 'client': {'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER', 'clientVersion': '2.0', 'hl': 'en', 'clientScreen': 'EMBED'},
  2054. 'thirdParty': {'embedUrl': 'https://google.com'},
  2055. },
  2056. 'videoId': video_id,
  2057. }
  2058. headers = {
  2059. 'X-YouTube-Client-Name': '85',
  2060. 'X-YouTube-Client-Version': '2.0',
  2061. 'Origin': 'https://www.youtube.com',
  2062. }
  2063. video_info = self._call_api('player', query, video_id, fatal=False, headers=headers)
  2064. age_gate_status = get_playability_status(video_info)
  2065. if age_gate_status.get('status') == 'OK':
  2066. player_response = video_info
  2067. playability_status = age_gate_status
  2068. trailer_video_id = try_get(
  2069. playability_status,
  2070. lambda x: x['errorScreen']['playerLegacyDesktopYpcTrailerRenderer']['trailerVideoId'],
  2071. compat_str)
  2072. if trailer_video_id:
  2073. return self.url_result(
  2074. trailer_video_id, self.ie_key(), trailer_video_id)
  2075. def get_text(x):
  2076. if not x:
  2077. return
  2078. text = x.get('simpleText')
  2079. if text and isinstance(text, compat_str):
  2080. return text
  2081. runs = x.get('runs')
  2082. if not isinstance(runs, list):
  2083. return
  2084. return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])
  2085. search_meta = (
  2086. (lambda x: self._html_search_meta(x, webpage, default=None))
  2087. if webpage else lambda _: None)
  2088. video_details = player_response.get('videoDetails') or {}
  2089. microformat = try_get(
  2090. player_response,
  2091. lambda x: x['microformat']['playerMicroformatRenderer'],
  2092. dict) or {}
  2093. video_title = video_details.get('title') \
  2094. or get_text(microformat.get('title')) \
  2095. or search_meta(['og:title', 'twitter:title', 'title'])
  2096. video_description = video_details.get('shortDescription')
  2097. if not smuggled_data.get('force_singlefeed', False):
  2098. if not self._downloader.params.get('noplaylist'):
  2099. multifeed_metadata_list = try_get(
  2100. player_response,
  2101. lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
  2102. compat_str)
  2103. if multifeed_metadata_list:
  2104. entries = []
  2105. feed_ids = []
  2106. for feed in multifeed_metadata_list.split(','):
  2107. # Unquote should take place before split on comma (,) since textual
  2108. # fields may contain comma as well (see
  2109. # https://github.com/ytdl-org/youtube-dl/issues/8536)
  2110. feed_data = compat_parse_qs(
  2111. compat_urllib_parse_unquote_plus(feed))
  2112. def feed_entry(name):
  2113. return try_get(
  2114. feed_data, lambda x: x[name][0], compat_str)
  2115. feed_id = feed_entry('id')
  2116. if not feed_id:
  2117. continue
  2118. feed_title = feed_entry('title')
  2119. title = video_title
  2120. if feed_title:
  2121. title += ' (%s)' % feed_title
  2122. entries.append({
  2123. '_type': 'url_transparent',
  2124. 'ie_key': 'Youtube',
  2125. 'url': smuggle_url(
  2126. base_url + 'watch?v=' + feed_data['id'][0],
  2127. {'force_singlefeed': True}),
  2128. 'title': title,
  2129. })
  2130. feed_ids.append(feed_id)
  2131. self.to_screen(
  2132. 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
  2133. % (', '.join(feed_ids), video_id))
  2134. return self.playlist_result(
  2135. entries, video_id, video_title, video_description)
  2136. else:
  2137. self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
  2138. if not player_url:
  2139. player_url = self._extract_player_url(webpage)
  2140. formats = []
  2141. itags = collections.defaultdict(set)
  2142. itag_qualities = {}
  2143. q = qualities(['tiny', 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'])
  2144. CHUNK_SIZE = 10 << 20
  2145. is_live = video_details.get('isLive')
  2146. streaming_data = player_response.get('streamingData') or {}
  2147. streaming_formats = streaming_data.get('formats') or []
  2148. streaming_formats.extend(streaming_data.get('adaptiveFormats') or [])
  2149. def build_fragments(f):
  2150. return LazyList({
  2151. 'url': update_url_query(f['url'], {
  2152. 'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize'])),
  2153. })
  2154. } for range_start in range(0, f['filesize'], CHUNK_SIZE))
  2155. lower = lambda s: s.lower()
  2156. for fmt in streaming_formats:
  2157. if fmt.get('targetDurationSec'):
  2158. continue
  2159. itag = str_or_none(fmt.get('itag'))
  2160. audio_track = traverse_obj(fmt, ('audioTrack', T(dict))) or {}
  2161. quality = traverse_obj(fmt, ((
  2162. # The 3gp format (17) in android client has a quality of "small",
  2163. # but is actually worse than other formats
  2164. T(lambda _: 'tiny' if itag == 17 else None),
  2165. ('quality', T(lambda q: q if q and q != 'tiny' else None)),
  2166. ('audioQuality', T(lower)),
  2167. 'quality'), T(txt_or_none)), get_all=False)
  2168. if quality and itag:
  2169. itag_qualities[itag] = quality
  2170. # FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment
  2171. # (adding `&sq=0` to the URL) and parsing emsg box to determine the
  2172. # number of fragments that would subsequently be requested with (`&sq=N`)
  2173. if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
  2174. continue
  2175. fmt_url = fmt.get('url')
  2176. if not fmt_url:
  2177. sc = compat_parse_qs(fmt.get('signatureCipher'))
  2178. fmt_url = traverse_obj(sc, ('url', -1, T(url_or_none)))
  2179. encrypted_sig = traverse_obj(sc, ('s', -1))
  2180. if not (fmt_url and encrypted_sig):
  2181. continue
  2182. player_url = player_url or self._extract_player_url(webpage)
  2183. if not player_url:
  2184. continue
  2185. try:
  2186. fmt_url = update_url_query(fmt_url, {
  2187. traverse_obj(sc, ('sp', -1)) or 'signature':
  2188. [self._decrypt_signature(encrypted_sig, video_id, player_url)],
  2189. })
  2190. except ExtractorError as e:
  2191. self.report_warning('Signature extraction failed: Some formats may be missing',
  2192. video_id=video_id, only_once=True)
  2193. self.write_debug(error_to_compat_str(e), only_once=True)
  2194. continue
  2195. language_preference = (
  2196. 10 if audio_track.get('audioIsDefault')
  2197. else -10 if 'descriptive' in (traverse_obj(audio_track, ('displayName', T(lower))) or '')
  2198. else -1)
  2199. name = (
  2200. traverse_obj(fmt, ('qualityLabel', T(txt_or_none)))
  2201. or quality.replace('audio_quality_', ''))
  2202. dct = {
  2203. 'format_id': join_nonempty(itag, fmt.get('isDrc') and 'drc'),
  2204. 'url': fmt_url,
  2205. # Format 22 is likely to be damaged: see https://github.com/yt-dlp/yt-dlp/issues/3372
  2206. 'source_preference': ((-5 if itag == '22' else -1)
  2207. + (100 if 'Premium' in name else 0)),
  2208. 'quality': q(quality),
  2209. 'language': join_nonempty(audio_track.get('id', '').split('.')[0],
  2210. 'desc' if language_preference < -1 else '') or None,
  2211. 'language_preference': language_preference,
  2212. # Strictly de-prioritize 3gp formats
  2213. 'preference': -2 if itag == '17' else None,
  2214. }
  2215. if itag:
  2216. itags[itag].add(('https', dct.get('language')))
  2217. self._unthrottle_format_urls(video_id, player_url, dct)
  2218. dct.update(traverse_obj(fmt, {
  2219. 'asr': ('audioSampleRate', T(int_or_none)),
  2220. 'filesize': ('contentLength', T(int_or_none)),
  2221. 'format_note': ('qualityLabel', T(lambda x: x or quality)),
  2222. # for some formats, fps is wrongly returned as 1
  2223. 'fps': ('fps', T(int_or_none), T(lambda f: f if f > 1 else None)),
  2224. 'audio_channels': ('audioChannels', T(int_or_none)),
  2225. 'height': ('height', T(int_or_none)),
  2226. 'has_drm': ('drmFamilies', T(bool)),
  2227. 'tbr': (('averageBitrate', 'bitrate'), T(lambda t: float_or_none(t, 1000))),
  2228. 'width': ('width', T(int_or_none)),
  2229. '_duration_ms': ('approxDurationMs', T(int_or_none)),
  2230. }, get_all=False))
  2231. mime_mobj = re.match(
  2232. r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '')
  2233. if mime_mobj:
  2234. dct['ext'] = mimetype2ext(mime_mobj.group(1))
  2235. dct.update(parse_codecs(mime_mobj.group(2)))
  2236. single_stream = 'none' in (dct.get(c) for c in ('acodec', 'vcodec'))
  2237. if single_stream and dct.get('ext'):
  2238. dct['container'] = dct['ext'] + '_dash'
  2239. if single_stream or itag == '17':
  2240. # avoid Youtube throttling
  2241. dct.update({
  2242. 'protocol': 'http_dash_segments',
  2243. 'fragments': build_fragments(dct),
  2244. } if dct['filesize'] else {
  2245. 'downloader_options': {'http_chunk_size': CHUNK_SIZE}, # No longer useful?
  2246. })
  2247. formats.append(dct)
  2248. def process_manifest_format(f, proto, client_name, itag, all_formats=False):
  2249. key = (proto, f.get('language'))
  2250. if not all_formats and key in itags[itag]:
  2251. return False
  2252. itags[itag].add(key)
  2253. if itag:
  2254. f['format_id'] = (
  2255. '{0}-{1}'.format(itag, proto)
  2256. if all_formats or any(p != proto for p, _ in itags[itag])
  2257. else itag)
  2258. if f.get('source_preference') is None:
  2259. f['source_preference'] = -1
  2260. if itag in ('616', '235'):
  2261. f['format_note'] = join_nonempty(f.get('format_note'), 'Premium', delim=' ')
  2262. f['source_preference'] += 100
  2263. f['quality'] = q(traverse_obj(f, (
  2264. 'format_id', T(lambda s: itag_qualities[s.split('-')[0]])), default=-1))
  2265. if try_call(lambda: f['fps'] <= 1):
  2266. del f['fps']
  2267. if proto == 'hls' and f.get('has_drm'):
  2268. f['has_drm'] = 'maybe'
  2269. f['source_preference'] -= 5
  2270. return True
  2271. hls_manifest_url = streaming_data.get('hlsManifestUrl')
  2272. if hls_manifest_url:
  2273. for f in self._extract_m3u8_formats(
  2274. hls_manifest_url, video_id, 'mp4',
  2275. entry_protocol='m3u8_native', live=is_live, fatal=False):
  2276. if process_manifest_format(
  2277. f, 'hls', None, self._search_regex(
  2278. r'/itag/(\d+)', f['url'], 'itag', default=None)):
  2279. formats.append(f)
  2280. if self._downloader.params.get('youtube_include_dash_manifest', True):
  2281. dash_manifest_url = streaming_data.get('dashManifestUrl')
  2282. if dash_manifest_url:
  2283. for f in self._extract_mpd_formats(
  2284. dash_manifest_url, video_id, fatal=False):
  2285. if process_manifest_format(
  2286. f, 'dash', None, f['format_id']):
  2287. f['filesize'] = traverse_obj(f, (
  2288. ('fragment_base_url', 'url'), T(lambda u: self._search_regex(
  2289. r'/clen/(\d+)', u, 'file size', default=None)),
  2290. T(int_or_none)), get_all=False)
  2291. formats.append(f)
  2292. playable_formats = [f for f in formats if not f.get('has_drm')]
  2293. if formats:
  2294. if not playable_formats:
  2295. # If there are no formats that definitely don't have DRM, all have DRM
  2296. self.report_drm(video_id)
  2297. formats[:] = playable_formats
  2298. else:
  2299. if streaming_data.get('licenseInfos'):
  2300. raise ExtractorError(
  2301. 'This video is DRM protected.', expected=True)
  2302. pemr = try_get(
  2303. playability_status,
  2304. lambda x: x['errorScreen']['playerErrorMessageRenderer'],
  2305. dict) or {}
  2306. reason = get_text(pemr.get('reason')) or playability_status.get('reason')
  2307. subreason = pemr.get('subreason')
  2308. if subreason:
  2309. subreason = clean_html(get_text(subreason))
  2310. if subreason == 'The uploader has not made this video available in your country.':
  2311. countries = microformat.get('availableCountries')
  2312. if not countries:
  2313. regions_allowed = search_meta('regionsAllowed')
  2314. countries = regions_allowed.split(',') if regions_allowed else None
  2315. self.raise_geo_restricted(
  2316. subreason, countries)
  2317. reason += '\n' + subreason
  2318. if reason:
  2319. raise ExtractorError(reason, expected=True)
  2320. self._sort_formats(formats)
  2321. keywords = video_details.get('keywords') or []
  2322. if not keywords and webpage:
  2323. keywords = [
  2324. unescapeHTML(m.group('content'))
  2325. for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
  2326. for keyword in keywords:
  2327. if keyword.startswith('yt:stretch='):
  2328. mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
  2329. if mobj:
  2330. # NB: float is intentional for forcing float division
  2331. w, h = (float(v) for v in mobj.groups())
  2332. if w > 0 and h > 0:
  2333. ratio = w / h
  2334. for f in formats:
  2335. if f.get('vcodec') != 'none':
  2336. f['stretched_ratio'] = ratio
  2337. break
  2338. thumbnails = []
  2339. for container in (video_details, microformat):
  2340. for thumbnail in try_get(
  2341. container,
  2342. lambda x: x['thumbnail']['thumbnails'], list) or []:
  2343. thumbnail_url = url_or_none(thumbnail.get('url'))
  2344. if not thumbnail_url:
  2345. continue
  2346. thumbnails.append({
  2347. 'height': int_or_none(thumbnail.get('height')),
  2348. 'url': update_url(thumbnail_url, query=None, fragment=None),
  2349. 'width': int_or_none(thumbnail.get('width')),
  2350. })
  2351. if thumbnails:
  2352. break
  2353. else:
  2354. thumbnail = search_meta(['og:image', 'twitter:image'])
  2355. if thumbnail:
  2356. thumbnails = [{'url': thumbnail}]
  2357. category = microformat.get('category') or search_meta('genre')
  2358. channel_id = self._extract_channel_id(
  2359. webpage, videodetails=video_details, metadata=microformat)
  2360. duration = int_or_none(
  2361. video_details.get('lengthSeconds')
  2362. or microformat.get('lengthSeconds')) \
  2363. or parse_duration(search_meta('duration'))
  2364. for f in formats:
  2365. # Some formats may have much smaller duration than others (possibly damaged during encoding)
  2366. # but avoid false positives with small duration differences.
  2367. # Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
  2368. if try_call(lambda x: float(x.pop('_duration_ms')) / duration < 500, args=(f,)):
  2369. self.report_warning(
  2370. '{0}: Some possibly damaged formats will be deprioritized'.format(video_id), only_once=True)
  2371. # Strictly de-prioritize damaged formats
  2372. f['preference'] = -10
  2373. owner_profile_url = self._yt_urljoin(self._extract_author_var(
  2374. webpage, 'url', videodetails=video_details, metadata=microformat))
  2375. uploader = self._extract_author_var(
  2376. webpage, 'name', videodetails=video_details, metadata=microformat)
  2377. info = {
  2378. 'id': video_id,
  2379. 'title': self._live_title(video_title) if is_live else video_title,
  2380. 'formats': formats,
  2381. 'thumbnails': thumbnails,
  2382. 'description': video_description,
  2383. 'upload_date': unified_strdate(
  2384. microformat.get('uploadDate')
  2385. or search_meta('uploadDate')),
  2386. 'uploader': uploader,
  2387. 'channel_id': channel_id,
  2388. 'duration': duration,
  2389. 'view_count': int_or_none(
  2390. video_details.get('viewCount')
  2391. or microformat.get('viewCount')
  2392. or search_meta('interactionCount')),
  2393. 'average_rating': float_or_none(video_details.get('averageRating')),
  2394. 'age_limit': 18 if (
  2395. microformat.get('isFamilySafe') is False
  2396. or search_meta('isFamilyFriendly') == 'false'
  2397. or search_meta('og:restrictions:age') == '18+') else 0,
  2398. 'webpage_url': webpage_url,
  2399. 'categories': [category] if category else None,
  2400. 'tags': keywords,
  2401. 'is_live': is_live,
  2402. }
  2403. pctr = traverse_obj(
  2404. (player_response, api_player_response),
  2405. (Ellipsis, 'captions', 'playerCaptionsTracklistRenderer', T(dict)))
  2406. if pctr:
  2407. def process_language(container, base_url, lang_code, query):
  2408. lang_subs = []
  2409. for fmt in self._SUBTITLE_FORMATS:
  2410. query.update({
  2411. 'fmt': fmt,
  2412. })
  2413. lang_subs.append({
  2414. 'ext': fmt,
  2415. 'url': update_url_query(base_url, query),
  2416. })
  2417. container[lang_code] = lang_subs
def process_subtitles():
    # Populate info['subtitles'] (manually created tracks) and
    # info['automatic_captions'] (ASR tracks plus their machine
    # translations) from the playerCaptionsTracklistRenderer data.
    subtitles = {}
    # only consider tracks that have both a URL and a language code
    for caption_track in traverse_obj(pctr, (
            Ellipsis, 'captionTracks', lambda _, v: (
                v.get('baseUrl') and v.get('languageCode')))):
        base_url = self._yt_urljoin(caption_track['baseUrl'])
        if not base_url:
            continue
        lang_code = caption_track['languageCode']
        if caption_track.get('kind') != 'asr':
            # manually created subtitle track
            process_language(
                subtitles, base_url, lang_code, {})
            continue
        # auto-generated (ASR) track: also offer every available
        # machine translation of it via the tlang parameter
        automatic_captions = {}
        process_language(
            automatic_captions, base_url, lang_code, {})
        for translation_language in traverse_obj(pctr, (
                Ellipsis, 'translationLanguages', lambda _, v: v.get('languageCode'))):
            translation_language_code = translation_language['languageCode']
            process_language(
                automatic_captions, base_url, translation_language_code,
                {'tlang': translation_language_code})
        info['automatic_captions'] = automatic_captions
    info['subtitles'] = subtitles
  2442. process_subtitles()
  2443. parsed_url = compat_urllib_parse_urlparse(url)
  2444. for component in (parsed_url.fragment, parsed_url.query):
  2445. query = compat_parse_qs(component)
  2446. for k, v in query.items():
  2447. for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
  2448. d_k += '_time'
  2449. if d_k not in info and k in s_ks:
  2450. info[d_k] = parse_duration(query[k][0])
  2451. if video_description:
  2452. # Youtube Music Auto-generated description
  2453. mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
  2454. if mobj:
  2455. release_year = mobj.group('release_year')
  2456. release_date = mobj.group('release_date')
  2457. if release_date:
  2458. release_date = release_date.replace('-', '')
  2459. if not release_year:
  2460. release_year = release_date[:4]
  2461. info.update({
  2462. 'album': mobj.group('album'.strip()),
  2463. 'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
  2464. 'track': mobj.group('track').strip(),
  2465. 'release_date': release_date,
  2466. 'release_year': int_or_none(release_year),
  2467. })
  2468. initial_data = None
  2469. if webpage:
  2470. initial_data = self._extract_yt_initial_variable(
  2471. webpage, self._YT_INITIAL_DATA_RE, video_id,
  2472. 'yt initial data')
  2473. if not initial_data:
  2474. initial_data = self._call_api(
  2475. 'next', {'videoId': video_id}, video_id, fatal=False)
  2476. if initial_data:
  2477. chapters = self._extract_chapters_from_json(
  2478. initial_data, video_id, duration)
  2479. if not chapters:
  2480. for engagment_pannel in (initial_data.get('engagementPanels') or []):
  2481. contents = try_get(
  2482. engagment_pannel, lambda x: x['engagementPanelSectionListRenderer']['content']['macroMarkersListRenderer']['contents'],
  2483. list)
  2484. if not contents:
  2485. continue
  2486. def chapter_time(mmlir):
  2487. return parse_duration(
  2488. get_text(mmlir.get('timeDescription')))
  2489. chapters = []
  2490. for next_num, content in enumerate(contents, start=1):
  2491. mmlir = content.get('macroMarkersListItemRenderer') or {}
  2492. start_time = chapter_time(mmlir)
  2493. end_time = chapter_time(try_get(
  2494. contents, lambda x: x[next_num]['macroMarkersListItemRenderer'])) \
  2495. if next_num < len(contents) else duration
  2496. if start_time is None or end_time is None:
  2497. continue
  2498. chapters.append({
  2499. 'start_time': start_time,
  2500. 'end_time': end_time,
  2501. 'title': get_text(mmlir.get('title')),
  2502. })
  2503. if chapters:
  2504. break
  2505. if chapters:
  2506. info['chapters'] = chapters
  2507. contents = try_get(
  2508. initial_data,
  2509. lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'],
  2510. list) or []
  2511. if not info['channel_id']:
  2512. channel_id = self._extract_channel_id('', renderers=contents)
  2513. if not info['uploader']:
  2514. info['uploader'] = self._extract_author_var('', 'name', renderers=contents)
  2515. if not owner_profile_url:
  2516. owner_profile_url = self._yt_urljoin(self._extract_author_var('', 'url', renderers=contents))
  2517. for content in contents:
  2518. vpir = content.get('videoPrimaryInfoRenderer')
  2519. if vpir:
  2520. stl = vpir.get('superTitleLink')
  2521. if stl:
  2522. stl = get_text(stl)
  2523. if try_get(
  2524. vpir,
  2525. lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
  2526. info['location'] = stl
  2527. else:
  2528. # •? doesn't match, but [•]? does; \xa0 = non-breaking space
  2529. mobj = re.search(r'([^\xa0\s].*?)[\xa0\s]*S(\d+)[\xa0\s]*[•]?[\xa0\s]*E(\d+)', stl)
  2530. if mobj:
  2531. info.update({
  2532. 'series': mobj.group(1),
  2533. 'season_number': int(mobj.group(2)),
  2534. 'episode_number': int(mobj.group(3)),
  2535. })
  2536. for tlb in (try_get(
  2537. vpir,
  2538. lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
  2539. list) or []):
  2540. tbr = traverse_obj(tlb, ('segmentedLikeDislikeButtonRenderer', 'likeButton', 'toggleButtonRenderer'), 'toggleButtonRenderer') or {}
  2541. for getter, regex in [(
  2542. lambda x: x['defaultText']['accessibility']['accessibilityData'],
  2543. r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
  2544. lambda x: x['accessibility'],
  2545. lambda x: x['accessibilityData']['accessibilityData'],
  2546. ], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
  2547. label = (try_get(tbr, getter, dict) or {}).get('label')
  2548. if label:
  2549. mobj = re.match(regex, label)
  2550. if mobj:
  2551. info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
  2552. break
  2553. sbr_tooltip = try_get(
  2554. vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
  2555. if sbr_tooltip:
  2556. # however dislike_count was hidden by YT, as if there could ever be dislikable content on YT
  2557. like_count, dislike_count = sbr_tooltip.split(' / ')
  2558. info.update({
  2559. 'like_count': str_to_int(like_count),
  2560. 'dislike_count': str_to_int(dislike_count),
  2561. })
  2562. else:
  2563. info['like_count'] = traverse_obj(vpir, (
  2564. 'videoActions', 'menuRenderer', 'topLevelButtons', Ellipsis,
  2565. 'segmentedLikeDislikeButtonViewModel', 'likeButtonViewModel', 'likeButtonViewModel',
  2566. 'toggleButtonViewModel', 'toggleButtonViewModel', 'defaultButtonViewModel',
  2567. 'buttonViewModel', (('title', ('accessibilityText', T(lambda s: s.split()), Ellipsis))), T(parse_count)),
  2568. get_all=False)
  2569. vsir = content.get('videoSecondaryInfoRenderer')
  2570. if vsir:
  2571. rows = try_get(
  2572. vsir,
  2573. lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
  2574. list) or []
  2575. multiple_songs = False
  2576. for row in rows:
  2577. if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
  2578. multiple_songs = True
  2579. break
  2580. for row in rows:
  2581. mrr = row.get('metadataRowRenderer') or {}
  2582. mrr_title = mrr.get('title')
  2583. if not mrr_title:
  2584. continue
  2585. mrr_title = get_text(mrr['title'])
  2586. mrr_contents_text = get_text(mrr['contents'][0])
  2587. if mrr_title == 'License':
  2588. info['license'] = mrr_contents_text
  2589. elif not multiple_songs:
  2590. if mrr_title == 'Album':
  2591. info['album'] = mrr_contents_text
  2592. elif mrr_title == 'Artist':
  2593. info['artist'] = mrr_contents_text
  2594. elif mrr_title == 'Song':
  2595. info['track'] = mrr_contents_text
  2596. # this is not extraction but spelunking!
  2597. carousel_lockups = traverse_obj(
  2598. initial_data,
  2599. ('engagementPanels', Ellipsis, 'engagementPanelSectionListRenderer',
  2600. 'content', 'structuredDescriptionContentRenderer', 'items', Ellipsis,
  2601. 'videoDescriptionMusicSectionRenderer', 'carouselLockups', Ellipsis),
  2602. expected_type=dict) or []
  2603. # try to reproduce logic from metadataRowContainerRenderer above (if it still is)
  2604. fields = (('ALBUM', 'album'), ('ARTIST', 'artist'), ('SONG', 'track'), ('LICENSES', 'license'))
  2605. # multiple_songs ?
  2606. if len(carousel_lockups) > 1:
  2607. fields = fields[-1:]
  2608. for info_row in traverse_obj(
  2609. carousel_lockups,
  2610. (0, 'carouselLockupRenderer', 'infoRows', Ellipsis, 'infoRowRenderer'),
  2611. expected_type=dict):
  2612. row_title = traverse_obj(info_row, ('title', 'simpleText'))
  2613. row_text = traverse_obj(info_row, 'defaultMetadata', 'expandedMetadata', expected_type=get_text)
  2614. if not row_text:
  2615. continue
  2616. for name, field in fields:
  2617. if name == row_title and not info.get(field):
  2618. info[field] = row_text
  2619. for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
  2620. v = info.get(s_k)
  2621. if v:
  2622. info[d_k] = v
  2623. self.mark_watched(video_id, player_response)
  2624. return merge_dicts(
  2625. info, {
  2626. 'uploader_id': self._extract_uploader_id(owner_profile_url),
  2627. 'uploader_url': owner_profile_url,
  2628. 'channel_id': channel_id,
  2629. 'channel_url': channel_id and self._yt_urljoin('/channel/' + channel_id),
  2630. 'channel': info['uploader'],
  2631. })
class YoutubeTabIE(YoutubeBaseInfoExtractor):
    # Extractor for YouTube "tab" pages - channel/user/feed/hashtag pages
    # and playlists, i.e. URLs that yield a list of entries rather than a
    # single video (single videos are claimed by YoutubeIE via suitable()).
    IE_DESC = 'YouTube.com tab'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:\w+\.)?
                        (?:
                            youtube(?:kids)?\.com|
                            invidio\.us
                        )/
                        (?:
                            (?:channel|c|user|feed|hashtag)/|
                            (?:playlist|watch)\?.*?\blist=|
                            (?!(?:watch|embed|v|e|results)\b)
                        )
                        (?P<id>[^/?\#&]+)
                    '''
    IE_NAME = 'youtube:tab'
  2649. _TESTS = [{
  2650. # Shorts
  2651. 'url': 'https://www.youtube.com/@SuperCooperShorts/shorts',
  2652. 'playlist_mincount': 5,
  2653. 'info_dict': {
  2654. 'description': 'Short clips from Super Cooper Sundays!',
  2655. 'id': 'UCKMA8kHZ8bPYpnMNaUSxfEQ',
  2656. 'title': 'Super Cooper Shorts - Shorts',
  2657. 'uploader': 'Super Cooper Shorts',
  2658. 'uploader_id': '@SuperCooperShorts',
  2659. },
  2660. }, {
  2661. # Channel that does not have a Shorts tab. Test should just download videos on Home tab instead
  2662. 'url': 'https://www.youtube.com/@emergencyawesome/shorts',
  2663. 'info_dict': {
  2664. 'description': 'md5:592c080c06fef4de3c902c4a8eecd850',
  2665. 'id': 'UCDiFRMQWpcp8_KD4vwIVicw',
  2666. 'title': 'Emergency Awesome - Home',
  2667. },
  2668. 'playlist_mincount': 5,
  2669. 'skip': 'new test page needed to replace `Emergency Awesome - Shorts`',
  2670. }, {
  2671. # playlists, multipage
  2672. 'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
  2673. 'playlist_mincount': 94,
  2674. 'info_dict': {
  2675. 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
  2676. 'title': r're:Igor Kleiner(?: Ph\.D\.)? - Playlists',
  2677. 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
  2678. 'uploader': 'Igor Kleiner',
  2679. 'uploader_id': '@IgorDataScience',
  2680. },
  2681. }, {
  2682. # playlists, multipage, different order
  2683. 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
  2684. 'playlist_mincount': 94,
  2685. 'info_dict': {
  2686. 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
  2687. 'title': r're:Igor Kleiner(?: Ph\.D\.)? - Playlists',
  2688. 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
  2689. 'uploader': 'Igor Kleiner',
  2690. 'uploader_id': '@IgorDataScience',
  2691. },
  2692. }, {
  2693. # playlists, series
  2694. 'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
  2695. 'playlist_mincount': 5,
  2696. 'info_dict': {
  2697. 'id': 'UCYO_jab_esuFRV4b17AJtAw',
  2698. 'title': '3Blue1Brown - Playlists',
  2699. 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
  2700. 'uploader': '3Blue1Brown',
  2701. 'uploader_id': '@3blue1brown',
  2702. },
  2703. }, {
  2704. # playlists, singlepage
  2705. 'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
  2706. 'playlist_mincount': 4,
  2707. 'info_dict': {
  2708. 'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
  2709. 'title': 'ThirstForScience - Playlists',
  2710. 'description': 'md5:609399d937ea957b0f53cbffb747a14c',
  2711. 'uploader': 'ThirstForScience',
  2712. 'uploader_id': '@ThirstForScience',
  2713. },
  2714. }, {
  2715. 'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
  2716. 'only_matching': True,
  2717. }, {
  2718. # basic, single video playlist
  2719. 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2720. 'info_dict': {
  2721. 'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2722. 'title': 'youtube-dl public playlist',
  2723. 'uploader': 'Sergey M.',
  2724. 'uploader_id': '@sergeym.6173',
  2725. 'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
  2726. },
  2727. 'playlist_count': 1,
  2728. }, {
  2729. # empty playlist
  2730. 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
  2731. 'info_dict': {
  2732. 'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
  2733. 'title': 'youtube-dl empty playlist',
  2734. 'uploader': 'Sergey M.',
  2735. 'uploader_id': '@sergeym.6173',
  2736. 'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
  2737. },
  2738. 'playlist_count': 0,
  2739. }, {
  2740. # Home tab
  2741. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
  2742. 'info_dict': {
  2743. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2744. 'title': 'lex will - Home',
  2745. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2746. 'uploader': 'lex will',
  2747. 'uploader_id': '@lexwill718',
  2748. },
  2749. 'playlist_mincount': 2,
  2750. }, {
  2751. # Videos tab
  2752. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
  2753. 'info_dict': {
  2754. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2755. 'title': 'lex will - Videos',
  2756. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2757. 'uploader': 'lex will',
  2758. 'uploader_id': '@lexwill718',
  2759. },
  2760. 'playlist_mincount': 975,
  2761. }, {
  2762. # Videos tab, sorted by popular
  2763. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
  2764. 'info_dict': {
  2765. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2766. 'title': 'lex will - Videos',
  2767. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2768. 'uploader': 'lex will',
  2769. 'uploader_id': '@lexwill718',
  2770. },
  2771. 'playlist_mincount': 199,
  2772. }, {
  2773. # Playlists tab
  2774. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
  2775. 'info_dict': {
  2776. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2777. 'title': 'lex will - Playlists',
  2778. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2779. 'uploader': 'lex will',
  2780. 'uploader_id': '@lexwill718',
  2781. },
  2782. 'playlist_mincount': 17,
  2783. }, {
  2784. # Community tab
  2785. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
  2786. 'info_dict': {
  2787. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2788. 'title': 'lex will - Community',
  2789. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2790. 'uploader': 'lex will',
  2791. 'uploader_id': '@lexwill718',
  2792. },
  2793. 'playlist_mincount': 18,
  2794. }, {
  2795. # Channels tab
  2796. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
  2797. 'info_dict': {
  2798. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2799. 'title': r're:lex will - (?:Home|Channels)',
  2800. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2801. 'uploader': 'lex will',
  2802. 'uploader_id': '@lexwill718',
  2803. },
  2804. 'playlist_mincount': 75,
  2805. }, {
  2806. # Releases tab
  2807. 'url': 'https://www.youtube.com/@daftpunk/releases',
  2808. 'info_dict': {
  2809. 'id': 'UC_kRDKYrUlrbtrSiyu5Tflg',
  2810. 'title': 'Daft Punk - Releases',
  2811. 'description': 'Daft Punk (1993 - 2021) - Official YouTube Channel',
  2812. 'uploader_id': '@daftpunk',
  2813. 'uploader': 'Daft Punk',
  2814. },
  2815. 'playlist_mincount': 36,
  2816. }, {
  2817. 'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2818. 'only_matching': True,
  2819. }, {
  2820. 'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2821. 'only_matching': True,
  2822. }, {
  2823. 'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2824. 'only_matching': True,
  2825. }, {
  2826. 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
  2827. 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
  2828. 'info_dict': {
  2829. 'title': '29C3: Not my department',
  2830. 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
  2831. 'uploader': 'Christiaan008',
  2832. 'uploader_id': '@ChRiStIaAn008',
  2833. 'channel_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
  2834. },
  2835. 'playlist_count': 96,
  2836. }, {
  2837. 'note': 'Large playlist',
  2838. 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
  2839. 'info_dict': {
  2840. 'title': 'Uploads from Cauchemar',
  2841. 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
  2842. 'uploader': 'Cauchemar',
  2843. 'uploader_id': '@Cauchemar89',
  2844. 'channel_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
  2845. },
  2846. 'playlist_mincount': 1123,
  2847. }, {
  2848. # even larger playlist, 8832 videos
  2849. 'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
  2850. 'only_matching': True,
  2851. }, {
  2852. 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
  2853. 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
  2854. 'info_dict': {
  2855. 'title': 'Uploads from Interstellar Movie',
  2856. 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
  2857. 'uploader': 'Interstellar Movie',
  2858. 'uploader_id': '@InterstellarMovie',
  2859. 'channel_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
  2860. },
  2861. 'playlist_mincount': 21,
  2862. }, {
  2863. # https://github.com/ytdl-org/youtube-dl/issues/21844
  2864. 'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
  2865. 'info_dict': {
  2866. 'title': 'Data Analysis with Dr Mike Pound',
  2867. 'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
  2868. 'uploader': 'Computerphile',
  2869. 'uploader_id': '@Computerphile',
  2870. 'channel_id': 'UC9-y-6csu5WGm29I7JiwpnA',
  2871. },
  2872. 'playlist_mincount': 11,
  2873. }, {
  2874. 'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2875. 'only_matching': True,
  2876. }, {
  2877. # Playlist URL that does not actually serve a playlist
  2878. 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
  2879. 'info_dict': {
  2880. 'id': 'FqZTN594JQw',
  2881. 'ext': 'webm',
  2882. 'title': "Smiley's People 01 detective, Adventure Series, Action",
  2883. 'uploader': 'STREEM',
  2884. 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
  2885. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
  2886. 'upload_date': '20150526',
  2887. 'license': 'Standard YouTube License',
  2888. 'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
  2889. 'categories': ['People & Blogs'],
  2890. 'tags': list,
  2891. 'view_count': int,
  2892. 'like_count': int,
  2893. },
  2894. 'params': {
  2895. 'skip_download': True,
  2896. },
  2897. 'skip': 'This video is not available.',
  2898. 'add_ie': [YoutubeIE.ie_key()],
  2899. }, {
  2900. 'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
  2901. 'only_matching': True,
  2902. }, {
  2903. 'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
  2904. 'only_matching': True,
  2905. }, {
  2906. 'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
  2907. 'info_dict': {
  2908. 'id': r're:[\da-zA-Z_-]{8,}',
  2909. 'ext': 'mp4',
  2910. 'title': r're:(?s)[A-Z].{20,}',
  2911. 'uploader': 'Sky News',
  2912. 'uploader_id': '@SkyNews',
  2913. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@SkyNews',
  2914. 'upload_date': r're:\d{8}',
  2915. 'description': r're:(?s)(?:.*\n)+SUBSCRIBE to our YouTube channel for more videos: http://www\.youtube\.com/skynews *\n.*',
  2916. 'categories': ['News & Politics'],
  2917. 'tags': list,
  2918. 'like_count': int,
  2919. },
  2920. 'params': {
  2921. 'skip_download': True,
  2922. },
  2923. }, {
  2924. 'url': 'https://www.youtube.com/user/TheYoungTurks/live',
  2925. 'info_dict': {
  2926. 'id': 'a48o2S1cPoo',
  2927. 'ext': 'mp4',
  2928. 'title': 'The Young Turks - Live Main Show',
  2929. 'uploader': 'The Young Turks',
  2930. 'uploader_id': 'TheYoungTurks',
  2931. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
  2932. 'upload_date': '20150715',
  2933. 'license': 'Standard YouTube License',
  2934. 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
  2935. 'categories': ['News & Politics'],
  2936. 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
  2937. 'like_count': int,
  2938. },
  2939. 'params': {
  2940. 'skip_download': True,
  2941. },
  2942. 'only_matching': True,
  2943. }, {
  2944. 'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
  2945. 'only_matching': True,
  2946. }, {
  2947. 'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
  2948. 'only_matching': True,
  2949. }, {
  2950. 'url': 'https://www.youtube.com/feed/trending',
  2951. 'only_matching': True,
  2952. }, {
  2953. # needs auth
  2954. 'url': 'https://www.youtube.com/feed/library',
  2955. 'only_matching': True,
  2956. }, {
  2957. # needs auth
  2958. 'url': 'https://www.youtube.com/feed/history',
  2959. 'only_matching': True,
  2960. }, {
  2961. # needs auth
  2962. 'url': 'https://www.youtube.com/feed/subscriptions',
  2963. 'only_matching': True,
  2964. }, {
  2965. # needs auth
  2966. 'url': 'https://www.youtube.com/feed/watch_later',
  2967. 'only_matching': True,
  2968. }, {
  2969. # no longer available?
  2970. 'url': 'https://www.youtube.com/feed/recommended',
  2971. 'only_matching': True,
  2972. }, {
  2973. # inline playlist with not always working continuations
  2974. 'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
  2975. 'only_matching': True,
  2976. }, {
  2977. 'url': 'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8',
  2978. 'only_matching': True,
  2979. }, {
  2980. 'url': 'https://www.youtube.com/course',
  2981. 'only_matching': True,
  2982. }, {
  2983. 'url': 'https://www.youtube.com/zsecurity',
  2984. 'only_matching': True,
  2985. }, {
  2986. 'url': 'http://www.youtube.com/NASAgovVideo/videos',
  2987. 'only_matching': True,
  2988. }, {
  2989. 'url': 'https://www.youtube.com/TheYoungTurks/live',
  2990. 'only_matching': True,
  2991. }, {
  2992. 'url': 'https://www.youtube.com/hashtag/cctv9',
  2993. 'info_dict': {
  2994. 'id': 'cctv9',
  2995. 'title': '#cctv9',
  2996. },
  2997. 'playlist_mincount': 350,
  2998. }, {
  2999. 'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
  3000. 'only_matching': True,
  3001. }, {
  3002. 'note': 'Search tab',
  3003. 'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
  3004. 'playlist_mincount': 20,
  3005. 'info_dict': {
  3006. 'id': 'UCYO_jab_esuFRV4b17AJtAw',
  3007. 'title': '3Blue1Brown - Search - linear algebra',
  3008. 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
  3009. 'uploader': '3Blue1Brown',
  3010. 'uploader_id': '@3blue1brown',
  3011. 'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
  3012. },
  3013. }]
  3014. @classmethod
  3015. def suitable(cls, url):
  3016. return not YoutubeIE.suitable(url) and super(
  3017. YoutubeTabIE, cls).suitable(url)
  3018. @staticmethod
  3019. def _extract_grid_item_renderer(item):
  3020. assert isinstance(item, dict)
  3021. for key, renderer in item.items():
  3022. if not key.startswith('grid') or not key.endswith('Renderer'):
  3023. continue
  3024. if not isinstance(renderer, dict):
  3025. continue
  3026. return renderer
  3027. @staticmethod
  3028. def _get_text(r, k):
  3029. return traverse_obj(
  3030. r, (k, 'runs', 0, 'text'), (k, 'simpleText'),
  3031. expected_type=txt_or_none)
    def _grid_entries(self, grid_renderer):
        """Yield playlist/video/channel entries from a gridRenderer.

        Each item is classified by the first matching branch; the order of
        the checks (lockup view model, playlist, video, channel, generic
        endpoint URL) is significant and must be preserved.
        """
        for item in traverse_obj(grid_renderer, ('items', Ellipsis, T(dict))):
            # newer UI: lockupViewModel wraps playlists/podcasts
            lockup_view_model = traverse_obj(item, ('lockupViewModel', T(dict)))
            if lockup_view_model:
                entry = self._extract_lockup_view_model(lockup_view_model)
                if entry:
                    yield entry
                continue
            renderer = self._extract_grid_item_renderer(item)
            if not isinstance(renderer, dict):
                continue
            title = self._get_text(renderer, 'title')
            # playlist
            playlist_id = renderer.get('playlistId')
            if playlist_id:
                yield self.url_result(
                    'https://www.youtube.com/playlist?list=%s' % playlist_id,
                    ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
                    video_title=title)
                continue
            # video
            video_id = renderer.get('videoId')
            if video_id:
                yield self._extract_video(renderer)
                continue
            # channel
            channel_id = renderer.get('channelId')
            if channel_id:
                title = self._get_text(renderer, 'title')
                yield self.url_result(
                    'https://www.youtube.com/channel/%s' % channel_id,
                    ie=YoutubeTabIE.ie_key(), video_title=title)
                continue
            # generic endpoint URL support: delegate to the first extractor
            # that claims the navigation endpoint's web URL
            ep_url = urljoin('https://www.youtube.com/', try_get(
                renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
                compat_str))
            if ep_url:
                for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
                    if ie.suitable(ep_url):
                        yield self.url_result(
                            ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
                        break
  3075. def _shelf_entries_from_content(self, shelf_renderer):
  3076. content = shelf_renderer.get('content')
  3077. if not isinstance(content, dict):
  3078. return
  3079. renderer = content.get('gridRenderer')
  3080. if renderer:
  3081. # TODO: add support for nested playlists so each shelf is processed
  3082. # as separate playlist
  3083. # TODO: this includes only first N items
  3084. for entry in self._grid_entries(renderer):
  3085. yield entry
  3086. renderer = content.get('horizontalListRenderer')
  3087. if renderer:
  3088. # TODO
  3089. pass
  3090. def _shelf_entries(self, shelf_renderer, skip_channels=False):
  3091. ep = try_get(
  3092. shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
  3093. compat_str)
  3094. shelf_url = urljoin('https://www.youtube.com', ep)
  3095. if shelf_url:
  3096. # Skipping links to another channels, note that checking for
  3097. # endpoint.commandMetadata.webCommandMetadata.webPageTypwebPageType == WEB_PAGE_TYPE_CHANNEL
  3098. # will not work
  3099. if skip_channels and '/channels?' in shelf_url:
  3100. return
  3101. title = try_get(
  3102. shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
  3103. yield self.url_result(shelf_url, video_title=title)
  3104. # Shelf may not contain shelf URL, fallback to extraction from content
  3105. for entry in self._shelf_entries_from_content(shelf_renderer):
  3106. yield entry
  3107. def _playlist_entries(self, video_list_renderer):
  3108. for content in video_list_renderer['contents']:
  3109. if not isinstance(content, dict):
  3110. continue
  3111. renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
  3112. if not isinstance(renderer, dict):
  3113. continue
  3114. video_id = renderer.get('videoId')
  3115. if not video_id:
  3116. continue
  3117. yield self._extract_video(renderer)
    def _extract_lockup_view_model(self, view_model):
        """Convert a lockupViewModel (playlist/podcast card) into a URL entry.

        Returns None for items without a contentId or with a content type
        other than playlist/podcast (warning once about the latter).
        """
        content_id = view_model.get('contentId')
        if not content_id:
            return
        content_type = view_model.get('contentType')
        if content_type not in ('LOCKUP_CONTENT_TYPE_PLAYLIST', 'LOCKUP_CONTENT_TYPE_PODCAST'):
            self.report_warning(
                'Unsupported lockup view model content type "{0}"{1}'.format(content_type, bug_reports_message()), only_once=True)
            return
        # url_result dict enriched with title/thumbnails pulled from the model
        return merge_dicts(self.url_result(
            update_url_query('https://www.youtube.com/playlist', {'list': content_id}),
            ie=YoutubeTabIE.ie_key(), video_id=content_id), {
            'title': traverse_obj(view_model, (
                'metadata', 'lockupMetadataViewModel', 'title', 'content', T(compat_str))),
            'thumbnails': self._extract_thumbnails(view_model, (
                'contentImage', 'collectionThumbnailViewModel', 'primaryThumbnail',
                'thumbnailViewModel', 'image'), final_key='sources'),
        })
    def _extract_shorts_lockup_view_model(self, view_model):
        """Convert a shortsLockupViewModel into a video URL entry, or None.

        The videoId is accepted only if YoutubeIE would handle it as a URL
        (the suitable() check doubles as an id sanity filter here).
        """
        content_id = traverse_obj(view_model, (
            'onTap', 'innertubeCommand', 'reelWatchEndpoint', 'videoId',
            T(lambda v: v if YoutubeIE.suitable(v) else None)))
        if not content_id:
            return
        return merge_dicts(self.url_result(
            content_id, ie=YoutubeIE.ie_key(), video_id=content_id), {
            'title': traverse_obj(view_model, (
                'overlayMetadata', 'primaryText', 'content', T(compat_str))),
            'thumbnails': self._extract_thumbnails(
                view_model, 'thumbnail', final_key='sources'),
        })
  3149. def _video_entry(self, video_renderer):
  3150. video_id = video_renderer.get('videoId')
  3151. if video_id:
  3152. return self._extract_video(video_renderer)
  3153. def _post_thread_entries(self, post_thread_renderer):
  3154. post_renderer = try_get(
  3155. post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
  3156. if not post_renderer:
  3157. return
  3158. # video attachment
  3159. video_renderer = try_get(
  3160. post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
  3161. video_id = None
  3162. if video_renderer:
  3163. entry = self._video_entry(video_renderer)
  3164. if entry:
  3165. yield entry
  3166. # inline video links
  3167. runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
  3168. for run in runs:
  3169. if not isinstance(run, dict):
  3170. continue
  3171. ep_url = try_get(
  3172. run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
  3173. if not ep_url:
  3174. continue
  3175. if not YoutubeIE.suitable(ep_url):
  3176. continue
  3177. ep_video_id = YoutubeIE._match_id(ep_url)
  3178. if video_id == ep_video_id:
  3179. continue
  3180. yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
  3181. def _post_thread_continuation_entries(self, post_thread_continuation):
  3182. contents = post_thread_continuation.get('contents')
  3183. if not isinstance(contents, list):
  3184. return
  3185. for content in contents:
  3186. renderer = content.get('backstagePostThreadRenderer')
  3187. if not isinstance(renderer, dict):
  3188. continue
  3189. for entry in self._post_thread_entries(renderer):
  3190. yield entry
    def _rich_grid_entries(self, contents):
        """Yield entries from richItemRenderer contents.

        Note: unlike _grid_entries, the branches here deliberately fall
        through — a single item is probed as video, then shorts lockup,
        then playlist, in that order, without early continue.
        """
        for content in traverse_obj(
                contents, (Ellipsis, 'richItemRenderer', 'content'),
                expected_type=dict):
            video_renderer = traverse_obj(
                content, 'videoRenderer', 'reelItemRenderer',
                expected_type=dict)
            if video_renderer:
                entry = self._video_entry(video_renderer)
                if entry:
                    yield entry
            # shorts item
            shorts_lockup_view_model = content.get('shortsLockupViewModel')
            if shorts_lockup_view_model:
                entry = self._extract_shorts_lockup_view_model(shorts_lockup_view_model)
                if entry:
                    yield entry
            # playlist
            renderer = traverse_obj(
                content, 'playlistRenderer', expected_type=dict) or {}
            title = self._get_text(renderer, 'title')
            playlist_id = renderer.get('playlistId')
            if playlist_id:
                yield self.url_result(
                    'https://www.youtube.com/playlist?list=%s' % playlist_id,
                    ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
                    video_title=title)
  3218. @staticmethod
  3219. def _build_continuation_query(continuation, ctp=None):
  3220. query = {
  3221. 'ctoken': continuation,
  3222. 'continuation': continuation,
  3223. }
  3224. if ctp:
  3225. query['itct'] = ctp
  3226. return query
  3227. @staticmethod
  3228. def _extract_next_continuation_data(renderer):
  3229. next_continuation = try_get(
  3230. renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
  3231. if not next_continuation:
  3232. return
  3233. continuation = next_continuation.get('continuation')
  3234. if not continuation:
  3235. return
  3236. ctp = next_continuation.get('clickTrackingParams')
  3237. return YoutubeTabIE._build_continuation_query(continuation, ctp)
    @classmethod
    def _extract_continuation(cls, renderer):
        """Return the continuation query for a renderer, trying the legacy
        nextContinuationData format first, then continuationItemRenderer."""
        next_continuation = cls._extract_next_continuation_data(renderer)
        if next_continuation:
            return next_continuation
        # modern format: look for continuationItemRenderer in any of the
        # item-list keys, with the endpoint either directly on the renderer
        # or nested under a button, possibly wrapped in commandExecutorCommand
        for command in traverse_obj(renderer, (
                ('contents', 'items', 'rows'), Ellipsis, 'continuationItemRenderer',
                ('continuationEndpoint', ('button', 'buttonRenderer', 'command')),
                (('commandExecutorCommand', 'commands', Ellipsis), None), T(dict))):
            continuation = traverse_obj(command, ('continuationCommand', 'token', T(compat_str)))
            if not continuation:
                continue
            ctp = command.get('clickTrackingParams')
            return cls._build_continuation_query(continuation, ctp)
    def _entries(self, tab, item_id, webpage):
        """Generate all entries of a tab page, following continuations.

        First walks the renderers embedded in the initial page (section
        list or rich grid), remembering the last continuation token seen,
        then repeatedly POSTs to the innertube /browse endpoint until no
        further continuation is returned.
        """
        tab_content = try_get(tab, lambda x: x['content'], dict)
        if not tab_content:
            return
        slr_renderer = try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
        if slr_renderer:
            is_channels_tab = tab.get('title') == 'Channels'
            continuation = None
            slr_contents = try_get(slr_renderer, lambda x: x['contents'], list) or []
            for slr_content in slr_contents:
                if not isinstance(slr_content, dict):
                    continue
                is_renderer = try_get(slr_content, lambda x: x['itemSectionRenderer'], dict)
                if not is_renderer:
                    continue
                isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
                for isr_content in isr_contents:
                    if not isinstance(isr_content, dict):
                        continue
                    # each branch handles one renderer type and records its
                    # continuation (shelves have none of their own)
                    renderer = isr_content.get('playlistVideoListRenderer')
                    if renderer:
                        for entry in self._playlist_entries(renderer):
                            yield entry
                        continuation = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('gridRenderer')
                    if renderer:
                        for entry in self._grid_entries(renderer):
                            yield entry
                        continuation = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('shelfRenderer')
                    if renderer:
                        # skip cross-channel shelves except on the Channels tab
                        for entry in self._shelf_entries(renderer, not is_channels_tab):
                            yield entry
                        continue
                    renderer = isr_content.get('backstagePostThreadRenderer')
                    if renderer:
                        for entry in self._post_thread_entries(renderer):
                            yield entry
                        continuation = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('videoRenderer')
                    if renderer:
                        entry = self._video_entry(renderer)
                        if entry:
                            yield entry
                    renderer = isr_content.get('richGridRenderer')
                    if renderer:
                        for from_ in self._rich_grid_entries(
                                traverse_obj(renderer, ('contents', Ellipsis, T(dict)))):
                            yield from_
                        continuation = self._extract_continuation(renderer)
                        continue
                if not continuation:
                    continuation = self._extract_continuation(is_renderer)
            if not continuation:
                continuation = self._extract_continuation(slr_renderer)
        else:
            rich_grid_renderer = tab_content.get('richGridRenderer')
            if not rich_grid_renderer:
                return
            for from_ in self._rich_grid_entries(
                    traverse_obj(rich_grid_renderer, ('contents', Ellipsis, T(dict)))):
                yield from_
            continuation = self._extract_continuation(rich_grid_renderer)
        # set up headers/context for the innertube continuation requests
        ytcfg = self._extract_ytcfg(item_id, webpage)
        client_version = try_get(
            ytcfg, lambda x: x['INNERTUBE_CLIENT_VERSION'], compat_str) or '2.20210407.08.00'
        headers = {
            'x-youtube-client-name': '1',
            'x-youtube-client-version': client_version,
            'content-type': 'application/json',
        }
        context = try_get(ytcfg, lambda x: x['INNERTUBE_CONTEXT'], dict) or {
            'client': {
                'clientName': 'WEB',
                'clientVersion': client_version,
            },
        }
        visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
        identity_token = self._extract_identity_token(ytcfg, webpage)
        if identity_token:
            headers['x-youtube-identity-token'] = identity_token
        data = {
            'context': context,
        }
        for page_num in itertools.count(1):
            if not continuation:
                break
            if visitor_data:
                headers['X-Goog-Visitor-Id'] = visitor_data
            data['continuation'] = continuation['continuation']
            data['clickTracking'] = {
                'clickTrackingParams': continuation['itct'],
            }
            count = 0
            retries = 3
            while count <= retries:
                try:
                    # Downloading page may result in intermittent 5xx HTTP error
                    # that is usually worked around with a retry
                    response = self._download_json(
                        'https://www.youtube.com/youtubei/v1/browse',
                        None, 'Downloading page %d%s' % (page_num, ' (retry #%d)' % count if count else ''),
                        query={
                            # 'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
                            'prettyPrint': 'false',
                        },
                        headers=headers, data=json.dumps(data).encode('utf8'))
                    break
                except ExtractorError as e:
                    if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
                        count += 1
                        if count <= retries:
                            continue
                    raise
            if not response:
                break
            # carry forward visitorData returned by the server, if any
            visitor_data = try_get(
                response, lambda x: x['responseContext']['visitorData'], compat_str) or visitor_data
            # legacy continuation response format
            continuation_contents = try_get(
                response, lambda x: x['continuationContents'], dict)
            if continuation_contents:
                continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
                if continuation_renderer:
                    for entry in self._playlist_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('gridContinuation')
                if continuation_renderer:
                    for entry in self._grid_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('itemSectionContinuation')
                if continuation_renderer:
                    for entry in self._post_thread_continuation_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
            # modern continuation response format: a flat item list whose
            # type is sniffed from its first item
            on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
            continuation_items = try_get(
                on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
            if continuation_items:
                continuation_item = continuation_items[0]
                if not isinstance(continuation_item, dict):
                    continue
                renderer = self._extract_grid_item_renderer(continuation_item)
                if renderer:
                    grid_renderer = {'items': continuation_items}
                    for entry in self._grid_entries(grid_renderer):
                        yield entry
                    continuation = self._extract_continuation(grid_renderer)
                    continue
                renderer = continuation_item.get('playlistVideoRenderer') or continuation_item.get('itemSectionRenderer')
                if renderer:
                    video_list_renderer = {'contents': continuation_items}
                    for entry in self._playlist_entries(video_list_renderer):
                        yield entry
                    continuation = self._extract_continuation(video_list_renderer)
                    continue
                renderer = continuation_item.get('backstagePostThreadRenderer')
                if renderer:
                    continuation_renderer = {'contents': continuation_items}
                    for entry in self._post_thread_continuation_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                renderer = continuation_item.get('richItemRenderer')
                if renderer:
                    for entry in self._rich_grid_entries(continuation_items):
                        yield entry
                    continuation = self._extract_continuation({'contents': continuation_items})
                    continue
            break
  3429. @staticmethod
  3430. def _extract_selected_tab(tabs):
  3431. for tab in tabs:
  3432. renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
  3433. if renderer.get('selected') is True:
  3434. return renderer
  3435. else:
  3436. raise ExtractorError('Unable to find selected tab')
  3437. def _extract_uploader(self, metadata, data):
  3438. uploader = {}
  3439. renderers = traverse_obj(data,
  3440. ('sidebar', 'playlistSidebarRenderer', 'items'))
  3441. uploader['channel_id'] = self._extract_channel_id('', metadata=metadata, renderers=renderers)
  3442. uploader['uploader'] = (
  3443. self._extract_author_var('', 'name', renderers=renderers)
  3444. or self._extract_author_var('', 'name', metadata=metadata))
  3445. uploader['uploader_url'] = self._yt_urljoin(
  3446. self._extract_author_var('', 'url', metadata=metadata, renderers=renderers))
  3447. uploader['uploader_id'] = self._extract_uploader_id(uploader['uploader_url'])
  3448. uploader['channel'] = uploader['uploader']
  3449. return uploader
    @classmethod
    def _extract_alert(cls, data):
        """Join the texts of all alert renderers into one newline-separated string."""
        alerts = []
        for alert in traverse_obj(data, ('alerts', Ellipsis), expected_type=dict):
            # the (None, lambda) path tries the dict itself first, then the
            # alertRenderer wrapper — first non-None wins (get_all=False)
            alert_text = traverse_obj(
                alert, (None, lambda x: x['alertRenderer']['text']), get_all=False)
            if not alert_text:
                continue
            text = cls._get_text(alert_text, 'text')
            if text:
                alerts.append(text)
        return '\n'.join(alerts)
    def _extract_from_tabs(self, item_id, webpage, data, tabs):
        """Build a playlist result from a tabbed (channel/playlist) page.

        Title/description/id come from channelMetadataRenderer when present,
        otherwise from playlistMetadataRenderer or hashtagHeaderRenderer;
        uploader fields are merged in at the end.
        """
        selected_tab = self._extract_selected_tab(tabs)
        renderer = traverse_obj(data, ('metadata', 'channelMetadataRenderer'),
                                expected_type=dict) or {}
        playlist_id = item_id
        title = description = None
        if renderer:
            # channel page: "<channel> - <tab>[ - <expanded>]"
            channel_title = txt_or_none(renderer.get('title')) or item_id
            tab_title = txt_or_none(selected_tab.get('title'))
            title = join_nonempty(
                channel_title or item_id, tab_title,
                txt_or_none(selected_tab.get('expandedText')),
                delim=' - ')
            description = txt_or_none(renderer.get('description'))
            playlist_id = txt_or_none(renderer.get('externalId')) or playlist_id
        else:
            # playlist or hashtag page
            renderer = traverse_obj(data,
                                    ('metadata', 'playlistMetadataRenderer'),
                                    ('header', 'hashtagHeaderRenderer'),
                                    expected_type=dict) or {}
            title = traverse_obj(renderer, 'title', ('hashtag', 'simpleText'),
                                 expected_type=txt_or_none)
        playlist = self.playlist_result(
            self._entries(selected_tab, item_id, webpage),
            playlist_id=playlist_id, playlist_title=title,
            playlist_description=description)
        return merge_dicts(playlist, self._extract_uploader(renderer, data))
  3489. def _extract_from_playlist(self, item_id, url, data, playlist):
  3490. title = traverse_obj((playlist, data),
  3491. (0, 'title'), (1, 'titleText', 'simpleText'),
  3492. expected_type=txt_or_none)
  3493. playlist_id = txt_or_none(playlist.get('playlistId')) or item_id
  3494. # Inline playlist rendition continuation does not always work
  3495. # at Youtube side, so delegating regular tab-based playlist URL
  3496. # processing whenever possible.
  3497. playlist_url = urljoin(url, traverse_obj(
  3498. playlist, ('endpoint', 'commandMetadata', 'webCommandMetadata', 'url'),
  3499. expected_type=url_or_none))
  3500. if playlist_url and playlist_url != url:
  3501. return self.url_result(
  3502. playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
  3503. video_title=title)
  3504. return self.playlist_result(
  3505. self._playlist_entries(playlist), playlist_id=playlist_id,
  3506. playlist_title=title)
  3507. def _extract_identity_token(self, ytcfg, webpage):
  3508. if ytcfg:
  3509. token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
  3510. if token:
  3511. return token
  3512. return self._search_regex(
  3513. r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
  3514. 'identity token', default=None)
    def _real_extract(self, url):
        """Dispatch a tab-style URL: feed XML redirect, video-in-playlist
        choice, tabbed page, inline playlist, or bare video fallback."""
        item_id = self._match_id(url)
        url = update_url(url, netloc='www.youtube.com')
        qs = parse_qs(url)

        def qs_get(key, default=None):
            # last occurrence of a repeated query parameter wins
            return qs.get(key, [default])[-1]
        # Go around for /feeds/videos.xml?playlist_id={pl_id}
        if item_id == 'feeds' and '/feeds/videos.xml?' in url:
            playlist_id = qs_get('playlist_id')
            if playlist_id:
                return self.url_result(
                    update_url_query('https://www.youtube.com/playlist', {
                        'list': playlist_id,
                    }), ie=self.ie_key(), video_id=playlist_id)
        # Handle both video/playlist URLs
        video_id = qs_get('v')
        playlist_id = qs_get('list')
        if video_id and playlist_id:
            if self._downloader.params.get('noplaylist'):
                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
                return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
            self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
        webpage = self._download_webpage(url, item_id)
        data = self._extract_yt_initial_data(item_id, webpage)
        tabs = try_get(
            data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
        if tabs:
            return self._extract_from_tabs(item_id, webpage, data, tabs)
        playlist = try_get(
            data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
        if playlist:
            return self._extract_from_playlist(item_id, url, data, playlist)
        # Fallback to video extraction if no playlist alike page is recognized.
        # First check for the current video then try the v attribute of URL query.
        video_id = try_get(
            data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
            compat_str) or video_id
        if video_id:
            return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
        # Capture and output alerts
        alert = self._extract_alert(data)
        if alert:
            raise ExtractorError(alert, expected=True)
        # Failed to recognize
        raise ExtractorError('Unable to recognize tab page')
  3560. class YoutubePlaylistIE(InfoExtractor):
  3561. IE_DESC = 'YouTube.com playlists'
  3562. _VALID_URL = r'''(?x)(?:
  3563. (?:https?://)?
  3564. (?:\w+\.)?
  3565. (?:
  3566. (?:
  3567. youtube(?:kids)?\.com|
  3568. invidio\.us
  3569. )
  3570. /.*?\?.*?\blist=
  3571. )?
  3572. (?P<id>%(playlist_id)s)
  3573. )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
  3574. IE_NAME = 'youtube:playlist'
  3575. _TESTS = [{
  3576. 'note': 'issue #673',
  3577. 'url': 'PLBB231211A4F62143',
  3578. 'info_dict': {
  3579. 'title': '[OLD]Team Fortress 2 (Class-based LP)',
  3580. 'id': 'PLBB231211A4F62143',
  3581. 'uploader': 'Wickman',
  3582. 'uploader_id': '@WickmanVT',
  3583. 'channel_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
  3584. },
  3585. 'playlist_mincount': 29,
  3586. }, {
  3587. 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
  3588. 'info_dict': {
  3589. 'title': 'YDL_safe_search',
  3590. 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
  3591. },
  3592. 'playlist_count': 2,
  3593. 'skip': 'This playlist is private',
  3594. }, {
  3595. 'note': 'embedded',
  3596. 'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
  3597. # TODO: full playlist requires _reload_with_unavailable_videos()
  3598. # 'playlist_count': 4,
  3599. 'playlist_mincount': 1,
  3600. 'info_dict': {
  3601. 'title': 'JODA15',
  3602. 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
  3603. 'uploader': 'milan',
  3604. 'uploader_id': '@milan5503',
  3605. 'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
  3606. },
  3607. }, {
  3608. 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
  3609. 'playlist_mincount': 455,
  3610. 'info_dict': {
  3611. 'title': '2018 Chinese New Singles (11/6 updated)',
  3612. 'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
  3613. 'uploader': 'LBK',
  3614. 'uploader_id': '@music_king',
  3615. 'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA',
  3616. },
  3617. }, {
  3618. 'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
  3619. 'only_matching': True,
  3620. }, {
  3621. # music album playlist
  3622. 'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
  3623. 'only_matching': True,
  3624. }]
  3625. @classmethod
  3626. def suitable(cls, url):
  3627. if YoutubeTabIE.suitable(url):
  3628. return False
  3629. if parse_qs(url).get('v', [None])[0]:
  3630. return False
  3631. return super(YoutubePlaylistIE, cls).suitable(url)
  3632. def _real_extract(self, url):
  3633. playlist_id = self._match_id(url)
  3634. qs = parse_qs(url)
  3635. if not qs:
  3636. qs = {'list': playlist_id}
  3637. return self.url_result(
  3638. update_url_query('https://www.youtube.com/playlist', qs),
  3639. ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
  3640. class YoutubeYtBeIE(InfoExtractor):
  3641. _VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
  3642. _TESTS = [{
  3643. 'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
  3644. 'info_dict': {
  3645. 'id': 'yeWKywCrFtk',
  3646. 'ext': 'mp4',
  3647. 'title': 'Small Scale Baler and Braiding Rugs',
  3648. 'uploader': 'Backus-Page House Museum',
  3649. 'uploader_id': '@backuspagemuseum',
  3650. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@backuspagemuseum',
  3651. 'upload_date': '20161008',
  3652. 'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
  3653. 'categories': ['Nonprofits & Activism'],
  3654. 'tags': list,
  3655. 'like_count': int,
  3656. },
  3657. 'params': {
  3658. 'noplaylist': True,
  3659. 'skip_download': True,
  3660. },
  3661. }, {
  3662. 'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
  3663. 'only_matching': True,
  3664. }]
  3665. def _real_extract(self, url):
  3666. mobj = re.match(self._VALID_URL, url)
  3667. video_id = mobj.group('id')
  3668. playlist_id = mobj.group('playlist_id')
  3669. return self.url_result(
  3670. update_url_query('https://www.youtube.com/watch', {
  3671. 'v': video_id,
  3672. 'list': playlist_id,
  3673. 'feature': 'youtu.be',
  3674. }), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
  3675. class YoutubeYtUserIE(InfoExtractor):
  3676. _VALID_URL = r'ytuser:(?P<id>.+)'
  3677. _TESTS = [{
  3678. 'url': 'ytuser:phihag',
  3679. 'only_matching': True,
  3680. }]
  3681. def _real_extract(self, url):
  3682. user_id = self._match_id(url)
  3683. return self.url_result(
  3684. 'https://www.youtube.com/user/%s' % user_id,
  3685. ie=YoutubeTabIE.ie_key(), video_id=user_id)
  3686. class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
  3687. IE_NAME = 'youtube:favorites'
  3688. IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
  3689. _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
  3690. _LOGIN_REQUIRED = True
  3691. _TESTS = [{
  3692. 'url': ':ytfav',
  3693. 'only_matching': True,
  3694. }, {
  3695. 'url': ':ytfavorites',
  3696. 'only_matching': True,
  3697. }]
  3698. def _real_extract(self, url):
  3699. return self.url_result(
  3700. 'https://www.youtube.com/playlist?list=LL',
  3701. ie=YoutubeTabIE.ie_key())
  3702. class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
  3703. IE_DESC = 'YouTube.com searches'
  3704. IE_NAME = 'youtube:search'
  3705. _SEARCH_KEY = 'ytsearch'
  3706. _SEARCH_PARAMS = 'EgIQAQ%3D%3D' # Videos only
  3707. _MAX_RESULTS = float('inf')
  3708. _TESTS = [{
  3709. 'url': 'ytsearch10:youtube-dl test video',
  3710. 'playlist_count': 10,
  3711. 'info_dict': {
  3712. 'id': 'youtube-dl test video',
  3713. 'title': 'youtube-dl test video',
  3714. },
  3715. }]
  3716. def _get_n_results(self, query, n):
  3717. """Get a specified number of results for a query"""
  3718. entries = itertools.islice(self._search_results(query, self._SEARCH_PARAMS), 0, None if n == float('inf') else n)
  3719. return self.playlist_result(entries, query, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
    """Search extractor variant returning newest videos first."""
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
    _SEARCH_PARAMS = 'CAISAhAB'  # Videos only, sorted by date
    _TESTS = [{
        'url': 'ytsearchdate10:youtube-dl test video',
        'playlist_count': 10,
        'info_dict': {
            'id': 'youtube-dl test video',
            'title': 'youtube-dl test video',
        },
    }]
  3733. class YoutubeSearchURLIE(YoutubeBaseInfoExtractor):
  3734. IE_DESC = 'YouTube search URLs with sorting and filter support'
  3735. IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
  3736. _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?:[^&]+)(?:[&]|$)'
  3737. _TESTS = [{
  3738. 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
  3739. 'playlist_mincount': 5,
  3740. 'info_dict': {
  3741. 'id': 'youtube-dl test video',
  3742. 'title': 'youtube-dl test video',
  3743. },
  3744. 'params': {'playlistend': 5},
  3745. }, {
  3746. 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
  3747. 'only_matching': True,
  3748. }]
  3749. def _real_extract(self, url):
  3750. qs = parse_qs(url)
  3751. query = (qs.get('search_query') or qs.get('q'))[-1]
  3752. params = qs.get('sp', ('',))[-1]
  3753. return self.playlist_result(self._search_results(query, params), query, query)
  3754. class YoutubeFeedsInfoExtractor(YoutubeTabIE):
  3755. """
  3756. Base class for feed extractors
  3757. Subclasses must define the _FEED_NAME property.
  3758. """
  3759. _LOGIN_REQUIRED = True
  3760. @property
  3761. def IE_NAME(self):
  3762. return 'youtube:%s' % self._FEED_NAME
  3763. def _real_initialize(self):
  3764. self._login()
  3765. def _real_extract(self, url):
  3766. return self.url_result(
  3767. 'https://www.youtube.com/feed/%s' % self._FEED_NAME,
  3768. ie=YoutubeTabIE.ie_key())
  3769. class YoutubeWatchLaterIE(InfoExtractor):
  3770. IE_NAME = 'youtube:watchlater'
  3771. IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
  3772. _VALID_URL = r':ytwatchlater'
  3773. _TESTS = [{
  3774. 'url': ':ytwatchlater',
  3775. 'only_matching': True,
  3776. }]
  3777. def _real_extract(self, url):
  3778. return self.url_result(
  3779. 'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
  3780. class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
  3781. IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
  3782. _VALID_URL = r':ytrec(?:ommended)?'
  3783. _FEED_NAME = 'recommended'
  3784. _TESTS = [{
  3785. 'url': ':ytrec',
  3786. 'only_matching': True,
  3787. }, {
  3788. 'url': ':ytrecommended',
  3789. 'only_matching': True,
  3790. }]
  3791. class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
  3792. IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
  3793. _VALID_URL = r':ytsubs(?:criptions)?'
  3794. _FEED_NAME = 'subscriptions'
  3795. _TESTS = [{
  3796. 'url': ':ytsubs',
  3797. 'only_matching': True,
  3798. }, {
  3799. 'url': ':ytsubscriptions',
  3800. 'only_matching': True,
  3801. }]
  3802. class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
  3803. IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
  3804. _VALID_URL = r':ythistory'
  3805. _FEED_NAME = 'history'
  3806. _TESTS = [{
  3807. 'url': ':ythistory',
  3808. 'only_matching': True,
  3809. }]
  3810. class YoutubeTruncatedURLIE(InfoExtractor):
  3811. IE_NAME = 'youtube:truncated_url'
  3812. IE_DESC = False # Do not list
  3813. _VALID_URL = r'''(?x)
  3814. (?:https?://)?
  3815. (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
  3816. (?:watch\?(?:
  3817. feature=[a-z_]+|
  3818. annotation_id=annotation_[^&]+|
  3819. x-yt-cl=[0-9]+|
  3820. hl=[^&]*|
  3821. t=[0-9]+
  3822. )?
  3823. |
  3824. attribution_link\?a=[^&]+
  3825. )
  3826. $
  3827. '''
  3828. _TESTS = [{
  3829. 'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
  3830. 'only_matching': True,
  3831. }, {
  3832. 'url': 'https://www.youtube.com/watch?',
  3833. 'only_matching': True,
  3834. }, {
  3835. 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
  3836. 'only_matching': True,
  3837. }, {
  3838. 'url': 'https://www.youtube.com/watch?feature=foo',
  3839. 'only_matching': True,
  3840. }, {
  3841. 'url': 'https://www.youtube.com/watch?hl=en-GB',
  3842. 'only_matching': True,
  3843. }, {
  3844. 'url': 'https://www.youtube.com/watch?t=2372',
  3845. 'only_matching': True,
  3846. }]
  3847. def _real_extract(self, url):
  3848. raise ExtractorError(
  3849. 'Did you forget to quote the URL? Remember that & is a meta '
  3850. 'character in most shells, so you want to put the URL in quotes, '
  3851. 'like youtube-dl '
  3852. '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
  3853. ' or simply youtube-dl BaW_jenozKc .',
  3854. expected=True)
  3855. class YoutubeTruncatedIDIE(InfoExtractor):
  3856. IE_NAME = 'youtube:truncated_id'
  3857. IE_DESC = False # Do not list
  3858. _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
  3859. _TESTS = [{
  3860. 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
  3861. 'only_matching': True,
  3862. }]
  3863. def _real_extract(self, url):
  3864. video_id = self._match_id(url)
  3865. raise ExtractorError(
  3866. 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
  3867. expected=True)