logo

youtube-dl

[mirror] Download/Watch videos from video hosters
git clone https://hacktivis.me/git/mirror/youtube-dl.git

youtube.py (149339B)


  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import itertools
  4. import json
  5. import os.path
  6. import random
  7. import re
  8. import traceback
  9. from .common import InfoExtractor, SearchInfoExtractor
  10. from ..compat import (
  11. compat_chr,
  12. compat_HTTPError,
  13. compat_map as map,
  14. compat_parse_qs,
  15. compat_str,
  16. compat_urllib_parse_unquote_plus,
  17. compat_urllib_parse_urlencode,
  18. compat_urllib_parse_urlparse,
  19. compat_urlparse,
  20. )
  21. from ..jsinterp import JSInterpreter
  22. from ..utils import (
  23. ExtractorError,
  24. clean_html,
  25. dict_get,
  26. error_to_compat_str,
  27. float_or_none,
  28. int_or_none,
  29. js_to_json,
  30. mimetype2ext,
  31. parse_codecs,
  32. parse_duration,
  33. qualities,
  34. remove_start,
  35. smuggle_url,
  36. str_or_none,
  37. str_to_int,
  38. try_get,
  39. unescapeHTML,
  40. unified_strdate,
  41. unsmuggle_url,
  42. update_url_query,
  43. url_or_none,
  44. urlencode_postdata,
  45. urljoin,
  46. )
  47. def parse_qs(url):
  48. return compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""

    # Google account sign-in endpoints used by the username/password login flow
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
    _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
    _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
    _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'

    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    # Matches the known playlist-ID prefixes (PL, LL, UU, ...) plus the special 'RDMM' mix
    _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM)'

    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
        """
        username, password = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            return

        login_form = self._hidden_inputs(login_page)

        def req(url, f_req, note, errnote):
            # POST one step of the sign-in challenge flow. The response is
            # JSON preceded by an anti-XSSI prefix, which transform_source
            # strips (everything before the first '[').
            data = login_form.copy()
            data.update({
                'pstMsg': 1,
                'checkConnection': 'youtube',
                'checkedDomains': 'youtube',
                'hl': 'en',
                'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
                'f.req': json.dumps(f_req),
                'flowName': 'GlifWebSignIn',
                'flowEntry': 'ServiceLogin',
                # TODO: reverse actual botguard identifier generation algo
                'bgRequest': '["identifier",""]',
            })
            return self._download_json(
                url, None, note=note, errnote=errnote,
                transform_source=lambda s: re.sub(r'^[^[]*', '', s),
                fatal=False,
                data=urlencode_postdata(data), headers={
                    'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
                    'Google-Accounts-XSRF': 1,
                })

        def warn(message):
            self._downloader.report_warning(message)

        # NOTE(review): these positional payloads mirror Google's private
        # sign-in API; the meaning of most slots is not documented anywhere
        # visible here and appears reverse-engineered — keep order intact.
        lookup_req = [
            username,
            None, [], None, 'US', None, None, 2, False, True,
            [
                None, None,
                [2, 1, None, 1,
                 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
                 None, [], 4],
                1, [None, None, []], None, None, None, True
            ],
            username,
        ]

        lookup_results = req(
            self._LOOKUP_URL, lookup_req,
            'Looking up account info', 'Unable to look up account info')
        if lookup_results is False:
            return False

        user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
        if not user_hash:
            warn('Unable to extract user hash')
            return False

        challenge_req = [
            user_hash,
            None, 1, None, [1, None, None, None, [password, None, True]],
            [
                None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
                1, [None, None, []], None, None, None, True
            ]]

        challenge_results = req(
            self._CHALLENGE_URL, challenge_req,
            'Logging in', 'Unable to log in')
        if challenge_results is False:
            return

        login_res = try_get(challenge_results, lambda x: x[0][5], list)
        if login_res:
            login_msg = try_get(login_res, lambda x: x[5], compat_str)
            warn(
                'Unable to login: %s' % 'Invalid password'
                if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
            return False

        res = try_get(challenge_results, lambda x: x[0][-1], list)
        if not res:
            warn('Unable to extract result entry')
            return False

        login_challenge = try_get(res, lambda x: x[0][0], list)
        if login_challenge:
            challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
            if challenge_str == 'TWO_STEP_VERIFICATION':
                # SEND_SUCCESS - TFA code has been successfully sent to phone
                # QUOTA_EXCEEDED - reached the limit of TFA codes
                status = try_get(login_challenge, lambda x: x[5], compat_str)
                if status == 'QUOTA_EXCEEDED':
                    warn('Exceeded the limit of TFA codes, try later')
                    return False

                tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
                if not tl:
                    warn('Unable to extract TL')
                    return False

                tfa_code = self._get_tfa_info('2-step verification code')

                if not tfa_code:
                    warn(
                        'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
                        '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                    return False

                # Codes from the authenticator app may carry a 'G-' prefix
                tfa_code = remove_start(tfa_code, 'G-')

                tfa_req = [
                    user_hash, None, 2, None,
                    [
                        9, None, None, None, None, None, None, None,
                        [None, tfa_code, True, 2]
                    ]]

                tfa_results = req(
                    self._TFA_URL.format(tl), tfa_req,
                    'Submitting TFA code', 'Unable to submit TFA code')
                if tfa_results is False:
                    return False

                tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
                if tfa_res:
                    tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
                    warn(
                        'Unable to finish TFA: %s' % 'Invalid TFA code'
                        if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
                    return False

                check_cookie_url = try_get(
                    tfa_results, lambda x: x[0][-1][2], compat_str)
            else:
                # Any other challenge requires interactive resolution in a browser
                CHALLENGES = {
                    'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
                    'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
                    'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
                }
                challenge = CHALLENGES.get(
                    challenge_str,
                    '%s returned error %s.' % (self.IE_NAME, challenge_str))
                warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
                return False
        else:
            check_cookie_url = try_get(res, lambda x: x[2], compat_str)

        if not check_cookie_url:
            warn('Unable to extract CheckCookie URL')
            return False

        # Fetching the CheckCookie URL finalizes the session cookies
        check_cookie_results = self._download_webpage(
            check_cookie_url, None, 'Checking cookie', fatal=False)

        if check_cookie_results is False:
            return False

        if 'https://myaccount.google.com/' not in check_cookie_results:
            warn('Unable to log in')
            return False

        return True

    def _initialize_consent(self):
        """Set a 'YES' CONSENT cookie for .youtube.com unless one is already
        present (presumably to skip the cookie-consent interstitial — TODO
        confirm). Reuses the numeric ID from a 'PENDING+<id>' cookie value
        when available, otherwise picks a random 3-digit one."""
        cookies = self._get_cookies('https://www.youtube.com/')
        # __Secure-3PSID indicates an authenticated session; nothing to do then
        if cookies.get('__Secure-3PSID'):
            return
        consent_id = None
        consent = cookies.get('CONSENT')
        if consent:
            if 'YES' in consent.value:
                return
            consent_id = self._search_regex(
                r'PENDING\+(\d+)', consent.value, 'consent', default=None)
        if not consent_id:
            consent_id = random.randint(100, 999)
        self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)

    def _real_initialize(self):
        """Prepare the extractor: install the consent cookie, then attempt login."""
        self._initialize_consent()
        if self._downloader is None:
            return
        if not self._login():
            return

    # Default request body for _call_api: identifies us as the WEB client
    _DEFAULT_API_DATA = {
        'context': {
            'client': {
                'clientName': 'WEB',
                'clientVersion': '2.20201021.03.00',
            }
        },
    }

    # ytInitialData may be assigned via window['ytInitialData'] or a bare name
    _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
    _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
    # Tokens that mark the end of the inline-JSON blob in the page source
    _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'

    def _call_api(self, ep, query, video_id, fatal=True):
        """POST `query` merged over _DEFAULT_API_DATA to the youtubei/v1
        endpoint `ep` and return the decoded JSON response."""
        data = self._DEFAULT_API_DATA.copy()
        data.update(query)

        return self._download_json(
            'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
            note='Downloading API JSON', errnote='Unable to download API page',
            data=json.dumps(data).encode('utf8'), fatal=fatal,
            headers={'content-type': 'application/json'},
            # hard-coded API key, same one used by the search endpoint below
            query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})

    def _extract_yt_initial_data(self, video_id, webpage):
        """Extract and parse the ytInitialData JSON object from `webpage`."""
        return self._parse_json(
            self._search_regex(
                # prefer the boundary-anchored pattern to avoid over-matching,
                # then fall back to the bare assignment pattern
                (r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
                 self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
            video_id)

    def _extract_ytcfg(self, video_id, webpage):
        """Parse the ytcfg.set({...}) configuration blob from `webpage`;
        returns {} when absent or unparseable."""
        return self._parse_json(
            self._search_regex(
                r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
                default='{}'), video_id, fatal=False) or {}

    def _extract_video(self, renderer):
        """Convert a videoRenderer dict into a url-type info dict handled by
        YoutubeIE. Missing fields resolve to None via try_get."""
        video_id = renderer['videoId']
        title = try_get(
            renderer,
            (lambda x: x['title']['runs'][0]['text'],
             lambda x: x['title']['simpleText']), compat_str)
        description = try_get(
            renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
            compat_str)
        duration = parse_duration(try_get(
            renderer, lambda x: x['lengthText']['simpleText'], compat_str))
        view_count_text = try_get(
            renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
        # drop all whitespace first so the leading digit group parses cleanly
        view_count = str_to_int(self._search_regex(
            r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
            'view count', default=None))
        uploader = try_get(
            renderer,
            (lambda x: x['ownerText']['runs'][0]['text'],
             lambda x: x['shortBylineText']['runs'][0]['text']), compat_str)
        return {
            '_type': 'url',
            'ie_key': YoutubeIE.ie_key(),
            'id': video_id,
            'url': video_id,
            'title': title,
            'description': description,
            'duration': duration,
            'view_count': view_count,
            'uploader': uploader,
        }

    def _search_results(self, query, params):
        """Yield url-type info dicts for every video result of `query`,
        following continuation tokens across result pages until exhausted."""
        data = {
            'context': {
                'client': {
                    'clientName': 'WEB',
                    'clientVersion': '2.20201021.03.00',
                }
            },
            'query': query,
        }
        if params:
            data['params'] = params
        for page_num in itertools.count(1):
            search = self._download_json(
                'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
                video_id='query "%s"' % query,
                note='Downloading page %s' % page_num,
                errnote='Unable to download API page', fatal=False,
                data=json.dumps(data).encode('utf8'),
                headers={'content-type': 'application/json'})
            if not search:
                break
            # the first page and continuation responses nest results differently
            slr_contents = try_get(
                search,
                (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
                 lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
                list)
            if not slr_contents:
                break
            for slr_content in slr_contents:
                isr_contents = try_get(
                    slr_content,
                    lambda x: x['itemSectionRenderer']['contents'],
                    list)
                if not isr_contents:
                    continue
                for content in isr_contents:
                    if not isinstance(content, dict):
                        continue
                    video = content.get('videoRenderer')
                    if not isinstance(video, dict):
                        continue
                    video_id = video.get('videoId')
                    if not video_id:
                        continue
                    yield self._extract_video(video)
            # the last entry of the page carries the next continuation token
            token = try_get(
                slr_contents,
                lambda x: x[-1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
                compat_str)
            if not token:
                break
            data['continuation'] = token
  348. class YoutubeIE(YoutubeBaseInfoExtractor):
  349. IE_DESC = 'YouTube.com'
  350. _INVIDIOUS_SITES = (
  351. # invidious-redirect websites
  352. r'(?:www\.)?redirect\.invidious\.io',
  353. r'(?:(?:www|dev)\.)?invidio\.us',
  354. # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
  355. r'(?:(?:www|no)\.)?invidiou\.sh',
  356. r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
  357. r'(?:www\.)?invidious\.kabi\.tk',
  358. r'(?:www\.)?invidious\.13ad\.de',
  359. r'(?:www\.)?invidious\.mastodon\.host',
  360. r'(?:www\.)?invidious\.zapashcanon\.fr',
  361. r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
  362. r'(?:www\.)?invidious\.tinfoil-hat\.net',
  363. r'(?:www\.)?invidious\.himiko\.cloud',
  364. r'(?:www\.)?invidious\.reallyancient\.tech',
  365. r'(?:www\.)?invidious\.tube',
  366. r'(?:www\.)?invidiou\.site',
  367. r'(?:www\.)?invidious\.site',
  368. r'(?:www\.)?invidious\.xyz',
  369. r'(?:www\.)?invidious\.nixnet\.xyz',
  370. r'(?:www\.)?invidious\.048596\.xyz',
  371. r'(?:www\.)?invidious\.drycat\.fr',
  372. r'(?:www\.)?inv\.skyn3t\.in',
  373. r'(?:www\.)?tube\.poal\.co',
  374. r'(?:www\.)?tube\.connect\.cafe',
  375. r'(?:www\.)?vid\.wxzm\.sx',
  376. r'(?:www\.)?vid\.mint\.lgbt',
  377. r'(?:www\.)?vid\.puffyan\.us',
  378. r'(?:www\.)?yewtu\.be',
  379. r'(?:www\.)?yt\.elukerio\.org',
  380. r'(?:www\.)?yt\.lelux\.fi',
  381. r'(?:www\.)?invidious\.ggc-project\.de',
  382. r'(?:www\.)?yt\.maisputain\.ovh',
  383. r'(?:www\.)?ytprivate\.com',
  384. r'(?:www\.)?invidious\.13ad\.de',
  385. r'(?:www\.)?invidious\.toot\.koeln',
  386. r'(?:www\.)?invidious\.fdn\.fr',
  387. r'(?:www\.)?watch\.nettohikari\.com',
  388. r'(?:www\.)?invidious\.namazso\.eu',
  389. r'(?:www\.)?invidious\.silkky\.cloud',
  390. r'(?:www\.)?invidious\.exonip\.de',
  391. r'(?:www\.)?invidious\.riverside\.rocks',
  392. r'(?:www\.)?invidious\.blamefran\.net',
  393. r'(?:www\.)?invidious\.moomoo\.de',
  394. r'(?:www\.)?ytb\.trom\.tf',
  395. r'(?:www\.)?yt\.cyberhost\.uk',
  396. r'(?:www\.)?kgg2m7yk5aybusll\.onion',
  397. r'(?:www\.)?qklhadlycap4cnod\.onion',
  398. r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
  399. r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
  400. r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
  401. r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
  402. r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
  403. r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
  404. r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
  405. r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
  406. r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
  407. r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
  408. )
  409. _VALID_URL = r"""(?x)^
  410. (
  411. (?:https?://|//) # http(s):// or protocol-independent URL
  412. (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
  413. (?:www\.)?deturl\.com/www\.youtube\.com|
  414. (?:www\.)?pwnyoutube\.com|
  415. (?:www\.)?hooktube\.com|
  416. (?:www\.)?yourepeat\.com|
  417. tube\.majestyc\.net|
  418. %(invidious)s|
  419. youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
  420. (?:.*?\#/)? # handle anchor (#/) redirect urls
  421. (?: # the various things that can precede the ID:
  422. (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
  423. |shorts/
  424. |(?: # or the v= param in all its forms
  425. (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
  426. (?:\?|\#!?) # the params delimiter ? or # or #!
  427. (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
  428. v=
  429. )
  430. ))
  431. |(?:
  432. youtu\.be| # just youtu.be/xxxx
  433. vid\.plus| # or vid.plus/xxxx
  434. zwearz\.com/watch| # or zwearz.com/watch/xxxx
  435. %(invidious)s
  436. )/
  437. |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
  438. )
  439. )? # all until now is optional -> you can pass the naked ID
  440. (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
  441. (?(1).+)? # if we found the ID, everything can follow
  442. $""" % {
  443. 'invidious': '|'.join(_INVIDIOUS_SITES),
  444. }
  445. _PLAYER_INFO_RE = (
  446. r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
  447. r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
  448. r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
  449. )
  450. _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
  451. _GEO_BYPASS = False
  452. IE_NAME = 'youtube'
  453. _TESTS = [
  454. {
  455. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
  456. 'info_dict': {
  457. 'id': 'BaW_jenozKc',
  458. 'ext': 'mp4',
  459. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  460. 'uploader': 'Philipp Hagemeister',
  461. 'uploader_id': 'phihag',
  462. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
  463. 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
  464. 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
  465. 'upload_date': '20121002',
  466. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  467. 'categories': ['Science & Technology'],
  468. 'tags': ['youtube-dl'],
  469. 'duration': 10,
  470. 'view_count': int,
  471. 'like_count': int,
  472. 'dislike_count': int,
  473. 'start_time': 1,
  474. 'end_time': 9,
  475. }
  476. },
  477. {
  478. 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
  479. 'note': 'Embed-only video (#1746)',
  480. 'info_dict': {
  481. 'id': 'yZIXLfi8CZQ',
  482. 'ext': 'mp4',
  483. 'upload_date': '20120608',
  484. 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
  485. 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
  486. 'uploader': 'SET India',
  487. 'uploader_id': 'setindia',
  488. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
  489. 'age_limit': 18,
  490. },
  491. 'skip': 'Private video',
  492. },
  493. {
  494. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
  495. 'note': 'Use the first video ID in the URL',
  496. 'info_dict': {
  497. 'id': 'BaW_jenozKc',
  498. 'ext': 'mp4',
  499. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  500. 'uploader': 'Philipp Hagemeister',
  501. 'uploader_id': 'phihag',
  502. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
  503. 'upload_date': '20121002',
  504. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  505. 'categories': ['Science & Technology'],
  506. 'tags': ['youtube-dl'],
  507. 'duration': 10,
  508. 'view_count': int,
  509. 'like_count': int,
  510. 'dislike_count': int,
  511. },
  512. 'params': {
  513. 'skip_download': True,
  514. },
  515. },
  516. {
  517. 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
  518. 'note': '256k DASH audio (format 141) via DASH manifest',
  519. 'info_dict': {
  520. 'id': 'a9LDPn-MO4I',
  521. 'ext': 'm4a',
  522. 'upload_date': '20121002',
  523. 'uploader_id': '8KVIDEO',
  524. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
  525. 'description': '',
  526. 'uploader': '8KVIDEO',
  527. 'title': 'UHDTV TEST 8K VIDEO.mp4'
  528. },
  529. 'params': {
  530. 'youtube_include_dash_manifest': True,
  531. 'format': '141',
  532. },
  533. 'skip': 'format 141 not served anymore',
  534. },
  535. # DASH manifest with encrypted signature
  536. {
  537. 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
  538. 'info_dict': {
  539. 'id': 'IB3lcPjvWLA',
  540. 'ext': 'm4a',
  541. 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
  542. 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
  543. 'duration': 244,
  544. 'uploader': 'AfrojackVEVO',
  545. 'uploader_id': 'AfrojackVEVO',
  546. 'upload_date': '20131011',
  547. 'abr': 129.495,
  548. },
  549. 'params': {
  550. 'youtube_include_dash_manifest': True,
  551. 'format': '141/bestaudio[ext=m4a]',
  552. },
  553. },
  554. # Controversy video
  555. {
  556. 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
  557. 'info_dict': {
  558. 'id': 'T4XJQO3qol8',
  559. 'ext': 'mp4',
  560. 'duration': 219,
  561. 'upload_date': '20100909',
  562. 'uploader': 'Amazing Atheist',
  563. 'uploader_id': 'TheAmazingAtheist',
  564. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
  565. 'title': 'Burning Everyone\'s Koran',
  566. 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms \r\n\r\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
  567. }
  568. },
  569. # Normal age-gate video (No vevo, embed allowed), available via embed page
  570. {
  571. 'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
  572. 'info_dict': {
  573. 'id': 'HtVdAasjOgU',
  574. 'ext': 'mp4',
  575. 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
  576. 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
  577. 'duration': 142,
  578. 'uploader': 'The Witcher',
  579. 'uploader_id': 'WitcherGame',
  580. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
  581. 'upload_date': '20140605',
  582. 'age_limit': 18,
  583. },
  584. },
  585. {
  586. # Age-gated video only available with authentication (unavailable
  587. # via embed page workaround)
  588. 'url': 'XgnwCQzjau8',
  589. 'only_matching': True,
  590. },
  591. # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
  592. # YouTube Red ad is not captured for creator
  593. {
  594. 'url': '__2ABJjxzNo',
  595. 'info_dict': {
  596. 'id': '__2ABJjxzNo',
  597. 'ext': 'mp4',
  598. 'duration': 266,
  599. 'upload_date': '20100430',
  600. 'uploader_id': 'deadmau5',
  601. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
  602. 'creator': 'deadmau5',
  603. 'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
  604. 'uploader': 'deadmau5',
  605. 'title': 'Deadmau5 - Some Chords (HD)',
  606. 'alt_title': 'Some Chords',
  607. },
  608. 'expected_warnings': [
  609. 'DASH manifest missing',
  610. ]
  611. },
  612. # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
  613. {
  614. 'url': 'lqQg6PlCWgI',
  615. 'info_dict': {
  616. 'id': 'lqQg6PlCWgI',
  617. 'ext': 'mp4',
  618. 'duration': 6085,
  619. 'upload_date': '20150827',
  620. 'uploader_id': 'olympic',
  621. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
  622. 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
  623. 'uploader': 'Olympic',
  624. 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
  625. },
  626. 'params': {
  627. 'skip_download': 'requires avconv',
  628. }
  629. },
  630. # Non-square pixels
  631. {
  632. 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
  633. 'info_dict': {
  634. 'id': '_b-2C3KPAM0',
  635. 'ext': 'mp4',
  636. 'stretched_ratio': 16 / 9.,
  637. 'duration': 85,
  638. 'upload_date': '20110310',
  639. 'uploader_id': 'AllenMeow',
  640. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
  641. 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
  642. 'uploader': '孫ᄋᄅ',
  643. 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
  644. },
  645. },
  646. # url_encoded_fmt_stream_map is empty string
  647. {
  648. 'url': 'qEJwOuvDf7I',
  649. 'info_dict': {
  650. 'id': 'qEJwOuvDf7I',
  651. 'ext': 'webm',
  652. 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
  653. 'description': '',
  654. 'upload_date': '20150404',
  655. 'uploader_id': 'spbelect',
  656. 'uploader': 'Наблюдатели Петербурга',
  657. },
  658. 'params': {
  659. 'skip_download': 'requires avconv',
  660. },
  661. 'skip': 'This live event has ended.',
  662. },
  663. # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
  664. {
  665. 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
  666. 'info_dict': {
  667. 'id': 'FIl7x6_3R5Y',
  668. 'ext': 'webm',
  669. 'title': 'md5:7b81415841e02ecd4313668cde88737a',
  670. 'description': 'md5:116377fd2963b81ec4ce64b542173306',
  671. 'duration': 220,
  672. 'upload_date': '20150625',
  673. 'uploader_id': 'dorappi2000',
  674. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
  675. 'uploader': 'dorappi2000',
  676. 'formats': 'mincount:31',
  677. },
  678. 'skip': 'not actual anymore',
  679. },
  680. # DASH manifest with segment_list
  681. {
  682. 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
  683. 'md5': '8ce563a1d667b599d21064e982ab9e31',
  684. 'info_dict': {
  685. 'id': 'CsmdDsKjzN8',
  686. 'ext': 'mp4',
  687. 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
  688. 'uploader': 'Airtek',
  689. 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
  690. 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
  691. 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
  692. },
  693. 'params': {
  694. 'youtube_include_dash_manifest': True,
  695. 'format': '135', # bestvideo
  696. },
  697. 'skip': 'This live event has ended.',
  698. },
  699. {
  700. # Multifeed videos (multiple cameras), URL is for Main Camera
  701. 'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
  702. 'info_dict': {
  703. 'id': 'jvGDaLqkpTg',
  704. 'title': 'Tom Clancy Free Weekend Rainbow Whatever',
  705. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  706. },
  707. 'playlist': [{
  708. 'info_dict': {
  709. 'id': 'jvGDaLqkpTg',
  710. 'ext': 'mp4',
  711. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
  712. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  713. 'duration': 10643,
  714. 'upload_date': '20161111',
  715. 'uploader': 'Team PGP',
  716. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  717. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  718. },
  719. }, {
  720. 'info_dict': {
  721. 'id': '3AKt1R1aDnw',
  722. 'ext': 'mp4',
  723. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
  724. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  725. 'duration': 10991,
  726. 'upload_date': '20161111',
  727. 'uploader': 'Team PGP',
  728. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  729. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  730. },
  731. }, {
  732. 'info_dict': {
  733. 'id': 'RtAMM00gpVc',
  734. 'ext': 'mp4',
  735. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
  736. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  737. 'duration': 10995,
  738. 'upload_date': '20161111',
  739. 'uploader': 'Team PGP',
  740. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  741. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  742. },
  743. }, {
  744. 'info_dict': {
  745. 'id': '6N2fdlP3C5U',
  746. 'ext': 'mp4',
  747. 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
  748. 'description': 'md5:e03b909557865076822aa169218d6a5d',
  749. 'duration': 10990,
  750. 'upload_date': '20161111',
  751. 'uploader': 'Team PGP',
  752. 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
  753. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
  754. },
  755. }],
  756. 'params': {
  757. 'skip_download': True,
  758. },
  759. },
  760. {
  761. # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
  762. 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
  763. 'info_dict': {
  764. 'id': 'gVfLd0zydlo',
  765. 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
  766. },
  767. 'playlist_count': 2,
  768. 'skip': 'Not multifeed anymore',
  769. },
  770. {
  771. 'url': 'https://vid.plus/FlRa-iH7PGw',
  772. 'only_matching': True,
  773. },
  774. {
  775. 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
  776. 'only_matching': True,
  777. },
  778. {
  779. # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  780. # Also tests cut-off URL expansion in video description (see
  781. # https://github.com/ytdl-org/youtube-dl/issues/1892,
  782. # https://github.com/ytdl-org/youtube-dl/issues/8164)
  783. 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
  784. 'info_dict': {
  785. 'id': 'lsguqyKfVQg',
  786. 'ext': 'mp4',
  787. 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
  788. 'alt_title': 'Dark Walk - Position Music',
  789. 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
  790. 'duration': 133,
  791. 'upload_date': '20151119',
  792. 'uploader_id': 'IronSoulElf',
  793. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
  794. 'uploader': 'IronSoulElf',
  795. 'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
  796. 'track': 'Dark Walk - Position Music',
  797. 'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
  798. 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
  799. },
  800. 'params': {
  801. 'skip_download': True,
  802. },
  803. },
  804. {
  805. # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  806. 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
  807. 'only_matching': True,
  808. },
  809. {
  810. # Video with yt:stretch=17:0
  811. 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
  812. 'info_dict': {
  813. 'id': 'Q39EVAstoRM',
  814. 'ext': 'mp4',
  815. 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
  816. 'description': 'md5:ee18a25c350637c8faff806845bddee9',
  817. 'upload_date': '20151107',
  818. 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
  819. 'uploader': 'CH GAMER DROID',
  820. },
  821. 'params': {
  822. 'skip_download': True,
  823. },
  824. 'skip': 'This video does not exist.',
  825. },
  826. {
  827. # Video with incomplete 'yt:stretch=16:'
  828. 'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
  829. 'only_matching': True,
  830. },
  831. {
  832. # Video licensed under Creative Commons
  833. 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
  834. 'info_dict': {
  835. 'id': 'M4gD1WSo5mA',
  836. 'ext': 'mp4',
  837. 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
  838. 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
  839. 'duration': 721,
  840. 'upload_date': '20150127',
  841. 'uploader_id': 'BerkmanCenter',
  842. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
  843. 'uploader': 'The Berkman Klein Center for Internet & Society',
  844. 'license': 'Creative Commons Attribution license (reuse allowed)',
  845. },
  846. 'params': {
  847. 'skip_download': True,
  848. },
  849. },
  850. {
  851. # Channel-like uploader_url
  852. 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
  853. 'info_dict': {
  854. 'id': 'eQcmzGIKrzg',
  855. 'ext': 'mp4',
  856. 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
  857. 'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
  858. 'duration': 4060,
  859. 'upload_date': '20151119',
  860. 'uploader': 'Bernie Sanders',
  861. 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
  862. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
  863. 'license': 'Creative Commons Attribution license (reuse allowed)',
  864. },
  865. 'params': {
  866. 'skip_download': True,
  867. },
  868. },
  869. {
  870. 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
  871. 'only_matching': True,
  872. },
  873. {
  874. # YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
  875. 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
  876. 'only_matching': True,
  877. },
  878. {
  879. # Rental video preview
  880. 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
  881. 'info_dict': {
  882. 'id': 'uGpuVWrhIzE',
  883. 'ext': 'mp4',
  884. 'title': 'Piku - Trailer',
  885. 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
  886. 'upload_date': '20150811',
  887. 'uploader': 'FlixMatrix',
  888. 'uploader_id': 'FlixMatrixKaravan',
  889. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
  890. 'license': 'Standard YouTube License',
  891. },
  892. 'params': {
  893. 'skip_download': True,
  894. },
  895. 'skip': 'This video is not available.',
  896. },
  897. {
  898. # YouTube Red video with episode data
  899. 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
  900. 'info_dict': {
  901. 'id': 'iqKdEhx-dD4',
  902. 'ext': 'mp4',
  903. 'title': 'Isolation - Mind Field (Ep 1)',
  904. 'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
  905. 'duration': 2085,
  906. 'upload_date': '20170118',
  907. 'uploader': 'Vsauce',
  908. 'uploader_id': 'Vsauce',
  909. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
  910. 'series': 'Mind Field',
  911. 'season_number': 1,
  912. 'episode_number': 1,
  913. },
  914. 'params': {
  915. 'skip_download': True,
  916. },
  917. 'expected_warnings': [
  918. 'Skipping DASH manifest',
  919. ],
  920. },
  921. {
  922. # The following content has been identified by the YouTube community
  923. # as inappropriate or offensive to some audiences.
  924. 'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
  925. 'info_dict': {
  926. 'id': '6SJNVb0GnPI',
  927. 'ext': 'mp4',
  928. 'title': 'Race Differences in Intelligence',
  929. 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
  930. 'duration': 965,
  931. 'upload_date': '20140124',
  932. 'uploader': 'New Century Foundation',
  933. 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
  934. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
  935. },
  936. 'params': {
  937. 'skip_download': True,
  938. },
  939. 'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
  940. },
  941. {
  942. # itag 212
  943. 'url': '1t24XAntNCY',
  944. 'only_matching': True,
  945. },
  946. {
  947. # geo restricted to JP
  948. 'url': 'sJL6WA-aGkQ',
  949. 'only_matching': True,
  950. },
  951. {
  952. 'url': 'https://invidio.us/watch?v=BaW_jenozKc',
  953. 'only_matching': True,
  954. },
  955. {
  956. 'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
  957. 'only_matching': True,
  958. },
  959. {
  960. # from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
  961. 'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
  962. 'only_matching': True,
  963. },
  964. {
  965. # DRM protected
  966. 'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
  967. 'only_matching': True,
  968. },
  969. {
  970. # Video with unsupported adaptive stream type formats
  971. 'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
  972. 'info_dict': {
  973. 'id': 'Z4Vy8R84T1U',
  974. 'ext': 'mp4',
  975. 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
  976. 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
  977. 'duration': 433,
  978. 'upload_date': '20130923',
  979. 'uploader': 'Amelia Putri Harwita',
  980. 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
  981. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
  982. 'formats': 'maxcount:10',
  983. },
  984. 'params': {
  985. 'skip_download': True,
  986. 'youtube_include_dash_manifest': False,
  987. },
  988. 'skip': 'not actual anymore',
  989. },
  990. {
  991. # Youtube Music Auto-generated description
  992. 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
  993. 'info_dict': {
  994. 'id': 'MgNrAu2pzNs',
  995. 'ext': 'mp4',
  996. 'title': 'Voyeur Girl',
  997. 'description': 'md5:7ae382a65843d6df2685993e90a8628f',
  998. 'upload_date': '20190312',
  999. 'uploader': 'Stephen - Topic',
  1000. 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
  1001. 'artist': 'Stephen',
  1002. 'track': 'Voyeur Girl',
  1003. 'album': 'it\'s too much love to know my dear',
  1004. 'release_date': '20190313',
  1005. 'release_year': 2019,
  1006. },
  1007. 'params': {
  1008. 'skip_download': True,
  1009. },
  1010. },
  1011. {
  1012. 'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
  1013. 'only_matching': True,
  1014. },
  1015. {
  1016. # invalid -> valid video id redirection
  1017. 'url': 'DJztXj2GPfl',
  1018. 'info_dict': {
  1019. 'id': 'DJztXj2GPfk',
  1020. 'ext': 'mp4',
  1021. 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
  1022. 'description': 'md5:bf577a41da97918e94fa9798d9228825',
  1023. 'upload_date': '20090125',
  1024. 'uploader': 'Prochorowka',
  1025. 'uploader_id': 'Prochorowka',
  1026. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
  1027. 'artist': 'Panjabi MC',
  1028. 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
  1029. 'album': 'Beware of the Boys (Mundian To Bach Ke)',
  1030. },
  1031. 'params': {
  1032. 'skip_download': True,
  1033. },
  1034. 'skip': 'Video unavailable',
  1035. },
  1036. {
  1037. # empty description results in an empty string
  1038. 'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
  1039. 'info_dict': {
  1040. 'id': 'x41yOUIvK2k',
  1041. 'ext': 'mp4',
  1042. 'title': 'IMG 3456',
  1043. 'description': '',
  1044. 'upload_date': '20170613',
  1045. 'uploader_id': 'ElevageOrVert',
  1046. 'uploader': 'ElevageOrVert',
  1047. },
  1048. 'params': {
  1049. 'skip_download': True,
  1050. },
  1051. },
  1052. {
  1053. # with '};' inside yt initial data (see [1])
  1054. # see [2] for an example with '};' inside ytInitialPlayerResponse
  1055. # 1. https://github.com/ytdl-org/youtube-dl/issues/27093
  1056. # 2. https://github.com/ytdl-org/youtube-dl/issues/27216
  1057. 'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
  1058. 'info_dict': {
  1059. 'id': 'CHqg6qOn4no',
  1060. 'ext': 'mp4',
  1061. 'title': 'Part 77 Sort a list of simple types in c#',
  1062. 'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
  1063. 'upload_date': '20130831',
  1064. 'uploader_id': 'kudvenkat',
  1065. 'uploader': 'kudvenkat',
  1066. },
  1067. 'params': {
  1068. 'skip_download': True,
  1069. },
  1070. },
  1071. {
  1072. # another example of '};' in ytInitialData
  1073. 'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
  1074. 'only_matching': True,
  1075. },
  1076. {
  1077. 'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
  1078. 'only_matching': True,
  1079. },
  1080. {
  1081. # https://github.com/ytdl-org/youtube-dl/pull/28094
  1082. 'url': 'OtqTfy26tG0',
  1083. 'info_dict': {
  1084. 'id': 'OtqTfy26tG0',
  1085. 'ext': 'mp4',
  1086. 'title': 'Burn Out',
  1087. 'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
  1088. 'upload_date': '20141120',
  1089. 'uploader': 'The Cinematic Orchestra - Topic',
  1090. 'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
  1091. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
  1092. 'artist': 'The Cinematic Orchestra',
  1093. 'track': 'Burn Out',
  1094. 'album': 'Every Day',
  1095. 'release_data': None,
  1096. 'release_year': None,
  1097. },
  1098. 'params': {
  1099. 'skip_download': True,
  1100. },
  1101. },
  1102. {
  1103. # controversial video, only works with bpctr when authenticated with cookies
  1104. 'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
  1105. 'only_matching': True,
  1106. },
  1107. {
  1108. # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
  1109. 'url': 'cBvYw8_A0vQ',
  1110. 'info_dict': {
  1111. 'id': 'cBvYw8_A0vQ',
  1112. 'ext': 'mp4',
  1113. 'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
  1114. 'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
  1115. 'upload_date': '20201120',
  1116. 'uploader': 'Walk around Japan',
  1117. 'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
  1118. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
  1119. },
  1120. 'params': {
  1121. 'skip_download': True,
  1122. },
  1123. },
  1124. {
  1125. # YT 'Shorts'
  1126. 'url': 'https://youtube.com/shorts/4L2J27mJ3Dc',
  1127. 'info_dict': {
  1128. 'id': '4L2J27mJ3Dc',
  1129. 'ext': 'mp4',
  1130. 'upload_date': '20211025',
  1131. 'uploader': 'Charlie Berens',
  1132. 'description': 'md5:976512b8a29269b93bbd8a61edc45a6d',
  1133. 'uploader_id': 'fivedlrmilkshake',
  1134. 'title': 'Midwest Squid Game #Shorts',
  1135. },
  1136. 'params': {
  1137. 'skip_download': True,
  1138. },
  1139. },
  1140. ]
    # Static metadata for known YouTube itags, keyed by itag string.
    # Values fill in ext/codec/resolution info that the streaming data
    # does not always carry; 'preference' demotes 3D/HLS variants.
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
        '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
        '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
        '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
        '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
        '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        # 3D videos
        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
        # Apple HTTP Live Streaming
        '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
        # DASH mp4 video
        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'},  # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
        '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
        '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
        # Dash mp4 audio
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
        '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
        '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
        '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
        '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
        # Dash webm
        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        # Dash webm audio
        '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
        '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
        # Dash webm audio with opus inside
        '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
        '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
        '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
        # RTMP (unnamed)
        '_rtmp': {'protocol': 'rtmp'},
        # av01 video only formats sometimes served with "unknown" codecs
        '394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
        '395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
        '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
        '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
    }
  1236. @classmethod
  1237. def suitable(cls, url):
  1238. # Hack for lazy extractors until more generic solution is implemented
  1239. # (see #28780)
  1240. from .youtube import parse_qs
  1241. qs = parse_qs(url)
  1242. if qs.get('list', [None])[0]:
  1243. return False
  1244. return super(YoutubeIE, cls).suitable(url)
  1245. def __init__(self, *args, **kwargs):
  1246. super(YoutubeIE, self).__init__(*args, **kwargs)
  1247. self._code_cache = {}
  1248. self._player_cache = {}
  1249. def _signature_cache_id(self, example_sig):
  1250. """ Return a string representation of a signature """
  1251. return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
  1252. @classmethod
  1253. def _extract_player_info(cls, player_url):
  1254. for player_re in cls._PLAYER_INFO_RE:
  1255. id_m = re.search(player_re, player_url)
  1256. if id_m:
  1257. break
  1258. else:
  1259. raise ExtractorError('Cannot identify player %r' % player_url)
  1260. return id_m.group('id')
  1261. def _get_player_code(self, video_id, player_url, player_id=None):
  1262. if not player_id:
  1263. player_id = self._extract_player_info(player_url)
  1264. if player_id not in self._code_cache:
  1265. self._code_cache[player_id] = self._download_webpage(
  1266. player_url, video_id,
  1267. note='Downloading player ' + player_id,
  1268. errnote='Download of %s failed' % player_url)
  1269. return self._code_cache[player_id]
  1270. def _extract_signature_function(self, video_id, player_url, example_sig):
  1271. player_id = self._extract_player_info(player_url)
  1272. # Read from filesystem cache
  1273. func_id = 'js_%s_%s' % (
  1274. player_id, self._signature_cache_id(example_sig))
  1275. assert os.path.basename(func_id) == func_id
  1276. cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
  1277. if cache_spec is not None:
  1278. return lambda s: ''.join(s[i] for i in cache_spec)
  1279. code = self._get_player_code(video_id, player_url, player_id)
  1280. res = self._parse_sig_js(code)
  1281. test_string = ''.join(map(compat_chr, range(len(example_sig))))
  1282. cache_res = res(test_string)
  1283. cache_spec = [ord(c) for c in cache_res]
  1284. self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
  1285. return res
    def _print_sig_code(self, func, example_sig):
        """Print Python source equivalent to the extracted signature
        function *func*, for inclusion in youtube-dl (debug aid used by
        --youtube-print-sig-code)."""
        def gen_sig_code(idxs):
            # Yield slice/index expressions that reproduce the index
            # sequence *idxs*, coalescing runs with step +1/-1 into
            # slices (e.g. s[3:7]) and emitting s[i] for lone indices.
            def _genslice(start, end, step):
                starts = '' if start == 0 else str(start)
                # end is inclusive here; Python slices exclude the stop,
                # hence end + step
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)

            step = None
            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    # currently inside a run; extend it or flush it
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    # adjacent indices start a new sliceable run
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            # flush the final element or the still-open run
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)

        # Probe the function with distinct characters to recover the
        # permutation it applies.
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
    def _parse_sig_js(self, jscode):
        """Locate the signature-descrambling function inside the player
        JS *jscode* and return a callable wrapping its interpretation.

        Raises from _search_regex when no known pattern matches.
        """
        # Patterns are tried in order; earlier ones match current player
        # builds, the "Obsolete patterns" cover older player versions.
        funcname = self._search_regex(
            (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
             r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
             r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
             r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
             r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
             # Obsolete patterns
             r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
             r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
            jscode, 'Initial JS player signature function name', group='sig')

        jsi = JSInterpreter(jscode)
        initial_function = jsi.extract_function(funcname)
        # The JS function takes a single argument (the scrambled string).
        return lambda s: initial_function([s])
  1344. def _decrypt_signature(self, s, video_id, player_url):
  1345. """Turn the encrypted s field into a working signature"""
  1346. if player_url is None:
  1347. raise ExtractorError('Cannot decrypt signature without player_url')
  1348. try:
  1349. player_id = (player_url, self._signature_cache_id(s))
  1350. if player_id not in self._player_cache:
  1351. func = self._extract_signature_function(
  1352. video_id, player_url, s
  1353. )
  1354. self._player_cache[player_id] = func
  1355. func = self._player_cache[player_id]
  1356. if self._downloader.params.get('youtube_print_sig_code'):
  1357. self._print_sig_code(func, s)
  1358. return func(s)
  1359. except Exception as e:
  1360. tb = traceback.format_exc()
  1361. raise ExtractorError(
  1362. 'Signature extraction failed: ' + tb, cause=e)
  1363. def _extract_player_url(self, webpage):
  1364. player_url = self._search_regex(
  1365. r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"',
  1366. webpage or '', 'player URL', fatal=False)
  1367. if not player_url:
  1368. return
  1369. if player_url.startswith('//'):
  1370. player_url = 'https:' + player_url
  1371. elif not re.match(r'https?://', player_url):
  1372. player_url = compat_urlparse.urljoin(
  1373. 'https://www.youtube.com', player_url)
  1374. return player_url
    # from yt-dlp
    # See also:
    # 1. https://github.com/ytdl-org/youtube-dl/issues/29326#issuecomment-894619419
    # 2. https://code.videolan.org/videolan/vlc/-/blob/4fb284e5af69aa9ac2100ccbdd3b88debec9987f/share/lua/playlist/youtube.lua#L116
    # 3. https://github.com/ytdl-org/youtube-dl/issues/30097#issuecomment-950157377
    def _extract_n_function_name(self, jscode):
        """Return the name of the player-JS function that transforms the
        "n" query parameter, resolving array-element references like
        xx[0] to the actual function name."""
        # Matches either a bare identifier or an array element (name[idx]).
        target = r'(?P<nfunc>[a-zA-Z_$][\w$]*)(?:\[(?P<idx>\d+)\])?'
        nfunc_and_idx = self._search_regex(
            r'\.get\("n"\)\)&&\(b=(%s)\([\w$]+\)' % (target, ),
            jscode, 'Initial JS player n function name')
        nfunc, idx = re.match(target, nfunc_and_idx).group('nfunc', 'idx')
        if not idx:
            # Direct reference: the matched identifier is the function.
            return nfunc
        if int_or_none(idx) == 0:
            # Common case: a one-element alias array `var xx=[func];`
            real_nfunc = self._search_regex(
                r'var %s\s*=\s*\[([a-zA-Z_$][\w$]*)\];' % (re.escape(nfunc), ), jscode,
                'Initial JS player n function alias ({nfunc}[{idx}])'.format(**locals()))
            if real_nfunc:
                return real_nfunc
        # General case: parse the whole array literal as JSON and pick
        # the idx-th element.
        return self._parse_json(self._search_regex(
            r'var %s\s*=\s*(\[.+?\]);' % (re.escape(nfunc), ), jscode,
            'Initial JS player n function name ({nfunc}[{idx}])'.format(**locals())), nfunc, transform_source=js_to_json)[int(idx)]
  1397. def _extract_n_function(self, video_id, player_url):
  1398. player_id = self._extract_player_info(player_url)
  1399. func_code = self._downloader.cache.load('youtube-nsig', player_id)
  1400. if func_code:
  1401. jsi = JSInterpreter(func_code)
  1402. else:
  1403. jscode = self._get_player_code(video_id, player_url, player_id)
  1404. funcname = self._extract_n_function_name(jscode)
  1405. jsi = JSInterpreter(jscode)
  1406. func_code = jsi.extract_function_code(funcname)
  1407. self._downloader.cache.store('youtube-nsig', player_id, func_code)
  1408. if self._downloader.params.get('youtube_print_sig_code'):
  1409. self.to_screen('Extracted nsig function from {0}:\n{1}\n'.format(player_id, func_code[1]))
  1410. return lambda s: jsi.extract_function_from_code(*func_code)([s])
    def _n_descramble(self, n_param, player_url, video_id):
        """Compute the response to YT's "n" parameter challenge

        Returns the descrambled value, or None when extraction fails
        (a warning is emitted instead of raising).

        Args:
        n_param -- challenge string that is the value of the
        URL's "n" query parameter
        player_url -- URL of YT player JS
        video_id
        """
        # Results are memoised per challenge string, so every format of
        # a video needs at most one JS evaluation.
        sig_id = ('nsig_value', n_param)
        if sig_id in self._player_cache:
            return self._player_cache[sig_id]
        try:
            # The descrambling callable itself is cached per player URL.
            player_id = ('nsig', player_url)
            if player_id not in self._player_cache:
                self._player_cache[player_id] = self._extract_n_function(video_id, player_url)
            func = self._player_cache[player_id]
            self._player_cache[sig_id] = func(n_param)
            if self._downloader.params.get('verbose', False):
                self._downloader.to_screen('[debug] [%s] %s' % (self.IE_NAME, 'Decrypted nsig {0} => {1}'.format(n_param, self._player_cache[sig_id])))
            return self._player_cache[sig_id]
        except Exception as e:
            # Best effort: warn and fall through (returning None); the
            # download still works but may be throttled by the server.
            self._downloader.report_warning(
                '[%s] %s (%s %s)' % (
                    self.IE_NAME,
                    'Unable to decode n-parameter: download likely to be throttled',
                    error_to_compat_str(e),
                    traceback.format_exc()))
  1438. def _unthrottle_format_urls(self, video_id, player_url, formats):
  1439. for fmt in formats:
  1440. parsed_fmt_url = compat_urlparse.urlparse(fmt['url'])
  1441. qs = compat_urlparse.parse_qs(parsed_fmt_url.query)
  1442. n_param = qs.get('n')
  1443. if not n_param:
  1444. continue
  1445. n_param = n_param[-1]
  1446. n_response = self._n_descramble(n_param, player_url, video_id)
  1447. if n_response:
  1448. qs['n'] = [n_response]
  1449. fmt['url'] = compat_urlparse.urlunparse(
  1450. parsed_fmt_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
  1451. def _mark_watched(self, video_id, player_response):
  1452. playback_url = url_or_none(try_get(
  1453. player_response,
  1454. lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']))
  1455. if not playback_url:
  1456. return
  1457. parsed_playback_url = compat_urlparse.urlparse(playback_url)
  1458. qs = compat_urlparse.parse_qs(parsed_playback_url.query)
  1459. # cpn generation algorithm is reverse engineered from base.js.
  1460. # In fact it works even with dummy cpn.
  1461. CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
  1462. cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
  1463. qs.update({
  1464. 'ver': ['2'],
  1465. 'cpn': [cpn],
  1466. })
  1467. playback_url = compat_urlparse.urlunparse(
  1468. parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
  1469. self._download_webpage(
  1470. playback_url, video_id, 'Marking watched',
  1471. 'Unable to mark watched', fatal=False)
  1472. @staticmethod
  1473. def _extract_urls(webpage):
  1474. # Embedded YouTube player
  1475. entries = [
  1476. unescapeHTML(mobj.group('url'))
  1477. for mobj in re.finditer(r'''(?x)
  1478. (?:
  1479. <iframe[^>]+?src=|
  1480. data-video-url=|
  1481. <embed[^>]+?src=|
  1482. embedSWF\(?:\s*|
  1483. <object[^>]+data=|
  1484. new\s+SWFObject\(
  1485. )
  1486. (["\'])
  1487. (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
  1488. (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
  1489. \1''', webpage)]
  1490. # lazyYT YouTube embed
  1491. entries.extend(list(map(
  1492. unescapeHTML,
  1493. re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
  1494. # Wordpress "YouTube Video Importer" plugin
  1495. matches = re.findall(r'''(?x)<div[^>]+
  1496. class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
  1497. data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
  1498. entries.extend(m[-1] for m in matches)
  1499. return entries
  1500. @staticmethod
  1501. def _extract_url(webpage):
  1502. urls = YoutubeIE._extract_urls(webpage)
  1503. return urls[0] if urls else None
  1504. @classmethod
  1505. def extract_id(cls, url):
  1506. mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
  1507. if mobj is None:
  1508. raise ExtractorError('Invalid URL: %s' % url)
  1509. video_id = mobj.group(2)
  1510. return video_id
  1511. def _extract_chapters_from_json(self, data, video_id, duration):
  1512. chapters_list = try_get(
  1513. data,
  1514. lambda x: x['playerOverlays']
  1515. ['playerOverlayRenderer']
  1516. ['decoratedPlayerBarRenderer']
  1517. ['decoratedPlayerBarRenderer']
  1518. ['playerBar']
  1519. ['chapteredPlayerBarRenderer']
  1520. ['chapters'],
  1521. list)
  1522. if not chapters_list:
  1523. return
  1524. def chapter_time(chapter):
  1525. return float_or_none(
  1526. try_get(
  1527. chapter,
  1528. lambda x: x['chapterRenderer']['timeRangeStartMillis'],
  1529. int),
  1530. scale=1000)
  1531. chapters = []
  1532. for next_num, chapter in enumerate(chapters_list, start=1):
  1533. start_time = chapter_time(chapter)
  1534. if start_time is None:
  1535. continue
  1536. end_time = (chapter_time(chapters_list[next_num])
  1537. if next_num < len(chapters_list) else duration)
  1538. if end_time is None:
  1539. continue
  1540. title = try_get(
  1541. chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
  1542. compat_str)
  1543. chapters.append({
  1544. 'start_time': start_time,
  1545. 'end_time': end_time,
  1546. 'title': title,
  1547. })
  1548. return chapters
  1549. def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
  1550. return self._parse_json(self._search_regex(
  1551. (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
  1552. regex), webpage, name, default='{}'), video_id, fatal=False)
    def _real_extract(self, url):
        """Extract a single YouTube video.

        Pipeline: fetch the watch page and player response (falling back to
        the innertube API), handle age-gate/trailer/multifeed special cases,
        build the formats list (progressive + adaptive, HLS, DASH), then
        assemble the info dict with metadata, subtitles, chapters and
        like/dislike counts scraped from the initial data.
        """
        url, smuggled_data = unsmuggle_url(url, {})
        video_id = self._match_id(url)
        base_url = self.http_scheme() + '//www.youtube.com/'
        webpage_url = base_url + 'watch?v=' + video_id
        # bpctr/has_verified bypass the "content warning" interstitial
        webpage = self._download_webpage(
            webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)

        # Prefer the player response embedded in the page; fall back to the API.
        player_response = None
        if webpage:
            player_response = self._extract_yt_initial_variable(
                webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
                video_id, 'initial player response')
        if not player_response:
            player_response = self._call_api(
                'player', {'videoId': video_id}, video_id)

        playability_status = player_response.get('playabilityStatus') or {}
        if playability_status.get('reason') == 'Sign in to confirm your age':
            # Age-gated: re-fetch via get_video_info with a TV client identity
            video_info = self._download_webpage(
                base_url + 'get_video_info', video_id,
                'Refetching age-gated info webpage',
                'unable to download video info webpage', query={
                    'video_id': video_id,
                    'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                    'html5': 1,
                    # See https://github.com/ytdl-org/youtube-dl/issues/29333#issuecomment-864049544
                    'c': 'TVHTML5',
                    'cver': '6.20180913',
                }, fatal=False)
            if video_info:
                pr = self._parse_json(
                    try_get(
                        compat_parse_qs(video_info),
                        lambda x: x['player_response'][0], compat_str) or '{}',
                    video_id, fatal=False)
                if pr and isinstance(pr, dict):
                    player_response = pr

        # Paid content may expose only a trailer; delegate to it.
        trailer_video_id = try_get(
            playability_status,
            lambda x: x['errorScreen']['playerLegacyDesktopYpcTrailerRenderer']['trailerVideoId'],
            compat_str)
        if trailer_video_id:
            return self.url_result(
                trailer_video_id, self.ie_key(), trailer_video_id)

        def get_text(x):
            # Innertube text object -> plain string ('simpleText' or 'runs')
            if not x:
                return
            text = x.get('simpleText')
            if text and isinstance(text, compat_str):
                return text
            runs = x.get('runs')
            if not isinstance(runs, list):
                return
            return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])

        search_meta = (
            lambda x: self._html_search_meta(x, webpage, default=None)) \
            if webpage else lambda x: None

        video_details = player_response.get('videoDetails') or {}
        microformat = try_get(
            player_response,
            lambda x: x['microformat']['playerMicroformatRenderer'],
            dict) or {}
        video_title = video_details.get('title') \
            or get_text(microformat.get('title')) \
            or search_meta(['og:title', 'twitter:title', 'title'])
        video_description = video_details.get('shortDescription')

        # Multi-camera live events: return a playlist of the individual feeds
        # unless --no-playlist or a smuggled force_singlefeed says otherwise.
        if not smuggled_data.get('force_singlefeed', False):
            if not self._downloader.params.get('noplaylist'):
                multifeed_metadata_list = try_get(
                    player_response,
                    lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
                    compat_str)
                if multifeed_metadata_list:
                    entries = []
                    feed_ids = []
                    for feed in multifeed_metadata_list.split(','):
                        # Unquote should take place before split on comma (,) since textual
                        # fields may contain comma as well (see
                        # https://github.com/ytdl-org/youtube-dl/issues/8536)
                        feed_data = compat_parse_qs(
                            compat_urllib_parse_unquote_plus(feed))

                        def feed_entry(name):
                            return try_get(
                                feed_data, lambda x: x[name][0], compat_str)

                        feed_id = feed_entry('id')
                        if not feed_id:
                            continue
                        feed_title = feed_entry('title')
                        title = video_title
                        if feed_title:
                            title += ' (%s)' % feed_title
                        entries.append({
                            '_type': 'url_transparent',
                            'ie_key': 'Youtube',
                            'url': smuggle_url(
                                base_url + 'watch?v=' + feed_data['id'][0],
                                {'force_singlefeed': True}),
                            'title': title,
                        })
                        feed_ids.append(feed_id)
                    self.to_screen(
                        'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
                        % (', '.join(feed_ids), video_id))
                    return self.playlist_result(
                        entries, video_id, video_title, video_description)
            else:
                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)

        formats = []
        itags = []
        itag_qualities = {}
        player_url = None
        # Quality ladder used to rank 'quality' labels from the API
        q = qualities(['tiny', 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'])
        streaming_data = player_response.get('streamingData') or {}
        streaming_formats = streaming_data.get('formats') or []
        streaming_formats.extend(streaming_data.get('adaptiveFormats') or [])
        for fmt in streaming_formats:
            # Skip live segments and DRM-protected formats
            if fmt.get('targetDurationSec') or fmt.get('drmFamilies'):
                continue
            itag = str_or_none(fmt.get('itag'))
            quality = fmt.get('quality')
            if itag and quality:
                itag_qualities[itag] = quality
            # FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment
            # (adding `&sq=0` to the URL) and parsing emsg box to determine the
            # number of fragment that would subsequently requested with (`&sq=N`)
            if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
                continue
            fmt_url = fmt.get('url')
            if not fmt_url:
                # URL is hidden behind a scrambled signatureCipher; decrypt it
                sc = compat_parse_qs(fmt.get('signatureCipher'))
                fmt_url = url_or_none(try_get(sc, lambda x: x['url'][0]))
                encrypted_sig = try_get(sc, lambda x: x['s'][0])
                if not (sc and fmt_url and encrypted_sig):
                    continue
                if not player_url:
                    player_url = self._extract_player_url(webpage)
                if not player_url:
                    continue
                signature = self._decrypt_signature(sc['s'][0], video_id, player_url)
                sp = try_get(sc, lambda x: x['sp'][0]) or 'signature'
                fmt_url += '&' + sp + '=' + signature

            if itag:
                itags.append(itag)
            tbr = float_or_none(
                fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
            dct = {
                'asr': int_or_none(fmt.get('audioSampleRate')),
                'filesize': int_or_none(fmt.get('contentLength')),
                'format_id': itag,
                'format_note': fmt.get('qualityLabel') or quality,
                'fps': int_or_none(fmt.get('fps')),
                'height': int_or_none(fmt.get('height')),
                'quality': q(quality),
                'tbr': tbr,
                'url': fmt_url,
                'width': fmt.get('width'),
            }
            mimetype = fmt.get('mimeType')
            if mimetype:
                mobj = re.match(
                    r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', mimetype)
                if mobj:
                    dct['ext'] = mimetype2ext(mobj.group(1))
                    dct.update(parse_codecs(mobj.group(2)))
            no_audio = dct.get('acodec') == 'none'
            no_video = dct.get('vcodec') == 'none'
            if no_audio:
                dct['vbr'] = tbr
            if no_video:
                dct['abr'] = tbr
            if no_audio or no_video:
                dct['downloader_options'] = {
                    # Youtube throttles chunks >~10M
                    'http_chunk_size': 10485760,
                }
            if dct.get('ext'):
                dct['container'] = dct['ext'] + '_dash'
            formats.append(dct)

        # HLS formats (live and some VOD)
        hls_manifest_url = streaming_data.get('hlsManifestUrl')
        if hls_manifest_url:
            for f in self._extract_m3u8_formats(
                    hls_manifest_url, video_id, 'mp4', fatal=False):
                itag = self._search_regex(
                    r'/itag/(\d+)', f['url'], 'itag', default=None)
                if itag:
                    f['format_id'] = itag
                formats.append(f)

        # DASH manifest formats, deduplicated against already-seen itags
        if self._downloader.params.get('youtube_include_dash_manifest', True):
            dash_manifest_url = streaming_data.get('dashManifestUrl')
            if dash_manifest_url:
                for f in self._extract_mpd_formats(
                        dash_manifest_url, video_id, fatal=False):
                    itag = f['format_id']
                    if itag in itags:
                        continue
                    if itag in itag_qualities:
                        f['quality'] = q(itag_qualities[itag])
                    filesize = int_or_none(self._search_regex(
                        r'/clen/(\d+)', f.get('fragment_base_url')
                        or f['url'], 'file size', default=None))
                    if filesize:
                        f['filesize'] = filesize
                    formats.append(f)

        # No formats at all: surface the most specific error we can find
        if not formats:
            if streaming_data.get('licenseInfos'):
                raise ExtractorError(
                    'This video is DRM protected.', expected=True)
            pemr = try_get(
                playability_status,
                lambda x: x['errorScreen']['playerErrorMessageRenderer'],
                dict) or {}
            reason = get_text(pemr.get('reason')) or playability_status.get('reason')
            subreason = pemr.get('subreason')
            if subreason:
                subreason = clean_html(get_text(subreason))
                if subreason == 'The uploader has not made this video available in your country.':
                    countries = microformat.get('availableCountries')
                    if not countries:
                        regions_allowed = search_meta('regionsAllowed')
                        countries = regions_allowed.split(',') if regions_allowed else None
                    self.raise_geo_restricted(
                        subreason, countries)
                reason += '\n' + subreason
            if reason:
                raise ExtractorError(reason, expected=True)

        self._sort_formats(formats)

        keywords = video_details.get('keywords') or []
        if not keywords and webpage:
            keywords = [
                unescapeHTML(m.group('content'))
                for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
        # yt:stretch=W:H keyword overrides the aspect ratio of video formats
        for keyword in keywords:
            if keyword.startswith('yt:stretch='):
                mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
                if mobj:
                    # NB: float is intentional for forcing float division
                    w, h = (float(v) for v in mobj.groups())
                    if w > 0 and h > 0:
                        ratio = w / h
                        for f in formats:
                            if f.get('vcodec') != 'none':
                                f['stretched_ratio'] = ratio
                break

        # Thumbnails: videoDetails first, then microformat, then page meta
        thumbnails = []
        for container in (video_details, microformat):
            for thumbnail in (try_get(
                    container,
                    lambda x: x['thumbnail']['thumbnails'], list) or []):
                thumbnail_url = thumbnail.get('url')
                if not thumbnail_url:
                    continue
                thumbnails.append({
                    'height': int_or_none(thumbnail.get('height')),
                    'url': thumbnail_url,
                    'width': int_or_none(thumbnail.get('width')),
                })
            if thumbnails:
                break
        else:
            thumbnail = search_meta(['og:image', 'twitter:image'])
            if thumbnail:
                thumbnails = [{'url': thumbnail}]

        category = microformat.get('category') or search_meta('genre')
        channel_id = video_details.get('channelId') \
            or microformat.get('externalChannelId') \
            or search_meta('channelId')
        duration = int_or_none(
            video_details.get('lengthSeconds')
            or microformat.get('lengthSeconds')) \
            or parse_duration(search_meta('duration'))
        is_live = video_details.get('isLive')
        owner_profile_url = microformat.get('ownerProfileUrl')

        # Descramble the throttling 'n' parameter on all format URLs
        if not player_url:
            player_url = self._extract_player_url(webpage)
        self._unthrottle_format_urls(video_id, player_url, formats)

        info = {
            'id': video_id,
            'title': self._live_title(video_title) if is_live else video_title,
            'formats': formats,
            'thumbnails': thumbnails,
            'description': video_description,
            'upload_date': unified_strdate(
                microformat.get('uploadDate')
                or search_meta('uploadDate')),
            'uploader': video_details['author'],
            'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
            'uploader_url': owner_profile_url,
            'channel_id': channel_id,
            'channel_url': 'https://www.youtube.com/channel/' + channel_id if channel_id else None,
            'duration': duration,
            'view_count': int_or_none(
                video_details.get('viewCount')
                or microformat.get('viewCount')
                or search_meta('interactionCount')),
            'average_rating': float_or_none(video_details.get('averageRating')),
            'age_limit': 18 if (
                microformat.get('isFamilySafe') is False
                or search_meta('isFamilyFriendly') == 'false'
                or search_meta('og:restrictions:age') == '18+') else 0,
            'webpage_url': webpage_url,
            'categories': [category] if category else None,
            'tags': keywords,
            'is_live': is_live,
        }

        # Subtitles and automatic (ASR) captions
        pctr = try_get(
            player_response,
            lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict)
        if pctr:
            def process_language(container, base_url, lang_code, query):
                # One entry per subtitle format for the given language
                lang_subs = []
                for fmt in self._SUBTITLE_FORMATS:
                    query.update({
                        'fmt': fmt,
                    })
                    lang_subs.append({
                        'ext': fmt,
                        'url': update_url_query(base_url, query),
                    })
                container[lang_code] = lang_subs

            subtitles = {}
            for caption_track in (pctr.get('captionTracks') or []):
                base_url = caption_track.get('baseUrl')
                if not base_url:
                    continue
                if caption_track.get('kind') != 'asr':
                    lang_code = caption_track.get('languageCode')
                    if not lang_code:
                        continue
                    process_language(
                        subtitles, base_url, lang_code, {})
                    continue
                # ASR track: expose one auto-caption entry per translation target
                automatic_captions = {}
                for translation_language in (pctr.get('translationLanguages') or []):
                    translation_language_code = translation_language.get('languageCode')
                    if not translation_language_code:
                        continue
                    process_language(
                        automatic_captions, base_url, translation_language_code,
                        {'tlang': translation_language_code})
                info['automatic_captions'] = automatic_captions
            info['subtitles'] = subtitles

        # start/end times from ?t=, ?start=, ?end= in the URL or fragment
        parsed_url = compat_urllib_parse_urlparse(url)
        for component in [parsed_url.fragment, parsed_url.query]:
            query = compat_parse_qs(component)
            for k, v in query.items():
                for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
                    d_k += '_time'
                    if d_k not in info and k in s_ks:
                        info[d_k] = parse_duration(query[k][0])

        # Music metadata from "Auto-generated by YouTube" descriptions
        if video_description:
            mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
            if mobj:
                release_year = mobj.group('release_year')
                release_date = mobj.group('release_date')
                if release_date:
                    release_date = release_date.replace('-', '')
                    if not release_year:
                        release_year = release_date[:4]
                info.update({
                    # NOTE(review): .strip() here applies to the literal
                    # 'album' (a no-op), not to the group value — probably
                    # intended mobj.group('album').strip(); left as-is.
                    'album': mobj.group('album'.strip()),
                    'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
                    'track': mobj.group('track').strip(),
                    'release_date': release_date,
                    'release_year': int_or_none(release_year),
                })

        # Initial data: chapters, like/dislike counts, channel & music rows
        initial_data = None
        if webpage:
            initial_data = self._extract_yt_initial_variable(
                webpage, self._YT_INITIAL_DATA_RE, video_id,
                'yt initial data')
        if not initial_data:
            initial_data = self._call_api(
                'next', {'videoId': video_id}, video_id, fatal=False)

        if initial_data:
            chapters = self._extract_chapters_from_json(
                initial_data, video_id, duration)
            if not chapters:
                # Fallback: chapters from the macro-markers engagement panel
                for engagment_pannel in (initial_data.get('engagementPanels') or []):
                    contents = try_get(
                        engagment_pannel, lambda x: x['engagementPanelSectionListRenderer']['content']['macroMarkersListRenderer']['contents'],
                        list)
                    if not contents:
                        continue

                    def chapter_time(mmlir):
                        return parse_duration(
                            get_text(mmlir.get('timeDescription')))

                    chapters = []
                    for next_num, content in enumerate(contents, start=1):
                        mmlir = content.get('macroMarkersListItemRenderer') or {}
                        start_time = chapter_time(mmlir)
                        end_time = chapter_time(try_get(
                            contents, lambda x: x[next_num]['macroMarkersListItemRenderer'])) \
                            if next_num < len(contents) else duration
                        if start_time is None or end_time is None:
                            continue
                        chapters.append({
                            'start_time': start_time,
                            'end_time': end_time,
                            'title': get_text(mmlir.get('title')),
                        })
                    if chapters:
                        break
            if chapters:
                info['chapters'] = chapters

            contents = try_get(
                initial_data,
                lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'],
                list) or []
            for content in contents:
                vpir = content.get('videoPrimaryInfoRenderer')
                if vpir:
                    stl = vpir.get('superTitleLink')
                    if stl:
                        stl = get_text(stl)
                        if try_get(
                                vpir,
                                lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
                            info['location'] = stl
                        else:
                            # "Series S1 • E2" style super title
                            mobj = re.search(r'(.+?)\s*S(\d+)\s*•\s*E(\d+)', stl)
                            if mobj:
                                info.update({
                                    'series': mobj.group(1),
                                    'season_number': int(mobj.group(2)),
                                    'episode_number': int(mobj.group(3)),
                                })
                    # like/dislike counts from the toggle buttons' a11y labels
                    for tlb in (try_get(
                            vpir,
                            lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
                            list) or []):
                        tbr = tlb.get('toggleButtonRenderer') or {}
                        for getter, regex in [(
                                lambda x: x['defaultText']['accessibility']['accessibilityData'],
                                r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
                                    lambda x: x['accessibility'],
                                    lambda x: x['accessibilityData']['accessibilityData'],
                                ], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
                            label = (try_get(tbr, getter, dict) or {}).get('label')
                            if label:
                                mobj = re.match(regex, label)
                                if mobj:
                                    info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
                                    break
                    sbr_tooltip = try_get(
                        vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
                    if sbr_tooltip:
                        like_count, dislike_count = sbr_tooltip.split(' / ')
                        info.update({
                            'like_count': str_to_int(like_count),
                            'dislike_count': str_to_int(dislike_count),
                        })
                vsir = content.get('videoSecondaryInfoRenderer')
                if vsir:
                    info['channel'] = get_text(try_get(
                        vsir,
                        lambda x: x['owner']['videoOwnerRenderer']['title'],
                        dict))
                    rows = try_get(
                        vsir,
                        lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
                        list) or []
                    # With several songs in the video, per-song rows are ambiguous
                    multiple_songs = False
                    for row in rows:
                        if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
                            multiple_songs = True
                            break
                    for row in rows:
                        mrr = row.get('metadataRowRenderer') or {}
                        mrr_title = mrr.get('title')
                        if not mrr_title:
                            continue
                        mrr_title = get_text(mrr['title'])
                        mrr_contents_text = get_text(mrr['contents'][0])
                        if mrr_title == 'License':
                            info['license'] = mrr_contents_text
                        elif not multiple_songs:
                            if mrr_title == 'Album':
                                info['album'] = mrr_contents_text
                            elif mrr_title == 'Artist':
                                info['artist'] = mrr_contents_text
                            elif mrr_title == 'Song':
                                info['track'] = mrr_contents_text

        # Mirror music fields into the generic creator/alt_title keys
        for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
            v = info.get(s_k)
            if v:
                info[d_k] = v

        self.mark_watched(video_id, player_response)

        return info
  2040. class YoutubeTabIE(YoutubeBaseInfoExtractor):
  2041. IE_DESC = 'YouTube.com tab'
  2042. _VALID_URL = r'''(?x)
  2043. https?://
  2044. (?:\w+\.)?
  2045. (?:
  2046. youtube(?:kids)?\.com|
  2047. invidio\.us
  2048. )/
  2049. (?:
  2050. (?:channel|c|user|feed|hashtag)/|
  2051. (?:playlist|watch)\?.*?\blist=|
  2052. (?!(?:watch|embed|v|e|results)\b)
  2053. )
  2054. (?P<id>[^/?\#&]+)
  2055. '''
  2056. IE_NAME = 'youtube:tab'
  2057. _TESTS = [{
  2058. # playlists, multipage
  2059. 'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
  2060. 'playlist_mincount': 94,
  2061. 'info_dict': {
  2062. 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
  2063. 'title': 'Игорь Клейнер - Playlists',
  2064. 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
  2065. },
  2066. }, {
  2067. # playlists, multipage, different order
  2068. 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
  2069. 'playlist_mincount': 94,
  2070. 'info_dict': {
  2071. 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
  2072. 'title': 'Игорь Клейнер - Playlists',
  2073. 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
  2074. },
  2075. }, {
  2076. # playlists, series
  2077. 'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
  2078. 'playlist_mincount': 5,
  2079. 'info_dict': {
  2080. 'id': 'UCYO_jab_esuFRV4b17AJtAw',
  2081. 'title': '3Blue1Brown - Playlists',
  2082. 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
  2083. },
  2084. }, {
  2085. # playlists, singlepage
  2086. 'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
  2087. 'playlist_mincount': 4,
  2088. 'info_dict': {
  2089. 'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
  2090. 'title': 'ThirstForScience - Playlists',
  2091. 'description': 'md5:609399d937ea957b0f53cbffb747a14c',
  2092. }
  2093. }, {
  2094. 'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
  2095. 'only_matching': True,
  2096. }, {
  2097. # basic, single video playlist
  2098. 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2099. 'info_dict': {
  2100. 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
  2101. 'uploader': 'Sergey M.',
  2102. 'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2103. 'title': 'youtube-dl public playlist',
  2104. },
  2105. 'playlist_count': 1,
  2106. }, {
  2107. # empty playlist
  2108. 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
  2109. 'info_dict': {
  2110. 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
  2111. 'uploader': 'Sergey M.',
  2112. 'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
  2113. 'title': 'youtube-dl empty playlist',
  2114. },
  2115. 'playlist_count': 0,
  2116. }, {
  2117. # Home tab
  2118. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
  2119. 'info_dict': {
  2120. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2121. 'title': 'lex will - Home',
  2122. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2123. },
  2124. 'playlist_mincount': 2,
  2125. }, {
  2126. # Videos tab
  2127. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
  2128. 'info_dict': {
  2129. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2130. 'title': 'lex will - Videos',
  2131. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2132. },
  2133. 'playlist_mincount': 975,
  2134. }, {
  2135. # Videos tab, sorted by popular
  2136. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
  2137. 'info_dict': {
  2138. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2139. 'title': 'lex will - Videos',
  2140. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2141. },
  2142. 'playlist_mincount': 199,
  2143. }, {
  2144. # Playlists tab
  2145. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
  2146. 'info_dict': {
  2147. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2148. 'title': 'lex will - Playlists',
  2149. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2150. },
  2151. 'playlist_mincount': 17,
  2152. }, {
  2153. # Community tab
  2154. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
  2155. 'info_dict': {
  2156. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2157. 'title': 'lex will - Community',
  2158. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2159. },
  2160. 'playlist_mincount': 18,
  2161. }, {
  2162. # Channels tab
  2163. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
  2164. 'info_dict': {
  2165. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2166. 'title': 'lex will - Channels',
  2167. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2168. },
  2169. 'playlist_mincount': 138,
  2170. }, {
  2171. 'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2172. 'only_matching': True,
  2173. }, {
  2174. 'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2175. 'only_matching': True,
  2176. }, {
  2177. 'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2178. 'only_matching': True,
  2179. }, {
  2180. 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
  2181. 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
  2182. 'info_dict': {
  2183. 'title': '29C3: Not my department',
  2184. 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
  2185. 'uploader': 'Christiaan008',
  2186. 'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
  2187. },
  2188. 'playlist_count': 96,
  2189. }, {
  2190. 'note': 'Large playlist',
  2191. 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
  2192. 'info_dict': {
  2193. 'title': 'Uploads from Cauchemar',
  2194. 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
  2195. 'uploader': 'Cauchemar',
  2196. 'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
  2197. },
  2198. 'playlist_mincount': 1123,
  2199. }, {
  2200. # even larger playlist, 8832 videos
  2201. 'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
  2202. 'only_matching': True,
  2203. }, {
  2204. 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
  2205. 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
  2206. 'info_dict': {
  2207. 'title': 'Uploads from Interstellar Movie',
  2208. 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
  2209. 'uploader': 'Interstellar Movie',
  2210. 'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
  2211. },
  2212. 'playlist_mincount': 21,
  2213. }, {
  2214. # https://github.com/ytdl-org/youtube-dl/issues/21844
  2215. 'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
  2216. 'info_dict': {
  2217. 'title': 'Data Analysis with Dr Mike Pound',
  2218. 'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
  2219. 'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
  2220. 'uploader': 'Computerphile',
  2221. },
  2222. 'playlist_mincount': 11,
  2223. }, {
  2224. 'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2225. 'only_matching': True,
  2226. }, {
  2227. # Playlist URL that does not actually serve a playlist
  2228. 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
  2229. 'info_dict': {
  2230. 'id': 'FqZTN594JQw',
  2231. 'ext': 'webm',
  2232. 'title': "Smiley's People 01 detective, Adventure Series, Action",
  2233. 'uploader': 'STREEM',
  2234. 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
  2235. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
  2236. 'upload_date': '20150526',
  2237. 'license': 'Standard YouTube License',
  2238. 'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
  2239. 'categories': ['People & Blogs'],
  2240. 'tags': list,
  2241. 'view_count': int,
  2242. 'like_count': int,
  2243. 'dislike_count': int,
  2244. },
  2245. 'params': {
  2246. 'skip_download': True,
  2247. },
  2248. 'skip': 'This video is not available.',
  2249. 'add_ie': [YoutubeIE.ie_key()],
  2250. }, {
  2251. 'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
  2252. 'only_matching': True,
  2253. }, {
  2254. 'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
  2255. 'only_matching': True,
  2256. }, {
  2257. 'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
  2258. 'info_dict': {
  2259. 'id': '9Auq9mYxFEE',
  2260. 'ext': 'mp4',
  2261. 'title': 'Watch Sky News live',
  2262. 'uploader': 'Sky News',
  2263. 'uploader_id': 'skynews',
  2264. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
  2265. 'upload_date': '20191102',
  2266. 'description': 'md5:78de4e1c2359d0ea3ed829678e38b662',
  2267. 'categories': ['News & Politics'],
  2268. 'tags': list,
  2269. 'like_count': int,
  2270. 'dislike_count': int,
  2271. },
  2272. 'params': {
  2273. 'skip_download': True,
  2274. },
  2275. }, {
  2276. 'url': 'https://www.youtube.com/user/TheYoungTurks/live',
  2277. 'info_dict': {
  2278. 'id': 'a48o2S1cPoo',
  2279. 'ext': 'mp4',
  2280. 'title': 'The Young Turks - Live Main Show',
  2281. 'uploader': 'The Young Turks',
  2282. 'uploader_id': 'TheYoungTurks',
  2283. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
  2284. 'upload_date': '20150715',
  2285. 'license': 'Standard YouTube License',
  2286. 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
  2287. 'categories': ['News & Politics'],
  2288. 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
  2289. 'like_count': int,
  2290. 'dislike_count': int,
  2291. },
  2292. 'params': {
  2293. 'skip_download': True,
  2294. },
  2295. 'only_matching': True,
  2296. }, {
  2297. 'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
  2298. 'only_matching': True,
  2299. }, {
  2300. 'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
  2301. 'only_matching': True,
  2302. }, {
  2303. 'url': 'https://www.youtube.com/feed/trending',
  2304. 'only_matching': True,
  2305. }, {
  2306. # needs auth
  2307. 'url': 'https://www.youtube.com/feed/library',
  2308. 'only_matching': True,
  2309. }, {
  2310. # needs auth
  2311. 'url': 'https://www.youtube.com/feed/history',
  2312. 'only_matching': True,
  2313. }, {
  2314. # needs auth
  2315. 'url': 'https://www.youtube.com/feed/subscriptions',
  2316. 'only_matching': True,
  2317. }, {
  2318. # needs auth
  2319. 'url': 'https://www.youtube.com/feed/watch_later',
  2320. 'only_matching': True,
  2321. }, {
  2322. # no longer available?
  2323. 'url': 'https://www.youtube.com/feed/recommended',
  2324. 'only_matching': True,
  2325. }, {
  2326. # inline playlist with not always working continuations
  2327. 'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
  2328. 'only_matching': True,
  2329. }, {
  2330. 'url': 'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8',
  2331. 'only_matching': True,
  2332. }, {
  2333. 'url': 'https://www.youtube.com/course',
  2334. 'only_matching': True,
  2335. }, {
  2336. 'url': 'https://www.youtube.com/zsecurity',
  2337. 'only_matching': True,
  2338. }, {
  2339. 'url': 'http://www.youtube.com/NASAgovVideo/videos',
  2340. 'only_matching': True,
  2341. }, {
  2342. 'url': 'https://www.youtube.com/TheYoungTurks/live',
  2343. 'only_matching': True,
  2344. }, {
  2345. 'url': 'https://www.youtube.com/hashtag/cctv9',
  2346. 'info_dict': {
  2347. 'id': 'cctv9',
  2348. 'title': '#cctv9',
  2349. },
  2350. 'playlist_mincount': 350,
  2351. }, {
  2352. 'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
  2353. 'only_matching': True,
  2354. }, {
  2355. 'note': 'Search tab',
  2356. 'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
  2357. 'playlist_mincount': 40,
  2358. 'info_dict': {
  2359. 'id': 'UCYO_jab_esuFRV4b17AJtAw',
  2360. 'title': '3Blue1Brown - Search - linear algebra',
  2361. 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
  2362. 'uploader': '3Blue1Brown',
  2363. 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
  2364. }
  2365. }]
  2366. @classmethod
  2367. def suitable(cls, url):
  2368. return False if YoutubeIE.suitable(url) else super(
  2369. YoutubeTabIE, cls).suitable(url)
  2370. def _extract_channel_id(self, webpage):
  2371. channel_id = self._html_search_meta(
  2372. 'channelId', webpage, 'channel id', default=None)
  2373. if channel_id:
  2374. return channel_id
  2375. channel_url = self._html_search_meta(
  2376. ('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
  2377. 'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
  2378. 'twitter:app:url:googleplay'), webpage, 'channel url')
  2379. return self._search_regex(
  2380. r'https?://(?:www\.)?youtube\.com/channel/([^/?#&])+',
  2381. channel_url, 'channel id')
  2382. @staticmethod
  2383. def _extract_grid_item_renderer(item):
  2384. assert isinstance(item, dict)
  2385. for key, renderer in item.items():
  2386. if not key.startswith('grid') or not key.endswith('Renderer'):
  2387. continue
  2388. if not isinstance(renderer, dict):
  2389. continue
  2390. return renderer
    def _grid_entries(self, grid_renderer):
        """Yield entries for every item of a grid renderer.

        Each grid item may describe a playlist, a video, a channel, or a
        generic navigation endpoint; the first matching kind wins.
        """
        for item in grid_renderer['items']:
            if not isinstance(item, dict):
                continue
            renderer = self._extract_grid_item_renderer(item)
            if not isinstance(renderer, dict):
                continue
            # Title comes either as a list of runs or as plain simpleText
            title = try_get(
                renderer, (lambda x: x['title']['runs'][0]['text'],
                           lambda x: x['title']['simpleText']), compat_str)
            # playlist
            playlist_id = renderer.get('playlistId')
            if playlist_id:
                yield self.url_result(
                    'https://www.youtube.com/playlist?list=%s' % playlist_id,
                    ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
                    video_title=title)
                continue
            # video
            video_id = renderer.get('videoId')
            if video_id:
                yield self._extract_video(renderer)
                continue
            # channel
            channel_id = renderer.get('channelId')
            if channel_id:
                title = try_get(
                    renderer, lambda x: x['title']['simpleText'], compat_str)
                yield self.url_result(
                    'https://www.youtube.com/channel/%s' % channel_id,
                    ie=YoutubeTabIE.ie_key(), video_title=title)
                continue
            # generic endpoint URL support: route the endpoint URL to the
            # first extractor that claims it
            ep_url = urljoin('https://www.youtube.com/', try_get(
                renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
                compat_str))
            if ep_url:
                for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
                    if ie.suitable(ep_url):
                        yield self.url_result(
                            ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
                        break
  2433. def _shelf_entries_from_content(self, shelf_renderer):
  2434. content = shelf_renderer.get('content')
  2435. if not isinstance(content, dict):
  2436. return
  2437. renderer = content.get('gridRenderer')
  2438. if renderer:
  2439. # TODO: add support for nested playlists so each shelf is processed
  2440. # as separate playlist
  2441. # TODO: this includes only first N items
  2442. for entry in self._grid_entries(renderer):
  2443. yield entry
  2444. renderer = content.get('horizontalListRenderer')
  2445. if renderer:
  2446. # TODO
  2447. pass
  2448. def _shelf_entries(self, shelf_renderer, skip_channels=False):
  2449. ep = try_get(
  2450. shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
  2451. compat_str)
  2452. shelf_url = urljoin('https://www.youtube.com', ep)
  2453. if shelf_url:
  2454. # Skipping links to another channels, note that checking for
  2455. # endpoint.commandMetadata.webCommandMetadata.webPageTypwebPageType == WEB_PAGE_TYPE_CHANNEL
  2456. # will not work
  2457. if skip_channels and '/channels?' in shelf_url:
  2458. return
  2459. title = try_get(
  2460. shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
  2461. yield self.url_result(shelf_url, video_title=title)
  2462. # Shelf may not contain shelf URL, fallback to extraction from content
  2463. for entry in self._shelf_entries_from_content(shelf_renderer):
  2464. yield entry
  2465. def _playlist_entries(self, video_list_renderer):
  2466. for content in video_list_renderer['contents']:
  2467. if not isinstance(content, dict):
  2468. continue
  2469. renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
  2470. if not isinstance(renderer, dict):
  2471. continue
  2472. video_id = renderer.get('videoId')
  2473. if not video_id:
  2474. continue
  2475. yield self._extract_video(renderer)
  2476. def _video_entry(self, video_renderer):
  2477. video_id = video_renderer.get('videoId')
  2478. if video_id:
  2479. return self._extract_video(video_renderer)
  2480. def _post_thread_entries(self, post_thread_renderer):
  2481. post_renderer = try_get(
  2482. post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
  2483. if not post_renderer:
  2484. return
  2485. # video attachment
  2486. video_renderer = try_get(
  2487. post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
  2488. video_id = None
  2489. if video_renderer:
  2490. entry = self._video_entry(video_renderer)
  2491. if entry:
  2492. yield entry
  2493. # inline video links
  2494. runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
  2495. for run in runs:
  2496. if not isinstance(run, dict):
  2497. continue
  2498. ep_url = try_get(
  2499. run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
  2500. if not ep_url:
  2501. continue
  2502. if not YoutubeIE.suitable(ep_url):
  2503. continue
  2504. ep_video_id = YoutubeIE._match_id(ep_url)
  2505. if video_id == ep_video_id:
  2506. continue
  2507. yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
  2508. def _post_thread_continuation_entries(self, post_thread_continuation):
  2509. contents = post_thread_continuation.get('contents')
  2510. if not isinstance(contents, list):
  2511. return
  2512. for content in contents:
  2513. renderer = content.get('backstagePostThreadRenderer')
  2514. if not isinstance(renderer, dict):
  2515. continue
  2516. for entry in self._post_thread_entries(renderer):
  2517. yield entry
  2518. def _rich_grid_entries(self, contents):
  2519. for content in contents:
  2520. video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
  2521. if video_renderer:
  2522. entry = self._video_entry(video_renderer)
  2523. if entry:
  2524. yield entry
  2525. @staticmethod
  2526. def _build_continuation_query(continuation, ctp=None):
  2527. query = {
  2528. 'ctoken': continuation,
  2529. 'continuation': continuation,
  2530. }
  2531. if ctp:
  2532. query['itct'] = ctp
  2533. return query
  2534. @staticmethod
  2535. def _extract_next_continuation_data(renderer):
  2536. next_continuation = try_get(
  2537. renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
  2538. if not next_continuation:
  2539. return
  2540. continuation = next_continuation.get('continuation')
  2541. if not continuation:
  2542. return
  2543. ctp = next_continuation.get('clickTrackingParams')
  2544. return YoutubeTabIE._build_continuation_query(continuation, ctp)
  2545. @classmethod
  2546. def _extract_continuation(cls, renderer):
  2547. next_continuation = cls._extract_next_continuation_data(renderer)
  2548. if next_continuation:
  2549. return next_continuation
  2550. contents = []
  2551. for key in ('contents', 'items'):
  2552. contents.extend(try_get(renderer, lambda x: x[key], list) or [])
  2553. for content in contents:
  2554. if not isinstance(content, dict):
  2555. continue
  2556. continuation_ep = try_get(
  2557. content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
  2558. dict)
  2559. if not continuation_ep:
  2560. continue
  2561. continuation = try_get(
  2562. continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
  2563. if not continuation:
  2564. continue
  2565. ctp = continuation_ep.get('clickTrackingParams')
  2566. return YoutubeTabIE._build_continuation_query(continuation, ctp)
    def _entries(self, tab, item_id, webpage):
        """Yield all entries of a tab, following continuations.

        First drains the renderers embedded in the initial page data, then
        pages through the innertube browse API using whatever continuation
        token the last processed renderer provided.
        """
        tab_content = try_get(tab, lambda x: x['content'], dict)
        if not tab_content:
            return
        slr_renderer = try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
        if slr_renderer:
            is_channels_tab = tab.get('title') == 'Channels'
            continuation = None
            slr_contents = try_get(slr_renderer, lambda x: x['contents'], list) or []
            for slr_content in slr_contents:
                if not isinstance(slr_content, dict):
                    continue
                is_renderer = try_get(slr_content, lambda x: x['itemSectionRenderer'], dict)
                if not is_renderer:
                    continue
                isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
                for isr_content in isr_contents:
                    if not isinstance(isr_content, dict):
                        continue
                    # Each item section may hold one of several renderer
                    # kinds; the continuation of the last one seen wins
                    renderer = isr_content.get('playlistVideoListRenderer')
                    if renderer:
                        for entry in self._playlist_entries(renderer):
                            yield entry
                        continuation = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('gridRenderer')
                    if renderer:
                        for entry in self._grid_entries(renderer):
                            yield entry
                        continuation = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('shelfRenderer')
                    if renderer:
                        for entry in self._shelf_entries(renderer, not is_channels_tab):
                            yield entry
                        continue
                    renderer = isr_content.get('backstagePostThreadRenderer')
                    if renderer:
                        for entry in self._post_thread_entries(renderer):
                            yield entry
                        continuation = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('videoRenderer')
                    if renderer:
                        entry = self._video_entry(renderer)
                        if entry:
                            yield entry
                if not continuation:
                    continuation = self._extract_continuation(is_renderer)
            if not continuation:
                continuation = self._extract_continuation(slr_renderer)
        else:
            # Home-style tabs use a rich grid instead of a section list
            rich_grid_renderer = tab_content.get('richGridRenderer')
            if not rich_grid_renderer:
                return
            for entry in self._rich_grid_entries(rich_grid_renderer.get('contents') or []):
                yield entry
            continuation = self._extract_continuation(rich_grid_renderer)
        # Prepare headers/payload for the innertube browse API used for paging
        ytcfg = self._extract_ytcfg(item_id, webpage)
        client_version = try_get(
            ytcfg, lambda x: x['INNERTUBE_CLIENT_VERSION'], compat_str) or '2.20210407.08.00'
        headers = {
            'x-youtube-client-name': '1',
            'x-youtube-client-version': client_version,
            'content-type': 'application/json',
        }
        context = try_get(ytcfg, lambda x: x['INNERTUBE_CONTEXT'], dict) or {
            'client': {
                'clientName': 'WEB',
                'clientVersion': client_version,
            }
        }
        visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
        identity_token = self._extract_identity_token(ytcfg, webpage)
        if identity_token:
            headers['x-youtube-identity-token'] = identity_token
        data = {
            'context': context,
        }
        for page_num in itertools.count(1):
            if not continuation:
                break
            if visitor_data:
                headers['x-goog-visitor-id'] = visitor_data
            data['continuation'] = continuation['continuation']
            data['clickTracking'] = {
                'clickTrackingParams': continuation['itct']
            }
            count = 0
            retries = 3
            while count <= retries:
                try:
                    # Downloading page may result in intermittent 5xx HTTP error
                    # that is usually worked around with a retry
                    response = self._download_json(
                        'https://www.youtube.com/youtubei/v1/browse?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
                        None, 'Downloading page %d%s' % (page_num, ' (retry #%d)' % count if count else ''),
                        headers=headers, data=json.dumps(data).encode('utf8'))
                    break
                except ExtractorError as e:
                    if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
                        count += 1
                        if count <= retries:
                            continue
                    raise
            if not response:
                break
            # Carry the visitor ID forward so subsequent pages stay consistent
            visitor_data = try_get(
                response, lambda x: x['responseContext']['visitorData'], compat_str) or visitor_data
            # Older API shape: continuationContents
            continuation_contents = try_get(
                response, lambda x: x['continuationContents'], dict)
            if continuation_contents:
                continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
                if continuation_renderer:
                    for entry in self._playlist_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('gridContinuation')
                if continuation_renderer:
                    for entry in self._grid_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('itemSectionContinuation')
                if continuation_renderer:
                    for entry in self._post_thread_continuation_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
            # Newer API shape: onResponseReceived* actions carrying the
            # appended continuation items directly
            on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
            continuation_items = try_get(
                on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
            if continuation_items:
                continuation_item = continuation_items[0]
                if not isinstance(continuation_item, dict):
                    continue
                # The kind of the first item decides how the whole batch is
                # interpreted
                renderer = self._extract_grid_item_renderer(continuation_item)
                if renderer:
                    grid_renderer = {'items': continuation_items}
                    for entry in self._grid_entries(grid_renderer):
                        yield entry
                    continuation = self._extract_continuation(grid_renderer)
                    continue
                renderer = continuation_item.get('playlistVideoRenderer') or continuation_item.get('itemSectionRenderer')
                if renderer:
                    video_list_renderer = {'contents': continuation_items}
                    for entry in self._playlist_entries(video_list_renderer):
                        yield entry
                    continuation = self._extract_continuation(video_list_renderer)
                    continue
                renderer = continuation_item.get('backstagePostThreadRenderer')
                if renderer:
                    continuation_renderer = {'contents': continuation_items}
                    for entry in self._post_thread_continuation_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                renderer = continuation_item.get('richItemRenderer')
                if renderer:
                    for entry in self._rich_grid_entries(continuation_items):
                        yield entry
                    continuation = self._extract_continuation({'contents': continuation_items})
                    continue
            break
  2732. @staticmethod
  2733. def _extract_selected_tab(tabs):
  2734. for tab in tabs:
  2735. renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
  2736. if renderer.get('selected') is True:
  2737. return renderer
  2738. else:
  2739. raise ExtractorError('Unable to find selected tab')
  2740. @staticmethod
  2741. def _extract_uploader(data):
  2742. uploader = {}
  2743. sidebar_renderer = try_get(
  2744. data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
  2745. if sidebar_renderer:
  2746. for item in sidebar_renderer:
  2747. if not isinstance(item, dict):
  2748. continue
  2749. renderer = item.get('playlistSidebarSecondaryInfoRenderer')
  2750. if not isinstance(renderer, dict):
  2751. continue
  2752. owner = try_get(
  2753. renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
  2754. if owner:
  2755. uploader['uploader'] = owner.get('text')
  2756. uploader['uploader_id'] = try_get(
  2757. owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
  2758. uploader['uploader_url'] = urljoin(
  2759. 'https://www.youtube.com/',
  2760. try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
  2761. return uploader
  2762. @staticmethod
  2763. def _extract_alert(data):
  2764. alerts = []
  2765. for alert in try_get(data, lambda x: x['alerts'], list) or []:
  2766. if not isinstance(alert, dict):
  2767. continue
  2768. alert_text = try_get(
  2769. alert, lambda x: x['alertRenderer']['text'], dict)
  2770. if not alert_text:
  2771. continue
  2772. text = try_get(
  2773. alert_text,
  2774. (lambda x: x['simpleText'], lambda x: x['runs'][0]['text']),
  2775. compat_str)
  2776. if text:
  2777. alerts.append(text)
  2778. return '\n'.join(alerts)
  2779. def _extract_from_tabs(self, item_id, webpage, data, tabs):
  2780. selected_tab = self._extract_selected_tab(tabs)
  2781. renderer = try_get(
  2782. data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
  2783. playlist_id = item_id
  2784. title = description = None
  2785. if renderer:
  2786. channel_title = renderer.get('title') or item_id
  2787. tab_title = selected_tab.get('title')
  2788. title = channel_title or item_id
  2789. if tab_title:
  2790. title += ' - %s' % tab_title
  2791. if selected_tab.get('expandedText'):
  2792. title += ' - %s' % selected_tab['expandedText']
  2793. description = renderer.get('description')
  2794. playlist_id = renderer.get('externalId')
  2795. else:
  2796. renderer = try_get(
  2797. data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
  2798. if renderer:
  2799. title = renderer.get('title')
  2800. else:
  2801. renderer = try_get(
  2802. data, lambda x: x['header']['hashtagHeaderRenderer'], dict)
  2803. if renderer:
  2804. title = try_get(renderer, lambda x: x['hashtag']['simpleText'])
  2805. playlist = self.playlist_result(
  2806. self._entries(selected_tab, item_id, webpage),
  2807. playlist_id=playlist_id, playlist_title=title,
  2808. playlist_description=description)
  2809. playlist.update(self._extract_uploader(data))
  2810. return playlist
  2811. def _extract_from_playlist(self, item_id, url, data, playlist):
  2812. title = playlist.get('title') or try_get(
  2813. data, lambda x: x['titleText']['simpleText'], compat_str)
  2814. playlist_id = playlist.get('playlistId') or item_id
  2815. # Inline playlist rendition continuation does not always work
  2816. # at Youtube side, so delegating regular tab-based playlist URL
  2817. # processing whenever possible.
  2818. playlist_url = urljoin(url, try_get(
  2819. playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
  2820. compat_str))
  2821. if playlist_url and playlist_url != url:
  2822. return self.url_result(
  2823. playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
  2824. video_title=title)
  2825. return self.playlist_result(
  2826. self._playlist_entries(playlist), playlist_id=playlist_id,
  2827. playlist_title=title)
  2828. def _extract_identity_token(self, ytcfg, webpage):
  2829. if ytcfg:
  2830. token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
  2831. if token:
  2832. return token
  2833. return self._search_regex(
  2834. r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
  2835. 'identity token', default=None)
  2836. def _real_extract(self, url):
  2837. item_id = self._match_id(url)
  2838. url = compat_urlparse.urlunparse(
  2839. compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
  2840. # Handle both video/playlist URLs
  2841. qs = parse_qs(url)
  2842. video_id = qs.get('v', [None])[0]
  2843. playlist_id = qs.get('list', [None])[0]
  2844. if video_id and playlist_id:
  2845. if self._downloader.params.get('noplaylist'):
  2846. self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
  2847. return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
  2848. self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
  2849. webpage = self._download_webpage(url, item_id)
  2850. data = self._extract_yt_initial_data(item_id, webpage)
  2851. tabs = try_get(
  2852. data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
  2853. if tabs:
  2854. return self._extract_from_tabs(item_id, webpage, data, tabs)
  2855. playlist = try_get(
  2856. data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
  2857. if playlist:
  2858. return self._extract_from_playlist(item_id, url, data, playlist)
  2859. # Fallback to video extraction if no playlist alike page is recognized.
  2860. # First check for the current video then try the v attribute of URL query.
  2861. video_id = try_get(
  2862. data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
  2863. compat_str) or video_id
  2864. if video_id:
  2865. return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
  2866. # Capture and output alerts
  2867. alert = self._extract_alert(data)
  2868. if alert:
  2869. raise ExtractorError(alert, expected=True)
  2870. # Failed to recognize
  2871. raise ExtractorError('Unable to recognize tab page')
class YoutubePlaylistIE(InfoExtractor):
    """Matches playlist URLs and bare playlist IDs and delegates the actual
    extraction to YoutubeTabIE via a normalized /playlist?list= URL."""
    IE_DESC = 'YouTube.com playlists'
    _VALID_URL = r'''(?x)(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        (?:
                            (?:
                                youtube(?:kids)?\.com|
                                invidio\.us
                            )
                            /.*?\?.*?\blist=
                        )?
                        (?P<id>%(playlist_id)s)
                     )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
    IE_NAME = 'youtube:playlist'
    _TESTS = [{
        'note': 'issue #673',
        'url': 'PLBB231211A4F62143',
        'info_dict': {
            'title': '[OLD]Team Fortress 2 (Class-based LP)',
            'id': 'PLBB231211A4F62143',
            'uploader': 'Wickydoo',
            'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
        },
        'playlist_mincount': 29,
    }, {
        'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        'info_dict': {
            'title': 'YDL_safe_search',
            'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        },
        'playlist_count': 2,
        'skip': 'This playlist is private',
    }, {
        'note': 'embedded',
        'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
        'playlist_count': 4,
        'info_dict': {
            'title': 'JODA15',
            'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
            'uploader': 'milan',
            'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
        }
    }, {
        'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
        'playlist_mincount': 982,
        'info_dict': {
            'title': '2018 Chinese New Singles (11/6 updated)',
            'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
            'uploader': 'LBK',
            'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
        }
    }, {
        'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
        'only_matching': True,
    }, {
        # music album playlist
        'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Anything YoutubeTabIE claims is handled there
        if YoutubeTabIE.suitable(url):
            return False
        # Hack for lazy extractors until more generic solution is implemented
        # (see #28780)
        from .youtube import parse_qs
        qs = parse_qs(url)
        # Watch URLs (with v=) are videos, not playlists
        if qs.get('v', [None])[0]:
            return False
        return super(YoutubePlaylistIE, cls).suitable(url)

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        qs = parse_qs(url)
        # Bare playlist IDs produce no query string; synthesize list=<id>
        if not qs:
            qs = {'list': playlist_id}
        return self.url_result(
            update_url_query('https://www.youtube.com/playlist', qs),
            ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtBeIE(InfoExtractor):
    """Short youtu.be video links that also carry a playlist (list=) parameter;
    rebuilt as www.youtube.com watch URLs and delegated to YoutubeTabIE."""
    _VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
    _TESTS = [{
        'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
        'info_dict': {
            'id': 'yeWKywCrFtk',
            'ext': 'mp4',
            'title': 'Small Scale Baler and Braiding Rugs',
            'uploader': 'Backus-Page House Museum',
            'uploader_id': 'backuspagemuseum',
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
            'upload_date': '20161008',
            'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
            'categories': ['Nonprofits & Activism'],
            'tags': list,
            'like_count': int,
            'dislike_count': int,
        },
        'params': {
            'noplaylist': True,
            'skip_download': True,
        },
    }, {
        'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        playlist_id = mobj.group('playlist_id')
        # Rebuild the equivalent full watch URL and let the tab extractor
        # decide between video and playlist handling
        return self.url_result(
            update_url_query('https://www.youtube.com/watch', {
                'v': video_id,
                'list': playlist_id,
                'feature': 'youtu.be',
            }), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
  2987. class YoutubeYtUserIE(InfoExtractor):
  2988. _VALID_URL = r'ytuser:(?P<id>.+)'
  2989. _TESTS = [{
  2990. 'url': 'ytuser:phihag',
  2991. 'only_matching': True,
  2992. }]
  2993. def _real_extract(self, url):
  2994. user_id = self._match_id(url)
  2995. return self.url_result(
  2996. 'https://www.youtube.com/user/%s' % user_id,
  2997. ie=YoutubeTabIE.ie_key(), video_id=user_id)
  2998. class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
  2999. IE_NAME = 'youtube:favorites'
  3000. IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
  3001. _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
  3002. _LOGIN_REQUIRED = True
  3003. _TESTS = [{
  3004. 'url': ':ytfav',
  3005. 'only_matching': True,
  3006. }, {
  3007. 'url': ':ytfavorites',
  3008. 'only_matching': True,
  3009. }]
  3010. def _real_extract(self, url):
  3011. return self.url_result(
  3012. 'https://www.youtube.com/playlist?list=LL',
  3013. ie=YoutubeTabIE.ie_key())
  3014. class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
  3015. IE_DESC = 'YouTube.com searches'
  3016. IE_NAME = 'youtube:search'
  3017. _SEARCH_KEY = 'ytsearch'
  3018. _SEARCH_PARAMS = 'EgIQAQ%3D%3D' # Videos only
  3019. _MAX_RESULTS = float('inf')
  3020. _TESTS = [{
  3021. 'url': 'ytsearch10:youtube-dl test video',
  3022. 'playlist_count': 10,
  3023. 'info_dict': {
  3024. 'id': 'youtube-dl test video',
  3025. 'title': 'youtube-dl test video',
  3026. }
  3027. }]
  3028. def _get_n_results(self, query, n):
  3029. """Get a specified number of results for a query"""
  3030. entries = itertools.islice(self._search_results(query, self._SEARCH_PARAMS), 0, None if n == float('inf') else n)
  3031. return self.playlist_result(entries, query, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
    # Same search mechanics as YoutubeSearchIE, only the search params differ
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
    _SEARCH_PARAMS = 'CAISAhAB'  # Videos only, sorted by date
    _TESTS = [{
        'url': 'ytsearchdate10:youtube-dl test video',
        'playlist_count': 10,
        'info_dict': {
            'id': 'youtube-dl test video',
            'title': 'youtube-dl test video',
        }
    }]
  3045. class YoutubeSearchURLIE(YoutubeBaseInfoExtractor):
  3046. IE_DESC = 'YouTube search URLs with sorting and filter support'
  3047. IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
  3048. _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?:[^&]+)(?:[&]|$)'
  3049. _TESTS = [{
  3050. 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
  3051. 'playlist_mincount': 5,
  3052. 'info_dict': {
  3053. 'id': 'youtube-dl test video',
  3054. 'title': 'youtube-dl test video',
  3055. },
  3056. 'params': {'playlistend': 5}
  3057. }, {
  3058. 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
  3059. 'only_matching': True,
  3060. }]
  3061. def _real_extract(self, url):
  3062. qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
  3063. query = (qs.get('search_query') or qs.get('q'))[0]
  3064. params = qs.get('sp', ('',))[0]
  3065. return self.playlist_result(self._search_results(query, params), query, query)
  3066. class YoutubeFeedsInfoExtractor(YoutubeTabIE):
  3067. """
  3068. Base class for feed extractors
  3069. Subclasses must define the _FEED_NAME property.
  3070. """
  3071. _LOGIN_REQUIRED = True
  3072. @property
  3073. def IE_NAME(self):
  3074. return 'youtube:%s' % self._FEED_NAME
  3075. def _real_initialize(self):
  3076. self._login()
  3077. def _real_extract(self, url):
  3078. return self.url_result(
  3079. 'https://www.youtube.com/feed/%s' % self._FEED_NAME,
  3080. ie=YoutubeTabIE.ie_key())
  3081. class YoutubeWatchLaterIE(InfoExtractor):
  3082. IE_NAME = 'youtube:watchlater'
  3083. IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
  3084. _VALID_URL = r':ytwatchlater'
  3085. _TESTS = [{
  3086. 'url': ':ytwatchlater',
  3087. 'only_matching': True,
  3088. }]
  3089. def _real_extract(self, url):
  3090. return self.url_result(
  3091. 'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    # Feed subclass: extraction is handled entirely by
    # YoutubeFeedsInfoExtractor via _FEED_NAME.
    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
    _VALID_URL = r':ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _TESTS = [{
        'url': ':ytrec',
        'only_matching': True,
    }, {
        'url': ':ytrecommended',
        'only_matching': True,
    }]
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    # Feed subclass: extraction is handled entirely by
    # YoutubeFeedsInfoExtractor via _FEED_NAME.
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r':ytsubs(?:criptions)?'
    _FEED_NAME = 'subscriptions'
    _TESTS = [{
        'url': ':ytsubs',
        'only_matching': True,
    }, {
        'url': ':ytsubscriptions',
        'only_matching': True,
    }]
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    # Feed subclass: extraction is handled entirely by
    # YoutubeFeedsInfoExtractor via _FEED_NAME.
    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
    _VALID_URL = r':ythistory'
    _FEED_NAME = 'history'
    _TESTS = [{
        'url': ':ythistory',
        'only_matching': True,
    }]
  3122. class YoutubeTruncatedURLIE(InfoExtractor):
  3123. IE_NAME = 'youtube:truncated_url'
  3124. IE_DESC = False # Do not list
  3125. _VALID_URL = r'''(?x)
  3126. (?:https?://)?
  3127. (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
  3128. (?:watch\?(?:
  3129. feature=[a-z_]+|
  3130. annotation_id=annotation_[^&]+|
  3131. x-yt-cl=[0-9]+|
  3132. hl=[^&]*|
  3133. t=[0-9]+
  3134. )?
  3135. |
  3136. attribution_link\?a=[^&]+
  3137. )
  3138. $
  3139. '''
  3140. _TESTS = [{
  3141. 'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
  3142. 'only_matching': True,
  3143. }, {
  3144. 'url': 'https://www.youtube.com/watch?',
  3145. 'only_matching': True,
  3146. }, {
  3147. 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
  3148. 'only_matching': True,
  3149. }, {
  3150. 'url': 'https://www.youtube.com/watch?feature=foo',
  3151. 'only_matching': True,
  3152. }, {
  3153. 'url': 'https://www.youtube.com/watch?hl=en-GB',
  3154. 'only_matching': True,
  3155. }, {
  3156. 'url': 'https://www.youtube.com/watch?t=2372',
  3157. 'only_matching': True,
  3158. }]
  3159. def _real_extract(self, url):
  3160. raise ExtractorError(
  3161. 'Did you forget to quote the URL? Remember that & is a meta '
  3162. 'character in most shells, so you want to put the URL in quotes, '
  3163. 'like youtube-dl '
  3164. '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
  3165. ' or simply youtube-dl BaW_jenozKc .',
  3166. expected=True)
  3167. class YoutubeTruncatedIDIE(InfoExtractor):
  3168. IE_NAME = 'youtube:truncated_id'
  3169. IE_DESC = False # Do not list
  3170. _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
  3171. _TESTS = [{
  3172. 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
  3173. 'only_matching': True,
  3174. }]
  3175. def _real_extract(self, url):
  3176. video_id = self._match_id(url)
  3177. raise ExtractorError(
  3178. 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
  3179. expected=True)