logo

youtube-dl

[mirror] Download/Watch videos from video hosters. Clone: git clone https://hacktivis.me/git/mirror/youtube-dl.git

niconico.py (33644B)


  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import datetime
  4. import itertools
  5. import json
  6. import re
  7. from .common import InfoExtractor, SearchInfoExtractor
  8. from ..postprocessor.ffmpeg import FFmpegPostProcessor
  9. from ..compat import (
  10. compat_parse_qs,
  11. compat_str,
  12. compat_urllib_parse_urlparse,
  13. )
  14. from ..utils import (
  15. ExtractorError,
  16. dict_get,
  17. float_or_none,
  18. int_or_none,
  19. OnDemandPagedList,
  20. parse_duration,
  21. parse_iso8601,
  22. PostProcessingError,
  23. remove_start,
  24. str_or_none,
  25. try_get,
  26. unified_timestamp,
  27. urlencode_postdata,
  28. xpath_text,
  29. )
class NiconicoIE(InfoExtractor):
    """Extractor for single videos hosted on nicovideo.jp (Niconico).

    Supports both the modern DMC (HTML5) delivery — which requires a
    negotiated session plus periodic heartbeat requests — and the legacy
    SMILEVIDEO flv/swf delivery, whose streams are probed with ffprobe.
    """

    IE_NAME = 'niconico'
    IE_DESC = 'ニコニコ動画'

    _TESTS = [{
        'url': 'http://www.nicovideo.jp/watch/sm22312215',
        # NOTE(review): 33 characters is not a valid md5 digest — confirm
        # against upstream; the test is skipped so it never runs anyway.
        'md5': 'a5bad06f1347452102953f323c69da34s',
        'info_dict': {
            'id': 'sm22312215',
            'ext': 'mp4',
            'title': 'Big Buck Bunny',
            'thumbnail': r're:https?://.*',
            'uploader': 'takuya0301',
            'uploader_id': '2698420',
            'upload_date': '20131123',
            'timestamp': int,  # timestamp is unstable
            'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
            'duration': 33,
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        # File downloaded with and without credentials are different, so omit
        # the md5 field
        'url': 'http://www.nicovideo.jp/watch/nm14296458',
        'info_dict': {
            'id': 'nm14296458',
            'ext': 'swf',
            'title': '【鏡音リン】Dance on media【オリジナル】take2!',
            'description': 'md5:689f066d74610b3b22e0f1739add0f58',
            'thumbnail': r're:https?://.*',
            'uploader': 'りょうた',
            'uploader_id': '18822557',
            'upload_date': '20110429',
            'timestamp': 1304065916,
            'duration': 209,
        },
        'skip': 'Requires an account',
    }, {
        # 'video exists but is marked as "deleted"
        # md5 is unstable
        'url': 'http://www.nicovideo.jp/watch/sm10000',
        'info_dict': {
            'id': 'sm10000',
            'ext': 'unknown_video',
            'description': 'deleted',
            'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
            'thumbnail': r're:https?://.*',
            'upload_date': '20071224',
            'timestamp': int,  # timestamp field has different value if logged in
            'duration': 304,
            'view_count': int,
        },
        'skip': 'Requires an account',
    }, {
        'url': 'http://www.nicovideo.jp/watch/so22543406',
        'info_dict': {
            'id': '1388129933',
            'ext': 'mp4',
            'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
            'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
            'thumbnail': r're:https?://.*',
            'timestamp': 1388851200,
            'upload_date': '20140104',
            'uploader': 'アニメロチャンネル',
            'uploader_id': '312',
        },
        'skip': 'The viewing period of the video you were searching for has expired.',
    }, {
        # video not available via `getflv`; "old" HTML5 video
        'url': 'http://www.nicovideo.jp/watch/sm1151009',
        'md5': '8fa81c364eb619d4085354eab075598a',
        'info_dict': {
            'id': 'sm1151009',
            'ext': 'mp4',
            'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
            'description': 'md5:6ee077e0581ff5019773e2e714cdd0b7',
            'thumbnail': r're:https?://.*',
            'duration': 184,
            'timestamp': 1190868283,
            'upload_date': '20070927',
            'uploader': 'denden2',
            'uploader_id': '1392194',
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        # "New" HTML5 video
        # md5 is unstable
        'url': 'http://www.nicovideo.jp/watch/sm31464864',
        'info_dict': {
            'id': 'sm31464864',
            'ext': 'mp4',
            'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
            'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
            'timestamp': 1498514060,
            'upload_date': '20170626',
            'uploader': 'ゲスト',
            'uploader_id': '40826363',
            'thumbnail': r're:https?://.*',
            'duration': 198,
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        # Video without owner
        'url': 'http://www.nicovideo.jp/watch/sm18238488',
        'md5': 'd265680a1f92bdcbbd2a507fc9e78a9e',
        'info_dict': {
            'id': 'sm18238488',
            'ext': 'mp4',
            'title': '【実写版】ミュータントタートルズ',
            'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
            'timestamp': 1341160408,
            'upload_date': '20120701',
            'uploader': None,
            'uploader_id': None,
            'thumbnail': r're:https?://.*',
            'duration': 5271,
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
        'only_matching': True,
    }, {
        # DMC video with heartbeat
        'url': 'https://www.nicovideo.jp/watch/sm34815188',
        'md5': '9360c6e1f1519d7759e2fe8e1326ae83',
        'info_dict': {
            'id': 'sm34815188',
            'ext': 'mp4',
            'title': 'md5:aee93e9f3366db72f902f6cd5d389cb7',
            'description': 'md5:7b9149fc7a00ab053cafaf5c19662704',
            'thumbnail': r're:https?://.*',
            'uploader': 'md5:2762e18fa74dbb40aa1ad27c6291ee32',
            'uploader_id': '67449889',
            'upload_date': '20190322',
            'timestamp': int,  # timestamp is unstable
            'duration': 1082.0,
            'view_count': int,
            'comment_count': int,
        },
    }]

    _VALID_URL = r'https?://(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'

    # Headers required by the nvapi endpoints; without them requests are rejected.
    _API_HEADERS = {
        'X-Frontend-ID': '6',
        'X-Frontend-Version': '0'
    }

    def _real_initialize(self):
        """Log in (if credentials are configured) before any extraction."""
        self._login()

    def _login(self):
        """Authenticate against account.nicovideo.jp.

        Returns True on success or when no credentials are configured;
        returns False (after emitting a warning) on a failed login.
        """
        username, password = self._get_login_info()
        # No authentication to be performed
        if not username:
            return True

        # Log in
        login_ok = True
        login_form_strs = {
            'mail_tel': username,
            'password': password,
        }
        urlh = self._request_webpage(
            'https://account.nicovideo.jp/api/v1/login', None,
            note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata(login_form_strs))
        if urlh is False:
            login_ok = False
        else:
            # A failed login redirects back with message=cant_login in the query.
            parts = compat_urllib_parse_urlparse(urlh.geturl())
            if compat_parse_qs(parts.query).get('message', [None])[0] == 'cant_login':
                login_ok = False
        if not login_ok:
            self._downloader.report_warning('unable to log in: bad username or password')
        return login_ok

    def _get_heartbeat_info(self, info_dict):
        """Negotiate a DMC session for a `niconico_dmc:` pseudo-URL.

        Resolves the real content URL by POSTing a session request to the
        DMC session API, and returns (info_dict, heartbeat_info_dict) where
        the heartbeat dict tells the downloader how to keep the session
        alive (URL, payload, interval, and a `ping` callable).
        """
        # Pseudo-URL format: niconico_dmc:<video_id>/<video_src_id>/<audio_src_id>
        video_id, video_src_id, audio_src_id = info_dict['url'].split(':')[1].split('/')

        # Re-fetch the watch page if the cached API payload is unavailable.
        api_data = (
            info_dict.get('_api_data')
            or self._parse_json(
                self._html_search_regex(
                    'data-api-data="([^"]+)"',
                    self._download_webpage('http://www.nicovideo.jp/watch/' + video_id, video_id),
                    'API data', default='{}'),
                video_id))

        session_api_data = try_get(api_data, lambda x: x['media']['delivery']['movie']['session'])
        session_api_endpoint = try_get(session_api_data, lambda x: x['urls'][0])

        def ping():
            # Tracking call the site expects before playback; a non-200 status
            # is only warned about since the download may still succeed.
            status = try_get(
                self._download_json(
                    'https://nvapi.nicovideo.jp/v1/2ab0cbaa/watch', video_id,
                    query={'t': try_get(api_data, lambda x: x['media']['delivery']['trackingId'])},
                    note='Acquiring permission for downloading video',
                    headers=self._API_HEADERS),
                lambda x: x['meta']['status'])
            if status != 200:
                self.report_warning('Failed to acquire permission for playing video. The video may not download.')

        # The session API expects literal 'yes'/'no' strings, not booleans.
        yesno = lambda x: 'yes' if x else 'no'

        # m3u8 (encryption)
        if try_get(api_data, lambda x: x['media']['delivery']['encryption']) is not None:
            protocol = 'm3u8'
            encryption = self._parse_json(session_api_data['token'], video_id)['hls_encryption']
            session_api_http_parameters = {
                'parameters': {
                    'hls_parameters': {
                        'encryption': {
                            encryption: {
                                'encrypted_key': try_get(api_data, lambda x: x['media']['delivery']['encryption']['encryptedKey']),
                                'key_uri': try_get(api_data, lambda x: x['media']['delivery']['encryption']['keyUri'])
                            }
                        },
                        'transfer_preset': '',
                        'use_ssl': yesno(session_api_endpoint['isSsl']),
                        'use_well_known_port': yesno(session_api_endpoint['isWellKnownPort']),
                        'segment_duration': 6000,
                    }
                }
            }
        # http
        else:
            protocol = 'http'
            session_api_http_parameters = {
                'parameters': {
                    'http_output_download_parameters': {
                        'use_ssl': yesno(session_api_endpoint['isSsl']),
                        'use_well_known_port': yesno(session_api_endpoint['isWellKnownPort']),
                    }
                }
            }

        session_response = self._download_json(
            session_api_endpoint['url'], video_id,
            query={'_format': 'json'},
            headers={'Content-Type': 'application/json'},
            note='Downloading JSON metadata for %s' % info_dict['format_id'],
            data=json.dumps({
                'session': {
                    'client_info': {
                        'player_id': session_api_data.get('playerId'),
                    },
                    'content_auth': {
                        'auth_type': try_get(session_api_data, lambda x: x['authTypes'][session_api_data['protocols'][0]]),
                        'content_key_timeout': session_api_data.get('contentKeyTimeout'),
                        'service_id': 'nicovideo',
                        'service_user_id': session_api_data.get('serviceUserId')
                    },
                    'content_id': session_api_data.get('contentId'),
                    'content_src_id_sets': [{
                        'content_src_ids': [{
                            'src_id_to_mux': {
                                'audio_src_ids': [audio_src_id],
                                'video_src_ids': [video_src_id],
                            }
                        }]
                    }],
                    'content_type': 'movie',
                    'content_uri': '',
                    'keep_method': {
                        'heartbeat': {
                            'lifetime': session_api_data.get('heartbeatLifetime')
                        }
                    },
                    'priority': session_api_data.get('priority'),
                    'protocol': {
                        'name': 'http',
                        'parameters': {
                            'http_parameters': session_api_http_parameters
                        }
                    },
                    'recipe_id': session_api_data.get('recipeId'),
                    'session_operation_auth': {
                        'session_operation_auth_by_signature': {
                            'signature': session_api_data.get('signature'),
                            'token': session_api_data.get('token'),
                        }
                    },
                    'timing_constraint': 'unlimited'
                }
            }).encode())

        info_dict['url'] = session_response['data']['session']['content_uri']
        info_dict['protocol'] = protocol

        # get heartbeat info
        heartbeat_info_dict = {
            'url': session_api_endpoint['url'] + '/' + session_response['data']['session']['id'] + '?_format=json&_method=PUT',
            'data': json.dumps(session_response['data']),
            # interval, convert milliseconds to seconds, then halve to make a buffer.
            'interval': float_or_none(session_api_data.get('heartbeatLifetime'), scale=3000),
            'ping': ping
        }

        return info_dict, heartbeat_info_dict

    def _extract_format_for_quality(self, api_data, video_id, audio_quality, video_quality):
        """Build one DMC format dict for an (audio, video) quality pair.

        The returned 'url' is a `niconico_dmc:` pseudo-URL later resolved by
        _get_heartbeat_info; codec/bitrate/resolution are taken from the
        quality metadata with a fallback to values parsed out of the id.
        """
        def parse_format_id(id_code):
            # Quality ids look like e.g. "archive_h264_600kbps_360p_..."; pull
            # out codec / bitrate / resolution when present.
            mobj = re.match(r'''(?x)
                (?:archive_)?
                (?:(?P<codec>[^_]+)_)?
                (?:(?P<br>[\d]+)kbps_)?
                (?:(?P<res>[\d+]+)p_)?
            ''', '%s_' % id_code)
            return mobj.groupdict() if mobj else {}

        protocol = 'niconico_dmc'
        format_id = '-'.join(map(lambda s: remove_start(s['id'], 'archive_'), [video_quality, audio_quality]))
        vdict = parse_format_id(video_quality['id'])
        adict = parse_format_id(audio_quality['id'])
        resolution = try_get(video_quality, lambda x: x['metadata']['resolution'], dict) or {'height': vdict.get('res')}
        vbr = try_get(video_quality, lambda x: x['metadata']['bitrate'], float)

        return {
            'url': '%s:%s/%s/%s' % (protocol, video_id, video_quality['id'], audio_quality['id']),
            'format_id': format_id,
            'format_note': 'DMC %s' % try_get(video_quality, lambda x: x['metadata']['label'], compat_str),
            'ext': 'mp4',  # Session API are used in HTML5, which always serves mp4
            'vcodec': vdict.get('codec'),
            'acodec': adict.get('codec'),
            'vbr': float_or_none(vbr, 1000) or float_or_none(vdict.get('br')),
            'abr': float_or_none(audio_quality.get('bitrate'), 1000) or float_or_none(adict.get('br')),
            'height': int_or_none(resolution.get('height', vdict.get('res'))),
            'width': int_or_none(resolution.get('width')),
            'quality': -2 if 'low' in format_id else -1,  # Default quality value is -1
            'protocol': protocol,
            'http_headers': {
                'Origin': 'https://www.nicovideo.jp',
                'Referer': 'https://www.nicovideo.jp/watch/' + video_id,
            }
        }

    def _real_extract(self, url):
        """Extract formats and metadata for a single Niconico video."""
        video_id = self._match_id(url)

        # Get video webpage for API data.
        webpage, handle = self._download_webpage_handle(
            'http://www.nicovideo.jp/watch/' + video_id, video_id)
        if video_id.startswith('so'):
            # Channel ('so') ids redirect to a numeric id; use the final one.
            video_id = self._match_id(handle.geturl())

        api_data = self._parse_json(self._html_search_regex(
            'data-api-data="([^"]+)"', webpage,
            'API data', default='{}'), video_id)

        def get_video_info_web(items):
            # Lookup in the embedded JSON payload; `items` may be a list of
            # candidate keys (first present wins, via dict_get).
            return dict_get(api_data['video'], items)

        # Get video info
        video_info_xml = self._download_xml(
            'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id,
            video_id, note='Downloading video info page')

        def get_video_info_xml(items):
            # Lookup in the getthumbinfo XML; first non-empty match wins.
            if not isinstance(items, list):
                items = [items]
            for item in items:
                ret = xpath_text(video_info_xml, './/' + item)
                if ret:
                    return ret

        if get_video_info_xml('error'):
            error_code = get_video_info_xml('code')

            if error_code == 'DELETED':
                raise ExtractorError('The video has been deleted.',
                                     expected=True)
            elif error_code == 'NOT_FOUND':
                raise ExtractorError('The video is not found.',
                                     expected=True)
            elif error_code == 'COMMUNITY':
                # Not fatal: community-only videos may still extract below.
                self.to_screen('%s: The video is community members only.' % video_id)
            else:
                raise ExtractorError('%s reports error: %s' % (self.IE_NAME, error_code))

        # Start extracting video formats
        formats = []

        # Get HTML5 videos info
        quality_info = try_get(api_data, lambda x: x['media']['delivery']['movie'])
        if not quality_info:
            raise ExtractorError('The video can\'t be downloaded', expected=True)

        # One format per available (audio, video) quality combination.
        for audio_quality in quality_info.get('audios') or {}:
            for video_quality in quality_info.get('videos') or {}:
                if not audio_quality.get('isAvailable') or not video_quality.get('isAvailable'):
                    continue
                formats.append(self._extract_format_for_quality(
                    api_data, video_id, audio_quality, video_quality))

        # Get flv/swf info
        timestamp = None
        video_real_url = try_get(api_data, lambda x: x['video']['smileInfo']['url'])
        if video_real_url:
            is_economy = video_real_url.endswith('low')

            if is_economy:
                self.report_warning('Site is currently in economy mode! You will only have access to lower quality streams')

            # Invoking ffprobe to determine resolution
            pp = FFmpegPostProcessor(self._downloader)
            cookies = self._get_cookies('https://nicovideo.jp').output(header='', sep='; path=/; domain=nicovideo.jp;\n')

            self.to_screen('%s: %s' % (video_id, 'Checking smile format with ffprobe'))

            try:
                metadata = pp.get_metadata_object(video_real_url, ['-cookies', cookies])
            except PostProcessingError as err:
                raise ExtractorError(err.msg, expected=True)

            v_stream = a_stream = {}

            # Some complex swf files doesn't have video stream (e.g. nm4809023)
            for stream in metadata['streams']:
                if stream['codec_type'] == 'video':
                    v_stream = stream
                elif stream['codec_type'] == 'audio':
                    a_stream = stream

            # Community restricted videos seem to have issues with the thumb API not returning anything at all
            filesize = int(
                (get_video_info_xml('size_high') if not is_economy else get_video_info_xml('size_low'))
                or metadata['format']['size']
            )
            # NOTE(review): parses as (movie_type or 'mp4') if ... else ... due
            # to precedence; presumably intentional — confirm before touching.
            extension = (
                get_video_info_xml('movie_type')
                or 'mp4' if 'mp4' in metadata['format']['format_name'] else metadata['format']['format_name']
            )
            # 'creation_time' tag on video stream of re-encoded SMILEVIDEO mp4 files are '1970-01-01T00:00:00.000000Z'.
            timestamp = (
                parse_iso8601(get_video_info_web('first_retrieve'))
                or unified_timestamp(get_video_info_web('postedDateTime'))
            )
            # NOTE(review): same precedence pattern as `extension` above.
            metadata_timestamp = (
                parse_iso8601(try_get(v_stream, lambda x: x['tags']['creation_time']))
                or timestamp if extension != 'mp4' else 0
            )

            # According to compconf, smile videos from pre-2017 are always better quality than their DMC counterparts
            smile_threshold_timestamp = parse_iso8601('2016-12-08T00:00:00+09:00')

            # NOTE(review): raises TypeError if both timestamp sources above
            # were missing (timestamp is None) — confirm this cannot happen.
            is_source = timestamp < smile_threshold_timestamp or metadata_timestamp > 0

            # If movie file size is unstable, old server movie is not source movie.
            if filesize > 1:
                formats.append({
                    'url': video_real_url,
                    'format_id': 'smile' if not is_economy else 'smile_low',
                    'format_note': 'SMILEVIDEO source' if not is_economy else 'SMILEVIDEO low quality',
                    'ext': extension,
                    'container': extension,
                    'vcodec': v_stream.get('codec_name'),
                    'acodec': a_stream.get('codec_name'),
                    # Some complex swf files doesn't have total bit rate metadata (e.g. nm6049209)
                    'tbr': int_or_none(metadata['format'].get('bit_rate'), scale=1000),
                    'vbr': int_or_none(v_stream.get('bit_rate'), scale=1000),
                    'abr': int_or_none(a_stream.get('bit_rate'), scale=1000),
                    'height': int_or_none(v_stream.get('height')),
                    'width': int_or_none(v_stream.get('width')),
                    'source_preference': 5 if not is_economy else -2,
                    'quality': 5 if is_source and not is_economy else None,
                    'filesize': filesize
                })

        self._sort_formats(formats)

        # Start extracting information
        title = (
            get_video_info_xml('title')  # prefer to get the untranslated original title
            or get_video_info_web(['originalTitle', 'title'])
            or self._og_search_title(webpage, default=None)
            or self._html_search_regex(
                r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
                webpage, 'video title'))

        watch_api_data_string = self._html_search_regex(
            r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
            webpage, 'watch api data', default=None)
        watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
        video_detail = watch_api_data.get('videoDetail', {})

        thumbnail = (
            self._html_search_regex(r'<meta property="og:image" content="([^"]+)">', webpage, 'thumbnail data', default=None)
            or dict_get(  # choose highest from 720p to 240p
                get_video_info_web('thumbnail'),
                ['ogp', 'player', 'largeUrl', 'middleUrl', 'url'])
            or self._html_search_meta('image', webpage, 'thumbnail', default=None)
            or video_detail.get('thumbnail'))

        description = get_video_info_web('description')

        # `timestamp` may already be set from the smile branch above;
        # otherwise fall back through several page/API sources.
        if not timestamp:
            match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
            if match:
                timestamp = parse_iso8601(match.replace('+', ':00+'))
        if not timestamp and video_detail.get('postedAt'):
            timestamp = parse_iso8601(
                video_detail['postedAt'].replace('/', '-'),
                delimiter=' ', timezone=datetime.timedelta(hours=9))
        timestamp = timestamp or try_get(api_data, lambda x: parse_iso8601(x['video']['registeredAt']))

        view_count = int_or_none(get_video_info_web(['view_counter', 'viewCount']))
        if not view_count:
            match = self._html_search_regex(
                r'>Views: <strong[^>]*>([^<]+)</strong>',
                webpage, 'view count', default=None)
            if match:
                view_count = int_or_none(match.replace(',', ''))
        view_count = (
            view_count
            or video_detail.get('viewCount')
            or try_get(api_data, lambda x: x['video']['count']['view']))

        comment_count = (
            int_or_none(get_video_info_web('comment_num'))
            or video_detail.get('commentCount')
            or try_get(api_data, lambda x: x['video']['count']['comment']))
        if not comment_count:
            match = self._html_search_regex(
                r'>Comments: <strong[^>]*>([^<]+)</strong>',
                webpage, 'comment count', default=None)
            if match:
                comment_count = int_or_none(match.replace(',', ''))

        duration = (parse_duration(
            get_video_info_web('length')
            or self._html_search_meta(
                'video:duration', webpage, 'video duration', default=None))
            or video_detail.get('length')
            or get_video_info_web('duration'))

        webpage_url = get_video_info_web('watch_url') or url

        # for channel movie and community movie
        channel_id = try_get(
            api_data,
            (lambda x: x['channel']['globalId'],
             lambda x: x['community']['globalId']))
        channel = try_get(
            api_data,
            (lambda x: x['channel']['name'],
             lambda x: x['community']['name']))

        # Note: cannot use api_data.get('owner', {}) because owner may be set to "null"
        # in the JSON, which will cause None to be returned instead of {}.
        owner = try_get(api_data, lambda x: x.get('owner'), dict) or {}
        uploader_id = str_or_none(
            get_video_info_web(['ch_id', 'user_id'])
            or owner.get('id')
            or channel_id
        )
        uploader = (
            get_video_info_web(['ch_name', 'user_nickname'])
            or owner.get('nickname')
            or channel
        )

        return {
            'id': video_id,
            '_api_data': api_data,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'description': description,
            'uploader': uploader,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'channel': channel,
            'channel_id': channel_id,
            'view_count': view_count,
            'comment_count': comment_count,
            'duration': duration,
            'webpage_url': webpage_url,
        }
  564. class NiconicoPlaylistIE(InfoExtractor):
  565. _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/(?:user/\d+/|my/)?mylist/(?P<id>\d+)'
  566. _TESTS = [{
  567. 'url': 'http://www.nicovideo.jp/mylist/27411728',
  568. 'info_dict': {
  569. 'id': '27411728',
  570. 'title': 'AKB48のオールナイトニッポン',
  571. 'description': 'md5:d89694c5ded4b6c693dea2db6e41aa08',
  572. 'uploader': 'のっく',
  573. 'uploader_id': '805442',
  574. },
  575. 'playlist_mincount': 225,
  576. }, {
  577. 'url': 'https://www.nicovideo.jp/user/805442/mylist/27411728',
  578. 'only_matching': True,
  579. }]
  580. _API_HEADERS = {
  581. 'X-Frontend-ID': '6',
  582. 'X-Frontend-Version': '0'
  583. }
  584. def _real_extract(self, url):
  585. list_id = self._match_id(url)
  586. def get_page_data(pagenum, pagesize):
  587. return self._download_json(
  588. 'http://nvapi.nicovideo.jp/v2/mylists/' + list_id, list_id,
  589. query={'page': 1 + pagenum, 'pageSize': pagesize},
  590. headers=self._API_HEADERS).get('data').get('mylist')
  591. data = get_page_data(0, 1)
  592. title = data.get('name')
  593. description = data.get('description')
  594. uploader = data.get('owner').get('name')
  595. uploader_id = data.get('owner').get('id')
  596. def pagefunc(pagenum):
  597. data = get_page_data(pagenum, 25)
  598. return ({
  599. '_type': 'url',
  600. 'url': 'http://www.nicovideo.jp/watch/' + item.get('watchId'),
  601. } for item in data.get('items'))
  602. return {
  603. '_type': 'playlist',
  604. 'id': list_id,
  605. 'title': title,
  606. 'description': description,
  607. 'uploader': uploader,
  608. 'uploader_id': uploader_id,
  609. 'entries': OnDemandPagedList(pagefunc, 25),
  610. }
  611. class NicovideoSearchBaseIE(InfoExtractor):
  612. _MAX_RESULTS = float('inf')
  613. def _entries(self, url, item_id, query=None, note='Downloading page %(page)s'):
  614. query = query or {}
  615. pages = [query['page']] if 'page' in query else itertools.count(1)
  616. for page_num in pages:
  617. query['page'] = str(page_num)
  618. webpage = self._download_webpage(url, item_id, query=query, note=note % {'page': page_num})
  619. results = re.findall(r'(?<=data-video-id=)["\']?(?P<videoid>.+?)(?=["\'])', webpage)
  620. for item in results:
  621. yield self.url_result('http://www.nicovideo.jp/watch/%s' % item, 'Niconico', item)
  622. if not results:
  623. break
  624. def _get_n_results(self, query, n):
  625. entries = self._entries(self._proto_relative_url('//www.nicovideo.jp/search/%s' % query), query)
  626. if n < self._MAX_RESULTS:
  627. entries = itertools.islice(entries, 0, n)
  628. return self.playlist_result(entries, query, query)
  629. class NicovideoSearchIE(NicovideoSearchBaseIE, SearchInfoExtractor):
  630. IE_DESC = 'Nico video search'
  631. IE_NAME = 'nicovideo:search'
  632. _SEARCH_KEY = 'nicosearch'
  633. def _search_results(self, query):
  634. return self._entries(
  635. self._proto_relative_url('//www.nicovideo.jp/search/%s' % query), query)
  636. class NicovideoSearchURLIE(NicovideoSearchBaseIE):
  637. IE_NAME = '%s_url' % NicovideoSearchIE.IE_NAME
  638. IE_DESC = 'Nico video search URLs'
  639. _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/search/(?P<id>[^?#&]+)?'
  640. _TESTS = [{
  641. 'url': 'http://www.nicovideo.jp/search/sm9',
  642. 'info_dict': {
  643. 'id': 'sm9',
  644. 'title': 'sm9'
  645. },
  646. 'playlist_mincount': 40,
  647. }, {
  648. 'url': 'https://www.nicovideo.jp/search/sm9?sort=h&order=d&end=2020-12-31&start=2020-01-01',
  649. 'info_dict': {
  650. 'id': 'sm9',
  651. 'title': 'sm9'
  652. },
  653. 'playlist_count': 31,
  654. }]
  655. def _real_extract(self, url):
  656. query = self._match_id(url)
  657. return self.playlist_result(self._entries(url, query), query, query)
class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor):
    """Date-sorted search (`nicosearchdate` prefix).

    The site caps results at _MAX_PAGES pages per query, so large result
    sets are retrieved by recursively bisecting the date interval until
    each sub-interval fits within the page limit.
    """

    IE_DESC = 'Nico video search, newest first'
    IE_NAME = '%s:date' % NicovideoSearchIE.IE_NAME
    _SEARCH_KEY = 'nicosearchdate'

    _TESTS = [{
        'url': 'nicosearchdateall:a',
        'info_dict': {
            'id': 'a',
            'title': 'a'
        },
        'playlist_mincount': 1610,
    }]

    # Niconico launched in 2007, so no uploads can predate this.
    _START_DATE = datetime.date(2007, 1, 1)
    _RESULTS_PER_PAGE = 32
    _MAX_PAGES = 50

    def _entries(self, url, item_id, start_date=None, end_date=None):
        start_date, end_date = start_date or self._START_DATE, end_date or datetime.datetime.now().date()

        # If the last page has a full page of videos, we need to break down the query interval further
        last_page_len = len(list(self._get_entries_for_date(
            url, item_id, start_date, end_date, self._MAX_PAGES,
            note='Checking number of videos from {0} to {1}'.format(start_date, end_date))))
        if (last_page_len == self._RESULTS_PER_PAGE and start_date != end_date):
            # Interval too dense: recurse on both halves, newest half first
            # to preserve the newest-first ordering of the overall stream.
            midpoint = start_date + ((end_date - start_date) // 2)
            for entry in itertools.chain(
                    iter(self._entries(url, item_id, midpoint, end_date)),
                    iter(self._entries(url, item_id, start_date, midpoint))):
                yield entry
        else:
            self.to_screen('{0}: Downloading results from {1} to {2}'.format(item_id, start_date, end_date))
            for entry in iter(self._get_entries_for_date(
                    url, item_id, start_date, end_date, note=' Downloading page %(page)s')):
                yield entry

    def _get_entries_for_date(self, url, item_id, start_date, end_date=None, page_num=None, note=None):
        """Delegate to the base scraper with a date-bounded query.

        `sort=f, order=d` requests upload-date descending; if `page_num`
        is given only that single page is fetched (used for the density probe).
        """
        query = {
            'start': compat_str(start_date),
            'end': compat_str(end_date or start_date),
            'sort': 'f',
            'order': 'd',
        }
        if page_num:
            query['page'] = compat_str(page_num)

        for entry in iter(super(NicovideoSearchDateIE, self)._entries(url, item_id, query=query, note=note)):
            yield entry
  701. class NiconicoUserIE(InfoExtractor):
  702. _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/user/(?P<id>\d+)/?(?:$|[#?])'
  703. _TEST = {
  704. 'url': 'https://www.nicovideo.jp/user/419948',
  705. 'info_dict': {
  706. 'id': '419948',
  707. },
  708. 'playlist_mincount': 101,
  709. }
  710. _API_URL = "https://nvapi.nicovideo.jp/v1/users/%s/videos?sortKey=registeredAt&sortOrder=desc&pageSize=%s&page=%s"
  711. _PAGE_SIZE = 100
  712. _API_HEADERS = {
  713. 'X-Frontend-ID': '6',
  714. 'X-Frontend-Version': '0'
  715. }
  716. def _entries(self, list_id):
  717. total_count = 1
  718. count = page_num = 0
  719. while count < total_count:
  720. json_parsed = self._download_json(
  721. self._API_URL % (list_id, self._PAGE_SIZE, page_num + 1), list_id,
  722. headers=self._API_HEADERS,
  723. note='Downloading JSON metadata%s' % (' page %d' % page_num if page_num else ''))
  724. if not page_num:
  725. total_count = int_or_none(json_parsed['data'].get('totalCount'))
  726. for entry in json_parsed["data"]["items"]:
  727. count += 1
  728. yield self.url_result('https://www.nicovideo.jp/watch/%s' % entry['id'])
  729. page_num += 1
  730. def _real_extract(self, url):
  731. list_id = self._match_id(url)
  732. return self.playlist_result(self._entries(list_id), list_id)